X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firgopt.c;h=200dfdbf28c232398d1a95c382b33f965b28db0b;hb=fba3706965dd1f7e0cd8fc13a79a608966ca2527;hp=287ef9fed0b1316c360cadce42dad7d93c65aee3;hpb=e818ee8a4bc281ea5a1824d07bbc38fc64d9c5d4;p=libfirm

diff --git a/ir/ir/irgopt.c b/ir/ir/irgopt.c
index 287ef9fed..200dfdbf2 100644
--- a/ir/ir/irgopt.c
+++ b/ir/ir/irgopt.c
@@ -1,18 +1,22 @@
-/* Coyright (C) 1998 - 2002 by Universitaet Karlsruhe
-* All rights reserved.
-*
-* Author: Christian Schaefer, Goetz Lindenmaier, Sebastian Felis
-*
-* Optimizations for a whole ir graph, i.e., a procedure.
-*/
+/*
+ * Project:     libFIRM
+ * File name:   ir/ir/irgopt.c
+ * Purpose:     Optimizations for a whole ir graph, i.e., a procedure.
+ * Author:      Christian Schaefer, Goetz Lindenmaier
+ * Modified by: Sebastian Felis
+ * Created:
+ * CVS-ID:      $Id$
+ * Copyright:   (c) 1998-2003 Universität Karlsruhe
+ * Licence:     This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
+ */
 
-/* $Id$ */
 
 #ifdef HAVE_CONFIG_H
 # include <config.h>
 #endif
 
 # include <assert.h>
+# include <stdlib.h>
 
 # include "irprog.h"
 # include "irgopt.h"
@@ -21,7 +25,6 @@
 # include "iropt_t.h"
 # include "irgwalk.h"
 # include "ircons.h"
-# include "misc.h"
 # include "irgmod.h"
 # include "array.h"
 # include "pset.h"
@@ -46,10 +49,13 @@ static void init_link (ir_node *n, void *env) {
 static void optimize_in_place_wrapper (ir_node *n, void *env) {
   int i;
-  ir_node *optimized;
+  ir_node *optimized, *old;
 
   for (i = 0; i < get_irn_arity(n); i++) {
-    optimized = optimize_in_place_2(get_irn_n(n, i));
+    /* get_irn_n skips Id nodes, so the comparison old != optimized does not
+       show all optimizations.  Therefore always set the new predecessor. */
+    old = get_irn_n(n, i);
+    optimized = optimize_in_place_2(old);
     set_irn_n(n, i, optimized);
   }
 
@@ -207,7 +213,7 @@ copy_preds (ir_node *n, void *env) {
     for (i = 0; i < get_irn_arity(n); i++)
       if (get_irn_opcode(get_irn_n(n, i)) != iro_Bad) {
         set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
-        //if (is_backedge(n, i)) set_backedge(nn, j);
+        /*if (is_backedge(n, i)) set_backedge(nn, j);*/
         j++;
       }
     /* repair the block visited flag from above misuse. Repair it in both
@@ -231,7 +237,7 @@ copy_preds (ir_node *n, void *env) {
     for (i = 0; i < get_irn_arity(n); i++)
       if (get_irn_opcode(get_irn_n(block, i)) != iro_Bad) {
         set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
-        //if (is_backedge(n, i)) set_backedge(nn, j);
+        /*if (is_backedge(n, i)) set_backedge(nn, j);*/
         j++;
       }
     /* If the pre walker reached this Phi after the post walker visited the
@@ -253,7 +259,7 @@ copy_preds (ir_node *n, void *env) {
 
 /* Copies the graph recursively, compacts the keepalive of the end node. */
 static void
-copy_graph () {
+copy_graph (void) {
   ir_node *oe, *ne; /* old end, new end */
   ir_node *ka;      /* keep alive */
   int i;
@@ -309,7 +315,7 @@
    Then fixes the fields in current_ir_graph containing nodes of the
    graph. */
 static void
-copy_graph_env () {
+copy_graph_env (void) {
   ir_node *old_end;
   /* Not all nodes remembered in current_ir_graph might be reachable
      from the end node.  Assure their link is set to NULL, so that
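
The () to (void) signature changes in the two hunks above (and for
place_early and place_late further down) are more than style: in C89 an
empty parameter list declares a function with unspecified parameters, so a
mistaken call such as copy_graph(42) goes undiagnosed, while (void) is a
true prototype. A minimal sketch of the difference, with hypothetical
functions f and g:

    static void f();     /* unspecified parameters: f(1, 2) still compiles */
    static void g(void); /* real prototype: g(1) is a constraint violation */
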
@@ -555,10 +561,12 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
       get_Call_type(call))); */
   assert(get_type_tpop(get_Call_type(call)) == type_method);
 
-  if (called_graph == current_ir_graph) return;
-
+  if (called_graph == current_ir_graph) {
+    set_optimize(rem_opt);
+    return;
+  }
 
-/* --
+  /* --
   the procedure and later replaces the Start node of the called graph.
   Post_call is the old Call node and collects the results of the called
   graph. Both will end up being a tuple. -- */
@@ -573,7 +581,7 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
   pre_call = new_Tuple(5, in);
   post_call = call;
 
-/* --
+  /* --
   The new block gets the ins of the old block, pre_call and all its
   predecessors and all Phi nodes. -- */
   part_block(pre_call);
@@ -753,38 +761,41 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
   free(res_pred);
   free(cf_pred);
 
-/* --
-   If the exception control flow from the Call directly branched to the
-   end block we now have the following control flow predecessor pattern:
-   ProjX -> Tuple -> Jmp.
-   We must remove the Jmp along with it's empty block and add Jmp's
-   predecessors as predecessors of this end block. -- */
-  /* find the problematic predecessor of the end block. */
-  end_bl = get_irg_end_block(current_ir_graph);
-  for (i = 0; i < get_Block_n_cfgpreds(end_bl); i++) {
-    cf_op = get_Block_cfgpred(end_bl, i);
-    if (get_irn_op(cf_op) == op_Proj) {
-      cf_op = get_Proj_pred(cf_op);
-      if (get_irn_op(cf_op) == op_Tuple) {
-        cf_op = get_Tuple_pred(cf_op, 1);
-        assert(get_irn_op(cf_op) == op_Jmp);
-        break;
+  if (n_exc > 0) {
+    /* --  If the exception control flow from the inlined Call directly
+       branched to the end block we now have the following control
+       flow predecessor pattern:  ProjX -> Tuple -> Jmp.  We must
+       remove the Jmp along with its empty block and add Jmp's
+       predecessors as predecessors of this end block.  No problem if
+       there is no exception, because then Bad branches to End, which
+       is fine.  -- */
+    /* find the problematic predecessor of the end block. */
+    end_bl = get_irg_end_block(current_ir_graph);
+    for (i = 0; i < get_Block_n_cfgpreds(end_bl); i++) {
+      cf_op = get_Block_cfgpred(end_bl, i);
+      if (get_irn_op(cf_op) == op_Proj) {
+        cf_op = get_Proj_pred(cf_op);
+        if (get_irn_op(cf_op) == op_Tuple) {
+          cf_op = get_Tuple_pred(cf_op, 1);
+          assert(get_irn_op(cf_op) == op_Jmp);
+          break;
+        }
       }
     }
-  }
-  /* repair */
-  if (i < get_Block_n_cfgpreds(end_bl)) {
-    bl = get_nodes_Block(cf_op);
-    arity = get_Block_n_cfgpreds(end_bl) + get_Block_n_cfgpreds(bl) - 1;
-    cf_pred = (ir_node **) malloc (arity * sizeof (ir_node *));
-    for (j = 0; j < i; j++)
-      cf_pred[j] = get_Block_cfgpred(end_bl, j);
-    for (j = j; j < i + get_Block_n_cfgpreds(bl); j++)
-      cf_pred[j] = get_Block_cfgpred(bl, j-i);
-    for (j = j; j < arity; j++)
-      cf_pred[j] = get_Block_cfgpred(end_bl, j-get_Block_n_cfgpreds(bl) +1);
-    set_irn_in(end_bl, arity, cf_pred);
-    free(cf_pred);
+    /* repair */
+    if (i < get_Block_n_cfgpreds(end_bl)) {
+      bl = get_nodes_Block(cf_op);
+      arity = get_Block_n_cfgpreds(end_bl) + get_Block_n_cfgpreds(bl) - 1;
+      cf_pred = (ir_node **) malloc (arity * sizeof (ir_node *));
+      for (j = 0; j < i; j++)
+        cf_pred[j] = get_Block_cfgpred(end_bl, j);
+      for (j = j; j < i + get_Block_n_cfgpreds(bl); j++)
+        cf_pred[j] = get_Block_cfgpred(bl, j-i);
+      for (j = j; j < arity; j++)
+        cf_pred[j] = get_Block_cfgpred(end_bl, j-get_Block_n_cfgpreds(bl) +1);
+      set_irn_in(end_bl, arity, cf_pred);
+      free(cf_pred);
+    }
   }
 
   /* --  Turn cse back on. -- */
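
The three for (j = j; ...) loops in the repair code above deliberately
carry j over from one loop to the next: together they fill cf_pred with
entries 0..i-1 of the end block, then all predecessors of the Jmp's block
in place of entry i, then the remaining end block entries. A minimal
stand-alone sketch of the same splice, with hypothetical names and plain
pointer arrays instead of the predecessor accessors:

    #include <string.h>

    /* dst must have room for n + m - 1 entries: end_preds[0..i-1],
       then bl_preds[0..m-1], then end_preds[i+1..n-1]. */
    static void splice_preds(void **dst, void *const *end_preds, int n,
                             int i, void *const *bl_preds, int m)
    {
      memcpy(dst,         end_preds,         i * sizeof(*dst));
      memcpy(dst + i,     bl_preds,          m * sizeof(*dst));
      memcpy(dst + i + m, end_preds + i + 1, (n - i - 1) * sizeof(*dst));
    }
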
@@ -813,8 +824,8 @@ static void collect_calls(ir_node *call, void *env) {
   if (get_irn_op(addr) == op_Const) {
     /* Check whether the constant is the pointer to a compiled entity. */
     tv = get_Const_tarval(addr);
-    if (tv->u.P.ent) {
-      called_irg = get_entity_irg(tv->u.P.ent);
+    if (tarval_to_entity(tv)) {
+      called_irg = get_entity_irg(tarval_to_entity(tv));
       if (called_irg && pos < MAX_INLINE) {
         /* The Call node calls a locally defined method.  Remember to
            inline. */
         calls[pos] = call;
@@ -856,7 +867,7 @@ void inline_small_irgs(ir_graph *irg, int size) {
       tarval *tv;
       ir_graph *callee;
       tv = get_Const_tarval(get_Call_ptr(calls[i]));
-      callee = get_entity_irg(tv->u.P.ent);
+      callee = get_entity_irg(tarval_to_entity(tv));
       if ((_obstack_memory_used(callee->obst) - obstack_room(callee->obst)) < size) {
         inline_method(calls[i], callee);
       }
@@ -941,7 +952,7 @@ place_floats_early (ir_node *n)
    Start, Call and end at pinned nodes as Store, Call.  Place_early
    places all floating nodes reachable from its argument through floating
    nodes and adds all beginnings at pinned nodes to the worklist. */
-static INLINE void place_early () {
+static INLINE void place_early (void) {
   assert(worklist);
   inc_irg_visited(current_ir_graph);
 
@@ -1095,7 +1106,7 @@ place_floats_late (ir_node *n)
   }
 }
 
-static INLINE void place_late() {
+static INLINE void place_late(void) {
   assert(worklist);
   inc_irg_visited(current_ir_graph);
 
@@ -1148,7 +1159,8 @@ void place_code(ir_graph *irg) {
 /********************************************************************/
 
 /* Removes Tuples from Block control flow predecessors.
-   Optimizes blocks with equivalent_node(). */
+   Optimizes blocks with equivalent_node().
+   Replaces n by Bad if n is unreachable control flow. */
 static void merge_blocks(ir_node *n, void *env) {
   int i;
   set_irn_link(n, NULL);
@@ -1156,22 +1168,25 @@ static void merge_blocks(ir_node *n, void *env) {
   if (get_irn_op(n) == op_Block) {
     /* Remove Tuples */
     for (i = 0; i < get_Block_n_cfgpreds(n); i++)
-      set_Block_cfgpred(n, i, skip_Tuple(get_Block_cfgpred(n, i)));
-  } else if (get_irn_mode(n) == mode_X) {
+      /* GL @@@ : is this possible? if (get_opt_normalize()) -- added; all
+         tests go through.  A different order of optimizations might
+         cause problems. */
+      if (get_opt_normalize())
+        set_Block_cfgpred(n, i, skip_Tuple(get_Block_cfgpred(n, i)));
+  } else if (get_optimize() && (get_irn_mode(n) == mode_X)) {
     /* We will soon visit a block.  Optimize it before visiting! */
     ir_node *b = get_nodes_Block(n);
     ir_node *new = equivalent_node(b);
     while (irn_not_visited(b) && (!is_Bad(new)) && (new != b)) {
-      /* We would have to run gigo if new is bad. */
-      if (!get_optimize() || (!get_opt_control_flow_straightening()
-          && !get_opt_control_flow_weak_simplification()))
-        /* how could something be optimized if flags are not set? */
-        assert(0 && "strange ?? !!");
+      /* We would have to run gigo if new is bad, so we
+         promote it directly below. */
+      assert(((b == new) || get_opt_control_flow_straightening() || get_opt_control_flow_weak_simplification()) &&
+             ("strange flag setting"));
       exchange (b, new);
       b = new;
       new = equivalent_node(b);
     }
-    if (is_Bad(new)) exchange (n, new_Bad());
+    /* GL @@@ get_opt_normalize added, 5.5.2003 */
+    if (is_Bad(new) && get_opt_normalize()) exchange (n, new_Bad());
   }
 }
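
The next hunk finally implements what the removed comment only described:
a critical control flow edge (see Muchnick, p. 407) runs from a block with
several control exits to a block with several control entries, and is
removed by placing an empty block holding a single Jmp on it. A sketch of
the detection test, using only API that already appears in this file; the
helper name is hypothetical:

    /* The i-th control flow edge into block n is critical iff n has
       several predecessors and the edge leaves its source block through
       a Proj, i.e. that block has several exits.  Illustrative only. */
    static int is_critical_cf_edge(ir_node *n, int i) {
      ir_node *pre = get_irn_n(n, i);
      return (get_irn_op(n) == op_Block) && (get_irn_arity(n) > 1) &&
             (pre != NULL) && (get_irn_op(pre) == op_Proj);
    }
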
+ */ +static void walk_critical_cf_edges(ir_node *n, void *env) { + int arity, i; + ir_node *pre, *block, **in, *jmp; + + /* Block has multiple predecessors */ + if ((op_Block == get_irn_op(n)) && + (get_irn_arity(n) > 1)) { + arity = get_irn_arity(n); + + for (i=0; iobst, 1); + /* set predecessor of new block */ + in[0] = pre; + block = new_Block(1, in); + /* insert new jmp node to new block */ + switch_block(block); + jmp = new_Jmp(); + switch_block(n); + /* set sucessor of new block */ + set_irn_n(n, i, jmp); + + } /* predecessor has multiple sucessors */ + } /* for all predecessors */ + } /* n is a block */ +} + void remove_critical_cf_edges(ir_graph *irg) { - printf("WARNING: called unimplemented function!!!\n"); + if (get_opt_critical_edges()) + irg_walk_graph(irg, NULL, walk_critical_cf_edges, NULL); }