diff --git a/ir/ir/irgopt.c b/ir/ir/irgopt.c
index f19c7724f..560d47e08 100644
--- a/ir/ir/irgopt.c
+++ b/ir/ir/irgopt.c
@@ -37,9 +37,12 @@
 #include "irloop_t.h"
 #include "irbackedge_t.h"
 #include "cgana.h"
+#include "trouts.h"
 #include "irflag_t.h"
 #include "irhooks.h"
+#include "iredges_t.h"
+#include "irtools.h"
 /* Defined in iropt.c */
 pset *new_identities (void);
@@ -55,7 +58,7 @@ static void init_link (ir_node *n, void *env) {
 }
 #if 0 /* Old version. Avoids Ids.
-   This is not necessary: we do a postwalk, and get_irn_n
+   This is not necessary: we do a post walk, and get_irn_n
    removes ids anyways. So it's much cheaper to call the
    optimization less often and use the exchange() algorithm. */
 static void
@@ -89,6 +92,7 @@ optimize_in_place_wrapper (ir_node *n, void *env) {
 static INLINE void do_local_optimize(ir_node *n) {
   /* Handle graph state */
   assert(get_irg_phase_state(current_ir_graph) != phase_building);
+
   if (get_opt_global_cse())
     set_irg_pinned(current_ir_graph, op_pin_state_floats);
   if (get_irg_outs_state(current_ir_graph) == outs_consistent)
@@ -97,8 +101,7 @@ static INLINE void do_local_optimize(ir_node *n) {
     set_irg_dom_inconsistent(current_ir_graph);
   set_irg_loopinfo_inconsistent(current_ir_graph);
-
-  /* Clean the value_table in irg for the cse. */
+  /* Clean the value_table in irg for the CSE. */
   del_identities(current_ir_graph->value_table);
   current_ir_graph->value_table = new_identities();
@@ -113,7 +116,15 @@ void local_optimize_node(ir_node *n) {
   do_local_optimize(n);
   current_ir_graph = rem;
+}
+/**
+ * Block-Walker: uses dominance depth to mark dead blocks.
+ */
+static void kill_dead_blocks(ir_node *block, void *env)
+{
+  if (get_Block_dom_depth(block) < 0)
+    set_Block_dead(block);
 }
 void
@@ -121,6 +132,9 @@ local_optimize_graph (ir_graph *irg) {
   ir_graph *rem = current_ir_graph;
   current_ir_graph = irg;
+  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
+    irg_block_walk_graph(irg, NULL, kill_dead_blocks, NULL);
+
   do_local_optimize(irg->end);
   current_ir_graph = rem;
@@ -142,11 +156,10 @@ set_new_node (ir_node *old, ir_node *new)
 }
 /**
- * Get this new node, before the old node is forgotton.
+ * Get this new node, before the old node is forgotten.
  */
 static INLINE ir_node *
-get_new_node (ir_node * n)
-{
+get_new_node (ir_node * n) {
   return n->link;
 }
@@ -210,27 +223,27 @@ static INLINE void new_backedge_info(ir_node *n) {
  *
  * Note: Also used for loop unrolling.
  */
-void copy_node (ir_node *n, void *env) {
+static void copy_node(ir_node *n, void *env) {
   ir_node *nn, *block;
   int new_arity;
-  opcode op = get_irn_opcode(n);
+  ir_op *op = get_irn_op(n);
   int copy_node_nr = env != NULL;
   /* The end node looses it's flexible in array. This doesn't matter, as
      dead node elimination builds End by hand, inlineing doesn't use the
      End node. 
*/ - /* assert(n->op == op_End || ((_ARR_DESCR(n->in))->cookie != ARR_F_MAGIC)); */ + /* assert(op == op_End || ((_ARR_DESCR(n->in))->cookie != ARR_F_MAGIC)); */ - if (op == iro_Bad) { + if (op == op_Bad) { /* node copied already */ return; - } else if (op == iro_Block) { + } else if (op == op_Block) { block = NULL; new_arity = compute_new_arity(n); n->attr.block.graph_arr = NULL; } else { block = get_nodes_block(n); - if (get_irn_opcode(n) == iro_Phi) { + if (op == op_Phi) { new_arity = compute_new_arity(block); } else { new_arity = get_irn_arity(n); @@ -239,16 +252,15 @@ void copy_node (ir_node *n, void *env) { nn = new_ir_node(get_irn_dbg_info(n), current_ir_graph, block, - get_irn_op(n), + op, get_irn_mode(n), new_arity, - get_irn_in(n)); + get_irn_in(n) + 1); /* Copy the attributes. These might point to additional data. If this was allocated on the old obstack the pointers now are dangling. This frees e.g. the memory of the graph_arr allocated in new_immBlock. */ copy_node_attr(n, nn); new_backedge_info(nn); - set_new_node(n, nn); #if DEBUG_libfirm if (copy_node_nr) { @@ -257,15 +269,14 @@ void copy_node (ir_node *n, void *env) { } #endif - /* printf("\n old node: "); DDMSG2(n); - printf(" new node: "); DDMSG2(nn); */ + set_new_node(n, nn); } /** * Copies new predecessors of old node to new node remembered in link. * Spare the Bad predecessors of Phi and Block nodes. */ -static void +void copy_preds (ir_node *n, void *env) { ir_node *nn, *block; int i, j, irn_arity; @@ -276,12 +287,12 @@ copy_preds (ir_node *n, void *env) { printf(" new node: "); DDMSG2(nn); printf(" arities: old: %d, new: %d\n", get_irn_arity(n), get_irn_arity(nn)); */ - if (get_irn_opcode(n) == iro_Block) { + if (is_Block(n)) { /* Don't copy Bad nodes. */ j = 0; irn_arity = get_irn_arity(n); for (i = 0; i < irn_arity; i++) - if (get_irn_opcode(get_irn_n(n, i)) != iro_Bad) { + if (! is_Bad(get_irn_n(n, i))) { set_irn_n (nn, j, get_new_node(get_irn_n(n, i))); /*if (is_backedge(n, i)) set_backedge(nn, j);*/ j++; @@ -306,7 +317,7 @@ copy_preds (ir_node *n, void *env) { exchange(nn, old); } } - } else if (get_irn_opcode(n) == iro_Phi) { + } else if (get_irn_op(n) == op_Phi) { /* Don't copy node if corresponding predecessor in block is Bad. The Block itself should not be Bad. */ block = get_nodes_block(n); @@ -314,7 +325,7 @@ copy_preds (ir_node *n, void *env) { j = 0; irn_arity = get_irn_arity(n); for (i = 0; i < irn_arity; i++) - if (get_irn_opcode(get_irn_n(block, i)) != iro_Bad) { + if (! is_Bad(get_irn_n(block, i))) { set_irn_n (nn, j, get_new_node(get_irn_n(n, i))); /*if (is_backedge(n, i)) set_backedge(nn, j);*/ j++; @@ -324,16 +335,16 @@ copy_preds (ir_node *n, void *env) { set_Block_block_visited(get_nodes_block(n), 0); /* Compacting the Phi's ins might generate Phis with only one predecessor. */ - if (get_irn_arity(n) == 1) - exchange(n, get_irn_n(n, 0)); + if (get_irn_arity(nn) == 1) + exchange(nn, get_irn_n(nn, 0)); } else { irn_arity = get_irn_arity(n); for (i = -1; i < irn_arity; i++) set_irn_n (nn, i, get_new_node(get_irn_n(n, i))); } - /* Now the new node is complete. We can add it to the hash table for cse. + /* Now the new node is complete. We can add it to the hash table for CSE. @@@ inlinening aborts if we identify End. Why? 
*/ - if(get_irn_op(nn) != op_End) + if (get_irn_op(nn) != op_End) add_identities (current_ir_graph->value_table, nn); } @@ -384,7 +395,7 @@ copy_graph (int copy_node_nr) { set_new_node(om, nm); /* copy the live nodes */ - irg_walk(get_nodes_block(oe), copy_node, copy_preds, (void *)copy_node_nr); + irg_walk(get_nodes_block(oe), copy_node, copy_preds, INT_TO_PTR(copy_node_nr)); /* copy_preds for the end node ... */ set_nodes_block(ne, get_new_node(get_nodes_block(oe))); @@ -398,7 +409,7 @@ copy_graph (int copy_node_nr) { (get_irn_visited(ka) < get_irg_visited(current_ir_graph))) { /* We must keep the block alive and copy everything reachable */ set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1); - irg_walk(ka, copy_node, copy_preds, (void *)copy_node_nr); + irg_walk(ka, copy_node, copy_preds, INT_TO_PTR(copy_node_nr)); add_End_keepalive(ne, get_new_node(ka)); } } @@ -411,7 +422,7 @@ copy_graph (int copy_node_nr) { if (get_irn_visited(ka) < get_irg_visited(current_ir_graph)) { /* We didn't copy the Phi yet. */ set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1); - irg_walk(ka, copy_node, copy_preds, (void *)copy_node_nr); + irg_walk(ka, copy_node, copy_preds, INT_TO_PTR(copy_node_nr)); } add_End_keepalive(ne, get_new_node(ka)); } @@ -456,19 +467,19 @@ copy_graph_env (int copy_node_nr) { free_End(old_end); set_irg_end_block (current_ir_graph, get_new_node(get_irg_end_block(current_ir_graph))); if (get_irn_link(get_irg_frame(current_ir_graph)) == NULL) { - copy_node (get_irg_frame(current_ir_graph), (void *)copy_node_nr); + copy_node (get_irg_frame(current_ir_graph), INT_TO_PTR(copy_node_nr)); copy_preds(get_irg_frame(current_ir_graph), NULL); } if (get_irn_link(get_irg_globals(current_ir_graph)) == NULL) { - copy_node (get_irg_globals(current_ir_graph), (void *)copy_node_nr); + copy_node (get_irg_globals(current_ir_graph), INT_TO_PTR(copy_node_nr)); copy_preds(get_irg_globals(current_ir_graph), NULL); } if (get_irn_link(get_irg_initial_mem(current_ir_graph)) == NULL) { - copy_node (get_irg_initial_mem(current_ir_graph), (void *)copy_node_nr); + copy_node (get_irg_initial_mem(current_ir_graph), INT_TO_PTR(copy_node_nr)); copy_preds(get_irg_initial_mem(current_ir_graph), NULL); } if (get_irn_link(get_irg_args(current_ir_graph)) == NULL) { - copy_node (get_irg_args(current_ir_graph), (void *)copy_node_nr); + copy_node (get_irg_args(current_ir_graph), INT_TO_PTR(copy_node_nr)); copy_preds(get_irg_args(current_ir_graph), NULL); } set_irg_start (current_ir_graph, get_new_node(get_irg_start(current_ir_graph))); @@ -481,13 +492,13 @@ copy_graph_env (int copy_node_nr) { set_irg_args (current_ir_graph, get_new_node(get_irg_args(current_ir_graph))); if (get_irn_link(get_irg_bad(current_ir_graph)) == NULL) { - copy_node(get_irg_bad(current_ir_graph), (void *)copy_node_nr); + copy_node(get_irg_bad(current_ir_graph), INT_TO_PTR(copy_node_nr)); copy_preds(get_irg_bad(current_ir_graph), NULL); } set_irg_bad(current_ir_graph, get_new_node(get_irg_bad(current_ir_graph))); if (get_irn_link(get_irg_no_mem(current_ir_graph)) == NULL) { - copy_node(get_irg_no_mem(current_ir_graph), (void *)copy_node_nr); + copy_node(get_irg_no_mem(current_ir_graph), INT_TO_PTR(copy_node_nr)); copy_preds(get_irg_no_mem(current_ir_graph), NULL); } set_irg_no_mem(current_ir_graph, get_new_node(get_irg_no_mem(current_ir_graph))); @@ -498,8 +509,8 @@ copy_graph_env (int copy_node_nr) { * from block nodes and the corresponding inputs from Phi nodes. 
* Merges single exit blocks with single entry blocks and removes
 * 1-input Phis.
- * Adds all new nodes to a new hash table for cse. Does not
- * perform cse, so the hash table might contain common subexpressions.
+ * Adds all new nodes to a new hash table for CSE. Does not
+ * perform CSE, so the hash table might contain common subexpressions.
 */
 void
 dead_node_elimination(ir_graph *irg) {
@@ -508,6 +519,8 @@ dead_node_elimination(ir_graph *irg) {
   struct obstack *graveyard_obst = NULL;
   struct obstack *rebirth_obst = NULL;
+  edges_init_graph(irg);
+
   /* inform statistics that we started a dead-node elimination run */
   hook_dead_node_elim_start(irg);
@@ -519,7 +532,8 @@ dead_node_elimination(ir_graph *irg) {
     /* Handle graph state */
     assert(get_irg_phase_state(current_ir_graph) != phase_building);
     free_callee_info(current_ir_graph);
-    free_outs(current_ir_graph);
+    free_irg_outs(current_ir_graph);
+    free_trouts();
     /* @@@ so far we loose loops when copying */
     free_loop_information(current_ir_graph);
@@ -554,7 +568,7 @@ dead_node_elimination(ir_graph *irg) {
 }
 /**
- * Relink bad predeseccors of a block and store the old in array to the
+ * Relink bad predecessors of a block and store the old in array to the
 * link field. This function is called by relink_bad_predecessors().
 * The array of link field starts with the block operand at position 0.
 * If block has bad predecessors, create a new in array without bad preds.
@@ -565,12 +579,12 @@ static void relink_bad_block_predecessors(ir_node *n, void *env) {
   int i, new_irn_n, old_irn_arity, new_irn_arity = 0;
   /* if link field of block is NULL, look for bad predecessors otherwise
-     this is allready done */
+     this is already done */
   if (get_irn_op(n) == op_Block &&
       get_irn_link(n) == NULL) {
     /* save old predecessors in link field (position 0 is the block operand)*/
-    set_irn_link(n, (void *)get_irn_in(n));
+    set_irn_link(n, get_irn_in(n));
     /* count predecessors without bad nodes */
     old_irn_arity = get_irn_arity(n);
@@ -580,19 +594,19 @@ static void relink_bad_block_predecessors(ir_node *n, void *env) {
     /* arity changing: set new predecessors without bad nodes */
     if (new_irn_arity < old_irn_arity) {
       /* Get new predecessor array. We do not resize the array, as we must
-         keep the old one to update Phis. */
+        keep the old one to update Phis. */
       new_in = NEW_ARR_D (ir_node *, current_ir_graph->obst, (new_irn_arity+1));
-      /* set new predeseccors in array */
+      /* set new predecessors in array */
       new_in[0] = NULL;
       new_irn_n = 1;
       for (i = 0; i < old_irn_arity; i++) {
-    irn = get_irn_n(n, i);
-    if (!is_Bad(irn)) {
-      new_in[new_irn_n] = irn;
-      is_backedge(n, i) ? set_backedge(n, new_irn_n-1) : set_not_backedge(n, new_irn_n-1);
-      new_irn_n++;
-    }
+        irn = get_irn_n(n, i);
+        if (!is_Bad(irn)) {
+          new_in[new_irn_n] = irn;
+          is_backedge(n, i) ? set_backedge(n, new_irn_n-1) : set_not_backedge(n, new_irn_n-1);
+          new_irn_n++;
+        }
       }
       //ARR_SETLEN(int, n->attr.block.backedge, new_irn_arity);
       ARR_SHRINKLEN(n->attr.block.backedge, new_irn_arity);
@@ -603,25 +617,25 @@ static void relink_bad_block_predecessors(ir_node *n, void *env) {
   } /* Block is not relinked */
 }
-/*
- * Relinks Bad predecesors from Bocks and Phis called by walker
+/**
+ * Relinks Bad predecessors from Blocks and Phis called by walker
 * remove_bad_predecesors(). If n is a Block, call
- relink_bad_block_redecessors(). If n is a Phinode, call also the relinking
+ relink_bad_block_predecessors(). If n is a Phi-node, call also the relinking
 * function of Phi's Block. 
If this block has bad predecessors, relink preds
- * of the Phinode.
+ * of the Phi-node.
 */
static void relink_bad_predecessors(ir_node *n, void *env) {
  ir_node *block, **old_in;
  int i, old_irn_arity, new_irn_arity;
-  /* relink bad predeseccors of a block */
+  /* relink bad predecessors of a block */
  if (get_irn_op(n) == op_Block)
    relink_bad_block_predecessors(n, env);
  /* If Phi node relink its block and its predecessors */
  if (get_irn_op(n) == op_Phi) {
-    /* Relink predeseccors of phi's block */
+    /* Relink predecessors of phi's block */
    block = get_nodes_block(n);
    if (get_irn_link(block) == NULL)
      relink_bad_block_predecessors(block, env);
@@ -629,17 +643,17 @@ static void relink_bad_predecessors(ir_node *n, void *env) {
    old_in = (ir_node **)get_irn_link(block); /* Of Phi's Block */
    old_irn_arity = ARR_LEN(old_in);
-    /* Relink Phi predeseccors if count of predeseccors changed */
+    /* Relink Phi predecessors if count of predecessors changed */
    if (old_irn_arity != ARR_LEN(get_irn_in(block))) {
-      /* set new predeseccors in array
-         n->in[0] remains the same block */
+      /* set new predecessors in array
+        n->in[0] remains the same block */
      new_irn_arity = 1;
      for(i = 1; i < old_irn_arity; i++)
-    if (!is_Bad((ir_node *)old_in[i])) {
-      n->in[new_irn_arity] = n->in[i];
-      is_backedge(n, i) ? set_backedge(n, new_irn_arity) : set_not_backedge(n, new_irn_arity);
-      new_irn_arity++;
-    }
+        if (!is_Bad((ir_node *)old_in[i])) {
+          n->in[new_irn_arity] = n->in[i];
+          is_backedge(n, i) ? set_backedge(n, new_irn_arity) : set_not_backedge(n, new_irn_arity);
+          new_irn_arity++;
+        }
      ARR_SETLEN(ir_node *, n->in, new_irn_arity);
      ARR_SETLEN(int, n->attr.phi_backedge, new_irn_arity);
@@ -649,7 +663,7 @@ static void relink_bad_predecessors(ir_node *n, void *env) {
 }
 /*
- * Removes Bad Bad predecesors from Blocks and the corresponding
+ * Removes Bad predecessors from Blocks and the corresponding
 * inputs to Phi nodes as in dead_node_elimination but without
 * copying the graph.
 * On walking up set the link field to NULL, on walking down call
@@ -663,14 +677,14 @@ void remove_bad_predecessors(ir_graph *irg) {
 /*--------------------------------------------------------------------*/
-/* Funcionality for inlining */
+/* Functionality for inlining */
 /*--------------------------------------------------------------------*/
 /**
 * Copy node for inlineing. Updates attributes that change when
 * inlineing but not for dead node elimination.
 *
- * Copies the node by calling copy_node and then updates the entity if
+ * Copies the node by calling copy_node() and then updates the entity if
 * it's a local one. env must be a pointer of the frame type of the
 * inlined procedure. The new entities must be in the link field of
 * the entities.
@@ -919,7 +933,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
      add_End_keepalive(get_irg_end(current_ir_graph), get_irn_n(end, i));
  /* The new end node will die. We need not free as the in array is on the obstack:
-     copy_node only generated 'D' arrays. */
+     copy_node() only generated 'D' arrays. */
  /* -- Replace Return nodes by Jump nodes. -- */
  n_ret = 0;
@@ -1099,7 +1113,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
  }
 #endif
-  /* -- Turn cse back on. -- */
+  /* -- Turn CSE back on. -- */
  set_optimize(rem_opt);
  return 1;
@@ -1215,29 +1229,39 @@ typedef struct {
  int n_callers_orig; /**< for statistics */
 } inline_irg_env;
+/**
+ * Allocate a new environment for inlining. 
+ */
 static inline_irg_env *new_inline_irg_env(void) {
-  inline_irg_env *env = xmalloc(sizeof(*env));
-  env->n_nodes = -2; /* uncount Start, End */
-  env->n_nodes_orig = -2; /* uncount Start, End */
-  env->call_nodes = eset_create();
-  env->n_call_nodes = 0;
+  inline_irg_env *env = xmalloc(sizeof(*env));
+  env->n_nodes = -2; /* do not count Start, End */
+  env->n_nodes_orig = -2; /* do not count Start, End */
+  env->call_nodes = eset_create();
+  env->n_call_nodes = 0;
  env->n_call_nodes_orig = 0;
-  env->n_callers = 0;
-  env->n_callers_orig = 0;
+  env->n_callers = 0;
+  env->n_callers_orig = 0;
  return env;
 }
+/**
+ * Destroy an environment for inlining.
+ */
 static void free_inline_irg_env(inline_irg_env *env) {
  eset_destroy(env->call_nodes);
  free(env);
 }
+/**
+ * Post-walker: collect all calls in the inline-environment
+ * of a graph and sum some statistics.
+ */
 static void collect_calls2(ir_node *call, void *env) {
  inline_irg_env *x = (inline_irg_env *)env;
  ir_op *op = get_irn_op(call);
  ir_graph *callee;
-  /* count nodes in irg */
+  /* count meaningful nodes in irg */
  if (op != op_Proj && op != op_Tuple && op != op_Sync) {
    x->n_nodes++;
    x->n_nodes_orig++;
@@ -1246,22 +1270,30 @@ static void collect_calls2(ir_node *call, void *env) {
  if (op != op_Call) return;
  /* collect all call nodes */
-  eset_insert(x->call_nodes, (void *)call);
+  eset_insert(x->call_nodes, call);
  x->n_call_nodes++;
  x->n_call_nodes_orig++;
  /* count all static callers */
  callee = get_call_called_irg(call);
  if (callee) {
-    ((inline_irg_env *)get_irg_link(callee))->n_callers++;
-    ((inline_irg_env *)get_irg_link(callee))->n_callers_orig++;
+    inline_irg_env *callee_env = get_irg_link(callee);
+    callee_env->n_callers++;
+    callee_env->n_callers_orig++;
  }
 }
+/**
+ * Returns TRUE if the number of call nodes is 0 in the irg's environment,
+ * hence this irg is a leave.
+ */
 INLINE static int is_leave(ir_graph *irg) {
  return (((inline_irg_env *)get_irg_link(irg))->n_call_nodes == 0);
 }
+/**
+ * Returns TRUE if the number of nodes in the irg's environment is smaller than size.
+ */
 INLINE static int is_smaller(ir_graph *callee, int size) {
  return (((inline_irg_env *)get_irg_link(callee))->n_nodes < size);
 }
@@ -1313,7 +1345,7 @@ void inline_leave_functions(int maxsize, int leavesize, int size) {
    for (call = eset_first(env->call_nodes); call; call = eset_next(env->call_nodes)) {
      ir_graph *callee;
-      if (get_irn_op(call) == op_Tuple) continue; /* We already have inlined this call. */
+      if (get_irn_op(call) == op_Tuple) continue; /* We already have inlined this call. */
      callee = get_call_called_irg(call);
      if (env->n_nodes > maxsize) continue; // break;
@@ -1400,24 +1432,47 @@ void inline_leave_functions(int maxsize, int leavesize, int size) {
 /* will be executed only if needed. */
 /*******************************************************************/
+/**
+ * Returns non-zero if a block is not reachable from Start.
+ *
+ * @param block  the block to test
+ */
+static int
+is_Block_unreachable(ir_node *block) {
+  return is_Block_dead(block) || get_Block_dom_depth(block) < 0;
+}
+
 /**
 * Find the earliest correct block for N. --- Place N into the
 * same Block as its dominance-deepest Input.
+ *
+ * We have to avoid calls to get_nodes_block() here
+ * because the graph is floating.
+ *
+ * move_out_of_loops() expects that place_floats_early() has placed
+ * all "living" nodes into a living block. That's why we must
+ * move nodes in dead blocks with "live" successors into a valid
+ * block. 
* We move them just into the same block as its successor (or
+ * in case of a Phi into the effective use block). For Phi successors,
+ * this may still be a dead block, but then there is no real use, as
+ * the control flow will be dead later.
 */
 static void
 place_floats_early(ir_node *n, pdeq *worklist)
 {
-  int i, start, irn_arity;
+  int i, irn_arity;
  /* we must not run into an infinite loop */
-  assert (irn_not_visited(n));
+  assert(irn_not_visited(n));
  mark_irn_visited(n);
  /* Place floating nodes. */
  if (get_irn_pinned(n) == op_pin_state_floats) {
-    int depth = 0;
-    ir_node *b = new_Bad();   /* The block to place this node in */
-    int bad_recursion = is_Bad(get_nodes_block(n));
+    ir_node *curr_block = get_irn_n(n, -1);
+    int in_dead_block = is_Block_unreachable(curr_block);
+    int depth = 0;
+    ir_node *b = NULL;   /* The block to place this node in */
    assert(get_irn_op(n) != op_Block);
@@ -1433,47 +1488,132 @@ place_floats_early(ir_node *n, pdeq *worklist)
    /* find the block for this node. */
    irn_arity = get_irn_arity(n);
    for (i = 0; i < irn_arity; i++) {
-      ir_node *dep = get_irn_n(n, i);
-      ir_node *dep_block;
-
-      if ((irn_not_visited(dep))
-     && (get_irn_pinned(dep) == op_pin_state_floats)) {
-    place_floats_early(dep, worklist);
+      ir_node *pred = get_irn_n(n, i);
+      ir_node *pred_block;
+
+      if ((irn_not_visited(pred))
+         && (get_irn_pinned(pred) == op_pin_state_floats)) {
+
+        /*
+         * If the current node is NOT in a dead block, but one of its
+         * predecessors is, we must move the predecessor to a live block.
+         * Such a thing can happen if global CSE chose a node from a dead block.
+         * We simply move it to our block.
+         * Note that neither Phi nor End nodes are floating, so we don't
+         * need to handle them here.
+         */
+        if (! in_dead_block) {
+          if (get_irn_pinned(pred) == op_pin_state_floats &&
+              is_Block_unreachable(get_irn_n(pred, -1)))
+            set_nodes_block(pred, curr_block);
+        }
+        place_floats_early(pred, worklist);
      }
      /*
       * A node in the Bad block must stay in the bad block,
       * so don't compute a new block for it.
       */
-      if (bad_recursion)
+      if (in_dead_block)
        continue;
      /* Because all loops contain at least one op_pin_state_pinned node, now all
-         our inputs are either op_pin_state_pinned or place_early has already
+         our inputs are either op_pin_state_pinned or place_early() has already
         been finished on them. We do not have any unfinished inputs! */
-      dep_block = get_nodes_block(dep);
-      if ((!is_Bad(dep_block)) &&
-      (get_Block_dom_depth(dep_block) > depth)) {
-    b = dep_block;
-    depth = get_Block_dom_depth(dep_block);
+      pred_block = get_irn_n(pred, -1);
+      if ((!is_Block_dead(pred_block)) &&
+          (get_Block_dom_depth(pred_block) > depth)) {
+        b = pred_block;
+        depth = get_Block_dom_depth(pred_block);
      }
      /* Avoid that the node is placed in the Start block */
-      if ((depth == 1) && (get_Block_dom_depth(get_nodes_block(n)) > 1)) {
+      if ((depth == 1) && (get_Block_dom_depth(get_irn_n(n, -1)) > 1)) {
        b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0);
        assert(b != get_irg_start_block(current_ir_graph));
        depth = 2;
      }
    }
-    set_nodes_block(n, b);
+    if (b)
+      set_nodes_block(n, b);
  }
-  /* Add predecessors of non floating nodes on worklist. */
-  start = (get_irn_op(n) == op_Block) ? 0 : -1;
+  /*
+   * Add predecessors of non-floating nodes and non-floating predecessors
+   * of floating nodes to worklist and fix their blocks if they are in a dead block. 
+ */ irn_arity = get_irn_arity(n); - for (i = start; i < irn_arity; i++) { - ir_node *pred = get_irn_n(n, i); - if (irn_not_visited(pred)) { - pdeq_putr (worklist, pred); + + if (get_irn_op(n) == op_End) { + /* + * Simplest case: End node. Predecessors are keep-alives, + * no need to move out of dead block. + */ + for (i = -1; i < irn_arity; ++i) { + ir_node *pred = get_irn_n(n, i); + if (irn_not_visited(pred)) + pdeq_putr(worklist, pred); + } + } + else if (is_Block(n)) { + /* + * Blocks: Predecessors are control flow, no need to move + * them out of dead block. + */ + for (i = irn_arity - 1; i >= 0; --i) { + ir_node *pred = get_irn_n(n, i); + if (irn_not_visited(pred)) + pdeq_putr(worklist, pred); + } + } + else if (is_Phi(n)) { + ir_node *pred; + ir_node *curr_block = get_irn_n(n, -1); + int in_dead_block = is_Block_unreachable(curr_block); + + /* + * Phi nodes: move nodes from dead blocks into the effective use + * of the Phi-input if the Phi is not in a bad block. + */ + pred = get_irn_n(n, -1); + if (irn_not_visited(pred)) + pdeq_putr(worklist, pred); + + for (i = irn_arity - 1; i >= 0; --i) { + ir_node *pred = get_irn_n(n, i); + + if (irn_not_visited(pred)) { + if (! in_dead_block && + get_irn_pinned(pred) == op_pin_state_floats && + is_Block_unreachable(get_irn_n(pred, -1))) { + set_nodes_block(pred, get_Block_cfgpred_block(curr_block, i)); + } + pdeq_putr(worklist, pred); + } + } + } + else { + ir_node *pred; + ir_node *curr_block = get_irn_n(n, -1); + int in_dead_block = is_Block_unreachable(curr_block); + + /* + * All other nodes: move nodes from dead blocks into the same block. + */ + pred = get_irn_n(n, -1); + if (irn_not_visited(pred)) + pdeq_putr(worklist, pred); + + for (i = irn_arity - 1; i >= 0; --i) { + ir_node *pred = get_irn_n(n, i); + + if (irn_not_visited(pred)) { + if (! in_dead_block && + get_irn_pinned(pred) == op_pin_state_floats && + is_Block_unreachable(get_irn_n(pred, -1))) { + set_nodes_block(pred, curr_block); + } + pdeq_putr(worklist, pred); + } } } } @@ -1492,25 +1632,38 @@ static INLINE void place_early(pdeq *worklist) { place_floats_early(get_irg_end(current_ir_graph), worklist); /* Work the content of the worklist. */ - while (!pdeq_empty (worklist)) { - ir_node *n = pdeq_getl (worklist); - if (irn_not_visited(n)) place_floats_early(n, worklist); + while (!pdeq_empty(worklist)) { + ir_node *n = pdeq_getl(worklist); + if (irn_not_visited(n)) + place_floats_early(n, worklist); } set_irg_outs_inconsistent(current_ir_graph); current_ir_graph->op_pin_state_pinned = op_pin_state_pinned; } -/** Compute the deepest common ancestor of block and dca. */ +/** + * Compute the deepest common ancestor of block and dca. + */ static ir_node *calc_dca(ir_node *dca, ir_node *block) { assert(block); + + /* we do not want to place nodes in dead blocks */ + if (is_Block_dead(block)) + return dca; + + /* We found a first legal placement. */ if (!dca) return block; + + /* Find a placement that is dominates both, dca and block. */ while (get_Block_dom_depth(block) > get_Block_dom_depth(dca)) block = get_Block_idom(block); + while (get_Block_dom_depth(dca) > get_Block_dom_depth(block)) { dca = get_Block_idom(dca); } + while (block != dca) { block = get_Block_idom(block); dca = get_Block_idom(dca); } @@ -1522,7 +1675,7 @@ static ir_node *calc_dca(ir_node *dca, ir_node *block) * A data flow edge points from producer to consumer. 
*/
 static ir_node *
-consumer_dom_dca (ir_node *dca, ir_node *consumer, ir_node *producer)
+consumer_dom_dca(ir_node *dca, ir_node *consumer, ir_node *producer)
 {
  ir_node *block = NULL;
@@ -1539,10 +1692,15 @@ consumer_dom_dca (ir_node *dca, ir_node *consumer, ir_node *producer)
      if (get_irn_n(consumer, i) == producer) {
        ir_node *new_block = get_nodes_block(get_Block_cfgpred(phi_block, i));
-    block = calc_dca(block, new_block);
+        if (! is_Block_unreachable(new_block))
+          block = calc_dca(block, new_block);
      }
    }
-  } else {
+
+    if (! block)
+      block = get_irn_n(producer, -1);
+  }
+  else {
    assert(is_no_Block(consumer));
    block = get_nodes_block(consumer);
  }
@@ -1551,6 +1709,8 @@ consumer_dom_dca (ir_node *dca, ir_node *consumer, ir_node *producer)
  return calc_dca(dca, block);
 }
+/* FIXME: the name clashes here with the function from ana/field_temperature.c
+ * please rename. */
 static INLINE int get_irn_loop_depth(ir_node *n) {
  return get_loop_depth(get_irn_loop(n));
 }
@@ -1558,6 +1718,9 @@ static INLINE int get_irn_loop_depth(ir_node *n) {
 /**
 * Move n to a block with less loop depth than it's current block. The
 * new block must be dominated by early.
+ *
+ * @param n      the node that should be moved
+ * @param early  the earliest block we can move n to
 */
 static void
 move_out_of_loops (ir_node *n, ir_node *early)
@@ -1570,6 +1733,7 @@ move_out_of_loops (ir_node *n, ir_node *early)
     dca with the least loop nesting depth, but still dominated
     by our early placement. */
  dca = get_nodes_block(n);
+  best = dca;
  while (dca != early) {
    dca = get_Block_idom(dca);
@@ -1594,16 +1758,16 @@ move_out_of_loops (ir_node *n, ir_node *early)
 * `optimal' Block between the latest and earliest legal block.
 * The `optimal' block is the dominance-deepest block of those
 * with the least loop-nesting-depth. This places N out of as many
- * loops as possible and then makes it as control dependant as
+ * loops as possible and then makes it as control dependent as
 * possible.
 */
 static void
 place_floats_late(ir_node *n, pdeq *worklist)
 {
  int i;
-  ir_node *early;
+  ir_node *early_blk;
-  assert (irn_not_visited(n)); /* no multiple placement */
+  assert(irn_not_visited(n)); /* no multiple placement */
  mark_irn_visited(n);
  /* no need to place block nodes, control nodes are already placed. */
  if ((get_irn_op(n) != op_Block) &&
      (!is_cfop(n)) &&
      (get_irn_mode(n) != mode_X)) {
-    /* Remember the early placement of this block to move it
-       out of loop no further than the early placement. */
-    early = get_nodes_block(n);
+    /* Remember the early placement of this node in early_blk; move it
+       out of loops no further than this placement. */
+    early_blk = get_irn_n(n, -1);
-    /* Do not move code not reachable from Start. For
-     * these we could not compute dominator information. */
-    if (is_Bad(early) || get_Block_dom_depth(early) == -1)
-      return;
+    /*
+     * BEWARE: Here we also get code that is live, but
+     * was in a dead block. If the node is live, but in
+     * a dead block because of CSE, we still might need it.
+     */
    /* Assure that our users are all placed, except the Phi-nodes.
       --- Each data flow cycle contains at least one Phi-node. We
       have to break the `user has to be placed before the
       producer' dependence cycle and the Phi-nodes are the
       place to do so, because we need to base our placement on the
       final region of our users, which is OK with Phi-nodes, as they
       are op_pin_state_pinned, and they never have to be placed after a
       producer of one of their inputs in the same block anyway. 
*/
-    for (i = 0; i < get_irn_n_outs(n); i++) {
+    for (i = get_irn_n_outs(n) - 1; i >= 0; --i) {
      ir_node *succ = get_irn_out(n, i);
      if (irn_not_visited(succ) && (get_irn_op(succ) != op_Phi))
        place_floats_late(succ, worklist);
    }
-    /* We have to determine the final block of this node... except for
-       constants. */
-    if ((get_irn_pinned(n) == op_pin_state_floats) &&
-        (get_irn_op(n) != op_Const) &&
-        (get_irn_op(n) != op_SymConst)) {
-      ir_node *dca = NULL;  /* deepest common ancestor in the
-                   dominator tree of all nodes'
-                   blocks depending on us; our final
-                   placement has to dominate DCA. */
-      for (i = 0; i < get_irn_n_outs(n); i++) {
-        ir_node *out = get_irn_out(n, i);
-        /* ignore if out is in dead code */
-        ir_node *outbl = get_nodes_block(out);
-        if (is_Bad(outbl) || get_Block_dom_depth(outbl) == -1)
-          continue;
-        dca = consumer_dom_dca (dca, out, n);
-      }
-      if (dca) {
-        set_nodes_block(n, dca);
+    if (! is_Block_dead(early_blk)) {
+      /* only move things that were not dead */
+
+      /* We have to determine the final block of this node... except for
+         constants. */
+      if ((get_irn_pinned(n) == op_pin_state_floats) &&
+          (get_irn_op(n) != op_Const) &&
+          (get_irn_op(n) != op_SymConst)) {
+        ir_node *dca = NULL;  /* deepest common ancestor in the
+                                 dominator tree of all nodes'
+                                 blocks depending on us; our final
+                                 placement has to dominate DCA. */
+        for (i = get_irn_n_outs(n) - 1; i >= 0; --i) {
+          ir_node *succ = get_irn_out(n, i);
+          ir_node *succ_blk;
+
+          if (get_irn_op(succ) == op_End) {
+            /*
+             * This consumer is the End node, a keep-alive edge.
+             * This is not a real consumer, so we ignore it.
+             */
+            continue;
+          }
 
-        move_out_of_loops (n, early);
+          /* ignore if succ is in dead code */
+          succ_blk = get_irn_n(succ, -1);
+          if (is_Block_unreachable(succ_blk))
+            continue;
+          dca = consumer_dom_dca(dca, succ, n);
+        }
+        if (dca) {
+          set_nodes_block(n, dca);
+          move_out_of_loops(n, early_blk);
+        }
      }
-      /* else all outs are in dead code */
    }
  }
  /* Add predecessors of all non-floating nodes on list. (Those of floating
-     nodes are placeded already and therefore are marked.) */
+     nodes are placed already and therefore are marked.) */
  for (i = 0; i < get_irn_n_outs(n); i++) {
    ir_node *succ = get_irn_out(n, i);
    if (irn_not_visited(get_irn_out(n, i))) {
-      pdeq_putr (worklist, succ);
+      pdeq_putr(worklist, succ);
    }
  }
 }
@@ -1678,9 +1855,10 @@ static INLINE void place_late(pdeq *worklist) {
  place_floats_late(get_irg_start_block(current_ir_graph), worklist);
  /* And now empty the worklist again... */
-  while (!pdeq_empty (worklist)) {
-    ir_node *n = pdeq_getl (worklist);
-    if (irn_not_visited(n)) place_floats_late(n, worklist);
+  while (!pdeq_empty(worklist)) {
+    ir_node *n = pdeq_getl(worklist);
+    if (irn_not_visited(n))
+      place_floats_late(n, worklist);
  }
 }
@@ -1707,8 +1885,9 @@ void place_code(ir_graph *irg) {
  worklist = new_pdeq();
  place_early(worklist);
-  /* place_early invalidates the outs, place_late needs them. */
-  compute_outs(irg);
+  /* place_early() invalidates the outs, place_late() needs them. */
+  compute_irg_outs(irg);
+
  /* Now move the nodes down in the dominator tree. This reduces the
     unnecessary executions of the node. */
  place_late(worklist);