X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firgopt.c;h=3e947631cbc6781551af5cd0a1ec3a72286e413a;hb=6537e66d6b82a7f18f69a8cb81d0180e824fb017;hp=af55fb5ce1662a209e25a8d3fae878cdaa2a72f4;hpb=403d2f3c8f31e33df53721857c2e453363edfc41;p=libfirm diff --git a/ir/ir/irgopt.c b/ir/ir/irgopt.c index af55fb5ce..3e947631c 100644 --- a/ir/ir/irgopt.c +++ b/ir/ir/irgopt.c @@ -12,11 +12,10 @@ #ifdef HAVE_CONFIG_H -# include +# include "config.h" #endif #include -#include #include "irnode_t.h" #include "irgraph_t.h" @@ -32,14 +31,17 @@ #include "pset.h" #include "eset.h" #include "pdeq.h" /* Fuer code placement */ +#include "xmalloc.h" #include "irouts.h" #include "irloop_t.h" #include "irbackedge_t.h" #include "cgana.h" +#include "trouts.h" #include "irflag_t.h" -#include "firmstat.h" +#include "irhooks.h" +#include "iredges_t.h" /* Defined in iropt.c */ pset *new_identities (void); @@ -55,9 +57,9 @@ static void init_link (ir_node *n, void *env) { } #if 0 /* Old version. Avoids Ids. - This is not necessary: we do a postwalk, and get_irn_n - removes ids anyways. So it's much cheaper to call the - optimization less often and use the exchange() algorithm. */ + This is not necessary: we do a post walk, and get_irn_n + removes ids anyways. So it's much cheaper to call the + optimization less often and use the exchange() algorithm. */ static void optimize_in_place_wrapper (ir_node *n, void *env) { int i, irn_arity; @@ -98,7 +100,7 @@ static INLINE void do_local_optimize(ir_node *n) { set_irg_loopinfo_inconsistent(current_ir_graph); - /* Clean the value_table in irg for the cse. */ + /* Clean the value_table in irg for the CSE. */ del_identities(current_ir_graph->value_table); current_ir_graph->value_table = new_identities(); @@ -113,7 +115,6 @@ void local_optimize_node(ir_node *n) { do_local_optimize(n); current_ir_graph = rem; - } void @@ -197,34 +198,34 @@ static INLINE void new_backedge_info(ir_node *n) { } } -/** - * Copies the node to the new obstack. The Ins of the new node point to - * the predecessors on the old obstack. For block/phi nodes not all - * predecessors might be copied. n->link points to the new node. +/* + * Copies a node to the current_ir_graph. The Ins of the new node point to + * the predecessors on the graph of the old node. For block/phi nodes not all + * predecessors might be copied. * For Phi and Block nodes the function allocates in-arrays with an arity * only for useful predecessors. The arity is determined by counting * the non-bad predecessors of the block. */ -static void -copy_node (ir_node *n, void *env) { +ir_node *copy_irn(ir_node *n, int copy_node_nr) { ir_node *nn, *block; int new_arity; - opcode op = get_irn_opcode(n); + ir_op *op = get_irn_op(n); + /* The end node looses it's flexible in array. This doesn't matter, as dead node elimination builds End by hand, inlineing doesn't use the End node. 
*/ - /* assert(n->op == op_End || ((_ARR_DESCR(n->in))->cookie != ARR_F_MAGIC)); */ + /* assert(op == op_End || ((_ARR_DESCR(n->in))->cookie != ARR_F_MAGIC)); */ - if (op == iro_Bad) { + if (op == op_Bad) { /* node copied already */ - return; - } else if (op == iro_Block) { + return NULL; + } else if (op == op_Block) { block = NULL; new_arity = compute_new_arity(n); n->attr.block.graph_arr = NULL; } else { block = get_nodes_block(n); - if (get_irn_opcode(n) == iro_Phi) { + if (op == op_Phi) { new_arity = compute_new_arity(block); } else { new_arity = get_irn_arity(n); @@ -233,27 +234,52 @@ copy_node (ir_node *n, void *env) { nn = new_ir_node(get_irn_dbg_info(n), current_ir_graph, block, - get_irn_op(n), + op, get_irn_mode(n), new_arity, - get_irn_in(n)); + get_irn_in(n) + 1); /* Copy the attributes. These might point to additional data. If this was allocated on the old obstack the pointers now are dangling. This frees e.g. the memory of the graph_arr allocated in new_immBlock. */ - copy_attrs(n, nn); + copy_node_attr(n, nn); new_backedge_info(nn); - set_new_node(n, nn); - /* printf("\n old node: "); DDMSG2(n); - printf(" new node: "); DDMSG2(nn); */ +#if DEBUG_libfirm + if (copy_node_nr) { + /* for easier debugging, we want to copy the node numbers too */ + nn->node_nr = n->node_nr; + } +#endif + return nn; } +/** + * Copies the node to the new obstack. The Ins of the new node point to + * the predecessors on the old obstack. For block/phi nodes not all + * predecessors might be copied. n->link points to the new node. + * For Phi and Block nodes the function allocates in-arrays with an arity + * only for useful predecessors. The arity is determined by counting + * the non-bad predecessors of the block. + * + * @param n The node to be copied + * @param env if non-NULL, the node number attribute will be copied to the new node + * + * Note: Also used for loop unrolling. + */ +static void firm_copy_node (ir_node *n, void *env) { + ir_node *nn = copy_irn(n, env != NULL); + + if (nn) + set_new_node(n, nn); +} + + /** * Copies new predecessors of old node to new node remembered in link. * Spare the Bad predecessors of Phi and Block nodes. */ -static void +void copy_preds (ir_node *n, void *env) { ir_node *nn, *block; int i, j, irn_arity; @@ -264,15 +290,15 @@ copy_preds (ir_node *n, void *env) { printf(" new node: "); DDMSG2(nn); printf(" arities: old: %d, new: %d\n", get_irn_arity(n), get_irn_arity(nn)); */ - if (get_irn_opcode(n) == iro_Block) { + if (is_Block(n)) { /* Don't copy Bad nodes. */ j = 0; irn_arity = get_irn_arity(n); for (i = 0; i < irn_arity; i++) - if (get_irn_opcode(get_irn_n(n, i)) != iro_Bad) { - set_irn_n (nn, j, get_new_node(get_irn_n(n, i))); - /*if (is_backedge(n, i)) set_backedge(nn, j);*/ - j++; + if (! is_Bad(get_irn_n(n, i))) { + set_irn_n (nn, j, get_new_node(get_irn_n(n, i))); + /*if (is_backedge(n, i)) set_backedge(nn, j);*/ + j++; } /* repair the block visited flag from above misuse. Repair it in both graphs so that the old one can still be used. */ @@ -283,18 +309,18 @@ copy_preds (ir_node *n, void *env) { We don't call optimize_in_place as it requires that the fields in ir_graph are set properly. */ if ((get_opt_control_flow_straightening()) && - (get_Block_n_cfgpreds(nn) == 1) && - (get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp)) { + (get_Block_n_cfgpreds(nn) == 1) && + (get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp)) { ir_node *old = get_nodes_block(get_Block_cfgpred(nn, 0)); if (nn == old) { - /* Jmp jumps into the block it is in -- deal self cycle. 
*/ - assert(is_Bad(get_new_node(get_irg_bad(current_ir_graph)))); - exchange(nn, get_new_node(get_irg_bad(current_ir_graph))); + /* Jmp jumps into the block it is in -- deal self cycle. */ + assert(is_Bad(get_new_node(get_irg_bad(current_ir_graph)))); + exchange(nn, get_new_node(get_irg_bad(current_ir_graph))); } else { - exchange(nn, old); + exchange(nn, old); } } - } else if (get_irn_opcode(n) == iro_Phi) { + } else if (get_irn_op(n) == op_Phi) { /* Don't copy node if corresponding predecessor in block is Bad. The Block itself should not be Bad. */ block = get_nodes_block(n); @@ -302,35 +328,37 @@ copy_preds (ir_node *n, void *env) { j = 0; irn_arity = get_irn_arity(n); for (i = 0; i < irn_arity; i++) - if (get_irn_opcode(get_irn_n(block, i)) != iro_Bad) { - set_irn_n (nn, j, get_new_node(get_irn_n(n, i))); - /*if (is_backedge(n, i)) set_backedge(nn, j);*/ - j++; + if (! is_Bad(get_irn_n(block, i))) { + set_irn_n (nn, j, get_new_node(get_irn_n(n, i))); + /*if (is_backedge(n, i)) set_backedge(nn, j);*/ + j++; } /* If the pre walker reached this Phi after the post walker visited the block block_visited is > 0. */ set_Block_block_visited(get_nodes_block(n), 0); /* Compacting the Phi's ins might generate Phis with only one predecessor. */ - if (get_irn_arity(n) == 1) - exchange(n, get_irn_n(n, 0)); + if (get_irn_arity(nn) == 1) + exchange(nn, get_irn_n(nn, 0)); } else { irn_arity = get_irn_arity(n); for (i = -1; i < irn_arity; i++) set_irn_n (nn, i, get_new_node(get_irn_n(n, i))); } - /* Now the new node is complete. We can add it to the hash table for cse. + /* Now the new node is complete. We can add it to the hash table for CSE. @@@ inlinening aborts if we identify End. Why? */ - if(get_irn_op(nn) != op_End) + if (get_irn_op(nn) != op_End) add_identities (current_ir_graph->value_table, nn); } /** * Copies the graph recursively, compacts the keepalive of the end node. + * + * @param copy_node_nr If non-zero, the node number will be copied */ static void -copy_graph (void) { - ir_node *oe, *ne, *ob, *nb; /* old end, new end, old bad, new bad */ +copy_graph (int copy_node_nr) { + ir_node *oe, *ne, *ob, *nb, *om, *nm; /* old end, new end, old bad, new bad, old NoMem, new NoMem */ ir_node *ka; /* keep alive */ int i, irn_arity; @@ -344,9 +372,10 @@ copy_graph (void) { -1, NULL); /* Copy the attributes. Well, there might be some in the future... */ - copy_attrs(oe, ne); + copy_node_attr(oe, ne); set_new_node(oe, ne); + /* copy the Bad node */ ob = get_irg_bad(current_ir_graph); nb = new_ir_node(get_irn_dbg_info(ob), current_ir_graph, @@ -357,11 +386,21 @@ copy_graph (void) { NULL); set_new_node(ob, nb); + /* copy the NoMem node */ + om = get_irg_no_mem(current_ir_graph); + nm = new_ir_node(get_irn_dbg_info(om), + current_ir_graph, + NULL, + op_NoMem, + mode_M, + 0, + NULL); + set_new_node(om, nm); + /* copy the live nodes */ - irg_walk(get_nodes_block(oe), copy_node, copy_preds, NULL); + irg_walk(get_nodes_block(oe), firm_copy_node, copy_preds, (void *)copy_node_nr); /* copy_preds for the end node ... */ set_nodes_block(ne, get_new_node(get_nodes_block(oe))); - set_nodes_block(nb, get_new_node(get_nodes_block(ob))); /*- ... and now the keep alives. -*/ /* First pick the not marked block nodes and walk them. 
We must pick these @@ -370,10 +409,10 @@ copy_graph (void) { for (i = 0; i < irn_arity; i++) { ka = get_irn_intra_n(oe, i); if ((get_irn_op(ka) == op_Block) && - (get_irn_visited(ka) < get_irg_visited(current_ir_graph))) { + (get_irn_visited(ka) < get_irg_visited(current_ir_graph))) { /* We must keep the block alive and copy everything reachable */ set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1); - irg_walk(ka, copy_node, copy_preds, NULL); + irg_walk(ka, firm_copy_node, copy_preds, (void *)copy_node_nr); add_End_keepalive(ne, get_new_node(ka)); } } @@ -384,13 +423,17 @@ copy_graph (void) { ka = get_irn_intra_n(oe, i); if ((get_irn_op(ka) == op_Phi)) { if (get_irn_visited(ka) < get_irg_visited(current_ir_graph)) { - /* We didn't copy the Phi yet. */ - set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1); - irg_walk(ka, copy_node, copy_preds, NULL); + /* We didn't copy the Phi yet. */ + set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1); + irg_walk(ka, firm_copy_node, copy_preds, (void *)copy_node_nr); } add_End_keepalive(ne, get_new_node(ka)); } } + + /* start block sometimes only reached after keep alives */ + set_nodes_block(nb, get_new_node(get_nodes_block(ob))); + set_nodes_block(nm, get_new_node(get_nodes_block(om))); } /** @@ -398,9 +441,11 @@ copy_graph (void) { * in current_ir_graph and fixes the environment. * Then fixes the fields in current_ir_graph containing nodes of the * graph. + * + * @param copy_node_nr If non-zero, the node number will be copied */ static void -copy_graph_env (void) { +copy_graph_env (int copy_node_nr) { ir_node *old_end; /* Not all nodes remembered in current_ir_graph might be reachable from the end node. Assure their link is set to NULL, so that @@ -409,12 +454,13 @@ copy_graph_env (void) { set_irn_link(get_irg_globals (current_ir_graph), NULL); set_irn_link(get_irg_args (current_ir_graph), NULL); set_irn_link(get_irg_initial_mem(current_ir_graph), NULL); + set_irn_link(get_irg_no_mem (current_ir_graph), NULL); /* we use the block walk flag for removing Bads from Blocks ins. 
*/ inc_irg_block_visited(current_ir_graph); /* copy the graph */ - copy_graph(); + copy_graph(copy_node_nr); /* fix the fields in current_ir_graph */ old_end = get_irg_end(current_ir_graph); @@ -424,19 +470,19 @@ copy_graph_env (void) { free_End(old_end); set_irg_end_block (current_ir_graph, get_new_node(get_irg_end_block(current_ir_graph))); if (get_irn_link(get_irg_frame(current_ir_graph)) == NULL) { - copy_node (get_irg_frame(current_ir_graph), NULL); + firm_copy_node (get_irg_frame(current_ir_graph), (void *)copy_node_nr); copy_preds(get_irg_frame(current_ir_graph), NULL); } if (get_irn_link(get_irg_globals(current_ir_graph)) == NULL) { - copy_node (get_irg_globals(current_ir_graph), NULL); + firm_copy_node (get_irg_globals(current_ir_graph), (void *)copy_node_nr); copy_preds(get_irg_globals(current_ir_graph), NULL); } if (get_irn_link(get_irg_initial_mem(current_ir_graph)) == NULL) { - copy_node (get_irg_initial_mem(current_ir_graph), NULL); + firm_copy_node (get_irg_initial_mem(current_ir_graph), (void *)copy_node_nr); copy_preds(get_irg_initial_mem(current_ir_graph), NULL); } if (get_irn_link(get_irg_args(current_ir_graph)) == NULL) { - copy_node (get_irg_args(current_ir_graph), NULL); + firm_copy_node (get_irg_args(current_ir_graph), (void *)copy_node_nr); copy_preds(get_irg_args(current_ir_graph), NULL); } set_irg_start (current_ir_graph, get_new_node(get_irg_start(current_ir_graph))); @@ -449,10 +495,16 @@ copy_graph_env (void) { set_irg_args (current_ir_graph, get_new_node(get_irg_args(current_ir_graph))); if (get_irn_link(get_irg_bad(current_ir_graph)) == NULL) { - copy_node(get_irg_bad(current_ir_graph), NULL); + firm_copy_node(get_irg_bad(current_ir_graph), (void *)copy_node_nr); copy_preds(get_irg_bad(current_ir_graph), NULL); } set_irg_bad(current_ir_graph, get_new_node(get_irg_bad(current_ir_graph))); + + if (get_irn_link(get_irg_no_mem(current_ir_graph)) == NULL) { + firm_copy_node(get_irg_no_mem(current_ir_graph), (void *)copy_node_nr); + copy_preds(get_irg_no_mem(current_ir_graph), NULL); + } + set_irg_no_mem(current_ir_graph, get_new_node(get_irg_no_mem(current_ir_graph))); } /** @@ -460,28 +512,31 @@ copy_graph_env (void) { * from block nodes and the corresponding inputs from Phi nodes. * Merges single exit blocks with single entry blocks and removes * 1-input Phis. - * Adds all new nodes to a new hash table for cse. Does not - * perform cse, so the hash table might contain common subexpressions. + * Adds all new nodes to a new hash table for CSE. Does not + * perform CSE, so the hash table might contain common subexpressions. */ void dead_node_elimination(ir_graph *irg) { ir_graph *rem; - int rem_ipview = interprocedural_view; + int rem_ipview = get_interprocedural_view(); struct obstack *graveyard_obst = NULL; struct obstack *rebirth_obst = NULL; + edges_init_graph(irg); + /* inform statistics that we started a dead-node elimination run */ - stat_dead_node_elim_start(irg); + hook_dead_node_elim_start(irg); /* Remember external state of current_ir_graph. 
*/ rem = current_ir_graph; current_ir_graph = irg; - interprocedural_view = 0; + set_interprocedural_view(false); /* Handle graph state */ assert(get_irg_phase_state(current_ir_graph) != phase_building); free_callee_info(current_ir_graph); - free_outs(current_ir_graph); + free_irg_outs(current_ir_graph); + free_trouts(); /* @@@ so far we loose loops when copying */ free_loop_information(current_ir_graph); @@ -492,7 +547,7 @@ dead_node_elimination(ir_graph *irg) { graveyard_obst = irg->obst; /* A new obstack, where the reachable nodes will be copied to. */ - rebirth_obst = (struct obstack *) xmalloc (sizeof (struct obstack)); + rebirth_obst = xmalloc (sizeof(*rebirth_obst)); current_ir_graph->obst = rebirth_obst; obstack_init (current_ir_graph->obst); @@ -501,7 +556,7 @@ dead_node_elimination(ir_graph *irg) { irg->value_table = new_identities (); /* Copy the graph from the old to the new obstack */ - copy_graph_env(); + copy_graph_env(1); /* Free memory from old unoptimized obstack */ obstack_free(graveyard_obst, 0); /* First empty the obstack ... */ @@ -509,14 +564,14 @@ dead_node_elimination(ir_graph *irg) { } /* inform statistics that the run is over */ - stat_dead_node_elim_stop(irg); + hook_dead_node_elim_stop(irg); current_ir_graph = rem; - interprocedural_view = rem_ipview; + set_interprocedural_view(rem_ipview); } /** - * Relink bad predeseccors of a block and store the old in array to the + * Relink bad predecessors of a block and store the old in array to the * link field. This function is called by relink_bad_predecessors(). * The array of link field starts with the block operand at position 0. * If block has bad predecessors, create a new in array without bad preds. @@ -527,7 +582,7 @@ static void relink_bad_block_predecessors(ir_node *n, void *env) { int i, new_irn_n, old_irn_arity, new_irn_arity = 0; /* if link field of block is NULL, look for bad predecessors otherwise - this is allready done */ + this is already done */ if (get_irn_op(n) == op_Block && get_irn_link(n) == NULL) { @@ -541,41 +596,49 @@ static void relink_bad_block_predecessors(ir_node *n, void *env) { /* arity changing: set new predecessors without bad nodes */ if (new_irn_arity < old_irn_arity) { - /* get new predecessor array without Block predecessor */ + /* Get new predecessor array. We do not resize the array, as we must + keep the old one to update Phis. */ new_in = NEW_ARR_D (ir_node *, current_ir_graph->obst, (new_irn_arity+1)); - /* set new predeseccors in array */ + /* set new predecessors in array */ new_in[0] = NULL; new_irn_n = 1; - for (i = 1; i < old_irn_arity; i++) { - irn = get_irn_n(n, i); - if (!is_Bad(irn)) new_in[new_irn_n++] = irn; + for (i = 0; i < old_irn_arity; i++) { + irn = get_irn_n(n, i); + if (!is_Bad(irn)) { + new_in[new_irn_n] = irn; + is_backedge(n, i) ? set_backedge(n, new_irn_n-1) : set_not_backedge(n, new_irn_n-1); + new_irn_n++; + } } + //ARR_SETLEN(int, n->attr.block.backedge, new_irn_arity); + ARR_SHRINKLEN(n->attr.block.backedge, new_irn_arity); n->in = new_in; + } /* ir node has bad predecessors */ } /* Block is not relinked */ } -/* - * Relinks Bad predecesors from Bocks and Phis called by walker +/** + * Relinks Bad predecessors from Blocks and Phis called by walker * remove_bad_predecesors(). If n is a Block, call - * relink_bad_block_redecessors(). If n is a Phinode, call also the relinking + * relink_bad_block_redecessors(). If n is a Phi-node, call also the relinking * function of Phi's Block. If this block has bad predecessors, relink preds - * of the Phinode. 
+ * of the Phi-node. */ static void relink_bad_predecessors(ir_node *n, void *env) { ir_node *block, **old_in; int i, old_irn_arity, new_irn_arity; - /* relink bad predeseccors of a block */ + /* relink bad predecessors of a block */ if (get_irn_op(n) == op_Block) relink_bad_block_predecessors(n, env); /* If Phi node relink its block and its predecessors */ if (get_irn_op(n) == op_Phi) { - /* Relink predeseccors of phi's block */ + /* Relink predecessors of phi's block */ block = get_nodes_block(n); if (get_irn_link(block) == NULL) relink_bad_block_predecessors(block, env); @@ -583,22 +646,27 @@ static void relink_bad_predecessors(ir_node *n, void *env) { old_in = (ir_node **)get_irn_link(block); /* Of Phi's Block */ old_irn_arity = ARR_LEN(old_in); - /* Relink Phi predeseccors if count of predeseccors changed */ + /* Relink Phi predecessors if count of predecessors changed */ if (old_irn_arity != ARR_LEN(get_irn_in(block))) { - /* set new predeseccors in array - n->in[0] remains the same block */ + /* set new predecessors in array + n->in[0] remains the same block */ new_irn_arity = 1; for(i = 1; i < old_irn_arity; i++) - if (!is_Bad((ir_node *)old_in[i])) n->in[new_irn_arity++] = n->in[i]; + if (!is_Bad((ir_node *)old_in[i])) { + n->in[new_irn_arity] = n->in[i]; + is_backedge(n, i) ? set_backedge(n, new_irn_arity) : set_not_backedge(n, new_irn_arity); + new_irn_arity++; + } ARR_SETLEN(ir_node *, n->in, new_irn_arity); + ARR_SETLEN(int, n->attr.phi_backedge, new_irn_arity); } } /* n is a Phi node */ } -/** - * Removes Bad Bad predecesors from Blocks and the corresponding +/* + * Removes Bad Bad predecessors from Blocks and the corresponding * inputs to Phi nodes as in dead_node_elimination but without * copying the graph. * On walking up set the link field to NULL, on walking down call @@ -619,7 +687,7 @@ void remove_bad_predecessors(ir_graph *irg) { * Copy node for inlineing. Updates attributes that change when * inlineing but not for dead node elimination. * - * Copies the node by calling copy_node and then updates the entity if + * Copies the node by calling firm_copy_node and then updates the entity if * it's a local one. env must be a pointer of the frame type of the * inlined procedure. The new entities must be in the link field of * the entities. @@ -629,7 +697,7 @@ copy_node_inline (ir_node *n, void *env) { ir_node *new; type *frame_tp = (type *)env; - copy_node(n, NULL); + firm_copy_node(n, NULL); if (get_irn_op(n) == op_Sel) { new = get_new_node (n); assert(get_irn_op(new) == op_Sel); @@ -655,13 +723,13 @@ static void find_addr(ir_node *node, void *env) * - call with compound arguments * - graphs that take the address of a parameter * - * check these condition here + * check these conditions here */ static int can_inline(ir_node *call, ir_graph *called_graph) { type *call_type = get_Call_type(call); int params, ress, i, res; - assert(is_method_type(call_type)); + assert(is_Method_type(call_type)); params = get_method_n_params(call_type); ress = get_method_n_ress(call_type); @@ -701,9 +769,15 @@ int inline_method(ir_node *call, ir_graph *called_graph) { type *called_frame; irg_inline_property prop = get_irg_inline_property(called_graph); - if ( (prop != irg_inline_forced) && (!get_opt_optimize() || !get_opt_inline() || - (prop == irg_inline_forbidden))) return 0; + if ( (prop != irg_inline_forced) && + (!get_opt_optimize() || !get_opt_inline() || (prop == irg_inline_forbidden))) return 0; + /* Do not inline variadic functions. 
*/ + if (get_method_variadicity(get_entity_type(get_irg_entity(called_graph))) == variadicity_variadic) + return 0; + + assert(get_method_n_params(get_entity_type(get_irg_entity(called_graph))) == + get_method_n_params(get_Call_type(call))); /* * currently, we cannot inline two cases: @@ -724,6 +798,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) { if (get_irg_outs_state(current_ir_graph) == outs_consistent) set_irg_outs_inconsistent(current_ir_graph); set_irg_loopinfo_inconsistent(current_ir_graph); + set_irg_callee_info_state(current_ir_graph, irg_callee_info_inconsistent); /* -- Check preconditions -- */ assert(get_irn_op(call) == op_Call); @@ -739,11 +814,12 @@ int inline_method(ir_node *call, ir_graph *called_graph) { } /* here we know we WILL inline, so inform the statistics */ - stat_inline(call, called_graph); + hook_inline(call, called_graph); /* -- Decide how to handle exception control flow: Is there a handler for the Call node, or do we branch directly to End on an exception? - exc_handling: 0 There is a handler. + exc_handling: + 0 There is a handler. 1 Branches to End. 2 Exception handling not represented in Firm. -- */ { @@ -849,8 +925,8 @@ int inline_method(ir_node *call, ir_graph *called_graph) { arity = get_irn_arity(end_bl); /* arity = n_exc + n_ret */ n_res = get_method_n_ress(get_Call_type(call)); - res_pred = (ir_node **) malloc (n_res * sizeof (ir_node *)); - cf_pred = (ir_node **) malloc (arity * sizeof (ir_node *)); + res_pred = xmalloc (n_res * sizeof(*res_pred)); + cf_pred = xmalloc (arity * sizeof(*res_pred)); set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */ @@ -860,7 +936,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) { add_End_keepalive(get_irg_end(current_ir_graph), get_irn_n(end, i)); /* The new end node will die. We need not free as the in array is on the obstack: - copy_node only generated 'D' arrays. */ + firm_copy_node only generated 'D' arrays. */ /* -- Replace Return nodes by Jump nodes. -- */ n_ret = 0; @@ -905,9 +981,9 @@ int inline_method(ir_node *call, ir_graph *called_graph) { } } if (n_ret > 0) - phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0])); + phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0])); else - phi = new_Bad(); + phi = new_Bad(); res_pred[j] = phi; /* Conserve Phi-list for further inlinings -- but might be optimized */ if (get_nodes_block(phi) == post_bl) { @@ -981,7 +1057,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) { } main_end_bl = get_irg_end_block(current_ir_graph); main_end_bl_arity = get_irn_arity(main_end_bl); - end_preds = (ir_node **) malloc ((n_exc + main_end_bl_arity) * sizeof (ir_node *)); + end_preds = xmalloc ((n_exc + main_end_bl_arity) * sizeof(*end_preds)); for (i = 0; i < main_end_bl_arity; ++i) end_preds[i] = get_irn_n(main_end_bl, i); @@ -1025,7 +1101,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) { if (i < get_Block_n_cfgpreds(end_bl)) { bl = get_nodes_block(cf_op); arity = get_Block_n_cfgpreds(end_bl) + get_Block_n_cfgpreds(bl) - 1; - cf_pred = (ir_node **) malloc (arity * sizeof (ir_node *)); + cf_pred = xmalloc (arity * sizeof(*cf_pred)); for (j = 0; j < i; j++) cf_pred[j] = get_Block_cfgpred(end_bl, j); for (j = j; j < i + get_Block_n_cfgpreds(bl); j++) @@ -1040,7 +1116,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) { } #endif - /* -- Turn cse back on. -- */ + /* -- Turn CSE back on. 
-- */ set_optimize(rem_opt); return 1; @@ -1068,16 +1144,15 @@ typedef struct _inline_env_t { */ static ir_graph *get_call_called_irg(ir_node *call) { ir_node *addr; - tarval *tv; ir_graph *called_irg = NULL; assert(get_irn_op(call) == op_Call); addr = get_Call_ptr(call); - if (get_irn_op(addr) == op_Const) { - /* Check whether the constant is the pointer to a compiled entity. */ - tv = get_Const_tarval(addr); + if ((get_irn_op(addr) == op_SymConst) && (get_SymConst_kind (addr) == symconst_addr_ent)) { + called_irg = get_entity_irg(get_SymConst_entity(addr)); } + return called_irg; } @@ -1093,8 +1168,8 @@ static void collect_calls(ir_node *call, void *env) { ir_graph *called_irg = get_entity_irg(get_SymConst_entity(addr)); inline_env_t *ienv = (inline_env_t *)env; if (called_irg && ienv->pos < MAX_INLINE) { - /* The Call node calls a locally defined method. Remember to inline. */ - ienv->calls[ienv->pos++] = call; + /* The Call node calls a locally defined method. Remember to inline. */ + ienv->calls[ienv->pos++] = call; } } } @@ -1133,10 +1208,8 @@ void inline_small_irgs(ir_graph *irg, int size) { collect_phiprojs(irg); for (i = 0; i < env.pos; i++) { ir_graph *callee; - //tv = get_Const_tarval(get_Call_ptr(env.calls[i])); - // callee = get_entity_irg(get_tarval_entity(tv)); callee = get_entity_irg(get_SymConst_entity(get_Call_ptr(env.calls[i]))); - if (((_obstack_memory_used(callee->obst) - obstack_room(callee->obst)) < size) || + if (((_obstack_memory_used(callee->obst) - (int)obstack_room(callee->obst)) < size) || (get_irg_inline_property(callee) == irg_inline_forced)) { inline_method(env.calls[i], callee); } @@ -1159,29 +1232,39 @@ typedef struct { int n_callers_orig; /**< for statistics */ } inline_irg_env; +/** + * Allocate a new nvironment for inlining. + */ static inline_irg_env *new_inline_irg_env(void) { - inline_irg_env *env = malloc(sizeof(inline_irg_env)); - env->n_nodes = -2; /* uncount Start, End */ - env->n_nodes_orig = -2; /* uncount Start, End */ - env->call_nodes = eset_create(); - env->n_call_nodes = 0; + inline_irg_env *env = xmalloc(sizeof(*env)); + env->n_nodes = -2; /* do not count count Start, End */ + env->n_nodes_orig = -2; /* do not count Start, End */ + env->call_nodes = eset_create(); + env->n_call_nodes = 0; env->n_call_nodes_orig = 0; - env->n_callers = 0; - env->n_callers_orig = 0; + env->n_callers = 0; + env->n_callers_orig = 0; return env; } +/** + * destroy an environment for inlining. + */ static void free_inline_irg_env(inline_irg_env *env) { eset_destroy(env->call_nodes); free(env); } +/** + * post-walker: collect all calls in the inline-environment + * of a graph and sum some statistics. + */ static void collect_calls2(ir_node *call, void *env) { inline_irg_env *x = (inline_irg_env *)env; ir_op *op = get_irn_op(call); ir_graph *callee; - /* count nodes in irg */ + /* count meaningful nodes in irg */ if (op != op_Proj && op != op_Tuple && op != op_Sync) { x->n_nodes++; x->n_nodes_orig++; @@ -1197,15 +1280,23 @@ static void collect_calls2(ir_node *call, void *env) { /* count all static callers */ callee = get_call_called_irg(call); if (callee) { - ((inline_irg_env *)get_irg_link(callee))->n_callers++; - ((inline_irg_env *)get_irg_link(callee))->n_callers_orig++; + inline_irg_env *callee_env = get_irg_link(callee); + callee_env->n_callers++; + callee_env->n_callers_orig++; } } +/** + * Returns TRUE if the number of callers in 0 in the irg's environment, + * hence this irg is a leave. 
+ */ INLINE static int is_leave(ir_graph *irg) { return (((inline_irg_env *)get_irg_link(irg))->n_call_nodes == 0); } +/** + * Returns TRUE if the number of callers is smaller size in the irg's environment. + */ INLINE static int is_smaller(ir_graph *callee, int size) { return (((inline_irg_env *)get_irg_link(callee))->n_nodes < size); } @@ -1227,7 +1318,7 @@ void inline_leave_functions(int maxsize, int leavesize, int size) { if (!(get_opt_optimize() && get_opt_inline())) return; - /* extend all irgs by a temporary data structure for inlineing. */ + /* extend all irgs by a temporary data structure for inlining. */ for (i = 0; i < n_irgs; ++i) set_irg_link(get_irp_irg(i), new_inline_irg_env()); @@ -1238,59 +1329,49 @@ void inline_leave_functions(int maxsize, int leavesize, int size) { free_callee_info(current_ir_graph); irg_walk(get_irg_end(current_ir_graph), NULL, collect_calls2, - get_irg_link(current_ir_graph)); + get_irg_link(current_ir_graph)); } - /* and now inline. - Inline leaves recursively -- we might construct new leaves. */ - /* int itercnt = 1; */ + /* -- and now inline. -- */ + + /* Inline leaves recursively -- we might construct new leaves. */ while (did_inline) { - /* printf("iteration %d\n", itercnt++); */ did_inline = 0; + for (i = 0; i < n_irgs; ++i) { ir_node *call; - eset *walkset; int phiproj_computed = 0; current_ir_graph = get_irp_irg(i); env = (inline_irg_env *)get_irg_link(current_ir_graph); - /* we can not walk and change a set, nor remove from it. - So recompute.*/ - walkset = env->call_nodes; - env->call_nodes = eset_create(); - for (call = eset_first(walkset); call; call = eset_next(walkset)) { - inline_irg_env *callee_env; - ir_graph *callee = get_call_called_irg(call); - - if (env->n_nodes > maxsize) break; - if (callee && - ((is_leave(callee) && is_smaller(callee, leavesize)) || - (get_irg_inline_property(callee) == irg_inline_forced))) { + for (call = eset_first(env->call_nodes); call; call = eset_next(env->call_nodes)) { + ir_graph *callee; + + if (get_irn_op(call) == op_Tuple) continue; /* We already have inlined this call. */ + callee = get_call_called_irg(call); + + if (env->n_nodes > maxsize) continue; // break; + + if (callee && (is_leave(callee) && is_smaller(callee, leavesize))) { if (!phiproj_computed) { phiproj_computed = 1; collect_phiprojs(current_ir_graph); } - callee_env = (inline_irg_env *)get_irg_link(callee); -/* printf(" %s: Inlineing %s.\n", get_entity_name(get_irg_entity(current_ir_graph)), */ -/* get_entity_name(get_irg_entity(callee))); */ - if (inline_method(call, callee)) { - did_inline = 1; - env->n_call_nodes--; - eset_insert_all(env->call_nodes, callee_env->call_nodes); - env->n_call_nodes += callee_env->n_call_nodes; - env->n_nodes += callee_env->n_nodes; - callee_env->n_callers--; - } - } else { - eset_insert(env->call_nodes, call); + did_inline = inline_method(call, callee); + + if (did_inline) { + /* Do some statistics */ + inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee); + env->n_call_nodes --; + env->n_nodes += callee_env->n_nodes; + callee_env->n_callers--; + } } } - eset_destroy(walkset); } } - /* printf("Non leaves\n"); */ /* inline other small functions. 
*/ for (i = 0; i < n_irgs; ++i) { ir_node *call; @@ -1305,26 +1386,26 @@ void inline_leave_functions(int maxsize, int leavesize, int size) { walkset = env->call_nodes; env->call_nodes = eset_create(); for (call = eset_first(walkset); call; call = eset_next(walkset)) { - inline_irg_env *callee_env; - ir_graph *callee = get_call_called_irg(call); + ir_graph *callee; + + if (get_irn_op(call) == op_Tuple) continue; /* We already inlined. */ + callee = get_call_called_irg(call); - if (env->n_nodes > maxsize) break; - if (callee && is_smaller(callee, size)) { + if (callee && + ((is_smaller(callee, size) && (env->n_nodes < maxsize)) || /* small function */ + (get_irg_inline_property(callee) == irg_inline_forced))) { if (!phiproj_computed) { phiproj_computed = 1; collect_phiprojs(current_ir_graph); } - callee_env = (inline_irg_env *)get_irg_link(callee); -/* printf(" %s: Inlineing %s.\n", get_entity_name(get_irg_entity(current_ir_graph)), */ -/* get_entity_name(get_irg_entity(callee))); */ if (inline_method(call, callee)) { - did_inline = 1; - env->n_call_nodes--; - eset_insert_all(env->call_nodes, callee_env->call_nodes); - env->n_call_nodes += callee_env->n_call_nodes; - env->n_nodes += callee_env->n_nodes; - callee_env->n_callers--; - } + inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee); + env->n_call_nodes--; + eset_insert_all(env->call_nodes, callee_env->call_nodes); /* @@@ ??? This are the wrong nodes !? Not the copied ones. */ + env->n_call_nodes += callee_env->n_call_nodes; + env->n_nodes += callee_env->n_nodes; + callee_env->n_callers--; + } } else { eset_insert(env->call_nodes, call); } @@ -1337,11 +1418,11 @@ void inline_leave_functions(int maxsize, int leavesize, int size) { #if 0 env = (inline_irg_env *)get_irg_link(current_ir_graph); if ((env->n_call_nodes_orig != env->n_call_nodes) || - (env->n_callers_orig != env->n_callers)) + (env->n_callers_orig != env->n_callers)) printf("Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n", - env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes, - env->n_callers_orig, env->n_callers, - get_entity_name(get_irg_entity(current_ir_graph))); + env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes, + env->n_callers_orig, env->n_callers, + get_entity_name(get_irg_entity(current_ir_graph))); #endif free_inline_irg_env((inline_irg_env *)get_irg_link(current_ir_graph)); } @@ -1354,9 +1435,20 @@ void inline_leave_functions(int maxsize, int leavesize, int size) { /* will be executed only if needed. */ /*******************************************************************/ +/** + * Returns non-zero, is a block is not reachable from Start. + */ +static int +is_Block_unreachable(ir_node *block) { + return is_Block_dead(block) || get_Block_dom_depth(block) < 0; +} + /** * Find the earliest correct block for N. --- Place N into the * same Block as its dominance-deepest Input. + * + * We have to avoid calls to get_nodes_block() here + * because the graph is floating. */ static void place_floats_early(ir_node *n, pdeq *worklist) @@ -1368,17 +1460,17 @@ place_floats_early(ir_node *n, pdeq *worklist) mark_irn_visited(n); /* Place floating nodes. 
*/ - if (get_op_pinned(get_irn_op(n)) == op_pin_state_floats) { + if (get_irn_pinned(n) == op_pin_state_floats) { int depth = 0; - ir_node *b = new_Bad(); /* The block to place this node in */ - int bad_recursion = is_Bad(get_nodes_block(n)); + ir_node *b = NULL; /* The block to place this node in */ + int bad_recursion = is_Block_unreachable(get_irn_n(n, -1)); assert(get_irn_op(n) != op_Block); if ((get_irn_op(n) == op_Const) || - (get_irn_op(n) == op_SymConst) || - (is_Bad(n)) || - (get_irn_op(n) == op_Unknown)) { + (get_irn_op(n) == op_SymConst) || + (is_Bad(n)) || + (get_irn_op(n) == op_Unknown)) { /* These nodes will not be placed by the loop below. */ b = get_irg_start_block(current_ir_graph); depth = 1; @@ -1391,8 +1483,8 @@ place_floats_early(ir_node *n, pdeq *worklist) ir_node *dep_block; if ((irn_not_visited(dep)) - && (get_op_pinned(get_irn_op(dep)) == op_pin_state_floats)) { - place_floats_early(dep, worklist); + && (get_irn_pinned(dep) == op_pin_state_floats)) { + place_floats_early(dep, worklist); } /* @@ -1405,20 +1497,21 @@ place_floats_early(ir_node *n, pdeq *worklist) /* Because all loops contain at least one op_pin_state_pinned node, now all our inputs are either op_pin_state_pinned or place_early has already been finished on them. We do not have any unfinished inputs! */ - dep_block = get_nodes_block(dep); - if ((!is_Bad(dep_block)) && - (get_Block_dom_depth(dep_block) > depth)) { - b = dep_block; - depth = get_Block_dom_depth(dep_block); + dep_block = get_irn_n(dep, -1); + if ((!is_Block_dead(dep_block)) && + (get_Block_dom_depth(dep_block) > depth)) { + b = dep_block; + depth = get_Block_dom_depth(dep_block); } /* Avoid that the node is placed in the Start block */ - if ((depth == 1) && (get_Block_dom_depth(get_nodes_block(n)) > 1)) { - b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0); - assert(b != get_irg_start_block(current_ir_graph)); - depth = 2; + if ((depth == 1) && (get_Block_dom_depth(get_irn_n(n, -1)) > 1)) { + b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0); + assert(b != get_irg_start_block(current_ir_graph)); + depth = 2; } } - set_nodes_block(n, b); + if (b) + set_nodes_block(n, b); } /* Add predecessors of non floating nodes on worklist. */ @@ -1438,7 +1531,7 @@ place_floats_early(ir_node *n, pdeq *worklist) * places all floating nodes reachable from its argument through floating * nodes and adds all beginnings at op_pin_state_pinned nodes to the worklist. */ -static INLINE void place_early(pdeq* worklist) { +static INLINE void place_early(pdeq *worklist) { assert(worklist); inc_irg_visited(current_ir_graph); @@ -1455,8 +1548,38 @@ static INLINE void place_early(pdeq* worklist) { current_ir_graph->op_pin_state_pinned = op_pin_state_pinned; } +/** + * Compute the deepest common ancestor of block and dca. + */ +static ir_node *calc_dca(ir_node *dca, ir_node *block) +{ + assert(block); + + /* we do not want to place nodes in dead blocks */ + if (is_Block_dead(block)) + return dca; + + /* We found a first legal placement. */ + if (!dca) return block; -/** deepest common dominance ancestor of DCA and CONSUMER of PRODUCER. */ + /* Find a placement that is dominates both, dca and block. 
*/ + while (get_Block_dom_depth(block) > get_Block_dom_depth(dca)) + block = get_Block_idom(block); + + while (get_Block_dom_depth(dca) > get_Block_dom_depth(block)) { + dca = get_Block_idom(dca); + } + + while (block != dca) + { block = get_Block_idom(block); dca = get_Block_idom(dca); } + + return dca; +} + +/** Deepest common dominance ancestor of DCA and CONSUMER of PRODUCER. + * I.e., DCA is the block where we might place PRODUCER. + * A data flow edge points from producer to consumer. + */ static ir_node * consumer_dom_dca (ir_node *dca, ir_node *consumer, ir_node *producer) { @@ -1470,29 +1593,30 @@ consumer_dom_dca (ir_node *dca, ir_node *consumer, ir_node *producer) int i, irn_arity; ir_node *phi_block = get_nodes_block(consumer); irn_arity = get_irn_arity(consumer); + for (i = 0; i < irn_arity; i++) { if (get_irn_n(consumer, i) == producer) { - block = get_nodes_block(get_Block_cfgpred(phi_block, i)); + ir_node *new_block = get_nodes_block(get_Block_cfgpred(phi_block, i)); + + if (! is_Block_unreachable(new_block)) + block = calc_dca(block, new_block); } } + + if (! block) + block = get_irn_n(producer, -1); + } else { assert(is_no_Block(consumer)); block = get_nodes_block(consumer); } /* Compute the deepest common ancestor of block and dca. */ - assert(block); - if (!dca) return block; - while (get_Block_dom_depth(block) > get_Block_dom_depth(dca)) - block = get_Block_idom(block); - while (get_Block_dom_depth(dca) > get_Block_dom_depth(block)) - dca = get_Block_idom(dca); - while (block != dca) - { block = get_Block_idom(block); dca = get_Block_idom(dca); } - - return dca; + return calc_dca(dca, block); } +/* FIXME: the name clashes here with the function from ana/field_temperature.c + * please rename. */ static INLINE int get_irn_loop_depth(ir_node *n) { return get_loop_depth(get_irn_loop(n)); } @@ -1500,6 +1624,9 @@ static INLINE int get_irn_loop_depth(ir_node *n) { /** * Move n to a block with less loop depth than it's current block. The * new block must be dominated by early. + * + * @param n the node that should be moved + * @param early the earliest block we can n move to */ static void move_out_of_loops (ir_node *n, ir_node *early) @@ -1512,10 +1639,11 @@ move_out_of_loops (ir_node *n, ir_node *early) dca with the least loop nesting depth, but still dominated by our early placement. */ dca = get_nodes_block(n); + best = dca; while (dca != early) { dca = get_Block_idom(dca); - if (!dca) break; /* should we put assert(dca)? */ + if (!dca || is_Bad(dca)) break; /* may be Bad if not reachable from Start */ if (get_irn_loop_depth(dca) < get_irn_loop_depth(best)) { best = dca; } @@ -1536,7 +1664,7 @@ move_out_of_loops (ir_node *n, ir_node *early) * `optimal' Block between the latest and earliest legal block. * The `optimal' block is the dominance-deepest block of those * with the least loop-nesting-depth. This places N out of as many - * loops as possible and then makes it as control dependant as + * loops as possible and then makes it as control dependent as * possible. */ static void @@ -1547,13 +1675,22 @@ place_floats_late(ir_node *n, pdeq *worklist) assert (irn_not_visited(n)); /* no multiple placement */ + mark_irn_visited(n); + /* no need to place block nodes, control nodes are already placed. */ if ((get_irn_op(n) != op_Block) && (!is_cfop(n)) && (get_irn_mode(n) != mode_X)) { /* Remember the early placement of this block to move it out of loop no further than the early placement. 
*/ - early = get_nodes_block(n); + early = get_irn_n(n, -1); + + /* + * BEWARE: Here we also get code, that is live, but + * was in a dead block. If the node is life, but because + * of CSE in a dead block, we still might need it. + */ + /* Assure that our users are all placed, except the Phi-nodes. --- Each data flow cycle contains at least one Phi-node. We have to break the `user has to be placed before the @@ -1562,47 +1699,64 @@ place_floats_late(ir_node *n, pdeq *worklist) final region of our users, which is OK with Phi-nodes, as they are op_pin_state_pinned, and they never have to be placed after a producer of one of their inputs in the same block anyway. */ - for (i = 0; i < get_irn_n_outs(n); i++) { + for (i = get_irn_n_outs(n) - 1; i >= 0; --i) { ir_node *succ = get_irn_out(n, i); if (irn_not_visited(succ) && (get_irn_op(succ) != op_Phi)) - place_floats_late(succ, worklist); + place_floats_late(succ, worklist); } /* We have to determine the final block of this node... except for constants. */ - if ((get_op_pinned(get_irn_op(n)) == op_pin_state_floats) && - (get_irn_op(n) != op_Const) && - (get_irn_op(n) != op_SymConst)) { + if ((get_irn_pinned(n) == op_pin_state_floats) && + (get_irn_op(n) != op_Const) && + (get_irn_op(n) != op_SymConst)) { ir_node *dca = NULL; /* deepest common ancestor in the dominator tree of all nodes' blocks depending on us; our final placement has to dominate DCA. */ - for (i = 0; i < get_irn_n_outs(n); i++) { - dca = consumer_dom_dca (dca, get_irn_out(n, i), n); - } - set_nodes_block(n, dca); + for (i = get_irn_n_outs(n) - 1; i >= 0; --i) { + ir_node *out = get_irn_out(n, i); + ir_node *outbl; + + if (get_irn_op(out) == op_End) { + /* + * This consumer is the End node, a keep alive edge. + * This is not a real consumer, so we ignore it + */ + continue; + } - move_out_of_loops (n, early); + /* ignore if out is in dead code */ + outbl = get_irn_n(out, -1); + if (is_Block_unreachable(outbl)) + continue; + dca = consumer_dom_dca(dca, out, n); + } + if (dca) { + set_nodes_block(n, dca); + move_out_of_loops (n, early); + } + /* else all outs are in dead code */ } } - mark_irn_visited(n); - /* Add predecessors of all non-floating nodes on list. (Those of floating - nodes are placeded already and therefore are marked.) */ + nodes are placed already and therefore are marked.) */ for (i = 0; i < get_irn_n_outs(n); i++) { + ir_node *succ = get_irn_out(n, i); if (irn_not_visited(get_irn_out(n, i))) { - pdeq_putr (worklist, get_irn_out(n, i)); + pdeq_putr (worklist, succ); } } } -static INLINE void place_late(pdeq* worklist) { +static INLINE void place_late(pdeq *worklist) { assert(worklist); inc_irg_visited(current_ir_graph); /* This fills the worklist initially. */ place_floats_late(get_irg_start_block(current_ir_graph), worklist); + /* And now empty the worklist again... */ while (!pdeq_empty (worklist)) { ir_node *n = pdeq_getl (worklist); @@ -1611,7 +1765,7 @@ static INLINE void place_late(pdeq* worklist) { } void place_code(ir_graph *irg) { - pdeq* worklist; + pdeq *worklist; ir_graph *rem = current_ir_graph; current_ir_graph = irg; @@ -1634,7 +1788,7 @@ void place_code(ir_graph *irg) { place_early(worklist); /* place_early invalidates the outs, place_late needs them. */ - compute_outs(irg); + compute_irg_outs(irg); /* Now move the nodes down in the dominator tree. This reduces the unnecessary executions of the node. 
*/ place_late(worklist); @@ -1645,331 +1799,6 @@ void place_code(ir_graph *irg) { current_ir_graph = rem; } - - -/********************************************************************/ -/* Control flow optimization. */ -/* Removes Bad control flow predecessors and empty blocks. A block */ -/* is empty if it contains only a Jmp node. */ -/* Blocks can only be removed if they are not needed for the */ -/* semantics of Phi nodes. */ -/********************************************************************/ - -/** - * Removes Tuples from Block control flow predecessors. - * Optimizes blocks with equivalent_node(). - * Replaces n by Bad if n is unreachable control flow. - */ -static void merge_blocks(ir_node *n, void *env) { - int i; - set_irn_link(n, NULL); - - if (get_irn_op(n) == op_Block) { - /* Remove Tuples */ - for (i = 0; i < get_Block_n_cfgpreds(n); i++) - /* GL @@@ : is this possible? if (get_opt_normalize()) -- added, all tests go through. - A different order of optimizations might cause problems. */ - if (get_opt_normalize()) - set_Block_cfgpred(n, i, skip_Tuple(get_Block_cfgpred(n, i))); - } else if (get_opt_optimize() && (get_irn_mode(n) == mode_X)) { - /* We will soon visit a block. Optimize it before visiting! */ - ir_node *b = get_nodes_block(n); - ir_node *new_node = equivalent_node(b); - while (irn_not_visited(b) && (!is_Bad(new_node)) && (new_node != b)) { - /* We would have to run gigo if new is bad, so we - promote it directly below. */ - assert(((b == new_node) || - get_opt_control_flow_straightening() || - get_opt_control_flow_weak_simplification()) && - ("strange flag setting")); - exchange (b, new_node); - b = new_node; - new_node = equivalent_node(b); - } - if (is_Bad(new_node) && get_opt_normalize()) exchange(n, new_Bad()); - } -} - -/** - * Collects all Phi nodes in link list of Block. - * Marks all blocks "block_visited" if they contain a node other - * than Jmp. - */ -static void collect_nodes(ir_node *n, void *env) { - if (is_no_Block(n)) { - ir_node *b = get_nodes_block(n); - - if ((get_irn_op(n) == op_Phi)) { - /* Collect Phi nodes to compact ins along with block's ins. */ - set_irn_link(n, get_irn_link(b)); - set_irn_link(b, n); - } else if ((get_irn_op(n) != op_Jmp) && !is_Bad(b)) { /* Check for non empty block. */ - mark_Block_block_visited(b); - } - } -} - -/** Returns true if pred is predecessor of block. */ -static int is_pred_of(ir_node *pred, ir_node *b) { - int i; - for (i = 0; i < get_Block_n_cfgpreds(b); i++) { - ir_node *b_pred = get_nodes_block(get_Block_cfgpred(b, i)); - if (b_pred == pred) return 1; - } - return 0; -} - -static int test_whether_dispensable(ir_node *b, int pos) { - int i, j, n_preds = 1; - int dispensable = 1; - ir_node *cfop = get_Block_cfgpred(b, pos); - ir_node *pred = get_nodes_block(cfop); - - if (get_Block_block_visited(pred) + 1 - < get_irg_block_visited(current_ir_graph)) { - if (!get_opt_optimize() || !get_opt_control_flow_strong_simplification()) { - /* Mark block so that is will not be removed. */ - set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1); - return 1; - } - /* Seems to be empty. */ - if (!get_irn_link(b)) { - /* There are no Phi nodes ==> dispensable. */ - n_preds = get_Block_n_cfgpreds(pred); - } else { - /* b's pred blocks and pred's pred blocks must be pairwise disjunct. - Work preds < pos as if they were already removed. 
*/ - for (i = 0; i < pos; i++) { - ir_node *b_pred = get_nodes_block(get_Block_cfgpred(b, i)); - if (get_Block_block_visited(b_pred) + 1 - < get_irg_block_visited(current_ir_graph)) { - for (j = 0; j < get_Block_n_cfgpreds(b_pred); j++) { - ir_node *b_pred_pred = get_nodes_block(get_Block_cfgpred(b_pred, j)); - if (is_pred_of(b_pred_pred, pred)) dispensable = 0; - } - } else { - if (is_pred_of(b_pred, pred)) dispensable = 0; - } - } - for (i = pos +1; i < get_Block_n_cfgpreds(b); i++) { - ir_node *b_pred = get_nodes_block(get_Block_cfgpred(b, i)); - if (is_pred_of(b_pred, pred)) dispensable = 0; - } - if (!dispensable) { - set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1); - n_preds = 1; - } else { - n_preds = get_Block_n_cfgpreds(pred); - } - } - } - - return n_preds; -} - -static void optimize_blocks(ir_node *b, void *env) { - int i, j, k, max_preds, n_preds; - ir_node *pred, *phi; - ir_node **in; - - /* Count the number of predecessor if this block is merged with pred blocks - that are empty. */ - max_preds = 0; - for (i = 0; i < get_Block_n_cfgpreds(b); i++) { - max_preds += test_whether_dispensable(b, i); - } - in = (ir_node **) malloc(max_preds * sizeof(ir_node *)); - -/*- - printf(" working on "); DDMN(b); - for (i = 0; i < get_Block_n_cfgpreds(b); i++) { - pred = get_nodes_block(get_Block_cfgpred(b, i)); - if (is_Bad(get_Block_cfgpred(b, i))) { - printf(" removing Bad %i\n ", i); - } else if (get_Block_block_visited(pred) +1 - < get_irg_block_visited(current_ir_graph)) { - printf(" removing pred %i ", i); DDMN(pred); - } else { printf(" Nothing to do for "); DDMN(pred); } - } - * end Debug output -*/ - - /*- Fix the Phi nodes -*/ - phi = get_irn_link(b); - while (phi) { - assert(get_irn_op(phi) == op_Phi); - /* Find the new predecessors for the Phi */ - n_preds = 0; - for (i = 0; i < get_Block_n_cfgpreds(b); i++) { - pred = get_nodes_block(get_Block_cfgpred(b, i)); - if (is_Bad(get_Block_cfgpred(b, i))) { - /* Do nothing */ - } else if (get_Block_block_visited(pred) +1 - < get_irg_block_visited(current_ir_graph)) { - /* It's an empty block and not yet visited. */ - ir_node *phi_pred = get_Phi_pred(phi, i); - for (j = 0; j < get_Block_n_cfgpreds(pred); j++) { - if (get_nodes_block(phi_pred) == pred) { - assert(get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */ - in[n_preds] = get_Phi_pred(phi_pred, j); - } else { - in[n_preds] = phi_pred; - } - n_preds++; - } - /* The Phi_pred node is replaced now if it is a Phi. - In Schleifen kann offenbar der entfernte Phi Knoten legal verwendet werden. - Daher muss der Phiknoten durch den neuen ersetzt werden. - Weiter muss der alte Phiknoten entfernt werden (durch ersetzen oder - durch einen Bad) damit er aus den keep_alive verschwinden kann. - Man sollte also, falls keine Schleife vorliegt, exchange mit new_Bad - aufrufen. */ - if (get_nodes_block(phi_pred) == pred) { - /* remove the Phi as it might be kept alive. Further there - might be other users. */ - exchange(phi_pred, phi); /* geht, ist aber doch semantisch falsch! Warum?? */ - } - } else { - in[n_preds] = get_Phi_pred(phi, i); - n_preds ++; - } - } - /* Fix the node */ - set_irn_in(phi, n_preds, in); - - phi = get_irn_link(phi); - } - -/*- - This happens only if merge between loop backedge and single loop entry. 
-*/ - for (k = 0; k < get_Block_n_cfgpreds(b); k++) { - pred = get_nodes_block(get_Block_cfgpred(b, k)); - if (get_Block_block_visited(pred)+1 < get_irg_block_visited(current_ir_graph)) { - phi = get_irn_link(pred); - while (phi) { - if (get_irn_op(phi) == op_Phi) { - set_nodes_block(phi, b); - - n_preds = 0; - for (i = 0; i < k; i++) { - pred = get_nodes_block(get_Block_cfgpred(b, i)); - if (is_Bad(get_Block_cfgpred(b, i))) { - /* Do nothing */ - } else if (get_Block_block_visited(pred) +1 - < get_irg_block_visited(current_ir_graph)) { - /* It's an empty block and not yet visited. */ - for (j = 0; j < get_Block_n_cfgpreds(pred); j++) { - /* @@@ Hier brauche ich Schleifeninformation!!! Kontrollflusskante - muss Rueckwaertskante sein! (An allen vier in[n_preds] = phi - Anweisungen.) Trotzdem tuts bisher!! */ - in[n_preds] = phi; - n_preds++; - } - } else { - in[n_preds] = phi; - n_preds++; - } - } - for (i = 0; i < get_Phi_n_preds(phi); i++) { - in[n_preds] = get_Phi_pred(phi, i); - n_preds++; - } - for (i = k+1; i < get_Block_n_cfgpreds(b); i++) { - pred = get_nodes_block(get_Block_cfgpred(b, i)); - if (is_Bad(get_Block_cfgpred(b, i))) { - /* Do nothing */ - } else if (get_Block_block_visited(pred) +1 - < get_irg_block_visited(current_ir_graph)) { - /* It's an empty block and not yet visited. */ - for (j = 0; j < get_Block_n_cfgpreds(pred); j++) { - in[n_preds] = phi; - n_preds++; - } - } else { - in[n_preds] = phi; - n_preds++; - } - } - set_irn_in(phi, n_preds, in); - } - phi = get_irn_link(phi); - } - } - } - - /*- Fix the block -*/ - n_preds = 0; - for (i = 0; i < get_Block_n_cfgpreds(b); i++) { - pred = get_nodes_block(get_Block_cfgpred(b, i)); - if (is_Bad(get_Block_cfgpred(b, i))) { - /* Do nothing */ - } else if (get_Block_block_visited(pred) +1 - < get_irg_block_visited(current_ir_graph)) { - /* It's an empty block and not yet visited. */ - assert(get_Block_n_cfgpreds(b) > 1); - /* Else it should be optimized by equivalent_node. */ - for (j = 0; j < get_Block_n_cfgpreds(pred); j++) { - in[n_preds] = get_Block_cfgpred(pred, j); - n_preds++; - } - /* Remove block as it might be kept alive. */ - exchange(pred, b/*new_Bad()*/); - } else { - in[n_preds] = get_Block_cfgpred(b, i); - n_preds ++; - } - } - set_irn_in(b, n_preds, in); - free(in); -} - -void optimize_cf(ir_graph *irg) { - int i; - ir_node **in; - ir_node *end = get_irg_end(irg); - ir_graph *rem = current_ir_graph; - current_ir_graph = irg; - - /* Handle graph state */ - assert(get_irg_phase_state(irg) != phase_building); - if (get_irg_outs_state(current_ir_graph) == outs_consistent) - set_irg_outs_inconsistent(current_ir_graph); - if (get_irg_dom_state(current_ir_graph) == dom_consistent) - set_irg_dom_inconsistent(current_ir_graph); - - /* Use block visited flag to mark non-empty blocks. */ - inc_irg_block_visited(irg); - irg_walk(end, merge_blocks, collect_nodes, NULL); - - /* Optimize the standard code. */ - irg_block_walk(get_irg_end_block(irg), optimize_blocks, NULL, NULL); - - /* Walk all keep alives, optimize them if block, add to new in-array - for end if useful. */ - in = NEW_ARR_F (ir_node *, 1); - in[0] = get_nodes_block(end); - inc_irg_visited(current_ir_graph); - for(i = 0; i < get_End_n_keepalives(end); i++) { - ir_node *ka = get_End_keepalive(end, i); - if (irn_not_visited(ka)) { - if ((get_irn_op(ka) == op_Block) && Block_not_block_visited(ka)) { - set_irg_block_visited(current_ir_graph, /* Don't walk all the way to Start. 
*/ - get_irg_block_visited(current_ir_graph)-1); - irg_block_walk(ka, optimize_blocks, NULL, NULL); - mark_irn_visited(ka); - ARR_APP1 (ir_node *, in, ka); - } else if (get_irn_op(ka) == op_Phi) { - mark_irn_visited(ka); - ARR_APP1 (ir_node *, in, ka); - } - } - } - /* DEL_ARR_F(end->in); GL @@@ tut nicht ! */ - end->in = in; - - current_ir_graph = rem; -} - - /** * Called by walker of remove_critical_cf_edges(). *