diff --git a/ir/ir/irgopt.c b/ir/ir/irgopt.c
index f8ceab8d4..034a2a742 100644
--- a/ir/ir/irgopt.c
+++ b/ir/ir/irgopt.c
@@ -12,11 +12,10 @@
 #ifdef HAVE_CONFIG_H
-# include <config.h>
+# include "config.h"
 #endif
 
 #include
-#include
 
 #include "irnode_t.h"
 #include "irgraph_t.h"
@@ -32,6 +31,7 @@
 #include "pset.h"
 #include "eset.h"
 #include "pdeq.h"       /* Fuer code placement */
+#include "xmalloc.h"
 
 #include "irouts.h"
 #include "irloop_t.h"
@@ -55,9 +55,9 @@ static void init_link (ir_node *n, void *env) {
 }
 
 #if 0   /* Old version. Avoids Ids.
-   This is not necessary:  we do a postwalk, and get_irn_n
-   removes ids anyways.  So it's much cheaper to call the
-   optimization less often and use the exchange() algorithm. */
+          This is not necessary:  we do a postwalk, and get_irn_n
+          removes ids anyways.  So it's much cheaper to call the
+          optimization less often and use the exchange() algorithm. */
 static void
 optimize_in_place_wrapper (ir_node *n, void *env) {
   int i, irn_arity;
@@ -245,7 +245,7 @@ copy_node (ir_node *n, void *env) {
   /* Copy the attributes.  These might point to additional data.  If this
      was allocated on the old obstack the pointers now are dangling.  This
      frees e.g. the memory of the graph_arr allocated in new_immBlock. */
-  copy_attrs(n, nn);
+  copy_node_attr(n, nn);
   new_backedge_info(nn);
   set_new_node(n, nn);
 
@@ -258,7 +258,6 @@ copy_node (ir_node *n, void *env) {
   /* printf("\n old node: "); DDMSG2(n);
      printf(" new node: "); DDMSG2(nn); */
 
-
 }
 
 /**
@@ -282,9 +281,9 @@ copy_preds (ir_node *n, void *env) {
     irn_arity = get_irn_arity(n);
     for (i = 0; i < irn_arity; i++)
       if (get_irn_opcode(get_irn_n(n, i)) != iro_Bad) {
-    set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
-    /*if (is_backedge(n, i)) set_backedge(nn, j);*/
-    j++;
+        set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
+        /*if (is_backedge(n, i)) set_backedge(nn, j);*/
+        j++;
       }
     /* repair the block visited flag from above misuse. Repair it in both
       graphs so that the old one can still be used. */
@@ -295,15 +294,15 @@ copy_preds (ir_node *n, void *env) {
       We don't call optimize_in_place as it requires
       that the fields in ir_graph are set properly. */
     if ((get_opt_control_flow_straightening()) &&
-    (get_Block_n_cfgpreds(nn) == 1) &&
-    (get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp)) {
+        (get_Block_n_cfgpreds(nn) == 1) &&
+        (get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp)) {
       ir_node *old = get_nodes_block(get_Block_cfgpred(nn, 0));
       if (nn == old) {
-      /* Jmp jumps into the block it is in -- deal self cycle. */
-      assert(is_Bad(get_new_node(get_irg_bad(current_ir_graph))));
-      exchange(nn, get_new_node(get_irg_bad(current_ir_graph)));
+        /* Jmp jumps into the block it is in -- deal self cycle. */
+        assert(is_Bad(get_new_node(get_irg_bad(current_ir_graph))));
+        exchange(nn, get_new_node(get_irg_bad(current_ir_graph)));
       } else {
-      exchange(nn, old);
+        exchange(nn, old);
       }
     }
   } else if (get_irn_opcode(n) == iro_Phi) {
@@ -315,9 +314,9 @@ copy_preds (ir_node *n, void *env) {
     irn_arity = get_irn_arity(n);
     for (i = 0; i < irn_arity; i++)
       if (get_irn_opcode(get_irn_n(block, i)) != iro_Bad) {
-    set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
-    /*if (is_backedge(n, i)) set_backedge(nn, j);*/
-    j++;
+        set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
+        /*if (is_backedge(n, i)) set_backedge(nn, j);*/
+        j++;
       }
     /* If the pre walker reached this Phi after the post walker visited the
        block block_visited is > 0. */
@@ -344,7 +343,7 @@
 */
 static void
 copy_graph (int copy_node_nr) {
-  ir_node *oe, *ne, *ob, *nb; /* old end, new end, old bad, new bad */
+  ir_node *oe, *ne, *ob, *nb, *om, *nm; /* old end, new end, old bad, new bad, old NoMem, new NoMem */
   ir_node *ka;      /* keep alive */
   int i, irn_arity;
 
@@ -358,9 +357,10 @@ copy_graph (int copy_node_nr) {
         -1,
         NULL);
   /* Copy the attributes.  Well, there might be some in the future... */
-  copy_attrs(oe, ne);
+  copy_node_attr(oe, ne);
   set_new_node(oe, ne);
 
+  /* copy the Bad node */
   ob = get_irg_bad(current_ir_graph);
   nb = new_ir_node(get_irn_dbg_info(ob),
        current_ir_graph,
@@ -371,6 +371,17 @@ copy_graph (int copy_node_nr) {
        NULL);
   set_new_node(ob, nb);
 
+  /* copy the NoMem node */
+  om = get_irg_no_mem(current_ir_graph);
+  nm = new_ir_node(get_irn_dbg_info(om),
+       current_ir_graph,
+       NULL,
+       op_NoMem,
+       mode_M,
+       0,
+       NULL);
+  set_new_node(om, nm);
+
   /* copy the live nodes */
   irg_walk(get_nodes_block(oe), copy_node, copy_preds, (void *)copy_node_nr);
   /* copy_preds for the end node ... */
@@ -383,7 +394,7 @@ copy_graph (int copy_node_nr) {
   for (i = 0; i < irn_arity; i++) {
     ka = get_irn_intra_n(oe, i);
     if ((get_irn_op(ka) == op_Block) &&
-    (get_irn_visited(ka) < get_irg_visited(current_ir_graph))) {
+        (get_irn_visited(ka) < get_irg_visited(current_ir_graph))) {
       /* We must keep the block alive and copy everything reachable */
       set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
       irg_walk(ka, copy_node, copy_preds, (void *)copy_node_nr);
@@ -397,16 +408,17 @@ copy_graph (int copy_node_nr) {
     ka = get_irn_intra_n(oe, i);
     if ((get_irn_op(ka) == op_Phi)) {
       if (get_irn_visited(ka) < get_irg_visited(current_ir_graph)) {
-    /* We didn't copy the Phi yet. */
-    set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
-    irg_walk(ka, copy_node, copy_preds, (void *)copy_node_nr);
+        /* We didn't copy the Phi yet. */
+        set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
+        irg_walk(ka, copy_node, copy_preds, (void *)copy_node_nr);
       }
       add_End_keepalive(ne, get_new_node(ka));
     }
   }
 
-  /* start block somtimes only reached after keep alives */
+  /* start block sometimes only reached after keep alives */
   set_nodes_block(nb, get_new_node(get_nodes_block(ob)));
+  set_nodes_block(nm, get_new_node(get_nodes_block(om)));
 }
 
 /**
@@ -427,6 +439,7 @@ copy_graph_env (int copy_node_nr) {
   set_irn_link(get_irg_globals (current_ir_graph), NULL);
   set_irn_link(get_irg_args    (current_ir_graph), NULL);
   set_irn_link(get_irg_initial_mem(current_ir_graph), NULL);
+  set_irn_link(get_irg_no_mem  (current_ir_graph), NULL);
 
   /* we use the block walk flag for removing Bads from Blocks ins. */
   inc_irg_block_visited(current_ir_graph);
 
@@ -471,6 +484,12 @@ copy_graph_env (int copy_node_nr) {
     copy_preds(get_irg_bad(current_ir_graph), NULL);
   }
   set_irg_bad(current_ir_graph, get_new_node(get_irg_bad(current_ir_graph)));
+
+  if (get_irn_link(get_irg_no_mem(current_ir_graph)) == NULL) {
+    copy_node(get_irg_no_mem(current_ir_graph), (void *)copy_node_nr);
+    copy_preds(get_irg_no_mem(current_ir_graph), NULL);
+  }
+  set_irg_no_mem(current_ir_graph, get_new_node(get_irg_no_mem(current_ir_graph)));
 }
 
 /**
@@ -484,7 +503,7 @@
 void
 dead_node_elimination(ir_graph *irg) {
   ir_graph *rem;
-  int rem_ipview = interprocedural_view;
+  int rem_ipview = get_interprocedural_view();
   struct obstack *graveyard_obst = NULL;
   struct obstack *rebirth_obst   = NULL;
 
@@ -494,7 +513,7 @@ dead_node_elimination(ir_graph *irg) {
   /* Remember external state of current_ir_graph. */
   rem = current_ir_graph;
   current_ir_graph = irg;
-  interprocedural_view = 0;
+  set_interprocedural_view(false);
 
   /* Handle graph state */
   assert(get_irg_phase_state(current_ir_graph) != phase_building);
@@ -510,7 +529,7 @@ dead_node_elimination(ir_graph *irg) {
     graveyard_obst = irg->obst;
 
     /* A new obstack, where the reachable nodes will be copied to. */
-    rebirth_obst = (struct obstack *) xmalloc (sizeof (struct obstack));
+    rebirth_obst = xmalloc (sizeof(*rebirth_obst));
     current_ir_graph->obst = rebirth_obst;
     obstack_init (current_ir_graph->obst);
 
@@ -530,7 +549,7 @@ dead_node_elimination(ir_graph *irg) {
   stat_dead_node_elim_stop(irg);
 
   current_ir_graph = rem;
-  interprocedural_view = rem_ipview;
+  set_interprocedural_view(rem_ipview);
 }
 
 /**
@@ -559,17 +578,25 @@ static void relink_bad_block_predecessors(ir_node *n, void *env) {
 
   /* arity changing: set new predecessors without bad nodes */
   if (new_irn_arity < old_irn_arity) {
-    /* get new predecessor array without Block predecessor */
+    /* Get new predecessor array. We do not resize the array, as we must
+       keep the old one to update Phis. */
    new_in = NEW_ARR_D (ir_node *, current_ir_graph->obst, (new_irn_arity+1));
 
     /* set new predeseccors in array */
     new_in[0] = NULL;
     new_irn_n = 1;
-    for (i = 1; i < old_irn_arity; i++) {
-      irn = get_irn_n(n, i);
-      if (!is_Bad(irn)) new_in[new_irn_n++] = irn;
+    for (i = 0; i < old_irn_arity; i++) {
+      irn = get_irn_n(n, i);
+      if (!is_Bad(irn)) {
+        new_in[new_irn_n] = irn;
+        is_backedge(n, i) ? set_backedge(n, new_irn_n-1) : set_not_backedge(n, new_irn_n-1);
+        new_irn_n++;
+      }
     }
+    //ARR_SETLEN(int, n->attr.block.backedge, new_irn_arity);
+    ARR_SHRINKLEN(n->attr.block.backedge, new_irn_arity);
     n->in = new_in;
+
   } /* ir node has bad predecessors */
 } /* Block is not relinked */
 
@@ -604,18 +631,23 @@ static void relink_bad_predecessors(ir_node *n, void *env) {
   /* Relink Phi predeseccors if count of predeseccors changed */
   if (old_irn_arity != ARR_LEN(get_irn_in(block))) {
     /* set new predeseccors in array
-       n->in[0] remains the same block */
+       n->in[0] remains the same block */
     new_irn_arity = 1;
     for(i = 1; i < old_irn_arity; i++)
-      if (!is_Bad((ir_node *)old_in[i])) n->in[new_irn_arity++] = n->in[i];
+      if (!is_Bad((ir_node *)old_in[i])) {
+        n->in[new_irn_arity] = n->in[i];
+        is_backedge(n, i) ? set_backedge(n, new_irn_arity) : set_not_backedge(n, new_irn_arity);
+        new_irn_arity++;
+      }
 
     ARR_SETLEN(ir_node *, n->in, new_irn_arity);
+    ARR_SETLEN(int, n->attr.phi_backedge, new_irn_arity);
   }
 } /* n is a Phi node */
 
-/**
+/*
 * Removes Bad Bad predecesors from Blocks and the corresponding
 * inputs to Phi nodes as in dead_node_elimination but without
 * copying the graph.
@@ -727,7 +759,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
     return 0;
 
   assert(get_method_n_params(get_entity_type(get_irg_entity(called_graph))) ==
-     get_method_n_params(get_Call_type(call)));
+         get_method_n_params(get_Call_type(call)));
 
   /*
   * currently, we cannot inline two cases:
@@ -748,6 +780,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
   if (get_irg_outs_state(current_ir_graph) == outs_consistent)
     set_irg_outs_inconsistent(current_ir_graph);
   set_irg_loopinfo_inconsistent(current_ir_graph);
+  set_irg_callee_info_state(current_ir_graph, irg_callee_info_inconsistent);
 
   /* -- Check preconditions -- */
   assert(get_irn_op(call) == op_Call);
@@ -874,8 +907,8 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
   arity = get_irn_arity(end_bl);    /* arity = n_exc + n_ret  */
   n_res = get_method_n_ress(get_Call_type(call));
 
-  res_pred = (ir_node **) malloc (n_res * sizeof (ir_node *));
-  cf_pred  = (ir_node **) malloc (arity * sizeof (ir_node *));
+  res_pred = xmalloc (n_res * sizeof(*res_pred));
+  cf_pred  = xmalloc (arity * sizeof(*cf_pred));
 
   set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */
 
@@ -930,9 +963,9 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
       }
     }
     if (n_ret > 0)
-      phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
+      phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
     else
-      phi = new_Bad();
+      phi = new_Bad();
     res_pred[j] = phi;
     /* Conserve Phi-list for further inlinings -- but might be optimized */
     if (get_nodes_block(phi) == post_bl) {
@@ -1006,7 +1039,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
     }
     main_end_bl = get_irg_end_block(current_ir_graph);
     main_end_bl_arity = get_irn_arity(main_end_bl);
-    end_preds =  (ir_node **) malloc ((n_exc + main_end_bl_arity) * sizeof (ir_node *));
+    end_preds =  xmalloc ((n_exc + main_end_bl_arity) * sizeof(*end_preds));
     for (i = 0; i < main_end_bl_arity; ++i)
       end_preds[i] = get_irn_n(main_end_bl, i);
@@ -1050,7 +1083,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
     if (i < get_Block_n_cfgpreds(end_bl)) {
       bl = get_nodes_block(cf_op);
       arity = get_Block_n_cfgpreds(end_bl) + get_Block_n_cfgpreds(bl) - 1;
-      cf_pred = (ir_node **) malloc (arity * sizeof (ir_node *));
+      cf_pred = xmalloc (arity * sizeof(*cf_pred));
       for (j = 0; j < i; j++)
         cf_pred[j] = get_Block_cfgpred(end_bl, j);
       for (j = j; j < i + get_Block_n_cfgpreds(bl); j++)
@@ -1117,8 +1150,8 @@ static void collect_calls(ir_node *call, void *env) {
     ir_graph *called_irg = get_entity_irg(get_SymConst_entity(addr));
     inline_env_t *ienv = (inline_env_t *)env;
     if (called_irg && ienv->pos < MAX_INLINE) {
-      /* The Call node calls a locally defined method.  Remember to inline. */
-      ienv->calls[ienv->pos++] = call;
+      /* The Call node calls a locally defined method.  Remember to inline. */
+      ienv->calls[ienv->pos++] = call;
     }
   }
 }
 
@@ -1158,7 +1191,7 @@ void inline_small_irgs(ir_graph *irg, int size) {
   for (i = 0; i < env.pos; i++) {
     ir_graph *callee;
     callee = get_entity_irg(get_SymConst_entity(get_Call_ptr(env.calls[i])));
-    if (((_obstack_memory_used(callee->obst) - obstack_room(callee->obst)) < size) ||
+    if (((_obstack_memory_used(callee->obst) - (int)obstack_room(callee->obst)) < size) ||
        (get_irg_inline_property(callee) == irg_inline_forced)) {
      inline_method(env.calls[i], callee);
    }
@@ -1182,7 +1215,7 @@ typedef struct {
 } inline_irg_env;
 
 static inline_irg_env *new_inline_irg_env(void) {
-  inline_irg_env *env = malloc(sizeof(inline_irg_env));
+  inline_irg_env *env = xmalloc(sizeof(*env));
   env->n_nodes = -2; /* uncount Start, End */
   env->n_nodes_orig = -2; /* uncount Start, End */
   env->call_nodes = eset_create();
@@ -1249,7 +1282,7 @@ void inline_leave_functions(int maxsize, int leavesize, int size) {
 
   if (!(get_opt_optimize() && get_opt_inline())) return;
 
-  /* extend all irgs by a temporary data structure for inlineing. */
+  /* extend all irgs by a temporary data structure for inlining. */
   for (i = 0; i < n_irgs; ++i)
     set_irg_link(get_irp_irg(i), new_inline_irg_env());
 
@@ -1260,7 +1293,7 @@ void inline_leave_functions(int maxsize, int leavesize, int size) {
     free_callee_info(current_ir_graph);
 
     irg_walk(get_irg_end(current_ir_graph), NULL, collect_calls2,
-         get_irg_link(current_ir_graph));
+             get_irg_link(current_ir_graph));
   }
 
   /* -- and now inline. -- */
@@ -1277,8 +1310,10 @@ void inline_leave_functions(int maxsize, int leavesize, int size) {
 
     env = (inline_irg_env *)get_irg_link(current_ir_graph);
     for (call = eset_first(env->call_nodes); call; call = eset_next(env->call_nodes)) {
-      if (get_irn_op(call) == op_Tuple) continue;   /* We already inlined. */
-      ir_graph *callee = get_call_called_irg(call);
+      ir_graph *callee;
+
+      if (get_irn_op(call) == op_Tuple) continue;   /* We already inlined. */
+      callee = get_call_called_irg(call);
 
       if (env->n_nodes > maxsize) continue; // break;
 
       if (callee && (is_smaller(callee, size) || (get_irg_inline_property(callee) == irg_inline_forced))) {
        if (!phiproj_computed) {
            phiproj_computed = 1;
            collect_phiprojs(current_ir_graph);
        }
-        did_inline = inline_method(call, callee);
+        did_inline = inline_method(call, callee);
 
        if (did_inline) {
-          /* Do some statistics */
-          inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
-          env->n_call_nodes --;
-          env->n_nodes += callee_env->n_nodes;
-          callee_env->n_callers--;
-        }
-      }
+          /* Do some statistics */
+          inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
+          env->n_call_nodes --;
+          env->n_nodes += callee_env->n_nodes;
+          callee_env->n_callers--;
+        }
+      }
     }
   }
 }
@@ -1315,24 +1350,26 @@ void inline_leave_functions(int maxsize, int leavesize, int size) {
     walkset = env->call_nodes;
     env->call_nodes = eset_create();
     for (call = eset_first(walkset); call; call = eset_next(walkset)) {
+      ir_graph *callee;
+
       if (get_irn_op(call) == op_Tuple) continue;   /* We already inlined. */
-      ir_graph *callee = get_call_called_irg(call);
+      callee = get_call_called_irg(call);
 
       if (callee &&
-     ((is_smaller(callee, size) && (env->n_nodes < maxsize)) ||    /* small function */
-      (get_irg_inline_property(callee) == irg_inline_forced))) {
+          ((is_smaller(callee, size) && (env->n_nodes < maxsize)) ||    /* small function */
+           (get_irg_inline_property(callee) == irg_inline_forced))) {
        if (!phiproj_computed) {
            phiproj_computed = 1;
            collect_phiprojs(current_ir_graph);
        }
        if (inline_method(call, callee)) {
-          inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
-          env->n_call_nodes--;
-          eset_insert_all(env->call_nodes, callee_env->call_nodes);  /* @@@ ??? This are the wrong nodes !? Not the copied ones. */
-          env->n_call_nodes += callee_env->n_call_nodes;
-          env->n_nodes += callee_env->n_nodes;
-          callee_env->n_callers--;
-        }
+          inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
+          env->n_call_nodes--;
+          eset_insert_all(env->call_nodes, callee_env->call_nodes);  /* @@@ ??? This are the wrong nodes !? Not the copied ones. */
+          env->n_call_nodes += callee_env->n_call_nodes;
+          env->n_nodes += callee_env->n_nodes;
+          callee_env->n_callers--;
+        }
      } else {
        eset_insert(env->call_nodes, call);
      }
@@ -1345,11 +1382,11 @@ void inline_leave_functions(int maxsize, int leavesize, int size) {
 #if 0
    env = (inline_irg_env *)get_irg_link(current_ir_graph);
    if ((env->n_call_nodes_orig != env->n_call_nodes) ||
-    (env->n_callers_orig != env->n_callers))
+        (env->n_callers_orig != env->n_callers))
      printf("Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
-         env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
-         env->n_callers_orig, env->n_callers,
-         get_entity_name(get_irg_entity(current_ir_graph)));
+             env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
+             env->n_callers_orig, env->n_callers,
+             get_entity_name(get_irg_entity(current_ir_graph)));
 #endif
    free_inline_irg_env((inline_irg_env *)get_irg_link(current_ir_graph));
  }
@@ -1376,7 +1413,7 @@ place_floats_early(ir_node *n, pdeq *worklist)
   mark_irn_visited(n);
 
   /* Place floating nodes. */
-  if (get_op_pinned(get_irn_op(n)) == op_pin_state_floats) {
+  if (get_irn_pinned(n) == op_pin_state_floats) {
     int depth = 0;
     ir_node *b = new_Bad();   /* The block to place this node in */
     int bad_recursion = is_Bad(get_nodes_block(n));
@@ -1399,7 +1436,7 @@ place_floats_early(ir_node *n, pdeq *worklist)
       ir_node *dep_block;
 
       if ((irn_not_visited(dep))
-     && (get_op_pinned(get_irn_op(dep)) == op_pin_state_floats)) {
+          && (get_irn_pinned(dep) == op_pin_state_floats)) {
         place_floats_early(dep, worklist);
       }
 
@@ -1463,6 +1500,21 @@ static INLINE void place_early(pdeq *worklist) {
   current_ir_graph->op_pin_state_pinned = op_pin_state_pinned;
 }
 
+/** Compute the deepest common ancestor of block and dca. */
+static ir_node *calc_dca(ir_node *dca, ir_node *block)
+{
+  assert(block);
+  if (!dca) return block;
+  while (get_Block_dom_depth(block) > get_Block_dom_depth(dca))
+    block = get_Block_idom(block);
+  while (get_Block_dom_depth(dca) > get_Block_dom_depth(block)) {
+    dca = get_Block_idom(dca);
+  }
+  while (block != dca)
+    { block = get_Block_idom(block); dca = get_Block_idom(dca); }
+
+  return dca;
+}
 
 /** Deepest common dominance ancestor of DCA and CONSUMER of PRODUCER.
  * I.e., DCA is the block where we might place PRODUCER. */
@@ -1484,7 +1536,9 @@ consumer_dom_dca (ir_node *dca, ir_node *consumer, ir_node *producer) for (i = 0; i < irn_arity; i++) { if (get_irn_n(consumer, i) == producer) { - block = get_nodes_block(get_Block_cfgpred(phi_block, i)); + ir_node *new_block = get_nodes_block(get_Block_cfgpred(phi_block, i)); + + block = calc_dca(block, new_block); } } } else { @@ -1493,17 +1547,7 @@ consumer_dom_dca (ir_node *dca, ir_node *consumer, ir_node *producer) } /* Compute the deepest common ancestor of block and dca. */ - assert(block); - if (!dca) return block; - while (get_Block_dom_depth(block) > get_Block_dom_depth(dca)) - block = get_Block_idom(block); - while (get_Block_dom_depth(dca) > get_Block_dom_depth(block)) { - dca = get_Block_idom(dca); - } - while (block != dca) - { block = get_Block_idom(block); dca = get_Block_idom(dca); } - - return dca; + return calc_dca(dca, block); } static INLINE int get_irn_loop_depth(ir_node *n) { @@ -1586,25 +1630,25 @@ place_floats_late(ir_node *n, pdeq *worklist) for (i = 0; i < get_irn_n_outs(n); i++) { ir_node *succ = get_irn_out(n, i); if (irn_not_visited(succ) && (get_irn_op(succ) != op_Phi)) - place_floats_late(succ, worklist); + place_floats_late(succ, worklist); } /* We have to determine the final block of this node... except for constants. */ - if ((get_op_pinned(get_irn_op(n)) == op_pin_state_floats) && - (get_irn_op(n) != op_Const) && - (get_irn_op(n) != op_SymConst)) { + if ((get_irn_pinned(n) == op_pin_state_floats) && + (get_irn_op(n) != op_Const) && + (get_irn_op(n) != op_SymConst)) { ir_node *dca = NULL; /* deepest common ancestor in the dominator tree of all nodes' blocks depending on us; our final placement has to dominate DCA. */ for (i = 0; i < get_irn_n_outs(n); i++) { - ir_node *out = get_irn_out(n, i); - /* ignore if out is in dead code */ - ir_node *outbl = get_nodes_block(out); - if (is_Bad(outbl) || get_Block_dom_depth(outbl) == -1) - continue; - dca = consumer_dom_dca (dca, out, n); + ir_node *out = get_irn_out(n, i); + /* ignore if out is in dead code */ + ir_node *outbl = get_nodes_block(out); + if (is_Bad(outbl) || get_Block_dom_depth(outbl) == -1) + continue; + dca = consumer_dom_dca (dca, out, n); } if (dca) { set_nodes_block(n, dca); @@ -1618,8 +1662,9 @@ place_floats_late(ir_node *n, pdeq *worklist) /* Add predecessors of all non-floating nodes on list. (Those of floating nodes are placeded already and therefore are marked.) */ for (i = 0; i < get_irn_n_outs(n); i++) { + ir_node *succ = get_irn_out(n, i); if (irn_not_visited(get_irn_out(n, i))) { - pdeq_putr (worklist, get_irn_out(n, i)); + pdeq_putr (worklist, succ); } } }
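
The code-placement hunks above factor the "deepest common dominator ancestor" walk out of consumer_dom_dca() into the new calc_dca() helper and apply it once per predecessor block of a consuming Phi. A minimal standalone sketch of that walk, using a hypothetical block struct in place of libfirm's ir_node with get_Block_idom()/get_Block_dom_depth() (all names below are illustrative stand-ins, not libfirm API):

    #include <assert.h>
    #include <stdio.h>

    /* Illustrative stand-in for a basic block with dominator-tree links. */
    typedef struct block {
        struct block *idom;  /* immediate dominator, NULL at the start block */
        int           depth; /* depth in the dominator tree */
    } block;

    /* Deepest common ancestor of a and b in the dominator tree, mirroring
       calc_dca(): lift the deeper node until the depths match, then lift
       both in lockstep until they meet. */
    static block *dca(block *a, block *b)
    {
        assert(b != NULL);
        if (a == NULL) return b;              /* first consumer seen */
        while (a->depth > b->depth) a = a->idom;
        while (b->depth > a->depth) b = b->idom;
        while (a != b) { a = a->idom; b = b->idom; }
        return a;
    }

    int main(void)
    {
        /* start dominates left and right, so dca(left, right) == start. */
        block start = { NULL, 0 };
        block left  = { &start, 1 };
        block right = { &start, 1 };
        printf("%s\n", dca(&left, &right) == &start ? "ok" : "fail");
        return 0;
    }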
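
The relink_bad_block_predecessors()/relink_bad_predecessors() hunks keep the backedge flags in step while squeezing Bad predecessors out of the in-arrays, where the old code moved only the edges. A sketch of that invariant with plain C arrays standing in for libfirm's ARR_* flexible arrays (compact_preds and the -1 sentinel are hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    /* Compact preds[]/backedge[] in place, dropping "bad" entries while
       keeping both arrays in step -- the invariant the patch maintains for
       Blocks and Phis.  Returns the new length; the caller then shrinks
       both arrays to it, as ARR_SHRINKLEN/ARR_SETLEN do in the patch. */
    static int compact_preds(int *preds, bool *backedge, int n, int bad)
    {
        int j = 0;
        for (int i = 0; i < n; i++) {
            if (preds[i] != bad) {
                preds[j]    = preds[i];
                backedge[j] = backedge[i];  /* the flag moves with its edge */
                j++;
            }
        }
        return j;
    }

    int main(void)
    {
        int  preds[]    = { 7, -1, 9, -1, 3 };  /* -1 plays the Bad node */
        bool backedge[] = { false, true, true, false, false };
        int  n = compact_preds(preds, backedge, 5, -1);
        for (int i = 0; i < n; i++)
            printf("%d%s ", preds[i], backedge[i] ? "*" : "");
        printf("\n");  /* prints: 7 9* 3 */
        return 0;
    }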
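
Several hunks replace casted malloc() calls with xmalloc(n * sizeof(*ptr)): the wrapper never returns NULL, so call sites need no error handling, and sizeof(*ptr) keeps the element size tied to the pointer's declared type. An abort-on-failure allocator in the spirit of xmalloc.h (my_xmalloc is an illustrative equivalent, not libfirm's definition):

    #include <stdio.h>
    #include <stdlib.h>

    /* Abort-on-failure allocation: callers never check for NULL. */
    static void *my_xmalloc(size_t size)
    {
        void *res = malloc(size);
        if (res == NULL) {
            fputs("out of memory\n", stderr);
            abort();
        }
        return res;
    }

    int main(void)
    {
        int n = 4;
        /* sizeof(*arr) tracks the declared element type automatically. */
        double *arr = my_xmalloc(n * sizeof(*arr));
        for (int i = 0; i < n; i++) arr[i] = i * 0.5;
        printf("%g\n", arr[3]);  /* prints 1.5 */
        free(arr);
        return 0;
    }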