From: Michael Beck Date: Fri, 13 Jul 2007 16:57:59 +0000 (+0000) Subject: Replaced set_irn_n(*, -1, *) and get_irn_n(*, -1) by new get_nodes_block()/set_nodes_... X-Git-Url: http://nsz.repo.hu/git/?a=commitdiff_plain;h=b519dd6a1e6d85e843eff533be787d1f138a07ff;p=libfirm Replaced set_irn_n(*, -1, *) and get_irn_n(*, -1) by new get_nodes_block()/set_nodes_block() implementation caveats: - the old outs do not see the anchor node yet - the new edges wrongly use get_irn_n(*, -1) - the new edges do not realize that the move of a node moves all its Projs - the anchor node still must have the -1 input set (due to edges bug...) [r15135] --- diff --git a/include/libfirm/irgraph.h b/include/libfirm/irgraph.h index 0b065ffb7..ecf881802 100644 --- a/include/libfirm/irgraph.h +++ b/include/libfirm/irgraph.h @@ -513,9 +513,6 @@ static INLINE void clear_using_irn_link(ir_graph *irg) { (void) irg; } static INLINE int using_irn_link(const ir_graph *irg) { (void) irg; return 0; } #endif -/** Normalization: Move Proj nodes into the same block as its predecessors */ -void normalize_proj_nodes(ir_graph *irg); - /** set a description for local value n */ void set_irg_loc_description(ir_graph *irg, int n, void *description); diff --git a/ir/ana/execution_frequency.c b/ir/ana/execution_frequency.c index 907f9f384..d1c275106 100644 --- a/ir/ana/execution_frequency.c +++ b/ir/ana/execution_frequency.c @@ -160,7 +160,7 @@ my_irg_walk_2_both(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void if (node->op == op_Proj) pred = get_irn_n(node, 0); else - pred = get_irn_n(node, -1); + pred = get_nodes_block(node); if (pred->visited < current_ir_graph->visited) my_irg_walk_2_both(pred, pre, post, env); } diff --git a/ir/ana/irextbb.c b/ir/ana/irextbb.c index de41e7cb7..83e8510da 100644 --- a/ir/ana/irextbb.c +++ b/ir/ana/irextbb.c @@ -279,7 +279,7 @@ void free_extbb(ir_graph *irg) { /* Return the extended block of a node. 
*/ ir_extblk *get_nodes_extbb(ir_node *node) { - ir_node *block = is_Block(node) ? node : get_irn_n(node, -1); + ir_node *block = is_Block(node) ? node : get_nodes_block(node); return get_Block_extbb(block); } diff --git a/ir/ana/irouts.c b/ir/ana/irouts.c index fb5f029df..6f0028350 100644 --- a/ir/ana/irouts.c +++ b/ir/ana/irouts.c @@ -111,7 +111,7 @@ int get_Block_n_cfg_outs_ka(ir_node *bl) { if (get_irn_mode(bl->out[i]) == mode_X) { /* ignore End if we are in the Endblock */ if (get_irn_op(bl->out[i]) == op_End && - get_irn_n(bl->out[i], -1) == bl) + get_nodes_block(bl->out[i]) == bl) continue; else ++n_cfg_outs; @@ -149,13 +149,13 @@ ir_node *get_Block_cfg_out_ka(ir_node *bl, int pos) { if (get_irn_mode(bl->out[i]) == mode_X) { /* ignore End if we are in the Endblock */ if (get_irn_op(bl->out[i]) == op_End && - get_irn_n(bl->out[i], -1) == bl) + get_nodes_block(bl->out[i]) == bl) continue; if (out_pos == pos) { ir_node *cfop = bl->out[i]; /* handle keep-alive here */ if (get_irn_op(cfop) == op_End) - return get_irn_n(cfop, -1); + return get_nodes_block(cfop); return cfop->out[1]; } else ++out_pos; @@ -237,7 +237,7 @@ void irg_out_block_walk(ir_node *node, } /*--------------------------------------------------------------------*/ -/** Building and Removing the out datasturcture **/ +/** Building and Removing the out datastructure **/ /** **/ /** The outs of a graph are allocated in a single, large array. **/ /** This allows to allocate and deallocate the memory for the outs **/ @@ -258,21 +258,31 @@ void irg_out_block_walk(ir_node *node, /** Returns the amount of out edges for not yet visited successors. */ static int _count_outs(ir_node *n) { - int start, i, res, irn_arity; + int i, res, irn_arity; mark_irn_visited(n); n->out = (ir_node **) 1; /* Space for array size. */ - start = is_Block(n) ? 0 : -1; irn_arity = get_irn_arity(n); - res = irn_arity - start + 1; /* --1 or --0; 1 for array size. 
*/ + res = irn_arity + 1; - for (i = start; i < irn_arity; ++i) { + if (is_no_Block(n)) { + ir_node *pred = get_nodes_block(n); + + /* count outs for predecessors */ + if (irn_not_visited(pred)) + res += _count_outs(pred); + + /* Count my outs */ + pred->out = (ir_node **)INT_TO_PTR(PTR_TO_INT(pred->out) + 1); + ++res; + } + for (i = 0; i < irn_arity; ++i) { /* Optimize Tuples. They annoy if walking the cfg. */ ir_node *pred = skip_Tuple(get_irn_n(n, i)); set_irn_n(n, i, pred); - /* count outs for successors */ + /* count outs for predecessors */ if (irn_not_visited(pred)) res += _count_outs(pred); @@ -318,7 +328,7 @@ static int count_outs(ir_graph *irg) { * @return The next free address */ static ir_node **_set_out_edges(ir_node *n, ir_node **free) { - int n_outs, start, i, irn_arity; + int n_outs, i, irn_arity; ir_node *pred; set_irn_visited(n, get_irg_visited(current_ir_graph)); @@ -335,10 +345,18 @@ static ir_node **_set_out_edges(ir_node *n, ir_node **free) { edge. */ n->out[0] = (ir_node *)0; - start = is_Block(n) ? 0 : -1; - irn_arity = get_irn_arity(n); + if (is_no_Block(n)) { + pred = get_nodes_block(n); + /* Recursion */ + if (get_irn_visited(pred) < get_irg_visited(current_ir_graph)) + free = _set_out_edges(pred, free); + /* Remember our back edge */ + pred->out[get_irn_n_outs(pred)+1] = n; + pred->out[0] = INT_TO_PTR(get_irn_n_outs(pred) + 1); + } - for (i = start; i < irn_arity; ++i) { + irn_arity = get_irn_arity(n); + for (i = 0; i < irn_arity; ++i) { pred = get_irn_n(n, i); /* Recursion */ if (get_irn_visited(pred) < get_irg_visited(current_ir_graph)) @@ -494,16 +512,20 @@ static void init_count(ir_node * node, void *env) { * which is saved in "env" */ static void node_arity_count(ir_node * node, void * env) { - int *anz = (int *) env, arity, n_outs, i, start; + int *anz = (int *) env, arity, n_outs, i; ir_node *succ; arity = get_irn_arity(node); - start = (is_Block(node)) ? 
0 : -1; + n_outs = 1 + arity; - n_outs = 1 + arity + (-start); // ((is_Block(node)) ? 0 : 1); // Why + 1?? - *anz += n_outs; + if (is_no_Block(node)) { + succ = get_nodes_block(node); + succ->out = (ir_node **)INT_TO_PTR(PTR_TO_INT(succ->out) + 1); - for(i = start; i < arity; i++) { + ++n_outs; + } + *anz += n_outs; + for (i = 0; i < arity; i++) { succ = get_irn_n(node, i); succ->out = (ir_node **)INT_TO_PTR(PTR_TO_INT(succ->out) + 1); } @@ -553,10 +575,15 @@ static void set_array_pointer(ir_node *node, void *env) { static void set_out_pointer(ir_node * node, void *env) { int i, arity = get_irn_arity(node); ir_node *succ; - int start = (!is_Block(node)) ? -1 : 0; (void) env; - for (i = start; i < arity; ++i) { + if (is_no_Block(node)) { + succ = get_nodes_block(node); + succ->out[get_irn_n_outs(succ)+1] = node; + succ->out[0] = INT_TO_PTR(get_irn_n_outs(succ) + 1); + + } + for (i = 0; i < arity; ++i) { succ = get_irn_n(node, i); succ->out[get_irn_n_outs(succ)+1] = node; succ->out[0] = INT_TO_PTR(get_irn_n_outs(succ) + 1); diff --git a/ir/be/beabi.c b/ir/be/beabi.c index 068191bd2..a3a4746a2 100644 --- a/ir/be/beabi.c +++ b/ir/be/beabi.c @@ -1971,7 +1971,7 @@ static void modify_irg(be_abi_irg_t *env) which may be wrong. Add Conv's then. */ mode = get_irn_mode(args[i]); if (mode != get_irn_mode(repl)) { - repl = new_r_Conv(irg, get_irn_n(repl, -1), repl, mode); + repl = new_r_Conv(irg, get_nodes_block(repl), repl, mode); } exchange(args[i], repl); } diff --git a/ir/be/bemain.c b/ir/be/bemain.c index 6dac697b7..b6d5f708c 100644 --- a/ir/be/bemain.c +++ b/ir/be/bemain.c @@ -310,9 +310,6 @@ static void initialize_birg(be_irg_t *birg, ir_graph *irg, be_main_env_t *env) /* set the current graph (this is important for several firm functions) */ current_ir_graph = irg; - /* Normalize proj nodes. */ - normalize_proj_nodes(irg); - /* we do this before critical edge split. 
As this produces less returns, because sometimes (= 164.gzip) multiple returns are slower */ normalize_n_returns(irg); diff --git a/ir/be/bespillremat.c b/ir/be/bespillremat.c index 9a128961f..b0e1e3696 100644 --- a/ir/be/bespillremat.c +++ b/ir/be/bespillremat.c @@ -3466,6 +3466,7 @@ delete_remat(spill_ilp_t * si, ir_node * remat) { } } +/* FIXME: is this still correct:? Proj's are neither scheduled anymore nor they have a block ... */ static void clean_remat_info(spill_ilp_t * si) { diff --git a/ir/ir/irarch.c b/ir/ir/irarch.c index 041286077..f544bc03a 100644 --- a/ir/ir/irarch.c +++ b/ir/ir/irarch.c @@ -124,7 +124,7 @@ ir_node *arch_dep_replace_mul_with_shifts(ir_node *irn) { return irn; if (get_irn_op(irn) == op_Mul && mode_is_int(mode)) { - ir_node *block = get_irn_n(irn, -1); + ir_node *block = get_nodes_block(irn); ir_node *left = get_binop_left(irn); ir_node *right = get_binop_right(irn); tarval *tv = NULL; @@ -563,7 +563,7 @@ static struct mu magicu(tarval *d) { static ir_node *replace_div_by_mulh(ir_node *div, tarval *tv) { dbg_info *dbg = get_irn_dbg_info(div); ir_node *n = get_binop_left(div); - ir_node *block = get_irn_n(div, -1); + ir_node *block = get_nodes_block(div); ir_mode *mode = get_irn_mode(n); int bits = get_mode_size_bits(mode); ir_node *q, *t, *c; @@ -657,7 +657,7 @@ ir_node *arch_dep_replace_div_by_const(ir_node *irn) { left = get_Div_left(irn); mode = get_irn_mode(left); - block = get_irn_n(irn, -1); + block = get_nodes_block(irn); dbg = get_irn_dbg_info(irn); bits = get_mode_size_bits(mode); @@ -748,7 +748,7 @@ ir_node *arch_dep_replace_mod_by_const(ir_node *irn) { left = get_Mod_left(irn); mode = get_irn_mode(left); - block = get_irn_n(irn, -1); + block = get_nodes_block(irn); dbg = get_irn_dbg_info(irn); bits = get_mode_size_bits(mode); n = (bits + 7) / 8; @@ -842,7 +842,7 @@ void arch_dep_replace_divmod_by_const(ir_node **div, ir_node **mod, ir_node *irn left = get_DivMod_left(irn); mode = get_irn_mode(left); - block = get_irn_n(irn, 
-1); + block = get_nodes_block(irn); dbg = get_irn_dbg_info(irn); bits = get_mode_size_bits(mode); diff --git a/ir/ir/irdump.c b/ir/ir/irdump.c index 4dd1df520..b1344e2b6 100644 --- a/ir/ir/irdump.c +++ b/ir/ir/irdump.c @@ -537,22 +537,7 @@ static int node_floats(ir_node *n) { */ static void ird_walk_graph(ir_graph *irg, irg_walk_func *pre, irg_walk_func *post, void *env) { if (dump_anchors) { - int i; - - if (pre) - pre(irg->anchor, env); - - for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) { - ir_node *n = get_irg_anchor(irg, i); - - if (n) { - /* reset the visit flag: will be increase in the walker */ - set_irg_visited(irg, get_irg_visited(irg) - 1); - irg_walk(n, pre, post, env); - } - } - if (post) - post(irg->anchor, env); + irg_walk_anchors(irg, pre, post, env); } else { irg_walk_graph(irg, pre, post, env); } @@ -1415,7 +1400,7 @@ print_data_edge_vcgattr(FILE *F, ir_node *from, int to) { * do not use get_nodes_block() here, will fail * if the irg is not pinned. */ - if (get_irn_n(from, -1) == get_irn_n(get_irn_n(from, to), -1)) + if (get_nodes_block(from) == get_nodes_block(get_irn_n(from, to))) fprintf(F, INTRA_DATA_EDGE_ATTR); else fprintf(F, INTER_DATA_EDGE_ATTR); @@ -1427,7 +1412,7 @@ print_mem_edge_vcgattr(FILE *F, ir_node *from, int to) { * do not use get_nodes_block() here, will fail * if the irg is not pinned. 
*/ - if (get_irn_n(from, -1) == get_irn_n(get_irn_n(from, to), -1)) + if (get_nodes_block(from) == get_nodes_block(get_irn_n(from, to))) fprintf(F, INTRA_MEM_EDGE_ATTR); else fprintf(F, INTER_MEM_EDGE_ATTR); @@ -1457,6 +1442,9 @@ static void print_edge_vcgattr(FILE *F, ir_node *from, int to) { fprintf(F, KEEP_ALIVE_DF_EDGE_ATTR); } break; + case iro_Anchor: + fprintf(F, ANCHOR_EDGE_ATTR); + break; default: if (is_Proj(from)) { if (get_irn_mode(from) == mode_M) diff --git a/ir/ir/irdumptxt.c b/ir/ir/irdumptxt.c index f536b05d0..f516d2933 100644 --- a/ir/ir/irdumptxt.c +++ b/ir/ir/irdumptxt.c @@ -121,8 +121,8 @@ int dump_irnode_to_file(FILE *F, ir_node *n) { if (get_irn_pinned(n) == op_pin_state_floats && get_irg_pinned(get_irn_irg(n)) == op_pin_state_floats) { fprintf(F, " node was pinned in "); - dump_node_opcode(F, get_irn_n(n, -1)); - fprintf(F, " %ld\n", get_irn_node_nr(get_irn_n(n, -1))); + dump_node_opcode(F, get_nodes_block(n)); + fprintf(F, " %ld\n", get_irn_node_nr(get_nodes_block(n))); } fprintf(F, " arity: %d\n", get_irn_intra_arity(n)); @@ -130,8 +130,8 @@ int dump_irnode_to_file(FILE *F, ir_node *n) { fprintf(F, " pred nodes: \n"); if (!is_Block(n)) { fprintf(F, " -1: "); - dump_node_opcode(F, get_irn_n(n, -1)); - fprintf(F, " %ld\n", get_irn_node_nr(get_irn_n(n, -1))); + dump_node_opcode(F, get_nodes_block(n)); + fprintf(F, " %ld\n", get_irn_node_nr(get_nodes_block(n))); } for ( i = 0; i < get_irn_intra_arity(n); ++i) { fprintf(F, " %d: %s ", i, is_intra_backedge(n, i) ? "be" : " "); diff --git a/ir/ir/iredges.c b/ir/ir/iredges.c index 84a442ee2..af8b08d1b 100644 --- a/ir/ir/iredges.c +++ b/ir/ir/iredges.c @@ -397,12 +397,11 @@ void edges_notify_edge(ir_node *src, int pos, ir_node *tgt, ir_node *old_tgt, ir } if (edges_activated_kind(irg, EDGE_KIND_BLOCK) && is_Block(src)) { - /* do not use get_nodes_block() here, it fails when running unpinned */ - ir_node *bl_old = old_tgt ? get_irn_n(skip_Proj(old_tgt), -1) : NULL; + ir_node *bl_old = old_tgt ? 
get_nodes_block(skip_Proj(old_tgt)) : NULL; ir_node *bl_tgt = NULL; if (tgt) - bl_tgt = is_Bad(tgt) ? tgt : get_irn_n(skip_Proj(tgt), -1); + bl_tgt = is_Bad(tgt) ? tgt : get_nodes_block(skip_Proj(tgt)); edges_notify_edge_kind(src, pos, bl_tgt, bl_old, EDGE_KIND_BLOCK, irg); } diff --git a/ir/ir/irgmod.c b/ir/ir/irgmod.c index 0a0656280..88c549750 100644 --- a/ir/ir/irgmod.c +++ b/ir/ir/irgmod.c @@ -52,14 +52,13 @@ void turn_into_tuple(ir_node *node, int arity) { if (get_irn_arity(node) == arity) { /* keep old array */ } else { - /* don't use get_nodes_block here, we allow turn_into_tuple for unpinned nodes */ - ir_node *block = get_irn_n(node, -1); + ir_node *block = get_nodes_block(node); /* Allocate new array, don't free old in_array, it's on the obstack. */ edges_node_deleted(node, current_ir_graph); node->in = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity+1); /* clear the new in array, else edge_notify tries to delete garbage */ memset(node->in, 0, (arity+1) * sizeof(node->in[0])); - set_irn_n(node, -1, block); + set_nodes_block(node, block); } } diff --git a/ir/ir/irgopt.c b/ir/ir/irgopt.c index a7420fd2f..4a6790f6c 100644 --- a/ir/ir/irgopt.c +++ b/ir/ir/irgopt.c @@ -405,7 +405,7 @@ static void copy_preds(ir_node *n, void *env) { /* Don't copy node if corresponding predecessor in block is Bad. The Block itself should not be Bad. */ block = get_nodes_block(n); - set_irn_n(nn, -1, get_new_node(block)); + set_nodes_block(nn, get_new_node(block)); j = 0; irn_arity = get_irn_arity(n); for (i = 0; i < irn_arity; i++) { @@ -1757,7 +1757,7 @@ place_floats_early(ir_node *n, waitq *worklist) { /* Place floating nodes. */ if (get_irn_pinned(n) == op_pin_state_floats) { - ir_node *curr_block = get_irn_n(n, -1); + ir_node *curr_block = get_nodes_block(n); int in_dead_block = is_Block_unreachable(curr_block); int depth = 0; ir_node *b = NULL; /* The block to place this node in */ @@ -1789,7 +1789,7 @@ place_floats_early(ir_node *n, waitq *worklist) { */ if (! 
in_dead_block) { if (get_irn_pinned(pred) == op_pin_state_floats && - is_Block_unreachable(get_irn_n(pred, -1))) + is_Block_unreachable(get_nodes_block(pred))) set_nodes_block(pred, curr_block); } place_floats_early(pred, worklist); @@ -1805,14 +1805,14 @@ place_floats_early(ir_node *n, waitq *worklist) { /* Because all loops contain at least one op_pin_state_pinned node, now all our inputs are either op_pin_state_pinned or place_early() has already been finished on them. We do not have any unfinished inputs! */ - pred_block = get_irn_n(pred, -1); + pred_block = get_nodes_block(pred); if ((!is_Block_dead(pred_block)) && (get_Block_dom_depth(pred_block) > depth)) { b = pred_block; depth = get_Block_dom_depth(pred_block); } /* Avoid that the node is placed in the Start block */ - if ((depth == 1) && (get_Block_dom_depth(get_irn_n(n, -1)) > 1) + if ((depth == 1) && (get_Block_dom_depth(get_nodes_block(n)) > 1) && get_irg_phase_state(current_ir_graph) != phase_backend) { b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0); assert(b != get_irg_start_block(current_ir_graph)); @@ -1851,14 +1851,14 @@ place_floats_early(ir_node *n, waitq *worklist) { } } else if (is_Phi(n)) { ir_node *pred; - ir_node *curr_block = get_irn_n(n, -1); + ir_node *curr_block = get_nodes_block(n); int in_dead_block = is_Block_unreachable(curr_block); /* * Phi nodes: move nodes from dead blocks into the effective use * of the Phi-input if the Phi is not in a bad block. */ - pred = get_irn_n(n, -1); + pred = get_nodes_block(n); if (irn_not_visited(pred)) waitq_put(worklist, pred); @@ -1868,7 +1868,7 @@ place_floats_early(ir_node *n, waitq *worklist) { if (irn_not_visited(pred)) { if (! 
in_dead_block && get_irn_pinned(pred) == op_pin_state_floats && - is_Block_unreachable(get_irn_n(pred, -1))) { + is_Block_unreachable(get_nodes_block(pred))) { set_nodes_block(pred, get_Block_cfgpred_block(curr_block, i)); } waitq_put(worklist, pred); @@ -1876,13 +1876,13 @@ place_floats_early(ir_node *n, waitq *worklist) { } } else { ir_node *pred; - ir_node *curr_block = get_irn_n(n, -1); + ir_node *curr_block = get_nodes_block(n); int in_dead_block = is_Block_unreachable(curr_block); /* * All other nodes: move nodes from dead blocks into the same block. */ - pred = get_irn_n(n, -1); + pred = get_nodes_block(n); if (irn_not_visited(pred)) waitq_put(worklist, pred); @@ -1892,7 +1892,7 @@ place_floats_early(ir_node *n, waitq *worklist) { if (irn_not_visited(pred)) { if (! in_dead_block && get_irn_pinned(pred) == op_pin_state_floats && - is_Block_unreachable(get_irn_n(pred, -1))) { + is_Block_unreachable(get_nodes_block(pred))) { set_nodes_block(pred, curr_block); } waitq_put(worklist, pred); @@ -1982,7 +1982,7 @@ consumer_dom_dca(ir_node *dca, ir_node *consumer, ir_node *producer) { } if (! block) - block = get_irn_n(producer, -1); + block = get_nodes_block(producer); } else { assert(is_no_Block(consumer)); block = get_nodes_block(consumer); @@ -2056,7 +2056,7 @@ static ir_node *get_deepest_common_ancestor(ir_node *node, ir_node *dca) dca = get_deepest_common_ancestor(succ, dca); } else { /* ignore if succ is in dead code */ - succ_blk = get_irn_n(succ, -1); + succ_blk = get_nodes_block(succ); if (is_Block_unreachable(succ_blk)) continue; dca = consumer_dom_dca(dca, succ, node); @@ -2110,7 +2110,7 @@ static void place_floats_late(ir_node *n, pdeq *worklist) { (get_irn_mode(n) != mode_X)) { /* Remember the early_blk placement of this block to move it out of loop no further than the early_blk placement. 
*/ - early_blk = get_irn_n(n, -1); + early_blk = get_nodes_block(n); /* * BEWARE: Here we also get code, that is live, but diff --git a/ir/ir/irgraph.c b/ir/ir/irgraph.c index 0686a1cd3..7d266e4f1 100644 --- a/ir/ir/irgraph.c +++ b/ir/ir/irgraph.c @@ -842,29 +842,6 @@ void set_irg_fp_model(ir_graph *irg, unsigned model) { irg->fp_model = model; } -/** - * walker Start->End: places Proj nodes into the same block - * as it's predecessors - * - * @param n the node - * @param env ignored - */ -static void normalize_proj_walker(ir_node *n, void *env) { - (void) env; - if (is_Proj(n)) { - ir_node *pred = get_Proj_pred(n); - ir_node *block = get_nodes_block(pred); - - set_nodes_block(n, block); - } -} - -/* move Proj nodes into the same block as its predecessors */ -void normalize_proj_nodes(ir_graph *irg) { - irg_walk_graph(irg, NULL, normalize_proj_walker, NULL); - set_irg_outs_inconsistent(irg); -} - /* set a description for local value n */ void set_irg_loc_description(ir_graph *irg, int n, void *description) { assert(0 <= n && n < irg->n_loc); diff --git a/ir/ir/irgraph_t.h b/ir/ir/irgraph_t.h index 9bea0eea6..5a4bec4b3 100644 --- a/ir/ir/irgraph_t.h +++ b/ir/ir/irgraph_t.h @@ -122,6 +122,7 @@ _get_irg_end_block(const ir_graph *irg) { static INLINE void _set_irg_end_block(ir_graph *irg, ir_node *node) { + /* FIXME: if this line is killed the whole graph collapse, why */ set_irn_n(irg->anchor, -1, node); set_irn_n(irg->anchor, anchor_end_block, node); } diff --git a/ir/ir/irgwalk.c b/ir/ir/irgwalk.c index 3fc2fa993..af8f22345 100644 --- a/ir/ir/irgwalk.c +++ b/ir/ir/irgwalk.c @@ -153,7 +153,7 @@ irg_walk_2_pre(ir_node *node, irg_walk_func *pre, void * env) { pre(node, env); if (node->op != op_Block) { - ir_node *pred = get_irn_n(node, -1); + ir_node *pred = get_nodes_block(node); if (pred->visited < irg->visited) cnt += irg_walk_2_pre(pred, pre, env); } @@ -179,7 +179,7 @@ irg_walk_2_post(ir_node *node, irg_walk_func *post, void * env) { set_irn_visited(node, 
irg->visited); if (node->op != op_Block) { - ir_node *pred = get_irn_n(node, -1); + ir_node *pred = get_nodes_block(node); if (pred->visited < irg->visited) cnt += irg_walk_2_post(pred, post, env); } @@ -210,7 +210,7 @@ irg_walk_2_both(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void * e pre(node, env); if (node->op != op_Block) { - ir_node *pred = get_irn_n(node, -1); + ir_node *pred = get_nodes_block(node); if (pred->visited < irg->visited) cnt += irg_walk_2_both(pred, pre, post, env); } @@ -323,7 +323,7 @@ irg_walk_in_or_dep_2_pre(ir_node *node, irg_walk_func *pre, void *env) { pre(node, env); if (node->op != op_Block) { - ir_node *pred = get_irn_n(node, -1); + ir_node *pred = get_nodes_block(node); if (pred->visited < irg->visited) cnt += irg_walk_in_or_dep_2_pre(pred, pre, env); } @@ -349,7 +349,7 @@ irg_walk_in_or_dep_2_post(ir_node *node, irg_walk_func *post, void *env) { set_irn_visited(node, irg->visited); if (node->op != op_Block) { - ir_node *pred = get_irn_n(node, -1); + ir_node *pred = get_nodes_block(node); if (pred->visited < irg->visited) cnt += irg_walk_in_or_dep_2_post(pred, post, env); } @@ -380,7 +380,7 @@ irg_walk_in_or_dep_2_both(ir_node *node, irg_walk_func *pre, irg_walk_func *post pre(node, env); if (node->op != op_Block) { - ir_node *pred = get_irn_n(node, -1); + ir_node *pred = get_nodes_block(node); if (pred->visited < irg->visited) cnt += irg_walk_in_or_dep_2_both(pred, pre, post, env); } diff --git a/ir/ir/irnode.c b/ir/ir/irnode.c index 5348ef7a8..f4d51d82f 100644 --- a/ir/ir/irnode.c +++ b/ir/ir/irnode.c @@ -2388,9 +2388,9 @@ get_irn_irg(const ir_node *node) { * irg. */ if (! 
is_Block(node)) - node = get_irn_n(node, -1); + node = get_nodes_block(node); if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */ - node = get_irn_n(node, -1); + node = get_nodes_block(node); assert(get_irn_op(node) == op_Block); return node->attr.block.irg; } @@ -2985,7 +2985,7 @@ void dump_irn(ir_node *n) { int i, arity = get_irn_arity(n); printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n); if (!is_Block(n)) { - ir_node *pred = get_irn_n(n, -1); + ir_node *pred = get_nodes_block(n); printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)), get_irn_node_nr(pred), (void *)pred); } diff --git a/ir/ir/iropt.c b/ir/ir/iropt.c index c84d81848..56f82c7c8 100644 --- a/ir/ir/iropt.c +++ b/ir/ir/iropt.c @@ -1031,7 +1031,7 @@ static ir_node *equivalent_node_Div(ir_node *n) { if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */ /* Turn Div into a tuple (mem, bad, a) */ ir_node *mem = get_Div_mem(n); - ir_node *blk = get_irn_n(n, -1); + ir_node *blk = get_nodes_block(n); turn_into_tuple(n, pn_Div_max); set_Tuple_pred(n, pn_Div_M, mem); set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(current_ir_graph, blk)); @@ -1052,7 +1052,7 @@ static ir_node *equivalent_node_Quot(ir_node *n) { if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* Quot(x, 1) == x */ /* Turn Quot into a tuple (mem, jmp, bad, a) */ ir_node *mem = get_Quot_mem(n); - ir_node *blk = get_irn_n(n, -1); + ir_node *blk = get_nodes_block(n); turn_into_tuple(n, pn_Quot_max); set_Tuple_pred(n, pn_Quot_M, mem); set_Tuple_pred(n, pn_Quot_X_regular, new_r_Jmp(current_ir_graph, blk)); @@ -1073,7 +1073,7 @@ static ir_node *equivalent_node_DivMod(ir_node *n) { /* Turn DivMod into a tuple (mem, jmp, bad, a, 0) */ ir_node *a = get_DivMod_left(n); ir_node *mem = get_Div_mem(n); - ir_node *blk = get_irn_n(n, -1); + ir_node *blk = get_nodes_block(n); ir_mode *mode 
= get_DivMod_resmode(n); turn_into_tuple(n, pn_DivMod_max); @@ -1333,7 +1333,7 @@ static ir_node *equivalent_node_Proj(ir_node *proj) { if (op == op_Load) { /* get the Load address */ ir_node *addr = get_Load_ptr(a); - ir_node *blk = get_irn_n(a, -1); + ir_node *blk = get_nodes_block(a); ir_node *confirm; if (value_not_null(addr, &confirm)) { @@ -1350,7 +1350,7 @@ static ir_node *equivalent_node_Proj(ir_node *proj) { } else if (op == op_Store) { /* get the load/store address */ ir_node *addr = get_Store_ptr(a); - ir_node *blk = get_irn_n(a, -1); + ir_node *blk = get_nodes_block(a); ir_node *confirm; if (value_not_null(addr, &confirm)) { @@ -1901,7 +1901,7 @@ static ir_node *transform_node_Add(ir_node *n) { if (mode_is_num(mode)) { if (a == b) { - ir_node *block = get_irn_n(n, -1); + ir_node *block = get_nodes_block(n); n = new_rd_Mul( get_irn_dbg_info(n), @@ -1915,7 +1915,7 @@ static ir_node *transform_node_Add(ir_node *n) { n = new_rd_Sub( get_irn_dbg_info(n), current_ir_graph, - get_irn_n(n, -1), + get_nodes_block(n), b, get_Minus_op(a), mode); @@ -1924,7 +1924,7 @@ static ir_node *transform_node_Add(ir_node *n) { n = new_rd_Sub( get_irn_dbg_info(n), current_ir_graph, - get_irn_n(n, -1), + get_nodes_block(n), a, get_Minus_op(b), mode); @@ -1936,7 +1936,7 @@ static ir_node *transform_node_Add(ir_node *n) { ir_node *mb = get_Mul_right(a); if (b == ma) { - ir_node *blk = get_irn_n(n, -1); + ir_node *blk = get_nodes_block(n); n = new_rd_Mul( get_irn_dbg_info(n), current_ir_graph, blk, ma, @@ -1948,7 +1948,7 @@ static ir_node *transform_node_Add(ir_node *n) { mode); DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A); } else if (b == mb) { - ir_node *blk = get_irn_n(n, -1); + ir_node *blk = get_nodes_block(n); n = new_rd_Mul( get_irn_dbg_info(n), current_ir_graph, blk, mb, @@ -1967,7 +1967,7 @@ static ir_node *transform_node_Add(ir_node *n) { ir_node *mb = get_Mul_right(b); if (a == ma) { - ir_node *blk = get_irn_n(n, -1); + ir_node *blk = get_nodes_block(n); n = 
new_rd_Mul( get_irn_dbg_info(n), current_ir_graph, blk, ma, @@ -1979,7 +1979,7 @@ static ir_node *transform_node_Add(ir_node *n) { mode); DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A); } else if (a == mb) { - ir_node *blk = get_irn_n(n, -1); + ir_node *blk = get_nodes_block(n); n = new_rd_Mul( get_irn_dbg_info(n), current_ir_graph, blk, mb, @@ -1996,7 +1996,7 @@ static ir_node *transform_node_Add(ir_node *n) { else if (is_Not(a) && classify_Const(b) == CNST_ONE) { /* ~x + 1 = -x */ ir_node *op = get_Not_op(a); - ir_node *blk = get_irn_n(n, -1); + ir_node *blk = get_nodes_block(n); n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, op, mode); DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_PLUS_1); } @@ -2039,14 +2039,14 @@ restart: if (left == b) { if (mode != get_irn_mode(right)) { /* This Sub is an effective Cast */ - right = new_r_Conv(get_irn_irg(n), get_irn_n(n, -1), right, mode); + right = new_r_Conv(get_irn_irg(n), get_nodes_block(n), right, mode); } n = right; DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB); } else if (right == b) { if (mode != get_irn_mode(left)) { /* This Sub is an effective Cast */ - left = new_r_Conv(get_irn_irg(n), get_irn_n(n, -1), left, mode); + left = new_r_Conv(get_irn_irg(n), get_nodes_block(n), left, mode); } n = left; DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB); @@ -2079,7 +2079,7 @@ restart: n = new_rd_Minus( get_irn_dbg_info(n), current_ir_graph, - get_irn_n(n, -1), + get_nodes_block(n), b, mode); DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_0_A); @@ -2090,7 +2090,7 @@ restart: ir_node *mb = get_Mul_right(a); if (ma == b) { - ir_node *blk = get_irn_n(n, -1); + ir_node *blk = get_nodes_block(n); n = new_rd_Mul( get_irn_dbg_info(n), current_ir_graph, blk, @@ -2104,7 +2104,7 @@ restart: mode); DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_MUL_A_X_A); } else if (mb == b) { - ir_node *blk = get_irn_n(n, -1); + ir_node *blk = get_nodes_block(n); n = new_rd_Mul( get_irn_dbg_info(n), current_ir_graph, blk, @@ -2121,7 +2121,7 @@ restart: } else if 
(get_irn_op(a) == op_Sub) { ir_node *x = get_Sub_left(a); ir_node *y = get_Sub_right(a); - ir_node *blk = get_irn_n(n, -1); + ir_node *blk = get_nodes_block(n); ir_mode *m_b = get_irn_mode(b); ir_mode *m_y = get_irn_mode(y); ir_node *add; @@ -2174,7 +2174,7 @@ static ir_node *transform_node_Mul(ir_node *n) { else if (value_of(b) == get_mode_minus_one(mode)) r = a; if (r) { - n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), r, mode); + n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), r, mode); DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_1); return n; } @@ -2201,7 +2201,7 @@ static ir_node *transform_node_Div(ir_node *n) { if (value != n) { /* Turn Div into a tuple (mem, jmp, bad, value) */ ir_node *mem = get_Div_mem(n); - ir_node *blk = get_irn_n(n, -1); + ir_node *blk = get_nodes_block(n); turn_into_tuple(n, pn_Div_max); set_Tuple_pred(n, pn_Div_M, mem); @@ -2231,7 +2231,7 @@ static ir_node *transform_node_Mod(ir_node *n) { if (value != n) { /* Turn Mod into a tuple (mem, jmp, bad, value) */ ir_node *mem = get_Mod_mem(n); - ir_node *blk = get_irn_n(n, -1); + ir_node *blk = get_nodes_block(n); turn_into_tuple(n, pn_Mod_max); set_Tuple_pred(n, pn_Mod_M, mem); @@ -2290,7 +2290,7 @@ static ir_node *transform_node_DivMod(ir_node *n) { if (evaluated) { /* replace by tuple */ ir_node *mem = get_DivMod_mem(n); - ir_node *blk = get_irn_n(n, -1); + ir_node *blk = get_nodes_block(n); turn_into_tuple(n, pn_DivMod_max); set_Tuple_pred(n, pn_DivMod_M, mem); set_Tuple_pred(n, pn_DivMod_X_regular, new_r_Jmp(current_ir_graph, blk)); @@ -2322,7 +2322,7 @@ static ir_node *transform_node_Abs(ir_node *n) { * not run it in the equivalent_node() context. 
*/ n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, - get_irn_n(n, -1), a, mode); + get_nodes_block(n), a, mode); DBG_OPT_CONFIRM(oldn, n); } else if (sign == value_classified_positive) { @@ -2395,7 +2395,7 @@ static ir_node *transform_bitwise_distributive(ir_node *n, ir_mode *a_mode = get_irn_mode(a_op); ir_mode *b_mode = get_irn_mode(b_op); if(a_mode == b_mode && (mode_is_int(a_mode) || a_mode == mode_b)) { - ir_node *blk = get_irn_n(n, -1); + ir_node *blk = get_nodes_block(n); n = exact_copy(n); set_binop_left(n, a_op); @@ -2446,7 +2446,7 @@ static ir_node *transform_bitwise_distributive(ir_node *n, if (c != NULL) { /* (a sop c) & (b sop c) => (a & b) sop c */ - ir_node *blk = get_irn_n(n, -1); + ir_node *blk = get_nodes_block(n); ir_node *new_n = exact_copy(n); set_binop_left(new_n, op1); @@ -2462,7 +2462,7 @@ static ir_node *transform_bitwise_distributive(ir_node *n, n = new_rd_And(dbgi, irg, blk, new_n, c, mode); } else { n = exact_copy(a); - set_irn_n(n, -1, blk); + set_nodes_block(n, blk); set_binop_left(n, new_n); set_binop_right(n, c); } @@ -2504,7 +2504,7 @@ static ir_node *transform_node_Eor(ir_node *n) { if (a == b) { /* a ^ a = 0 */ - n = new_rd_Const(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), + n = new_rd_Const(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), mode, get_mode_null(mode)); DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_A_A); } else if ((mode == mode_b) @@ -2513,7 +2513,7 @@ static ir_node *transform_node_Eor(ir_node *n) { && (classify_tarval (value_of(b)) == TV_CLASSIFY_ONE) && (get_irn_op(get_Proj_pred(a)) == op_Cmp)) { /* The Eor negates a Cmp. The Cmp has the negated result anyways! 
*/ - n = new_r_Proj(current_ir_graph, get_irn_n(n, -1), get_Proj_pred(a), + n = new_r_Proj(current_ir_graph, get_nodes_block(n), get_Proj_pred(a), mode_b, get_negated_pnc(get_Proj_proj(a), mode)); DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT_BOOL); @@ -2521,7 +2521,7 @@ static ir_node *transform_node_Eor(ir_node *n) { && (classify_tarval (value_of(b)) == TV_CLASSIFY_ONE)) { /* The Eor is a Not. Replace it by a Not. */ /* ????!!!Extend to bitfield 1111111. */ - n = new_r_Not(current_ir_graph, get_irn_n(n, -1), a, mode_b); + n = new_r_Not(current_ir_graph, get_nodes_block(n), a, mode_b); DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT); } else { @@ -2547,7 +2547,7 @@ static ir_node *transform_node_Not(ir_node *n) { && (get_irn_mode(a) == mode_b) && (get_irn_op(get_Proj_pred(a)) == op_Cmp)) { /* We negate a Cmp. The Cmp has the negated result anyways! */ - n = new_r_Proj(current_ir_graph, get_irn_n(n, -1), get_Proj_pred(a), + n = new_r_Proj(current_ir_graph, get_nodes_block(n), get_Proj_pred(a), mode_b, get_negated_pnc(get_Proj_proj(a), mode_b)); DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_CMP); return n; @@ -2555,7 +2555,7 @@ static ir_node *transform_node_Not(ir_node *n) { if (op_a == op_Sub && classify_Const(get_Sub_right(a)) == CNST_ONE) { /* ~(x-1) = -x */ ir_node *op = get_Sub_left(a); - ir_node *blk = get_irn_n(n, -1); + ir_node *blk = get_nodes_block(n); n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, op, get_irn_mode(n)); DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_MINUS_1); } @@ -2578,7 +2578,7 @@ static ir_node *transform_node_Minus(ir_node *n) { ir_node *op = get_Not_op(a); ir_mode *mode = get_irn_mode(op); tarval *tv = get_mode_one(mode); - ir_node *blk = get_irn_n(n, -1); + ir_node *blk = get_nodes_block(n); ir_node *c = new_r_Const(current_ir_graph, blk, mode, tv); n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, blk, op, c, mode); DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_MINUS_NOT); @@ -2596,11 +2596,11 @@ static ir_node *transform_node_Cast(ir_node *n) { ir_type 
*tp = get_irn_type(n); if (get_irn_op(pred) == op_Const && get_Const_type(pred) != tp) { - n = new_rd_Const_type(NULL, current_ir_graph, get_irn_n(pred, -1), get_irn_mode(pred), + n = new_rd_Const_type(NULL, current_ir_graph, get_nodes_block(pred), get_irn_mode(pred), get_Const_tarval(pred), tp); DBG_OPT_CSTEVAL(oldn, n); } else if ((get_irn_op(pred) == op_SymConst) && (get_SymConst_value_type(pred) != tp)) { - n = new_rd_SymConst_type(NULL, current_ir_graph, get_irn_n(pred, -1), get_SymConst_symbol(pred), + n = new_rd_SymConst_type(NULL, current_ir_graph, get_nodes_block(pred), get_SymConst_symbol(pred), get_SymConst_kind(pred), tp); DBG_OPT_CSTEVAL(oldn, n); } @@ -2623,7 +2623,7 @@ static ir_node *transform_node_Proj_Div(ir_node *proj) { proj_nr = get_Proj_proj(proj); switch (proj_nr) { case pn_Div_X_regular: - return new_r_Jmp(current_ir_graph, get_irn_n(div, -1)); + return new_r_Jmp(current_ir_graph, get_nodes_block(div)); case pn_Div_X_except: /* we found an exception handler, remove it */ @@ -2664,7 +2664,7 @@ static ir_node *transform_node_Proj_Mod(ir_node *proj) { switch (proj_nr) { case pn_Mod_X_regular: - return new_r_Jmp(current_ir_graph, get_irn_n(mod, -1)); + return new_r_Jmp(current_ir_graph, get_nodes_block(mod)); case pn_Mod_X_except: /* we found an exception handler, remove it */ @@ -2714,7 +2714,7 @@ static ir_node *transform_node_Proj_DivMod(ir_node *proj) { switch (proj_nr) { case pn_DivMod_X_regular: - return new_r_Jmp(current_ir_graph, get_irn_n(divmod, -1)); + return new_r_Jmp(current_ir_graph, get_nodes_block(divmod)); case pn_DivMod_X_except: /* we found an exception handler, remove it */ @@ -2989,7 +2989,7 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) { } if (changed) { - ir_node *block = get_irn_n(n, -1); /* Beware of get_nodes_Block() */ + ir_node *block = get_nodes_block(n); /* Beware of get_nodes_Block() */ if (changed & 2) /* need a new Const */ right = new_Const(mode, tv); @@ -3073,7 +3073,7 @@ static ir_node 
*transform_node_Phi(ir_node *phi) { in[i] = get_Confirm_value(pred); } /* move the Confirm nodes "behind" the Phi */ - block = get_irn_n(phi, -1); + block = get_nodes_block(phi); new_Phi = new_r_Phi(current_ir_graph, block, n, in, get_irn_mode(phi)); return new_r_Confirm(current_ir_graph, block, new_Phi, bound, pnc); } @@ -3165,7 +3165,7 @@ static ir_node *transform_node_Or_bf_store(ir_node *or) { } /* ok, all conditions met */ - block = get_irn_n(or, -1); + block = get_nodes_block(or); new_and = new_r_And(current_ir_graph, block, value, new_r_Const(current_ir_graph, block, mode, tarval_and(tv4, tv2)), mode); @@ -3226,7 +3226,7 @@ static ir_node *transform_node_Or_Rot(ir_node *or) { return or; /* yet, condition met */ - block = get_irn_n(or, -1); + block = get_nodes_block(or); n = new_r_Rot(current_ir_graph, block, x, c1, mode); @@ -3274,7 +3274,7 @@ static ir_node *transform_node_Or_Rot(ir_node *or) { return or; /* yet, condition met */ - block = get_irn_n(or, -1); + block = get_nodes_block(or); /* a Rot Left */ n = new_r_Rot(current_ir_graph, block, x, v, mode); @@ -3354,7 +3354,7 @@ static ir_node *transform_node_shift(ir_node *n) { if (flag) { /* ok, we can replace it */ - ir_node *in[2], *irn, *block = get_irn_n(n, -1); + ir_node *in[2], *irn, *block = get_nodes_block(n); in[0] = get_binop_left(left); in[1] = new_r_Const(current_ir_graph, block, get_tarval_mode(res), res); @@ -3447,7 +3447,7 @@ static ir_node *transform_node_Mux(ir_node *n) { ir_node *t = get_Mux_true(n); if (get_irn_op(cmp) == op_Cmp && classify_Const(get_Cmp_right(cmp)) == CNST_NULL) { - ir_node *block = get_irn_n(n, -1); + ir_node *block = get_nodes_block(n); /* * Note: normalization puts the constant on the right site, @@ -3966,7 +3966,7 @@ static INLINE ir_node *identify_cons(pset *value_table, ir_node *n) { ir_node *old = n; n = identify(value_table, n); - if (get_irn_n(old, -1) != get_irn_n(n, -1)) + if (get_nodes_block(old) != get_nodes_block(n)) set_irg_pinned(current_ir_graph, 
op_pin_state_floats); return n; } /* identify_cons */ diff --git a/ir/ir/irvrfy.c b/ir/ir/irvrfy.c index 1236a12b3..975aaa84e 100644 --- a/ir/ir/irvrfy.c +++ b/ir/ir/irvrfy.c @@ -1557,10 +1557,10 @@ static int verify_node_Phi(ir_node *n, ir_graph *irg) { /* currently this checks fails for blocks with exception outputs (and these are NOT basic blocks). So it is disabled yet. */ ASSERT_AND_RET_DBG( - (pred_i == pred_j) || (get_irn_n(pred_i, -1) != get_irn_n(pred_j, -1)), + (pred_i == pred_j) || (get_nodes_block(pred_i) != get_nodes_block(pred_j)), "At least two different PhiM predecessors are in the same block", 0, - ir_printf("%+F and %+F of %+F are in %+F\n", pred_i, pred_j, n, get_irn_n(pred_i, -1)) + ir_printf("%+F and %+F of %+F are in %+F\n", pred_i, pred_j, n, get_nodes_block(pred_i)) ); #endif } diff --git a/ir/opt/condeval.c b/ir/opt/condeval.c index 95be8fbc8..6a771c935 100644 --- a/ir/opt/condeval.c +++ b/ir/opt/condeval.c @@ -565,7 +565,6 @@ void opt_cond_eval(ir_graph* irg) DB((dbg, LEVEL_1, "===> Performing condition evaluation on %+F\n", irg)); remove_critical_cf_edges(irg); - normalize_proj_nodes(irg); edges_assure(irg); set_using_irn_link(irg); diff --git a/ir/opt/gvn_pre.c b/ir/opt/gvn_pre.c index d1544855f..f116405d9 100644 --- a/ir/opt/gvn_pre.c +++ b/ir/opt/gvn_pre.c @@ -380,7 +380,7 @@ static int need_copy(ir_node *node, ir_node *block) /* Phi always stop the recursion */ if (is_Phi(node)) - return get_irn_intra_n(node, -1) == block; + return get_nodes_block(node) == block; if (! 
is_nice_value(node)) return 0; @@ -409,7 +409,7 @@ static ir_node *translate(ir_node *node, ir_node *block, int pos, pre_env *env) /* Phi always stop the recursion */ if (is_Phi(node)) { - if (get_irn_intra_n(node, -1) == block) + if (get_nodes_block(node) == block) return get_Phi_pred(node, pos); return node; } @@ -424,7 +424,7 @@ static ir_node *translate(ir_node *node, ir_node *block, int pos, pre_env *env) need_new = 0; do { ir_node *pred = get_irn_intra_n(node, i); - ir_node *pred_blk = get_irn_intra_n(pred, -1); + ir_node *pred_blk = get_nodes_block(pred); ir_node *leader = value_lookup(get_block_info(pred_blk)->avail_out, pred); in[i] = translate(leader ? leader : pred, block, pos, env); need_new |= (in[i] != pred); @@ -489,7 +489,7 @@ static ir_node *phi_translate(ir_node *node, ir_node *block, int pos, pre_env *e struct obstack *old; if (is_Phi(node)) { - if (get_irn_intra_n(node, -1) == block) + if (get_nodes_block(node) == block) return get_Phi_pred(node, pos); return node; } @@ -499,11 +499,11 @@ static ir_node *phi_translate(ir_node *node, ir_node *block, int pos, pre_env *e /* check if the node has at least one Phi predecessor */ for (i = 0; i < arity; ++i) { ir_node *pred = get_irn_intra_n(node, i); - ir_node *pred_bl = get_irn_intra_n(pred, -1); + ir_node *pred_bl = get_nodes_block(pred); ir_node *leader = value_lookup(get_block_info(pred_bl)->avail_out, pred); leader = leader != NULL ? leader : pred; - if (is_Phi(leader) && get_irn_intra_n(pred, -1) == block) + if (is_Phi(leader) && get_nodes_block(pred) == block) break; } if (i >= arity) { @@ -528,14 +528,14 @@ static ir_node *phi_translate(ir_node *node, ir_node *block, int pos, pre_env *e node might depend on that. 
*/ copy_node_attr(node, nn); - set_irn_n(nn, -1, get_irn_intra_n(node, -1)); + set_nodes_block(nn, get_nodes_block(node)); for (i = 0; i < arity; ++i) { ir_node *pred = get_irn_intra_n(node, i); - ir_node *pred_bl = get_irn_intra_n(pred, -1); + ir_node *pred_bl = get_nodes_block(pred); ir_node *leader = value_lookup(get_block_info(pred_bl)->avail_out, pred); leader = leader != NULL ? leader : pred; - if (is_Phi(leader) && get_irn_intra_n(pred, -1) == block) + if (is_Phi(leader) && get_nodes_block(pred) == block) set_irn_n(nn, i, get_Phi_pred(leader, pos)); else set_irn_n(nn, i, leader); @@ -604,7 +604,7 @@ restart: for (i = get_irn_intra_arity(n) - 1; i >= 0; --i) { pred = get_irn_intra_n(n, i); - pred_blk = get_irn_intra_n(pred, -1); + pred_blk = get_nodes_block(pred); if (block_dominates(pred_blk, blk)) continue; /* pred do not dominate it, but may be in the set */ @@ -1005,10 +1005,6 @@ void do_gvn_pre(ir_graph *irg) a_env.end_block = get_irg_end_block(irg); a_env.pairs = NULL; - /* Move Proj's into the same block as their args, - else we would assign the result to wrong blocks */ - normalize_proj_nodes(irg); - /* critical edges MUST be removed */ remove_critical_cf_edges(irg); diff --git a/ir/opt/ldst2.c b/ir/opt/ldst2.c index 2217c7ede..20b810907 100644 --- a/ir/opt/ldst2.c +++ b/ir/opt/ldst2.c @@ -605,9 +605,8 @@ void opt_ldst2(ir_graph* irg) irg_block_walk_graph(irg, AliasSetDestroyer, NULL, NULL); obstack_free(&obst, NULL); - normalize_proj_nodes(irg); irg_walk_graph(irg, NormaliseSync, NULL, NULL); - optimize_graph_df(irg); + optimize_graph_df(irg); irg_walk_graph(irg, NormaliseSync, NULL, NULL); dump_ir_block_graph(irg, "-postfluffig"); } diff --git a/ir/opt/opt_osr.c b/ir/opt/opt_osr.c index 5bbff0935..df0b18d01 100644 --- a/ir/opt/opt_osr.c +++ b/ir/opt/opt_osr.c @@ -914,7 +914,7 @@ static ir_node *applyOneEdge(ir_node *rc, LFTR_edge *e, iv_env *env) { DB((dbg, LEVEL_4, " = OVERFLOW")); return NULL; } - return new_r_Const(current_ir_graph, get_irn_n(rc, 
-1), get_tarval_mode(tv), tv); + return new_r_Const(current_ir_graph, get_nodes_block(rc), get_tarval_mode(tv), tv); } return do_apply(e->code, NULL, rc, e->rc, get_irn_mode(rc)); } @@ -1020,9 +1020,10 @@ static void clear_and_fix(ir_node *irn, void *env) (void) env; set_irn_link(irn, NULL); + /* FIXME: must be removed but edges must be fixed first */ if (is_Proj(irn)) { ir_node *pred = get_Proj_pred(irn); - set_irn_n(irn, -1, get_irn_n(pred, -1)); + set_irn_n(irn, -1, get_nodes_block(pred)); } }