From 4b13e0fa13fd6021973a7b09e6b3e9279dd1869e Mon Sep 17 00:00:00 2001 From: Götz Lindenmaier Date: Wed, 28 Apr 2004 16:47:02 +0000 Subject: [PATCH] make libfirm faster [r2806] --- ir/ana/cgana.c | 34 +++---- ir/ana/irbackedge.c | 10 +- ir/ana/irdom.c | 2 +- ir/ana/irouts.c | 40 ++++---- ir/ana/irscc.c | 75 +++++++-------- ir/ana/irsimpletype.c | 35 +++---- ir/common/firmwalk.c | 3 +- ir/debug/Makefile.in | 2 +- ir/ir/ircgcons.c | 65 ++++++------- ir/ir/ircons.c | 42 ++++---- ir/ir/irdump.c | 3 + ir/ir/irgmod.c | 14 +-- ir/ir/irgopt.c | 213 +++++++++++++++++++++-------------------- ir/ir/irgwalk.c | 95 +++++++++---------- ir/ir/irnode.c | 68 +++---------- ir/ir/irnode_t.h | 91 ++++++++++++++++++ ir/ir/iropt.c | 216 +++++++++++++++++++++--------------------- ir/tr/entity.c | 2 +- 18 files changed, 532 insertions(+), 478 deletions(-)
diff --git a/ir/ana/cgana.c b/ir/ana/cgana.c index 967d392c0..50b18736a 100644 --- a/ir/ana/cgana.c +++ b/ir/ana/cgana.c @@ -31,7 +31,7 @@ #include "irgwalk.h" #include "ircons.h" #include "irgmod.h" -#include "irnode.h" +#include "irnode_t.h" #include "irflag_t.h" #include "dbginfo_t.h" @@ -79,7 +79,7 @@ entity *get_inherited_methods_implementation(entity *inh_meth) { ir_node *addr = get_atomic_ent_value(inh_meth); assert(addr && "constant entity without value"); - if (get_irn_op(addr) == op_Const) { + if (intern_get_irn_op(addr) == op_Const) { impl_meth = tarval_to_entity(get_Const_tarval(addr)); } else { assert(0 && "Complex constant values not supported -- address of method should be straight constant!"); } @@ -189,7 +189,7 @@ static entity ** get_impl_methods(entity * method) { static void sel_methods_walker(ir_node * node, pmap * ldname_map) { - if (get_irn_op(node) == op_SymConst) { + if (intern_get_irn_op(node) == op_SymConst) { /* Replace the SymConst operation by a Const operation * where possible. */ if (get_SymConst_kind(node) == linkage_ptr_info) { @@ -206,11 +206,11 @@ static void sel_methods_walker(ir_node * node, pmap * ldname_map) { } } } - } else if (get_irn_op(node) == op_Sel && + } else if (intern_get_irn_op(node) == op_Sel && is_method_type(get_entity_type(get_Sel_entity(node)))) { entity * ent = get_Sel_entity(node); if (get_opt_optimize() && get_opt_dyn_meth_dispatch() && - (get_irn_op(skip_Proj(get_Sel_ptr(node))) == op_Alloc)) { + (intern_get_irn_op(skip_Proj(get_Sel_ptr(node))) == op_Alloc)) { ir_node *new_node; entity *called_ent; /* We know which method will be called, no dispatch necessary. */ @@ -329,7 +329,7 @@ static entity ** get_Sel_arr(ir_node * sel) { static entity ** NULL_ARRAY = NULL; entity * ent; entity ** arr; - assert(sel && get_irn_op(sel) == op_Sel); + assert(sel && intern_get_irn_op(sel) == op_Sel); ent = get_Sel_entity(sel); assert(is_method_type(get_entity_type(ent))); /* what else? */ arr = get_entity_link(ent); @@ -373,14 +373,14 @@ static void callee_ana_proj(ir_node * node, long n, eset * methods) { } set_irn_link(node, MARK); - switch (get_irn_opcode(node)) { + switch (intern_get_irn_opcode(node)) { case iro_Proj: { /* proj_proj: in a "sensible" graph the next node is an * op_Tuple or a node that returns a "free method". */ ir_node * pred = get_Proj_pred(node); if (get_irn_link(pred) != MARK) { - if (get_irn_op(pred) == op_Tuple) { + if (intern_get_irn_op(pred) == op_Tuple) { callee_ana_proj(get_Tuple_pred(pred, get_Proj_proj(node)), n, methods); } else { eset_insert(methods, MARK); /* free method -> unknown */ } } @@ -417,7 +417,7 @@ static void callee_ana_node(ir_node * node, eset * methods) { } set_irn_link(node, MARK); - switch (get_irn_opcode(node)) { + switch (intern_get_irn_opcode(node)) { case iro_SymConst: /* external method (because of fix_symconst!) */ eset_insert(methods, MARK); /* free method -> unknown */ @@ -483,7 +483,7 @@ static void callee_ana_node(ir_node * node, eset * methods) { static void callee_walker(ir_node * call, void * env) { - if (get_irn_op(call) == op_Call) { + if (intern_get_irn_op(call) == op_Call) { eset * methods = eset_create(); entity * ent; entity ** arr = NEW_ARR_F(entity *, 0); @@ -539,13 +539,13 @@ static void free_mark_proj(ir_node * node, long n, eset * set) { return; } set_irn_link(node, MARK); - switch (get_irn_opcode(node)) { + switch (intern_get_irn_opcode(node)) { case iro_Proj: { /* proj_proj: in a "sensible" graph the next node is an * op_Tuple or a node that is handled in "free_ana_walker". */ ir_node * pred = get_Proj_pred(node); - if (get_irn_link(pred) != MARK && get_irn_op(pred) == op_Tuple) { + if (get_irn_link(pred) != MARK && intern_get_irn_op(pred) == op_Tuple) { free_mark_proj(get_Tuple_pred(pred, get_Proj_proj(node)), n, set); } else { /* nothing: handled in "free_ana_walker". */ } @@ -583,7 +583,7 @@ static void free_mark(ir_node * node, eset * set) { return; /* already visited */ } set_irn_link(node, MARK); - switch (get_irn_opcode(node)) { + switch (intern_get_irn_opcode(node)) { case iro_Sel: { entity * ent = get_Sel_entity(node); if (is_method_type(get_entity_type(ent))) { @@ -631,7 +631,7 @@ static void free_ana_walker(ir_node * node, eset * set) { /* already visited in a cycle. */ return; } - switch (get_irn_opcode(node)) { + switch (intern_get_irn_opcode(node)) { /* special nodes */ case iro_Sel: case iro_SymConst: @@ -648,7 +648,7 @@ static void free_ana_walker(ir_node * node, eset * set) { set_irn_link(node, MARK); for (i = get_Call_arity(node) - 1; i >= 0; --i) { ir_node * pred = get_Call_param(node, i); - if (mode_is_reference(get_irn_mode(pred))) { + if (mode_is_reference(intern_get_irn_mode(pred))) { free_mark(pred, set); } } @@ -657,9 +657,9 @@ * someone implements the opposite. */ default: set_irn_link(node, MARK); - for (i = get_irn_arity(node) - 1; i >= 0; --i) { + for (i = intern_get_irn_arity(node) - 1; i >= 0; --i) { ir_node * pred = get_irn_n(node, i); - if (mode_is_reference(get_irn_mode(pred))) { + if (mode_is_reference(intern_get_irn_mode(pred))) { free_mark(pred, set); } }
diff --git a/ir/ana/irbackedge.c b/ir/ana/irbackedge.c index c93eac188..a596ac871 100.644 --- a/ir/ana/irbackedge.c +++ b/ir/ana/irbackedge.c @@ -27,7 +27,7 @@ * very careful!
*/ static INLINE int *mere_get_backarray(ir_node *n) { - switch(get_irn_opcode(n)) { + switch(intern_get_irn_opcode(n)) { case iro_Block: if (!get_Block_matured(n)) return NULL; if (interprocedural_view && n->attr.block.in_cg) { @@ -82,7 +82,7 @@ static INLINE bool legal_backarray (ir_node *n) { INLINE void fix_backedges(struct obstack *obst, ir_node *n) { - opcode opc = get_irn_opcode(n); + opcode opc = intern_get_irn_opcode(n); int *arr = mere_get_backarray(n); if (ARR_LEN(arr) == ARR_LEN(get_irn_in(n))-1) return; @@ -130,7 +130,7 @@ bool has_backedges (ir_node *n) { int i; int *ba = get_backarray (n); if (ba) - for (i = 0; i < get_irn_arity(n); i++) + for (i = 0; i < intern_get_irn_arity(n); i++) if (ba[i]) return true; return false; } @@ -142,12 +142,12 @@ void clear_backedges (ir_node *n) { interprocedural_view = 0; ba = get_backarray (n); if (ba) - for (i = 0; i < get_irn_arity(n); i++) + for (i = 0; i < intern_get_irn_arity(n); i++) ba[i] = 0; interprocedural_view = 1; ba = get_backarray (n); if (ba) - for (i = 0; i < get_irn_arity(n); i++) + for (i = 0; i < intern_get_irn_arity(n); i++) ba[i] = 0; interprocedural_view = rem; } diff --git a/ir/ana/irdom.c b/ir/ana/irdom.c index 6ac71c5aa..6eee3000d 100644 --- a/ir/ana/irdom.c +++ b/ir/ana/irdom.c @@ -214,7 +214,7 @@ void compute_doms(ir_graph *irg) { tmp_dom_info *v; /* Step 2 */ - irn_arity = get_irn_arity(w->block); + irn_arity = intern_get_irn_arity(w->block); for (j = 0; j < irn_arity; j++) { ir_node *pred = get_nodes_Block(get_Block_cfgpred(w->block, j)); tmp_dom_info *u; diff --git a/ir/ana/irouts.c b/ir/ana/irouts.c index 872209600..2092e8154 100644 --- a/ir/ana/irouts.c +++ b/ir/ana/irouts.c @@ -61,8 +61,8 @@ INLINE int get_Block_n_cfg_outs (ir_node *bl) { int i, n_cfg_outs = 0; assert(bl && (get_irn_op(bl) == op_Block)); for (i = 0; i < (int)bl->out[0]; i++) - if ((get_irn_mode(bl->out[i+1]) == mode_X) && - (get_irn_op(bl->out[i+1]) != op_End)) n_cfg_outs++; + if ((intern_get_irn_mode(bl->out[i+1]) == mode_X) && + (intern_get_irn_op(bl->out[i+1]) != op_End)) n_cfg_outs++; return n_cfg_outs; } @@ -71,8 +71,8 @@ INLINE ir_node *get_Block_cfg_out (ir_node *bl, int pos) { int i, out_pos = 0; assert(bl && (get_irn_op(bl) == op_Block)); for (i = 0; i < (int)bl->out[0]; i++) - if ((get_irn_mode(bl->out[i+1]) == mode_X) && - (get_irn_op(bl->out[i+1]) != op_End)) { + if ((intern_get_irn_mode(bl->out[i+1]) == mode_X) && + (intern_get_irn_op(bl->out[i+1]) != op_End)) { if (out_pos == pos) { ir_node *cfop = bl->out[i+1]; return cfop->out[0+1]; @@ -122,8 +122,6 @@ void irg_out_block_walk2(ir_node *bl, void *env) { int i; - assert(get_irn_opcode(bl) == iro_Block); - if(get_Block_block_visited(bl) < get_irg_block_visited(current_ir_graph)) { set_Block_block_visited(bl, get_irg_block_visited(current_ir_graph)); @@ -133,7 +131,6 @@ void irg_out_block_walk2(ir_node *bl, for(i = 0; i < get_Block_n_cfg_outs(bl); i++) { /* find the corresponding predecessor block. 
*/ ir_node *pred = get_Block_cfg_out(bl, i); - assert(get_irn_opcode(pred) == iro_Block); /* recursion */ irg_out_block_walk2(pred, pre, post, env); } @@ -150,12 +147,11 @@ void irg_out_block_walk(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void *env) { - assert((get_irn_op(node) == op_Block) || (get_irn_mode(node) == mode_X)); + assert((get_irn_op(node) == op_Block) || (intern_get_irn_mode(node) == mode_X)); inc_irg_block_visited(current_ir_graph); - if (get_irn_mode(node) == mode_X) node = node->out[1]; - assert(get_irn_opcode(node) == iro_Block); + if (intern_get_irn_mode(node) == mode_X) node = node->out[1]; irg_out_block_walk2(node, pre, post, env); @@ -191,12 +187,12 @@ static int count_outs(ir_node *n) { set_irn_visited(n, get_irg_visited(current_ir_graph)); n->out = (ir_node **) 1; /* Space for array size. */ - if ((get_irn_op(n) == op_Block)) start = 0; else start = -1; - irn_arity = get_irn_arity(n); + if ((intern_get_irn_op(n) == op_Block)) start = 0; else start = -1; + irn_arity = intern_get_irn_arity(n); res = irn_arity - start +1; /* --1 or --0; 1 for array size. */ for (i = start; i < irn_arity; i++) { /* Optimize Tuples. They annoy if walking the cfg. */ - succ = skip_Tuple(get_irn_n(n, i)); + succ = skip_Tuple(intern_get_irn_n(n, i)); set_irn_n(n, i, succ); /* count outs for successors */ if (get_irn_visited(succ) < get_irg_visited(current_ir_graph)) @@ -222,10 +218,10 @@ static ir_node **set_out_edges(ir_node *n, ir_node **free) { edge. */ n->out[0] = (ir_node *)0; - if (get_irn_op(n) == op_Block) start = 0; else start = -1; - irn_arity = get_irn_arity(n); + if (intern_get_irn_op(n) == op_Block) start = 0; else start = -1; + irn_arity = intern_get_irn_arity(n); for (i = start; i < irn_arity; i++) { - succ = get_irn_n(n, i); + succ = intern_get_irn_n(n, i); /* Recursion */ if (get_irn_visited(succ) < get_irg_visited(current_ir_graph)) free = set_out_edges(succ, free); @@ -242,7 +238,7 @@ static INLINE void fix_start_proj(ir_graph *irg) { if (get_Block_n_cfg_outs(get_irg_start_block(irg))) { startbl = get_irg_start_block(irg); for (i = 0; i < get_irn_n_outs(startbl); i++) - if (get_irn_mode(get_irn_out(startbl, i)) == mode_X) + if (intern_get_irn_mode(get_irn_out(startbl, i)) == mode_X) proj = get_irn_out(startbl, i); if (get_irn_out(proj, 0) == startbl) { assert(get_irn_n_outs(proj) == 2); @@ -317,14 +313,14 @@ static void node_arity_count(ir_node * node, void * env) int *anz = (int *) env, arity, i, start; ir_node *succ; - arity = 1 + get_irn_arity(node) + arity = 1 + intern_get_irn_arity(node) + ((is_Block(node)) ? 0 : 1); *anz += arity; start = (is_Block(node)) ? 0 : -1; - for(i = start; i < get_irn_arity(node); i++) + for(i = start; i < intern_get_irn_arity(node); i++) { - succ = get_irn_n(node, i); + succ = intern_get_irn_n(node, i); succ->out = (ir_node **)((int)succ->out + 1); } } @@ -380,9 +376,9 @@ static void set_out_pointer(ir_node * node, void * env) { ir_node *succ; int start = (!is_Block(node)) ? -1 : 0; - for(i = start; i < get_irn_arity(node); i++) + for(i = start; i < intern_get_irn_arity(node); i++) { - succ = get_irn_n(node, i); + succ = intern_get_irn_n(node, i); succ->out[get_irn_n_outs(succ)+1] = node; succ->out[0] = (ir_node *) (get_irn_n_outs(succ) + 1); } diff --git a/ir/ana/irscc.c b/ir/ana/irscc.c index d4abfe363..fab28267f 100644 --- a/ir/ana/irscc.c +++ b/ir/ana/irscc.c @@ -514,23 +514,23 @@ init_node (ir_node *n, void *env) { /* Also init nodes not visible in intraproc_view. 
*/ /* @@@ init_node is called for too many nodes -- this wastes memory!. The mem is not lost as its on the obstack. */ - if (get_irn_op(n) == op_Filter) { + if (intern_get_irn_op(n) == op_Filter) { for (i = 0; i < get_Filter_n_cg_preds(n); i++) init_node(get_Filter_cg_pred(n, i), NULL); } - if (get_irn_op(n) == op_Block) { + if (intern_get_irn_op(n) == op_Block) { for (i = 0; i < get_Block_cg_n_cfgpreds(n); i++) { init_node(get_Block_cg_cfgpred(n, i), NULL); } } /* The following pattern matches only after a call from above pattern. */ - if ((get_irn_op(n) == op_Proj) /*&& (get_Proj_proj(n) == 0)*/) { + if ((intern_get_irn_op(n) == op_Proj) /*&& (get_Proj_proj(n) == 0)*/) { /* @@@ init_node is called for every proj -- this wastes memory!. The mem is not lost as its on the obstack. */ ir_node *cb = get_Proj_pred(n); - if ((get_irn_op(cb) == op_CallBegin) || - (get_irn_op(cb) == op_EndReg) || - (get_irn_op(cb) == op_EndExcept)) { + if ((intern_get_irn_op(cb) == op_CallBegin) || + (intern_get_irn_op(cb) == op_EndReg) || + (intern_get_irn_op(cb) == op_EndExcept)) { init_node(cb, NULL); init_node(get_nodes_Block(cb), NULL); } @@ -565,9 +565,9 @@ init_ip_scc (void) { static bool is_outermost_Start(ir_node *n) { /* Test whether this is the outermost Start node. If so recursion must end. */ - if ((get_irn_op(n) == op_Block) && + if ((intern_get_irn_op(n) == op_Block) && (get_Block_n_cfgpreds(n) == 1) && - (get_irn_op(skip_Proj(get_Block_cfgpred(n, 0))) == op_Start) && + (intern_get_irn_op(skip_Proj(get_Block_cfgpred(n, 0))) == op_Start) && (get_nodes_Block(skip_Proj(get_Block_cfgpred(n, 0))) == n)) { return true; } @@ -576,7 +576,7 @@ static bool is_outermost_Start(ir_node *n) { not possible in interprocedural view as outermost_graph is not necessarily the only with a dead-end start block. Besides current_ir_graph is not set properly. */ - if ((get_irn_op(n) == op_Block) && + if ((intern_get_irn_op(n) == op_Block) && (n == get_irg_start_block(current_ir_graph))) { if ((!interprocedural_view) || (current_ir_graph == outermost_ir_graph)) @@ -589,7 +589,7 @@ static bool is_outermost_Start(ir_node *n) { /* Don't walk from nodes to blocks except for Control flow operations. */ static INLINE int get_start_index(ir_node *n) { - if (is_cfop(n) || is_fragile_op(n) || get_irn_op(n) == op_Start) + if (is_cfop(n) || is_fragile_op(n) || intern_get_irn_op(n) == op_Start) return -1; else return 0; @@ -603,9 +603,9 @@ switch_irg (ir_node *n, int index) { if (interprocedural_view) { /* Only Filter and Block nodes can have predecessors in other graphs. */ - if (get_irn_op(n) == op_Filter) + if (intern_get_irn_op(n) == op_Filter) n = get_nodes_Block(n); - if (get_irn_op(n) == op_Block) { + if (intern_get_irn_op(n) == op_Block) { ir_node *cfop = skip_Proj(get_Block_cfgpred(n, index)); if (is_ip_cfop(cfop)) { current_ir_graph = get_irn_irg(cfop); @@ -637,21 +637,21 @@ find_irg_on_stack (ir_node *n) { m = stack[i]; /*printf(" Visiting %d ", i); DDMN(m);*/ if (is_ip_cfop(m)) { - current_ir_graph = get_irn_irg(m); - break; + current_ir_graph = get_irn_irg(m); + break; } - if (get_irn_op(m) == op_Filter) { - /* Find the corresponding ip_cfop */ - ir_node *pred = stack[i+1]; - int j; - for (j = 0; j < get_Filter_n_cg_preds(m); j++) - if (get_Filter_cg_pred(m, j) == pred) break; - if (j >= get_Filter_n_cg_preds(m)) - /* It is a filter we didn't pass as the predecessors are marked. 
*/ - continue; - assert(get_Filter_cg_pred(m, j) == pred); - switch_irg(m, j); - break; + if (intern_get_irn_op(m) == op_Filter) { + /* Find the corresponding ip_cfop */ + ir_node *pred = stack[i+1]; + int j; + for (j = 0; j < get_Filter_n_cg_preds(m); j++) + if (get_Filter_cg_pred(m, j) == pred) break; + if (j >= get_Filter_n_cg_preds(m)) + /* It is a filter we didn't pass as the predecessors are marked. */ + continue; + assert(get_Filter_cg_pred(m, j) == pred); + switch_irg(m, j); + break; } } } @@ -681,7 +681,7 @@ static void test(ir_node *pred, ir_node *root, ir_node *this) { /* Test for legal loop header: Block, Phi, ... */ INLINE static bool is_possible_loop_head(ir_node *n) { - ir_op *op = get_irn_op(n); + ir_op *op = intern_get_irn_op(n); return ((op == op_Block) || (op == op_Phi) || ((op == op_Filter) && interprocedural_view)); @@ -700,10 +700,11 @@ is_head (ir_node *n, ir_node *root) /* Test for legal loop header: Block, Phi, ... */ if (!is_possible_loop_head(n)) return false; + if (!is_outermost_Start(n)) { - arity = get_irn_arity(n); + arity = intern_get_irn_arity(n); for (i = get_start_index(n); i < arity; i++) { - ir_node *pred = get_irn_n(n, i); + ir_node *pred = intern_get_irn_n(n, i); assert(pred); if (is_backedge(n, i)) continue; if (!irn_is_in_stack(pred)) { @@ -725,9 +726,9 @@ smallest_dfn_pred (ir_node *n, int limit) int i, index = -2, min = -1; if (!is_outermost_Start(n)) { - int arity = get_irn_arity(n); + int arity = intern_get_irn_arity(n); for (i = get_start_index(n); i < arity; i++) { - ir_node *pred = get_irn_n(n, i); + ir_node *pred = intern_get_irn_n(n, i); assert(pred); if (is_backedge(n, i) || !irn_is_in_stack(pred)) continue; if (get_irn_dfn(pred) >= limit && (min == -1 || get_irn_dfn(pred) < min)) { @@ -746,9 +747,9 @@ largest_dfn_pred (ir_node *n) int i, index = -2, max = -1; if (!is_outermost_Start(n)) { - int arity = get_irn_arity(n); + int arity = intern_get_irn_arity(n); for (i = get_start_index(n); i < arity; i++) { - ir_node *pred = get_irn_n(n, i); + ir_node *pred = intern_get_irn_n(n, i); if (is_backedge (n, i) || !irn_is_in_stack(pred)) continue; if (get_irn_dfn(pred) > max) { index = i; @@ -795,7 +796,7 @@ find_tail (ir_node *n) { assert (res_index > -2); set_backedge (m, res_index); - return is_outermost_Start(n) ? NULL : get_irn_n(m, res_index); + return is_outermost_Start(n) ? NULL : intern_get_irn_n(m, res_index); } @@ -818,13 +819,13 @@ static void scc (ir_node *n) { so is_backedge does not access array[-1] but correctly returns false! */ if (!is_outermost_Start(n)) { - int arity = get_irn_arity(n); + int arity = intern_get_irn_arity(n); for (i = get_start_index(n); i < arity; i++) { ir_node *m; if (is_backedge(n, i)) continue; - m = get_irn_n(n, i); /* get_irn_ip_pred(n, i); */ - //if ((!m) || (get_irn_op(m) == op_Unknown)) continue; + m = intern_get_irn_n(n, i); /* get_irn_ip_pred(n, i); */ + //if ((!m) || (intern_get_irn_op(m) == op_Unknown)) continue; scc (m); if (irn_is_in_stack(m)) { /* Uplink of m is smaller if n->m is a backedge. 
diff --git a/ir/ana/irsimpletype.c b/ir/ana/irsimpletype.c index e0afce751..696d3398f 100644 --- a/ir/ana/irsimpletype.c +++ b/ir/ana/irsimpletype.c @@ -24,6 +24,7 @@ # include "irtypeinfo.h" # include "irsimpletype.h" +# include "irnode_t.h" # include "irprog.h" # include "irgwalk.h" # include "ident.h" @@ -60,7 +61,7 @@ static type* compute_irn_type(ir_node *n); static type *find_type_for_Proj(ir_node *n) { type *tp; ir_node *pred = skip_Tuple(get_Proj_pred(n)); - ir_mode *m = get_irn_mode(n); + ir_mode *m = intern_get_irn_mode(n); if (m == mode_T || m == mode_BB || @@ -69,16 +70,16 @@ static type *find_type_for_Proj(ir_node *n) { m == mode_b ) return none_type; - switch(get_irn_opcode(pred)) { + switch(intern_get_irn_opcode(pred)) { case iro_Proj: { ir_node *pred_pred; /* Deal with Start / Call here: we need to know the Proj Nr. */ assert(get_irn_mode(pred) == mode_T); pred_pred = get_Proj_pred(pred); - if (get_irn_op(pred_pred) == op_Start) { + if (intern_get_irn_op(pred_pred) == op_Start) { type *mtp = get_entity_type(get_irg_ent(get_Start_irg(pred_pred))); tp = get_method_param_type(mtp, get_Proj_proj(n)); - } else if (get_irn_op(pred_pred) == op_Call) { + } else if (intern_get_irn_op(pred_pred) == op_Call) { type *mtp = get_Call_type(pred_pred); tp = get_method_res_type(mtp, get_Proj_proj(n)); } else { @@ -135,7 +136,7 @@ static type *find_type_for_node(ir_node *n) { tp2 = compute_irn_type(b); } - switch(get_irn_opcode(n)) { + switch(intern_get_irn_opcode(n)) { case iro_InstOf: { assert(0 && "op_InstOf not supported"); @@ -229,9 +230,9 @@ static type *find_type_for_node(ir_node *n) { } break; case iro_Load: { ir_node *a = get_Load_ptr(n); - if (get_irn_op(a) == op_Sel) + if (intern_get_irn_op(a) == op_Sel) tp = get_entity_type(get_Sel_entity(a)); - else if ((get_irn_op(a) == op_Const) && + else if ((intern_get_irn_op(a) == op_Const) && (tarval_is_entity(get_Const_tarval(a)))) tp = get_entity_type(tarval_to_entity(get_Const_tarval(a))); else if (is_pointer_type(compute_irn_type(a))) { @@ -255,28 +256,28 @@ static type *find_type_for_node(ir_node *n) { /* catch special cases with fallthrough to binop/unop cases in default. */ case iro_Sub: { - if (mode_is_int(get_irn_mode(n)) && - mode_is_reference(get_irn_mode(a)) && - mode_is_reference(get_irn_mode(b)) ) { + if (mode_is_int(intern_get_irn_mode(n)) && + mode_is_reference(intern_get_irn_mode(a)) && + mode_is_reference(intern_get_irn_mode(b)) ) { VERBOSE_UNKNOWN_TYPE(("Sub %ld ptr - ptr = int: unknown type\n", get_irn_node_nr(n))); tp = unknown_type; break; } } /* fall through to Add. 
*/ case iro_Add: { - if (mode_is_reference(get_irn_mode(n)) && - mode_is_reference(get_irn_mode(a)) && - mode_is_int(get_irn_mode(b)) ) { + if (mode_is_reference(intern_get_irn_mode(n)) && + mode_is_reference(intern_get_irn_mode(a)) && + mode_is_int(intern_get_irn_mode(b)) ) { tp = tp1; break; } - if (mode_is_reference(get_irn_mode(n)) && - mode_is_int(get_irn_mode(a)) && - mode_is_reference(get_irn_mode(b)) ) { + if (mode_is_reference(intern_get_irn_mode(n)) && + mode_is_int(intern_get_irn_mode(a)) && + mode_is_reference(intern_get_irn_mode(b)) ) { tp = tp2; break; } goto default_code; } break; case iro_Mul: { - if (get_irn_mode(n) != get_irn_mode(a)) { + if (intern_get_irn_mode(n) != intern_get_irn_mode(a)) { VERBOSE_UNKNOWN_TYPE(("Mul %ld int1 * int1 = int2: unknown type\n", get_irn_node_nr(n))); tp = unknown_type; break; }
diff --git a/ir/common/firmwalk.c b/ir/common/firmwalk.c index b681d2953..0f8442409 100644 --- a/ir/common/firmwalk.c +++ b/ir/common/firmwalk.c @@ -17,6 +17,7 @@ #include "firmwalk.h" #include "pmap.h" #include "entity.h" +#include "irnode_t.h" #include "irprog.h" #include "irgwalk.h" #include "array.h" @@ -261,7 +262,7 @@ static void fw_collect_irn(ir_node *irn, void *env) { fw_data *data; - ir_mode* mode = get_irn_mode(irn); + ir_mode* mode = intern_get_irn_mode(irn); /* The link field will be cleared in the walk_do_mode() callback function. */
diff --git a/ir/debug/Makefile.in b/ir/debug/Makefile.in index a8cd475d6..95872b4ea 100644 --- a/ir/debug/Makefile.in +++ b/ir/debug/Makefile.in @@ -25,7 +25,7 @@ include $(topdir)/MakeRules CPPFLAGS += -I$(top_srcdir)/ir/ident -I$(top_srcdir)/ir/ir -I$(top_srcdir)/ir/tv \ -I$(top_srcdir)/ir/tr -I$(top_srcdir)/ir/common -I$(top_srcdir)/ir/ana \ - -I$(top_srcdir)/ir/st + -I$(top_srcdir)/ir/st -I$(top_srcdir)/ir/adt include $(top_srcdir)/MakeTargets
diff --git a/ir/ir/ircgcons.c b/ir/ir/ircgcons.c index 4d47d2fd2..8df89ed7c 100644 --- a/ir/ir/ircgcons.c +++ b/ir/ir/ircgcons.c @@ -20,6 +20,7 @@ #include "array.h" #include "irprog.h" +#include "irnode_t.h" #include "ircons.h" #include "irgmod.h" #include "irgwalk.h" @@ -58,7 +59,7 @@ static void caller_init(int arr_length, entity ** free_methods) { ir_node * call; /* The Call nodes are linked (together with the Proj nodes) at the End node! */ for (call = get_irn_link(get_irg_end(irg)); call; call = get_irn_link(call)) { - if (get_irn_op(call) != op_Call) continue; + if (intern_get_irn_op(call) != op_Call) continue; for (j = get_Call_n_callees(call) - 1; j >= 0; --j) { entity * ent = get_Call_callee(call, j); if (ent) { @@ -91,14 +92,14 @@ static INLINE ir_node * tail(ir_node * node) { * (also for Proj->Call operations) and insert Phi operations into the list * of their basic block. */ static void collect_phicallproj_walker(ir_node * node, ir_node ** call_tail) { - if (get_irn_op(node) == op_Call) { + if (intern_get_irn_op(node) == op_Call) { /* Append the list of Call nodes to call_tail. */ ir_node * link; assert(get_irn_link(*call_tail) == NULL); set_irn_link(*call_tail, node); /* update call_tail: */ for (link = get_irn_link(*call_tail); link; *call_tail = link, link = get_irn_link(link)) ; - } else if (get_irn_op(node) == op_Proj) { + } else if (intern_get_irn_op(node) == op_Proj) { ir_node * head = skip_Proj(get_Proj_pred(node)); set_irn_link(node, get_irn_link(head)); set_irn_link(head, node); @@ -106,7 +107,7 @@ if (head == *call_tail) { *call_tail = node; } - } else if (get_irn_op(node) == op_Phi) { + } else if (intern_get_irn_op(node) == op_Phi) { ir_node * block = get_nodes_Block(node); set_irn_link(node, get_irn_link(block)); set_irn_link(block, node); @@ -150,7 +151,7 @@ static void collect_phicallproj(void) { static ir_node * exchange_proj(ir_node * proj) { ir_node * filter; assert(get_irn_op(proj) == op_Proj); - filter = new_Filter(get_Proj_pred(proj), get_irn_mode(proj), get_Proj_proj(proj)); + filter = new_Filter(get_Proj_pred(proj), intern_get_irn_mode(proj), get_Proj_proj(proj)); /* The Proj (Id) operation should be in the same basic block as the * Filter operation. */ set_nodes_Block(proj, get_nodes_Block(filter)); @@ -221,7 +222,7 @@ static void prepare_irg(ir_graph * irg, irg_data_t * data) { * that above, because of CSE, only one Filter operation can have been * created for "different" Proj operations. */ for (link = get_irg_start(irg), proj = get_irn_link(link); proj; proj = get_irn_link(proj)) { - if (get_irn_op(proj) == op_Id) { /* replaced with filter */ + if (intern_get_irn_op(proj) == op_Id) { /* replaced with filter */ ir_node * filter = get_Id_pred(proj); assert(get_irn_op(filter) == op_Filter); if (filter != link && get_irn_link(filter) == NULL) { @@ -238,8 +239,8 @@ static void prepare_irg(ir_graph * irg, irg_data_t * data) { if (data->open) { set_Block_cg_cfgpred(start_block, 0, get_cg_Unknown(mode_X)); for (proj = get_irn_link(get_irg_start(irg)); proj; proj = get_irn_link(proj)) { - if (get_irn_op(proj) == op_Filter) { - set_Filter_cg_pred(proj, 0, get_cg_Unknown(get_irn_mode(proj))); + if (intern_get_irn_op(proj) == op_Filter) { + set_Filter_cg_pred(proj, 0, get_cg_Unknown(intern_get_irn_mode(proj))); } } data->count = 1; @@ -262,7 +263,7 @@ static void prepare_irg_end(ir_graph * irg, irg_data_t * data) { int n_ret = 0; for (i = get_Block_n_cfgpreds(end_block) - 1; i >= 0; --i) { - if (get_irn_op(cfgpred_arr[i]) == op_Return) { + if (intern_get_irn_op(cfgpred_arr[i]) == op_Return) { if (ret_arr) { ARR_APP1(ir_node *, ret_arr, cfgpred_arr[i]); } else { @@ -304,8 +305,8 @@ static void prepare_irg_end(ir_graph * irg, irg_data_t * data) { /* In[0] could be a Bad node with wrong mode. */ for (i = n_ret - 1; i >= 0; --i) { in[i] = get_Return_res(ret_arr[i], j); - if (!mode && get_irn_mode(in[i]) != mode_T) - mode = get_irn_mode(in[i]); + if (!mode && intern_get_irn_mode(in[i]) != mode_T) + mode = intern_get_irn_mode(in[i]); } if (mode) data->res[j] = new_Phi(n_ret, in, mode); @@ -329,7 +330,7 @@ static void prepare_irg_end_except(ir_graph * irg, irg_data_t * data) { int n_except = 0; ir_node ** cfgpred_arr = get_Block_cfgpred_arr(end_block); for (i = get_Block_n_cfgpreds(end_block) - 1; i >= 0; --i) { - if (get_irn_op(cfgpred_arr[i]) != op_Return) { + if (intern_get_irn_op(cfgpred_arr[i]) != op_Return) { if (except_arr) { ARR_APP1(ir_node *, except_arr, cfgpred_arr[i]); } else { @@ -348,9 +349,9 @@ static void prepare_irg_end_except(ir_graph * irg, irg_data_t * data) { /* mem */ for (i = n_except - 1; i >= 0; --i) { ir_node * node = skip_Proj(except_arr[i]); - if (get_irn_op(node) == op_Call) { + if (intern_get_irn_op(node) == op_Call) { in[i] = new_r_Proj(irg, get_nodes_Block(node), node, mode_M, 3); - } else if (get_irn_op(node) == op_Raise) { + } else if (intern_get_irn_op(node) == op_Raise) { in[i] = new_r_Proj(irg, get_nodes_Block(node), node, mode_M, 1); } else { assert(is_fragile_op(node)); @@ -402,8 +403,8 @@ static void move_nodes(ir_node * from_block, ir_node * to_block, ir_node * node) int i; ir_node *proj; - for (i = get_irn_arity(node) - 1; i >= 0; --i) { - ir_node * pred = get_irn_n(node, i); + for (i = intern_get_irn_arity(node) - 1; i >= 0; --i) { + ir_node * pred = intern_get_irn_n(node, i); if (get_nodes_Block(pred) == from_block) { move_nodes(from_block, to_block, pred); } @@ -413,8 +414,8 @@ static void move_nodes(ir_node * from_block, ir_node * to_block, ir_node * node) /* Move projs of this node. */ proj = get_irn_link(node); for (; proj; proj = skip_Id(get_irn_link(proj))) { - if (get_irn_op(proj) != op_Proj && get_irn_op(proj) != op_Filter) continue; - if ((get_nodes_Block(proj) == from_block) && (skip_Proj(get_irn_n(proj, 0)) == node)) + if (intern_get_irn_op(proj) != op_Proj && intern_get_irn_op(proj) != op_Filter) continue; + if ((get_nodes_Block(proj) == from_block) && (skip_Proj(intern_get_irn_n(proj, 0)) == node)) set_nodes_Block(proj, to_block); } } @@ -436,7 +437,7 @@ static void construct_start(entity * caller, entity * callee, set_Block_cg_cfgpred(get_nodes_Block(start), data->count, exec); for (filter = get_irn_link(start); filter; filter = get_irn_link(filter)) { - if (get_irn_op(filter) != op_Filter) continue; + if (intern_get_irn_op(filter) != op_Filter) continue; if (get_Proj_pred(filter) == start) { switch ((int) get_Proj_proj(filter)) { case pns_global_store: @@ -446,13 +447,13 @@ /* "frame_base" is only represented by Unknown. It could also be * represented explicitly if that yields advantages for * data flow analysis. */ - set_Filter_cg_pred(filter, data->count, get_cg_Unknown(get_irn_mode(filter))); + set_Filter_cg_pred(filter, data->count, get_cg_Unknown(intern_get_irn_mode(filter))); break; case pns_globals: /* "globals" is only represented by Unknown. It could also be * represented explicitly if that yields advantages for * data flow analysis. */ - set_Filter_cg_pred(filter, data->count, get_cg_Unknown(get_irn_mode(filter))); + set_Filter_cg_pred(filter, data->count, get_cg_Unknown(intern_get_irn_mode(filter))); break; default: /* not reached */ @@ -526,8 +527,8 @@ static ir_node * get_except(ir_node * call) { /* With CSE this could be done more efficiently! But the method is called * only once per call site. */ ir_node * proj; - for (proj = get_irn_link(call); proj && get_irn_op(proj) == op_Proj; proj = get_irn_link(proj)) { - if (get_Proj_proj(proj) == 1 && get_irn_op(get_Proj_pred(proj)) == op_Call) { + for (proj = get_irn_link(call); proj && intern_get_irn_op(proj) == op_Proj; proj = get_irn_link(proj)) { + if (get_Proj_proj(proj) == 1 && intern_get_irn_op(get_Proj_pred(proj)) == op_Call) { return proj; } } @@ -702,7 +703,7 @@ static void construct_call(ir_node * call) { * insert interprocedural predecessors. */ set_irg_current_block(current_ir_graph, post_block); for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) { - if (get_irn_op(proj) != op_Proj) continue; + if (intern_get_irn_op(proj) != op_Proj) continue; if (skip_Proj(get_Proj_pred(proj)) != call) continue; if (get_Proj_pred(proj) == call) { if (get_Proj_proj(proj) == 0) { /* memory */ @@ -750,7 +751,7 @@ static void construct_call(ir_node * call) { set_irn_link(filter, get_irn_link(post_block)); set_irn_link(post_block, filter); } - fill_result(get_Proj_proj(filter), n_callees, data, in, get_irn_mode(filter)); + fill_result(get_Proj_proj(filter), n_callees, data, in, intern_get_irn_mode(filter)); set_Filter_cg_pred_arr(filter, n_callees, in); } } @@ -792,7 +793,7 @@ void cg_construct(int arr_len, entity ** free_methods_arr) { ir_node * node; current_ir_graph = get_irp_irg(i); for (node = get_irn_link(get_irg_end(current_ir_graph)); node; node = get_irn_link(node)) { - if (get_irn_op(node) == op_Call) { + if (intern_get_irn_op(node) == op_Call) { int n_callees = get_Call_n_callees(node); if (n_callees > 1 || (n_callees == 1 && get_Call_callee(node, 0) != NULL)) { construct_call(node); @@ -812,17 +813,17 @@ void cg_construct(int arr_len, entity ** free_methods_arr) { static void destruct_walker(ir_node * node, void * env) { - if (get_irn_op(node) == op_Block) { + if (intern_get_irn_op(node) == op_Block) { remove_Block_cg_cfgpred_arr(node); - } else if (get_irn_op(node) == op_Filter) { + } else if (intern_get_irn_op(node) == op_Filter) { set_irg_current_block(current_ir_graph, get_nodes_Block(node)); - exchange(node, new_Proj(get_Filter_pred(node), get_irn_mode(node), get_Filter_proj(node))); - } else if (get_irn_op(node) == op_Break) { + exchange(node, new_Proj(get_Filter_pred(node), intern_get_irn_mode(node), get_Filter_proj(node))); + } else if (intern_get_irn_op(node) == op_Break) { set_irg_current_block(current_ir_graph, get_nodes_Block(node)); exchange(node, new_Jmp()); - } else if (get_irn_op(node) == op_Call) { + } else if (intern_get_irn_op(node) == op_Call) { remove_Call_callee_arr(node); - } else if (get_irn_op(node) == op_Proj) { + } else if (intern_get_irn_op(node) == op_Proj) { // some ProjX end up in strange blocks. set_nodes_block(node, get_nodes_block(get_Proj_pred(node))); }
diff --git a/ir/ir/ircons.c b/ir/ir/ircons.c index 070168c46..c983cbb46 100644 --- a/ir/ir/ircons.c +++ b/ir/ir/ircons.c @@ -106,13 +106,13 @@ new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in /* Don't assert that block matured: the use of this constructor is strongly restricted ...
*/ if ( get_Block_matured(block) ) - assert( get_irn_arity(block) == arity ); + assert( intern_get_irn_arity(block) == arity ); res = new_ir_node (db, irg, block, op_Phi, mode, arity, in); res->attr.phi_backedge = new_backedge_arr(irg->obst, arity); - for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true; + for (i = arity-1; i >= 0; i--) if (intern_get_irn_op(in[i]) == op_Unknown) has_unknown = true; if (!has_unknown) res = optimize_node (res); irn_vrfy_irg (res, irg); @@ -211,7 +211,7 @@ INLINE ir_node * new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) { ir_node *res; - res = new_ir_node (db, irg, block, op_Cast, get_irn_mode(op), 1, &op); + res = new_ir_node (db, irg, block, op_Cast, intern_get_irn_mode(op), 1, &op); res->attr.cast.totype = to_tp; res = optimize_node (res); irn_vrfy_irg (res, irg); @@ -726,7 +726,7 @@ new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_no in[0] = val; in[1] = bound; - res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in); + res = new_ir_node (db, irg, block, op_Confirm, intern_get_irn_mode(val), 2, in); res->attr.confirm_cmp = cmp; @@ -1086,7 +1086,7 @@ new_d_Block (dbg_info* db, int arity, ir_node **in) current_ir_graph->n_loc); memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc); - for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true; + for (i = arity-1; i >= 0; i--) if (intern_get_irn_op(in[i]) == op_Unknown) has_unknown = true; if (!has_unknown) res = optimize_node (res); current_ir_graph->current_block = res; @@ -1164,8 +1164,6 @@ free_Phi_in_stack(Phi_in_stack *s) { } static INLINE void free_to_Phi_in_stack(ir_node *phi) { - assert(get_irn_opcode(phi) == iro_Phi); - if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) == current_ir_graph->Phi_in_stack->pos) ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi); @@ -1409,7 +1407,7 @@ get_r_value_internal (ir_node *block, int pos, ir_mode *mode) if (block->attr.block.matured) { /* case 3 */ /* The Phi has the same amount of ins as the corresponding block. */ - int ins = get_irn_arity(block); + int ins = intern_get_irn_arity(block); ir_node **nin; NEW_ARR_A (ir_node *, nin, ins); @@ -1527,7 +1525,7 @@ static INLINE ir_node ** new_frag_arr (ir_node *n) finished yet. */ opt = get_opt_optimize(); set_optimize(0); /* Here we rely on the fact that all frag ops have Memory as first result! */ - if (get_irn_op(n) == op_Call) + if (intern_get_irn_op(n) == op_Call) arr[0] = new_Proj(n, mode_M, 3); else arr[0] = new_Proj(n, mode_M, 0); @@ -1538,9 +1536,9 @@ static INLINE ir_node ** new_frag_arr (ir_node *n) static INLINE ir_node ** get_frag_arr (ir_node *n) { - if (get_irn_op(n) == op_Call) { + if (intern_get_irn_op(n) == op_Call) { return n->attr.call.frag_arr; - } else if (get_irn_op(n) == op_Alloc) { + } else if (intern_get_irn_op(n) == op_Alloc) { return n->attr.a.frag_arr; } else { return n->attr.frag_arr; @@ -1568,7 +1566,7 @@ get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode /* There was a set_value after the cfOp and no get_value before that set_value. We must build a Phi node now. 
*/ if (block->attr.block.matured) { - int ins = get_irn_arity(block); + int ins = intern_get_irn_arity(block); ir_node **nin; NEW_ARR_A (ir_node *, nin, ins); res = phi_merge(block, pos, mode, nin, ins); @@ -1656,7 +1654,7 @@ phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) assert (prevBlock); if (!is_Bad(prevBlock)) { #if PRECISE_EXC_CONTEXT - if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) { + if (is_fragile_op(prevCfOp) && (intern_get_irn_op (prevCfOp) != op_Bad)) { assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode)); nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode); } else @@ -1742,7 +1740,7 @@ get_r_value_internal (ir_node *block, int pos, ir_mode *mode) if (block->attr.block.matured) { /* case 3 */ /* The Phi has the same amount of ins as the corresponding block. */ - int ins = get_irn_arity(block); + int ins = intern_get_irn_arity(block); ir_node **nin; NEW_ARR_A (ir_node *, nin, ins); @@ -1930,7 +1928,7 @@ new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2) memop, op1, op2); #if PRECISE_EXC_CONTEXT if ((current_ir_graph->phase_state == phase_building) && - (get_irn_op(res) == op_Quot)) /* Could be optimized away. */ + (intern_get_irn_op(res) == op_Quot)) /* Could be optimized away. */ res->attr.frag_arr = new_frag_arr(res); #endif @@ -1945,7 +1943,7 @@ new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2) memop, op1, op2); #if PRECISE_EXC_CONTEXT if ((current_ir_graph->phase_state == phase_building) && - (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */ + (intern_get_irn_op(res) == op_DivMod)) /* Could be optimized away. */ res->attr.frag_arr = new_frag_arr(res); #endif @@ -1960,7 +1958,7 @@ new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2) memop, op1, op2); #if PRECISE_EXC_CONTEXT if ((current_ir_graph->phase_state == phase_building) && - (get_irn_op(res) == op_Div)) /* Could be optimized away. */ + (intern_get_irn_op(res) == op_Div)) /* Could be optimized away. */ res->attr.frag_arr = new_frag_arr(res); #endif @@ -1975,7 +1973,7 @@ new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2) memop, op1, op2); #if PRECISE_EXC_CONTEXT if ((current_ir_graph->phase_state == phase_building) && - (get_irn_op(res) == op_Mod)) /* Could be optimized away. */ + (intern_get_irn_op(res) == op_Mod)) /* Could be optimized away. */ res->attr.frag_arr = new_frag_arr(res); #endif @@ -2073,7 +2071,7 @@ new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node ** store, callee, arity, in, tp); #if PRECISE_EXC_CONTEXT if ((current_ir_graph->phase_state == phase_building) && - (get_irn_op(res) == op_Call)) /* Could be optimized away. */ + (intern_get_irn_op(res) == op_Call)) /* Could be optimized away. */ res->attr.call.frag_arr = new_frag_arr(res); #endif @@ -2102,7 +2100,7 @@ new_d_Load (dbg_info* db, ir_node *store, ir_node *addr) store, addr); #if PRECISE_EXC_CONTEXT if ((current_ir_graph->phase_state == phase_building) && - (get_irn_op(res) == op_Load)) /* Could be optimized away. */ + (intern_get_irn_op(res) == op_Load)) /* Could be optimized away. */ res->attr.frag_arr = new_frag_arr(res); #endif @@ -2117,7 +2115,7 @@ new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val) store, addr, val); #if PRECISE_EXC_CONTEXT if ((current_ir_graph->phase_state == phase_building) && - (get_irn_op(res) == op_Store)) /* Could be optimized away. */ + (intern_get_irn_op(res) == op_Store)) /* Could be optimized away. 
*/ res->attr.frag_arr = new_frag_arr(res); #endif @@ -2133,7 +2131,7 @@ new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type, store, size, alloc_type, where); #if PRECISE_EXC_CONTEXT if ((current_ir_graph->phase_state == phase_building) && - (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */ + (intern_get_irn_op(res) == op_Alloc)) /* Could be optimized away. */ res->attr.a.frag_arr = new_frag_arr(res); #endif diff --git a/ir/ir/irdump.c b/ir/ir/irdump.c index 51393706e..502ba82d4 100644 --- a/ir/ir/irdump.c +++ b/ir/ir/irdump.c @@ -1680,6 +1680,8 @@ dump_ir_graph (ir_graph *irg) char *suffix; rem = current_ir_graph; + printf("comparing %s %s\n", get_irg_dump_name(irg), dump_file_filter); + if(strncmp(get_irg_dump_name(irg),dump_file_filter,strlen(dump_file_filter))!=0) return; current_ir_graph = irg; @@ -1709,6 +1711,7 @@ dump_ir_block_graph (ir_graph *irg) int i; char *suffix; + printf("comparing %s %s\n", get_irg_dump_name(irg), dump_file_filter); if(strncmp(get_irg_dump_name(irg),dump_file_filter,strlen(dump_file_filter))!=0) return; if (interprocedural_view) suffix = "-ip"; diff --git a/ir/ir/irgmod.c b/ir/ir/irgmod.c index c46122224..df3b7179c 100644 --- a/ir/ir/irgmod.c +++ b/ir/ir/irgmod.c @@ -32,7 +32,7 @@ turn_into_tuple (ir_node *node, int arity) { assert(node); set_irn_op(node, op_Tuple); - if (get_irn_arity(node) == arity) { + if (intern_get_irn_arity(node) == arity) { /* keep old array */ } else { /* Allocate new array, don't free old in_array, it's on the obstack. */ @@ -72,13 +72,13 @@ clear_link (ir_node *n, void *env) { static void collect (ir_node *n, void *env) { ir_node *pred; - if (get_irn_op(n) == op_Phi) { + if (intern_get_irn_op(n) == op_Phi) { set_irn_link(n, get_irn_link(get_nodes_Block(n))); set_irn_link(get_nodes_Block(n), n); } - if (get_irn_op(n) == op_Proj) { + if (intern_get_irn_op(n) == op_Proj) { pred = n; - while (get_irn_op(pred) == op_Proj) + while (intern_get_irn_op(pred) == op_Proj) pred = get_Proj_pred(pred); set_irn_link(n, get_irn_link(pred)); set_irn_link(pred, n); @@ -114,7 +114,7 @@ static void move (ir_node *node, ir_node *from_bl, ir_node *to_bl) { set_nodes_Block(node, to_bl); /* move its projs */ - if (get_irn_mode(node) == mode_T) { + if (intern_get_irn_mode(node) == mode_T) { proj = get_irn_link(node); while (proj) { if (get_nodes_Block(proj) == from_bl) @@ -124,9 +124,9 @@ static void move (ir_node *node, ir_node *from_bl, ir_node *to_bl) { } /* recursion ... */ - if (get_irn_op(node) == op_Phi) return; + if (intern_get_irn_op(node) == op_Phi) return; - for (i = 0; i < get_irn_arity(node); i++) { + for (i = 0; i < intern_get_irn_arity(node); i++) { pred = get_irn_n(node, i); if (get_nodes_Block(pred) == from_bl) move(pred, from_bl, to_bl); diff --git a/ir/ir/irgopt.c b/ir/ir/irgopt.c index 2112f6fc8..cee394679 100644 --- a/ir/ir/irgopt.c +++ b/ir/ir/irgopt.c @@ -57,16 +57,16 @@ optimize_in_place_wrapper (ir_node *n, void *env) { int i, irn_arity; ir_node *optimized, *old; - irn_arity = get_irn_arity(n); + irn_arity = intern_get_irn_arity(n); for (i = 0; i < irn_arity; i++) { /* get_irn_n skips Id nodes, so comparison old != optimized does not show all optimizations. Therefore always set new predecessor. 
*/ - old = get_irn_n(n, i); + old = intern_get_irn_intra_n(n, i); optimized = optimize_in_place_2(old); set_irn_n(n, i, optimized); } - if (get_irn_op(n) == op_Block) { + if (intern_get_irn_op(n) == op_Block) { optimized = optimize_in_place_2(n); if (optimized != n) exchange (n, optimized); } @@ -149,9 +149,9 @@ compute_new_arity(ir_node *b) { return block_v - irg_v; } else { /* compute the number of good predecessors */ - res = irn_arity = get_irn_arity(b); + res = irn_arity = intern_get_irn_arity(b); for (i = 0; i < irn_arity; i++) - if (get_irn_opcode(get_irn_n(b, i)) == iro_Bad) res--; + if (intern_get_irn_opcode(intern_get_irn_n(b, i)) == iro_Bad) res--; /* save it in the flag. */ set_Block_block_visited(b, irg_v + res); return res; @@ -160,16 +160,16 @@ compute_new_arity(ir_node *b) { /* TODO: add an ir_op operation */ static INLINE void new_backedge_info(ir_node *n) { - switch(get_irn_opcode(n)) { + switch(intern_get_irn_opcode(n)) { case iro_Block: n->attr.block.cg_backedge = NULL; - n->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n)); + n->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, intern_get_irn_arity(n)); break; case iro_Phi: - n->attr.phi_backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n)); + n->attr.phi_backedge = new_backedge_arr(current_ir_graph->obst, intern_get_irn_arity(n)); break; case iro_Filter: - n->attr.filter.backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n)); + n->attr.filter.backedge = new_backedge_arr(current_ir_graph->obst, intern_get_irn_arity(n)); break; default: ; } @@ -193,23 +193,23 @@ copy_node (ir_node *n, void *env) { the End node. */ //assert(n->op == op_End || ((_ARR_DESCR(n->in))->cookie != ARR_F_MAGIC)); - if (get_irn_opcode(n) == iro_Block) { + if (intern_get_irn_opcode(n) == iro_Block) { block = NULL; new_arity = compute_new_arity(n); n->attr.block.graph_arr = NULL; } else { block = get_nodes_Block(n); - if (get_irn_opcode(n) == iro_Phi) { + if (intern_get_irn_opcode(n) == iro_Phi) { new_arity = compute_new_arity(block); } else { - new_arity = get_irn_arity(n); + new_arity = intern_get_irn_arity(n); } } nn = new_ir_node(get_irn_dbg_info(n), current_ir_graph, block, - get_irn_op(n), - get_irn_mode(n), + intern_get_irn_op(n), + intern_get_irn_mode(n), new_arity, get_irn_in(n)); /* Copy the attributes. These might point to additional data. If this @@ -237,15 +237,15 @@ copy_preds (ir_node *n, void *env) { /* printf("\n old node: "); DDMSG2(n); printf(" new node: "); DDMSG2(nn); - printf(" arities: old: %d, new: %d\n", get_irn_arity(n), get_irn_arity(nn)); */ + printf(" arities: old: %d, new: %d\n", intern_get_irn_arity(n), intern_get_irn_arity(nn)); */ - if (get_irn_opcode(n) == iro_Block) { + if (intern_get_irn_opcode(n) == iro_Block) { /* Don't copy Bad nodes. */ j = 0; - irn_arity = get_irn_arity(n); + irn_arity = intern_get_irn_arity(n); for (i = 0; i < irn_arity; i++) - if (get_irn_opcode(get_irn_n(n, i)) != iro_Bad) { - set_irn_n (nn, j, get_new_node(get_irn_n(n, i))); + if (intern_get_irn_opcode(intern_get_irn_n(n, i)) != iro_Bad) { + set_irn_n (nn, j, get_new_node(intern_get_irn_n(n, i))); /*if (is_backedge(n, i)) set_backedge(nn, j);*/ j++; } @@ -259,18 +259,18 @@ copy_preds (ir_node *n, void *env) { that the fields in ir_graph are set properly. 
*/ if ((get_opt_control_flow_straightening()) && (get_Block_n_cfgpreds(nn) == 1) && - (get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp)) + (intern_get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp)) exchange(nn, get_nodes_Block(get_Block_cfgpred(nn, 0))); - } else if (get_irn_opcode(n) == iro_Phi) { + } else if (intern_get_irn_opcode(n) == iro_Phi) { /* Don't copy node if corresponding predecessor in block is Bad. The Block itself should not be Bad. */ block = get_nodes_Block(n); set_irn_n (nn, -1, get_new_node(block)); j = 0; - irn_arity = get_irn_arity(n); + irn_arity = intern_get_irn_arity(n); for (i = 0; i < irn_arity; i++) - if (get_irn_opcode(get_irn_n(block, i)) != iro_Bad) { - set_irn_n (nn, j, get_new_node(get_irn_n(n, i))); + if (intern_get_irn_opcode(intern_get_irn_n(block, i)) != iro_Bad) { + set_irn_n (nn, j, get_new_node(intern_get_irn_n(n, i))); /*if (is_backedge(n, i)) set_backedge(nn, j);*/ j++; } @@ -279,16 +279,16 @@ copy_preds (ir_node *n, void *env) { set_Block_block_visited(get_nodes_Block(n), 0); /* Compacting the Phi's ins might generate Phis with only one predecessor. */ - if (get_irn_arity(n) == 1) - exchange(n, get_irn_n(n, 0)); + if (intern_get_irn_arity(n) == 1) + exchange(n, intern_get_irn_n(n, 0)); } else { - irn_arity = get_irn_arity(n); + irn_arity = intern_get_irn_arity(n); for (i = -1; i < irn_arity; i++) - set_irn_n (nn, i, get_new_node(get_irn_n(n, i))); + set_irn_n (nn, i, get_new_node(intern_get_irn_n(n, i))); } /* Now the new node is complete. We can add it to the hash table for cse. @@@ inlinening aborts if we identify End. Why? */ - if(get_irn_op(nn) != op_End) + if(intern_get_irn_op(nn) != op_End) add_identities (current_ir_graph->value_table, nn); } @@ -322,10 +322,10 @@ copy_graph (void) { /*- ... and now the keep alives. -*/ /* First pick the not marked block nodes and walk them. We must pick these first as else we will oversee blocks reachable from Phis. */ - irn_arity = get_irn_arity(oe); + irn_arity = intern_get_irn_arity(oe); for (i = 0; i < irn_arity; i++) { - ka = get_irn_n(oe, i); - if ((get_irn_op(ka) == op_Block) && + ka = intern_get_irn_intra_n(oe, i); + if ((intern_get_irn_op(ka) == op_Block) && (get_irn_visited(ka) < get_irg_visited(current_ir_graph))) { /* We must keep the block alive and copy everything reachable */ set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1); @@ -335,10 +335,10 @@ copy_graph (void) { } /* Now pick the Phis. Here we will keep all! */ - irn_arity = get_irn_arity(oe); + irn_arity = intern_get_irn_arity(oe); for (i = 0; i < irn_arity; i++) { - ka = get_irn_n(oe, i); - if ((get_irn_op(ka) == op_Phi)) { + ka = intern_get_irn_intra_n(oe, i); + if ((intern_get_irn_op(ka) == op_Phi)) { if (get_irn_visited(ka) < get_irg_visited(current_ir_graph)) { /* We didn't copy the Phi yet. */ set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1); @@ -421,12 +421,14 @@ copy_graph_env (void) { void dead_node_elimination(ir_graph *irg) { ir_graph *rem; + int rem_ipview = interprocedural_view; struct obstack *graveyard_obst = NULL; struct obstack *rebirth_obst = NULL; /* Remember external state of current_ir_graph. 
*/ rem = current_ir_graph; current_ir_graph = irg; + interprocedural_view = 0; /* Handle graph state */ assert(get_irg_phase_state(current_ir_graph) != phase_building); @@ -460,6 +462,7 @@ dead_node_elimination(ir_graph *irg) { } current_ir_graph = rem; + interprocedural_view = rem_ipview; } /** @@ -475,16 +478,16 @@ static void relink_bad_block_predecessors(ir_node *n, void *env) { /* if link field of block is NULL, look for bad predecessors otherwise this is allready done */ - if (get_irn_op(n) == op_Block && + if (intern_get_irn_op(n) == op_Block && get_irn_link(n) == NULL) { /* save old predecessors in link field (position 0 is the block operand)*/ set_irn_link(n, (void *)get_irn_in(n)); /* count predecessors without bad nodes */ - old_irn_arity = get_irn_arity(n); + old_irn_arity = intern_get_irn_arity(n); for (i = 0; i < old_irn_arity; i++) - if (!is_Bad(get_irn_n(n, i))) new_irn_arity++; + if (!is_Bad(intern_get_irn_n(n, i))) new_irn_arity++; /* arity changing: set new predecessors without bad nodes */ if (new_irn_arity < old_irn_arity) { @@ -495,7 +498,7 @@ static void relink_bad_block_predecessors(ir_node *n, void *env) { new_in[0] = NULL; new_irn_n = 1; for (i = 1; i < old_irn_arity; i++) { - irn = get_irn_n(n, i); + irn = intern_get_irn_n(n, i); if (!is_Bad(irn)) new_in[new_irn_n++] = irn; } n->in = new_in; @@ -516,11 +519,11 @@ static void relink_bad_predecessors(ir_node *n, void *env) { int i, old_irn_arity, new_irn_arity; /* relink bad predeseccors of a block */ - if (get_irn_op(n) == op_Block) + if (intern_get_irn_op(n) == op_Block) relink_bad_block_predecessors(n, env); /* If Phi node relink its block and its predecessors */ - if (get_irn_op(n) == op_Phi) { + if (intern_get_irn_op(n) == op_Phi) { /* Relink predeseccors of phi's block */ block = get_nodes_Block(n); @@ -577,13 +580,13 @@ copy_node_inline (ir_node *n, void *env) { type *frame_tp = (type *)env; copy_node(n, NULL); - if (get_irn_op(n) == op_Sel) { + if (intern_get_irn_op(n) == op_Sel) { new = get_new_node (n); - assert(get_irn_op(new) == op_Sel); + assert(intern_get_irn_op(new) == op_Sel); if (get_entity_owner(get_Sel_entity(n)) == frame_tp) { set_Sel_entity(new, get_entity_link(get_Sel_entity(n))); } - } else if (get_irn_op(n) == op_Block) { + } else if (intern_get_irn_op(n) == op_Block) { new = get_new_node (n); new->attr.block.irg = current_ir_graph; } @@ -734,7 +737,7 @@ void inline_method(ir_node *call, ir_graph *called_graph) { /* -- Precompute some values -- */ end_bl = get_new_node(get_irg_end_block(called_graph)); end = get_new_node(get_irg_end(called_graph)); - arity = get_irn_arity(end_bl); /* arity = n_exc + n_ret */ + arity = intern_get_irn_arity(end_bl); /* arity = n_exc + n_ret */ n_res = get_method_n_ress(get_Call_type(call)); res_pred = (ir_node **) malloc (n_res * sizeof (ir_node *)); @@ -743,9 +746,9 @@ void inline_method(ir_node *call, ir_graph *called_graph) { set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */ /* -- archive keepalives -- */ - irn_arity = get_irn_arity(end); + irn_arity = intern_get_irn_arity(end); for (i = 0; i < irn_arity; i++) - add_End_keepalive(get_irg_end(current_ir_graph), get_irn_n(end, i)); + add_End_keepalive(get_irg_end(current_ir_graph), intern_get_irn_n(end, i)); /* The new end node will die. We need not free as the in array is on the obstack: copy_node only generated 'D' arrays. 
*/ @@ -754,8 +757,8 @@ void inline_method(ir_node *call, ir_graph *called_graph) { n_ret = 0; for (i = 0; i < arity; i++) { ir_node *ret; - ret = get_irn_n(end_bl, i); - if (get_irn_op(ret) == op_Return) { + ret = intern_get_irn_n(end_bl, i); + if (intern_get_irn_op(ret) == op_Return) { cf_pred[n_ret] = new_r_Jmp(current_ir_graph, get_nodes_Block(ret)); n_ret++; } @@ -768,8 +771,8 @@ void inline_method(ir_node *call, ir_graph *called_graph) { /* First the Memory-Phi */ n_ret = 0; for (i = 0; i < arity; i++) { - ret = get_irn_n(end_bl, i); - if (get_irn_op(ret) == op_Return) { + ret = intern_get_irn_n(end_bl, i); + if (intern_get_irn_op(ret) == op_Return) { cf_pred[n_ret] = get_Return_mem(ret); n_ret++; } @@ -786,13 +789,13 @@ void inline_method(ir_node *call, ir_graph *called_graph) { for (j = 0; j < n_res; j++) { n_ret = 0; for (i = 0; i < arity; i++) { - ret = get_irn_n(end_bl, i); - if (get_irn_op(ret) == op_Return) { + ret = intern_get_irn_n(end_bl, i); + if (intern_get_irn_op(ret) == op_Return) { cf_pred[n_ret] = get_Return_res(ret, j); n_ret++; } } - phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0])); + phi = new_Phi(n_ret, cf_pred, intern_get_irn_mode(cf_pred[0])); res_pred[j] = phi; /* Conserve Phi-list for further inlinings -- but might be optimized */ if (get_nodes_Block(phi) == post_bl) { @@ -818,8 +821,8 @@ void inline_method(ir_node *call, ir_graph *called_graph) { n_exc = 0; for (i = 0; i < arity; i++) { ir_node *ret; - ret = get_irn_n(end_bl, i); - if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) { + ret = intern_get_irn_n(end_bl, i); + if (is_fragile_op(skip_Proj(ret)) || (intern_get_irn_op(skip_Proj(ret)) == op_Raise)) { cf_pred[n_exc] = ret; n_exc++; } @@ -831,15 +834,15 @@ void inline_method(ir_node *call, ir_graph *called_graph) { n_exc = 0; for (i = 0; i < arity; i++) { ir_node *ret; - ret = skip_Proj(get_irn_n(end_bl, i)); - if (get_irn_op(ret) == op_Call) { + ret = skip_Proj(intern_get_irn_n(end_bl, i)); + if (intern_get_irn_op(ret) == op_Call) { cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 3); n_exc++; } else if (is_fragile_op(ret)) { /* We rely that all cfops have the memory output at the same position. */ cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 0); n_exc++; - } else if (get_irn_op(ret) == op_Raise) { + } else if (intern_get_irn_op(ret) == op_Raise) { cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 1); n_exc++; } @@ -857,19 +860,19 @@ void inline_method(ir_node *call, ir_graph *called_graph) { /* assert(exc_handling == 1 || no exceptions. 
) */ n_exc = 0; for (i = 0; i < arity; i++) { - ir_node *ret = get_irn_n(end_bl, i); + ir_node *ret = intern_get_irn_n(end_bl, i); - if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) { + if (is_fragile_op(skip_Proj(ret)) || (intern_get_irn_op(skip_Proj(ret)) == op_Raise)) { cf_pred[n_exc] = ret; n_exc++; } } main_end_bl = get_irg_end_block(current_ir_graph); - main_end_bl_arity = get_irn_arity(main_end_bl); + main_end_bl_arity = intern_get_irn_arity(main_end_bl); end_preds = (ir_node **) malloc ((n_exc + main_end_bl_arity) * sizeof (ir_node *)); for (i = 0; i < main_end_bl_arity; ++i) - end_preds[i] = get_irn_n(main_end_bl, i); + end_preds[i] = intern_get_irn_n(main_end_bl, i); for (i = 0; i < n_exc; ++i) end_preds[main_end_bl_arity + i] = cf_pred[i]; set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds); @@ -895,13 +898,13 @@ void inline_method(ir_node *call, ir_graph *called_graph) { end_bl = get_irg_end_block(current_ir_graph); for (i = 0; i < get_Block_n_cfgpreds(end_bl); i++) { cf_op = get_Block_cfgpred(end_bl, i); - if (get_irn_op(cf_op) == op_Proj) { + if (intern_get_irn_op(cf_op) == op_Proj) { cf_op = get_Proj_pred(cf_op); - if ((get_irn_op(cf_op) == op_Tuple) && (cf_op == call)) { + if ((intern_get_irn_op(cf_op) == op_Tuple) && (cf_op == call)) { // There are unoptimized tuples from inlineing before when no exc assert(get_Proj_proj(get_Block_cfgpred(end_bl, i)) == pn_Call_X_except); cf_op = get_Tuple_pred(cf_op, pn_Call_X_except); - assert(get_irn_op(cf_op) == op_Jmp); + assert(intern_get_irn_op(cf_op) == op_Jmp); break; } } @@ -951,7 +954,7 @@ static ir_graph *get_call_called_irg(ir_node *call) { assert(get_irn_op(call) == op_Call); addr = get_Call_ptr(call); - if (get_irn_op(addr) == op_Const) { + if (intern_get_irn_op(addr) == op_Const) { /* Check whether the constant is the pointer to a compiled entity. */ tv = get_Const_tarval(addr); if (tarval_to_entity(tv)) @@ -967,10 +970,10 @@ static void collect_calls(ir_node *call, void *env) { tarval *tv; ir_graph *called_irg; - if (get_irn_op(call) != op_Call) return; + if (intern_get_irn_op(call) != op_Call) return; addr = get_Call_ptr(call); - if (get_irn_op(addr) == op_Const) { + if (intern_get_irn_op(addr) == op_Const) { /* Check whether the constant is the pointer to a compiled entity. */ tv = get_Const_tarval(addr); if (tarval_to_entity(tv)) { @@ -1062,7 +1065,7 @@ static void free_inline_irg_env(inline_irg_env *env) { static void collect_calls2(ir_node *call, void *env) { inline_irg_env *x = (inline_irg_env *)env; - ir_op *op = get_irn_op(call); + ir_op *op = intern_get_irn_op(call); ir_graph *callee; /* count nodes in irg */ @@ -1251,28 +1254,28 @@ place_floats_early(ir_node *n, pdeq *worklist) mark_irn_visited(n); /* Place floating nodes. */ - if (get_op_pinned(get_irn_op(n)) == floats) { + if (get_op_pinned(intern_get_irn_op(n)) == floats) { int depth = 0; ir_node *b = new_Bad(); /* The block to place this node in */ - assert(get_irn_op(n) != op_Block); + assert(intern_get_irn_op(n) != op_Block); - if ((get_irn_op(n) == op_Const) || - (get_irn_op(n) == op_SymConst) || + if ((intern_get_irn_op(n) == op_Const) || + (intern_get_irn_op(n) == op_SymConst) || (is_Bad(n)) || - (get_irn_op(n) == op_Unknown)) { + (intern_get_irn_op(n) == op_Unknown)) { /* These nodes will not be placed by the loop below. */ b = get_irg_start_block(current_ir_graph); depth = 1; } /* find the block for this node. 
*/ - irn_arity = get_irn_arity(n); + irn_arity = intern_get_irn_arity(n); for (i = 0; i < irn_arity; i++) { - ir_node *dep = get_irn_n(n, i); + ir_node *dep = intern_get_irn_n(n, i); ir_node *dep_block; if ((irn_not_visited(dep)) && - (get_op_pinned(get_irn_op(dep)) == floats)) { + (get_op_pinned(intern_get_irn_op(dep)) == floats)) { place_floats_early(dep, worklist); } /* Because all loops contain at least one pinned node, now all @@ -1295,10 +1298,10 @@ place_floats_early(ir_node *n, pdeq *worklist) } /* Add predecessors of non floating nodes on worklist. */ - start = (get_irn_op(n) == op_Block) ? 0 : -1; - irn_arity = get_irn_arity(n); + start = (intern_get_irn_op(n) == op_Block) ? 0 : -1; + irn_arity = intern_get_irn_arity(n); for (i = start; i < irn_arity; i++) { - ir_node *pred = get_irn_n(n, i); + ir_node *pred = intern_get_irn_n(n, i); if (irn_not_visited(pred)) { pdeq_putr (worklist, pred); } @@ -1337,14 +1340,14 @@ consumer_dom_dca (ir_node *dca, ir_node *consumer, ir_node *producer) /* Compute the latest block into which we can place a node so that it is before consumer. */ - if (get_irn_op(consumer) == op_Phi) { + if (intern_get_irn_op(consumer) == op_Phi) { /* our consumer is a Phi-node, the effective use is in all those blocks through which the Phi-node reaches producer */ int i, irn_arity; ir_node *phi_block = get_nodes_Block(consumer); - irn_arity = get_irn_arity(consumer); + irn_arity = intern_get_irn_arity(consumer); for (i = 0; i < irn_arity; i++) { - if (get_irn_n(consumer, i) == producer) { + if (intern_get_irn_n(consumer, i) == producer) { block = get_nodes_Block(get_Block_cfgpred(phi_block, i)); } } @@ -1421,9 +1424,9 @@ place_floats_late(ir_node *n, pdeq *worklist) assert (irn_not_visited(n)); /* no multiple placement */ /* no need to place block nodes, control nodes are already placed. */ - if ((get_irn_op(n) != op_Block) && + if ((intern_get_irn_op(n) != op_Block) && (!is_cfop(n)) && - (get_irn_mode(n) != mode_X)) { + (intern_get_irn_mode(n) != mode_X)) { /* Remember the early placement of this block to move it out of loop no further than the early placement. */ early = get_nodes_Block(n); @@ -1437,15 +1440,15 @@ place_floats_late(ir_node *n, pdeq *worklist) producer of one of their inputs in the same block anyway. */ for (i = 0; i < get_irn_n_outs(n); i++) { ir_node *succ = get_irn_out(n, i); - if (irn_not_visited(succ) && (get_irn_op(succ) != op_Phi)) + if (irn_not_visited(succ) && (intern_get_irn_op(succ) != op_Phi)) place_floats_late(succ, worklist); } /* We have to determine the final block of this node... except for constants. */ - if ((get_op_pinned(get_irn_op(n)) == floats) && - (get_irn_op(n) != op_Const) && - (get_irn_op(n) != op_SymConst)) { + if ((get_op_pinned(intern_get_irn_op(n)) == floats) && + (intern_get_irn_op(n) != op_Const) && + (intern_get_irn_op(n) != op_SymConst)) { ir_node *dca = NULL; /* deepest common ancestor in the dominator tree of all nodes' blocks depending on us; our final @@ -1537,14 +1540,14 @@ static void merge_blocks(ir_node *n, void *env) { int i; set_irn_link(n, NULL); - if (get_irn_op(n) == op_Block) { + if (intern_get_irn_op(n) == op_Block) { /* Remove Tuples */ for (i = 0; i < get_Block_n_cfgpreds(n); i++) /* GL @@@ : is this possible? if (get_opt_normalize()) -- added, all tests go through. A different order of optimizations might cause problems. 
*/ if (get_opt_normalize()) set_Block_cfgpred(n, i, skip_Tuple(get_Block_cfgpred(n, i))); - } else if (get_opt_optimize() && (get_irn_mode(n) == mode_X)) { + } else if (get_opt_optimize() && (intern_get_irn_mode(n) == mode_X)) { /* We will soon visit a block. Optimize it before visiting! */ ir_node *b = get_nodes_Block(n); ir_node *new_node = equivalent_node(b); @@ -1573,11 +1576,11 @@ static void collect_nodes(ir_node *n, void *env) { if (is_no_Block(n)) { ir_node *b = get_nodes_Block(n); - if ((get_irn_op(n) == op_Phi)) { + if ((intern_get_irn_op(n) == op_Phi)) { /* Collect Phi nodes to compact ins along with block's ins. */ set_irn_link(n, get_irn_link(b)); set_irn_link(b, n); - } else if (get_irn_op(n) != op_Jmp) { /* Check for non empty block. */ + } else if (intern_get_irn_op(n) != op_Jmp) { /* Check for non empty block. */ mark_Block_block_visited(b); } } @@ -1670,7 +1673,7 @@ static void optimize_blocks(ir_node *b, void *env) { /*- Fix the Phi nodes -*/ phi = get_irn_link(b); while (phi) { - assert(get_irn_op(phi) == op_Phi); + assert(intern_get_irn_op(phi) == op_Phi); /* Find the new predecessors for the Phi */ n_preds = 0; for (i = 0; i < get_Block_n_cfgpreds(b); i++) { @@ -1683,7 +1686,7 @@ static void optimize_blocks(ir_node *b, void *env) { ir_node *phi_pred = get_Phi_pred(phi, i); for (j = 0; j < get_Block_n_cfgpreds(pred); j++) { if (get_nodes_Block(phi_pred) == pred) { - assert(get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */ + assert(intern_get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */ in[n_preds] = get_Phi_pred(phi_pred, j); } else { in[n_preds] = phi_pred; @@ -1721,7 +1724,7 @@ static void optimize_blocks(ir_node *b, void *env) { < get_irg_block_visited(current_ir_graph)) { phi = get_irn_link(pred); while (phi) { - if (get_irn_op(phi) == op_Phi) { + if (intern_get_irn_op(phi) == op_Phi) { set_nodes_Block(phi, b); n_preds = 0; @@ -1826,13 +1829,13 @@ void optimize_cf(ir_graph *irg) { for(i = 0; i < get_End_n_keepalives(end); i++) { ir_node *ka = get_End_keepalive(end, i); if (irn_not_visited(ka)) { - if ((get_irn_op(ka) == op_Block) && Block_not_block_visited(ka)) { + if ((intern_get_irn_op(ka) == op_Block) && Block_not_block_visited(ka)) { set_irg_block_visited(current_ir_graph, /* Don't walk all the way to Start. */ get_irg_block_visited(current_ir_graph)-1); irg_block_walk(ka, optimize_blocks, NULL, NULL); mark_irn_visited(ka); ARR_APP1 (ir_node *, in, ka); - } else if (get_irn_op(ka) == op_Phi) { + } else if (intern_get_irn_op(ka) == op_Phi) { mark_irn_visited(ka); ARR_APP1 (ir_node *, in, ka); } @@ -1860,19 +1863,19 @@ static void walk_critical_cf_edges(ir_node *n, void *env) { ir_node *pre, *block, **in, *jmp; /* Block has multiple predecessors */ - if ((op_Block == get_irn_op(n)) && - (get_irn_arity(n) > 1)) { - arity = get_irn_arity(n); + if ((op_Block == intern_get_irn_op(n)) && + (intern_get_irn_arity(n) > 1)) { + arity = intern_get_irn_arity(n); if (n == get_irg_end_block(current_ir_graph)) return; // No use to add a block here. 
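For orientation, here is a minimal sketch of the transformation this walker performs: block n has more than one predecessor, and when the i-th control-flow edge leaves a forking block (its control flow arrives through a Proj), that edge is critical and gets rerouted through a freshly created block holding nothing but a Jmp. This is illustrative only, not the verbatim hunk; new_r_Block is assumed from libfirm's raw-constructor family (only new_r_Jmp and new_r_Proj appear in this patch), and the in-array handling is simplified to a stack array.

    ir_node *pre, *block, *jmp;
    ir_node *in[1];            /* simplified: the real code builds the array on the obstack */
    int i, arity = get_irn_arity(n);

    for (i = 0; i < arity; i++) {
      pre = get_irn_n(n, i);
      /* A Proj as control-flow predecessor means the source block forks,
         so the edge pre -> n is critical: interpose an empty block. */
      if (pre != NULL && get_irn_op(pre) == op_Proj) {
        in[0] = pre;
        block = new_r_Block(current_ir_graph, 1, in);  /* assumed constructor signature */
        jmp   = new_r_Jmp(current_ir_graph, block);    /* the new block holds only a Jmp */
        set_irn_n(n, i, jmp);   /* reroute the i-th edge through the new block */
      }
    }

Splitting these edges up front lets later phases attach code to a single control-flow edge without disturbing the other paths through either block.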
for (i=0; iobst, 1); diff --git a/ir/ir/irgwalk.c b/ir/ir/irgwalk.c index c5e94a892..73d472343 100644 --- a/ir/ir/irgwalk.c +++ b/ir/ir/irgwalk.c @@ -45,9 +45,9 @@ static void irg_walk_cg(ir_node * node, int visited, eset * irg_set, set_irn_visited(node, visited); pred = skip_Proj(node); - if (get_irn_op(pred) == op_CallBegin - || get_irn_op(pred) == op_EndReg - || get_irn_op(pred) == op_EndExcept) { + if (intern_get_irn_op(pred) == op_CallBegin + || intern_get_irn_op(pred) == op_EndReg + || intern_get_irn_op(pred) == op_EndExcept) { current_ir_graph = get_irn_irg(pred); } @@ -56,28 +56,28 @@ static void irg_walk_cg(ir_node * node, int visited, eset * irg_set, if (is_no_Block(node)) irg_walk_cg(get_nodes_block(node), visited, irg_set, pre, post, env); - if (get_irn_op(node) == op_Block) { /* block */ - for (i = get_irn_arity(node) - 1; i >= 0; --i) { - ir_node * exec = get_irn_n(node, i); + if (intern_get_irn_op(node) == op_Block) { /* block */ + for (i = intern_get_irn_arity(node) - 1; i >= 0; --i) { + ir_node * exec = intern_get_irn_n(node, i); ir_node * pred = skip_Proj(exec); - if ((get_irn_op(pred) != op_CallBegin - && get_irn_op(pred) != op_EndReg - && get_irn_op(pred) != op_EndExcept) + if ((intern_get_irn_op(pred) != op_CallBegin + && intern_get_irn_op(pred) != op_EndReg + && intern_get_irn_op(pred) != op_EndExcept) || eset_contains(irg_set, get_irn_irg(pred))) { irg_walk_cg(exec, visited, irg_set, pre, post, env); } } - } else if (get_irn_op(node) == op_Filter) { - for (i = get_irn_arity(node) - 1; i >= 0; --i) { - ir_node * pred = get_irn_n(node, i); - if (get_irn_op(pred) == op_Unknown || get_irn_op(pred) == op_Bad) { + } else if (intern_get_irn_op(node) == op_Filter) { + for (i = intern_get_irn_arity(node) - 1; i >= 0; --i) { + ir_node * pred = intern_get_irn_n(node, i); + if (intern_get_irn_op(pred) == op_Unknown || intern_get_irn_op(pred) == op_Bad) { irg_walk_cg(pred, visited, irg_set, pre, post, env); } else { ir_node * exec; exec = skip_Proj(get_Block_cfgpred(get_nodes_block(node), i)); - assert(get_irn_op(exec) == op_CallBegin - || get_irn_op(exec) == op_EndReg - || get_irn_op(exec) == op_EndExcept); + assert(intern_get_irn_op(exec) == op_CallBegin + || intern_get_irn_op(exec) == op_EndReg + || intern_get_irn_op(exec) == op_EndExcept); if (eset_contains(irg_set, get_irn_irg(exec))) { current_ir_graph = get_irn_irg(exec); irg_walk_cg(pred, visited, irg_set, pre, post, env); @@ -86,8 +86,8 @@ static void irg_walk_cg(ir_node * node, int visited, eset * irg_set, } } } else { - for (i = get_irn_arity(node) - 1; i >= 0; --i) { - irg_walk_cg(get_irn_n(node, i), visited, irg_set, pre, post, env); + for (i = intern_get_irn_arity(node) - 1; i >= 0; --i) { + irg_walk_cg(intern_get_irn_n(node, i), visited, irg_set, pre, post, env); } } @@ -99,7 +99,7 @@ static void irg_walk_cg(ir_node * node, int visited, eset * irg_set, /* Insert all ir_graphs in irg_set, that are (transitive) reachable. 
*/ static void collect_irgs(ir_node * node, eset * irg_set) { - if (get_irn_op(node) == op_Call) { + if (intern_get_irn_op(node) == op_Call) { int i; for (i = get_Call_n_callees(node) - 1; i >= 0; --i) { entity * ent = get_Call_callee(node, i); @@ -125,8 +125,8 @@ irg_walk_2(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void * env) if (is_no_Block(node)) irg_walk_2(get_nodes_block(node), pre, post, env); - for (i = get_irn_arity(node) - 1; i >= 0; --i) - irg_walk_2(get_irn_n(node, i), pre, post, env); + for (i = intern_get_irn_arity(node) - 1; i >= 0; --i) + irg_walk_2(intern_get_irn_n(node, i), pre, post, env); if (post) post(node, env); } @@ -137,9 +137,9 @@ irg_walk_2(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void * env) if (pre) pre(node, env); if (node->op != op_Block) - irg_walk_2(get_irn_n(node, -1), pre, post, env); - for (i = get_irn_arity(node) - 1; i >= 0; --i) - irg_walk_2(get_irn_n(node, i), pre, post, env); + irg_walk_2(intern_get_irn_n(node, -1), pre, post, env); + for (i = intern_get_irn_arity(node) - 1; i >= 0; --i) + irg_walk_2(intern_get_irn_n(node, i), pre, post, env); if (post) post(node, env); } @@ -206,9 +206,9 @@ switch_irg (ir_node *n, int index) { if (interprocedural_view) { /* Only Filter and Block nodes can have predecessors in other graphs. */ - if (get_irn_op(n) == op_Filter) + if (intern_get_irn_op(n) == op_Filter) n = get_nodes_block(n); - if (get_irn_op(n) == op_Block) { + if (intern_get_irn_op(n) == op_Block) { ir_node *cfop = skip_Proj(get_Block_cfgpred(n, index)); if (is_ip_cfop(cfop)) { current_ir_graph = get_irn_irg(cfop); @@ -233,10 +233,10 @@ cg_walk_2(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void * env) if (is_no_Block(node)) cg_walk_2(get_nodes_block(node), pre, post, env); - for (i = get_irn_arity(node) - 1; i >= 0; --i) { + for (i = intern_get_irn_arity(node) - 1; i >= 0; --i) { rem = switch_irg(node, i); /* @@@ AS: Is this wrong? We do have to switch to the irg of the predecessor, don't we? */ - cg_walk_2(get_irn_n(node, i), pre, post, env); + cg_walk_2(intern_get_irn_n(node, i), pre, post, env); current_ir_graph = rem; } @@ -316,7 +316,7 @@ static ir_node *get_cf_op(ir_node *n) { n = skip_Tuple(n); pred = skip_Proj(n); if (!(is_cfop(pred) || is_fragile_op(pred) || - (get_irn_op(pred) == op_Bad))) + (intern_get_irn_op(pred) == op_Bad))) n = get_cf_op(n); return skip_Proj(n); @@ -325,7 +325,6 @@ static ir_node *get_cf_op(ir_node *n) { static void irg_block_walk_2(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void *env) { int i; - assert(get_irn_opcode(node) == iro_Block); if(get_Block_block_visited(node) < get_irg_block_visited(current_ir_graph)) { set_Block_block_visited(node, get_irg_block_visited(current_ir_graph)); @@ -336,7 +335,7 @@ static void irg_block_walk_2(ir_node *node, irg_walk_func *pre, irg_walk_func *p /* find the corresponding predecessor block. */ ir_node *pred = get_cf_op(get_Block_cfgpred(node, i)); pred = get_nodes_block(pred); - if(get_irn_opcode(pred) == iro_Block) { + if(intern_get_irn_opcode(pred) == iro_Block) { /* recursion */ irg_block_walk_2(pred, pre, post, env); } @@ -366,11 +365,11 @@ void irg_block_walk(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void assert(get_irn_opcode(block) == iro_Block); irg_block_walk_2(block, pre, post, env); /* keepalive: the endless loops ... 
*/ - if (get_irn_op(node) == op_End) { - int arity = get_irn_arity(node); + if (intern_get_irn_op(node) == op_End) { + int arity = intern_get_irn_arity(node); for (i = 0; i < arity; i++) { - pred = get_irn_n(node, i); - if (get_irn_op(pred) == op_Block) + pred = intern_get_irn_n(node, i); + if (intern_get_irn_op(pred) == op_Block) irg_block_walk_2(pred, pre, post, env); } } @@ -591,8 +590,8 @@ enter_procedure(ir_node *block, ir_node *cf_pred, int pos) { assert(interprocedural_view); interprocedural_view = 0; - callbegin = skip_Proj(get_irn_n(block, 0)); - assert(get_irn_op(callbegin) == op_CallBegin); + callbegin = skip_Proj(intern_get_irn_n(block, 0)); + assert(intern_get_irn_op(callbegin) == op_CallBegin); interprocedural_view = 1; push_callsite(irg, callbegin); @@ -624,18 +623,18 @@ ir_node *get_irn_ip_pred(ir_node *n, int pos) { /* Find the cf_pred refering to pos. */ ir_node *block = n; ir_node *cf_pred; - if (get_irn_opcode(n) == iro_Filter) block = get_nodes_block(n); - cf_pred = skip_Proj(get_irn_n(block, pos)); + if (intern_get_irn_opcode(n) == iro_Filter) block = get_nodes_block(n); + cf_pred = skip_Proj(intern_get_irn_n(block, pos)); /* Check whether we enter or leave a procedure and act according. */ - if ((get_irn_op(cf_pred) == op_EndReg) || - (get_irn_op(cf_pred) == op_EndExcept)) + if ((intern_get_irn_op(cf_pred) == op_EndReg) || + (intern_get_irn_op(cf_pred) == op_EndExcept)) enter_procedure(block, cf_pred, pos); - if (get_irn_op(cf_pred) == op_CallBegin) + if (intern_get_irn_op(cf_pred) == op_CallBegin) if (!leave_procedure(block, cf_pred, pos)) return NULL; } - return get_irn_n(n, pos); + return intern_get_irn_n(n, pos); } static INLINE void @@ -670,13 +669,13 @@ return_recur(ir_node *n, int pos) { /* Find the cf_pred refering to pos. */ block = n; - if (get_irn_opcode(n) == iro_Filter) block = get_nodes_block(n); - cf_pred = skip_Proj(get_irn_n(block, pos)); + if (intern_get_irn_opcode(n) == iro_Filter) block = get_nodes_block(n); + cf_pred = skip_Proj(intern_get_irn_n(block, pos)); /* Check whether we re_enter or re_leave a procedure and act according. */ - if ((get_irn_op(cf_pred) == op_EndReg) || - (get_irn_op(cf_pred) == op_EndExcept)) + if ((intern_get_irn_op(cf_pred) == op_EndReg) || + (intern_get_irn_op(cf_pred) == op_EndExcept)) re_enter_procedure(block, cf_pred, pos); - if (get_irn_op(cf_pred) == op_CallBegin) + if (intern_get_irn_op(cf_pred) == op_CallBegin) re_leave_procedure(block, cf_pred, pos); } diff --git a/ir/ir/irnode.c b/ir/ir/irnode.c index cfeb23bd6..6c0dff9b6 100644 --- a/ir/ir/irnode.c +++ b/ir/ir/irnode.c @@ -20,10 +20,8 @@ #include "irgraph_t.h" #include "irmode_t.h" #include "typegmod.h" -#include "array.h" #include "irbackedge_t.h" #include "irdump.h" -#include "irflag_t.h" #include "irop_t.h" #include "irprog_t.h" @@ -159,32 +157,19 @@ is_ir_node (const void *thing) { return 0; } -/* returns the number of predecessors without the block predecessor. */ INLINE int get_irn_intra_arity (const ir_node *node) { - assert(node); - return ARR_LEN(node->in) - 1; + return intern_get_irn_intra_arity(node); } -/* returns the number of predecessors without the block predecessor. 
*/ INLINE int get_irn_inter_arity (const ir_node *node) { - assert(node); - if (get_irn_opcode(node) == iro_Filter) { - assert(node->attr.filter.in_cg); - return ARR_LEN(node->attr.filter.in_cg) - 1; - } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) { - return ARR_LEN(node->attr.block.in_cg) - 1; - } - return get_irn_intra_arity(node); + return intern_get_irn_inter_arity(node); } -/* returns the number of predecessors without the block predecessor. */ INLINE int get_irn_arity (const ir_node *node) { - assert(node); - if (interprocedural_view) return get_irn_inter_arity(node); - return get_irn_intra_arity(node); + return intern_get_irn_arity(node); } /* Returns the array with ins. This array is shifted with respect to the @@ -235,35 +220,19 @@ set_irn_in (ir_node *node, int arity, ir_node **in) { INLINE ir_node * get_irn_intra_n (ir_node *node, int n) { - return (node->in[n + 1] = skip_nop(node->in[n + 1])); + return intern_get_irn_intra_n (node, n); } -INLINE ir_node* +INLINE ir_node * get_irn_inter_n (ir_node *node, int n) { - /* handle Filter and Block specially */ - if (get_irn_opcode(node) == iro_Filter) { - assert(node->attr.filter.in_cg); - return (node->attr.filter.in_cg[n + 1] = skip_nop(node->attr.filter.in_cg[n + 1])); - } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) { - return (node->attr.block.in_cg[n + 1] = skip_nop(node->attr.block.in_cg[n + 1])); - } - - return get_irn_intra_n (node, n); + return intern_get_irn_inter_n (node, n); } -/* to iterate through the predecessors without touching the array */ -/* To iterate over the operands iterate from 0 to i < get_irn_arity(), - to iterate including the Block predecessor iterate from i = -1 to - i < get_irn_arity. - If it is a block, the entry -1 is NULL. */ INLINE ir_node * get_irn_n (ir_node *node, int n) { - assert(node); assert(-1 <= n && n < get_irn_arity(node)); - if (interprocedural_view) return get_irn_inter_n (node, n); - return get_irn_intra_n (node, n); + return intern_get_irn_n (node, n); } - INLINE void set_irn_n (ir_node *node, int n, ir_node *in) { assert(node && -1 <= n && n < get_irn_arity(node)); @@ -289,10 +258,8 @@ set_irn_n (ir_node *node, int n, ir_node *in) { } INLINE ir_mode * -get_irn_mode (const ir_node *node) -{ - assert (node); - return node->mode; +get_irn_mode (const ir_node *node) { + return intern_get_irn_mode(node); } INLINE void @@ -328,8 +295,7 @@ get_irn_modeident (const ir_node *node) INLINE ir_op * get_irn_op (const ir_node *node) { - assert (node); - return node->op; + return intern_get_irn_op(node); } /* should be private to the library: */ @@ -343,9 +309,7 @@ set_irn_op (ir_node *node, ir_op *op) INLINE opcode get_irn_opcode (const ir_node *node) { - assert (k_ir_node == get_kind(node)); - assert (node -> op); - return node->op->code; + return intern_get_irn_opcode(node); } INLINE const char * @@ -728,8 +692,7 @@ INLINE void set_Start_irg(ir_node *node, ir_graph *irg) { assert(node->op == op_Start); assert(is_ir_graph(irg)); - assert(0 && " Why set irg? "); - //node->attr.start.irg = irg; + assert(0 && " Why set irg? -- use set_irn_irg"); } INLINE int @@ -1917,7 +1880,7 @@ skip_nop (ir_node *node) { ir_node *rem_pred = node->in[0+1]; ir_node *res; - assert (get_irn_arity (node) > 0); + assert (intern_get_irn_arity (node) > 0); node->in[0+1] = node; res = skip_nop(rem_pred); @@ -1933,7 +1896,6 @@ skip_nop (ir_node *node) { /* This should compact Id-cycles to self-cycles. It has the same (or less?) 
complexity than any other approach, as Id chains are resolved and all point to the real node, or all id's are self loops. */ -extern int opt_normalize; INLINE ir_node * skip_nop (ir_node *node) { ir_node *pred; @@ -1949,7 +1911,7 @@ skip_nop (ir_node *node) { if (pred->op != op_Id) return pred; /* shortcut */ rem_pred = pred; - assert (get_irn_arity (node) > 0); + assert (intern_get_irn_arity (node) > 0); node->in[0+1] = node; res = skip_nop(rem_pred); @@ -1963,8 +1925,6 @@ skip_nop (ir_node *node) { } #endif - - INLINE ir_node * skip_Id (ir_node *node) { return skip_nop(node); diff --git a/ir/ir/irnode_t.h b/ir/ir/irnode_t.h index fa24090fa..142c5c2a8 100644 --- a/ir/ir/irnode_t.h +++ b/ir/ir/irnode_t.h @@ -29,10 +29,12 @@ # include "irnode.h" # include "irop_t.h" +# include "irflag_t.h" # include "firm_common_t.h" # include "irdom_t.h" /* For size of struct dom_info. */ # include "dbginfo.h" # include "irloop.h" +# include "array.h" # include "exc.h" @@ -241,4 +243,93 @@ int get_irn_phi_attr (ir_node *node); block_attr get_irn_block_attr (ir_node *node); /*@}*/ +/*********************************************************************/ +/* These function are most used in libfirm. Give them as static */ +/* functions so they can be inlined. */ +/*********************************************************************/ + + + +/* returns the number of predecessors without the block predecessor. */ +static INLINE int +intern_get_irn_intra_arity (const ir_node *node) { + assert(node); + return ARR_LEN(node->in) - 1; +} + +/* returns the number of predecessors without the block predecessor. */ +static INLINE int +intern_get_irn_inter_arity (const ir_node *node) { + assert(node); + if (get_irn_opcode(node) == iro_Filter) { + assert(node->attr.filter.in_cg); + return ARR_LEN(node->attr.filter.in_cg) - 1; + } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) { + return ARR_LEN(node->attr.block.in_cg) - 1; + } + return intern_get_irn_intra_arity(node); +} + +/* returns the number of predecessors without the block predecessor. */ +static INLINE int +intern_get_irn_arity (const ir_node *node) { + assert(node); + if (interprocedural_view) return intern_get_irn_inter_arity(node); + return intern_get_irn_intra_arity(node); +} + +static INLINE ir_node * +intern_get_irn_intra_n (ir_node *node, int n) { + return (node->in[n + 1] = skip_nop(node->in[n + 1])); +} + +static INLINE ir_node* +intern_get_irn_inter_n (ir_node *node, int n) { + /* handle Filter and Block specially */ + if (get_irn_opcode(node) == iro_Filter) { + assert(node->attr.filter.in_cg); + return (node->attr.filter.in_cg[n + 1] = skip_nop(node->attr.filter.in_cg[n + 1])); + } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) { + return (node->attr.block.in_cg[n + 1] = skip_nop(node->attr.block.in_cg[n + 1])); + } + + return get_irn_intra_n (node, n); +} + +/* to iterate through the predecessors without touching the array */ +/* To iterate over the operands iterate from 0 to i < get_irn_arity(), + to iterate including the Block predecessor iterate from i = -1 to + i < get_irn_arity. + If it is a block, the entry -1 is NULL. 
*/ +static INLINE ir_node * +intern_get_irn_n (ir_node *node, int n) { + assert(node); assert(-1 <= n && n < intern_get_irn_arity(node)); + if (interprocedural_view) return get_irn_inter_n (node, n); + return get_irn_intra_n (node, n); +} + +static INLINE ir_mode * +intern_get_irn_mode (const ir_node *node) +{ + assert (node); + return node->mode; +} + +static INLINE ir_op * +intern_get_irn_op (const ir_node *node) +{ + assert (node); + return node->op; +} + +static INLINE opcode +intern_get_irn_opcode (const ir_node *node) +{ + assert (k_ir_node == get_kind(node)); + assert (node -> op); + return node->op->code; +} + + + # endif /* _IRNODE_T_H_ */ diff --git a/ir/ir/iropt.c b/ir/ir/iropt.c index d8d9ac0ad..8909f6ec6 100644 --- a/ir/ir/iropt.c +++ b/ir/ir/iropt.c @@ -35,7 +35,7 @@ static INLINE ir_node * follow_Id (ir_node *n) { - while (get_irn_op (n) == op_Id) n = get_Id_pred (n); + while (intern_get_irn_op (n) == op_Id) n = get_Id_pred (n); return n; } @@ -45,7 +45,7 @@ follow_Id (ir_node *n) static INLINE tarval * value_of (ir_node *n) { - if ((n != NULL) && (get_irn_op(n) == op_Const)) + if ((n != NULL) && (intern_get_irn_op(n) == op_Const)) return get_Const_tarval(n); /* might return tarval_bad */ else return tarval_bad; @@ -73,8 +73,8 @@ static tarval *computed_value_Add(ir_node *n) tarval *tb = value_of(b); if ((ta != tarval_bad) && (tb != tarval_bad) - && (get_irn_mode(a) == get_irn_mode(b)) - && !(get_mode_sort(get_irn_mode(a)) == irms_reference)) { + && (intern_get_irn_mode(a) == intern_get_irn_mode(b)) + && !(get_mode_sort(intern_get_irn_mode(a)) == irms_reference)) { return tarval_add(ta, tb); } return tarval_bad; @@ -89,8 +89,8 @@ static tarval *computed_value_Sub(ir_node *n) tarval *tb = value_of(b); if ((ta != tarval_bad) && (tb != tarval_bad) - && (get_irn_mode(a) == get_irn_mode(b)) - && !(get_mode_sort(get_irn_mode(a)) == irms_reference)) { + && (intern_get_irn_mode(a) == intern_get_irn_mode(b)) + && !(get_mode_sort(intern_get_irn_mode(a)) == irms_reference)) { return tarval_sub(ta, tb); } return tarval_bad; @@ -101,7 +101,7 @@ static tarval *computed_value_Minus(ir_node *n) ir_node *a = get_Minus_op(n); tarval *ta = value_of(a); - if ((ta != tarval_bad) && mode_is_signed(get_irn_mode(a))) + if ((ta != tarval_bad) && mode_is_signed(intern_get_irn_mode(a))) return tarval_neg(ta); return tarval_bad; @@ -115,7 +115,7 @@ static tarval *computed_value_Mul(ir_node *n) tarval *ta = value_of(a); tarval *tb = value_of(b); - if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) { + if ((ta != tarval_bad) && (tb != tarval_bad) && (intern_get_irn_mode(a) == intern_get_irn_mode(b))) { return tarval_mul(ta, tb); } else { /* a*0 = 0 or 0*b = 0: @@ -142,7 +142,7 @@ static tarval *computed_value_Quot(ir_node *n) tarval *tb = value_of(b); /* This was missing in original implementation. Why? */ - if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) { + if ((ta != tarval_bad) && (tb != tarval_bad) && (intern_get_irn_mode(a) == intern_get_irn_mode(b))) { if (tb != get_mode_null(get_tarval_mode(tb))) /* div by zero: return tarval_bad */ return tarval_quo(ta, tb); } @@ -158,7 +158,7 @@ static tarval *computed_value_Div(ir_node *n) tarval *tb = value_of(b); /* This was missing in original implementation. Why? 
*/ - if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) { + if ((ta != tarval_bad) && (tb != tarval_bad) && (intern_get_irn_mode(a) == intern_get_irn_mode(b))) { if (tb != get_mode_null(get_tarval_mode(tb))) /* div by zero: return tarval_bad */ return tarval_div(ta, tb); } @@ -174,7 +174,7 @@ static tarval *computed_value_Mod(ir_node *n) tarval *tb = value_of(b); /* This was missing in original implementation. Why? */ - if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) { + if ((ta != tarval_bad) && (tb != tarval_bad) && (intern_get_irn_mode(a) == intern_get_irn_mode(b))) { if (tb != get_mode_null(get_tarval_mode(tb))) /* div by zero: return tarval_bad */ return tarval_mod(ta, tb); } @@ -320,7 +320,7 @@ static tarval *computed_value_Conv(ir_node *n) tarval *ta = value_of(a); if (ta != tarval_bad) - return tarval_convert_to(ta, get_irn_mode(n)); + return tarval_convert_to(ta, intern_get_irn_mode(n)); return tarval_bad; } @@ -344,7 +344,7 @@ static tarval *computed_value_Proj(ir_node *n) 3. The predecessors are Allocs or void* constants. Allocs never return NULL, they raise an exception. Therefore we can predict the Cmp result. */ - if (get_irn_op(a) == op_Cmp) { + if (intern_get_irn_op(a) == op_Cmp) { aa = get_Cmp_left(a); ab = get_Cmp_right(a); @@ -368,34 +368,34 @@ static tarval *computed_value_Proj(ir_node *n) ir_node *aba = skip_nop(skip_Proj(ab)); if ( ( (/* aa is ProjP and aaa is Alloc */ - (get_irn_op(aa) == op_Proj) - && (mode_is_reference(get_irn_mode(aa))) - && (get_irn_op(aaa) == op_Alloc)) + (intern_get_irn_op(aa) == op_Proj) + && (mode_is_reference(intern_get_irn_mode(aa))) + && (intern_get_irn_op(aaa) == op_Alloc)) && ( (/* ab is constant void */ - (get_irn_op(ab) == op_Const) - && (mode_is_reference(get_irn_mode(ab))) - && (get_Const_tarval(ab) == get_mode_null(get_irn_mode(ab)))) + (intern_get_irn_op(ab) == op_Const) + && (mode_is_reference(intern_get_irn_mode(ab))) + && (get_Const_tarval(ab) == get_mode_null(intern_get_irn_mode(ab)))) || (/* ab is other Alloc */ - (get_irn_op(ab) == op_Proj) - && (mode_is_reference(get_irn_mode(ab))) - && (get_irn_op(aba) == op_Alloc) + (intern_get_irn_op(ab) == op_Proj) + && (mode_is_reference(intern_get_irn_mode(ab))) + && (intern_get_irn_op(aba) == op_Alloc) && (aaa != aba)))) || (/* aa is void and aba is Alloc */ - (get_irn_op(aa) == op_Const) - && (mode_is_reference(get_irn_mode(aa))) - && (get_Const_tarval(aa) == get_mode_null(get_irn_mode(aa))) - && (get_irn_op(ab) == op_Proj) - && (mode_is_reference(get_irn_mode(ab))) - && (get_irn_op(aba) == op_Alloc))) + (intern_get_irn_op(aa) == op_Const) + && (mode_is_reference(intern_get_irn_mode(aa))) + && (get_Const_tarval(aa) == get_mode_null(intern_get_irn_mode(aa))) + && (intern_get_irn_op(ab) == op_Proj) + && (mode_is_reference(intern_get_irn_mode(ab))) + && (intern_get_irn_op(aba) == op_Alloc))) /* 3.: */ return new_tarval_from_long (get_Proj_proj(n) & Ne, mode_b); } } - } else if (get_irn_op(a) == op_DivMod) { + } else if (intern_get_irn_op(a) == op_DivMod) { tarval *tb = value_of(b = get_DivMod_right(a)); tarval *ta = value_of(a = get_DivMod_left(a)); - if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) { + if ((ta != tarval_bad) && (tb != tarval_bad) && (intern_get_irn_mode(a) == intern_get_irn_mode(b))) { if (tb == get_mode_null(get_tarval_mode(tb))) /* div by zero: return tarval_bad */ return tarval_bad; if (get_Proj_proj(n)== 0) /* Div */ @@ -467,11 +467,11 @@ different_identity 
(ir_node *a, ir_node *b) assert (mode_is_reference(get_irn_mode (a)) && mode_is_reference(get_irn_mode (b))); - if (get_irn_op (a) == op_Proj && get_irn_op(b) == op_Proj) { + if (intern_get_irn_op (a) == op_Proj && intern_get_irn_op(b) == op_Proj) { ir_node *a1 = get_Proj_pred (a); ir_node *b1 = get_Proj_pred (b); - if (a1 != b1 && get_irn_op (a1) == op_Alloc - && get_irn_op (b1) == op_Alloc) + if (a1 != b1 && intern_get_irn_op (a1) == op_Alloc + && intern_get_irn_op (b1) == op_Alloc) return 1; } return 0; @@ -493,7 +493,7 @@ static ir_node *equivalent_node_Block(ir_node *n) But what about Phi-cycles with the Phi0/Id that could not be resolved? Remaining Phi nodes are just Ids. */ if ((get_Block_n_cfgpreds(n) == 1) && - (get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp) && + (intern_get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp) && (get_opt_control_flow_straightening())) { n = get_nodes_Block(get_Block_cfgpred(n, 0)); DBG_OPT_STG; @@ -504,11 +504,11 @@ static ir_node *equivalent_node_Block(ir_node *n) ir_node *a = get_Block_cfgpred(n, 0); ir_node *b = get_Block_cfgpred(n, 1); - if ((get_irn_op(a) == op_Proj) && - (get_irn_op(b) == op_Proj) && + if ((intern_get_irn_op(a) == op_Proj) && + (intern_get_irn_op(b) == op_Proj) && (get_Proj_pred(a) == get_Proj_pred(b)) && - (get_irn_op(get_Proj_pred(a)) == op_Cond) && - (get_irn_mode(get_Cond_selector(get_Proj_pred(a))) == mode_b)) { + (intern_get_irn_op(get_Proj_pred(a)) == op_Cond) && + (intern_get_irn_mode(get_Cond_selector(get_Proj_pred(a))) == mode_b)) { /* Also a single entry Block following a single exit Block. Phis have twice the same operand and will be optimized away. */ n = get_nodes_Block(a); DBG_OPT_IFSIM; @@ -642,7 +642,7 @@ static ir_node *equivalent_node_symmetric_unop(ir_node *n) ir_node *oldn = n; /* optimize symmetric unop */ - if (get_irn_op(get_unop_op(n)) == get_irn_op(n)) { + if (intern_get_irn_op(get_unop_op(n)) == intern_get_irn_op(n)) { n = get_unop_op(get_unop_op(n)); DBG_OPT_ALGSIM2; } return n; @@ -718,17 +718,17 @@ static ir_node *equivalent_node_Conv(ir_node *n) ir_node *a = get_Conv_op(n); ir_node *b; - ir_mode *n_mode = get_irn_mode(n); - ir_mode *a_mode = get_irn_mode(a); + ir_mode *n_mode = intern_get_irn_mode(n); + ir_mode *a_mode = intern_get_irn_mode(a); if (n_mode == a_mode) { /* No Conv necessary */ n = a; DBG_OPT_ALGSIM3; - } else if (get_irn_op(a) == op_Conv) { /* Conv(Conv(b)) */ + } else if (intern_get_irn_op(a) == op_Conv) { /* Conv(Conv(b)) */ ir_mode *b_mode; b = get_Conv_op(a); - n_mode = get_irn_mode(n); - b_mode = get_irn_mode(b); + n_mode = intern_get_irn_mode(n); + b_mode = intern_get_irn_mode(b); if (n_mode == b_mode) { if (n_mode == mode_b) { @@ -780,12 +780,12 @@ static ir_node *equivalent_node_Phi(ir_node *n) if (n_preds == 2) { ir_node *a = follow_Id (get_Phi_pred(n, 0)); ir_node *b = follow_Id (get_Phi_pred(n, 1)); - if ( (get_irn_op(a) == op_Confirm) - && (get_irn_op(b) == op_Confirm) - && (follow_Id (get_irn_n(a, 0)) == follow_Id(get_irn_n(b, 0))) - && (get_irn_n(a, 1) == get_irn_n (b, 1)) + if ( (intern_get_irn_op(a) == op_Confirm) + && (intern_get_irn_op(b) == op_Confirm) + && follow_Id (intern_get_irn_n(a, 0) == intern_get_irn_n(b, 0)) + && (intern_get_irn_n(a, 1) == intern_get_irn_n (b, 1)) && (a->data.num == (~b->data.num & irpn_True) )) { - return follow_Id (get_irn_n(a, 0)); + return intern_get_irn_n(a, 0); } } #endif @@ -796,7 +796,7 @@ static ir_node *equivalent_node_Phi(ir_node *n) /* skip Id's */ set_Phi_pred(n, i, first_val); if ( (first_val != n) /* not self pointer */ - && 
(get_irn_op(first_val) != op_Bad) /* value not dead */ + && (intern_get_irn_op(first_val) != op_Bad) /* value not dead */ && !(is_Bad (get_Block_cfgpred(block, i))) ) { /* not dead control flow */ break; /* then found first value. */ } @@ -815,7 +815,7 @@ static ir_node *equivalent_node_Phi(ir_node *n) set_Phi_pred(n, i, scnd_val); if ( (scnd_val != n) && (scnd_val != first_val) - && (get_irn_op(scnd_val) != op_Bad) + && (intern_get_irn_op(scnd_val) != op_Bad) && !(is_Bad (get_Block_cfgpred(block, i))) ) { break; } @@ -840,7 +840,7 @@ static ir_node *equivalent_node_Load(ir_node *n) ir_node *a = skip_Proj(get_Load_mem(n)); ir_node *b = get_Load_ptr(n); - if (get_irn_op(a) == op_Store) { + if (intern_get_irn_op(a) == op_Store) { if ( different_identity (b, get_Store_ptr(a))) { /* load and store use different pointers, therefore load needs not take store's memory but the state before. */ @@ -861,12 +861,12 @@ static ir_node *equivalent_node_Store(ir_node *n) ir_node *b = get_Store_ptr(n); ir_node *c = skip_Proj(get_Store_value(n)); - if (get_irn_op(a) == op_Store + if (intern_get_irn_op(a) == op_Store && get_Store_ptr(a) == b && skip_Proj(get_Store_value(a)) == c) { /* We have twice exactly the same store -- a write after write. */ n = a; DBG_OPT_WAW; - } else if (get_irn_op(c) == op_Load + } else if (intern_get_irn_op(c) == op_Load && (a == c || skip_Proj(get_Load_mem(c)) == a) && get_Load_ptr(c) == b ) { /* We just loaded the value from the same memory, i.e., the store @@ -885,7 +885,7 @@ static ir_node *equivalent_node_Proj(ir_node *n) ir_node *a = get_Proj_pred(n); - if ( get_irn_op(a) == op_Tuple) { + if ( intern_get_irn_op(a) == op_Tuple) { /* Remove the Tuple/Proj combination. */ if ( get_Proj_proj(n) <= get_Tuple_n_preds(a) ) { n = get_Tuple_pred(a, get_Proj_proj(n)); DBG_OPT_TUPLE; @@ -893,7 +893,7 @@ static ir_node *equivalent_node_Proj(ir_node *n) assert(0); /* This should not happen! */ n = new_Bad(); } - } else if (get_irn_mode(n) == mode_X && + } else if (intern_get_irn_mode(n) == mode_X && is_Bad(get_nodes_Block(n))) { /* Remove dead control flow -- early gigo. */ n = new_Bad(); @@ -986,15 +986,15 @@ optimize_preds(ir_node *n) { a = get_unop_op(n); } - switch (get_irn_opcode(n)) { + switch (intern_get_irn_opcode(n)) { case iro_Cmp: /* We don't want Cast as input to Cmp. */ - if (get_irn_op(a) == op_Cast) { + if (intern_get_irn_op(a) == op_Cast) { a = get_Cast_op(a); set_Cmp_left(n, a); } - if (get_irn_op(b) == op_Cast) { + if (intern_get_irn_op(b) == op_Cast) { b = get_Cast_op(b); set_Cmp_right(n, b); } @@ -1041,9 +1041,9 @@ static ir_node *transform_node_DivMod(ir_node *n) ir_node *a = get_DivMod_left(n); ir_node *b = get_DivMod_right(n); - ir_mode *mode = get_irn_mode(a); + ir_mode *mode = intern_get_irn_mode(a); - if (!(mode_is_int(mode) && mode_is_int(get_irn_mode(b)))) + if (!(mode_is_int(mode) && mode_is_int(intern_get_irn_mode(b)))) return n; if (a == b) { @@ -1096,7 +1096,7 @@ static ir_node *transform_node_Cond(ir_node *n) tarval *ta = value_of(a); if ((ta != tarval_bad) && - (get_irn_mode(a) == mode_b) && + (intern_get_irn_mode(a) == mode_b) && (get_opt_unreachable_code())) { /* It's a boolean Cond, branching on a boolean constant. Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */ @@ -1112,7 +1112,7 @@ static ir_node *transform_node_Cond(ir_node *n) /* We might generate an endless loop, so keep it alive. 
*/ add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n)); } else if ((ta != tarval_bad) && - (get_irn_mode(a) == mode_Iu) && + (intern_get_irn_mode(a) == mode_Iu) && (get_Cond_kind(n) == dense) && (get_opt_unreachable_code())) { /* I don't want to allow Tuples smaller than the biggest Proj. @@ -1122,15 +1122,15 @@ static ir_node *transform_node_Cond(ir_node *n) set_irn_link(n, new_r_Jmp(current_ir_graph, get_nodes_Block(n))); /* We might generate an endless loop, so keep it alive. */ add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n)); - } else if ((get_irn_op(a) == op_Eor) - && (get_irn_mode(a) == mode_b) + } else if ((intern_get_irn_op(a) == op_Eor) + && (intern_get_irn_mode(a) == mode_b) && (tarval_classify(computed_value(get_Eor_right(a))) == TV_CLASSIFY_ONE)) { /* The Eor is a negate. Generate a new Cond without the negate, simulate the negate by exchanging the results. */ set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_Block(n), get_Eor_left(a))); - } else if ((get_irn_op(a) == op_Not) - && (get_irn_mode(a) == mode_b)) { + } else if ((intern_get_irn_op(a) == op_Not) + && (intern_get_irn_mode(a) == mode_b)) { /* A Not before the Cond. Generate a new Cond without the Not, simulate the Not by exchanging the results. */ set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_Block(n), @@ -1144,15 +1144,15 @@ static ir_node *transform_node_Eor(ir_node *n) ir_node *a = get_Eor_left(n); ir_node *b = get_Eor_right(n); - if ((get_irn_mode(n) == mode_b) - && (get_irn_op(a) == op_Proj) - && (get_irn_mode(a) == mode_b) + if ((intern_get_irn_mode(n) == mode_b) + && (intern_get_irn_op(a) == op_Proj) + && (intern_get_irn_mode(a) == mode_b) && (tarval_classify (computed_value (b)) == TV_CLASSIFY_ONE) - && (get_irn_op(get_Proj_pred(a)) == op_Cmp)) + && (intern_get_irn_op(get_Proj_pred(a)) == op_Cmp)) /* The Eor negates a Cmp. The Cmp has the negated result anyways! */ n = new_r_Proj(current_ir_graph, get_nodes_Block(n), get_Proj_pred(a), mode_b, get_negated_pnc(get_Proj_proj(a))); - else if ((get_irn_mode(n) == mode_b) + else if ((intern_get_irn_mode(n) == mode_b) && (tarval_classify (computed_value (b)) == TV_CLASSIFY_ONE)) /* The Eor is a Not. Replace it by a Not. */ /* ????!!!Extend to bitfield 1111111. */ @@ -1165,10 +1165,10 @@ static ir_node *transform_node_Not(ir_node *n) { ir_node *a = get_Not_op(n); - if ( (get_irn_mode(n) == mode_b) - && (get_irn_op(a) == op_Proj) - && (get_irn_mode(a) == mode_b) - && (get_irn_op(get_Proj_pred(a)) == op_Cmp)) + if ( (intern_get_irn_mode(n) == mode_b) + && (intern_get_irn_op(a) == op_Proj) + && (intern_get_irn_mode(a) == mode_b) + && (intern_get_irn_op(get_Proj_pred(a)) == op_Cmp)) /* We negate a Cmp. The Cmp has the negated result anyways! 
*/ n = new_r_Proj(current_ir_graph, get_nodes_Block(n), get_Proj_pred(a), mode_b, get_negated_pnc(get_Proj_proj(a))); @@ -1329,23 +1329,23 @@ vt_cmp (const void *elt, const void *key) if (a == b) return 0; - if ((get_irn_op(a) != get_irn_op(b)) || - (get_irn_mode(a) != get_irn_mode(b))) return 1; + if ((intern_get_irn_op(a) != intern_get_irn_op(b)) || + (intern_get_irn_mode(a) != intern_get_irn_mode(b))) return 1; /* compare if a's in and b's in are equal */ - irn_arity_a = get_irn_arity (a); - if (irn_arity_a != get_irn_arity(b)) + irn_arity_a = intern_get_irn_arity (a); + if (irn_arity_a != intern_get_irn_arity(b)) return 1; /* for block-local cse and pinned nodes: */ - if (!get_opt_global_cse() || (get_op_pinned(get_irn_op(a)) == pinned)) { - if (get_irn_n(a, -1) != get_irn_n(b, -1)) + if (!get_opt_global_cse() || (get_op_pinned(intern_get_irn_op(a)) == pinned)) { + if (intern_get_irn_n(a, -1) != intern_get_irn_n(b, -1)) return 1; } /* compare a->in[0..ins] with b->in[0..ins] */ for (i = 0; i < irn_arity_a; i++) - if (get_irn_n(a, i) != get_irn_n(b, i)) + if (intern_get_irn_n(a, i) != intern_get_irn_n(b, i)) return 1; /* @@ -1368,17 +1368,17 @@ ir_node_hash (ir_node *node) int i, irn_arity; /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */ - h = irn_arity = get_irn_arity(node); + h = irn_arity = intern_get_irn_arity(node); /* consider all in nodes... except the block. */ for (i = 0; i < irn_arity; i++) { - h = 9*h + (unsigned long)get_irn_n(node, i); + h = 9*h + (unsigned long)intern_get_irn_n(node, i); } /* ...mode,... */ - h = 9*h + (unsigned long) get_irn_mode (node); + h = 9*h + (unsigned long) intern_get_irn_mode (node); /* ...and code */ - h = 9*h + (unsigned long) get_irn_op (node); + h = 9*h + (unsigned long) intern_get_irn_op (node); return h; } @@ -1408,7 +1408,7 @@ identify (pset *value_table, ir_node *n) /* TODO: use a generic commutative attribute */ if (get_opt_reassociation()) { - if (is_op_commutative(get_irn_op(n))) { + if (is_op_commutative(intern_get_irn_op(n))) { /* for commutative operators perform a OP b == b OP a */ if (get_binop_left(n) > get_binop_right(n)) { ir_node *h = get_binop_left(n); @@ -1433,7 +1433,7 @@ static INLINE ir_node * identify_cons (pset *value_table, ir_node *n) { ir_node *old = n; n = identify(value_table, n); - if (get_irn_n(old, -1) != get_irn_n(n, -1)) + if (intern_get_irn_n(old, -1) != intern_get_irn_n(n, -1)) set_irg_pinned(current_ir_graph, floats); return n; } @@ -1471,17 +1471,17 @@ static INLINE ir_node * gigo (ir_node *node) { int i, irn_arity; - ir_op* op = get_irn_op(node); + ir_op* op = intern_get_irn_op(node); /* remove garbage blocks by looking at control flow that leaves the block and replacing the control flow by Bad. */ - if (get_irn_mode(node) == mode_X) { + if (intern_get_irn_mode(node) == mode_X) { ir_node *block = get_nodes_block(node); if (op == op_End) return node; /* Don't optimize End, may have Bads. */ - if (get_irn_op(block) == op_Block && get_Block_matured(block)) { - irn_arity = get_irn_arity(block); + if (intern_get_irn_op(block) == op_Block && get_Block_matured(block)) { + irn_arity = intern_get_irn_arity(block); for (i = 0; i < irn_arity; i++) { - if (!is_Bad(get_irn_n(block, i))) break; + if (!is_Bad(intern_get_irn_n(block, i))) break; } if (i == irn_arity) return new_Bad(); } @@ -1490,9 +1490,9 @@ gigo (ir_node *node) /* Blocks, Phis and Tuples may have dead inputs, e.g., if one of the blocks predecessors is dead. 
*/ if ( op != op_Block && op != op_Phi && op != op_Tuple) { - irn_arity = get_irn_arity(node); + irn_arity = intern_get_irn_arity(node); for (i = -1; i < irn_arity; i++) { - if (is_Bad(get_irn_n(node, i))) { + if (is_Bad(intern_get_irn_n(node, i))) { return new_Bad(); } } @@ -1503,9 +1503,9 @@ gigo (ir_node *node) /* If Block has only Bads as predecessors it's garbage. */ /* If Phi has only Bads as predecessors it's garbage. */ if ((op == op_Block && get_Block_matured(node)) || op == op_Phi) { - irn_arity = get_irn_arity(node); + irn_arity = intern_get_irn_arity(node); for (i = 0; i < irn_arity; i++) { - if (!is_Bad(get_irn_n(node, i))) break; + if (!is_Bad(intern_get_irn_n(node, i))) break; } if (i == irn_arity) node = new_Bad(); } @@ -1524,7 +1524,7 @@ optimize_node (ir_node *n) { tarval *tv; ir_node *old_n = n; - opcode iro = get_irn_opcode(n); + opcode iro = intern_get_irn_opcode(n); /* Allways optimize Phi nodes: part of the construction. */ if ((!get_opt_optimize()) && (iro != iro_Phi)) return n; @@ -1532,10 +1532,10 @@ optimize_node (ir_node *n) /* constant expression evaluation / constant folding */ if (get_opt_constant_folding()) { /* constants can not be evaluated */ - if (get_irn_op(n) != op_Const) { + if (intern_get_irn_op(n) != op_Const) { /* try to evaluate */ tv = computed_value (n); - if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) { + if ((intern_get_irn_mode(n) != mode_T) && (tv != tarval_bad)) { /* evaluation was succesful -- replace the node. */ obstack_free (current_ir_graph->obst, n); return new_Const (get_tarval_mode (tv), tv); @@ -1568,7 +1568,7 @@ optimize_node (ir_node *n) /* Some more constant expression evaluation that does not allow to free the node. */ - iro = get_irn_opcode(n); + iro = intern_get_irn_opcode(n); if (get_opt_constant_folding() || (iro == iro_Cond) || (iro == iro_Proj)) /* Flags tested local. */ @@ -1579,7 +1579,7 @@ optimize_node (ir_node *n) n = gigo (n); /* Now we have a legal, useful node. Enter it in hash table for cse */ - if (get_opt_cse() && (get_irn_opcode(n) != iro_Block)) { + if (get_opt_cse() && (intern_get_irn_opcode(n) != iro_Block)) { n = identify_remember (current_ir_graph->value_table, n); } @@ -1597,9 +1597,9 @@ optimize_in_place_2 (ir_node *n) { tarval *tv; ir_node *old_n = n; - opcode iro = get_irn_opcode(n); + opcode iro = intern_get_irn_opcode(n); - if (!get_opt_optimize() && (get_irn_op(n) != op_Phi)) return n; + if (!get_opt_optimize() && (intern_get_irn_op(n) != op_Phi)) return n; /* if not optimize return n */ if (n == NULL) { @@ -1615,7 +1615,7 @@ optimize_in_place_2 (ir_node *n) if (iro != iro_Const) { /* try to evaluate */ tv = computed_value (n); - if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) { + if ((intern_get_irn_mode(n) != mode_T) && (tv != tarval_bad)) { /* evaluation was succesful -- replace the node. */ n = new_Const (get_tarval_mode (tv), tv); __dbg_info_merge_pair(n, old_n, dbg_const_eval); @@ -1645,7 +1645,7 @@ optimize_in_place_2 (ir_node *n) } /* Some more constant expression evaluation. */ - iro = get_irn_opcode(n); + iro = intern_get_irn_opcode(n); if (get_opt_constant_folding() || (iro == iro_Cond) || (iro == iro_Proj)) /* Flags tested local. */ @@ -1661,7 +1661,7 @@ optimize_in_place_2 (ir_node *n) /* Now we have a legal, useful node. Enter it in hash table for cse. Blocks should be unique anyways. (Except the successor of start: is cse with the start block!) 
*/ - if (get_opt_cse() && (get_irn_opcode(n) != iro_Block)) + if (get_opt_cse() && (intern_get_irn_opcode(n) != iro_Block)) n = identify_remember (current_ir_graph->value_table, n); return n; diff --git a/ir/tr/entity.c b/ir/tr/entity.c index cb73eede0..67912382c 100644 --- a/ir/tr/entity.c +++ b/ir/tr/entity.c @@ -919,7 +919,7 @@ entity *resolve_ent_polymorphy(type *dynamic_class, entity* static_ent) { entity *res = resolve_ent_polymorphy2(dynamic_class, static_ent); if (!res) { printf(" Could not find entity "); DDME(static_ent); - printf(" in class"); DDMT(dynamic_class); + printf(" in "); DDMT(dynamic_class); printf("\n"); dump_entity(static_ent); dump_type(get_entity_owner(static_ent)); -- 2.20.1
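The heart of the speedup is the block of static INLINE intern_* accessors added to irnode_t.h: library-internal call sites are mechanically rewritten from get_irn_op()/get_irn_mode()/get_irn_arity()/get_irn_n()/get_irn_opcode() to their intern_ twins, so the hot getters compile down to a field load, while the public out-of-line functions in irnode.c shrink to one-line wrappers (e.g. get_irn_n just returns intern_get_irn_n(node, n)) and keep the external API and symbols intact. Below is a self-contained sketch of that pattern; all names (demo_node, intern_demo_*, demo_*) are invented for illustration and C99 inline stands in for libfirm's INLINE macro -- this is not libfirm's actual code.

    #include <assert.h>
    #include <stdio.h>

    typedef struct demo_node {
        int opcode;   /* stands in for node->op->code      */
        int arity;    /* stands in for ARR_LEN(node->in)-1 */
    } demo_node;

    /* Private-header part: inlinable twins for library-internal use. */
    static inline int intern_demo_get_opcode(const demo_node *n) {
        assert(n);
        return n->opcode;
    }
    static inline int intern_demo_get_arity(const demo_node *n) {
        assert(n);
        return n->arity;
    }

    /* Public API part: thin out-of-line wrappers keep the old symbols
       available to external users that never see the private header. */
    int demo_get_opcode(const demo_node *n) { return intern_demo_get_opcode(n); }
    int demo_get_arity(const demo_node *n)  { return intern_demo_get_arity(n); }

    int main(void) {
        demo_node n = { 42, 3 };
        /* Internal code calls the inline twin; external code uses the wrapper. */
        printf("opcode %d, arity %d\n",
               intern_demo_get_opcode(&n), demo_get_arity(&n));
        return 0;
    }

This split is why the bulk of the diff is a mechanical rename at call sites: behavior is unchanged, only the call overhead inside the library disappears, and clients linking against the public getters are unaffected.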