#include "irgwalk.h"
#include "ircons.h"
#include "irgmod.h"
-#include "irnode.h"
+#include "irnode_t.h"
#include "irflag_t.h"
#include "dbginfo_t.h"
ir_node *addr = get_atomic_ent_value(inh_meth);
assert(addr && "constant entity without value");
- if (get_irn_op(addr) == op_Const) {
+ if (intern_get_irn_op(addr) == op_Const) {
impl_meth = tarval_to_entity(get_Const_tarval(addr));
} else {
assert(0 && "Complex constant values not supported -- adress of method should be straight constant!");
static void sel_methods_walker(ir_node * node, pmap * ldname_map) {
- if (get_irn_op(node) == op_SymConst) {
+ if (intern_get_irn_op(node) == op_SymConst) {
/* Wenn möglich SymConst-Operation durch Const-Operation
* ersetzen. */
if (get_SymConst_kind(node) == linkage_ptr_info) {
}
}
}
- } else if (get_irn_op(node) == op_Sel &&
+ } else if (intern_get_irn_op(node) == op_Sel &&
is_method_type(get_entity_type(get_Sel_entity(node)))) {
entity * ent = get_Sel_entity(node);
if (get_opt_optimize() && get_opt_dyn_meth_dispatch() &&
- (get_irn_op(skip_Proj(get_Sel_ptr(node))) == op_Alloc)) {
+ (intern_get_irn_op(skip_Proj(get_Sel_ptr(node))) == op_Alloc)) {
ir_node *new_node;
entity *called_ent;
/* We know which method will be called, no dispatch necessary. */
static entity ** NULL_ARRAY = NULL;
entity * ent;
entity ** arr;
- assert(sel && get_irn_op(sel) == op_Sel);
+ assert(sel && intern_get_irn_op(sel) == op_Sel);
ent = get_Sel_entity(sel);
assert(is_method_type(get_entity_type(ent))); /* what else? */
arr = get_entity_link(ent);
}
set_irn_link(node, MARK);
- switch (get_irn_opcode(node)) {
+ switch (intern_get_irn_opcode(node)) {
case iro_Proj: {
/* proj_proj: in einem "sinnvollen" Graphen kommt jetzt ein
* op_Tuple oder ein Knoten, der eine "freie Methode"
* zurückgibt. */
ir_node * pred = get_Proj_pred(node);
if (get_irn_link(pred) != MARK) {
- if (get_irn_op(pred) == op_Tuple) {
+ if (intern_get_irn_op(pred) == op_Tuple) {
callee_ana_proj(get_Tuple_pred(pred, get_Proj_proj(node)), n, methods);
} else {
eset_insert(methods, MARK); /* free method -> unknown */
}
set_irn_link(node, MARK);
- switch (get_irn_opcode(node)) {
+ switch (intern_get_irn_opcode(node)) {
case iro_SymConst:
/* externe Methode (wegen fix_symconst!) */
eset_insert(methods, MARK); /* free method -> unknown */
static void callee_walker(ir_node * call, void * env) {
- if (get_irn_op(call) == op_Call) {
+ if (intern_get_irn_op(call) == op_Call) {
eset * methods = eset_create();
entity * ent;
entity ** arr = NEW_ARR_F(entity *, 0);
return;
}
set_irn_link(node, MARK);
- switch (get_irn_opcode(node)) {
+ switch (intern_get_irn_opcode(node)) {
case iro_Proj: {
/* proj_proj: in einem "sinnvollen" Graphen kommt jetzt ein
* op_Tuple oder ein Knoten, der in "free_ana_walker" behandelt
* wird. */
ir_node * pred = get_Proj_pred(node);
- if (get_irn_link(pred) != MARK && get_irn_op(pred) == op_Tuple) {
+ if (get_irn_link(pred) != MARK && intern_get_irn_op(pred) == op_Tuple) {
free_mark_proj(get_Tuple_pred(pred, get_Proj_proj(node)), n, set);
} else {
/* nothing: da in "free_ana_walker" behandelt. */
return; /* already visited */
}
set_irn_link(node, MARK);
- switch (get_irn_opcode(node)) {
+ switch (intern_get_irn_opcode(node)) {
case iro_Sel: {
entity * ent = get_Sel_entity(node);
if (is_method_type(get_entity_type(ent))) {
/* bereits in einem Zyklus besucht. */
return;
}
- switch (get_irn_opcode(node)) {
+ switch (intern_get_irn_opcode(node)) {
/* special nodes */
case iro_Sel:
case iro_SymConst:
set_irn_link(node, MARK);
for (i = get_Call_arity(node) - 1; i >= 0; --i) {
ir_node * pred = get_Call_param(node, i);
- if (mode_is_reference(get_irn_mode(pred))) {
+ if (mode_is_reference(intern_get_irn_mode(pred))) {
free_mark(pred, set);
}
}
* jemand das Gegenteil implementiert. */
default:
set_irn_link(node, MARK);
- for (i = get_irn_arity(node) - 1; i >= 0; --i) {
+ for (i = intern_get_irn_arity(node) - 1; i >= 0; --i) {
ir_node * pred = get_irn_n(node, i);
- if (mode_is_reference(get_irn_mode(pred))) {
+ if (mode_is_reference(intern_get_irn_mode(pred))) {
free_mark(pred, set);
}
}
* very careful!
*/
static INLINE int *mere_get_backarray(ir_node *n) {
- switch(get_irn_opcode(n)) {
+ switch(intern_get_irn_opcode(n)) {
case iro_Block:
if (!get_Block_matured(n)) return NULL;
if (interprocedural_view && n->attr.block.in_cg) {
INLINE void fix_backedges(struct obstack *obst, ir_node *n) {
- opcode opc = get_irn_opcode(n);
+ opcode opc = intern_get_irn_opcode(n);
int *arr = mere_get_backarray(n);
if (ARR_LEN(arr) == ARR_LEN(get_irn_in(n))-1)
return;
int i;
int *ba = get_backarray (n);
if (ba)
- for (i = 0; i < get_irn_arity(n); i++)
+ for (i = 0; i < intern_get_irn_arity(n); i++)
if (ba[i]) return true;
return false;
}
interprocedural_view = 0;
ba = get_backarray (n);
if (ba)
- for (i = 0; i < get_irn_arity(n); i++)
+ for (i = 0; i < intern_get_irn_arity(n); i++)
ba[i] = 0;
interprocedural_view = 1;
ba = get_backarray (n);
if (ba)
- for (i = 0; i < get_irn_arity(n); i++)
+ for (i = 0; i < intern_get_irn_arity(n); i++)
ba[i] = 0;
interprocedural_view = rem;
}
tmp_dom_info *v;
/* Step 2 */
- irn_arity = get_irn_arity(w->block);
+ irn_arity = intern_get_irn_arity(w->block);
for (j = 0; j < irn_arity; j++) {
ir_node *pred = get_nodes_Block(get_Block_cfgpred(w->block, j));
tmp_dom_info *u;
int i, n_cfg_outs = 0;
assert(bl && (get_irn_op(bl) == op_Block));
for (i = 0; i < (int)bl->out[0]; i++)
- if ((get_irn_mode(bl->out[i+1]) == mode_X) &&
- (get_irn_op(bl->out[i+1]) != op_End)) n_cfg_outs++;
+ if ((intern_get_irn_mode(bl->out[i+1]) == mode_X) &&
+ (intern_get_irn_op(bl->out[i+1]) != op_End)) n_cfg_outs++;
return n_cfg_outs;
}
int i, out_pos = 0;
assert(bl && (get_irn_op(bl) == op_Block));
for (i = 0; i < (int)bl->out[0]; i++)
- if ((get_irn_mode(bl->out[i+1]) == mode_X) &&
- (get_irn_op(bl->out[i+1]) != op_End)) {
+ if ((intern_get_irn_mode(bl->out[i+1]) == mode_X) &&
+ (intern_get_irn_op(bl->out[i+1]) != op_End)) {
if (out_pos == pos) {
ir_node *cfop = bl->out[i+1];
return cfop->out[0+1];
void *env) {
int i;
- assert(get_irn_opcode(bl) == iro_Block);
-
if(get_Block_block_visited(bl) < get_irg_block_visited(current_ir_graph)) {
set_Block_block_visited(bl, get_irg_block_visited(current_ir_graph));
for(i = 0; i < get_Block_n_cfg_outs(bl); i++) {
/* find the corresponding predecessor block. */
ir_node *pred = get_Block_cfg_out(bl, i);
- assert(get_irn_opcode(pred) == iro_Block);
/* recursion */
irg_out_block_walk2(pred, pre, post, env);
}
irg_walk_func *pre, irg_walk_func *post,
void *env) {
- assert((get_irn_op(node) == op_Block) || (get_irn_mode(node) == mode_X));
+ assert((get_irn_op(node) == op_Block) || (intern_get_irn_mode(node) == mode_X));
inc_irg_block_visited(current_ir_graph);
- if (get_irn_mode(node) == mode_X) node = node->out[1];
- assert(get_irn_opcode(node) == iro_Block);
+ if (intern_get_irn_mode(node) == mode_X) node = node->out[1];
irg_out_block_walk2(node, pre, post, env);
set_irn_visited(n, get_irg_visited(current_ir_graph));
n->out = (ir_node **) 1; /* Space for array size. */
- if ((get_irn_op(n) == op_Block)) start = 0; else start = -1;
- irn_arity = get_irn_arity(n);
+ if ((intern_get_irn_op(n) == op_Block)) start = 0; else start = -1;
+ irn_arity = intern_get_irn_arity(n);
res = irn_arity - start +1; /* --1 or --0; 1 for array size. */
for (i = start; i < irn_arity; i++) {
/* Optimize Tuples. They annoy if walking the cfg. */
- succ = skip_Tuple(get_irn_n(n, i));
+ succ = skip_Tuple(intern_get_irn_n(n, i));
set_irn_n(n, i, succ);
/* count outs for successors */
if (get_irn_visited(succ) < get_irg_visited(current_ir_graph))
edge. */
n->out[0] = (ir_node *)0;
- if (get_irn_op(n) == op_Block) start = 0; else start = -1;
- irn_arity = get_irn_arity(n);
+ if (intern_get_irn_op(n) == op_Block) start = 0; else start = -1;
+ irn_arity = intern_get_irn_arity(n);
for (i = start; i < irn_arity; i++) {
- succ = get_irn_n(n, i);
+ succ = intern_get_irn_n(n, i);
/* Recursion */
if (get_irn_visited(succ) < get_irg_visited(current_ir_graph))
free = set_out_edges(succ, free);
if (get_Block_n_cfg_outs(get_irg_start_block(irg))) {
startbl = get_irg_start_block(irg);
for (i = 0; i < get_irn_n_outs(startbl); i++)
- if (get_irn_mode(get_irn_out(startbl, i)) == mode_X)
+ if (intern_get_irn_mode(get_irn_out(startbl, i)) == mode_X)
proj = get_irn_out(startbl, i);
if (get_irn_out(proj, 0) == startbl) {
assert(get_irn_n_outs(proj) == 2);
int *anz = (int *) env, arity, i, start;
ir_node *succ;
- arity = 1 + get_irn_arity(node)
+ arity = 1 + intern_get_irn_arity(node)
+ ((is_Block(node)) ? 0 : 1);
*anz += arity;
start = (is_Block(node)) ? 0 : -1;
- for(i = start; i < get_irn_arity(node); i++)
+ for(i = start; i < intern_get_irn_arity(node); i++)
{
- succ = get_irn_n(node, i);
+ succ = intern_get_irn_n(node, i);
succ->out = (ir_node **)((int)succ->out + 1);
}
}
ir_node *succ;
int start = (!is_Block(node)) ? -1 : 0;
- for(i = start; i < get_irn_arity(node); i++)
+ for(i = start; i < intern_get_irn_arity(node); i++)
{
- succ = get_irn_n(node, i);
+ succ = intern_get_irn_n(node, i);
succ->out[get_irn_n_outs(succ)+1] = node;
succ->out[0] = (ir_node *) (get_irn_n_outs(succ) + 1);
}
/* Also init nodes not visible in intraproc_view. */
/* @@@ init_node is called for too many nodes -- this wastes memory!.
The mem is not lost as its on the obstack. */
- if (get_irn_op(n) == op_Filter) {
+ if (intern_get_irn_op(n) == op_Filter) {
for (i = 0; i < get_Filter_n_cg_preds(n); i++)
init_node(get_Filter_cg_pred(n, i), NULL);
}
- if (get_irn_op(n) == op_Block) {
+ if (intern_get_irn_op(n) == op_Block) {
for (i = 0; i < get_Block_cg_n_cfgpreds(n); i++) {
init_node(get_Block_cg_cfgpred(n, i), NULL);
}
}
/* The following pattern matches only after a call from above pattern. */
- if ((get_irn_op(n) == op_Proj) /*&& (get_Proj_proj(n) == 0)*/) {
+ if ((intern_get_irn_op(n) == op_Proj) /*&& (get_Proj_proj(n) == 0)*/) {
/* @@@ init_node is called for every proj -- this wastes memory!.
The mem is not lost as its on the obstack. */
ir_node *cb = get_Proj_pred(n);
- if ((get_irn_op(cb) == op_CallBegin) ||
- (get_irn_op(cb) == op_EndReg) ||
- (get_irn_op(cb) == op_EndExcept)) {
+ if ((intern_get_irn_op(cb) == op_CallBegin) ||
+ (intern_get_irn_op(cb) == op_EndReg) ||
+ (intern_get_irn_op(cb) == op_EndExcept)) {
init_node(cb, NULL);
init_node(get_nodes_Block(cb), NULL);
}
static bool is_outermost_Start(ir_node *n) {
/* Test whether this is the outermost Start node. If so
recursion must end. */
- if ((get_irn_op(n) == op_Block) &&
+ if ((intern_get_irn_op(n) == op_Block) &&
(get_Block_n_cfgpreds(n) == 1) &&
- (get_irn_op(skip_Proj(get_Block_cfgpred(n, 0))) == op_Start) &&
+ (intern_get_irn_op(skip_Proj(get_Block_cfgpred(n, 0))) == op_Start) &&
(get_nodes_Block(skip_Proj(get_Block_cfgpred(n, 0))) == n)) {
return true;
}
not possible in interprocedural view as outermost_graph is
not necessarily the only with a dead-end start block.
Besides current_ir_graph is not set properly. */
- if ((get_irn_op(n) == op_Block) &&
+ if ((intern_get_irn_op(n) == op_Block) &&
(n == get_irg_start_block(current_ir_graph))) {
if ((!interprocedural_view) ||
(current_ir_graph == outermost_ir_graph))
/* Don't walk from nodes to blocks except for Control flow operations. */
static INLINE int
get_start_index(ir_node *n) {
- if (is_cfop(n) || is_fragile_op(n) || get_irn_op(n) == op_Start)
+ if (is_cfop(n) || is_fragile_op(n) || intern_get_irn_op(n) == op_Start)
return -1;
else
return 0;
if (interprocedural_view) {
/* Only Filter and Block nodes can have predecessors in other graphs. */
- if (get_irn_op(n) == op_Filter)
+ if (intern_get_irn_op(n) == op_Filter)
n = get_nodes_Block(n);
- if (get_irn_op(n) == op_Block) {
+ if (intern_get_irn_op(n) == op_Block) {
ir_node *cfop = skip_Proj(get_Block_cfgpred(n, index));
if (is_ip_cfop(cfop)) {
current_ir_graph = get_irn_irg(cfop);
m = stack[i];
/*printf(" Visiting %d ", i); DDMN(m);*/
if (is_ip_cfop(m)) {
- current_ir_graph = get_irn_irg(m);
- break;
+ current_ir_graph = get_irn_irg(m);
+ break;
}
- if (get_irn_op(m) == op_Filter) {
- /* Find the corresponding ip_cfop */
- ir_node *pred = stack[i+1];
- int j;
- for (j = 0; j < get_Filter_n_cg_preds(m); j++)
- if (get_Filter_cg_pred(m, j) == pred) break;
- if (j >= get_Filter_n_cg_preds(m))
- /* It is a filter we didn't pass as the predecessors are marked. */
- continue;
- assert(get_Filter_cg_pred(m, j) == pred);
- switch_irg(m, j);
- break;
+ if (intern_get_irn_op(m) == op_Filter) {
+ /* Find the corresponding ip_cfop */
+ ir_node *pred = stack[i+1];
+ int j;
+ for (j = 0; j < get_Filter_n_cg_preds(m); j++)
+ if (get_Filter_cg_pred(m, j) == pred) break;
+ if (j >= get_Filter_n_cg_preds(m))
+ /* It is a filter we didn't pass as the predecessors are marked. */
+ continue;
+ assert(get_Filter_cg_pred(m, j) == pred);
+ switch_irg(m, j);
+ break;
}
}
}
/* Test for legal loop header: Block, Phi, ... */
INLINE static bool is_possible_loop_head(ir_node *n) {
- ir_op *op = get_irn_op(n);
+ ir_op *op = intern_get_irn_op(n);
return ((op == op_Block) ||
(op == op_Phi) ||
((op == op_Filter) && interprocedural_view));
/* Test for legal loop header: Block, Phi, ... */
if (!is_possible_loop_head(n))
return false;
+
if (!is_outermost_Start(n)) {
- arity = get_irn_arity(n);
+ arity = intern_get_irn_arity(n);
for (i = get_start_index(n); i < arity; i++) {
- ir_node *pred = get_irn_n(n, i);
+ ir_node *pred = intern_get_irn_n(n, i);
assert(pred);
if (is_backedge(n, i)) continue;
if (!irn_is_in_stack(pred)) {
int i, index = -2, min = -1;
if (!is_outermost_Start(n)) {
- int arity = get_irn_arity(n);
+ int arity = intern_get_irn_arity(n);
for (i = get_start_index(n); i < arity; i++) {
- ir_node *pred = get_irn_n(n, i);
+ ir_node *pred = intern_get_irn_n(n, i);
assert(pred);
if (is_backedge(n, i) || !irn_is_in_stack(pred)) continue;
if (get_irn_dfn(pred) >= limit && (min == -1 || get_irn_dfn(pred) < min)) {
int i, index = -2, max = -1;
if (!is_outermost_Start(n)) {
- int arity = get_irn_arity(n);
+ int arity = intern_get_irn_arity(n);
for (i = get_start_index(n); i < arity; i++) {
- ir_node *pred = get_irn_n(n, i);
+ ir_node *pred = intern_get_irn_n(n, i);
if (is_backedge (n, i) || !irn_is_in_stack(pred)) continue;
if (get_irn_dfn(pred) > max) {
index = i;
assert (res_index > -2);
set_backedge (m, res_index);
- return is_outermost_Start(n) ? NULL : get_irn_n(m, res_index);
+ return is_outermost_Start(n) ? NULL : intern_get_irn_n(m, res_index);
}
so is_backedge does not access array[-1] but correctly returns false! */
if (!is_outermost_Start(n)) {
- int arity = get_irn_arity(n);
+ int arity = intern_get_irn_arity(n);
for (i = get_start_index(n); i < arity; i++) {
ir_node *m;
if (is_backedge(n, i)) continue;
- m = get_irn_n(n, i); /* get_irn_ip_pred(n, i); */
- //if ((!m) || (get_irn_op(m) == op_Unknown)) continue;
+ m = intern_get_irn_n(n, i); /* get_irn_ip_pred(n, i); */
+ //if ((!m) || (intern_get_irn_op(m) == op_Unknown)) continue;
scc (m);
if (irn_is_in_stack(m)) {
/* Uplink of m is smaller if n->m is a backedge.
# include "irtypeinfo.h"
# include "irsimpletype.h"
+# include "irnode_t.h"
# include "irprog.h"
# include "irgwalk.h"
# include "ident.h"
static type *find_type_for_Proj(ir_node *n) {
type *tp;
ir_node *pred = skip_Tuple(get_Proj_pred(n));
- ir_mode *m = get_irn_mode(n);
+ ir_mode *m = intern_get_irn_mode(n);
if (m == mode_T ||
m == mode_BB ||
m == mode_b )
return none_type;
- switch(get_irn_opcode(pred)) {
+ switch(intern_get_irn_opcode(pred)) {
case iro_Proj: {
ir_node *pred_pred;
/* Deal with Start / Call here: we need to know the Proj Nr. */
assert(get_irn_mode(pred) == mode_T);
pred_pred = get_Proj_pred(pred);
- if (get_irn_op(pred_pred) == op_Start) {
+ if (intern_get_irn_op(pred_pred) == op_Start) {
type *mtp = get_entity_type(get_irg_ent(get_Start_irg(pred_pred)));
tp = get_method_param_type(mtp, get_Proj_proj(n));
- } else if (get_irn_op(pred_pred) == op_Call) {
+ } else if (intern_get_irn_op(pred_pred) == op_Call) {
type *mtp = get_Call_type(pred_pred);
tp = get_method_res_type(mtp, get_Proj_proj(n));
} else {
tp2 = compute_irn_type(b);
}
- switch(get_irn_opcode(n)) {
+ switch(intern_get_irn_opcode(n)) {
case iro_InstOf: {
assert(0 && "op_InstOf not supported");
} break;
case iro_Load: {
ir_node *a = get_Load_ptr(n);
- if (get_irn_op(a) == op_Sel)
+ if (intern_get_irn_op(a) == op_Sel)
tp = get_entity_type(get_Sel_entity(a));
- else if ((get_irn_op(a) == op_Const) &&
+ else if ((intern_get_irn_op(a) == op_Const) &&
(tarval_is_entity(get_Const_tarval(a))))
tp = get_entity_type(tarval_to_entity(get_Const_tarval(a)));
else if (is_pointer_type(compute_irn_type(a))) {
/* catch special cases with fallthrough to binop/unop cases in default. */
case iro_Sub: {
- if (mode_is_int(get_irn_mode(n)) &&
- mode_is_reference(get_irn_mode(a)) &&
- mode_is_reference(get_irn_mode(b)) ) {
+ if (mode_is_int(intern_get_irn_mode(n)) &&
+ mode_is_reference(intern_get_irn_mode(a)) &&
+ mode_is_reference(intern_get_irn_mode(b)) ) {
VERBOSE_UNKNOWN_TYPE(("Sub %ld ptr - ptr = int: unknown type\n", get_irn_node_nr(n)));
tp = unknown_type; break;
}
} /* fall through to Add. */
case iro_Add: {
- if (mode_is_reference(get_irn_mode(n)) &&
- mode_is_reference(get_irn_mode(a)) &&
- mode_is_int(get_irn_mode(b)) ) {
+ if (mode_is_reference(intern_get_irn_mode(n)) &&
+ mode_is_reference(intern_get_irn_mode(a)) &&
+ mode_is_int(intern_get_irn_mode(b)) ) {
tp = tp1; break;
}
- if (mode_is_reference(get_irn_mode(n)) &&
- mode_is_int(get_irn_mode(a)) &&
- mode_is_reference(get_irn_mode(b)) ) {
+ if (mode_is_reference(intern_get_irn_mode(n)) &&
+ mode_is_int(intern_get_irn_mode(a)) &&
+ mode_is_reference(intern_get_irn_mode(b)) ) {
tp = tp2; break;
}
goto default_code;
} break;
case iro_Mul: {
- if (get_irn_mode(n) != get_irn_mode(a)) {
+ if (intern_get_irn_mode(n) != intern_get_irn_mode(a)) {
VERBOSE_UNKNOWN_TYPE(("Mul %ld int1 * int1 = int2: unknown type\n", get_irn_node_nr(n)));
tp = unknown_type; break;
}
#include "firmwalk.h"
#include "pmap.h"
#include "entity.h"
+#include "irnode_t.h"
#include "irprog.h"
#include "irgwalk.h"
#include "array.h"
void fw_collect_irn(ir_node *irn, void *env)
{
fw_data *data;
- ir_mode* mode = get_irn_mode(irn);
+ ir_mode* mode = intern_get_irn_mode(irn);
/* The link field will be cleared in the walk_do_mode()
callback function. */
CPPFLAGS += -I$(top_srcdir)/ir/ident -I$(top_srcdir)/ir/ir -I$(top_srcdir)/ir/tv \
-I$(top_srcdir)/ir/tr -I$(top_srcdir)/ir/common -I$(top_srcdir)/ir/ana \
- -I$(top_srcdir)/ir/st
+ -I$(top_srcdir)/ir/st -I$(top_srcdir)/ir/adt
include $(top_srcdir)/MakeTargets
#include "array.h"
#include "irprog.h"
+#include "irnode_t.h"
#include "ircons.h"
#include "irgmod.h"
#include "irgwalk.h"
ir_node * call;
/* Die Call-Knoten sind (mit den Proj-Knoten) am End-Knoten verlinkt! */
for (call = get_irn_link(get_irg_end(irg)); call; call = get_irn_link(call)) {
- if (get_irn_op(call) != op_Call) continue;
+ if (intern_get_irn_op(call) != op_Call) continue;
for (j = get_Call_n_callees(call) - 1; j >= 0; --j) {
entity * ent = get_Call_callee(call, j);
if (ent) {
* (auch bei Proj->Call Operationen) und Phi-Operationen in die Liste ihres
* Grundblocks einfügen. */
static void collect_phicallproj_walker(ir_node * node, ir_node ** call_tail) {
- if (get_irn_op(node) == op_Call) {
+ if (intern_get_irn_op(node) == op_Call) {
/* Die Liste von Call an call_tail anhängen. */
ir_node * link;
assert(get_irn_link(*call_tail) == NULL);
set_irn_link(*call_tail, node);
/* call_tail aktualisieren: */
for (link = get_irn_link(*call_tail); link; *call_tail = link, link = get_irn_link(link)) ;
- } else if (get_irn_op(node) == op_Proj) {
+ } else if (intern_get_irn_op(node) == op_Proj) {
ir_node * head = skip_Proj(get_Proj_pred(node));
set_irn_link(node, get_irn_link(head));
set_irn_link(head, node);
if (head == *call_tail) {
*call_tail = node;
}
- } else if (get_irn_op(node) == op_Phi) {
+ } else if (intern_get_irn_op(node) == op_Phi) {
ir_node * block = get_nodes_Block(node);
set_irn_link(node, get_irn_link(block));
set_irn_link(block, node);
static ir_node * exchange_proj(ir_node * proj) {
ir_node * filter;
assert(get_irn_op(proj) == op_Proj);
- filter = new_Filter(get_Proj_pred(proj), get_irn_mode(proj), get_Proj_proj(proj));
+ filter = new_Filter(get_Proj_pred(proj), intern_get_irn_mode(proj), get_Proj_proj(proj));
/* Die Proj- (Id-) Operation sollte im gleichen Grundblock stehen, wie die
* Filter-Operation. */
set_nodes_Block(proj, get_nodes_Block(filter));
* dass oben für "verschiedene" Proj-Operationen wegen CSE nur eine
* Filter-Operation erzeugt worden sein kann. */
for (link = get_irg_start(irg), proj = get_irn_link(link); proj; proj = get_irn_link(proj)) {
- if (get_irn_op(proj) == op_Id) { /* replaced with filter */
+ if (intern_get_irn_op(proj) == op_Id) { /* replaced with filter */
ir_node * filter = get_Id_pred(proj);
assert(get_irn_op(filter) == op_Filter);
if (filter != link && get_irn_link(filter) == NULL) {
if (data->open) {
set_Block_cg_cfgpred(start_block, 0, get_cg_Unknown(mode_X));
for (proj = get_irn_link(get_irg_start(irg)); proj; proj = get_irn_link(proj)) {
- if (get_irn_op(proj) == op_Filter) {
- set_Filter_cg_pred(proj, 0, get_cg_Unknown(get_irn_mode(proj)));
+ if (intern_get_irn_op(proj) == op_Filter) {
+ set_Filter_cg_pred(proj, 0, get_cg_Unknown(intern_get_irn_mode(proj)));
}
}
data->count = 1;
int n_ret = 0;
for (i = get_Block_n_cfgpreds(end_block) - 1; i >= 0; --i) {
- if (get_irn_op(cfgpred_arr[i]) == op_Return) {
+ if (intern_get_irn_op(cfgpred_arr[i]) == op_Return) {
if (ret_arr) {
ARR_APP1(ir_node *, ret_arr, cfgpred_arr[i]);
} else {
/* In[0] could be a Bad node with wrong mode. */
for (i = n_ret - 1; i >= 0; --i) {
in[i] = get_Return_res(ret_arr[i], j);
- if (!mode && get_irn_mode(in[i]) != mode_T)
- mode = get_irn_mode(in[i]);
+ if (!mode && intern_get_irn_mode(in[i]) != mode_T)
+ mode = intern_get_irn_mode(in[i]);
}
if (mode)
data->res[j] = new_Phi(n_ret, in, mode);
int n_except = 0;
ir_node ** cfgpred_arr = get_Block_cfgpred_arr(end_block);
for (i = get_Block_n_cfgpreds(end_block) - 1; i >= 0; --i) {
- if (get_irn_op(cfgpred_arr[i]) != op_Return) {
+ if (intern_get_irn_op(cfgpred_arr[i]) != op_Return) {
if (except_arr) {
ARR_APP1(ir_node *, except_arr, cfgpred_arr[i]);
} else {
/* mem */
for (i = n_except - 1; i >= 0; --i) {
ir_node * node = skip_Proj(except_arr[i]);
- if (get_irn_op(node) == op_Call) {
+ if (intern_get_irn_op(node) == op_Call) {
in[i] = new_r_Proj(irg, get_nodes_Block(node), node, mode_M, 3);
- } else if (get_irn_op(node) == op_Raise) {
+ } else if (intern_get_irn_op(node) == op_Raise) {
in[i] = new_r_Proj(irg, get_nodes_Block(node), node, mode_M, 1);
} else {
assert(is_fragile_op(node));
int i;
ir_node *proj;
- for (i = get_irn_arity(node) - 1; i >= 0; --i) {
- ir_node * pred = get_irn_n(node, i);
+ for (i = intern_get_irn_arity(node) - 1; i >= 0; --i) {
+ ir_node * pred = intern_get_irn_n(node, i);
if (get_nodes_Block(pred) == from_block) {
move_nodes(from_block, to_block, pred);
}
/* Move projs of this node. */
proj = get_irn_link(node);
for (; proj; proj = skip_Id(get_irn_link(proj))) {
- if (get_irn_op(proj) != op_Proj && get_irn_op(proj) != op_Filter) continue;
- if ((get_nodes_Block(proj) == from_block) && (skip_Proj(get_irn_n(proj, 0)) == node))
+ if (intern_get_irn_op(proj) != op_Proj && intern_get_irn_op(proj) != op_Filter) continue;
+ if ((get_nodes_Block(proj) == from_block) && (skip_Proj(intern_get_irn_n(proj, 0)) == node))
set_nodes_Block(proj, to_block);
}
}
set_Block_cg_cfgpred(get_nodes_Block(start), data->count, exec);
for (filter = get_irn_link(start); filter; filter = get_irn_link(filter)) {
- if (get_irn_op(filter) != op_Filter) continue;
+ if (intern_get_irn_op(filter) != op_Filter) continue;
if (get_Proj_pred(filter) == start) {
switch ((int) get_Proj_proj(filter)) {
case pns_global_store:
/* "frame_base" wird nur durch Unknown dargestellt. Man kann ihn aber
* auch explizit darstellen, wenn sich daraus Vorteile für die
* Datenflussanalyse ergeben. */
- set_Filter_cg_pred(filter, data->count, get_cg_Unknown(get_irn_mode(filter)));
+ set_Filter_cg_pred(filter, data->count, get_cg_Unknown(intern_get_irn_mode(filter)));
break;
case pns_globals:
/* "globals" wird nur durch Unknown dargestellt. Man kann ihn aber auch
* explizit darstellen, wenn sich daraus Vorteile für die
* Datenflussanalyse ergeben. */
- set_Filter_cg_pred(filter, data->count, get_cg_Unknown(get_irn_mode(filter)));
+ set_Filter_cg_pred(filter, data->count, get_cg_Unknown(intern_get_irn_mode(filter)));
break;
default:
/* not reached */
/* Mit CSE könnte man das effizienter machen! Die Methode wird aber für jede
* Aufrufstelle nur ein einziges Mal aufgerufen. */
ir_node * proj;
- for (proj = get_irn_link(call); proj && get_irn_op(proj) == op_Proj; proj = get_irn_link(proj)) {
- if (get_Proj_proj(proj) == 1 && get_irn_op(get_Proj_pred(proj)) == op_Call) {
+ for (proj = get_irn_link(call); proj && intern_get_irn_op(proj) == op_Proj; proj = get_irn_link(proj)) {
+ if (get_Proj_proj(proj) == 1 && intern_get_irn_op(get_Proj_pred(proj)) == op_Call) {
return proj;
}
}
* interprozedurale Vorgänger einfügen. */
set_irg_current_block(current_ir_graph, post_block);
for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
- if (get_irn_op(proj) != op_Proj) continue;
+ if (intern_get_irn_op(proj) != op_Proj) continue;
if (skip_Proj(get_Proj_pred(proj)) != call) continue;
if (get_Proj_pred(proj) == call) {
if (get_Proj_proj(proj) == 0) { /* memory */
set_irn_link(filter, get_irn_link(post_block));
set_irn_link(post_block, filter);
}
- fill_result(get_Proj_proj(filter), n_callees, data, in, get_irn_mode(filter));
+ fill_result(get_Proj_proj(filter), n_callees, data, in, intern_get_irn_mode(filter));
set_Filter_cg_pred_arr(filter, n_callees, in);
}
}
ir_node * node;
current_ir_graph = get_irp_irg(i);
for (node = get_irn_link(get_irg_end(current_ir_graph)); node; node = get_irn_link(node)) {
- if (get_irn_op(node) == op_Call) {
+ if (intern_get_irn_op(node) == op_Call) {
int n_callees = get_Call_n_callees(node);
if (n_callees > 1 || (n_callees == 1 && get_Call_callee(node, 0) != NULL)) {
construct_call(node);
static void destruct_walker(ir_node * node, void * env) {
- if (get_irn_op(node) == op_Block) {
+ if (intern_get_irn_op(node) == op_Block) {
remove_Block_cg_cfgpred_arr(node);
- } else if (get_irn_op(node) == op_Filter) {
+ } else if (intern_get_irn_op(node) == op_Filter) {
set_irg_current_block(current_ir_graph, get_nodes_Block(node));
- exchange(node, new_Proj(get_Filter_pred(node), get_irn_mode(node), get_Filter_proj(node)));
- } else if (get_irn_op(node) == op_Break) {
+ exchange(node, new_Proj(get_Filter_pred(node), intern_get_irn_mode(node), get_Filter_proj(node)));
+ } else if (intern_get_irn_op(node) == op_Break) {
set_irg_current_block(current_ir_graph, get_nodes_Block(node));
exchange(node, new_Jmp());
- } else if (get_irn_op(node) == op_Call) {
+ } else if (intern_get_irn_op(node) == op_Call) {
remove_Call_callee_arr(node);
- } else if (get_irn_op(node) == op_Proj) {
+ } else if (intern_get_irn_op(node) == op_Proj) {
// some ProjX end up in strage blocks.
set_nodes_block(node, get_nodes_block(get_Proj_pred(node)));
}
/* Don't assert that block matured: the use of this constructor is strongly
restricted ... */
if ( get_Block_matured(block) )
- assert( get_irn_arity(block) == arity );
+ assert( intern_get_irn_arity(block) == arity );
res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
- for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
+ for (i = arity-1; i >= 0; i--) if (intern_get_irn_op(in[i]) == op_Unknown) has_unknown = true;
if (!has_unknown) res = optimize_node (res);
irn_vrfy_irg (res, irg);
new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
{
ir_node *res;
- res = new_ir_node (db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
+ res = new_ir_node (db, irg, block, op_Cast, intern_get_irn_mode(op), 1, &op);
res->attr.cast.totype = to_tp;
res = optimize_node (res);
irn_vrfy_irg (res, irg);
in[0] = val;
in[1] = bound;
- res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
+ res = new_ir_node (db, irg, block, op_Confirm, intern_get_irn_mode(val), 2, in);
res->attr.confirm_cmp = cmp;
current_ir_graph->n_loc);
memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
- for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
+ for (i = arity-1; i >= 0; i--) if (intern_get_irn_op(in[i]) == op_Unknown) has_unknown = true;
if (!has_unknown) res = optimize_node (res);
current_ir_graph->current_block = res;
}
static INLINE void
free_to_Phi_in_stack(ir_node *phi) {
- assert(get_irn_opcode(phi) == iro_Phi);
-
if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
current_ir_graph->Phi_in_stack->pos)
ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
if (block->attr.block.matured) { /* case 3 */
/* The Phi has the same amount of ins as the corresponding block. */
- int ins = get_irn_arity(block);
+ int ins = intern_get_irn_arity(block);
ir_node **nin;
NEW_ARR_A (ir_node *, nin, ins);
finished yet. */
opt = get_opt_optimize(); set_optimize(0);
/* Here we rely on the fact that all frag ops have Memory as first result! */
- if (get_irn_op(n) == op_Call)
+ if (intern_get_irn_op(n) == op_Call)
arr[0] = new_Proj(n, mode_M, 3);
else
arr[0] = new_Proj(n, mode_M, 0);
static INLINE ir_node **
get_frag_arr (ir_node *n) {
- if (get_irn_op(n) == op_Call) {
+ if (intern_get_irn_op(n) == op_Call) {
return n->attr.call.frag_arr;
- } else if (get_irn_op(n) == op_Alloc) {
+ } else if (intern_get_irn_op(n) == op_Alloc) {
return n->attr.a.frag_arr;
} else {
return n->attr.frag_arr;
/* There was a set_value after the cfOp and no get_value before that
set_value. We must build a Phi node now. */
if (block->attr.block.matured) {
- int ins = get_irn_arity(block);
+ int ins = intern_get_irn_arity(block);
ir_node **nin;
NEW_ARR_A (ir_node *, nin, ins);
res = phi_merge(block, pos, mode, nin, ins);
assert (prevBlock);
if (!is_Bad(prevBlock)) {
#if PRECISE_EXC_CONTEXT
- if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
+ if (is_fragile_op(prevCfOp) && (intern_get_irn_op (prevCfOp) != op_Bad)) {
assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
} else
if (block->attr.block.matured) { /* case 3 */
/* The Phi has the same amount of ins as the corresponding block. */
- int ins = get_irn_arity(block);
+ int ins = intern_get_irn_arity(block);
ir_node **nin;
NEW_ARR_A (ir_node *, nin, ins);
memop, op1, op2);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
- (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
+ (intern_get_irn_op(res) == op_Quot)) /* Could be optimized away. */
res->attr.frag_arr = new_frag_arr(res);
#endif
memop, op1, op2);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
- (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
+ (intern_get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
res->attr.frag_arr = new_frag_arr(res);
#endif
memop, op1, op2);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
- (get_irn_op(res) == op_Div)) /* Could be optimized away. */
+ (intern_get_irn_op(res) == op_Div)) /* Could be optimized away. */
res->attr.frag_arr = new_frag_arr(res);
#endif
memop, op1, op2);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
- (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
+ (intern_get_irn_op(res) == op_Mod)) /* Could be optimized away. */
res->attr.frag_arr = new_frag_arr(res);
#endif
store, callee, arity, in, tp);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
- (get_irn_op(res) == op_Call)) /* Could be optimized away. */
+ (intern_get_irn_op(res) == op_Call)) /* Could be optimized away. */
res->attr.call.frag_arr = new_frag_arr(res);
#endif
store, addr);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
- (get_irn_op(res) == op_Load)) /* Could be optimized away. */
+ (intern_get_irn_op(res) == op_Load)) /* Could be optimized away. */
res->attr.frag_arr = new_frag_arr(res);
#endif
store, addr, val);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
- (get_irn_op(res) == op_Store)) /* Could be optimized away. */
+ (intern_get_irn_op(res) == op_Store)) /* Could be optimized away. */
res->attr.frag_arr = new_frag_arr(res);
#endif
store, size, alloc_type, where);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
- (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
+ (intern_get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
res->attr.a.frag_arr = new_frag_arr(res);
#endif
char *suffix;
rem = current_ir_graph;
+ printf("comparing %s %s\n", get_irg_dump_name(irg), dump_file_filter);
+
if(strncmp(get_irg_dump_name(irg),dump_file_filter,strlen(dump_file_filter))!=0) return;
current_ir_graph = irg;
int i;
char *suffix;
+ printf("comparing %s %s\n", get_irg_dump_name(irg), dump_file_filter);
if(strncmp(get_irg_dump_name(irg),dump_file_filter,strlen(dump_file_filter))!=0) return;
if (interprocedural_view) suffix = "-ip";
{
assert(node);
set_irn_op(node, op_Tuple);
- if (get_irn_arity(node) == arity) {
+ if (intern_get_irn_arity(node) == arity) {
/* keep old array */
} else {
/* Allocate new array, don't free old in_array, it's on the obstack. */
static void
collect (ir_node *n, void *env) {
ir_node *pred;
- if (get_irn_op(n) == op_Phi) {
+ if (intern_get_irn_op(n) == op_Phi) {
set_irn_link(n, get_irn_link(get_nodes_Block(n)));
set_irn_link(get_nodes_Block(n), n);
}
- if (get_irn_op(n) == op_Proj) {
+ if (intern_get_irn_op(n) == op_Proj) {
pred = n;
- while (get_irn_op(pred) == op_Proj)
+ while (intern_get_irn_op(pred) == op_Proj)
pred = get_Proj_pred(pred);
set_irn_link(n, get_irn_link(pred));
set_irn_link(pred, n);
set_nodes_Block(node, to_bl);
/* move its projs */
- if (get_irn_mode(node) == mode_T) {
+ if (intern_get_irn_mode(node) == mode_T) {
proj = get_irn_link(node);
while (proj) {
if (get_nodes_Block(proj) == from_bl)
}
/* recursion ... */
- if (get_irn_op(node) == op_Phi) return;
+ if (intern_get_irn_op(node) == op_Phi) return;
- for (i = 0; i < get_irn_arity(node); i++) {
+ for (i = 0; i < intern_get_irn_arity(node); i++) {
pred = get_irn_n(node, i);
if (get_nodes_Block(pred) == from_bl)
move(pred, from_bl, to_bl);
int i, irn_arity;
ir_node *optimized, *old;
- irn_arity = get_irn_arity(n);
+ irn_arity = intern_get_irn_arity(n);
for (i = 0; i < irn_arity; i++) {
/* get_irn_n skips Id nodes, so comparison old != optimized does not
show all optimizations. Therefore always set new predecessor. */
- old = get_irn_n(n, i);
+ old = intern_get_irn_intra_n(n, i);
optimized = optimize_in_place_2(old);
set_irn_n(n, i, optimized);
}
- if (get_irn_op(n) == op_Block) {
+ if (intern_get_irn_op(n) == op_Block) {
optimized = optimize_in_place_2(n);
if (optimized != n) exchange (n, optimized);
}
return block_v - irg_v;
} else {
/* compute the number of good predecessors */
- res = irn_arity = get_irn_arity(b);
+ res = irn_arity = intern_get_irn_arity(b);
for (i = 0; i < irn_arity; i++)
- if (get_irn_opcode(get_irn_n(b, i)) == iro_Bad) res--;
+ if (intern_get_irn_opcode(intern_get_irn_n(b, i)) == iro_Bad) res--;
/* save it in the flag. */
set_Block_block_visited(b, irg_v + res);
return res;
/* TODO: add an ir_op operation */
static INLINE void new_backedge_info(ir_node *n) {
- switch(get_irn_opcode(n)) {
+ switch(intern_get_irn_opcode(n)) {
case iro_Block:
n->attr.block.cg_backedge = NULL;
- n->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
+ n->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, intern_get_irn_arity(n));
break;
case iro_Phi:
- n->attr.phi_backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
+ n->attr.phi_backedge = new_backedge_arr(current_ir_graph->obst, intern_get_irn_arity(n));
break;
case iro_Filter:
- n->attr.filter.backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
+ n->attr.filter.backedge = new_backedge_arr(current_ir_graph->obst, intern_get_irn_arity(n));
break;
default: ;
}
the End node. */
//assert(n->op == op_End || ((_ARR_DESCR(n->in))->cookie != ARR_F_MAGIC));
- if (get_irn_opcode(n) == iro_Block) {
+ if (intern_get_irn_opcode(n) == iro_Block) {
block = NULL;
new_arity = compute_new_arity(n);
n->attr.block.graph_arr = NULL;
} else {
block = get_nodes_Block(n);
- if (get_irn_opcode(n) == iro_Phi) {
+ if (intern_get_irn_opcode(n) == iro_Phi) {
new_arity = compute_new_arity(block);
} else {
- new_arity = get_irn_arity(n);
+ new_arity = intern_get_irn_arity(n);
}
}
nn = new_ir_node(get_irn_dbg_info(n),
current_ir_graph,
block,
- get_irn_op(n),
- get_irn_mode(n),
+ intern_get_irn_op(n),
+ intern_get_irn_mode(n),
new_arity,
get_irn_in(n));
/* Copy the attributes. These might point to additional data. If this
/* printf("\n old node: "); DDMSG2(n);
printf(" new node: "); DDMSG2(nn);
- printf(" arities: old: %d, new: %d\n", get_irn_arity(n), get_irn_arity(nn)); */
+ printf(" arities: old: %d, new: %d\n", intern_get_irn_arity(n), intern_get_irn_arity(nn)); */
- if (get_irn_opcode(n) == iro_Block) {
+ if (intern_get_irn_opcode(n) == iro_Block) {
/* Don't copy Bad nodes. */
j = 0;
- irn_arity = get_irn_arity(n);
+ irn_arity = intern_get_irn_arity(n);
for (i = 0; i < irn_arity; i++)
- if (get_irn_opcode(get_irn_n(n, i)) != iro_Bad) {
- set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
+ if (intern_get_irn_opcode(intern_get_irn_n(n, i)) != iro_Bad) {
+ set_irn_n (nn, j, get_new_node(intern_get_irn_n(n, i)));
/*if (is_backedge(n, i)) set_backedge(nn, j);*/
j++;
}
that the fields in ir_graph are set properly. */
if ((get_opt_control_flow_straightening()) &&
(get_Block_n_cfgpreds(nn) == 1) &&
- (get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp))
+ (intern_get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp))
exchange(nn, get_nodes_Block(get_Block_cfgpred(nn, 0)));
- } else if (get_irn_opcode(n) == iro_Phi) {
+ } else if (intern_get_irn_opcode(n) == iro_Phi) {
/* Don't copy node if corresponding predecessor in block is Bad.
The Block itself should not be Bad. */
block = get_nodes_Block(n);
set_irn_n (nn, -1, get_new_node(block));
j = 0;
- irn_arity = get_irn_arity(n);
+ irn_arity = intern_get_irn_arity(n);
for (i = 0; i < irn_arity; i++)
- if (get_irn_opcode(get_irn_n(block, i)) != iro_Bad) {
- set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
+ if (intern_get_irn_opcode(intern_get_irn_n(block, i)) != iro_Bad) {
+ set_irn_n (nn, j, get_new_node(intern_get_irn_n(n, i)));
/*if (is_backedge(n, i)) set_backedge(nn, j);*/
j++;
}
set_Block_block_visited(get_nodes_Block(n), 0);
/* Compacting the Phi's ins might generate Phis with only one
predecessor. */
- if (get_irn_arity(n) == 1)
- exchange(n, get_irn_n(n, 0));
+ if (intern_get_irn_arity(n) == 1)
+ exchange(n, intern_get_irn_n(n, 0));
} else {
- irn_arity = get_irn_arity(n);
+ irn_arity = intern_get_irn_arity(n);
for (i = -1; i < irn_arity; i++)
- set_irn_n (nn, i, get_new_node(get_irn_n(n, i)));
+ set_irn_n (nn, i, get_new_node(intern_get_irn_n(n, i)));
}
/* Now the new node is complete. We can add it to the hash table for cse.
@@@ inlinening aborts if we identify End. Why? */
- if(get_irn_op(nn) != op_End)
+ if(intern_get_irn_op(nn) != op_End)
add_identities (current_ir_graph->value_table, nn);
}
/*- ... and now the keep alives. -*/
/* First pick the not marked block nodes and walk them. We must pick these
first as else we will oversee blocks reachable from Phis. */
- irn_arity = get_irn_arity(oe);
+ irn_arity = intern_get_irn_arity(oe);
for (i = 0; i < irn_arity; i++) {
- ka = get_irn_n(oe, i);
- if ((get_irn_op(ka) == op_Block) &&
+ ka = intern_get_irn_intra_n(oe, i);
+ if ((intern_get_irn_op(ka) == op_Block) &&
(get_irn_visited(ka) < get_irg_visited(current_ir_graph))) {
/* We must keep the block alive and copy everything reachable */
set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
}
/* Now pick the Phis. Here we will keep all! */
- irn_arity = get_irn_arity(oe);
+ irn_arity = intern_get_irn_arity(oe);
for (i = 0; i < irn_arity; i++) {
- ka = get_irn_n(oe, i);
- if ((get_irn_op(ka) == op_Phi)) {
+ ka = intern_get_irn_intra_n(oe, i);
+ if ((intern_get_irn_op(ka) == op_Phi)) {
if (get_irn_visited(ka) < get_irg_visited(current_ir_graph)) {
/* We didn't copy the Phi yet. */
set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
void
dead_node_elimination(ir_graph *irg) {
ir_graph *rem;
+ int rem_ipview = interprocedural_view;
struct obstack *graveyard_obst = NULL;
struct obstack *rebirth_obst = NULL;
/* Remember external state of current_ir_graph. */
rem = current_ir_graph;
current_ir_graph = irg;
+ interprocedural_view = 0;
/* Handle graph state */
assert(get_irg_phase_state(current_ir_graph) != phase_building);
}
current_ir_graph = rem;
+ interprocedural_view = rem_ipview;
}
/**
/* if link field of block is NULL, look for bad predecessors otherwise
this is allready done */
- if (get_irn_op(n) == op_Block &&
+ if (intern_get_irn_op(n) == op_Block &&
get_irn_link(n) == NULL) {
/* save old predecessors in link field (position 0 is the block operand)*/
set_irn_link(n, (void *)get_irn_in(n));
/* count predecessors without bad nodes */
- old_irn_arity = get_irn_arity(n);
+ old_irn_arity = intern_get_irn_arity(n);
for (i = 0; i < old_irn_arity; i++)
- if (!is_Bad(get_irn_n(n, i))) new_irn_arity++;
+ if (!is_Bad(intern_get_irn_n(n, i))) new_irn_arity++;
/* arity changing: set new predecessors without bad nodes */
if (new_irn_arity < old_irn_arity) {
new_in[0] = NULL;
new_irn_n = 1;
for (i = 1; i < old_irn_arity; i++) {
- irn = get_irn_n(n, i);
+ irn = intern_get_irn_n(n, i);
if (!is_Bad(irn)) new_in[new_irn_n++] = irn;
}
n->in = new_in;
int i, old_irn_arity, new_irn_arity;
/* relink bad predeseccors of a block */
- if (get_irn_op(n) == op_Block)
+ if (intern_get_irn_op(n) == op_Block)
relink_bad_block_predecessors(n, env);
/* If Phi node relink its block and its predecessors */
- if (get_irn_op(n) == op_Phi) {
+ if (intern_get_irn_op(n) == op_Phi) {
/* Relink predeseccors of phi's block */
block = get_nodes_Block(n);
type *frame_tp = (type *)env;
copy_node(n, NULL);
- if (get_irn_op(n) == op_Sel) {
+ if (intern_get_irn_op(n) == op_Sel) {
new = get_new_node (n);
- assert(get_irn_op(new) == op_Sel);
+ assert(intern_get_irn_op(new) == op_Sel);
if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
set_Sel_entity(new, get_entity_link(get_Sel_entity(n)));
}
- } else if (get_irn_op(n) == op_Block) {
+ } else if (intern_get_irn_op(n) == op_Block) {
new = get_new_node (n);
new->attr.block.irg = current_ir_graph;
}
/* -- Precompute some values -- */
end_bl = get_new_node(get_irg_end_block(called_graph));
end = get_new_node(get_irg_end(called_graph));
- arity = get_irn_arity(end_bl); /* arity = n_exc + n_ret */
+ arity = intern_get_irn_arity(end_bl); /* arity = n_exc + n_ret */
n_res = get_method_n_ress(get_Call_type(call));
res_pred = (ir_node **) malloc (n_res * sizeof (ir_node *));
set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */
/* -- archive keepalives -- */
- irn_arity = get_irn_arity(end);
+ irn_arity = intern_get_irn_arity(end);
for (i = 0; i < irn_arity; i++)
- add_End_keepalive(get_irg_end(current_ir_graph), get_irn_n(end, i));
+ add_End_keepalive(get_irg_end(current_ir_graph), intern_get_irn_n(end, i));
/* The new end node will die. We need not free as the in array is on the obstack:
copy_node only generated 'D' arrays. */
n_ret = 0;
for (i = 0; i < arity; i++) {
ir_node *ret;
- ret = get_irn_n(end_bl, i);
- if (get_irn_op(ret) == op_Return) {
+ ret = intern_get_irn_n(end_bl, i);
+ if (intern_get_irn_op(ret) == op_Return) {
cf_pred[n_ret] = new_r_Jmp(current_ir_graph, get_nodes_Block(ret));
n_ret++;
}
/* First the Memory-Phi */
n_ret = 0;
for (i = 0; i < arity; i++) {
- ret = get_irn_n(end_bl, i);
- if (get_irn_op(ret) == op_Return) {
+ ret = intern_get_irn_n(end_bl, i);
+ if (intern_get_irn_op(ret) == op_Return) {
cf_pred[n_ret] = get_Return_mem(ret);
n_ret++;
}
for (j = 0; j < n_res; j++) {
n_ret = 0;
for (i = 0; i < arity; i++) {
- ret = get_irn_n(end_bl, i);
- if (get_irn_op(ret) == op_Return) {
+ ret = intern_get_irn_n(end_bl, i);
+ if (intern_get_irn_op(ret) == op_Return) {
cf_pred[n_ret] = get_Return_res(ret, j);
n_ret++;
}
}
- phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
+ phi = new_Phi(n_ret, cf_pred, intern_get_irn_mode(cf_pred[0]));
res_pred[j] = phi;
/* Conserve Phi-list for further inlinings -- but might be optimized */
if (get_nodes_Block(phi) == post_bl) {
n_exc = 0;
for (i = 0; i < arity; i++) {
ir_node *ret;
- ret = get_irn_n(end_bl, i);
- if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
+ ret = intern_get_irn_n(end_bl, i);
+ if (is_fragile_op(skip_Proj(ret)) || (intern_get_irn_op(skip_Proj(ret)) == op_Raise)) {
cf_pred[n_exc] = ret;
n_exc++;
}
n_exc = 0;
for (i = 0; i < arity; i++) {
ir_node *ret;
- ret = skip_Proj(get_irn_n(end_bl, i));
- if (get_irn_op(ret) == op_Call) {
+ ret = skip_Proj(intern_get_irn_n(end_bl, i));
+ if (intern_get_irn_op(ret) == op_Call) {
cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 3);
n_exc++;
} else if (is_fragile_op(ret)) {
/* We rely that all cfops have the memory output at the same position. */
cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 0);
n_exc++;
- } else if (get_irn_op(ret) == op_Raise) {
+ } else if (intern_get_irn_op(ret) == op_Raise) {
cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 1);
n_exc++;
}
/* assert(exc_handling == 1 || no exceptions. ) */
n_exc = 0;
for (i = 0; i < arity; i++) {
- ir_node *ret = get_irn_n(end_bl, i);
+ ir_node *ret = intern_get_irn_n(end_bl, i);
- if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
+ if (is_fragile_op(skip_Proj(ret)) || (intern_get_irn_op(skip_Proj(ret)) == op_Raise)) {
cf_pred[n_exc] = ret;
n_exc++;
}
}
main_end_bl = get_irg_end_block(current_ir_graph);
- main_end_bl_arity = get_irn_arity(main_end_bl);
+ main_end_bl_arity = intern_get_irn_arity(main_end_bl);
end_preds = (ir_node **) malloc ((n_exc + main_end_bl_arity) * sizeof (ir_node *));
for (i = 0; i < main_end_bl_arity; ++i)
- end_preds[i] = get_irn_n(main_end_bl, i);
+ end_preds[i] = intern_get_irn_n(main_end_bl, i);
for (i = 0; i < n_exc; ++i)
end_preds[main_end_bl_arity + i] = cf_pred[i];
set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
end_bl = get_irg_end_block(current_ir_graph);
for (i = 0; i < get_Block_n_cfgpreds(end_bl); i++) {
cf_op = get_Block_cfgpred(end_bl, i);
- if (get_irn_op(cf_op) == op_Proj) {
+ if (intern_get_irn_op(cf_op) == op_Proj) {
cf_op = get_Proj_pred(cf_op);
- if ((get_irn_op(cf_op) == op_Tuple) && (cf_op == call)) {
+ if ((intern_get_irn_op(cf_op) == op_Tuple) && (cf_op == call)) {
// There are unoptimized tuples from inlineing before when no exc
assert(get_Proj_proj(get_Block_cfgpred(end_bl, i)) == pn_Call_X_except);
cf_op = get_Tuple_pred(cf_op, pn_Call_X_except);
- assert(get_irn_op(cf_op) == op_Jmp);
+ assert(intern_get_irn_op(cf_op) == op_Jmp);
break;
}
}
assert(get_irn_op(call) == op_Call);
addr = get_Call_ptr(call);
- if (get_irn_op(addr) == op_Const) {
+ if (intern_get_irn_op(addr) == op_Const) {
/* Check whether the constant is the pointer to a compiled entity. */
tv = get_Const_tarval(addr);
if (tarval_to_entity(tv))
tarval *tv;
ir_graph *called_irg;
- if (get_irn_op(call) != op_Call) return;
+ if (intern_get_irn_op(call) != op_Call) return;
addr = get_Call_ptr(call);
- if (get_irn_op(addr) == op_Const) {
+ if (intern_get_irn_op(addr) == op_Const) {
/* Check whether the constant is the pointer to a compiled entity. */
tv = get_Const_tarval(addr);
if (tarval_to_entity(tv)) {
static void collect_calls2(ir_node *call, void *env) {
inline_irg_env *x = (inline_irg_env *)env;
- ir_op *op = get_irn_op(call);
+ ir_op *op = intern_get_irn_op(call);
ir_graph *callee;
/* count nodes in irg */
mark_irn_visited(n);
/* Place floating nodes. */
- if (get_op_pinned(get_irn_op(n)) == floats) {
+ if (get_op_pinned(intern_get_irn_op(n)) == floats) {
int depth = 0;
ir_node *b = new_Bad(); /* The block to place this node in */
- assert(get_irn_op(n) != op_Block);
+ assert(intern_get_irn_op(n) != op_Block);
- if ((get_irn_op(n) == op_Const) ||
- (get_irn_op(n) == op_SymConst) ||
+ if ((intern_get_irn_op(n) == op_Const) ||
+ (intern_get_irn_op(n) == op_SymConst) ||
(is_Bad(n)) ||
- (get_irn_op(n) == op_Unknown)) {
+ (intern_get_irn_op(n) == op_Unknown)) {
/* These nodes will not be placed by the loop below. */
b = get_irg_start_block(current_ir_graph);
depth = 1;
}
/* find the block for this node. */
- irn_arity = get_irn_arity(n);
+ irn_arity = intern_get_irn_arity(n);
for (i = 0; i < irn_arity; i++) {
- ir_node *dep = get_irn_n(n, i);
+ ir_node *dep = intern_get_irn_n(n, i);
ir_node *dep_block;
if ((irn_not_visited(dep)) &&
- (get_op_pinned(get_irn_op(dep)) == floats)) {
+ (get_op_pinned(intern_get_irn_op(dep)) == floats)) {
place_floats_early(dep, worklist);
}
/* Because all loops contain at least one pinned node, now all
}
/* Add predecessors of non floating nodes on worklist. */
- start = (get_irn_op(n) == op_Block) ? 0 : -1;
- irn_arity = get_irn_arity(n);
+ start = (intern_get_irn_op(n) == op_Block) ? 0 : -1;
+ irn_arity = intern_get_irn_arity(n);
for (i = start; i < irn_arity; i++) {
- ir_node *pred = get_irn_n(n, i);
+ ir_node *pred = intern_get_irn_n(n, i);
if (irn_not_visited(pred)) {
pdeq_putr (worklist, pred);
}
/* Compute the latest block into which we can place a node so that it is
before consumer. */
- if (get_irn_op(consumer) == op_Phi) {
+ if (intern_get_irn_op(consumer) == op_Phi) {
/* our consumer is a Phi-node, the effective use is in all those
blocks through which the Phi-node reaches producer */
int i, irn_arity;
ir_node *phi_block = get_nodes_Block(consumer);
- irn_arity = get_irn_arity(consumer);
+ irn_arity = intern_get_irn_arity(consumer);
for (i = 0; i < irn_arity; i++) {
- if (get_irn_n(consumer, i) == producer) {
+ if (intern_get_irn_n(consumer, i) == producer) {
block = get_nodes_Block(get_Block_cfgpred(phi_block, i));
}
}
assert (irn_not_visited(n)); /* no multiple placement */
/* no need to place block nodes, control nodes are already placed. */
- if ((get_irn_op(n) != op_Block) &&
+ if ((intern_get_irn_op(n) != op_Block) &&
(!is_cfop(n)) &&
- (get_irn_mode(n) != mode_X)) {
+ (intern_get_irn_mode(n) != mode_X)) {
/* Remember the early placement of this block to move it
out of loop no further than the early placement. */
early = get_nodes_Block(n);
producer of one of their inputs in the same block anyway. */
for (i = 0; i < get_irn_n_outs(n); i++) {
ir_node *succ = get_irn_out(n, i);
- if (irn_not_visited(succ) && (get_irn_op(succ) != op_Phi))
+ if (irn_not_visited(succ) && (intern_get_irn_op(succ) != op_Phi))
place_floats_late(succ, worklist);
}
/* We have to determine the final block of this node... except for
constants. */
- if ((get_op_pinned(get_irn_op(n)) == floats) &&
- (get_irn_op(n) != op_Const) &&
- (get_irn_op(n) != op_SymConst)) {
+ if ((get_op_pinned(intern_get_irn_op(n)) == floats) &&
+ (intern_get_irn_op(n) != op_Const) &&
+ (intern_get_irn_op(n) != op_SymConst)) {
ir_node *dca = NULL; /* deepest common ancestor in the
dominator tree of all nodes'
blocks depending on us; our final
int i;
set_irn_link(n, NULL);
- if (get_irn_op(n) == op_Block) {
+ if (intern_get_irn_op(n) == op_Block) {
/* Remove Tuples */
for (i = 0; i < get_Block_n_cfgpreds(n); i++)
/* GL @@@ : is this possible? if (get_opt_normalize()) -- added, all tests go through.
A different order of optimizations might cause problems. */
if (get_opt_normalize())
set_Block_cfgpred(n, i, skip_Tuple(get_Block_cfgpred(n, i)));
- } else if (get_opt_optimize() && (get_irn_mode(n) == mode_X)) {
+ } else if (get_opt_optimize() && (intern_get_irn_mode(n) == mode_X)) {
/* We will soon visit a block. Optimize it before visiting! */
ir_node *b = get_nodes_Block(n);
ir_node *new_node = equivalent_node(b);
if (is_no_Block(n)) {
ir_node *b = get_nodes_Block(n);
- if ((get_irn_op(n) == op_Phi)) {
+ if ((intern_get_irn_op(n) == op_Phi)) {
/* Collect Phi nodes to compact ins along with block's ins. */
set_irn_link(n, get_irn_link(b));
set_irn_link(b, n);
- } else if (get_irn_op(n) != op_Jmp) { /* Check for non empty block. */
+ } else if (intern_get_irn_op(n) != op_Jmp) { /* Check for non empty block. */
mark_Block_block_visited(b);
}
}
/*- Fix the Phi nodes -*/
phi = get_irn_link(b);
while (phi) {
- assert(get_irn_op(phi) == op_Phi);
+ assert(intern_get_irn_op(phi) == op_Phi);
/* Find the new predecessors for the Phi */
n_preds = 0;
for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
ir_node *phi_pred = get_Phi_pred(phi, i);
for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
if (get_nodes_Block(phi_pred) == pred) {
- assert(get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */
+ assert(intern_get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */
in[n_preds] = get_Phi_pred(phi_pred, j);
} else {
in[n_preds] = phi_pred;
< get_irg_block_visited(current_ir_graph)) {
phi = get_irn_link(pred);
while (phi) {
- if (get_irn_op(phi) == op_Phi) {
+ if (intern_get_irn_op(phi) == op_Phi) {
set_nodes_Block(phi, b);
n_preds = 0;
for(i = 0; i < get_End_n_keepalives(end); i++) {
ir_node *ka = get_End_keepalive(end, i);
if (irn_not_visited(ka)) {
- if ((get_irn_op(ka) == op_Block) && Block_not_block_visited(ka)) {
+ if ((intern_get_irn_op(ka) == op_Block) && Block_not_block_visited(ka)) {
set_irg_block_visited(current_ir_graph, /* Don't walk all the way to Start. */
get_irg_block_visited(current_ir_graph)-1);
irg_block_walk(ka, optimize_blocks, NULL, NULL);
mark_irn_visited(ka);
ARR_APP1 (ir_node *, in, ka);
- } else if (get_irn_op(ka) == op_Phi) {
+ } else if (intern_get_irn_op(ka) == op_Phi) {
mark_irn_visited(ka);
ARR_APP1 (ir_node *, in, ka);
}
ir_node *pre, *block, **in, *jmp;
/* Block has multiple predecessors */
- if ((op_Block == get_irn_op(n)) &&
- (get_irn_arity(n) > 1)) {
- arity = get_irn_arity(n);
+ if ((op_Block == intern_get_irn_op(n)) &&
+ (intern_get_irn_arity(n) > 1)) {
+ arity = intern_get_irn_arity(n);
if (n == get_irg_end_block(current_ir_graph))
return; // No use to add a block here.
for (i=0; i<arity; i++) {
- pre = get_irn_n(n, i);
+ pre = intern_get_irn_n(n, i);
/* Predecessor has multiple successors. Insert new flow edge */
if ((NULL != pre) &&
- (op_Proj == get_irn_op(pre)) &&
- op_Raise != get_irn_op(skip_Proj(pre))) {
+ (op_Proj == intern_get_irn_op(pre)) &&
+ op_Raise != intern_get_irn_op(skip_Proj(pre))) {
/* set predecessor array for new block */
in = NEW_ARR_D (ir_node *, current_ir_graph->obst, 1);
set_irn_visited(node, visited);
pred = skip_Proj(node);
- if (get_irn_op(pred) == op_CallBegin
- || get_irn_op(pred) == op_EndReg
- || get_irn_op(pred) == op_EndExcept) {
+ if (intern_get_irn_op(pred) == op_CallBegin
+ || intern_get_irn_op(pred) == op_EndReg
+ || intern_get_irn_op(pred) == op_EndExcept) {
current_ir_graph = get_irn_irg(pred);
}
if (is_no_Block(node))
irg_walk_cg(get_nodes_block(node), visited, irg_set, pre, post, env);
- if (get_irn_op(node) == op_Block) { /* block */
- for (i = get_irn_arity(node) - 1; i >= 0; --i) {
- ir_node * exec = get_irn_n(node, i);
+ if (intern_get_irn_op(node) == op_Block) { /* block */
+ for (i = intern_get_irn_arity(node) - 1; i >= 0; --i) {
+ ir_node * exec = intern_get_irn_n(node, i);
ir_node * pred = skip_Proj(exec);
- if ((get_irn_op(pred) != op_CallBegin
- && get_irn_op(pred) != op_EndReg
- && get_irn_op(pred) != op_EndExcept)
+ if ((intern_get_irn_op(pred) != op_CallBegin
+ && intern_get_irn_op(pred) != op_EndReg
+ && intern_get_irn_op(pred) != op_EndExcept)
|| eset_contains(irg_set, get_irn_irg(pred))) {
irg_walk_cg(exec, visited, irg_set, pre, post, env);
}
}
- } else if (get_irn_op(node) == op_Filter) {
- for (i = get_irn_arity(node) - 1; i >= 0; --i) {
- ir_node * pred = get_irn_n(node, i);
- if (get_irn_op(pred) == op_Unknown || get_irn_op(pred) == op_Bad) {
+ } else if (intern_get_irn_op(node) == op_Filter) {
+ for (i = intern_get_irn_arity(node) - 1; i >= 0; --i) {
+ ir_node * pred = intern_get_irn_n(node, i);
+ if (intern_get_irn_op(pred) == op_Unknown || intern_get_irn_op(pred) == op_Bad) {
irg_walk_cg(pred, visited, irg_set, pre, post, env);
} else {
ir_node * exec;
exec = skip_Proj(get_Block_cfgpred(get_nodes_block(node), i));
- assert(get_irn_op(exec) == op_CallBegin
- || get_irn_op(exec) == op_EndReg
- || get_irn_op(exec) == op_EndExcept);
+ assert(intern_get_irn_op(exec) == op_CallBegin
+ || intern_get_irn_op(exec) == op_EndReg
+ || intern_get_irn_op(exec) == op_EndExcept);
if (eset_contains(irg_set, get_irn_irg(exec))) {
current_ir_graph = get_irn_irg(exec);
irg_walk_cg(pred, visited, irg_set, pre, post, env);
}
}
} else {
- for (i = get_irn_arity(node) - 1; i >= 0; --i) {
- irg_walk_cg(get_irn_n(node, i), visited, irg_set, pre, post, env);
+ for (i = intern_get_irn_arity(node) - 1; i >= 0; --i) {
+ irg_walk_cg(intern_get_irn_n(node, i), visited, irg_set, pre, post, env);
}
}
/* Insert all ir_graphs in irg_set, that are (transitive) reachable. */
static void collect_irgs(ir_node * node, eset * irg_set) {
- if (get_irn_op(node) == op_Call) {
+ if (intern_get_irn_op(node) == op_Call) {
int i;
for (i = get_Call_n_callees(node) - 1; i >= 0; --i) {
entity * ent = get_Call_callee(node, i);
if (is_no_Block(node))
irg_walk_2(get_nodes_block(node), pre, post, env);
- for (i = get_irn_arity(node) - 1; i >= 0; --i)
- irg_walk_2(get_irn_n(node, i), pre, post, env);
+ for (i = intern_get_irn_arity(node) - 1; i >= 0; --i)
+ irg_walk_2(intern_get_irn_n(node, i), pre, post, env);
if (post) post(node, env);
}
if (pre) pre(node, env);
if (node->op != op_Block)
- irg_walk_2(get_irn_n(node, -1), pre, post, env);
- for (i = get_irn_arity(node) - 1; i >= 0; --i)
- irg_walk_2(get_irn_n(node, i), pre, post, env);
+ irg_walk_2(intern_get_irn_n(node, -1), pre, post, env);
+ for (i = intern_get_irn_arity(node) - 1; i >= 0; --i)
+ irg_walk_2(intern_get_irn_n(node, i), pre, post, env);
if (post) post(node, env);
}
if (interprocedural_view) {
/* Only Filter and Block nodes can have predecessors in other graphs. */
- if (get_irn_op(n) == op_Filter)
+ if (intern_get_irn_op(n) == op_Filter)
n = get_nodes_block(n);
- if (get_irn_op(n) == op_Block) {
+ if (intern_get_irn_op(n) == op_Block) {
ir_node *cfop = skip_Proj(get_Block_cfgpred(n, index));
if (is_ip_cfop(cfop)) {
current_ir_graph = get_irn_irg(cfop);
if (is_no_Block(node))
cg_walk_2(get_nodes_block(node), pre, post, env);
- for (i = get_irn_arity(node) - 1; i >= 0; --i) {
+ for (i = intern_get_irn_arity(node) - 1; i >= 0; --i) {
rem = switch_irg(node, i); /* @@@ AS: Is this wrong? We do have to
switch to the irg of the predecessor, don't we? */
- cg_walk_2(get_irn_n(node, i), pre, post, env);
+ cg_walk_2(intern_get_irn_n(node, i), pre, post, env);
current_ir_graph = rem;
}
n = skip_Tuple(n);
pred = skip_Proj(n);
if (!(is_cfop(pred) || is_fragile_op(pred) ||
- (get_irn_op(pred) == op_Bad)))
+ (intern_get_irn_op(pred) == op_Bad)))
n = get_cf_op(n);
return skip_Proj(n);
static void irg_block_walk_2(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void *env)
{
int i;
- assert(get_irn_opcode(node) == iro_Block);
if(get_Block_block_visited(node) < get_irg_block_visited(current_ir_graph)) {
set_Block_block_visited(node, get_irg_block_visited(current_ir_graph));
/* find the corresponding predecessor block. */
ir_node *pred = get_cf_op(get_Block_cfgpred(node, i));
pred = get_nodes_block(pred);
- if(get_irn_opcode(pred) == iro_Block) {
+ if(intern_get_irn_opcode(pred) == iro_Block) {
/* recursion */
irg_block_walk_2(pred, pre, post, env);
}
assert(get_irn_opcode(block) == iro_Block);
irg_block_walk_2(block, pre, post, env);
/* keepalive: the endless loops ... */
- if (get_irn_op(node) == op_End) {
- int arity = get_irn_arity(node);
+ if (intern_get_irn_op(node) == op_End) {
+ int arity = intern_get_irn_arity(node);
for (i = 0; i < arity; i++) {
- pred = get_irn_n(node, i);
- if (get_irn_op(pred) == op_Block)
+ pred = intern_get_irn_n(node, i);
+ if (intern_get_irn_op(pred) == op_Block)
irg_block_walk_2(pred, pre, post, env);
}
}
assert(interprocedural_view);
interprocedural_view = 0;
- callbegin = skip_Proj(get_irn_n(block, 0));
- assert(get_irn_op(callbegin) == op_CallBegin);
+ callbegin = skip_Proj(intern_get_irn_n(block, 0));
+ assert(intern_get_irn_op(callbegin) == op_CallBegin);
interprocedural_view = 1;
push_callsite(irg, callbegin);
/* Find the cf_pred refering to pos. */
ir_node *block = n;
ir_node *cf_pred;
- if (get_irn_opcode(n) == iro_Filter) block = get_nodes_block(n);
- cf_pred = skip_Proj(get_irn_n(block, pos));
+ if (intern_get_irn_opcode(n) == iro_Filter) block = get_nodes_block(n);
+ cf_pred = skip_Proj(intern_get_irn_n(block, pos));
/* Check whether we enter or leave a procedure and act according. */
- if ((get_irn_op(cf_pred) == op_EndReg) ||
- (get_irn_op(cf_pred) == op_EndExcept))
+ if ((intern_get_irn_op(cf_pred) == op_EndReg) ||
+ (intern_get_irn_op(cf_pred) == op_EndExcept))
enter_procedure(block, cf_pred, pos);
- if (get_irn_op(cf_pred) == op_CallBegin)
+ if (intern_get_irn_op(cf_pred) == op_CallBegin)
if (!leave_procedure(block, cf_pred, pos)) return NULL;
}
- return get_irn_n(n, pos);
+ return intern_get_irn_n(n, pos);
}
static INLINE void
/* Find the cf_pred refering to pos. */
block = n;
- if (get_irn_opcode(n) == iro_Filter) block = get_nodes_block(n);
- cf_pred = skip_Proj(get_irn_n(block, pos));
+ if (intern_get_irn_opcode(n) == iro_Filter) block = get_nodes_block(n);
+ cf_pred = skip_Proj(intern_get_irn_n(block, pos));
/* Check whether we re_enter or re_leave a procedure and act according. */
- if ((get_irn_op(cf_pred) == op_EndReg) ||
- (get_irn_op(cf_pred) == op_EndExcept))
+ if ((intern_get_irn_op(cf_pred) == op_EndReg) ||
+ (intern_get_irn_op(cf_pred) == op_EndExcept))
re_enter_procedure(block, cf_pred, pos);
- if (get_irn_op(cf_pred) == op_CallBegin)
+ if (intern_get_irn_op(cf_pred) == op_CallBegin)
re_leave_procedure(block, cf_pred, pos);
}
#include "irgraph_t.h"
#include "irmode_t.h"
#include "typegmod.h"
-#include "array.h"
#include "irbackedge_t.h"
#include "irdump.h"
-#include "irflag_t.h"
#include "irop_t.h"
#include "irprog_t.h"
return 0;
}
-/* returns the number of predecessors without the block predecessor. */
INLINE int
get_irn_intra_arity (const ir_node *node) {
- assert(node);
- return ARR_LEN(node->in) - 1;
+ return intern_get_irn_intra_arity(node);
}
-/* returns the number of predecessors without the block predecessor. */
INLINE int
get_irn_inter_arity (const ir_node *node) {
- assert(node);
- if (get_irn_opcode(node) == iro_Filter) {
- assert(node->attr.filter.in_cg);
- return ARR_LEN(node->attr.filter.in_cg) - 1;
- } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
- return ARR_LEN(node->attr.block.in_cg) - 1;
- }
- return get_irn_intra_arity(node);
+ return intern_get_irn_inter_arity(node);
}
-/* returns the number of predecessors without the block predecessor. */
INLINE int
get_irn_arity (const ir_node *node) {
- assert(node);
- if (interprocedural_view) return get_irn_inter_arity(node);
- return get_irn_intra_arity(node);
+ return intern_get_irn_arity(node);
}
/* Returns the array with ins. This array is shifted with respect to the
INLINE ir_node *
get_irn_intra_n (ir_node *node, int n) {
- return (node->in[n + 1] = skip_nop(node->in[n + 1]));
+ return intern_get_irn_intra_n (node, n);
}
-INLINE ir_node*
+INLINE ir_node *
get_irn_inter_n (ir_node *node, int n) {
- /* handle Filter and Block specially */
- if (get_irn_opcode(node) == iro_Filter) {
- assert(node->attr.filter.in_cg);
- return (node->attr.filter.in_cg[n + 1] = skip_nop(node->attr.filter.in_cg[n + 1]));
- } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
- return (node->attr.block.in_cg[n + 1] = skip_nop(node->attr.block.in_cg[n + 1]));
- }
-
- return get_irn_intra_n (node, n);
+ return intern_get_irn_inter_n (node, n);
}
-/* to iterate through the predecessors without touching the array */
-/* To iterate over the operands iterate from 0 to i < get_irn_arity(),
- to iterate including the Block predecessor iterate from i = -1 to
- i < get_irn_arity.
- If it is a block, the entry -1 is NULL. */
INLINE ir_node *
get_irn_n (ir_node *node, int n) {
- assert(node); assert(-1 <= n && n < get_irn_arity(node));
- if (interprocedural_view) return get_irn_inter_n (node, n);
- return get_irn_intra_n (node, n);
+ return intern_get_irn_n (node, n);
}
-
INLINE void
set_irn_n (ir_node *node, int n, ir_node *in) {
assert(node && -1 <= n && n < get_irn_arity(node));
}
INLINE ir_mode *
-get_irn_mode (const ir_node *node)
-{
- assert (node);
- return node->mode;
+get_irn_mode (const ir_node *node) {
+ return intern_get_irn_mode(node);
}
INLINE void
INLINE ir_op *
get_irn_op (const ir_node *node)
{
- assert (node);
- return node->op;
+ return intern_get_irn_op(node);
}
/* should be private to the library: */
INLINE opcode
get_irn_opcode (const ir_node *node)
{
- assert (k_ir_node == get_kind(node));
- assert (node -> op);
- return node->op->code;
+ return intern_get_irn_opcode(node);
}
INLINE const char *
set_Start_irg(ir_node *node, ir_graph *irg) {
assert(node->op == op_Start);
assert(is_ir_graph(irg));
- assert(0 && " Why set irg? ");
- //node->attr.start.irg = irg;
+ assert(0 && " Why set irg? -- use set_irn_irg");
}
INLINE int
ir_node *rem_pred = node->in[0+1];
ir_node *res;
- assert (get_irn_arity (node) > 0);
+ assert (intern_get_irn_arity (node) > 0);
node->in[0+1] = node;
res = skip_nop(rem_pred);
/* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
than any other approach, as Id chains are resolved and all point to the real node, or
all id's are self loops. */
-extern int opt_normalize;
INLINE ir_node *
skip_nop (ir_node *node) {
ir_node *pred;
if (pred->op != op_Id) return pred; /* shortcut */
rem_pred = pred;
- assert (get_irn_arity (node) > 0);
+ assert (intern_get_irn_arity (node) > 0);
node->in[0+1] = node;
res = skip_nop(rem_pred);
}
#endif
-
-
INLINE ir_node *
skip_Id (ir_node *node) {
return skip_nop(node);
# include "irnode.h"
# include "irop_t.h"
+# include "irflag_t.h"
# include "firm_common_t.h"
# include "irdom_t.h" /* For size of struct dom_info. */
# include "dbginfo.h"
# include "irloop.h"
+# include "array.h"
# include "exc.h"
block_attr get_irn_block_attr (ir_node *node);
/*@}*/
+/*********************************************************************/
+/* These function are most used in libfirm. Give them as static */
+/* functions so they can be inlined. */
+/*********************************************************************/
+
+
+
+/* returns the number of predecessors without the block predecessor. */
+static INLINE int
+intern_get_irn_intra_arity (const ir_node *node) {
+ assert(node);
+ return ARR_LEN(node->in) - 1;
+}
+
+/* returns the number of predecessors without the block predecessor. */
+static INLINE int
+intern_get_irn_inter_arity (const ir_node *node) {
+ assert(node);
+ if (get_irn_opcode(node) == iro_Filter) {
+ assert(node->attr.filter.in_cg);
+ return ARR_LEN(node->attr.filter.in_cg) - 1;
+ } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
+ return ARR_LEN(node->attr.block.in_cg) - 1;
+ }
+ return intern_get_irn_intra_arity(node);
+}
+
+/* returns the number of predecessors without the block predecessor. */
+static INLINE int
+intern_get_irn_arity (const ir_node *node) {
+ assert(node);
+ if (interprocedural_view) return intern_get_irn_inter_arity(node);
+ return intern_get_irn_intra_arity(node);
+}
+
+static INLINE ir_node *
+intern_get_irn_intra_n (ir_node *node, int n) {
+  return (node->in[n + 1] = skip_nop(node->in[n + 1]));
+}
+
+static INLINE ir_node*
+intern_get_irn_inter_n (ir_node *node, int n) {
+  /* handle Filter and Block specially */
+  if (get_irn_opcode(node) == iro_Filter) {
+    assert(node->attr.filter.in_cg);
+    return (node->attr.filter.in_cg[n + 1] = skip_nop(node->attr.filter.in_cg[n + 1]));
+  } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
+    return (node->attr.block.in_cg[n + 1] = skip_nop(node->attr.block.in_cg[n + 1]));
+  }
+
+  /* call the intern variant directly: it is defined above, so the
+     fast path stays self-contained and inlinable in this header */
+  return intern_get_irn_intra_n (node, n);
+}
+
+/* to iterate through the predecessors without touching the array */
+/* To iterate over the operands iterate from 0 to i < get_irn_arity(),
+   to iterate including the Block predecessor iterate from i = -1 to
+   i < get_irn_arity.
+   If it is a block, the entry -1 is NULL. */
+static INLINE ir_node *
+intern_get_irn_n (ir_node *node, int n) {
+  assert(node); assert(-1 <= n && n < intern_get_irn_arity(node));
+  if (interprocedural_view) return intern_get_irn_inter_n (node, n);
+  return intern_get_irn_intra_n (node, n);
+}
+
+static INLINE ir_mode *
+intern_get_irn_mode (const ir_node *node)
+{
+ assert (node);
+ return node->mode;
+}
+
+static INLINE ir_op *
+intern_get_irn_op (const ir_node *node)
+{
+ assert (node);
+ return node->op;
+}
+
+static INLINE opcode
+intern_get_irn_opcode (const ir_node *node)
+{
+ assert (k_ir_node == get_kind(node));
+ assert (node -> op);
+ return node->op->code;
+}
+
+
+
# endif /* _IRNODE_T_H_ */
static INLINE ir_node *
follow_Id (ir_node *n)
{
- while (get_irn_op (n) == op_Id) n = get_Id_pred (n);
+ while (intern_get_irn_op (n) == op_Id) n = get_Id_pred (n);
return n;
}
static INLINE tarval *
value_of (ir_node *n)
{
- if ((n != NULL) && (get_irn_op(n) == op_Const))
+ if ((n != NULL) && (intern_get_irn_op(n) == op_Const))
return get_Const_tarval(n); /* might return tarval_bad */
else
return tarval_bad;
tarval *tb = value_of(b);
if ((ta != tarval_bad) && (tb != tarval_bad)
- && (get_irn_mode(a) == get_irn_mode(b))
- && !(get_mode_sort(get_irn_mode(a)) == irms_reference)) {
+ && (intern_get_irn_mode(a) == intern_get_irn_mode(b))
+ && !(get_mode_sort(intern_get_irn_mode(a)) == irms_reference)) {
return tarval_add(ta, tb);
}
return tarval_bad;
tarval *tb = value_of(b);
if ((ta != tarval_bad) && (tb != tarval_bad)
- && (get_irn_mode(a) == get_irn_mode(b))
- && !(get_mode_sort(get_irn_mode(a)) == irms_reference)) {
+ && (intern_get_irn_mode(a) == intern_get_irn_mode(b))
+ && !(get_mode_sort(intern_get_irn_mode(a)) == irms_reference)) {
return tarval_sub(ta, tb);
}
return tarval_bad;
ir_node *a = get_Minus_op(n);
tarval *ta = value_of(a);
- if ((ta != tarval_bad) && mode_is_signed(get_irn_mode(a)))
+ if ((ta != tarval_bad) && mode_is_signed(intern_get_irn_mode(a)))
return tarval_neg(ta);
return tarval_bad;
tarval *ta = value_of(a);
tarval *tb = value_of(b);
- if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
+ if ((ta != tarval_bad) && (tb != tarval_bad) && (intern_get_irn_mode(a) == intern_get_irn_mode(b))) {
return tarval_mul(ta, tb);
} else {
/* a*0 = 0 or 0*b = 0:
tarval *tb = value_of(b);
/* This was missing in original implementation. Why? */
- if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
+ if ((ta != tarval_bad) && (tb != tarval_bad) && (intern_get_irn_mode(a) == intern_get_irn_mode(b))) {
if (tb != get_mode_null(get_tarval_mode(tb))) /* div by zero: return tarval_bad */
return tarval_quo(ta, tb);
}
tarval *tb = value_of(b);
/* This was missing in original implementation. Why? */
- if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
+ if ((ta != tarval_bad) && (tb != tarval_bad) && (intern_get_irn_mode(a) == intern_get_irn_mode(b))) {
if (tb != get_mode_null(get_tarval_mode(tb))) /* div by zero: return tarval_bad */
return tarval_div(ta, tb);
}
tarval *tb = value_of(b);
/* This was missing in original implementation. Why? */
- if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
+ if ((ta != tarval_bad) && (tb != tarval_bad) && (intern_get_irn_mode(a) == intern_get_irn_mode(b))) {
if (tb != get_mode_null(get_tarval_mode(tb))) /* div by zero: return tarval_bad */
return tarval_mod(ta, tb);
}
tarval *ta = value_of(a);
if (ta != tarval_bad)
- return tarval_convert_to(ta, get_irn_mode(n));
+ return tarval_convert_to(ta, intern_get_irn_mode(n));
return tarval_bad;
}
3. The predecessors are Allocs or void* constants. Allocs never
return NULL, they raise an exception. Therefore we can predict
the Cmp result. */
- if (get_irn_op(a) == op_Cmp) {
+ if (intern_get_irn_op(a) == op_Cmp) {
aa = get_Cmp_left(a);
ab = get_Cmp_right(a);
ir_node *aba = skip_nop(skip_Proj(ab));
if ( ( (/* aa is ProjP and aaa is Alloc */
- (get_irn_op(aa) == op_Proj)
- && (mode_is_reference(get_irn_mode(aa)))
- && (get_irn_op(aaa) == op_Alloc))
+ (intern_get_irn_op(aa) == op_Proj)
+ && (mode_is_reference(intern_get_irn_mode(aa)))
+ && (intern_get_irn_op(aaa) == op_Alloc))
&& ( (/* ab is constant void */
- (get_irn_op(ab) == op_Const)
- && (mode_is_reference(get_irn_mode(ab)))
- && (get_Const_tarval(ab) == get_mode_null(get_irn_mode(ab))))
+ (intern_get_irn_op(ab) == op_Const)
+ && (mode_is_reference(intern_get_irn_mode(ab)))
+ && (get_Const_tarval(ab) == get_mode_null(intern_get_irn_mode(ab))))
|| (/* ab is other Alloc */
- (get_irn_op(ab) == op_Proj)
- && (mode_is_reference(get_irn_mode(ab)))
- && (get_irn_op(aba) == op_Alloc)
+ (intern_get_irn_op(ab) == op_Proj)
+ && (mode_is_reference(intern_get_irn_mode(ab)))
+ && (intern_get_irn_op(aba) == op_Alloc)
&& (aaa != aba))))
|| (/* aa is void and aba is Alloc */
- (get_irn_op(aa) == op_Const)
- && (mode_is_reference(get_irn_mode(aa)))
- && (get_Const_tarval(aa) == get_mode_null(get_irn_mode(aa)))
- && (get_irn_op(ab) == op_Proj)
- && (mode_is_reference(get_irn_mode(ab)))
- && (get_irn_op(aba) == op_Alloc)))
+ (intern_get_irn_op(aa) == op_Const)
+ && (mode_is_reference(intern_get_irn_mode(aa)))
+ && (get_Const_tarval(aa) == get_mode_null(intern_get_irn_mode(aa)))
+ && (intern_get_irn_op(ab) == op_Proj)
+ && (mode_is_reference(intern_get_irn_mode(ab)))
+ && (intern_get_irn_op(aba) == op_Alloc)))
/* 3.: */
return new_tarval_from_long (get_Proj_proj(n) & Ne, mode_b);
}
}
- } else if (get_irn_op(a) == op_DivMod) {
+ } else if (intern_get_irn_op(a) == op_DivMod) {
tarval *tb = value_of(b = get_DivMod_right(a));
tarval *ta = value_of(a = get_DivMod_left(a));
- if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
+ if ((ta != tarval_bad) && (tb != tarval_bad) && (intern_get_irn_mode(a) == intern_get_irn_mode(b))) {
if (tb == get_mode_null(get_tarval_mode(tb))) /* div by zero: return tarval_bad */
return tarval_bad;
if (get_Proj_proj(n)== 0) /* Div */
assert (mode_is_reference(get_irn_mode (a))
&& mode_is_reference(get_irn_mode (b)));
- if (get_irn_op (a) == op_Proj && get_irn_op(b) == op_Proj) {
+ if (intern_get_irn_op (a) == op_Proj && intern_get_irn_op(b) == op_Proj) {
ir_node *a1 = get_Proj_pred (a);
ir_node *b1 = get_Proj_pred (b);
- if (a1 != b1 && get_irn_op (a1) == op_Alloc
- && get_irn_op (b1) == op_Alloc)
+ if (a1 != b1 && intern_get_irn_op (a1) == op_Alloc
+ && intern_get_irn_op (b1) == op_Alloc)
return 1;
}
return 0;
But what about Phi-cycles with the Phi0/Id that could not be resolved?
Remaining Phi nodes are just Ids. */
if ((get_Block_n_cfgpreds(n) == 1) &&
- (get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp) &&
+ (intern_get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp) &&
(get_opt_control_flow_straightening())) {
n = get_nodes_Block(get_Block_cfgpred(n, 0)); DBG_OPT_STG;
ir_node *a = get_Block_cfgpred(n, 0);
ir_node *b = get_Block_cfgpred(n, 1);
- if ((get_irn_op(a) == op_Proj) &&
- (get_irn_op(b) == op_Proj) &&
+ if ((intern_get_irn_op(a) == op_Proj) &&
+ (intern_get_irn_op(b) == op_Proj) &&
(get_Proj_pred(a) == get_Proj_pred(b)) &&
- (get_irn_op(get_Proj_pred(a)) == op_Cond) &&
- (get_irn_mode(get_Cond_selector(get_Proj_pred(a))) == mode_b)) {
+ (intern_get_irn_op(get_Proj_pred(a)) == op_Cond) &&
+ (intern_get_irn_mode(get_Cond_selector(get_Proj_pred(a))) == mode_b)) {
/* Also a single entry Block following a single exit Block. Phis have
twice the same operand and will be optimized away. */
n = get_nodes_Block(a); DBG_OPT_IFSIM;
ir_node *oldn = n;
/* optimize symmetric unop */
- if (get_irn_op(get_unop_op(n)) == get_irn_op(n)) {
+ if (intern_get_irn_op(get_unop_op(n)) == intern_get_irn_op(n)) {
n = get_unop_op(get_unop_op(n)); DBG_OPT_ALGSIM2;
}
return n;
ir_node *a = get_Conv_op(n);
ir_node *b;
- ir_mode *n_mode = get_irn_mode(n);
- ir_mode *a_mode = get_irn_mode(a);
+ ir_mode *n_mode = intern_get_irn_mode(n);
+ ir_mode *a_mode = intern_get_irn_mode(a);
if (n_mode == a_mode) { /* No Conv necessary */
n = a; DBG_OPT_ALGSIM3;
- } else if (get_irn_op(a) == op_Conv) { /* Conv(Conv(b)) */
+ } else if (intern_get_irn_op(a) == op_Conv) { /* Conv(Conv(b)) */
ir_mode *b_mode;
b = get_Conv_op(a);
- n_mode = get_irn_mode(n);
- b_mode = get_irn_mode(b);
+ n_mode = intern_get_irn_mode(n);
+ b_mode = intern_get_irn_mode(b);
if (n_mode == b_mode) {
if (n_mode == mode_b) {
if (n_preds == 2) {
ir_node *a = follow_Id (get_Phi_pred(n, 0));
ir_node *b = follow_Id (get_Phi_pred(n, 1));
- if ( (get_irn_op(a) == op_Confirm)
- && (get_irn_op(b) == op_Confirm)
- && (follow_Id (get_irn_n(a, 0)) == follow_Id(get_irn_n(b, 0)))
- && (get_irn_n(a, 1) == get_irn_n (b, 1))
+ if ( (intern_get_irn_op(a) == op_Confirm)
+ && (intern_get_irn_op(b) == op_Confirm)
+ && (follow_Id (intern_get_irn_n(a, 0)) == follow_Id (intern_get_irn_n(b, 0)))
+ && (intern_get_irn_n(a, 1) == intern_get_irn_n (b, 1))
&& (a->data.num == (~b->data.num & irpn_True) )) {
- return follow_Id (get_irn_n(a, 0));
+ return follow_Id (intern_get_irn_n(a, 0));
}
}
#endif
/* skip Id's */
set_Phi_pred(n, i, first_val);
if ( (first_val != n) /* not self pointer */
- && (get_irn_op(first_val) != op_Bad) /* value not dead */
+ && (intern_get_irn_op(first_val) != op_Bad) /* value not dead */
&& !(is_Bad (get_Block_cfgpred(block, i))) ) { /* not dead control flow */
break; /* then found first value. */
}
set_Phi_pred(n, i, scnd_val);
if ( (scnd_val != n)
&& (scnd_val != first_val)
- && (get_irn_op(scnd_val) != op_Bad)
+ && (intern_get_irn_op(scnd_val) != op_Bad)
&& !(is_Bad (get_Block_cfgpred(block, i))) ) {
break;
}
ir_node *a = skip_Proj(get_Load_mem(n));
ir_node *b = get_Load_ptr(n);
- if (get_irn_op(a) == op_Store) {
+ if (intern_get_irn_op(a) == op_Store) {
if ( different_identity (b, get_Store_ptr(a))) {
/* load and store use different pointers, therefore load
needs not take store's memory but the state before. */
ir_node *b = get_Store_ptr(n);
ir_node *c = skip_Proj(get_Store_value(n));
- if (get_irn_op(a) == op_Store
+ if (intern_get_irn_op(a) == op_Store
&& get_Store_ptr(a) == b
&& skip_Proj(get_Store_value(a)) == c) {
/* We have twice exactly the same store -- a write after write. */
n = a; DBG_OPT_WAW;
- } else if (get_irn_op(c) == op_Load
+ } else if (intern_get_irn_op(c) == op_Load
&& (a == c || skip_Proj(get_Load_mem(c)) == a)
&& get_Load_ptr(c) == b ) {
/* We just loaded the value from the same memory, i.e., the store
ir_node *a = get_Proj_pred(n);
- if ( get_irn_op(a) == op_Tuple) {
+ if ( intern_get_irn_op(a) == op_Tuple) {
/* Remove the Tuple/Proj combination. */
if ( get_Proj_proj(n) <= get_Tuple_n_preds(a) ) {
n = get_Tuple_pred(a, get_Proj_proj(n)); DBG_OPT_TUPLE;
assert(0); /* This should not happen! */
n = new_Bad();
}
- } else if (get_irn_mode(n) == mode_X &&
+ } else if (intern_get_irn_mode(n) == mode_X &&
is_Bad(get_nodes_Block(n))) {
/* Remove dead control flow -- early gigo. */
n = new_Bad();
a = get_unop_op(n);
}
- switch (get_irn_opcode(n)) {
+ switch (intern_get_irn_opcode(n)) {
case iro_Cmp:
/* We don't want Cast as input to Cmp. */
- if (get_irn_op(a) == op_Cast) {
+ if (intern_get_irn_op(a) == op_Cast) {
a = get_Cast_op(a);
set_Cmp_left(n, a);
}
- if (get_irn_op(b) == op_Cast) {
+ if (intern_get_irn_op(b) == op_Cast) {
b = get_Cast_op(b);
set_Cmp_right(n, b);
}
ir_node *a = get_DivMod_left(n);
ir_node *b = get_DivMod_right(n);
- ir_mode *mode = get_irn_mode(a);
+ ir_mode *mode = intern_get_irn_mode(a);
- if (!(mode_is_int(mode) && mode_is_int(get_irn_mode(b))))
+ if (!(mode_is_int(mode) && mode_is_int(intern_get_irn_mode(b))))
return n;
if (a == b) {
tarval *ta = value_of(a);
if ((ta != tarval_bad) &&
- (get_irn_mode(a) == mode_b) &&
+ (intern_get_irn_mode(a) == mode_b) &&
(get_opt_unreachable_code())) {
/* It's a boolean Cond, branching on a boolean constant.
Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */
/* We might generate an endless loop, so keep it alive. */
add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n));
} else if ((ta != tarval_bad) &&
- (get_irn_mode(a) == mode_Iu) &&
+ (intern_get_irn_mode(a) == mode_Iu) &&
(get_Cond_kind(n) == dense) &&
(get_opt_unreachable_code())) {
/* I don't want to allow Tuples smaller than the biggest Proj.
set_irn_link(n, new_r_Jmp(current_ir_graph, get_nodes_Block(n)));
/* We might generate an endless loop, so keep it alive. */
add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n));
- } else if ((get_irn_op(a) == op_Eor)
- && (get_irn_mode(a) == mode_b)
+ } else if ((intern_get_irn_op(a) == op_Eor)
+ && (intern_get_irn_mode(a) == mode_b)
&& (tarval_classify(computed_value(get_Eor_right(a))) == TV_CLASSIFY_ONE)) {
/* The Eor is a negate. Generate a new Cond without the negate,
simulate the negate by exchanging the results. */
set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_Block(n),
get_Eor_left(a)));
- } else if ((get_irn_op(a) == op_Not)
- && (get_irn_mode(a) == mode_b)) {
+ } else if ((intern_get_irn_op(a) == op_Not)
+ && (intern_get_irn_mode(a) == mode_b)) {
/* A Not before the Cond. Generate a new Cond without the Not,
simulate the Not by exchanging the results. */
set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_Block(n),
ir_node *a = get_Eor_left(n);
ir_node *b = get_Eor_right(n);
- if ((get_irn_mode(n) == mode_b)
- && (get_irn_op(a) == op_Proj)
- && (get_irn_mode(a) == mode_b)
+ if ((intern_get_irn_mode(n) == mode_b)
+ && (intern_get_irn_op(a) == op_Proj)
+ && (intern_get_irn_mode(a) == mode_b)
&& (tarval_classify (computed_value (b)) == TV_CLASSIFY_ONE)
- && (get_irn_op(get_Proj_pred(a)) == op_Cmp))
+ && (intern_get_irn_op(get_Proj_pred(a)) == op_Cmp))
/* The Eor negates a Cmp. The Cmp has the negated result anyways! */
n = new_r_Proj(current_ir_graph, get_nodes_Block(n), get_Proj_pred(a),
mode_b, get_negated_pnc(get_Proj_proj(a)));
- else if ((get_irn_mode(n) == mode_b)
+ else if ((intern_get_irn_mode(n) == mode_b)
&& (tarval_classify (computed_value (b)) == TV_CLASSIFY_ONE))
/* The Eor is a Not. Replace it by a Not. */
/* ????!!!Extend to bitfield 1111111. */
{
ir_node *a = get_Not_op(n);
- if ( (get_irn_mode(n) == mode_b)
- && (get_irn_op(a) == op_Proj)
- && (get_irn_mode(a) == mode_b)
- && (get_irn_op(get_Proj_pred(a)) == op_Cmp))
+ if ( (intern_get_irn_mode(n) == mode_b)
+ && (intern_get_irn_op(a) == op_Proj)
+ && (intern_get_irn_mode(a) == mode_b)
+ && (intern_get_irn_op(get_Proj_pred(a)) == op_Cmp))
/* We negate a Cmp. The Cmp has the negated result anyways! */
n = new_r_Proj(current_ir_graph, get_nodes_Block(n), get_Proj_pred(a),
mode_b, get_negated_pnc(get_Proj_proj(a)));
if (a == b) return 0;
- if ((get_irn_op(a) != get_irn_op(b)) ||
- (get_irn_mode(a) != get_irn_mode(b))) return 1;
+ if ((intern_get_irn_op(a) != intern_get_irn_op(b)) ||
+ (intern_get_irn_mode(a) != intern_get_irn_mode(b))) return 1;
/* compare if a's in and b's in are equal */
- irn_arity_a = get_irn_arity (a);
- if (irn_arity_a != get_irn_arity(b))
+ irn_arity_a = intern_get_irn_arity (a);
+ if (irn_arity_a != intern_get_irn_arity(b))
return 1;
/* for block-local cse and pinned nodes: */
- if (!get_opt_global_cse() || (get_op_pinned(get_irn_op(a)) == pinned)) {
- if (get_irn_n(a, -1) != get_irn_n(b, -1))
+ if (!get_opt_global_cse() || (get_op_pinned(intern_get_irn_op(a)) == pinned)) {
+ if (intern_get_irn_n(a, -1) != intern_get_irn_n(b, -1))
return 1;
}
/* compare a->in[0..ins] with b->in[0..ins] */
for (i = 0; i < irn_arity_a; i++)
- if (get_irn_n(a, i) != get_irn_n(b, i))
+ if (intern_get_irn_n(a, i) != intern_get_irn_n(b, i))
return 1;
/*
int i, irn_arity;
/* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
- h = irn_arity = get_irn_arity(node);
+ h = irn_arity = intern_get_irn_arity(node);
/* consider all in nodes... except the block. */
for (i = 0; i < irn_arity; i++) {
- h = 9*h + (unsigned long)get_irn_n(node, i);
+ h = 9*h + (unsigned long)intern_get_irn_n(node, i);
}
/* ...mode,... */
- h = 9*h + (unsigned long) get_irn_mode (node);
+ h = 9*h + (unsigned long) intern_get_irn_mode (node);
/* ...and code */
- h = 9*h + (unsigned long) get_irn_op (node);
+ h = 9*h + (unsigned long) intern_get_irn_op (node);
return h;
}
/* TODO: use a generic commutative attribute */
if (get_opt_reassociation()) {
- if (is_op_commutative(get_irn_op(n))) {
+ if (is_op_commutative(intern_get_irn_op(n))) {
/* for commutative operators perform a OP b == b OP a */
if (get_binop_left(n) > get_binop_right(n)) {
ir_node *h = get_binop_left(n);
identify_cons (pset *value_table, ir_node *n) {
ir_node *old = n;
n = identify(value_table, n);
- if (get_irn_n(old, -1) != get_irn_n(n, -1))
+ if (intern_get_irn_n(old, -1) != intern_get_irn_n(n, -1))
set_irg_pinned(current_ir_graph, floats);
return n;
}
gigo (ir_node *node)
{
int i, irn_arity;
- ir_op* op = get_irn_op(node);
+ ir_op* op = intern_get_irn_op(node);
/* remove garbage blocks by looking at control flow that leaves the block
and replacing the control flow by Bad. */
- if (get_irn_mode(node) == mode_X) {
+ if (intern_get_irn_mode(node) == mode_X) {
ir_node *block = get_nodes_block(node);
if (op == op_End) return node; /* Don't optimize End, may have Bads. */
- if (get_irn_op(block) == op_Block && get_Block_matured(block)) {
- irn_arity = get_irn_arity(block);
+ if (intern_get_irn_op(block) == op_Block && get_Block_matured(block)) {
+ irn_arity = intern_get_irn_arity(block);
for (i = 0; i < irn_arity; i++) {
- if (!is_Bad(get_irn_n(block, i))) break;
+ if (!is_Bad(intern_get_irn_n(block, i))) break;
}
if (i == irn_arity) return new_Bad();
}
/* Blocks, Phis and Tuples may have dead inputs, e.g., if one of the
blocks predecessors is dead. */
if ( op != op_Block && op != op_Phi && op != op_Tuple) {
- irn_arity = get_irn_arity(node);
+ irn_arity = intern_get_irn_arity(node);
for (i = -1; i < irn_arity; i++) {
- if (is_Bad(get_irn_n(node, i))) {
+ if (is_Bad(intern_get_irn_n(node, i))) {
return new_Bad();
}
}
/* If Block has only Bads as predecessors it's garbage. */
/* If Phi has only Bads as predecessors it's garbage. */
if ((op == op_Block && get_Block_matured(node)) || op == op_Phi) {
- irn_arity = get_irn_arity(node);
+ irn_arity = intern_get_irn_arity(node);
for (i = 0; i < irn_arity; i++) {
- if (!is_Bad(get_irn_n(node, i))) break;
+ if (!is_Bad(intern_get_irn_n(node, i))) break;
}
if (i == irn_arity) node = new_Bad();
}
{
tarval *tv;
ir_node *old_n = n;
- opcode iro = get_irn_opcode(n);
+ opcode iro = intern_get_irn_opcode(n);
/* Allways optimize Phi nodes: part of the construction. */
if ((!get_opt_optimize()) && (iro != iro_Phi)) return n;
/* constant expression evaluation / constant folding */
if (get_opt_constant_folding()) {
/* constants can not be evaluated */
- if (get_irn_op(n) != op_Const) {
+ if (intern_get_irn_op(n) != op_Const) {
/* try to evaluate */
tv = computed_value (n);
- if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
+ if ((intern_get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
/* evaluation was succesful -- replace the node. */
obstack_free (current_ir_graph->obst, n);
return new_Const (get_tarval_mode (tv), tv);
/* Some more constant expression evaluation that does not allow to
free the node. */
- iro = get_irn_opcode(n);
+ iro = intern_get_irn_opcode(n);
if (get_opt_constant_folding() ||
(iro == iro_Cond) ||
(iro == iro_Proj)) /* Flags tested local. */
n = gigo (n);
/* Now we have a legal, useful node. Enter it in hash table for cse */
- if (get_opt_cse() && (get_irn_opcode(n) != iro_Block)) {
+ if (get_opt_cse() && (intern_get_irn_opcode(n) != iro_Block)) {
n = identify_remember (current_ir_graph->value_table, n);
}
{
tarval *tv;
ir_node *old_n = n;
- opcode iro = get_irn_opcode(n);
+ opcode iro = intern_get_irn_opcode(n);
- if (!get_opt_optimize() && (get_irn_op(n) != op_Phi)) return n;
+ if (!get_opt_optimize() && (intern_get_irn_op(n) != op_Phi)) return n;
/* if not optimize return n */
if (n == NULL) {
if (iro != iro_Const) {
/* try to evaluate */
tv = computed_value (n);
- if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
+ if ((intern_get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
/* evaluation was succesful -- replace the node. */
n = new_Const (get_tarval_mode (tv), tv);
__dbg_info_merge_pair(n, old_n, dbg_const_eval);
}
/* Some more constant expression evaluation. */
- iro = get_irn_opcode(n);
+ iro = intern_get_irn_opcode(n);
if (get_opt_constant_folding() ||
(iro == iro_Cond) ||
(iro == iro_Proj)) /* Flags tested local. */
/* Now we have a legal, useful node. Enter it in hash table for cse.
Blocks should be unique anyways. (Except the successor of start:
is cse with the start block!) */
- if (get_opt_cse() && (get_irn_opcode(n) != iro_Block))
+ if (get_opt_cse() && (intern_get_irn_opcode(n) != iro_Block))
n = identify_remember (current_ir_graph->value_table, n);
return n;
entity *res = resolve_ent_polymorphy2(dynamic_class, static_ent);
if (!res) {
printf(" Could not find entity "); DDME(static_ent);
- printf(" in class"); DDMT(dynamic_class);
+ printf(" in "); DDMT(dynamic_class);
printf("\n");
dump_entity(static_ent);
dump_type(get_entity_owner(static_ent));