ir_node *addr = get_atomic_ent_value(inh_meth);
assert(addr && "constant entity without value");
- if (intern_get_irn_op(addr) == op_Const) {
+ if (get_irn_op(addr) == op_Const) {
impl_meth = tarval_to_entity(get_Const_tarval(addr));
} else {
assert(0 && "Complex constant values not supported -- address of method should be straight constant!");
static void sel_methods_walker(ir_node * node, pmap * ldname_map) {
- if (intern_get_irn_op(node) == op_SymConst) {
+ if (get_irn_op(node) == op_SymConst) {
/* Wenn möglich SymConst-Operation durch Const-Operation
* ersetzen. */
if (get_SymConst_kind(node) == linkage_ptr_info) {
}
}
}
- } else if (intern_get_irn_op(node) == op_Sel &&
+ } else if (get_irn_op(node) == op_Sel &&
is_method_type(get_entity_type(get_Sel_entity(node)))) {
entity * ent = get_Sel_entity(node);
if (get_opt_optimize() && get_opt_dyn_meth_dispatch() &&
- (intern_get_irn_op(skip_Proj(get_Sel_ptr(node))) == op_Alloc)) {
+ (get_irn_op(skip_Proj(get_Sel_ptr(node))) == op_Alloc)) {
ir_node *new_node;
entity *called_ent;
/* We know which method will be called, no dispatch necessary. */
static entity ** NULL_ARRAY = NULL;
entity * ent;
entity ** arr;
- assert(sel && intern_get_irn_op(sel) == op_Sel);
+ assert(sel && get_irn_op(sel) == op_Sel);
ent = get_Sel_entity(sel);
assert(is_method_type(get_entity_type(ent))); /* what else? */
arr = get_entity_link(ent);
}
set_irn_link(node, MARK);
- switch (intern_get_irn_opcode(node)) {
+ switch (get_irn_opcode(node)) {
case iro_Proj: {
/* proj_proj: in einem "sinnvollen" Graphen kommt jetzt ein
* op_Tuple oder ein Knoten, der eine "freie Methode"
* zurückgibt. */
ir_node * pred = get_Proj_pred(node);
if (get_irn_link(pred) != MARK) {
- if (intern_get_irn_op(pred) == op_Tuple) {
+ if (get_irn_op(pred) == op_Tuple) {
callee_ana_proj(get_Tuple_pred(pred, get_Proj_proj(node)), n, methods);
} else {
eset_insert(methods, MARK); /* free method -> unknown */
}
set_irn_link(node, MARK);
- switch (intern_get_irn_opcode(node)) {
+ switch (get_irn_opcode(node)) {
case iro_SymConst:
/* externe Methode (wegen fix_symconst!) */
eset_insert(methods, MARK); /* free method -> unknown */
static void callee_walker(ir_node * call, void * env) {
- if (intern_get_irn_op(call) == op_Call) {
+ if (get_irn_op(call) == op_Call) {
eset * methods = eset_create();
entity * ent;
entity ** arr = NEW_ARR_F(entity *, 0);
return;
}
set_irn_link(node, MARK);
- switch (intern_get_irn_opcode(node)) {
+ switch (get_irn_opcode(node)) {
case iro_Proj: {
/* proj_proj: in einem "sinnvollen" Graphen kommt jetzt ein
* op_Tuple oder ein Knoten, der in "free_ana_walker" behandelt
* wird. */
ir_node * pred = get_Proj_pred(node);
- if (get_irn_link(pred) != MARK && intern_get_irn_op(pred) == op_Tuple) {
+ if (get_irn_link(pred) != MARK && get_irn_op(pred) == op_Tuple) {
free_mark_proj(get_Tuple_pred(pred, get_Proj_proj(node)), n, set);
} else {
/* nothing: da in "free_ana_walker" behandelt. */
return; /* already visited */
}
set_irn_link(node, MARK);
- switch (intern_get_irn_opcode(node)) {
+ switch (get_irn_opcode(node)) {
case iro_Sel: {
entity * ent = get_Sel_entity(node);
if (is_method_type(get_entity_type(ent))) {
/* bereits in einem Zyklus besucht. */
return;
}
- switch (intern_get_irn_opcode(node)) {
+ switch (get_irn_opcode(node)) {
/* special nodes */
case iro_Sel:
case iro_SymConst:
set_irn_link(node, MARK);
for (i = get_Call_arity(node) - 1; i >= 0; --i) {
ir_node * pred = get_Call_param(node, i);
- if (mode_is_reference(intern_get_irn_mode(pred))) {
+ if (mode_is_reference(get_irn_mode(pred))) {
free_mark(pred, set);
}
}
* jemand das Gegenteil implementiert. */
default:
set_irn_link(node, MARK);
- for (i = intern_get_irn_arity(node) - 1; i >= 0; --i) {
+ for (i = get_irn_arity(node) - 1; i >= 0; --i) {
ir_node * pred = get_irn_n(node, i);
- if (mode_is_reference(intern_get_irn_mode(pred))) {
- free_mark(pred, set);
+ if (mode_is_reference(get_irn_mode(pred))) {
+ free_mark(pred, set);
}
}
break;
* very careful!
*/
static INLINE int *mere_get_backarray(ir_node *n) {
- switch(intern_get_irn_opcode(n)) {
+ switch (get_irn_opcode(n)) {
case iro_Block:
if (!get_Block_matured(n)) return NULL;
if (interprocedural_view && n->attr.block.in_cg) {
INLINE void fix_backedges(struct obstack *obst, ir_node *n) {
- opcode opc = intern_get_irn_opcode(n);
+ opcode opc = get_irn_opcode(n);
int *arr = mere_get_backarray(n);
if (ARR_LEN(arr) == ARR_LEN(get_irn_in(n))-1)
return;
int i;
int *ba = get_backarray (n);
if (ba)
- for (i = 0; i < intern_get_irn_arity(n); i++)
+ for (i = 0; i < get_irn_arity(n); i++)
if (ba[i]) return true;
return false;
}
interprocedural_view = 0;
ba = get_backarray (n);
if (ba)
- for (i = 0; i < intern_get_irn_arity(n); i++)
+ for (i = 0; i < get_irn_arity(n); i++)
ba[i] = 0;
interprocedural_view = 1;
ba = get_backarray (n);
if (ba)
- for (i = 0; i < intern_get_irn_arity(n); i++)
+ for (i = 0; i < get_irn_arity(n); i++)
ba[i] = 0;
interprocedural_view = rem;
}
recursion must end. */
assert(is_Block(n));
if ((get_Block_n_cfgpreds(n) == 1) &&
- (intern_get_irn_op(skip_Proj(get_Block_cfgpred(n, 0))) == op_Start) &&
+ (get_irn_op(skip_Proj(get_Block_cfgpred(n, 0))) == op_Start) &&
(get_nodes_Block(skip_Proj(get_Block_cfgpred(n, 0))) == n)) {
return true;
}
assert(is_Block(n));
if (!is_outermost_StartBlock(n)) {
- arity = intern_get_irn_arity(n);
+ arity = get_irn_arity(n);
for (i = 0; i < arity; i++) {
- ir_node *pred = get_nodes_block(skip_Proj(intern_get_irn_n(n, i)));
+ ir_node *pred = get_nodes_block(skip_Proj(get_irn_n(n, i)));
if (is_backedge(n, i)) continue;
if (!irn_is_in_stack(pred)) {
some_outof_loop = 1;
int i, index = -2, min = -1;
if (!is_outermost_StartBlock(n)) {
- int arity = intern_get_irn_arity(n);
+ int arity = get_irn_arity(n);
for (i = 0; i < arity; i++) {
- ir_node *pred = get_nodes_block(skip_Proj(intern_get_irn_n(n, i)));
+ ir_node *pred = get_nodes_block(skip_Proj(get_irn_n(n, i)));
if (is_backedge(n, i) || !irn_is_in_stack(pred)) continue;
if (get_irn_dfn(pred) >= limit && (min == -1 || get_irn_dfn(pred) < min)) {
index = i;
int i, index = -2, max = -1;
if (!is_outermost_StartBlock(n)) {
- int arity = intern_get_irn_arity(n);
+ int arity = get_irn_arity(n);
for (i = 0; i < arity; i++) {
- ir_node *pred = get_nodes_block(skip_Proj(intern_get_irn_n(n, i)));
+ ir_node *pred = get_nodes_block(skip_Proj(get_irn_n(n, i)));
if (is_backedge (n, i) || !irn_is_in_stack(pred)) continue;
if (get_irn_dfn(pred) > max) {
index = i;
assert (res_index > -2);
set_backedge (m, res_index);
- return is_outermost_StartBlock(n) ? NULL : get_nodes_block(skip_Proj(intern_get_irn_n(m, res_index)));
+ return is_outermost_StartBlock(n) ? NULL : get_nodes_block(skip_Proj(get_irn_n(m, res_index)));
}
/*-----------------------------------------------------------*
so is_backedge does not access array[-1] but correctly returns false! */
if (!is_outermost_StartBlock(n)) {
- int arity = intern_get_irn_arity(n);
+ int arity = get_irn_arity(n);
for (i = 0; i < arity; i++) {
ir_node *m;
if (is_backedge(n, i)) continue;
- m = get_nodes_block(skip_Proj(intern_get_irn_n(n, i)));
+ m = get_nodes_block(skip_Proj(get_irn_n(n, i)));
cfscc (m);
if (irn_is_in_stack(m)) {
tmp_dom_info *v;
/* Step 2 */
- irn_arity = intern_get_irn_arity(w->block);
+ irn_arity = get_irn_arity(w->block);
for (j = 0; j < irn_arity; j++) {
ir_node *pred = get_nodes_Block(get_Block_cfgpred(w->block, j));
tmp_dom_info *u;
assert (bl->out_valid);
#endif
for (i = 0; i < (int)bl->out[0]; i++)
- if ((intern_get_irn_mode(bl->out[i+1]) == mode_X) &&
- (intern_get_irn_op(bl->out[i+1]) != op_End)) n_cfg_outs++;
+ if ((get_irn_mode(bl->out[i+1]) == mode_X) &&
+ (get_irn_op(bl->out[i+1]) != op_End)) n_cfg_outs++;
return n_cfg_outs;
}
assert (bl->out_valid);
#endif
for (i = 0; i < (int)bl->out[0]; i++)
- if ((intern_get_irn_mode(bl->out[i+1]) == mode_X) &&
- (intern_get_irn_op(bl->out[i+1]) != op_End)) {
+ if ((get_irn_mode(bl->out[i+1]) == mode_X) &&
+ (get_irn_op(bl->out[i+1]) != op_End)) {
if (out_pos == pos) {
- ir_node *cfop = bl->out[i+1];
- return cfop->out[0+1];
+ ir_node *cfop = bl->out[i+1];
+ return cfop->out[0+1];
} else {
- out_pos++;
+ out_pos++;
}
}
return NULL;
irg_walk_func *pre, irg_walk_func *post,
void *env) {
- assert((get_irn_op(node) == op_Block) || (intern_get_irn_mode(node) == mode_X));
+ assert((get_irn_op(node) == op_Block) || (get_irn_mode(node) == mode_X));
inc_irg_block_visited(current_ir_graph);
- if (intern_get_irn_mode(node) == mode_X) node = node->out[1];
+ if (get_irn_mode(node) == mode_X) node = node->out[1];
irg_out_block_walk2(node, pre, post, env);
set_irn_visited(n, get_irg_visited(current_ir_graph));
n->out = (ir_node **) 1; /* Space for array size. */
- if ((intern_get_irn_op(n) == op_Block)) start = 0; else start = -1;
- irn_arity = intern_get_irn_arity(n);
+ start = get_irn_op(n) == op_Block ? 0 : -1;
+ irn_arity = get_irn_arity(n);
res = irn_arity - start +1; /* --1 or --0; 1 for array size. */
for (i = start; i < irn_arity; i++) {
/* Optimize Tuples. They annoy if walking the cfg. */
- succ = skip_Tuple(intern_get_irn_n(n, i));
+ succ = skip_Tuple(get_irn_n(n, i));
set_irn_n(n, i, succ);
/* count outs for successors */
if (get_irn_visited(succ) < get_irg_visited(current_ir_graph)) {
edge. */
n->out[0] = (ir_node *)0;
- if (intern_get_irn_op(n) == op_Block) start = 0; else start = -1;
- irn_arity = intern_get_irn_arity(n);
+ if (get_irn_op(n) == op_Block) start = 0; else start = -1;
+ irn_arity = get_irn_arity(n);
for (i = start; i < irn_arity; i++) {
- succ = intern_get_irn_n(n, i);
+ succ = get_irn_n(n, i);
/* Recursion */
if (get_irn_visited(succ) < get_irg_visited(current_ir_graph))
free = set_out_edges(succ, free);
if (get_Block_n_cfg_outs(get_irg_start_block(irg))) {
startbl = get_irg_start_block(irg);
for (i = 0; i < get_irn_n_outs(startbl); i++)
- if (intern_get_irn_mode(get_irn_out(startbl, i)) == mode_X)
- proj = get_irn_out(startbl, i);
+ if (get_irn_mode(get_irn_out(startbl, i)) == mode_X)
+ proj = get_irn_out(startbl, i);
if (get_irn_out(proj, 0) == startbl) {
assert(get_irn_n_outs(proj) == 2);
set_irn_out(proj, 0, get_irn_out(proj, 1));
int *anz = (int *) env, arity, i, start;
ir_node *succ;
- arity = 1 + intern_get_irn_arity(node)
+ arity = 1 + get_irn_arity(node)
+ ((is_Block(node)) ? 0 : 1);
*anz += arity;
start = (is_Block(node)) ? 0 : -1;
- for(i = start; i < intern_get_irn_arity(node); i++) {
- succ = intern_get_irn_n(node, i);
+ for(i = start; i < get_irn_arity(node); i++) {
+ succ = get_irn_n(node, i);
succ->out = (ir_node **)((int)succ->out + 1);
}
}
ir_node *succ;
int start = (!is_Block(node)) ? -1 : 0;
- for(i = start; i < intern_get_irn_arity(node); i++)
- {
- succ = intern_get_irn_n(node, i);
- succ->out[get_irn_n_outs(succ)+1] = node;
- succ->out[0] = (ir_node *) (get_irn_n_outs(succ) + 1);
- }
+ for(i = start; i < get_irn_arity(node); i++) {
+ succ = get_irn_n(node, i);
+ succ->out[get_irn_n_outs(succ)+1] = node;
+ succ->out[0] = (ir_node *) (get_irn_n_outs(succ) + 1);
+ }
}
void free_ip_outs(void)
{
ir_node **out_edges = get_irp_ip_outedges();
- if (out_edges != NULL)
- {
- free(out_edges);
- set_irp_ip_outedges(NULL);
- }
+ if (out_edges != NULL) {
+ free(out_edges);
+ set_irp_ip_outedges(NULL);
+ }
irp->outs_state = no_outs;
}
/* Also init nodes not visible in intraproc_view. */
/* @@@ init_node is called for too many nodes -- this wastes memory!.
The mem is not lost as its on the obstack. */
- if (intern_get_irn_op(n) == op_Filter) {
+ if (get_irn_op(n) == op_Filter) {
for (i = 0; i < get_Filter_n_cg_preds(n); i++)
init_node(get_Filter_cg_pred(n, i), NULL);
}
- if (intern_get_irn_op(n) == op_Block) {
+ if (get_irn_op(n) == op_Block) {
for (i = 0; i < get_Block_cg_n_cfgpreds(n); i++) {
init_node(get_Block_cg_cfgpred(n, i), NULL);
}
}
/* The following pattern matches only after a call from above pattern. */
- if ((intern_get_irn_op(n) == op_Proj) /*&& (get_Proj_proj(n) == 0)*/) {
+ if ((get_irn_op(n) == op_Proj) /*&& (get_Proj_proj(n) == 0)*/) {
/* @@@ init_node is called for every proj -- this wastes memory!.
The mem is not lost as its on the obstack. */
ir_node *cb = get_Proj_pred(n);
- if ((intern_get_irn_op(cb) == op_CallBegin) ||
- (intern_get_irn_op(cb) == op_EndReg) ||
- (intern_get_irn_op(cb) == op_EndExcept)) {
+ if ((get_irn_op(cb) == op_CallBegin) ||
+ (get_irn_op(cb) == op_EndReg) ||
+ (get_irn_op(cb) == op_EndExcept)) {
init_node(cb, NULL);
init_node(get_nodes_Block(cb), NULL);
}
static bool is_outermost_Start(ir_node *n) {
/* Test whether this is the outermost Start node. If so
recursion must end. */
- if ((intern_get_irn_op(n) == op_Block) &&
+ if ((get_irn_op(n) == op_Block) &&
(get_Block_n_cfgpreds(n) == 1) &&
- (intern_get_irn_op(skip_Proj(get_Block_cfgpred(n, 0))) == op_Start) &&
+ (get_irn_op(skip_Proj(get_Block_cfgpred(n, 0))) == op_Start) &&
(get_nodes_Block(skip_Proj(get_Block_cfgpred(n, 0))) == n)) {
return true;
}
not possible in interprocedural view as outermost_graph is
not necessarily the only with a dead-end start block.
Besides current_ir_graph is not set properly. */
- if ((intern_get_irn_op(n) == op_Block) &&
+ if ((get_irn_op(n) == op_Block) &&
(n == get_irg_start_block(current_ir_graph))) {
if ((!interprocedural_view) ||
(current_ir_graph == outermost_ir_graph))
not reachable.
I.e., with this code, the order on the loop tree is correct. But a (single)
test showed the loop tree is deeper. */
- if (intern_get_irn_op(n) == op_Phi ||
- intern_get_irn_op(n) == op_Block ||
- (intern_get_irn_op(n) == op_Filter && interprocedural_view) ||
+ if (get_irn_op(n) == op_Phi ||
+ get_irn_op(n) == op_Block ||
+ (get_irn_op(n) == op_Filter && interprocedural_view) ||
(get_irg_pinned(get_irn_irg(n)) == floats &&
get_op_pinned(get_irn_op(n)) == floats))
// Here we could test for backedge at -1 which is illegal
But it guarantees that Blocks are analysed before nodes contained in the
block. If so, we can set the value to undef if the block is not \
executed. */
- if (is_cfop(n) || is_fragile_op(n) || intern_get_irn_op(n) == op_Start)
+ if (is_cfop(n) || is_fragile_op(n) || get_irn_op(n) == op_Start)
return -1;
else
return 0;
if (interprocedural_view) {
/* Only Filter and Block nodes can have predecessors in other graphs. */
- if (intern_get_irn_op(n) == op_Filter)
+ if (get_irn_op(n) == op_Filter)
n = get_nodes_Block(n);
- if (intern_get_irn_op(n) == op_Block) {
+ if (get_irn_op(n) == op_Block) {
ir_node *cfop = skip_Proj(get_Block_cfgpred(n, index));
if (is_ip_cfop(cfop)) {
current_ir_graph = get_irn_irg(cfop);
current_ir_graph = get_irn_irg(m);
break;
}
- if (intern_get_irn_op(m) == op_Filter) {
+ if (get_irn_op(m) == op_Filter) {
/* Find the corresponding ip_cfop */
ir_node *pred = stack[i+1];
int j;
/* Test for legal loop header: Block, Phi, ... */
INLINE static bool is_possible_loop_head(ir_node *n) {
- ir_op *op = intern_get_irn_op(n);
+ ir_op *op = get_irn_op(n);
return ((op == op_Block) ||
(op == op_Phi) ||
((op == op_Filter) && interprocedural_view));
return false;
if (!is_outermost_Start(n)) {
- arity = intern_get_irn_arity(n);
+ arity = get_irn_arity(n);
for (i = get_start_index(n); i < arity; i++) {
- ir_node *pred = intern_get_irn_n(n, i);
+ ir_node *pred = get_irn_n(n, i);
assert(pred);
if (is_backedge(n, i)) continue;
if (!irn_is_in_stack(pred)) {
int i, index = -2, min = -1;
if (!is_outermost_Start(n)) {
- int arity = intern_get_irn_arity(n);
+ int arity = get_irn_arity(n);
for (i = get_start_index(n); i < arity; i++) {
- ir_node *pred = intern_get_irn_n(n, i);
+ ir_node *pred = get_irn_n(n, i);
assert(pred);
if (is_backedge(n, i) || !irn_is_in_stack(pred)) continue;
if (get_irn_dfn(pred) >= limit && (min == -1 || get_irn_dfn(pred) < min)) {
int i, index = -2, max = -1;
if (!is_outermost_Start(n)) {
- int arity = intern_get_irn_arity(n);
+ int arity = get_irn_arity(n);
for (i = get_start_index(n); i < arity; i++) {
- ir_node *pred = intern_get_irn_n(n, i);
+ ir_node *pred = get_irn_n(n, i);
if (is_backedge (n, i) || !irn_is_in_stack(pred)) continue;
if (get_irn_dfn(pred) > max) {
index = i;
assert (res_index > -2);
set_backedge (m, res_index);
- return is_outermost_Start(n) ? NULL : intern_get_irn_n(m, res_index);
+ return is_outermost_Start(n) ? NULL : get_irn_n(m, res_index);
}
so is_backedge does not access array[-1] but correctly returns false! */
if (!is_outermost_Start(n)) {
- int arity = intern_get_irn_arity(n);
+ int arity = get_irn_arity(n);
#if EXPERIMENTAL_LOOP_TREE
ir_node *m;
if (is_backedge(n, i)) continue;
/* printf("i: %d\n", i); */
- m = intern_get_irn_n(n, i); /* get_irn_ip_pred(n, i); */
- /* if ((!m) || (intern_get_irn_op(m) == op_Unknown)) continue; */
+ m = get_irn_n(n, i); /* get_irn_ip_pred(n, i); */
+ /* if ((!m) || (get_irn_op(m) == op_Unknown)) continue; */
scc (m);
if (irn_is_in_stack(m)) {
/* Uplink of m is smaller if n->m is a backedge.
static type *find_type_for_Proj(ir_node *n) {
type *tp;
ir_node *pred = skip_Tuple(get_Proj_pred(n));
- ir_mode *m = intern_get_irn_mode(n);
+ ir_mode *m = get_irn_mode(n);
if (m == mode_T ||
m == mode_BB ||
m == mode_b )
return none_type;
- switch(intern_get_irn_opcode(pred)) {
+ switch (get_irn_opcode(pred)) {
case iro_Proj: {
ir_node *pred_pred;
/* Deal with Start / Call here: we need to know the Proj Nr. */
assert(get_irn_mode(pred) == mode_T);
pred_pred = get_Proj_pred(pred);
- if (intern_get_irn_op(pred_pred) == op_Start) {
+ if (get_irn_op(pred_pred) == op_Start) {
type *mtp = get_entity_type(get_irg_ent(get_Start_irg(pred_pred)));
tp = get_method_param_type(mtp, get_Proj_proj(n));
- } else if (intern_get_irn_op(pred_pred) == op_Call) {
+ } else if (get_irn_op(pred_pred) == op_Call) {
type *mtp = get_Call_type(pred_pred);
tp = get_method_res_type(mtp, get_Proj_proj(n));
} else {
tp2 = compute_irn_type(b);
}
- switch(intern_get_irn_opcode(n)) {
+ switch(get_irn_opcode(n)) {
case iro_InstOf: {
assert(0 && "op_InstOf not supported");
} break;
case iro_Load: {
ir_node *a = get_Load_ptr(n);
- if (intern_get_irn_op(a) == op_Sel)
+ if (get_irn_op(a) == op_Sel)
tp = get_entity_type(get_Sel_entity(a));
- else if ((intern_get_irn_op(a) == op_Const) &&
+ else if ((get_irn_op(a) == op_Const) &&
(tarval_is_entity(get_Const_tarval(a))))
tp = get_entity_type(tarval_to_entity(get_Const_tarval(a)));
else if (is_pointer_type(compute_irn_type(a))) {
/* catch special cases with fallthrough to binop/unop cases in default. */
case iro_Sub: {
- if (mode_is_int(intern_get_irn_mode(n)) &&
- mode_is_reference(intern_get_irn_mode(a)) &&
- mode_is_reference(intern_get_irn_mode(b)) ) {
+ if (mode_is_int(get_irn_mode(n)) &&
+ mode_is_reference(get_irn_mode(a)) &&
+ mode_is_reference(get_irn_mode(b)) ) {
VERBOSE_UNKNOWN_TYPE(("Sub %ld ptr - ptr = int: unknown type\n", get_irn_node_nr(n)));
tp = unknown_type; break;
}
} /* fall through to Add. */
case iro_Add: {
- if (mode_is_reference(intern_get_irn_mode(n)) &&
- mode_is_reference(intern_get_irn_mode(a)) &&
- mode_is_int(intern_get_irn_mode(b)) ) {
+ if (mode_is_reference(get_irn_mode(n)) &&
+ mode_is_reference(get_irn_mode(a)) &&
+ mode_is_int(get_irn_mode(b)) ) {
tp = tp1; break;
}
- if (mode_is_reference(intern_get_irn_mode(n)) &&
- mode_is_int(intern_get_irn_mode(a)) &&
- mode_is_reference(intern_get_irn_mode(b)) ) {
+ if (mode_is_reference(get_irn_mode(n)) &&
+ mode_is_int(get_irn_mode(a)) &&
+ mode_is_reference(get_irn_mode(b)) ) {
tp = tp2; break;
}
goto default_code;
} break;
case iro_Mul: {
- if (intern_get_irn_mode(n) != intern_get_irn_mode(a)) {
+ if (get_irn_mode(n) != get_irn_mode(a)) {
VERBOSE_UNKNOWN_TYPE(("Mul %ld int1 * int1 = int2: unknown type\n", get_irn_node_nr(n)));
tp = unknown_type; break;
}
}
}
-void rta_cleanup ()
+void rta_cleanup (void)
{
if (_live_classes) {
eset_destroy (_live_classes);
int rta_is_alive_graph (ir_graph *graph)
{
+ entity *meth;
+
if (eset_contains (_live_graphs, graph)) {
return (TRUE);
}
return (FALSE);
}
- entity *meth = get_irg_ent (graph);
+ meth = get_irg_ent (graph);
if (has_live_call (meth, graph) && has_live_class (meth, graph)) {
eset_insert (_live_graphs, graph);
/*
* $Log$
+ * Revision 1.7 2004/06/15 11:44:54 beck
+ * New inlining schema implemented:
+ *
+ * small functions that should be inlined in libFirm are implemented in _t.h files
+ * with a __ prefix.
+ * Preprocessor magic is used to automatically inline these functions whenever a _t.h
+ * file is included instead of a .h file.
+ * Note that this magic did not work outside libFirm without accessing _t.h files.
+ *
* Revision 1.6 2004/06/14 13:02:03 goetz
* bugfixesbug
*
# include "entity.h"
-void rta_init (int);
-void rta_cleanup (void);
+void rta_init(int verbose);
+void rta_cleanup(void);
-int rta_is_alive_class (type*);
-int rta_is_alive_graph (ir_graph*);
-int rta_is_alive_field (entity*);
+int rta_is_alive_class(type *clazz);
+int rta_is_alive_graph(ir_graph *graph);
+int rta_is_alive_field(entity *field);
#endif /* def _RTA_H_ */
/*
* $Log$
+ * Revision 1.4 2004/06/15 11:44:54 beck
+ * New inlining schema implemented:
+ *
+ * small functions that should be inlined in libFirm are implemented in _t.h files
+ * with a __ prefix.
+ * Preprocessor magic is used to automatically inline these functions whenever a _t.h
+ * file is included instead of a .h file.
+ * Note that this magic did not work outside libFirm without accessing _t.h files.
+ *
* Revision 1.3 2004/06/13 15:03:45 liekweg
* RTA auf Iterative RTA aufgebohrt --flo
*
void fw_collect_irn(ir_node *irn, void *env)
{
fw_data *data;
- ir_mode* mode = intern_get_irn_mode(irn);
+ ir_mode *mode = get_irn_mode(irn);
/* The link field will be cleared in the walk_do_mode()
callback function. */
ir_node * call;
/* Die Call-Knoten sind (mit den Proj-Knoten) am End-Knoten verlinkt! */
for (call = get_irn_link(get_irg_end(irg)); call; call = get_irn_link(call)) {
- if (intern_get_irn_op(call) != op_Call) continue;
+ if (get_irn_op(call) != op_Call) continue;
for (j = get_Call_n_callees(call) - 1; j >= 0; --j) {
entity * ent = get_Call_callee(call, j);
if (ent) {
* (auch bei Proj->Call Operationen) und Phi-Operationen in die Liste ihres
* Grundblocks einfügen. */
static void collect_phicallproj_walker(ir_node * node, ir_node ** call_tail) {
- if (intern_get_irn_op(node) == op_Call) {
+ if (get_irn_op(node) == op_Call) {
/* Die Liste von Call an call_tail anhängen. */
ir_node * link;
assert(get_irn_link(*call_tail) == NULL);
set_irn_link(*call_tail, node);
/* call_tail aktualisieren: */
for (link = get_irn_link(*call_tail); link; *call_tail = link, link = get_irn_link(link)) ;
- } else if (intern_get_irn_op(node) == op_Proj) {
+ } else if (get_irn_op(node) == op_Proj) {
ir_node * head = skip_Proj(get_Proj_pred(node));
set_irn_link(node, get_irn_link(head));
set_irn_link(head, node);
if (head == *call_tail) {
*call_tail = node;
}
- } else if (intern_get_irn_op(node) == op_Phi) {
+ } else if (get_irn_op(node) == op_Phi) {
ir_node * block = get_nodes_Block(node);
set_irn_link(node, get_irn_link(block));
set_irn_link(block, node);
static ir_node * exchange_proj(ir_node * proj) {
ir_node * filter;
assert(get_irn_op(proj) == op_Proj);
- filter = new_Filter(get_Proj_pred(proj), intern_get_irn_mode(proj), get_Proj_proj(proj));
+ filter = new_Filter(get_Proj_pred(proj), get_irn_mode(proj), get_Proj_proj(proj));
/* Die Proj- (Id-) Operation sollte im gleichen Grundblock stehen, wie die
* Filter-Operation. */
set_nodes_Block(proj, get_nodes_Block(filter));
* dass oben für "verschiedene" Proj-Operationen wegen CSE nur eine
* Filter-Operation erzeugt worden sein kann. */
for (link = get_irg_start(irg), proj = get_irn_link(link); proj; proj = get_irn_link(proj)) {
- if (intern_get_irn_op(proj) == op_Id) { /* replaced with filter */
+ if (get_irn_op(proj) == op_Id) { /* replaced with filter */
ir_node * filter = get_Id_pred(proj);
assert(get_irn_op(filter) == op_Filter);
if (filter != link && get_irn_link(filter) == NULL) {
if (data->open) {
set_Block_cg_cfgpred(start_block, 0, get_cg_Unknown(mode_X));
for (proj = get_irn_link(get_irg_start(irg)); proj; proj = get_irn_link(proj)) {
- if (intern_get_irn_op(proj) == op_Filter) {
- set_Filter_cg_pred(proj, 0, get_cg_Unknown(intern_get_irn_mode(proj)));
+ if (get_irn_op(proj) == op_Filter) {
+ set_Filter_cg_pred(proj, 0, get_cg_Unknown(get_irn_mode(proj)));
}
}
data->count = 1;
int n_ret = 0;
for (i = get_Block_n_cfgpreds(end_block) - 1; i >= 0; --i) {
- if (intern_get_irn_op(cfgpred_arr[i]) == op_Return) {
+ if (get_irn_op(cfgpred_arr[i]) == op_Return) {
if (ret_arr) {
ARR_APP1(ir_node *, ret_arr, cfgpred_arr[i]);
} else {
/* In[0] could be a Bad node with wrong mode. */
for (i = n_ret - 1; i >= 0; --i) {
in[i] = get_Return_res(ret_arr[i], j);
- if (!mode && intern_get_irn_mode(in[i]) != mode_T)
- mode = intern_get_irn_mode(in[i]);
+ if (!mode && get_irn_mode(in[i]) != mode_T)
+ mode = get_irn_mode(in[i]);
}
if (mode)
data->res[j] = new_Phi(n_ret, in, mode);
int n_except = 0;
ir_node ** cfgpred_arr = get_Block_cfgpred_arr(end_block);
for (i = get_Block_n_cfgpreds(end_block) - 1; i >= 0; --i) {
- if (intern_get_irn_op(cfgpred_arr[i]) != op_Return) {
+ if (get_irn_op(cfgpred_arr[i]) != op_Return) {
if (except_arr) {
ARR_APP1(ir_node *, except_arr, cfgpred_arr[i]);
} else {
/* mem */
for (i = n_except - 1; i >= 0; --i) {
ir_node * node = skip_Proj(except_arr[i]);
- if (intern_get_irn_op(node) == op_Call) {
+ if (get_irn_op(node) == op_Call) {
in[i] = new_r_Proj(irg, get_nodes_Block(node), node, mode_M, 3);
- } else if (intern_get_irn_op(node) == op_Raise) {
+ } else if (get_irn_op(node) == op_Raise) {
in[i] = new_r_Proj(irg, get_nodes_Block(node), node, mode_M, 1);
} else {
assert(is_fragile_op(node));
int i;
ir_node *proj;
- for (i = intern_get_irn_arity(node) - 1; i >= 0; --i) {
- ir_node * pred = intern_get_irn_n(node, i);
+ for (i = get_irn_arity(node) - 1; i >= 0; --i) {
+ ir_node * pred = get_irn_n(node, i);
if (get_nodes_Block(pred) == from_block) {
move_nodes(from_block, to_block, pred);
}
/* Move projs of this node. */
proj = get_irn_link(node);
for (; proj; proj = skip_Id(get_irn_link(proj))) {
- if (intern_get_irn_op(proj) != op_Proj && intern_get_irn_op(proj) != op_Filter) continue;
- if ((get_nodes_Block(proj) == from_block) && (skip_Proj(intern_get_irn_n(proj, 0)) == node))
+ if (get_irn_op(proj) != op_Proj && get_irn_op(proj) != op_Filter) continue;
+ if ((get_nodes_Block(proj) == from_block) && (skip_Proj(get_irn_n(proj, 0)) == node))
set_nodes_Block(proj, to_block);
}
}
set_Block_cg_cfgpred(get_nodes_Block(start), data->count, exec);
for (filter = get_irn_link(start); filter; filter = get_irn_link(filter)) {
- if (intern_get_irn_op(filter) != op_Filter) continue;
+ if (get_irn_op(filter) != op_Filter) continue;
if (get_Proj_pred(filter) == start) {
switch ((int) get_Proj_proj(filter)) {
case pns_global_store:
/* "frame_base" wird nur durch Unknown dargestellt. Man kann ihn aber
* auch explizit darstellen, wenn sich daraus Vorteile für die
* Datenflussanalyse ergeben. */
- set_Filter_cg_pred(filter, data->count, get_cg_Unknown(intern_get_irn_mode(filter)));
+ set_Filter_cg_pred(filter, data->count, get_cg_Unknown(get_irn_mode(filter)));
break;
case pns_globals:
/* "globals" wird nur durch Unknown dargestellt. Man kann ihn aber auch
* explizit darstellen, wenn sich daraus Vorteile für die
* Datenflussanalyse ergeben. */
- set_Filter_cg_pred(filter, data->count, get_cg_Unknown(intern_get_irn_mode(filter)));
+ set_Filter_cg_pred(filter, data->count, get_cg_Unknown(get_irn_mode(filter)));
break;
default:
/* not reached */
/* Mit CSE könnte man das effizienter machen! Die Methode wird aber für jede
* Aufrufstelle nur ein einziges Mal aufgerufen. */
ir_node * proj;
- for (proj = get_irn_link(call); proj && intern_get_irn_op(proj) == op_Proj; proj = get_irn_link(proj)) {
- if (get_Proj_proj(proj) == 1 && intern_get_irn_op(get_Proj_pred(proj)) == op_Call) {
+ for (proj = get_irn_link(call); proj && get_irn_op(proj) == op_Proj; proj = get_irn_link(proj)) {
+ if (get_Proj_proj(proj) == 1 && get_irn_op(get_Proj_pred(proj)) == op_Call) {
return proj;
}
}
* interprozedurale Vorgänger einfügen. */
set_irg_current_block(current_ir_graph, post_block);
for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
- if (intern_get_irn_op(proj) != op_Proj) continue;
+ if (get_irn_op(proj) != op_Proj) continue;
if (skip_Proj(get_Proj_pred(proj)) != call) continue;
if (get_Proj_pred(proj) == call) {
if (get_Proj_proj(proj) == 0) { /* memory */
set_irn_link(filter, get_irn_link(post_block));
set_irn_link(post_block, filter);
}
- fill_result(get_Proj_proj(filter), n_callees, data, in, intern_get_irn_mode(filter));
+ fill_result(get_Proj_proj(filter), n_callees, data, in, get_irn_mode(filter));
set_Filter_cg_pred_arr(filter, n_callees, in);
}
}
/* construct calls */
for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
ir_node * node;
+ int n_callees;
+
current_ir_graph = get_irp_irg(i);
for (node = get_irn_link(get_irg_end(current_ir_graph)); node; node = get_irn_link(node)) {
- if (intern_get_irn_op(node) == op_Call) {
- int n_callees = get_Call_n_callees(node);
+ if (get_irn_op(node) == op_Call) {
+ n_callees = get_Call_n_callees(node);
if (n_callees > 1 || (n_callees == 1 && get_Call_callee(node, 0) != NULL)) {
construct_call(node);
}
static void destruct_walker(ir_node * node, void * env) {
- if (intern_get_irn_op(node) == op_Block) {
+ if (get_irn_op(node) == op_Block) {
remove_Block_cg_cfgpred_arr(node);
- } else if (intern_get_irn_op(node) == op_Filter) {
+ } else if (get_irn_op(node) == op_Filter) {
set_irg_current_block(current_ir_graph, get_nodes_Block(node));
- exchange(node, new_Proj(get_Filter_pred(node), intern_get_irn_mode(node), get_Filter_proj(node)));
- } else if (intern_get_irn_op(node) == op_Break) {
+ exchange(node, new_Proj(get_Filter_pred(node), get_irn_mode(node), get_Filter_proj(node)));
+ } else if (get_irn_op(node) == op_Break) {
set_irg_current_block(current_ir_graph, get_nodes_Block(node));
exchange(node, new_Jmp());
- } else if (intern_get_irn_op(node) == op_Call) {
+ } else if (get_irn_op(node) == op_Call) {
remove_Call_callee_arr(node);
- } else if (intern_get_irn_op(node) == op_Proj) {
+ } else if (get_irn_op(node) == op_Proj) {
    /* some ProjX end up in strange blocks. */
set_nodes_block(node, get_nodes_block(get_Proj_pred(node)));
}
/* Don't assert that block matured: the use of this constructor is strongly
restricted ... */
if ( get_Block_matured(block) )
- assert( intern_get_irn_arity(block) == arity );
+ assert( get_irn_arity(block) == arity );
res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
for (i = arity-1; i >= 0; i--)
- if (intern_get_irn_op(in[i]) == op_Unknown) {
+ if (get_irn_op(in[i]) == op_Unknown) {
has_unknown = true;
break;
}
new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
{
ir_node *res;
- res = new_ir_node (db, irg, block, op_Cast, intern_get_irn_mode(op), 1, &op);
+ res = new_ir_node (db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
res->attr.cast.totype = to_tp;
res = optimize_node (res);
irn_vrfy_irg (res, irg);
in[0] = val;
in[1] = bound;
- res = new_ir_node (db, irg, block, op_Confirm, intern_get_irn_mode(val), 2, in);
+ res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
res->attr.confirm_cmp = cmp;
current_ir_graph->n_loc);
memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
- for (i = arity-1; i >= 0; i--) if (intern_get_irn_op(in[i]) == op_Unknown) has_unknown = true;
+ for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
if (!has_unknown) res = optimize_node (res);
current_ir_graph->current_block = res;
if (block->attr.block.matured) { /* case 3 */
/* The Phi has the same amount of ins as the corresponding block. */
- int ins = intern_get_irn_arity(block);
+ int ins = get_irn_arity(block);
ir_node **nin;
NEW_ARR_A (ir_node *, nin, ins);
finished yet. */
opt = get_opt_optimize(); set_optimize(0);
/* Here we rely on the fact that all frag ops have Memory as first result! */
- if (intern_get_irn_op(n) == op_Call)
+ if (get_irn_op(n) == op_Call)
arr[0] = new_Proj(n, mode_M, 3);
else
arr[0] = new_Proj(n, mode_M, 0);
static INLINE ir_node **
get_frag_arr (ir_node *n) {
- if (intern_get_irn_op(n) == op_Call) {
+ if (get_irn_op(n) == op_Call) {
return n->attr.call.frag_arr;
- } else if (intern_get_irn_op(n) == op_Alloc) {
+ } else if (get_irn_op(n) == op_Alloc) {
return n->attr.a.frag_arr;
} else {
return n->attr.frag_arr;
/* There was a set_value after the cfOp and no get_value before that
set_value. We must build a Phi node now. */
if (block->attr.block.matured) {
- int ins = intern_get_irn_arity(block);
+ int ins = get_irn_arity(block);
ir_node **nin;
NEW_ARR_A (ir_node *, nin, ins);
res = phi_merge(block, pos, mode, nin, ins);
assert (prevBlock);
if (!is_Bad(prevBlock)) {
#if PRECISE_EXC_CONTEXT
- if (is_fragile_op(prevCfOp) && (intern_get_irn_op (prevCfOp) != op_Bad)) {
+ if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
} else
if (block->attr.block.matured) { /* case 3 */
/* The Phi has the same amount of ins as the corresponding block. */
- int ins = intern_get_irn_arity(block);
+ int ins = get_irn_arity(block);
ir_node **nin;
NEW_ARR_A (ir_node *, nin, ins);
memop, op1, op2);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
- (intern_get_irn_op(res) == op_Quot)) /* Could be optimized away. */
+ (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
res->attr.frag_arr = new_frag_arr(res);
#endif
memop, op1, op2);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
- (intern_get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
+ (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
res->attr.frag_arr = new_frag_arr(res);
#endif
memop, op1, op2);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
- (intern_get_irn_op(res) == op_Div)) /* Could be optimized away. */
+ (get_irn_op(res) == op_Div)) /* Could be optimized away. */
res->attr.frag_arr = new_frag_arr(res);
#endif
memop, op1, op2);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
- (intern_get_irn_op(res) == op_Mod)) /* Could be optimized away. */
+ (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
res->attr.frag_arr = new_frag_arr(res);
#endif
store, callee, arity, in, tp);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
- (intern_get_irn_op(res) == op_Call)) /* Could be optimized away. */
+ (get_irn_op(res) == op_Call)) /* Could be optimized away. */
res->attr.call.frag_arr = new_frag_arr(res);
#endif
store, addr);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
- (intern_get_irn_op(res) == op_Load)) /* Could be optimized away. */
+ (get_irn_op(res) == op_Load)) /* Could be optimized away. */
res->attr.frag_arr = new_frag_arr(res);
#endif
store, addr, val);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
- (intern_get_irn_op(res) == op_Store)) /* Could be optimized away. */
+ (get_irn_op(res) == op_Store)) /* Could be optimized away. */
res->attr.frag_arr = new_frag_arr(res);
#endif
store, size, alloc_type, where);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
- (intern_get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
+ (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
res->attr.a.frag_arr = new_frag_arr(res);
#endif
{
assert(node);
set_irn_op(node, op_Tuple);
- if (intern_get_irn_arity(node) == arity) {
+ if (get_irn_arity(node) == arity) {
/* keep old array */
} else {
/* Allocate new array, don't free old in_array, it's on the obstack. */
static void
collect (ir_node *n, void *env) {
ir_node *pred;
- if (intern_get_irn_op(n) == op_Phi) {
+ if (get_irn_op(n) == op_Phi) {
set_irn_link(n, get_irn_link(get_nodes_Block(n)));
set_irn_link(get_nodes_Block(n), n);
}
- if (intern_get_irn_op(n) == op_Proj) {
+ if (get_irn_op(n) == op_Proj) {
pred = n;
- while (intern_get_irn_op(pred) == op_Proj)
+ while (get_irn_op(pred) == op_Proj)
pred = get_Proj_pred(pred);
set_irn_link(n, get_irn_link(pred));
set_irn_link(pred, n);
set_nodes_Block(node, to_bl);
/* move its projs */
- if (intern_get_irn_mode(node) == mode_T) {
+ if (get_irn_mode(node) == mode_T) {
proj = get_irn_link(node);
while (proj) {
if (get_nodes_Block(proj) == from_bl)
}
/* recursion ... */
- if (intern_get_irn_op(node) == op_Phi) return;
+ if (get_irn_op(node) == op_Phi) return;
- for (i = 0; i < intern_get_irn_arity(node); i++) {
+ for (i = 0; i < get_irn_arity(node); i++) {
pred = get_irn_n(node, i);
if (get_nodes_Block(pred) == from_bl)
move(pred, from_bl, to_bl);
int i, irn_arity;
ir_node *optimized, *old;
- irn_arity = intern_get_irn_arity(n);
+ irn_arity = get_irn_arity(n);
for (i = 0; i < irn_arity; i++) {
/* get_irn_n skips Id nodes, so comparison old != optimized does not
show all optimizations. Therefore always set new predecessor. */
- old = intern_get_irn_intra_n(n, i);
+ old = get_irn_intra_n(n, i);
optimized = optimize_in_place_2(old);
set_irn_n(n, i, optimized);
}
- if (intern_get_irn_op(n) == op_Block) {
+ if (get_irn_op(n) == op_Block) {
optimized = optimize_in_place_2(n);
if (optimized != n) exchange (n, optimized);
}
return block_v - irg_v;
} else {
/* compute the number of good predecessors */
- res = irn_arity = intern_get_irn_arity(b);
+ res = irn_arity = get_irn_arity(b);
for (i = 0; i < irn_arity; i++)
- if (intern_get_irn_opcode(intern_get_irn_n(b, i)) == iro_Bad) res--;
+ if (get_irn_opcode(get_irn_n(b, i)) == iro_Bad) res--;
/* save it in the flag. */
set_Block_block_visited(b, irg_v + res);
return res;
/* TODO: add an ir_op operation */
static INLINE void new_backedge_info(ir_node *n) {
- switch(intern_get_irn_opcode(n)) {
+ switch(get_irn_opcode(n)) {
case iro_Block:
n->attr.block.cg_backedge = NULL;
- n->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, intern_get_irn_arity(n));
+ n->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
break;
case iro_Phi:
- n->attr.phi_backedge = new_backedge_arr(current_ir_graph->obst, intern_get_irn_arity(n));
+ n->attr.phi_backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
break;
case iro_Filter:
- n->attr.filter.backedge = new_backedge_arr(current_ir_graph->obst, intern_get_irn_arity(n));
+ n->attr.filter.backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
break;
default: ;
}
the End node. */
/* assert(n->op == op_End || ((_ARR_DESCR(n->in))->cookie != ARR_F_MAGIC)); */
- if (intern_get_irn_opcode(n) == iro_Block) {
+ if (get_irn_opcode(n) == iro_Block) {
block = NULL;
new_arity = compute_new_arity(n);
n->attr.block.graph_arr = NULL;
} else {
block = get_nodes_Block(n);
- if (intern_get_irn_opcode(n) == iro_Phi) {
+ if (get_irn_opcode(n) == iro_Phi) {
new_arity = compute_new_arity(block);
} else {
- new_arity = intern_get_irn_arity(n);
+ new_arity = get_irn_arity(n);
}
}
nn = new_ir_node(get_irn_dbg_info(n),
current_ir_graph,
block,
- intern_get_irn_op(n),
- intern_get_irn_mode(n),
+ get_irn_op(n),
+ get_irn_mode(n),
new_arity,
get_irn_in(n));
/* Copy the attributes. These might point to additional data. If this
/* printf("\n old node: "); DDMSG2(n);
printf(" new node: "); DDMSG2(nn);
- printf(" arities: old: %d, new: %d\n", intern_get_irn_arity(n), intern_get_irn_arity(nn)); */
+ printf(" arities: old: %d, new: %d\n", get_irn_arity(n), get_irn_arity(nn)); */
- if (intern_get_irn_opcode(n) == iro_Block) {
+ if (get_irn_opcode(n) == iro_Block) {
/* Don't copy Bad nodes. */
j = 0;
- irn_arity = intern_get_irn_arity(n);
+ irn_arity = get_irn_arity(n);
for (i = 0; i < irn_arity; i++)
- if (intern_get_irn_opcode(intern_get_irn_n(n, i)) != iro_Bad) {
- set_irn_n (nn, j, get_new_node(intern_get_irn_n(n, i)));
+ if (get_irn_opcode(get_irn_n(n, i)) != iro_Bad) {
+ set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
/*if (is_backedge(n, i)) set_backedge(nn, j);*/
j++;
}
that the fields in ir_graph are set properly. */
if ((get_opt_control_flow_straightening()) &&
(get_Block_n_cfgpreds(nn) == 1) &&
- (intern_get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp))
+ (get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp))
exchange(nn, get_nodes_Block(get_Block_cfgpred(nn, 0)));
- } else if (intern_get_irn_opcode(n) == iro_Phi) {
+ } else if (get_irn_opcode(n) == iro_Phi) {
/* Don't copy node if corresponding predecessor in block is Bad.
The Block itself should not be Bad. */
block = get_nodes_Block(n);
set_irn_n (nn, -1, get_new_node(block));
j = 0;
- irn_arity = intern_get_irn_arity(n);
+ irn_arity = get_irn_arity(n);
for (i = 0; i < irn_arity; i++)
- if (intern_get_irn_opcode(intern_get_irn_n(block, i)) != iro_Bad) {
- set_irn_n (nn, j, get_new_node(intern_get_irn_n(n, i)));
+ if (get_irn_opcode(get_irn_n(block, i)) != iro_Bad) {
+ set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
/*if (is_backedge(n, i)) set_backedge(nn, j);*/
j++;
}
set_Block_block_visited(get_nodes_Block(n), 0);
/* Compacting the Phi's ins might generate Phis with only one
predecessor. */
- if (intern_get_irn_arity(n) == 1)
- exchange(n, intern_get_irn_n(n, 0));
+ if (get_irn_arity(n) == 1)
+ exchange(n, get_irn_n(n, 0));
} else {
- irn_arity = intern_get_irn_arity(n);
+ irn_arity = get_irn_arity(n);
for (i = -1; i < irn_arity; i++)
- set_irn_n (nn, i, get_new_node(intern_get_irn_n(n, i)));
+ set_irn_n (nn, i, get_new_node(get_irn_n(n, i)));
}
/* Now the new node is complete. We can add it to the hash table for cse.
@@@ inlinening aborts if we identify End. Why? */
- if(intern_get_irn_op(nn) != op_End)
+ if(get_irn_op(nn) != op_End)
add_identities (current_ir_graph->value_table, nn);
}
/*- ... and now the keep alives. -*/
/* First pick the not marked block nodes and walk them. We must pick these
first as else we will oversee blocks reachable from Phis. */
- irn_arity = intern_get_irn_arity(oe);
+ irn_arity = get_irn_arity(oe);
for (i = 0; i < irn_arity; i++) {
- ka = intern_get_irn_intra_n(oe, i);
- if ((intern_get_irn_op(ka) == op_Block) &&
+ ka = get_irn_intra_n(oe, i);
+ if ((get_irn_op(ka) == op_Block) &&
(get_irn_visited(ka) < get_irg_visited(current_ir_graph))) {
/* We must keep the block alive and copy everything reachable */
set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
}
/* Now pick the Phis. Here we will keep all! */
- irn_arity = intern_get_irn_arity(oe);
+ irn_arity = get_irn_arity(oe);
for (i = 0; i < irn_arity; i++) {
- ka = intern_get_irn_intra_n(oe, i);
- if ((intern_get_irn_op(ka) == op_Phi)) {
+ ka = get_irn_intra_n(oe, i);
+ if ((get_irn_op(ka) == op_Phi)) {
if (get_irn_visited(ka) < get_irg_visited(current_ir_graph)) {
/* We didn't copy the Phi yet. */
set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
/* if link field of block is NULL, look for bad predecessors otherwise
this is allready done */
- if (intern_get_irn_op(n) == op_Block &&
+ if (get_irn_op(n) == op_Block &&
get_irn_link(n) == NULL) {
/* save old predecessors in link field (position 0 is the block operand)*/
set_irn_link(n, (void *)get_irn_in(n));
/* count predecessors without bad nodes */
- old_irn_arity = intern_get_irn_arity(n);
+ old_irn_arity = get_irn_arity(n);
for (i = 0; i < old_irn_arity; i++)
- if (!is_Bad(intern_get_irn_n(n, i))) new_irn_arity++;
+ if (!is_Bad(get_irn_n(n, i))) new_irn_arity++;
/* arity changing: set new predecessors without bad nodes */
if (new_irn_arity < old_irn_arity) {
new_in[0] = NULL;
new_irn_n = 1;
for (i = 1; i < old_irn_arity; i++) {
- irn = intern_get_irn_n(n, i);
+ irn = get_irn_n(n, i);
if (!is_Bad(irn)) new_in[new_irn_n++] = irn;
}
n->in = new_in;
int i, old_irn_arity, new_irn_arity;
/* relink bad predeseccors of a block */
- if (intern_get_irn_op(n) == op_Block)
+ if (get_irn_op(n) == op_Block)
relink_bad_block_predecessors(n, env);
/* If Phi node relink its block and its predecessors */
- if (intern_get_irn_op(n) == op_Phi) {
+ if (get_irn_op(n) == op_Phi) {
/* Relink predeseccors of phi's block */
block = get_nodes_Block(n);
type *frame_tp = (type *)env;
copy_node(n, NULL);
- if (intern_get_irn_op(n) == op_Sel) {
+ if (get_irn_op(n) == op_Sel) {
new = get_new_node (n);
- assert(intern_get_irn_op(new) == op_Sel);
+ assert(get_irn_op(new) == op_Sel);
if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
set_Sel_entity(new, get_entity_link(get_Sel_entity(n)));
}
- } else if (intern_get_irn_op(n) == op_Block) {
+ } else if (get_irn_op(n) == op_Block) {
new = get_new_node (n);
new->attr.block.irg = current_ir_graph;
}
/* -- Precompute some values -- */
end_bl = get_new_node(get_irg_end_block(called_graph));
end = get_new_node(get_irg_end(called_graph));
- arity = intern_get_irn_arity(end_bl); /* arity = n_exc + n_ret */
+ arity = get_irn_arity(end_bl); /* arity = n_exc + n_ret */
n_res = get_method_n_ress(get_Call_type(call));
res_pred = (ir_node **) malloc (n_res * sizeof (ir_node *));
set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */
/* -- archive keepalives -- */
- irn_arity = intern_get_irn_arity(end);
+ irn_arity = get_irn_arity(end);
for (i = 0; i < irn_arity; i++)
- add_End_keepalive(get_irg_end(current_ir_graph), intern_get_irn_n(end, i));
+ add_End_keepalive(get_irg_end(current_ir_graph), get_irn_n(end, i));
/* The new end node will die. We need not free as the in array is on the obstack:
copy_node only generated 'D' arrays. */
n_ret = 0;
for (i = 0; i < arity; i++) {
ir_node *ret;
- ret = intern_get_irn_n(end_bl, i);
- if (intern_get_irn_op(ret) == op_Return) {
+ ret = get_irn_n(end_bl, i);
+ if (get_irn_op(ret) == op_Return) {
cf_pred[n_ret] = new_r_Jmp(current_ir_graph, get_nodes_Block(ret));
n_ret++;
}
/* First the Memory-Phi */
n_ret = 0;
for (i = 0; i < arity; i++) {
- ret = intern_get_irn_n(end_bl, i);
- if (intern_get_irn_op(ret) == op_Return) {
+ ret = get_irn_n(end_bl, i);
+ if (get_irn_op(ret) == op_Return) {
cf_pred[n_ret] = get_Return_mem(ret);
n_ret++;
}
for (j = 0; j < n_res; j++) {
n_ret = 0;
for (i = 0; i < arity; i++) {
- ret = intern_get_irn_n(end_bl, i);
- if (intern_get_irn_op(ret) == op_Return) {
+ ret = get_irn_n(end_bl, i);
+ if (get_irn_op(ret) == op_Return) {
cf_pred[n_ret] = get_Return_res(ret, j);
n_ret++;
}
}
- phi = new_Phi(n_ret, cf_pred, intern_get_irn_mode(cf_pred[0]));
+ phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
res_pred[j] = phi;
/* Conserve Phi-list for further inlinings -- but might be optimized */
if (get_nodes_Block(phi) == post_bl) {
n_exc = 0;
for (i = 0; i < arity; i++) {
ir_node *ret;
- ret = intern_get_irn_n(end_bl, i);
- if (is_fragile_op(skip_Proj(ret)) || (intern_get_irn_op(skip_Proj(ret)) == op_Raise)) {
+ ret = get_irn_n(end_bl, i);
+ if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
cf_pred[n_exc] = ret;
n_exc++;
}
n_exc = 0;
for (i = 0; i < arity; i++) {
ir_node *ret;
- ret = skip_Proj(intern_get_irn_n(end_bl, i));
- if (intern_get_irn_op(ret) == op_Call) {
+ ret = skip_Proj(get_irn_n(end_bl, i));
+ if (get_irn_op(ret) == op_Call) {
cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 3);
n_exc++;
} else if (is_fragile_op(ret)) {
/* We rely that all cfops have the memory output at the same position. */
cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 0);
n_exc++;
- } else if (intern_get_irn_op(ret) == op_Raise) {
+ } else if (get_irn_op(ret) == op_Raise) {
cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 1);
n_exc++;
}
/* assert(exc_handling == 1 || no exceptions. ) */
n_exc = 0;
for (i = 0; i < arity; i++) {
- ir_node *ret = intern_get_irn_n(end_bl, i);
+ ir_node *ret = get_irn_n(end_bl, i);
- if (is_fragile_op(skip_Proj(ret)) || (intern_get_irn_op(skip_Proj(ret)) == op_Raise)) {
+ if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
cf_pred[n_exc] = ret;
n_exc++;
}
}
main_end_bl = get_irg_end_block(current_ir_graph);
- main_end_bl_arity = intern_get_irn_arity(main_end_bl);
+ main_end_bl_arity = get_irn_arity(main_end_bl);
end_preds = (ir_node **) malloc ((n_exc + main_end_bl_arity) * sizeof (ir_node *));
for (i = 0; i < main_end_bl_arity; ++i)
- end_preds[i] = intern_get_irn_n(main_end_bl, i);
+ end_preds[i] = get_irn_n(main_end_bl, i);
for (i = 0; i < n_exc; ++i)
end_preds[main_end_bl_arity + i] = cf_pred[i];
set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
end_bl = get_irg_end_block(current_ir_graph);
for (i = 0; i < get_Block_n_cfgpreds(end_bl); i++) {
cf_op = get_Block_cfgpred(end_bl, i);
- if (intern_get_irn_op(cf_op) == op_Proj) {
+ if (get_irn_op(cf_op) == op_Proj) {
cf_op = get_Proj_pred(cf_op);
- if ((intern_get_irn_op(cf_op) == op_Tuple) && (cf_op == call)) {
+ if ((get_irn_op(cf_op) == op_Tuple) && (cf_op == call)) {
/* There are unoptimized tuples from inlineing before when no exc */
assert(get_Proj_proj(get_Block_cfgpred(end_bl, i)) == pn_Call_X_except);
cf_op = get_Tuple_pred(cf_op, pn_Call_X_except);
- assert(intern_get_irn_op(cf_op) == op_Jmp);
+ assert(get_irn_op(cf_op) == op_Jmp);
break;
}
}
assert(get_irn_op(call) == op_Call);
addr = get_Call_ptr(call);
- if (intern_get_irn_op(addr) == op_Const) {
+ if (get_irn_op(addr) == op_Const) {
/* Check whether the constant is the pointer to a compiled entity. */
tv = get_Const_tarval(addr);
if (tarval_to_entity(tv))
tarval *tv;
ir_graph *called_irg;
- if (intern_get_irn_op(call) != op_Call) return;
+ if (get_irn_op(call) != op_Call) return;
addr = get_Call_ptr(call);
- if (intern_get_irn_op(addr) == op_Const) {
+ if (get_irn_op(addr) == op_Const) {
/* Check whether the constant is the pointer to a compiled entity. */
tv = get_Const_tarval(addr);
if (tarval_to_entity(tv)) {
static void collect_calls2(ir_node *call, void *env) {
inline_irg_env *x = (inline_irg_env *)env;
- ir_op *op = intern_get_irn_op(call);
+ ir_op *op = get_irn_op(call);
ir_graph *callee;
/* count nodes in irg */
mark_irn_visited(n);
/* Place floating nodes. */
- if (get_op_pinned(intern_get_irn_op(n)) == floats) {
+ if (get_op_pinned(get_irn_op(n)) == floats) {
int depth = 0;
ir_node *b = new_Bad(); /* The block to place this node in */
- assert(intern_get_irn_op(n) != op_Block);
+ assert(get_irn_op(n) != op_Block);
- if ((intern_get_irn_op(n) == op_Const) ||
- (intern_get_irn_op(n) == op_SymConst) ||
+ if ((get_irn_op(n) == op_Const) ||
+ (get_irn_op(n) == op_SymConst) ||
(is_Bad(n)) ||
- (intern_get_irn_op(n) == op_Unknown)) {
+ (get_irn_op(n) == op_Unknown)) {
/* These nodes will not be placed by the loop below. */
b = get_irg_start_block(current_ir_graph);
depth = 1;
}
/* find the block for this node. */
- irn_arity = intern_get_irn_arity(n);
+ irn_arity = get_irn_arity(n);
for (i = 0; i < irn_arity; i++) {
- ir_node *dep = intern_get_irn_n(n, i);
+ ir_node *dep = get_irn_n(n, i);
ir_node *dep_block;
if ((irn_not_visited(dep)) &&
- (get_op_pinned(intern_get_irn_op(dep)) == floats)) {
+ (get_op_pinned(get_irn_op(dep)) == floats)) {
place_floats_early(dep, worklist);
}
/* Because all loops contain at least one pinned node, now all
}
/* Add predecessors of non floating nodes on worklist. */
- start = (intern_get_irn_op(n) == op_Block) ? 0 : -1;
- irn_arity = intern_get_irn_arity(n);
+ start = (get_irn_op(n) == op_Block) ? 0 : -1;
+ irn_arity = get_irn_arity(n);
for (i = start; i < irn_arity; i++) {
- ir_node *pred = intern_get_irn_n(n, i);
+ ir_node *pred = get_irn_n(n, i);
if (irn_not_visited(pred)) {
pdeq_putr (worklist, pred);
}
/* Compute the latest block into which we can place a node so that it is
before consumer. */
- if (intern_get_irn_op(consumer) == op_Phi) {
+ if (get_irn_op(consumer) == op_Phi) {
/* our consumer is a Phi-node, the effective use is in all those
blocks through which the Phi-node reaches producer */
int i, irn_arity;
ir_node *phi_block = get_nodes_Block(consumer);
- irn_arity = intern_get_irn_arity(consumer);
+ irn_arity = get_irn_arity(consumer);
for (i = 0; i < irn_arity; i++) {
- if (intern_get_irn_n(consumer, i) == producer) {
+ if (get_irn_n(consumer, i) == producer) {
block = get_nodes_Block(get_Block_cfgpred(phi_block, i));
}
}
assert (irn_not_visited(n)); /* no multiple placement */
/* no need to place block nodes, control nodes are already placed. */
- if ((intern_get_irn_op(n) != op_Block) &&
+ if ((get_irn_op(n) != op_Block) &&
(!is_cfop(n)) &&
- (intern_get_irn_mode(n) != mode_X)) {
+ (get_irn_mode(n) != mode_X)) {
/* Remember the early placement of this block to move it
out of loop no further than the early placement. */
early = get_nodes_Block(n);
producer of one of their inputs in the same block anyway. */
for (i = 0; i < get_irn_n_outs(n); i++) {
ir_node *succ = get_irn_out(n, i);
- if (irn_not_visited(succ) && (intern_get_irn_op(succ) != op_Phi))
+ if (irn_not_visited(succ) && (get_irn_op(succ) != op_Phi))
place_floats_late(succ, worklist);
}
/* We have to determine the final block of this node... except for
constants. */
- if ((get_op_pinned(intern_get_irn_op(n)) == floats) &&
- (intern_get_irn_op(n) != op_Const) &&
- (intern_get_irn_op(n) != op_SymConst)) {
+ if ((get_op_pinned(get_irn_op(n)) == floats) &&
+ (get_irn_op(n) != op_Const) &&
+ (get_irn_op(n) != op_SymConst)) {
ir_node *dca = NULL; /* deepest common ancestor in the
dominator tree of all nodes'
blocks depending on us; our final
int i;
set_irn_link(n, NULL);
- if (intern_get_irn_op(n) == op_Block) {
+ if (get_irn_op(n) == op_Block) {
/* Remove Tuples */
for (i = 0; i < get_Block_n_cfgpreds(n); i++)
/* GL @@@ : is this possible? if (get_opt_normalize()) -- added, all tests go through.
A different order of optimizations might cause problems. */
if (get_opt_normalize())
set_Block_cfgpred(n, i, skip_Tuple(get_Block_cfgpred(n, i)));
- } else if (get_opt_optimize() && (intern_get_irn_mode(n) == mode_X)) {
+ } else if (get_opt_optimize() && (get_irn_mode(n) == mode_X)) {
/* We will soon visit a block. Optimize it before visiting! */
ir_node *b = get_nodes_Block(n);
ir_node *new_node = equivalent_node(b);
if (is_no_Block(n)) {
ir_node *b = get_nodes_Block(n);
- if ((intern_get_irn_op(n) == op_Phi)) {
+ if ((get_irn_op(n) == op_Phi)) {
/* Collect Phi nodes to compact ins along with block's ins. */
set_irn_link(n, get_irn_link(b));
set_irn_link(b, n);
- } else if (intern_get_irn_op(n) != op_Jmp) { /* Check for non empty block. */
+ } else if (get_irn_op(n) != op_Jmp) { /* Check for non empty block. */
mark_Block_block_visited(b);
}
}
/*- Fix the Phi nodes -*/
phi = get_irn_link(b);
while (phi) {
- assert(intern_get_irn_op(phi) == op_Phi);
+ assert(get_irn_op(phi) == op_Phi);
/* Find the new predecessors for the Phi */
n_preds = 0;
for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
ir_node *phi_pred = get_Phi_pred(phi, i);
for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
if (get_nodes_Block(phi_pred) == pred) {
- assert(intern_get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */
+ assert(get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */
in[n_preds] = get_Phi_pred(phi_pred, j);
} else {
in[n_preds] = phi_pred;
if (get_Block_block_visited(pred)+1 < get_irg_block_visited(current_ir_graph)) {
phi = get_irn_link(pred);
while (phi) {
- if (intern_get_irn_op(phi) == op_Phi) {
+ if (get_irn_op(phi) == op_Phi) {
set_nodes_Block(phi, b);
n_preds = 0;
for(i = 0; i < get_End_n_keepalives(end); i++) {
ir_node *ka = get_End_keepalive(end, i);
if (irn_not_visited(ka)) {
- if ((intern_get_irn_op(ka) == op_Block) && Block_not_block_visited(ka)) {
+ if ((get_irn_op(ka) == op_Block) && Block_not_block_visited(ka)) {
set_irg_block_visited(current_ir_graph, /* Don't walk all the way to Start. */
get_irg_block_visited(current_ir_graph)-1);
irg_block_walk(ka, optimize_blocks, NULL, NULL);
mark_irn_visited(ka);
ARR_APP1 (ir_node *, in, ka);
- } else if (intern_get_irn_op(ka) == op_Phi) {
+ } else if (get_irn_op(ka) == op_Phi) {
mark_irn_visited(ka);
ARR_APP1 (ir_node *, in, ka);
}
ir_node *pre, *block, **in, *jmp;
/* Block has multiple predecessors */
- if ((op_Block == intern_get_irn_op(n)) &&
- (intern_get_irn_arity(n) > 1)) {
- arity = intern_get_irn_arity(n);
+ if ((op_Block == get_irn_op(n)) &&
+ (get_irn_arity(n) > 1)) {
+ arity = get_irn_arity(n);
if (n == get_irg_end_block(current_ir_graph))
return; /* No use to add a block here. */
for (i=0; i<arity; i++) {
- pre = intern_get_irn_n(n, i);
+ pre = get_irn_n(n, i);
/* Predecessor has multiple successors. Insert new flow edge */
if ((NULL != pre) &&
- (op_Proj == intern_get_irn_op(pre)) &&
- op_Raise != intern_get_irn_op(skip_Proj(pre))) {
+ (op_Proj == get_irn_op(pre)) &&
+ op_Raise != get_irn_op(skip_Proj(pre))) {
/* set predecessor array for new block */
in = NEW_ARR_D (ir_node *, current_ir_graph->obst, 1);
}
ir_node *
-get_irg_start_block (ir_graph *irg)
+(get_irg_start_block)(ir_graph *irg)
{
- return irg->start_block;
+ return __get_irg_start_block(irg);
}
void
-set_irg_start_block (ir_graph *irg, ir_node *node)
+(set_irg_start_block)(ir_graph *irg, ir_node *node)
{
- irg->start_block = node;
+ __set_irg_start_block(irg, node);
}
ir_node *
-get_irg_start (ir_graph *irg)
+(get_irg_start)(ir_graph *irg)
{
- return irg->start;
+ return __get_irg_start(irg);
}
void
-set_irg_start(ir_graph *irg, ir_node *node)
+(set_irg_start)(ir_graph *irg, ir_node *node)
{
- irg->start = node;
+ __set_irg_start(irg, node);
}
ir_node *
-get_irg_end_block (ir_graph *irg)
+(get_irg_end_block)(ir_graph *irg)
{
- return irg->end_block;
+ return __get_irg_end_block(irg);
}
void
-set_irg_end_block (ir_graph *irg, ir_node *node)
+(set_irg_end_block)(ir_graph *irg, ir_node *node)
{
- irg->end_block = node;
+ __set_irg_end_block(irg, node);
}
ir_node *
-get_irg_end (ir_graph *irg)
+(get_irg_end)(ir_graph *irg)
{
- return irg->end;
+ return __get_irg_end(irg);
}
void
-set_irg_end (ir_graph *irg, ir_node *node)
+(set_irg_end)(ir_graph *irg, ir_node *node)
{
- irg->end = node;
+ __set_irg_end(irg, node);
}
ir_node *
-get_irg_end_reg (ir_graph *irg) {
- return irg->end_reg;
+(get_irg_end_reg)(ir_graph *irg) {
+ return __get_irg_end_reg(irg);
}
+
void set_irg_end_reg (ir_graph *irg, ir_node *node) {
assert(get_irn_op(node) == op_EndReg || get_irn_op(node) == op_End);
irg->end_reg = node;
}
-ir_node *get_irg_end_except (ir_graph *irg) {
- return irg->end_except;
+ir_node *
+(get_irg_end_except)(ir_graph *irg) {
+ return __get_irg_end_except(irg);
}
void set_irg_end_except (ir_graph *irg, ir_node *node) {
}
ir_node *
-get_irg_cstore (ir_graph *irg)
+(get_irg_cstore)(ir_graph *irg)
{
- return irg->cstore;
+ return __get_irg_cstore(irg);
}
void
-set_irg_cstore (ir_graph *irg, ir_node *node)
+(set_irg_cstore)(ir_graph *irg, ir_node *node)
{
- irg->cstore = node;
+ __set_irg_cstore(irg, node);
}
ir_node *
-get_irg_frame (ir_graph *irg)
+(get_irg_frame)(ir_graph *irg)
{
- return irg->frame;
+ return __get_irg_frame(irg);
}
void
-set_irg_frame (ir_graph *irg, ir_node *node)
+(set_irg_frame)(ir_graph *irg, ir_node *node)
{
- irg->frame = node;
+ __set_irg_frame(irg, node);
}
ir_node *
-get_irg_globals (ir_graph *irg)
+(get_irg_globals)(ir_graph *irg)
{
- return irg->globals;
+ return __get_irg_globals(irg);
}
void
-set_irg_globals (ir_graph *irg, ir_node *node)
+(set_irg_globals)(ir_graph *irg, ir_node *node)
{
- irg->globals = node;
+ __set_irg_globals(irg, node);
}
ir_node *
-get_irg_args (ir_graph *irg)
+(get_irg_args)(ir_graph *irg)
{
- return irg->args;
+ return __get_irg_args(irg);
}
void
-set_irg_args (ir_graph *irg, ir_node *node)
+(set_irg_args)(ir_graph *irg, ir_node *node)
{
- irg->args = node;
+ __set_irg_args(irg, node);
}
ir_node *
-get_irg_bad (ir_graph *irg)
+(get_irg_bad)(ir_graph *irg)
{
- return irg->bad;
+ return __get_irg_bad(irg);
}
void
-set_irg_bad (ir_graph *irg, ir_node *node)
+(set_irg_bad)(ir_graph *irg, ir_node *node)
{
- irg->bad = node;
+ __set_irg_bad(irg, node);
}
/* GL removed: we need unknown with mode for analyses.
*/
ir_node *
-get_irg_current_block (ir_graph *irg)
+(get_irg_current_block)(ir_graph *irg)
{
- return irg->current_block;
+ return __get_irg_current_block(irg);
}
void
-set_irg_current_block (ir_graph *irg, ir_node *node)
+(set_irg_current_block)(ir_graph *irg, ir_node *node)
{
- irg->current_block = node;
+ __set_irg_current_block(irg, node);
}
entity *
-get_irg_ent (ir_graph *irg)
+(get_irg_ent)(ir_graph *irg)
{
- assert(irg && irg->ent);
- return irg->ent;
+ return __get_irg_ent(irg);
}
void
-set_irg_ent (ir_graph *irg, entity *ent)
+(set_irg_ent)(ir_graph *irg, entity *ent)
{
- irg->ent = ent;
+ __set_irg_ent(irg, ent);
}
type *
-get_irg_frame_type (ir_graph *irg)
+(get_irg_frame_type)(ir_graph *irg)
{
- assert(irg && irg->frame_type);
- return irg->frame_type;
+ return __get_irg_frame_type(irg);
}
void
-set_irg_frame_type (ir_graph *irg, type *ftp)
+(set_irg_frame_type)(ir_graph *irg, type *ftp)
{
- assert(is_class_type(ftp));
- irg->frame_type = ftp;
+ __set_irg_frame_type(irg, ftp);
}
/* Returns the obstack associated with the graph. */
-struct obstack *get_irg_obstack(ir_graph *irg) {
- return irg->obst;
+struct obstack *
+(get_irg_obstack)(ir_graph *irg) {
+ return __get_irg_obstack(irg);
}
/*
}
-INLINE void
-set_irg_link (ir_graph *irg, void *thing) {
- irg->link = thing;
+void
+(set_irg_link)(ir_graph *irg, void *thing) {
+ __set_irg_link(irg, thing);
}
-INLINE void *
-get_irg_link (ir_graph *irg) {
- return irg->link;
+void *
+(get_irg_link)(ir_graph *irg) {
+ return __get_irg_link(irg);
}
/* maximum visited flag content of all ir_graph visited fields. */
static int max_irg_visited = 0;
unsigned long
-get_irg_visited (ir_graph *irg)
+(get_irg_visited)(ir_graph *irg)
{
- return irg->visited;
+ return __get_irg_visited(irg);
}
void
}
unsigned long
-get_irg_block_visited (ir_graph *irg)
+(get_irg_block_visited)(ir_graph *irg)
{
- return irg->block_visited;
+ return __get_irg_block_visited(irg);
}
void
-set_irg_block_visited (ir_graph *irg, unsigned long visited)
+(set_irg_block_visited)(ir_graph *irg, unsigned long visited)
{
- irg->block_visited = visited;
+ __set_irg_block_visited(irg, visited);
}
void
-inc_irg_block_visited (ir_graph *irg)
+(inc_irg_block_visited)(ir_graph *irg)
{
- ++irg->block_visited;
+ __inc_irg_block_visited(irg);
}
*/
int node_is_in_irgs_storage(ir_graph *irg, ir_node *n);
+/*-------------------------------------------------------------------*/
+/* inline functions for graphs */
+/*-------------------------------------------------------------------*/
+
+/** Returns the start block of a graph. */
+static INLINE ir_node *
+__get_irg_start_block(ir_graph *irg)
+{
+ return irg->start_block;
+}
+
+static INLINE void
+__set_irg_start_block(ir_graph *irg, ir_node *node)
+{
+ irg->start_block = node;
+}
+
+static INLINE ir_node *
+__get_irg_start(ir_graph *irg)
+{
+ return irg->start;
+}
+
+static INLINE void
+__set_irg_start(ir_graph *irg, ir_node *node)
+{
+ irg->start = node;
+}
+
+static INLINE ir_node *
+__get_irg_end_block(ir_graph *irg)
+{
+ return irg->end_block;
+}
+
+static INLINE void
+__set_irg_end_block(ir_graph *irg, ir_node *node)
+{
+ irg->end_block = node;
+}
+
+static INLINE ir_node *
+__get_irg_end(ir_graph *irg)
+{
+ return irg->end;
+}
+
+static INLINE void
+__set_irg_end(ir_graph *irg, ir_node *node)
+{
+ irg->end = node;
+}
+
+static INLINE ir_node *
+__get_irg_end_reg(ir_graph *irg) {
+ return irg->end_reg;
+}
+
+static INLINE ir_node *
+__get_irg_end_except (ir_graph *irg) {
+ return irg->end_except;
+}
+
+static INLINE ir_node *
+__get_irg_cstore(ir_graph *irg)
+{
+ return irg->cstore;
+}
+
+static INLINE void
+__set_irg_cstore(ir_graph *irg, ir_node *node)
+{
+ irg->cstore = node;
+}
+
+static INLINE ir_node *
+__get_irg_frame(ir_graph *irg)
+{
+ return irg->frame;
+}
+
+static INLINE void
+__set_irg_frame(ir_graph *irg, ir_node *node)
+{
+ irg->frame = node;
+}
+
+static INLINE ir_node *
+__get_irg_globals(ir_graph *irg)
+{
+ return irg->globals;
+}
+
+static INLINE void
+__set_irg_globals(ir_graph *irg, ir_node *node)
+{
+ irg->globals = node;
+}
+
+static INLINE ir_node *
+__get_irg_args(ir_graph *irg)
+{
+ return irg->args;
+}
+
+static INLINE void
+__set_irg_args(ir_graph *irg, ir_node *node)
+{
+ irg->args = node;
+}
+
+static INLINE ir_node *
+__get_irg_bad(ir_graph *irg)
+{
+ return irg->bad;
+}
+
+static INLINE void
+__set_irg_bad(ir_graph *irg, ir_node *node)
+{
+ irg->bad = node;
+}
+
+static INLINE ir_node *
+__get_irg_current_block(ir_graph *irg)
+{
+ return irg->current_block;
+}
+
+static INLINE void
+__set_irg_current_block(ir_graph *irg, ir_node *node)
+{
+ irg->current_block = node;
+}
+
+static INLINE entity *
+__get_irg_ent(ir_graph *irg)
+{
+ assert(irg && irg->ent);
+ return irg->ent;
+}
+
+static INLINE void
+__set_irg_ent(ir_graph *irg, entity *ent)
+{
+ irg->ent = ent;
+}
+
+static INLINE type *
+__get_irg_frame_type(ir_graph *irg)
+{
+ assert(irg && irg->frame_type);
+ return irg->frame_type;
+}
+
+static INLINE void
+__set_irg_frame_type(ir_graph *irg, type *ftp)
+{
+ assert(is_class_type(ftp));
+ irg->frame_type = ftp;
+}
+
+static INLINE struct obstack *
+__get_irg_obstack(ir_graph *irg) {
+ return irg->obst;
+}
+
+
+static INLINE void
+__set_irg_link(ir_graph *irg, void *thing) {
+ irg->link = thing;
+}
+
+static INLINE void *
+__get_irg_link(ir_graph *irg) {
+ return irg->link;
+}
+
+static INLINE unsigned long
+__get_irg_visited(ir_graph *irg)
+{
+ return irg->visited;
+}
+
+static INLINE unsigned long
+__get_irg_block_visited(ir_graph *irg)
+{
+ return irg->block_visited;
+}
+
+static INLINE void
+__set_irg_block_visited(ir_graph *irg, unsigned long visited)
+{
+ irg->block_visited = visited;
+}
+
+static INLINE void
+__inc_irg_block_visited(ir_graph *irg)
+{
+ ++irg->block_visited;
+}
+
+#define get_irg_start_block(irg) __get_irg_start_block(irg)
+#define set_irg_start_block(irg, node) __set_irg_start_block(irg, node)
+#define get_irg_start(irg) __get_irg_start(irg)
+#define set_irg_start(irg, node) __set_irg_start(irg, node)
+#define get_irg_end_block(irg) __get_irg_end_block(irg)
+#define set_irg_end_block(irg, node) __set_irg_end_block(irg, node)
+#define get_irg_end(irg) __get_irg_end(irg)
+#define set_irg_end(irg, node) __set_irg_end(irg, node)
+#define get_irg_end_reg(irg) __get_irg_end_reg(irg)
+#define get_irg_end_except(irg) __get_irg_end_except(irg)
+#define get_irg_cstore(irg) __get_irg_cstore(irg)
+#define set_irg_cstore(irg, node) __set_irg_cstore(irg, node)
+#define get_irg_frame(irg) __get_irg_frame(irg)
+#define set_irg_frame(irg, node) __set_irg_frame(irg, node)
+#define get_irg_globals(irg) __get_irg_globals(irg)
+#define set_irg_globals(irg, node) __set_irg_globals(irg, node)
+#define get_irg_args(irg) __get_irg_args(irg)
+#define set_irg_args(irg, node) __set_irg_args(irg, node)
+#define get_irg_bad(irg) __get_irg_bad(irg)
+#define set_irg_bad(irg, node) __set_irg_bad(irg, node)
+#define get_irg_current_block(irg) __get_irg_current_block(irg)
+#define set_irg_current_block(irg, node) __set_irg_current_block(irg, node)
+#define get_irg_ent(irg) __get_irg_ent(irg)
+#define set_irg_ent(irg, ent) __set_irg_ent(irg, ent)
+#define get_irg_frame_type(irg) __get_irg_frame_type(irg)
+#define set_irg_frame_type(irg, ftp) __set_irg_frame_type(irg, ftp)
+#define get_irg_obstack(irg) __get_irg_obstack(irg)
+#define set_irg_link(irg, thing) __set_irg_link(irg, thing)
+#define get_irg_link(irg) __get_irg_link(irg)
+#define get_irg_visited(irg) __get_irg_visited(irg)
+#define get_irg_block_visited(irg) __get_irg_block_visited(irg)
+#define set_irg_block_visited(irg, v) __set_irg_block_visited(irg, v)
+#define inc_irg_block_visited(irg) __inc_irg_block_visited(irg)
+
# endif /* _IRGRAPH_T_H_ */
set_irn_visited(node, visited);
pred = skip_Proj(node);
- if (intern_get_irn_op(pred) == op_CallBegin
- || intern_get_irn_op(pred) == op_EndReg
- || intern_get_irn_op(pred) == op_EndExcept) {
+ if (get_irn_op(pred) == op_CallBegin
+ || get_irn_op(pred) == op_EndReg
+ || get_irn_op(pred) == op_EndExcept) {
current_ir_graph = get_irn_irg(pred);
}
if (is_no_Block(node))
irg_walk_cg(get_nodes_block(node), visited, irg_set, pre, post, env);
- if (intern_get_irn_op(node) == op_Block) { /* block */
- for (i = intern_get_irn_arity(node) - 1; i >= 0; --i) {
- ir_node * exec = intern_get_irn_n(node, i);
+ if (get_irn_op(node) == op_Block) { /* block */
+ for (i = get_irn_arity(node) - 1; i >= 0; --i) {
+ ir_node * exec = get_irn_n(node, i);
ir_node * pred = skip_Proj(exec);
- if ((intern_get_irn_op(pred) != op_CallBegin
- && intern_get_irn_op(pred) != op_EndReg
- && intern_get_irn_op(pred) != op_EndExcept)
+ if ((get_irn_op(pred) != op_CallBegin
+ && get_irn_op(pred) != op_EndReg
+ && get_irn_op(pred) != op_EndExcept)
|| eset_contains(irg_set, get_irn_irg(pred))) {
irg_walk_cg(exec, visited, irg_set, pre, post, env);
}
}
- } else if (intern_get_irn_op(node) == op_Filter) {
- for (i = intern_get_irn_arity(node) - 1; i >= 0; --i) {
- ir_node * pred = intern_get_irn_n(node, i);
- if (intern_get_irn_op(pred) == op_Unknown || intern_get_irn_op(pred) == op_Bad) {
+ } else if (get_irn_op(node) == op_Filter) {
+ for (i = get_irn_arity(node) - 1; i >= 0; --i) {
+ ir_node * pred = get_irn_n(node, i);
+ if (get_irn_op(pred) == op_Unknown || get_irn_op(pred) == op_Bad) {
irg_walk_cg(pred, visited, irg_set, pre, post, env);
} else {
ir_node * exec;
exec = skip_Proj(get_Block_cfgpred(get_nodes_block(node), i));
- assert(intern_get_irn_op(exec) == op_CallBegin
- || intern_get_irn_op(exec) == op_EndReg
- || intern_get_irn_op(exec) == op_EndExcept);
+ assert(get_irn_op(exec) == op_CallBegin
+ || get_irn_op(exec) == op_EndReg
+ || get_irn_op(exec) == op_EndExcept);
if (eset_contains(irg_set, get_irn_irg(exec))) {
current_ir_graph = get_irn_irg(exec);
irg_walk_cg(pred, visited, irg_set, pre, post, env);
}
}
} else {
- for (i = intern_get_irn_arity(node) - 1; i >= 0; --i) {
- irg_walk_cg(intern_get_irn_n(node, i), visited, irg_set, pre, post, env);
+ for (i = get_irn_arity(node) - 1; i >= 0; --i) {
+ irg_walk_cg(get_irn_n(node, i), visited, irg_set, pre, post, env);
}
}
/* Insert all ir_graphs in irg_set, that are (transitive) reachable. */
static void collect_irgs(ir_node * node, eset * irg_set) {
- if (intern_get_irn_op(node) == op_Call) {
+ if (get_irn_op(node) == op_Call) {
int i;
for (i = get_Call_n_callees(node) - 1; i >= 0; --i) {
entity * ent = get_Call_callee(node, i);
if (is_no_Block(node))
irg_walk_2(get_nodes_block(node), pre, post, env);
- for (i = intern_get_irn_arity(node) - 1; i >= 0; --i)
- irg_walk_2(intern_get_irn_n(node, i), pre, post, env);
+ for (i = get_irn_arity(node) - 1; i >= 0; --i)
+ irg_walk_2(get_irn_n(node, i), pre, post, env);
if (post) post(node, env);
}
if (pre) pre(node, env);
if (node->op != op_Block)
- irg_walk_2(intern_get_irn_n(node, -1), pre, post, env);
- for (i = intern_get_irn_arity(node) - 1; i >= 0; --i)
- irg_walk_2(intern_get_irn_n(node, i), pre, post, env);
+ irg_walk_2(get_irn_n(node, -1), pre, post, env);
+ for (i = get_irn_arity(node) - 1; i >= 0; --i)
+ irg_walk_2(get_irn_n(node, i), pre, post, env);
if (post) post(node, env);
}
if (interprocedural_view) {
/* Only Filter and Block nodes can have predecessors in other graphs. */
- if (intern_get_irn_op(n) == op_Filter)
+ if (get_irn_op(n) == op_Filter)
n = get_nodes_block(n);
- if (intern_get_irn_op(n) == op_Block) {
+ if (get_irn_op(n) == op_Block) {
ir_node *cfop = skip_Proj(get_Block_cfgpred(n, index));
if (is_ip_cfop(cfop)) {
current_ir_graph = get_irn_irg(cfop);
if (is_no_Block(node))
cg_walk_2(get_nodes_block(node), pre, post, env);
- for (i = intern_get_irn_arity(node) - 1; i >= 0; --i) {
+ for (i = get_irn_arity(node) - 1; i >= 0; --i) {
rem = switch_irg(node, i); /* @@@ AS: Is this wrong? We do have to
switch to the irg of the predecessor, don't we? */
- cg_walk_2(intern_get_irn_n(node, i), pre, post, env);
+ cg_walk_2(get_irn_n(node, i), pre, post, env);
current_ir_graph = rem;
}
n = skip_Tuple(n);
pred = skip_Proj(n);
if (!(is_cfop(pred) || is_fragile_op(pred) ||
- (intern_get_irn_op(pred) == op_Bad)))
+ (get_irn_op(pred) == op_Bad)))
n = get_cf_op(n);
return skip_Proj(n);
/* find the corresponding predecessor block. */
ir_node *pred = get_cf_op(get_Block_cfgpred(node, i));
pred = get_nodes_block(pred);
- if(intern_get_irn_opcode(pred) == iro_Block) {
+ if(get_irn_opcode(pred) == iro_Block) {
/* recursion */
irg_block_walk_2(pred, pre, post, env);
}
assert(get_irn_opcode(block) == iro_Block);
irg_block_walk_2(block, pre, post, env);
/* keepalive: the endless loops ... */
- if (intern_get_irn_op(node) == op_End) {
- int arity = intern_get_irn_arity(node);
+ if (get_irn_op(node) == op_End) {
+ int arity = get_irn_arity(node);
for (i = 0; i < arity; i++) {
- pred = intern_get_irn_n(node, i);
- if (intern_get_irn_op(pred) == op_Block)
+ pred = get_irn_n(node, i);
+ if (get_irn_op(pred) == op_Block)
irg_block_walk_2(pred, pre, post, env);
}
}
assert(interprocedural_view);
interprocedural_view = 0;
- callbegin = skip_Proj(intern_get_irn_n(block, 0));
- assert(intern_get_irn_op(callbegin) == op_CallBegin);
+ callbegin = skip_Proj(get_irn_n(block, 0));
+ assert(get_irn_op(callbegin) == op_CallBegin);
interprocedural_view = 1;
push_callsite(irg, callbegin);
/* Find the cf_pred refering to pos. */
ir_node *block = n;
ir_node *cf_pred;
- if (intern_get_irn_opcode(n) == iro_Filter) block = get_nodes_block(n);
- cf_pred = skip_Proj(intern_get_irn_n(block, pos));
+ if (get_irn_opcode(n) == iro_Filter) block = get_nodes_block(n);
+ cf_pred = skip_Proj(get_irn_n(block, pos));
/* Check whether we enter or leave a procedure and act according. */
- if ((intern_get_irn_op(cf_pred) == op_EndReg) ||
- (intern_get_irn_op(cf_pred) == op_EndExcept))
+ if ((get_irn_op(cf_pred) == op_EndReg) ||
+ (get_irn_op(cf_pred) == op_EndExcept))
enter_procedure(block, cf_pred, pos);
- if (intern_get_irn_op(cf_pred) == op_CallBegin)
+ if (get_irn_op(cf_pred) == op_CallBegin)
if (!leave_procedure(block, cf_pred, pos)) return NULL;
}
- return intern_get_irn_n(n, pos);
+ return get_irn_n(n, pos);
}
static INLINE void
/* Find the cf_pred refering to pos. */
block = n;
- if (intern_get_irn_opcode(n) == iro_Filter) block = get_nodes_block(n);
- cf_pred = skip_Proj(intern_get_irn_n(block, pos));
+ if (get_irn_opcode(n) == iro_Filter) block = get_nodes_block(n);
+ cf_pred = skip_Proj(get_irn_n(block, pos));
/* Check whether we re_enter or re_leave a procedure and act according. */
- if ((intern_get_irn_op(cf_pred) == op_EndReg) ||
- (intern_get_irn_op(cf_pred) == op_EndExcept))
+ if ((get_irn_op(cf_pred) == op_EndReg) ||
+ (get_irn_op(cf_pred) == op_EndExcept))
re_enter_procedure(block, cf_pred, pos);
- if (intern_get_irn_op(cf_pred) == op_CallBegin)
+ if (get_irn_op(cf_pred) == op_CallBegin)
re_leave_procedure(block, cf_pred, pos);
}
/*-- getting some parameters from ir_nodes --*/
int
-is_ir_node (const void *thing) {
- if (get_kind(thing) == k_ir_node)
- return 1;
- else
- return 0;
+(is_ir_node)(const void *thing) {
+ return __is_ir_node(thing);
}
int
-get_irn_intra_arity (const ir_node *node) {
- return intern_get_irn_intra_arity(node);
+(get_irn_intra_arity)(const ir_node *node) {
+ return __get_irn_intra_arity(node);
}
int
-get_irn_inter_arity (const ir_node *node) {
- return intern_get_irn_inter_arity(node);
+(get_irn_inter_arity)(const ir_node *node) {
+ return __get_irn_inter_arity(node);
}
int
-get_irn_arity (const ir_node *node) {
- return intern_get_irn_arity(node);
+(get_irn_arity)(const ir_node *node) {
+ return __get_irn_arity(node);
}
/* Returns the array with ins. This array is shifted with respect to the
}
ir_node *
-get_irn_intra_n (ir_node *node, int n) {
- return intern_get_irn_intra_n (node, n);
+(get_irn_intra_n)(ir_node *node, int n) {
+ return __get_irn_intra_n (node, n);
}
ir_node *
-get_irn_inter_n (ir_node *node, int n) {
- return intern_get_irn_inter_n (node, n);
+(get_irn_inter_n)(ir_node *node, int n) {
+ return __get_irn_inter_n (node, n);
}
ir_node *
-get_irn_n (ir_node *node, int n) {
- return intern_get_irn_n (node, n);
+(get_irn_n)(ir_node *node, int n) {
+ return __get_irn_n (node, n);
}
void
}
ir_mode *
-get_irn_mode (const ir_node *node) {
- return intern_get_irn_mode(node);
+(get_irn_mode)(const ir_node *node) {
+ return __get_irn_mode(node);
}
void
-set_irn_mode (ir_node *node, ir_mode *mode)
+(set_irn_mode)(ir_node *node, ir_mode *mode)
{
- assert (node);
- node->mode=mode;
- return;
+ __set_irn_mode(node, mode);
}
modecode
}
ir_op *
-get_irn_op (const ir_node *node)
+(get_irn_op)(const ir_node *node)
{
- return intern_get_irn_op(node);
+ return __get_irn_op(node);
}
/* should be private to the library: */
}
opcode
-get_irn_opcode (const ir_node *node)
+(get_irn_opcode)(const ir_node *node)
{
- return intern_get_irn_opcode(node);
+ return __get_irn_opcode(node);
}
const char *
}
unsigned long
-get_irn_visited (const ir_node *node)
+(get_irn_visited)(const ir_node *node)
{
- assert (node);
- return node->visited;
+ return __get_irn_visited(node);
}
void
-set_irn_visited (ir_node *node, unsigned long visited)
+(set_irn_visited)(ir_node *node, unsigned long visited)
{
- assert (node);
- node->visited = visited;
+ __set_irn_visited(node, visited);
}
void
-mark_irn_visited (ir_node *node) {
- assert (node);
- node->visited = current_ir_graph->visited;
+(mark_irn_visited)(ir_node *node) {
+ __mark_irn_visited(node);
}
int
-irn_not_visited (const ir_node *node) {
- assert (node);
- return (node->visited < current_ir_graph->visited);
+(irn_not_visited)(const ir_node *node) {
+ return __irn_not_visited(node);
}
int
-irn_visited (const ir_node *node) {
- assert (node);
- return (node->visited >= current_ir_graph->visited);
+(irn_visited)(const ir_node *node) {
+ return __irn_visited(node);
}
void
-set_irn_link (ir_node *node, void *link) {
- assert (node);
- /* Link field is used for Phi construction and various optimizations
- in iropt. */
- assert(get_irg_phase_state(current_ir_graph) != phase_building);
-
- node->link = link;
+(set_irn_link)(ir_node *node, void *link) {
+ __set_irn_link(node, link);
}
void *
-get_irn_link (const ir_node *node) {
- assert (node);
- return node->link;
+(get_irn_link)(const ir_node *node) {
+ return __get_irn_link(node);
}
/* Outputs a unique number for this node */
ir_node *rem_pred = node->in[0+1];
ir_node *res;
- assert (intern_get_irn_arity (node) > 0);
+ assert (get_irn_arity (node) > 0);
node->in[0+1] = node;
res = skip_nop(rem_pred);
if (pred->op != op_Id) return pred; /* shortcut */
rem_pred = pred;
- assert (intern_get_irn_arity (node) > 0);
+ assert (get_irn_arity (node) > 0);
node->in[0+1] = node;
res = skip_nop(rem_pred);
/**
* Checks whether a pointer points to a ir node.
*
- * @param thing an arbitrary pointer
- *
- * @return
- * true if the thing is a ir mode, else false
+ * @param thing an arbitrary pointer
+ * @return non-zero if the thing is a ir mode, else zero
*/
-int
-is_ir_node (const void *thing);
+int is_ir_node (const void *thing);
-/** returns the number of predecessors without the block predecessor: */
-int get_irn_arity (const ir_node *node);
+/**
+ * Returns the number of predecessors without the block predecessor.
+ *
+ * @param node the IR-node
+ */
+int get_irn_arity (const ir_node *node);
int get_irn_intra_arity (const ir_node *node);
int get_irn_inter_arity (const ir_node *node);
to iterate including the Block predecessor iterate from i = -1 to
i < get_irn_arity. */
/* Access predecessor n */
-/* get_irn_n removes Id predecessors. */
+
+/**
+ * Get the n-th predecessor of a node.
+ * This function removes Id predecessors.
+ */
ir_node *get_irn_n (ir_node *node, int n);
ir_node *get_irn_intra_n (ir_node *node, int n);
ir_node *get_irn_inter_n (ir_node *node, int n);
+
+/** Replace the n-th predecessor of a node with a new one. */
void set_irn_n (ir_node *node, int n, ir_node *in);
-/** Sets the mode struct of node */
+/** Sets the mode struct of node. */
void set_irn_mode (ir_node *node, ir_mode *mode);
-/** Gets the mode struct. */
+/** Gets the mode struct of a node. */
ir_mode *get_irn_mode (const ir_node *node);
/** Gets the mode-enum modecode. */
modecode get_irn_modecode (const ir_node *node);
ident *get_irn_modeident (const ir_node *node);
/** Gets the string representation of the mode .*/
const char *get_irn_modename (const ir_node *node);
-/** Gets the opcode struct of the node */
+/** Gets the opcode struct of the node. */
ir_op *get_irn_op (const ir_node *node);
/** Sets the opcode struct of the node. */
void set_irn_op (ir_node *node, ir_op *op);
const char *get_irn_opname (const ir_node *node);
/** Get the ident for a string representation of the opcode. */
ident *get_irn_opident (const ir_node *node);
+/** Gets the visited counter of a node. */
unsigned long get_irn_visited (const ir_node *node);
+/** Sets the visited counter of a node. */
void set_irn_visited (ir_node *node, unsigned long visited);
/** Sets visited to get_irg_visited(current_ir_graph). */
void mark_irn_visited (ir_node *node);
-/** Returns 1 if visited < get_irg_visited(current_ir_graph). */
+/** Returns 1 if visited < get_irg_visited(current_ir_graph). */
int irn_not_visited (const ir_node *node);
-/** Returns 1 if visited >= get_irg_visited(current_ir_graph). */
+/** Returns 1 if visited >= get_irg_visited(current_ir_graph). */
int irn_visited (const ir_node *node);
+
+/**
+ * Sets the link of a node.
+ * Only allowed if the graph is NOT in phase_building.
+ */
void set_irn_link (ir_node *node, void *link);
+
+/** Returns the link of a node. */
void *get_irn_link (const ir_node *node);
/** Returns the ir_graph this node belongs to. Only valid if irg
/**
* @function get_irn_block
- * @see get_nodes_block
+ * @see get_nodes_block()
*/
/**
# include "irnode.h"
# include "irop_t.h"
+# include "irgraph_t.h"
# include "irflag_t.h"
# include "firm_common_t.h"
# include "irdom_t.h" /* For size of struct dom_info. */
/* functions so they can be inlined. */
/*-------------------------------------------------------------------*/
+/**
+ * Checks whether a pointer points to an ir node.
+ * Intern version for libFirm.
+ */
+static INLINE int
+__is_ir_node (const void *thing) {
+ return (get_kind(thing) == k_ir_node);
+}
+/**
+ * Gets the op of a node.
+ * Intern version for libFirm.
+ */
+static INLINE ir_op *
+__get_irn_op (const ir_node *node)
+{
+ assert (node);
+ return node->op;
+}
+
+/**
+ * Gets the opcode of a node.
+ * Intern version for libFirm.
+ */
+static INLINE opcode
+__get_irn_opcode (const ir_node *node)
+{
+ assert (k_ir_node == get_kind(node));
+ assert (node -> op);
+ return node->op->code;
+}
/**
* Returns the number of predecessors without the block predecessor.
* Intern version for libFirm.
*/
static INLINE int
-intern_get_irn_intra_arity (const ir_node *node) {
+__get_irn_intra_arity (const ir_node *node) {
assert(node);
return ARR_LEN(node->in) - 1;
}
* Intern version for libFirm.
*/
static INLINE int
-intern_get_irn_inter_arity (const ir_node *node) {
+__get_irn_inter_arity (const ir_node *node) {
assert(node);
- if (get_irn_opcode(node) == iro_Filter) {
+ if (__get_irn_opcode(node) == iro_Filter) {
assert(node->attr.filter.in_cg);
return ARR_LEN(node->attr.filter.in_cg) - 1;
- } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
+ } else if (__get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
return ARR_LEN(node->attr.block.in_cg) - 1;
}
- return intern_get_irn_intra_arity(node);
+ return __get_irn_intra_arity(node);
}
/**
* Intern version for libFirm.
*/
static INLINE int
-intern_get_irn_arity (const ir_node *node) {
+__get_irn_arity (const ir_node *node) {
assert(node);
- if (interprocedural_view) return intern_get_irn_inter_arity(node);
- return intern_get_irn_intra_arity(node);
+ if (interprocedural_view) return __get_irn_inter_arity(node);
+ return __get_irn_intra_arity(node);
}
/**
* Intern version for libFirm.
*/
static INLINE ir_node *
-intern_get_irn_intra_n (ir_node *node, int n) {
+__get_irn_intra_n (ir_node *node, int n) {
return (node->in[n + 1] = skip_nop(node->in[n + 1]));
}
* Intern version for libFirm.
*/
static INLINE ir_node*
-intern_get_irn_inter_n (ir_node *node, int n) {
+__get_irn_inter_n (ir_node *node, int n) {
/* handle Filter and Block specially */
- if (get_irn_opcode(node) == iro_Filter) {
+ if (__get_irn_opcode(node) == iro_Filter) {
assert(node->attr.filter.in_cg);
return (node->attr.filter.in_cg[n + 1] = skip_nop(node->attr.filter.in_cg[n + 1]));
- } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
+ } else if (__get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
return (node->attr.block.in_cg[n + 1] = skip_nop(node->attr.block.in_cg[n + 1]));
}
- return get_irn_intra_n (node, n);
+ return __get_irn_intra_n (node, n);
}
/**
* Access to the predecessors of a node.
- * To iterate over the operands iterate from 0 to i < intern_get_irn_arity(),
+ * To iterate over the operands iterate from 0 to i < get_irn_arity(),
* to iterate including the Block predecessor iterate from i = -1 to
* i < get_irn_arity.
* If it is a block, the entry -1 is NULL.
* Intern version for libFirm.
*/
static INLINE ir_node *
-intern_get_irn_n (ir_node *node, int n) {
- assert(node); assert(-1 <= n && n < intern_get_irn_arity(node));
- if (interprocedural_view) return get_irn_inter_n (node, n);
- return get_irn_intra_n (node, n);
+__get_irn_n (ir_node *node, int n) {
+ assert(node); assert(-1 <= n && n < __get_irn_arity(node));
+ if (interprocedural_view) return __get_irn_inter_n (node, n);
+ return __get_irn_intra_n (node, n);
}
/**
* Intern version for libFirm.
*/
static INLINE ir_mode *
-intern_get_irn_mode (const ir_node *node)
+__get_irn_mode (const ir_node *node)
{
assert (node);
return node->mode;
}
/**
- * Gets the op of a node.
+ * Sets the mode of a node.
+ * Intern version of libFirm.
+ */
+static INLINE void
+__set_irn_mode (ir_node *node, ir_mode *mode)
+{
+ assert (node);
+ node->mode = mode;
+}
+
+/**
+ * Gets the visited counter of a node.
* Intern version for libFirm.
*/
-static INLINE ir_op *
-intern_get_irn_op (const ir_node *node)
+static INLINE unsigned long
+__get_irn_visited (const ir_node *node)
{
assert (node);
- return node->op;
+ return node->visited;
}
/**
- * Gets the opcode of a node.
+ * Sets the visited counter of a node.
* Intern version for libFirm.
*/
-static INLINE opcode
-intern_get_irn_opcode (const ir_node *node)
+static INLINE void
+__set_irn_visited (ir_node *node, unsigned long visited)
{
- assert (k_ir_node == get_kind(node));
- assert (node -> op);
- return node->op->code;
+ assert (node);
+ node->visited = visited;
+}
+
+/**
+ * Mark a node as visited in a graph.
+ * Intern version for libFirm.
+ */
+static INLINE void
+__mark_irn_visited (ir_node *node) {
+ assert (node);
+ node->visited = current_ir_graph->visited;
+}
+
+/**
+ * Returns non-zero if a node was visited.
+ * Intern version for libFirm.
+ */
+static INLINE int
+__irn_visited(const ir_node *node) {
+ assert (node);
+ return (node->visited >= current_ir_graph->visited);
}
+/**
+ * Returns non-zero if a node was NOT visited.
+ * Intern version for libFirm.
+ */
+static INLINE int
+__irn_not_visited(const ir_node *node) {
+ assert (node);
+ return (node->visited < current_ir_graph->visited);
+}
+
+/**
+ * Sets the link of a node.
+ * Intern version of libFirm.
+ */
+static INLINE void
+__set_irn_link(ir_node *node, void *link) {
+ assert (node);
+ /* Link field is used for Phi construction and various optimizations
+ in iropt. */
+ assert(get_irg_phase_state(current_ir_graph) != phase_building);
+
+ node->link = link;
+}
+
+/**
+ * Returns the link of a node.
+ * Intern version of libFirm.
+ */
+static INLINE void *
+__get_irn_link(const ir_node *node) {
+ assert (node);
+ return node->link;
+}
+/* this section MUST contain all inline functions */
+#define is_ir_node(thing) __is_ir_node(thing)
+#define get_irn_intra_arity(node) __get_irn_intra_arity(node)
+#define get_irn_inter_arity(node) __get_irn_inter_arity(node)
+#define get_irn_arity(node) __get_irn_arity(node)
+#define get_irn_intra_n(node, n) __get_irn_intra_n(node, n)
+#define get_irn_inter_n(node, n) __get_irn_inter_n(node, n)
+#define get_irn_n(node, n) __get_irn_n(node, n)
+#define get_irn_mode(node) __get_irn_mode(node)
+#define set_irn_mode(node, mode) __set_irn_mode(node, mode)
+#define get_irn_op(node) __get_irn_op(node)
+#define get_irn_opcode(node) __get_irn_opcode(node)
+#define get_irn_visited(node) __get_irn_visited(node)
+#define set_irn_visited(node, v) __set_irn_visited(node, v)
+#define mark_irn_visited(node) __mark_irn_visited(node)
+#define irn_visited(node) __irn_visited(node)
+#define irn_not_visited(node) __irn_not_visited(node)
+#define set_irn_link(node, link) __set_irn_link(node, link)
+#define get_irn_link(node) __get_irn_link(node)
# endif /* _IRNODE_T_H_ */
static INLINE ir_node *
follow_Id (ir_node *n)
{
- while (intern_get_irn_op (n) == op_Id) n = get_Id_pred (n);
+ while (get_irn_op (n) == op_Id) n = get_Id_pred (n);
return n;
}
static INLINE tarval *
value_of (ir_node *n)
{
- if ((n != NULL) && (intern_get_irn_op(n) == op_Const))
+ if ((n != NULL) && (get_irn_op(n) == op_Const))
return get_Const_tarval(n); /* might return tarval_bad */
else
return tarval_bad;
tarval *tb = value_of(b);
if ((ta != tarval_bad) && (tb != tarval_bad)
- && (intern_get_irn_mode(a) == intern_get_irn_mode(b))
- && !(get_mode_sort(intern_get_irn_mode(a)) == irms_reference)) {
+ && (get_irn_mode(a) == get_irn_mode(b))
+ && !(get_mode_sort(get_irn_mode(a)) == irms_reference)) {
return tarval_add(ta, tb);
}
return tarval_bad;
tarval *tb = value_of(b);
if ((ta != tarval_bad) && (tb != tarval_bad)
- && (intern_get_irn_mode(a) == intern_get_irn_mode(b))
- && !(get_mode_sort(intern_get_irn_mode(a)) == irms_reference)) {
+ && (get_irn_mode(a) == get_irn_mode(b))
+ && !(get_mode_sort(get_irn_mode(a)) == irms_reference)) {
return tarval_sub(ta, tb);
}
return tarval_bad;
ir_node *a = get_Minus_op(n);
tarval *ta = value_of(a);
- if ((ta != tarval_bad) && mode_is_signed(intern_get_irn_mode(a)))
+ if ((ta != tarval_bad) && mode_is_signed(get_irn_mode(a)))
return tarval_neg(ta);
return tarval_bad;
tarval *ta = value_of(a);
tarval *tb = value_of(b);
- if ((ta != tarval_bad) && (tb != tarval_bad) && (intern_get_irn_mode(a) == intern_get_irn_mode(b))) {
+ if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
return tarval_mul(ta, tb);
} else {
/* a*0 = 0 or 0*b = 0:
tarval *tb = value_of(b);
/* This was missing in original implementation. Why? */
- if ((ta != tarval_bad) && (tb != tarval_bad) && (intern_get_irn_mode(a) == intern_get_irn_mode(b))) {
+ if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
if (tb != get_mode_null(get_tarval_mode(tb))) /* div by zero: return tarval_bad */
return tarval_quo(ta, tb);
}
tarval *tb = value_of(b);
/* This was missing in original implementation. Why? */
- if ((ta != tarval_bad) && (tb != tarval_bad) && (intern_get_irn_mode(a) == intern_get_irn_mode(b))) {
+ if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
if (tb != get_mode_null(get_tarval_mode(tb))) /* div by zero: return tarval_bad */
return tarval_div(ta, tb);
}
tarval *tb = value_of(b);
/* This was missing in original implementation. Why? */
- if ((ta != tarval_bad) && (tb != tarval_bad) && (intern_get_irn_mode(a) == intern_get_irn_mode(b))) {
+ if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
if (tb != get_mode_null(get_tarval_mode(tb))) /* div by zero: return tarval_bad */
return tarval_mod(ta, tb);
}
tarval *ta = value_of(a);
if (ta != tarval_bad)
- return tarval_convert_to(ta, intern_get_irn_mode(n));
+ return tarval_convert_to(ta, get_irn_mode(n));
return tarval_bad;
}
3. The predecessors are Allocs or void* constants. Allocs never
return NULL, they raise an exception. Therefore we can predict
the Cmp result. */
- if (intern_get_irn_op(a) == op_Cmp) {
+ if (get_irn_op(a) == op_Cmp) {
aa = get_Cmp_left(a);
ab = get_Cmp_right(a);
ir_node *aba = skip_nop(skip_Proj(ab));
if ( ( (/* aa is ProjP and aaa is Alloc */
- (intern_get_irn_op(aa) == op_Proj)
- && (mode_is_reference(intern_get_irn_mode(aa)))
- && (intern_get_irn_op(aaa) == op_Alloc))
+ (get_irn_op(aa) == op_Proj)
+ && (mode_is_reference(get_irn_mode(aa)))
+ && (get_irn_op(aaa) == op_Alloc))
&& ( (/* ab is constant void */
- (intern_get_irn_op(ab) == op_Const)
- && (mode_is_reference(intern_get_irn_mode(ab)))
- && (get_Const_tarval(ab) == get_mode_null(intern_get_irn_mode(ab))))
+ (get_irn_op(ab) == op_Const)
+ && (mode_is_reference(get_irn_mode(ab)))
+ && (get_Const_tarval(ab) == get_mode_null(get_irn_mode(ab))))
|| (/* ab is other Alloc */
- (intern_get_irn_op(ab) == op_Proj)
- && (mode_is_reference(intern_get_irn_mode(ab)))
- && (intern_get_irn_op(aba) == op_Alloc)
+ (get_irn_op(ab) == op_Proj)
+ && (mode_is_reference(get_irn_mode(ab)))
+ && (get_irn_op(aba) == op_Alloc)
&& (aaa != aba))))
|| (/* aa is void and aba is Alloc */
- (intern_get_irn_op(aa) == op_Const)
- && (mode_is_reference(intern_get_irn_mode(aa)))
- && (get_Const_tarval(aa) == get_mode_null(intern_get_irn_mode(aa)))
- && (intern_get_irn_op(ab) == op_Proj)
- && (mode_is_reference(intern_get_irn_mode(ab)))
- && (intern_get_irn_op(aba) == op_Alloc)))
+ (get_irn_op(aa) == op_Const)
+ && (mode_is_reference(get_irn_mode(aa)))
+ && (get_Const_tarval(aa) == get_mode_null(get_irn_mode(aa)))
+ && (get_irn_op(ab) == op_Proj)
+ && (mode_is_reference(get_irn_mode(ab)))
+ && (get_irn_op(aba) == op_Alloc)))
/* 3.: */
return new_tarval_from_long (get_Proj_proj(n) & Ne, mode_b);
}
}
- } else if (intern_get_irn_op(a) == op_DivMod) {
+ } else if (get_irn_op(a) == op_DivMod) {
tarval *tb = value_of(b = get_DivMod_right(a));
tarval *ta = value_of(a = get_DivMod_left(a));
- if ((ta != tarval_bad) && (tb != tarval_bad) && (intern_get_irn_mode(a) == intern_get_irn_mode(b))) {
+ if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
if (tb == get_mode_null(get_tarval_mode(tb))) /* div by zero: return tarval_bad */
return tarval_bad;
if (get_Proj_proj(n)== 0) /* Div */
assert (mode_is_reference(get_irn_mode (a))
&& mode_is_reference(get_irn_mode (b)));
- if (intern_get_irn_op (a) == op_Proj && intern_get_irn_op(b) == op_Proj) {
+ if (get_irn_op (a) == op_Proj && get_irn_op(b) == op_Proj) {
ir_node *a1 = get_Proj_pred (a);
ir_node *b1 = get_Proj_pred (b);
- if (a1 != b1 && intern_get_irn_op (a1) == op_Alloc
- && intern_get_irn_op (b1) == op_Alloc)
+ if (a1 != b1 && get_irn_op (a1) == op_Alloc
+ && get_irn_op (b1) == op_Alloc)
return 1;
}
return 0;
But what about Phi-cycles with the Phi0/Id that could not be resolved?
Remaining Phi nodes are just Ids. */
if ((get_Block_n_cfgpreds(n) == 1) &&
- (intern_get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp) &&
+ (get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp) &&
(get_opt_control_flow_straightening())) {
n = get_nodes_Block(get_Block_cfgpred(n, 0)); DBG_OPT_STG;
ir_node *a = get_Block_cfgpred(n, 0);
ir_node *b = get_Block_cfgpred(n, 1);
- if ((intern_get_irn_op(a) == op_Proj) &&
- (intern_get_irn_op(b) == op_Proj) &&
+ if ((get_irn_op(a) == op_Proj) &&
+ (get_irn_op(b) == op_Proj) &&
(get_Proj_pred(a) == get_Proj_pred(b)) &&
- (intern_get_irn_op(get_Proj_pred(a)) == op_Cond) &&
- (intern_get_irn_mode(get_Cond_selector(get_Proj_pred(a))) == mode_b)) {
+ (get_irn_op(get_Proj_pred(a)) == op_Cond) &&
+ (get_irn_mode(get_Cond_selector(get_Proj_pred(a))) == mode_b)) {
/* Also a single entry Block following a single exit Block. Phis have
twice the same operand and will be optimized away. */
n = get_nodes_Block(a); DBG_OPT_IFSIM;
ir_node *oldn = n;
/* optimize symmetric unop */
- if (intern_get_irn_op(get_unop_op(n)) == intern_get_irn_op(n)) {
+ if (get_irn_op(get_unop_op(n)) == get_irn_op(n)) {
n = get_unop_op(get_unop_op(n)); DBG_OPT_ALGSIM2;
}
return n;
ir_node *a = get_Conv_op(n);
ir_node *b;
- ir_mode *n_mode = intern_get_irn_mode(n);
- ir_mode *a_mode = intern_get_irn_mode(a);
+ ir_mode *n_mode = get_irn_mode(n);
+ ir_mode *a_mode = get_irn_mode(a);
if (n_mode == a_mode) { /* No Conv necessary */
n = a; DBG_OPT_ALGSIM3;
- } else if (intern_get_irn_op(a) == op_Conv) { /* Conv(Conv(b)) */
+ } else if (get_irn_op(a) == op_Conv) { /* Conv(Conv(b)) */
ir_mode *b_mode;
b = get_Conv_op(a);
- n_mode = intern_get_irn_mode(n);
- b_mode = intern_get_irn_mode(b);
+ n_mode = get_irn_mode(n);
+ b_mode = get_irn_mode(b);
if (n_mode == b_mode) {
if (n_mode == mode_b) {
if (n_preds == 2) {
ir_node *a = follow_Id (get_Phi_pred(n, 0));
ir_node *b = follow_Id (get_Phi_pred(n, 1));
- if ( (intern_get_irn_op(a) == op_Confirm)
- && (intern_get_irn_op(b) == op_Confirm)
- && follow_Id (intern_get_irn_n(a, 0) == intern_get_irn_n(b, 0))
- && (intern_get_irn_n(a, 1) == intern_get_irn_n (b, 1))
+ if ( (get_irn_op(a) == op_Confirm)
+ && (get_irn_op(b) == op_Confirm)
+ && follow_Id (get_irn_n(a, 0) == get_irn_n(b, 0))
+ && (get_irn_n(a, 1) == get_irn_n (b, 1))
&& (a->data.num == (~b->data.num & irpn_True) )) {
- return intern_get_irn_n(a, 0);
+ return get_irn_n(a, 0);
}
}
#endif
/* skip Id's */
set_Phi_pred(n, i, first_val);
if ( (first_val != n) /* not self pointer */
- && (intern_get_irn_op(first_val) != op_Bad) /* value not dead */
+ && (get_irn_op(first_val) != op_Bad) /* value not dead */
&& !(is_Bad (get_Block_cfgpred(block, i))) ) { /* not dead control flow */
break; /* then found first value. */
}
set_Phi_pred(n, i, scnd_val);
if ( (scnd_val != n)
&& (scnd_val != first_val)
- && (intern_get_irn_op(scnd_val) != op_Bad)
+ && (get_irn_op(scnd_val) != op_Bad)
&& !(is_Bad (get_Block_cfgpred(block, i))) ) {
break;
}
ir_node *a = skip_Proj(get_Load_mem(n));
ir_node *b = get_Load_ptr(n);
- if (intern_get_irn_op(a) == op_Store) {
+ if (get_irn_op(a) == op_Store) {
if ( different_identity (b, get_Store_ptr(a))) {
/* load and store use different pointers, therefore load
needs not take store's memory but the state before. */
ir_node *b = get_Store_ptr(n);
ir_node *c = skip_Proj(get_Store_value(n));
- if (intern_get_irn_op(a) == op_Store
+ if (get_irn_op(a) == op_Store
&& get_Store_ptr(a) == b
&& skip_Proj(get_Store_value(a)) == c) {
/* We have twice exactly the same store -- a write after write. */
n = a; DBG_OPT_WAW;
- } else if (intern_get_irn_op(c) == op_Load
+ } else if (get_irn_op(c) == op_Load
&& (a == c || skip_Proj(get_Load_mem(c)) == a)
&& get_Load_ptr(c) == b ) {
/* We just loaded the value from the same memory, i.e., the store
ir_node *a = get_Proj_pred(n);
- if ( intern_get_irn_op(a) == op_Tuple) {
+ if ( get_irn_op(a) == op_Tuple) {
/* Remove the Tuple/Proj combination. */
if ( get_Proj_proj(n) <= get_Tuple_n_preds(a) ) {
n = get_Tuple_pred(a, get_Proj_proj(n)); DBG_OPT_TUPLE;
assert(0); /* This should not happen! */
n = new_Bad();
}
- } else if (intern_get_irn_mode(n) == mode_X &&
+ } else if (get_irn_mode(n) == mode_X &&
is_Bad(get_nodes_Block(n))) {
/* Remove dead control flow -- early gigo. */
n = new_Bad();
a = get_unop_op(n);
}
- switch (intern_get_irn_opcode(n)) {
+ switch (get_irn_opcode(n)) {
case iro_Cmp:
/* We don't want Cast as input to Cmp. */
- if (intern_get_irn_op(a) == op_Cast) {
+ if (get_irn_op(a) == op_Cast) {
a = get_Cast_op(a);
set_Cmp_left(n, a);
}
- if (intern_get_irn_op(b) == op_Cast) {
+ if (get_irn_op(b) == op_Cast) {
b = get_Cast_op(b);
set_Cmp_right(n, b);
}
ir_node *a = get_DivMod_left(n);
ir_node *b = get_DivMod_right(n);
- ir_mode *mode = intern_get_irn_mode(a);
+ ir_mode *mode = get_irn_mode(a);
- if (!(mode_is_int(mode) && mode_is_int(intern_get_irn_mode(b))))
+ if (!(mode_is_int(mode) && mode_is_int(get_irn_mode(b))))
return n;
if (a == b) {
tarval *ta = value_of(a);
if ((ta != tarval_bad) &&
- (intern_get_irn_mode(a) == mode_b) &&
+ (get_irn_mode(a) == mode_b) &&
(get_opt_unreachable_code())) {
/* It's a boolean Cond, branching on a boolean constant.
Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */
/* We might generate an endless loop, so keep it alive. */
add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n));
} else if ((ta != tarval_bad) &&
- (intern_get_irn_mode(a) == mode_Iu) &&
+ (get_irn_mode(a) == mode_Iu) &&
(get_Cond_kind(n) == dense) &&
(get_opt_unreachable_code())) {
/* I don't want to allow Tuples smaller than the biggest Proj.
set_irn_link(n, new_r_Jmp(current_ir_graph, get_nodes_Block(n)));
/* We might generate an endless loop, so keep it alive. */
add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n));
- } else if ((intern_get_irn_op(a) == op_Eor)
- && (intern_get_irn_mode(a) == mode_b)
+ } else if ((get_irn_op(a) == op_Eor)
+ && (get_irn_mode(a) == mode_b)
&& (tarval_classify(computed_value(get_Eor_right(a))) == TV_CLASSIFY_ONE)) {
/* The Eor is a negate. Generate a new Cond without the negate,
simulate the negate by exchanging the results. */
set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_Block(n),
get_Eor_left(a)));
- } else if ((intern_get_irn_op(a) == op_Not)
- && (intern_get_irn_mode(a) == mode_b)) {
+ } else if ((get_irn_op(a) == op_Not)
+ && (get_irn_mode(a) == mode_b)) {
/* A Not before the Cond. Generate a new Cond without the Not,
simulate the Not by exchanging the results. */
set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_Block(n),
ir_node *a = get_Eor_left(n);
ir_node *b = get_Eor_right(n);
- if ((intern_get_irn_mode(n) == mode_b)
- && (intern_get_irn_op(a) == op_Proj)
- && (intern_get_irn_mode(a) == mode_b)
+ if ((get_irn_mode(n) == mode_b)
+ && (get_irn_op(a) == op_Proj)
+ && (get_irn_mode(a) == mode_b)
&& (tarval_classify (computed_value (b)) == TV_CLASSIFY_ONE)
- && (intern_get_irn_op(get_Proj_pred(a)) == op_Cmp))
+ && (get_irn_op(get_Proj_pred(a)) == op_Cmp))
/* The Eor negates a Cmp. The Cmp has the negated result anyways! */
n = new_r_Proj(current_ir_graph, get_nodes_Block(n), get_Proj_pred(a),
mode_b, get_negated_pnc(get_Proj_proj(a)));
- else if ((intern_get_irn_mode(n) == mode_b)
+ else if ((get_irn_mode(n) == mode_b)
&& (tarval_classify (computed_value (b)) == TV_CLASSIFY_ONE))
/* The Eor is a Not. Replace it by a Not. */
/* ????!!!Extend to bitfield 1111111. */
{
ir_node *a = get_Not_op(n);
- if ( (intern_get_irn_mode(n) == mode_b)
- && (intern_get_irn_op(a) == op_Proj)
- && (intern_get_irn_mode(a) == mode_b)
- && (intern_get_irn_op(get_Proj_pred(a)) == op_Cmp))
+ if ( (get_irn_mode(n) == mode_b)
+ && (get_irn_op(a) == op_Proj)
+ && (get_irn_mode(a) == mode_b)
+ && (get_irn_op(get_Proj_pred(a)) == op_Cmp))
/* We negate a Cmp. The Cmp has the negated result anyways! */
n = new_r_Proj(current_ir_graph, get_nodes_Block(n), get_Proj_pred(a),
mode_b, get_negated_pnc(get_Proj_proj(a)));
if (a == b) return 0;
- if ((intern_get_irn_op(a) != intern_get_irn_op(b)) ||
- (intern_get_irn_mode(a) != intern_get_irn_mode(b))) return 1;
+ if ((get_irn_op(a) != get_irn_op(b)) ||
+ (get_irn_mode(a) != get_irn_mode(b))) return 1;
/* compare if a's in and b's in are equal */
- irn_arity_a = intern_get_irn_arity (a);
- if (irn_arity_a != intern_get_irn_arity(b))
+ irn_arity_a = get_irn_arity (a);
+ if (irn_arity_a != get_irn_arity(b))
return 1;
/* for block-local cse and pinned nodes: */
- if (!get_opt_global_cse() || (get_op_pinned(intern_get_irn_op(a)) == pinned)) {
- if (intern_get_irn_n(a, -1) != intern_get_irn_n(b, -1))
+ if (!get_opt_global_cse() || (get_op_pinned(get_irn_op(a)) == pinned)) {
+ if (get_irn_n(a, -1) != get_irn_n(b, -1))
return 1;
}
/* compare a->in[0..ins] with b->in[0..ins] */
for (i = 0; i < irn_arity_a; i++)
- if (intern_get_irn_n(a, i) != intern_get_irn_n(b, i))
+ if (get_irn_n(a, i) != get_irn_n(b, i))
return 1;
/*
int i, irn_arity;
/* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
- h = irn_arity = intern_get_irn_arity(node);
+ h = irn_arity = get_irn_arity(node);
/* consider all in nodes... except the block. */
for (i = 0; i < irn_arity; i++) {
- h = 9*h + (unsigned long)intern_get_irn_n(node, i);
+ h = 9*h + (unsigned long)get_irn_n(node, i);
}
/* ...mode,... */
- h = 9*h + (unsigned long) intern_get_irn_mode (node);
+ h = 9*h + (unsigned long) get_irn_mode (node);
/* ...and code */
- h = 9*h + (unsigned long) intern_get_irn_op (node);
+ h = 9*h + (unsigned long) get_irn_op (node);
return h;
}
/* TODO: use a generic commutative attribute */
if (get_opt_reassociation()) {
- if (is_op_commutative(intern_get_irn_op(n))) {
+ if (is_op_commutative(get_irn_op(n))) {
/* for commutative operators perform a OP b == b OP a */
if (get_binop_left(n) > get_binop_right(n)) {
ir_node *h = get_binop_left(n);
identify_cons (pset *value_table, ir_node *n) {
ir_node *old = n;
n = identify(value_table, n);
- if (intern_get_irn_n(old, -1) != intern_get_irn_n(n, -1))
+ if (get_irn_n(old, -1) != get_irn_n(n, -1))
set_irg_pinned(current_ir_graph, floats);
return n;
}
gigo (ir_node *node)
{
int i, irn_arity;
- ir_op* op = intern_get_irn_op(node);
+ ir_op* op = get_irn_op(node);
/* remove garbage blocks by looking at control flow that leaves the block
and replacing the control flow by Bad. */
- if (intern_get_irn_mode(node) == mode_X) {
+ if (get_irn_mode(node) == mode_X) {
ir_node *block = get_nodes_block(node);
if (op == op_End) return node; /* Don't optimize End, may have Bads. */
- if (intern_get_irn_op(block) == op_Block && get_Block_matured(block)) {
- irn_arity = intern_get_irn_arity(block);
+ if (get_irn_op(block) == op_Block && get_Block_matured(block)) {
+ irn_arity = get_irn_arity(block);
for (i = 0; i < irn_arity; i++) {
- if (!is_Bad(intern_get_irn_n(block, i))) break;
+ if (!is_Bad(get_irn_n(block, i))) break;
}
if (i == irn_arity) return new_Bad();
}
/* Blocks, Phis and Tuples may have dead inputs, e.g., if one of the
blocks predecessors is dead. */
if ( op != op_Block && op != op_Phi && op != op_Tuple) {
- irn_arity = intern_get_irn_arity(node);
+ irn_arity = get_irn_arity(node);
for (i = -1; i < irn_arity; i++) {
- if (is_Bad(intern_get_irn_n(node, i))) {
+ if (is_Bad(get_irn_n(node, i))) {
return new_Bad();
}
}
/* If Block has only Bads as predecessors it's garbage. */
/* If Phi has only Bads as predecessors it's garbage. */
if ((op == op_Block && get_Block_matured(node)) || op == op_Phi) {
- irn_arity = intern_get_irn_arity(node);
+ irn_arity = get_irn_arity(node);
for (i = 0; i < irn_arity; i++) {
- if (!is_Bad(intern_get_irn_n(node, i))) break;
+ if (!is_Bad(get_irn_n(node, i))) break;
}
if (i == irn_arity) node = new_Bad();
}
{
tarval *tv;
ir_node *oldn = n;
- opcode iro = intern_get_irn_opcode(n);
+ opcode iro = get_irn_opcode(n);
/* Allways optimize Phi nodes: part of the construction. */
if ((!get_opt_optimize()) && (iro != iro_Phi)) return n;
/* constant expression evaluation / constant folding */
if (get_opt_constant_folding()) {
/* constants can not be evaluated */
- if (intern_get_irn_op(n) != op_Const) {
+ if (get_irn_op(n) != op_Const) {
/* try to evaluate */
tv = computed_value (n);
- if ((intern_get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
+ if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
/*
* we MUST copy the node here temparary, because it's still needed
* for DBG_OPT_ALGSIM0
/* Some more constant expression evaluation that does not allow to
free the node. */
- iro = intern_get_irn_opcode(n);
+ iro = get_irn_opcode(n);
if (get_opt_constant_folding() ||
(iro == iro_Cond) ||
(iro == iro_Proj)) /* Flags tested local. */
n = gigo (n);
/* Now we have a legal, useful node. Enter it in hash table for cse */
- if (get_opt_cse() && (intern_get_irn_opcode(n) != iro_Block)) {
+ if (get_opt_cse() && (get_irn_opcode(n) != iro_Block)) {
n = identify_remember (current_ir_graph->value_table, n);
}
{
tarval *tv;
ir_node *oldn = n;
- opcode iro = intern_get_irn_opcode(n);
+ opcode iro = get_irn_opcode(n);
- if (!get_opt_optimize() && (intern_get_irn_op(n) != op_Phi)) return n;
+ if (!get_opt_optimize() && (get_irn_op(n) != op_Phi)) return n;
/* if not optimize return n */
if (n == NULL) {
if (iro != iro_Const) {
/* try to evaluate */
tv = computed_value (n);
- if ((intern_get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
+ if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
/* evaluation was succesful -- replace the node. */
n = new_Const (get_tarval_mode (tv), tv);
DBG_OPT_ALGSIM0;
}
/* Some more constant expression evaluation. */
- iro = intern_get_irn_opcode(n);
+ iro = get_irn_opcode(n);
if (get_opt_constant_folding() ||
(iro == iro_Cond) ||
(iro == iro_Proj)) /* Flags tested local. */
/* Now we have a legal, useful node. Enter it in hash table for cse.
Blocks should be unique anyways. (Except the successor of start:
is cse with the start block!) */
- if (get_opt_cse() && (intern_get_irn_opcode(n) != iro_Block))
+ if (get_opt_cse() && (get_irn_opcode(n) != iro_Block))
n = identify_remember (current_ir_graph->value_table, n);
return n;
ir_node *pred;
ir_op *op;
- switch (intern_get_irn_opcode(node)) {
+ switch (get_irn_opcode(node)) {
case iro_Proj:
pred = get_Proj_pred(node);
- op = intern_get_irn_op(pred);
+ op = get_irn_op(pred);
if (op == op_Proj) {
ir_node *start = get_Proj_pred(pred);
- if (intern_get_irn_op(start) == op_Start) {
+ if (get_irn_op(start) == op_Start) {
if (get_Proj_proj(pred) == pn_Start_T_args) {
/* found Proj(ProjT(Start)) */
set_irn_link(node, data->proj_data);
ir_node **ress;
/* search all returns of a block */
- if (intern_get_irn_op(ret) != op_Return)
+ if (get_irn_op(ret) != op_Return)
continue;
/* check, if it's a Return self() */
proj_m = get_Return_mem(ret);
- if (intern_get_irn_op(proj_m) != op_Proj)
+ if (get_irn_op(proj_m) != op_Proj)
continue;
call = get_Proj_pred(proj_m);
- if (intern_get_irn_op(call) != op_Call)
+ if (get_irn_op(call) != op_Call)
continue;
/* check if it's a recursive call */
call_ptr = get_Call_ptr(call);
- if (intern_get_irn_op(call_ptr) != op_Const)
+ if (get_irn_op(call_ptr) != op_Const)
continue;
tv = get_Const_tarval(call_ptr);
ir_node *proj_proj;
ir_node *irn;
- if (intern_get_irn_op(proj) != op_Proj) {
+ if (get_irn_op(proj) != op_Proj) {
/* not routed to a call */
break;
}
proj_proj = get_Proj_pred(proj);
- if (intern_get_irn_op(proj) != op_Proj) {
+ if (get_irn_op(proj) != op_Proj) {
/* not routed to a call */
break;
}
*/
static ir_op *stat_get_irn_op(const ir_node *node)
{
- ir_op *op = intern_get_irn_op(node);
+ ir_op *op = get_irn_op(node);
- if (op->code == iro_Phi && intern_get_irn_arity(node) == 0) {
+ if (op->code == iro_Phi && get_irn_arity(node) == 0) {
/* special case, a Phi0 node, count on extra counter */
op = status->op_Phi0;
}
- else if (op->code == iro_Phi && intern_get_irn_mode(node) == mode_M) {
+ else if (op->code == iro_Phi && get_irn_mode(node) == mode_M) {
/* special case, a Memory Phi node, count on extra counter */
op = status->op_PhiM;
}
cnt_env_t *cenv = env;
node_entry_t *entry;
ir_op *op = stat_get_irn_op(node);
- int arity = intern_get_irn_arity(node);
+ int arity = get_irn_arity(node);
entry = opcode_get_entry(op, cenv->set);
opcode code;
int i, preds;
- code = intern_get_irn_opcode(node);
+ code = get_irn_opcode(node);
put_code(buf, code);
--max_depth;
return;
}
- preds = intern_get_irn_arity(node);
+ preds = get_irn_arity(node);
put_code(buf, preds);
for (i = 0; i < preds; ++i) {
- ir_node *n = intern_get_irn_n(node, i);
+ ir_node *n = get_irn_n(node, i);
_encode_node(n, buf, max_depth);
}
return get_id_str(op->name);
}
-tp_opcode get_tpop_code (tp_op *op){
- return op->code;
+tp_opcode (get_tpop_code)(tp_op *op){
+ return __get_tpop_code(op);
}
-ident *get_tpop_ident(tp_op *op){
- return op->name;
+ident *(get_tpop_ident)(tp_op *op){
+ return __get_tpop_ident(op);
}
/* returns the attribute size of the operator. */
-int get_tpop_attr_size (tp_op *op) {
- return op->attr_size;
+int (get_tpop_attr_size)(tp_op *op) {
+ return __get_tpop_attr_size(op);
}
*/
int get_tpop_attr_size (tp_op *op);
+
+/* ---------------- *
+ * inline functions *
+ * -----------------*/
+
+static INLINE tp_opcode
+__get_tpop_code(tp_op *op) {
+ return op->code;
+}
+
+static INLINE ident *
+__get_tpop_ident(tp_op *op){
+ return op->name;
+}
+
+static INLINE int
+__get_tpop_attr_size(tp_op *op) {
+ return op->attr_size;
+}
+
+#define get_tpop_code(op) __get_tpop_code(op)
+#define get_tpop_ident(op) __get_tpop_ident(op)
+#define get_tpop_attr_size(op) __get_tpop_attr_size(op)
+
#endif /* _TPOP_T_H_ */
}
unsigned long type_visited;
-void set_master_type_visited(unsigned long val) { type_visited = val; }
-unsigned long get_master_type_visited() { return type_visited; }
-void inc_master_type_visited() { type_visited++; }
+
+void (set_master_type_visited)(unsigned long val) { __set_master_type_visited(val); }
+unsigned long (get_master_type_visited)(void) { return __get_master_type_visited(); }
+void (inc_master_type_visited)(void) { __inc_master_type_visited(); }
type *
}
/* set/get the link field */
-void *get_type_link(type *tp)
+void *(get_type_link)(type *tp)
{
- assert(tp && tp->kind == k_type);
- return(tp -> link);
+ return __get_type_link(tp);
}
-void set_type_link(type *tp, void *l)
+void (set_type_link)(type *tp, void *l)
{
- assert(tp && tp->kind == k_type);
- tp -> link = l;
+ __set_type_link(tp, l);
}
-tp_op* get_type_tpop(type *tp) {
- assert(tp && tp->kind == k_type);
- return tp->type_op;
+tp_op *(get_type_tpop)(type *tp) {
+ return __get_type_tpop(tp);
}
-ident* get_type_tpop_nameid(type *tp) {
- assert(tp && tp->kind == k_type);
- return tp->type_op->name;
+ident *(get_type_tpop_nameid)(type *tp) {
+ return __get_type_tpop_nameid(tp);
}
const char* get_type_tpop_name(type *tp) {
return get_id_str(tp->type_op->name);
}
-tp_opcode get_type_tpop_code(type *tp) {
- assert(tp && tp->kind == k_type);
- return tp->type_op->code;
+tp_opcode (get_type_tpop_code)(type *tp) {
+ return __get_type_tpop_code(tp);
}
-ir_mode* get_type_mode(type *tp) {
- assert(tp && tp->kind == k_type);
- return tp->mode;
+ir_mode *(get_type_mode)(type *tp) {
+ return __get_type_mode(tp);
}
void set_type_mode(type *tp, ir_mode* m) {
}
}
-ident* get_type_ident(type *tp) {
- assert(tp && tp->kind == k_type);
- return tp->name;
+ident *(get_type_ident)(type *tp) {
+ return __get_type_ident(tp);
}
-void set_type_ident(type *tp, ident* id) {
- assert(tp && tp->kind == k_type);
- tp->name = id;
+void (set_type_ident)(type *tp, ident* id) {
+ __set_type_ident(tp, id);
}
/* Outputs a unique number for this node */
-long
-get_type_nr(type *tp) {
- assert(tp);
-#ifdef DEBUG_libfirm
- return tp->nr;
-#else
- return (long)tp;
-#endif
+long (get_type_nr)(type *tp) {
+ return __get_type_nr(tp);
}
const char* get_type_name(type *tp) {
return (get_id_str(tp->name));
}
-int get_type_size(type *tp) {
- assert(tp && tp->kind == k_type);
- return tp->size;
+int (get_type_size)(type *tp) {
+ return __get_type_size(tp);
}
void
tp->size = size;
}
-type_state
-get_type_state(type *tp) {
- assert(tp && tp->kind == k_type);
- return tp->state;
+type_state (get_type_state)(type *tp) {
+ return __get_type_state(tp);
}
void
tp->state = state;
}
-unsigned long get_type_visited(type *tp) {
- assert(tp && tp->kind == k_type);
- return tp->visit;
+unsigned long (get_type_visited)(type *tp) {
+ return __get_type_visited(tp);
}
-void set_type_visited(type *tp, unsigned long num) {
- assert(tp && tp->kind == k_type);
- tp->visit = num;
+void (set_type_visited)(type *tp, unsigned long num) {
+ __set_type_visited(tp, num);
}
+
/* Sets visited field in type to type_visited. */
-void mark_type_visited(type *tp) {
- assert(tp && tp->kind == k_type);
- assert(tp->visit < type_visited);
- tp->visit = type_visited;
-}
-/* @@@ name clash with master flag
-bool type_visited(type *tp) {
- assert(tp && tp->kind == k_type);
- return tp->visit >= type_visited;
- }*/
-bool type_not_visited(type *tp) {
- assert(tp && tp->kind == k_type);
- return tp->visit < type_visited;
+void (mark_type_visited)(type *tp) {
+ __mark_type_visited(tp);
}
+/* @@@ name clash with master flag
+int (type_visited)(type *tp) {
+ return __type_visited(tp);
+}*/
-int is_type (void *thing) {
- assert(thing);
- if (get_kind(thing) == k_type)
- return 1;
- else
- return 0;
+int (type_not_visited)(type *tp) {
+ return __type_not_visited(tp);
}
+int (is_type)(void *thing) {
+ return __is_type(thing);
+}
bool equal_type(type *typ1, type *typ2) {
entity **m;
}
/* typecheck */
-bool is_class_type(type *clss) {
- assert(clss);
- if (clss->type_op == type_class) return 1; else return 0;
+int (is_class_type)(type *clss) {
+ return __is_class_type(clss);
}
bool is_subclass_of(type *low, type *high) {
}
/* typecheck */
-bool is_struct_type(type *strct) {
- assert(strct);
- if (strct->type_op == type_struct) return 1; else return 0;
+int (is_struct_type)(type *strct) {
+ return __is_struct_type(strct);
}
/*******************************************************************/
}
/* typecheck */
-bool is_method_type (type *method) {
- assert(method);
- return (method->type_op == type_method);
+int (is_method_type)(type *method) {
+ return __is_method_type(method);
}
/*-----------------------------------------------------------------*/
}
/* typecheck */
-bool is_union_type (type *uni) {
- assert(uni);
- if (uni->type_op == type_union) return 1; else return 0;
+int (is_union_type)(type *uni) {
+ return __is_union_type(uni);
}
/*-----------------------------------------------------------------*/
return res;
}
+
type *new_d_type_array (ident *name, int n_dimensions,
type *element_type, dbg_info* db) {
type *res = new_type_array (name, n_dimensions, element_type);
void free_array_entities (type *array) {
assert(array && (array->type_op == type_array));
}
+
void free_array_attrs (type *array) {
assert(array && (array->type_op == type_array));
free(array->attr.aa.lower_bound);
}
/* typecheck */
-bool is_array_type (type *array) {
- assert(array);
- if (array->type_op == type_array) return 1; else return 0;
+int (is_array_type)(type *array) {
+ return __is_array_type(array);
}
/*-----------------------------------------------------------------*/
}
/* typecheck */
-bool is_enumeration_type (type *enumeration) {
- assert(enumeration);
- if (enumeration->type_op == type_enumeration) return 1; else return 0;
+int (is_enumeration_type)(type *enumeration) {
+ return __is_enumeration_type(enumeration);
}
/*-----------------------------------------------------------------*/
}
/* typecheck */
-bool is_pointer_type (type *pointer) {
- assert(pointer);
- if (pointer->type_op == type_pointer) return 1; else return 0;
+int (is_pointer_type)(type *pointer) {
+ return __is_pointer_type(pointer);
}
/* Returns the first pointer type that has as points_to tp.
}
/* typecheck */
-bool is_primitive_type (type *primitive) {
- assert(primitive && primitive->kind == k_type);
- if (primitive->type_op == type_primitive) return 1; else return 0;
+int (is_primitive_type)(type *primitive) {
+ return __is_primitive_type(primitive);
}
/*-----------------------------------------------------------------*/
/*-----------------------------------------------------------------*/
-int is_atomic_type(type *tp) {
- assert(tp && tp->kind == k_type);
- return (is_primitive_type(tp) || is_pointer_type(tp) ||
- is_enumeration_type(tp));
+int (is_atomic_type)(type *tp) {
+ return __is_atomic_type(tp);
}
/*
void set_type_visited(type *tp, unsigned long num);
/* Sets visited field in type to type_visited. */
void mark_type_visited(type *tp);
-/* @@@ name clash!! bool type_visited(type *tp); */
-bool type_not_visited(type *tp);
+/* @@@ name clash!! int type_visited(type *tp); */
+int type_not_visited(type *tp);
void* get_type_link(type *tp);
void set_type_link(type *tp, void *l);
int get_class_dfn (type *clss);
/** Returns true if a type is a class type. */
-bool is_class_type(type *clss);
+int is_class_type(type *clss);
/** Returns true if low is subclass of high. */
bool is_subclass_of(type *low, type *high);
void remove_struct_member (type *strct, entity *member);
/** Returns true if a type is a struct type. */
-bool is_struct_type(type *strct);
+int is_struct_type(type *strct);
/**
* @page method_type Representation of a method type
void set_method_first_variadic_param_index(type *method, int index);
/** Returns true if a type is a method type. */
-bool is_method_type (type *method);
+int is_method_type (type *method);
/**
* @page union_type Representation of a union type.
void remove_union_member (type *uni, entity *member);
/** Returns true if a type is a union type. */
-bool is_union_type (type *uni);
+int is_union_type (type *uni);
/**
* @page array_type Representation of an array type
entity *get_array_element_entity (type *array);
/** Returns true if a type is an array type. */
-bool is_array_type (type *array);
+int is_array_type(type *array);
/**
* @page enumeration_type Representation of an enumeration type
const char *get_enumeration_name(type *enumeration, int pos);
/** Returns true if a type is a enumeration type. */
-bool is_enumeration_type (type *enumeration);
+int is_enumeration_type (type *enumeration);
/**
* @page pointer_type Representation of a pointer type
type *get_pointer_points_to_type (type *pointer);
/** Returns true if a type is a pointer type. */
-bool is_pointer_type (type *pointer);
+int is_pointer_type (type *pointer);
/** Returns the first pointer type that has as points_to tp.
* Not efficient: O(#types).
type *new_d_type_primitive (ident *name, ir_mode *mode, dbg_info* db);
/** Returns true if a type is a primitive type. */
-bool is_primitive_type (type *primitive);
+int is_primitive_type (type *primitive);
/**
# include "config.h"
# endif
# include "type.h"
+# include "tpop_t.h"
+
/**
* @file type_t.h
* This file contains the datatypes hidden in type.h.
/** initialize the type module */
void init_type (void);
+
+/* ------------------- *
+ * inline functions *
+ * ------------------- */
+
+extern unsigned long type_visited;
+
+static INLINE void __set_master_type_visited(unsigned long val) { type_visited = val; }
+static INLINE unsigned long __get_master_type_visited(void) { return type_visited; }
+static INLINE void __inc_master_type_visited(void) { type_visited++; }
+
+static INLINE void *
+__get_type_link(type *tp) {
+ assert(tp && tp->kind == k_type);
+ return(tp -> link);
+}
+
+static INLINE void
+__set_type_link(type *tp, void *l) {
+ assert(tp && tp->kind == k_type);
+ tp -> link = l;
+}
+
+static INLINE tp_op*
+__get_type_tpop(type *tp) {
+ assert(tp && tp->kind == k_type);
+ return tp->type_op;
+}
+
+static INLINE ident*
+__get_type_tpop_nameid(type *tp) {
+ assert(tp && tp->kind == k_type);
+ return get_tpop_ident(tp->type_op);
+}
+
+static INLINE tp_opcode
+__get_type_tpop_code(type *tp) {
+ assert(tp && tp->kind == k_type);
+ return get_tpop_code(tp->type_op);
+}
+
+static INLINE ir_mode *
+__get_type_mode(type *tp) {
+ assert(tp && tp->kind == k_type);
+ return tp->mode;
+}
+
+static INLINE ident *
+__get_type_ident(type *tp) {
+ assert(tp && tp->kind == k_type);
+ return tp->name;
+}
+
+static INLINE void
+__set_type_ident(type *tp, ident* id) {
+ assert(tp && tp->kind == k_type);
+ tp->name = id;
+}
+
+static INLINE long
+__get_type_nr(type *tp) {
+ assert(tp);
+#ifdef DEBUG_libfirm
+ return tp->nr;
+#else
+ return (long)tp;
+#endif
+}
+
+static INLINE int
+__get_type_size(type *tp) {
+ assert(tp && tp->kind == k_type);
+ return tp->size;
+}
+
+static INLINE type_state
+__get_type_state(type *tp) {
+ assert(tp && tp->kind == k_type);
+ return tp->state;
+}
+
+static INLINE unsigned long
+__get_type_visited(type *tp) {
+ assert(tp && tp->kind == k_type);
+ return tp->visit;
+}
+
+static INLINE void
+__set_type_visited(type *tp, unsigned long num) {
+ assert(tp && tp->kind == k_type);
+ tp->visit = num;
+}
+
+static INLINE void
+__mark_type_visited(type *tp) {
+ assert(tp && tp->kind == k_type);
+ assert(tp->visit < type_visited);
+ tp->visit = type_visited;
+}
+
+static INLINE int
+__type_visited(type *tp) {
+ assert(tp && tp->kind == k_type);
+ return tp->visit >= type_visited;
+}
+
+static INLINE int
+__type_not_visited(type *tp) {
+ assert(tp && tp->kind == k_type);
+ return tp->visit < type_visited;
+}
+
+static INLINE int
+__is_type(void *thing) {
+ return (get_kind(thing) == k_type);
+}
+
+static INLINE int
+__is_class_type(type *clss) {
+ assert(clss);
+ return (clss->type_op == type_class);
+}
+
+static INLINE int
+__is_struct_type(type *strct) {
+ assert(strct);
+ return (strct->type_op == type_struct);
+}
+
+static INLINE int
+__is_method_type(type *method) {
+ assert(method);
+ return (method->type_op == type_method);
+}
+
+static INLINE int
+__is_union_type(type *uni) {
+ assert(uni);
+ return (uni->type_op == type_union);
+}
+
+static INLINE int
+__is_array_type(type *array) {
+ assert(array);
+ return (array->type_op == type_array);
+}
+
+static INLINE int
+__is_enumeration_type(type *enumeration) {
+ assert(enumeration);
+ return (enumeration->type_op == type_enumeration);
+}
+
+static INLINE int
+__is_pointer_type(type *pointer) {
+ assert(pointer);
+ return (pointer->type_op == type_pointer);
+}
+
+/** Returns true if a type is a primitive type. */
+static INLINE int
+__is_primitive_type(type *primitive) {
+ assert(primitive && primitive->kind == k_type);
+ return (primitive->type_op == type_primitive);
+}
+
+static INLINE int
+__is_atomic_type(type *tp) {
+ assert(tp && tp->kind == k_type);
+ return (is_primitive_type(tp) || is_pointer_type(tp) ||
+ is_enumeration_type(tp));
+}
+
+
+#define set_master_type_visited(val) __set_master_type_visited(val)
+#define get_master_type_visited() __get_master_type_visited()
+#define inc_master_type_visited() __inc_master_type_visited()
+#define get_type_link(tp) __get_type_link(tp)
+#define set_type_link(tp, l) __set_type_link(tp, l)
+#define get_type_tpop(tp) __get_type_tpop(tp)
+#define get_type_tpop_nameid(tp) __get_type_tpop_nameid(tp)
+#define get_type_tpop_code(tp) __get_type_tpop_code(tp)
+#define get_type_mode(tp) __get_type_mode(tp)
+#define get_type_ident(tp) __get_type_ident(tp)
+#define set_type_ident(tp, id) __set_type_ident(tp, id)
+#define get_type_nr(tp) __get_type_nr(tp)
+#define get_type_size(tp) __get_type_size(tp)
+#define get_type_state(tp) __get_type_state(tp)
+#define get_type_visited(tp) __get_type_visited(tp)
+#define set_type_visited(tp, num) __set_type_visited(tp, num)
+#define mark_type_visited(tp) __mark_type_visited(tp)
+#define type_visited(tp) __type_visited(tp)
+#define type_not_visited(tp) __type_not_visited(tp)
+#define is_type(thing) __is_type(thing)
+#define is_class_type(clss) __is_class_type(clss)
+#define is_struct_type(strct) __is_struct_type(strct)
+#define is_method_type(method) __is_method_type(method)
+#define is_union_type(uni) __is_union_type(uni)
+#define is_array_type(array) __is_array_type(array)
+#define is_enumeration_type(enumeration) __is_enumeration_type(enumeration)
+#define is_pointer_type(pointer) __is_pointer_type(pointer)
+#define is_primitive_type(primitive) __is_primitive_type(primitive)
+#define is_atomic_type(tp) __is_atomic_type(tp)
+
# endif /* _TYPE_T_H_ */