for (i = 0; i < arity; i++) {
ir_node *pred_cfop = skip_Proj(get_Block_cfgpred(sblock, i));
//printf(" "); DDMN(pred_cfop);
- if (get_irn_op(pred_cfop) == op_CallBegin) { /* could be Unknown */
+ if (is_CallBegin(pred_cfop)) { /* could be Unknown */
ir_graph *ip_pred = get_irn_irg(pred_cfop);
//printf(" "); DDMG(ip_pred);
if ((ip_pred == pred) && is_backedge(sblock, i)) {
* op_Tuple, or a node that is handled in
* "free_ana_walker". */
ir_node * pred = get_Proj_pred(node);
- if (get_irn_link(pred) != MARK && get_irn_op(pred) == op_Tuple) {
+ if (get_irn_link(pred) != MARK && is_Tuple(pred)) {
free_mark_proj(get_Tuple_pred(pred, get_Proj_proj(node)), n, set);
} else {
/* nothing: handled in "free_ana_walker". */
static void walk_pre(ir_node *n, void *env)
{
(void) env;
- if (get_irn_op(n) == op_Raise)
+ if (is_Raise(n))
just_passed_a_Raise = 1;
- if ( (get_irn_op(n) == op_Proj)
- && (get_irn_op(get_Proj_pred(n)) == op_Cond)
- && (just_passed_a_Raise)) {
+ if (get_irn_op(n) == op_Proj &&
+ is_Cond(get_Proj_pred(n)) &&
+ just_passed_a_Raise) {
ir_node *other_proj;
ir_node *c = get_Proj_pred(n);
}
}
- if (get_irn_op(n) == op_Cond) {
+ if (is_Cond(n)) {
set_irn_link(n, Cond_list);
Cond_list = n;
}
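/* Aside: a minimal sketch of the is_*() predicate pattern this patch adopts
 * throughout. The name sketch_is_Raise is hypothetical; the real libFirm
 * is_Raise() is assumed to reduce to exactly this opcode comparison. */
static inline int sketch_is_Raise(const ir_node *n) {
	return get_irn_op(n) == op_Raise;
}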
static void walk_post(ir_node *n, void *env)
{
(void) env;
- if (get_irn_op(n) == op_Raise)
+ if (is_Raise(n))
just_passed_a_Raise = 0;
- if ( (get_irn_op(n) == op_Proj)
- && (get_irn_op(get_Proj_pred(n)) == op_Cond)
- && ((get_ProjX_probability(n) == Cond_prob_exception_taken) ||
- (get_ProjX_probability(n) == Cond_prob_was_exception_taken) )) {
+ if (get_irn_op(n) == op_Proj &&
+ is_Cond(get_Proj_pred(n)) && (
+ get_ProjX_probability(n) == Cond_prob_exception_taken ||
+ get_ProjX_probability(n) == Cond_prob_was_exception_taken
+ )) {
just_passed_a_Raise = 1;
}
}
ir_node *cfop;
if (is_ir_node(reg)) {
cfop = get_Block_cfgpred((ir_node *)reg, pos);
- if (is_Proj(cfop) && (get_irn_op(get_Proj_pred(cfop)) != op_Cond))
+ if (is_Proj(cfop) && !is_Cond(get_Proj_pred(cfop)))
cfop = skip_Proj(cfop);
} else {
assert(is_ir_loop(reg));
case tpo_array: {
long n_elt = DEFAULT_N_ARRAY_ELEMENTS;
assert(get_array_n_dimensions(tp) == 1 && "other not implemented");
- if ((get_irn_op(get_array_lower_bound(tp, 0)) == op_Const) &&
- (get_irn_op(get_array_upper_bound(tp, 0)) == op_Const) ) {
+ if (is_Const(get_array_lower_bound(tp, 0)) &&
+ is_Const(get_array_upper_bound(tp, 0))) {
n_elt = get_array_upper_bound_int(tp, 0) - get_array_lower_bound_int(tp, 0);
}
s = n_elt;
int elt_s = get_type_estimated_size_bytes(get_array_element_type(tp));
long n_elt = DEFAULT_N_ARRAY_ELEMENTS;
assert(get_array_n_dimensions(tp) == 1 && "other not implemented");
- if ((get_irn_op(get_array_lower_bound(tp, 0)) == op_Const) &&
- (get_irn_op(get_array_upper_bound(tp, 0)) == op_Const) ) {
+ if (is_Const(get_array_lower_bound(tp, 0)) &&
+ is_Const(get_array_upper_bound(tp, 0))) {
n_elt = get_array_upper_bound_int(tp, 0) - get_array_lower_bound_int(tp, 0);
}
s = n_elt * elt_s;
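/* Worked example (hypothetical values): an array with Const bounds
 * [0 .. 10) of 4-byte elements gives n_elt = 10 - 0 = 10 and
 * s = 10 * 4 = 40 bytes; if either bound is not a Const, the
 * DEFAULT_N_ARRAY_ELEMENTS fallback stays in effect. */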
double n_loads = 0;
for (i = 0; i < n_acc; ++i) {
ir_node *acc = get_entity_access(ent, i);
- if (get_irn_op(acc) == op_Load) {
+ if (is_Load(acc)) {
n_loads += get_irn_final_cost(acc);
}
}
double n_stores = 0;
for (i = 0; i < n_acc; ++i) {
ir_node *acc = get_entity_access(ent, i);
- if (get_irn_op(acc) == op_Store)
+ if (is_Store(acc))
n_stores += get_irn_final_cost(acc);
}
return n_stores;
double n_calls = 0;
for (i = 0; i < n_acc; ++i) {
ir_node *acc = get_entity_access(ent, i);
- if (get_irn_op(acc) == op_Call)
-
+ if (is_Call(acc))
n_calls += get_irn_final_cost(acc);
}
return n_calls;
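/* The three access-counting loops above share one shape; a hedged sketch of
 * a single-pass variant (the function name and out-parameter interface are
 * assumptions, not part of this patch): */
static void sketch_count_accesses(ir_entity *ent, int n_acc,
                                  double *loads, double *stores, double *calls)
{
	int i;
	for (i = 0; i < n_acc; ++i) {
		ir_node *acc = get_entity_access(ent, i);
		if (is_Load(acc))
			*loads += get_irn_final_cost(acc);
		else if (is_Store(acc))
			*stores += get_irn_final_cost(acc);
		else if (is_Call(acc))
			*calls += get_irn_final_cost(acc);
	}
}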
cfop = get_Block_cfgpred(b, i);
if (is_Proj(cfop)) {
- if (get_irn_op(get_Proj_pred(cfop)) != op_Cond) {
+ if (!is_Cond(get_Proj_pred(cfop))) {
cfop = skip_Proj(cfop);
} else {
assert(get_nodes_block(cfop) == get_nodes_block(skip_Proj(cfop)));
pred = skip_Proj(get_nodes_block(cfop));
/* We want nice blocks. */
- assert( get_irn_op(pred) != op_Bad
- && get_irn_op(skip_Proj(get_Block_cfgpred(b, i))) != op_Bad);
+ assert(!is_Bad(pred) && !is_Bad(skip_Proj(get_Block_cfgpred(b, i))));
pred_l = get_irn_loop(pred);
if (pred_l == l) {
add_region_in(b, pred);
not reachable.
I.e., with this code, the order on the loop tree is correct. But a (single)
test showed the loop tree is deeper. */
- if (get_irn_op(n) == op_Phi ||
- get_irn_op(n) == op_Block ||
- (get_irn_op(n) == op_Filter && get_interprocedural_view()) ||
- (get_irg_pinned(get_irn_irg(n)) == op_pin_state_floats &&
- get_irn_pinned(n) == op_pin_state_floats))
+ if (get_irn_op(n) == op_Phi ||
+ is_Block(n) ||
+ (is_Filter(n) && get_interprocedural_view()) || (
+ get_irg_pinned(get_irn_irg(n)) == op_pin_state_floats &&
+ get_irn_pinned(n) == op_pin_state_floats
+ ))
// Here we could test for backedge at -1 which is illegal
return 0;
else
But it guarantees that Blocks are analysed before nodes contained in the
block. If so, we can set the value to undef if the block is not
executed. */
- if (is_cfop(n) || is_fragile_op(n) || get_irn_op(n) == op_Start)
+ if (is_cfop(n) || is_fragile_op(n) || is_Start(n))
return -1;
else
return 0;
ir_node *m;
if (is_backedge(n, i)) continue;
m = get_irn_n(n, i); /* get_irn_ip_pred(n, i); */
- /* if ((!m) || (get_irn_op(m) == op_Unknown)) continue; */
+ /* if (!m || is_Unknown(m)) continue; */
my_scc(m);
if (irn_is_in_stack(m)) {
/* Uplink of m is smaller if n->m is a backedge.
/* Deal with Start / Call here: we need to know the Proj Nr. */
assert(get_irn_mode(pred) == mode_T);
pred_pred = get_Proj_pred(pred);
- if (get_irn_op(pred_pred) == op_Start) {
+ if (is_Start(pred_pred)) {
ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
tp = get_method_param_type(mtp, get_Proj_proj(n));
- } else if (get_irn_op(pred_pred) == op_Call) {
+ } else if (is_Call(pred_pred)) {
ir_type *mtp = get_Call_type(pred_pred);
tp = get_method_res_type(mtp, get_Proj_proj(n));
- } else if (get_irn_op(pred_pred) == op_Tuple) {
+ } else if (is_Tuple(pred_pred)) {
panic("Encountered nested Tuple");
} else {
DB((dbg, SET_LEVEL_1, "Proj %ld from Proj from ??: unknown type\n", get_irn_node_nr(n)));
ir_node *f_addr = get_atomic_ent_value (over);
ir_entity *impl_ent = get_SymConst_entity (f_addr);
- assert ((get_irn_op(f_addr) == op_SymConst) && "can't do complex addrs");
+ assert(is_SymConst(f_addr) && "can't do complex addrs");
if (impl_ent == addr) {
assert(0 && "does such a thing even exist?");
force_description (over, addr);
ir_node *addr;
(void) env;
- if (get_irn_op(n) == op_Alloc) {
+ if (is_Alloc(n)) {
add_type_alloc(get_Alloc_type(n), n);
return;
- } else if (get_irn_op(n) == op_Cast) {
+ } else if (is_Cast(n)) {
add_type_cast(get_Cast_type(n), n);
return;
} else if (is_Sel(n)) {
return;
} else if (is_memop(n)) {
addr = get_memop_ptr(n);
- } else if (get_irn_op(n) == op_Call) {
+ } else if (is_Call(n)) {
addr = get_Call_ptr(n);
if (! is_Sel(addr)) return; /* Sels before Calls mean a Load / polymorphic Call. */
} else {
return n;
cmp = get_Proj_pred(proj);
- if (get_irn_op(cmp) == op_Cmp) {
+ if (is_Cmp(cmp)) {
ir_node *a = get_Cmp_left(cmp);
ir_node *b = get_Cmp_right(cmp);
ir_node *t = get_Mux_true(n);
default:
if (get_irn_op(node) == get_op_Max() ||
- get_irn_op(node) == get_op_Min() ||
- get_irn_op(node) == get_op_Mulh())
+ get_irn_op(node) == get_op_Min() ||
+ is_Mulh(node))
{
/* TODO: implement */
/* ignore for now */
return gen_Proj_be_AddSP(node);
} else if (is_Cmp(pred)) {
return gen_Proj_Cmp(node);
- } else if (get_irn_op(pred) == op_Start) {
+ } else if (is_Start(pred)) {
if (proj == pn_Start_X_initial_exec) {
ir_node *block = get_nodes_block(pred);
ir_node *jump;
v = (v << 8) | get_tarval_sub_bits(tv, 1);
v = (v << 8) | get_tarval_sub_bits(tv, 0);
*resL = new_Const_long(mode_Is, v);
- }
- else if (get_irn_op(skip_Proj(arg)) == op_Load) {
+ } else if (is_Load(skip_Proj(arg))) {
/* FIXME: handling of low/high depends on LE/BE here */
assert(0);
}
v = (v << 8) | get_tarval_sub_bits(tv, 1);
v = (v << 8) | get_tarval_sub_bits(tv, 0);
return new_Const_long(mode_Is, v);
- }
- else if (get_irn_op(skip_Proj(arg)) == op_Load) {
+ } else if (is_Load(skip_Proj(arg))) {
ir_node *load;
load = skip_Proj(arg);
/* if irn is an End we have keep-alives and op might be a block, skip that */
if (is_Block(op)) {
- assert(get_irn_op(irn) == op_End);
+ assert(is_End(irn));
continue;
}
for (n = get_irn_arity(irn)-1; n>=0 && remat; --n) {
ir_node *op = get_irn_n(irn, n);
- remat &= has_reg_class(si, op) || arch_irn_get_flags(arch_env, op) & arch_irn_flags_ignore || (get_irn_op(op) == op_NoMem);
+ remat &= has_reg_class(si, op) || arch_irn_get_flags(arch_env, op) & arch_irn_flags_ignore || is_NoMem(op);
// if(!remat)
// ir_fprintf(stderr, " Argument %d (%+F) of Node %+F has wrong regclass\n", i, op, irn);
static void collect_copyb_nodes(ir_node *node, void *env) {
wenv_t *wenv = env;
- if (get_irn_op(node) == op_CopyB) {
+ if (is_CopyB(node)) {
set_irn_link(node, wenv->list);
wenv->list = node;
}
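/* The link field builds an intrusive, singly linked list here; a traversal
 * sketch under that assumption (walking the CopyB nodes collected above):
 *
 *     ir_node *n;
 *     for (n = wenv->list; n != NULL; n = get_irn_link(n))
 *         handle_copyb(n);   // hypothetical consumer
 */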
static void emit_Proj(const ir_node *irn) {
ir_node *pred = get_Proj_pred(irn);
- if (get_irn_op(pred) == op_Start) {
+ if (is_Start(pred)) {
if (get_Proj_proj(irn) == pn_Start_X_initial_exec) {
emit_Jmp(irn);
}
ir_node * call;
/* We collected all call nodes in a link list at the end node. */
for (call = get_irn_link(get_irg_end(irg)); call; call = get_irn_link(call)) {
- if (get_irn_op(call) != op_Call) continue;
+ if (!is_Call(call)) continue;
for (j = get_Call_n_callees(call) - 1; j >= 0; --j) {
ir_entity * ent = get_Call_callee(call, j);
if (get_entity_irg(ent)) {
* (also for Proj->Call operations) and insert Phi operations into the list
* of their basic block. */
static void collect_phicallproj_walker(ir_node * node, ir_node ** call_tail) {
- if (get_irn_op(node) == op_Call) {
+ if (is_Call(node)) {
/* Append the list of Calls to call_tail. */
ir_node * link;
assert(get_irn_link(*call_tail) == NULL);
* that, due to CSE, only a single Filter operation can have been created
* above for "different" Proj operations. */
for (link = get_irg_start(irg), proj = get_irn_link(link); proj; proj = get_irn_link(proj)) {
- if (get_irn_op(proj) == op_Id) { /* replaced with filter */
+ if (is_Id(proj)) { /* replaced with filter */
ir_node * filter = get_Id_pred(proj);
- assert(get_irn_op(filter) == op_Filter);
+ assert(is_Filter(filter));
if (filter != link && get_irn_link(filter) == NULL) {
set_irn_link(link, filter);
link = filter;
if (data->open) {
set_Block_cg_cfgpred(start_block, 0, get_cg_Unknown(mode_X));
for (proj = get_irn_link(get_irg_start(irg)); proj; proj = get_irn_link(proj)) {
- if (get_irn_op(proj) == op_Filter) {
+ if (is_Filter(proj)) {
set_Filter_cg_pred(proj, 0, get_cg_Unknown(get_irn_mode(proj)));
}
}
/* Move projs of this node. */
proj = get_irn_link(node);
for (; proj; proj = skip_Id(get_irn_link(proj))) {
- if (get_irn_op(proj) != op_Proj && get_irn_op(proj) != op_Filter) continue;
+ if (get_irn_op(proj) != op_Proj && !is_Filter(proj)) continue;
if ((get_nodes_block(proj) == from_block) && (skip_Proj(get_irn_n(proj, 0)) == node))
set_nodes_block(proj, to_block);
}
set_Block_cg_cfgpred(get_nodes_block(start), data->count, exec);
for (filter = get_irn_link(start); filter; filter = get_irn_link(filter)) {
- if (get_irn_op(filter) != op_Filter) continue;
+ if (!is_Filter(filter)) continue;
if (get_Proj_pred(filter) == start) {
switch ((int) get_Proj_proj(filter)) {
case pn_Start_M:
* call site is invoked only a single time. */
ir_node * proj;
for (proj = get_irn_link(call); proj && get_irn_op(proj) == op_Proj; proj = get_irn_link(proj)) {
- if (get_Proj_proj(proj) == 1 && get_irn_op(get_Proj_pred(proj)) == op_Call) {
+ if (get_Proj_proj(proj) == 1 && is_Call(get_Proj_pred(proj))) {
return proj;
}
}
current_ir_graph = get_irp_irg(i);
for (node = get_irn_link(get_irg_end(current_ir_graph)); node; node = get_irn_link(node)) {
- if (get_irn_op(node) == op_Call) {
+ if (is_Call(node)) {
int j, n_callees = get_Call_n_callees(node);
for (j = 0; j < n_callees; ++j)
if (get_entity_irg(get_Call_callee(node, j)))
static void destruct_walker(ir_node * node, void * env)
{
(void) env;
- if (get_irn_op(node) == op_Block) {
+ if (is_Block(node)) {
remove_Block_cg_cfgpred_arr(node);
/* Do not turn Break into Jmp. Better: merge blocks right away.
Well, but there are Breaks left.
if (get_irn_op(pred) == op_Break)
exchange(node, get_nodes_block(pred));
}
- } else if (get_irn_op(node) == op_Filter) {
+ } else if (is_Filter(node)) {
set_irg_current_block(current_ir_graph, get_nodes_block(node));
exchange(node, new_Proj(get_Filter_pred(node), get_irn_mode(node), get_Filter_proj(node)));
} else if (get_irn_op(node) == op_Break) {
set_irg_current_block(current_ir_graph, get_nodes_block(node));
exchange(node, new_Jmp());
- } else if (get_irn_op(node) == op_Call) {
+ } else if (is_Call(node)) {
remove_Call_callee_arr(node);
} else if (get_irn_op(node) == op_Proj) {
/* some ProjX end up in strange blocks. */
res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
for (i = arity - 1; i >= 0; --i)
- if (get_irn_op(in[i]) == op_Unknown) {
+ if (is_Unknown(in[i])) {
has_unknown = 1;
break;
}
}
for (i = arity-1; i >= 0; i--)
- if (get_irn_op(in[i]) == op_Unknown) {
+ if (is_Unknown(in[i])) {
has_unknown = 1;
break;
}
finished yet. */
opt = get_opt_optimize(); set_optimize(0);
/* Here we rely on the fact that all frag ops have Memory as first result! */
- if (get_irn_op(n) == op_Call)
+ if (is_Call(n)) {
arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
- else if (get_irn_op(n) == op_CopyB)
+ } else if (is_CopyB(n)) {
arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
- else {
+ } else {
assert((pn_Quot_M == pn_DivMod_M) &&
(pn_Quot_M == pn_Div_M) &&
(pn_Quot_M == pn_Mod_M) &&
case iro_CallBegin: {
ir_node *addr = get_CallBegin_ptr(n);
ir_entity *ent = NULL;
- if (get_irn_op(addr) == op_Sel)
+ if (is_Sel(addr))
ent = get_Sel_entity(addr);
else if (is_Global(addr))
ent = get_Global_entity(addr);
fprintf(F, "}\n");
/* Dump the edges */
for ( i = 0; i < get_Block_n_cfgpreds(block); i++)
- if (get_irn_op(skip_Proj(get_Block_cfgpred(block, i))) != op_Bad) {
+ if (!is_Bad(skip_Proj(get_Block_cfgpred(block, i)))) {
pred = get_nodes_block(skip_Proj(get_Block_cfgpred(block, i)));
fprintf(F, "edge: { sourcename: \"");
PRINT_NODEID(block);
}
/* This is not nice, output it as a marker in the predecessor list. */
- if ((get_irn_op(n) == op_Block) ||
- (get_irn_op(n) == op_Phi) ||
- ((get_irn_op(n) == op_Filter) && get_interprocedural_view())) {
+ if (is_Block(n) ||
+ get_irn_op(n) == op_Phi ||
+ (is_Filter(n) && get_interprocedural_view())) {
fprintf(F, " backedges:");
comma = ' ';
for (i = 0; i < get_irn_arity(n); i++)
ir_node *acc = get_entity_access(ent, i);
int depth = get_weighted_loop_depth(acc);
assert(depth < max_depth);
- if ((get_irn_op(acc) == op_Load) || (get_irn_op(acc) == op_Call)) {
+ if (is_Load(acc) || is_Call(acc)) {
L_freq[depth]++;
max_L_freq = (depth > max_L_freq) ? depth : max_L_freq;
if (addr_is_alloc(acc)) {
LA_freq[depth]++;
max_LA_freq = (depth > max_LA_freq) ? depth : max_LA_freq;
}
- } else if (get_irn_op(acc) == op_Store) {
+ } else if (is_Store(acc)) {
S_freq[depth]++;
max_S_freq = (depth > max_S_freq) ? depth : max_S_freq;
if (addr_is_alloc(acc)) {
ir_node *acc = get_entity_access(ent, i);
int depth = get_weighted_loop_depth(acc);
assert(depth <= max_depth);
- if ((get_irn_op(acc) == op_Load) || (get_irn_op(acc) == op_Call)) {
+ if (is_Load(acc) || is_Call(acc)) {
L_freq[depth]++;
max_L_freq = (depth > max_L_freq) ? depth : max_L_freq;
if (addr_is_alloc(acc)) {
disp[depth]++;
*max_disp = (depth > *max_disp) ? depth : *max_disp;
}
- } else if (get_irn_op(acc) == op_Store) {
+ } else if (is_Store(acc)) {
S_freq[depth]++;
max_S_freq = (depth > max_S_freq) ? depth : max_S_freq;
if (addr_is_alloc(acc)) {
assert(depth <= max_depth);
freq[depth]++;
max_freq = (depth > max_freq) ? depth : max_freq;
- assert(get_irn_op(all) == op_Alloc);
+ assert(is_Alloc(all));
}
fprintf(F, "%s ", get_type_name(tp));
fprintf(F, "[");
- if (get_irn_op(lower) == op_Const)
+ if (is_Const(lower)) {
fprintf(F, "%ld .. ", get_tarval_long(get_Const_tarval(lower)));
- else {
+ } else {
dump_node_opcode(F, lower);
fprintf(F, " %ld .. ", get_irn_node_nr(lower));
}
- if (get_irn_op(upper) == op_Const)
+ if (is_Const(upper)) {
fprintf(F, "%ld]", get_tarval_long(get_Const_tarval(lower)));
- else {
+ } else {
dump_node_opcode(F, upper);
fprintf(F, " %ld]", get_irn_node_nr(upper));
}
assert(depth <= max_depth);
freq[depth]++;
max_freq = (depth > max_freq) ? depth : max_freq;
- assert(get_irn_op(all) == op_Alloc);
+ assert(is_Alloc(all));
}
if (max_freq >= 0) {
void
(set_irg_end_except)(ir_graph *irg, ir_node *node) {
- assert(get_irn_op(node) == op_EndExcept || get_irn_op(node) == op_End);
+ assert(get_irn_op(node) == op_EndExcept || is_End(node));
_set_irg_end_except(irg, node);
}
if (pre) pre(node, env);
pred = skip_Proj(node);
- if (get_irn_op(pred) == op_CallBegin
- || get_irn_op(pred) == op_EndReg
- || get_irn_op(pred) == op_EndExcept) {
- current_ir_graph = get_irn_irg(pred);
+ if (is_CallBegin(pred) ||
+ get_irn_op(pred) == op_EndReg ||
+ get_irn_op(pred) == op_EndExcept) {
+ current_ir_graph = get_irn_irg(pred);
}
if (is_no_Block(node)) { /* not block */
irg_walk_cg(get_nodes_block(node), visited, irg_set, pre, post, env);
}
- if (get_irn_op(node) == op_Block) { /* block */
+ if (is_Block(node)) { /* block */
for (i = get_irn_arity(node) - 1; i >= 0; --i) {
ir_node * exec = get_irn_n(node, i);
ir_node * pred = skip_Proj(exec);
- if ((get_irn_op(pred) != op_CallBegin
- && get_irn_op(pred) != op_EndReg
- && get_irn_op(pred) != op_EndExcept)
- || pset_new_contains(irg_set, get_irn_irg(pred))) {
- irg_walk_cg(exec, visited, irg_set, pre, post, env);
+ if ((
+ !is_CallBegin(pred) &&
+ get_irn_op(pred) != op_EndReg &&
+ get_irn_op(pred) != op_EndExcept
+ ) || pset_new_contains(irg_set, get_irn_irg(pred))) {
+ irg_walk_cg(exec, visited, irg_set, pre, post, env);
}
}
- } else if (get_irn_op(node) == op_Filter) { /* filter */
+ } else if (is_Filter(node)) { /* filter */
for (i = get_irn_arity(node) - 1; i >= 0; --i) {
ir_node * pred = get_irn_n(node, i);
- if (get_irn_op(pred) == op_Unknown || get_irn_op(pred) == op_Bad) {
+ if (is_Unknown(pred) || is_Bad(pred)) {
irg_walk_cg(pred, visited, irg_set, pre, post, env);
} else {
ir_node * exec;
exec = skip_Proj(get_Block_cfgpred(get_nodes_block(node), i));
- if (op_Bad == get_irn_op (exec)) {
+ if (is_Bad(exec)) {
continue;
}
- assert(get_irn_op(exec) == op_CallBegin
- || get_irn_op(exec) == op_EndReg
- || get_irn_op(exec) == op_EndExcept);
+ assert(is_CallBegin(exec) ||
+ get_irn_op(exec) == op_EndReg ||
+ get_irn_op(exec) == op_EndExcept);
if (pset_new_contains(irg_set, get_irn_irg(exec))) {
current_ir_graph = get_irn_irg(exec);
irg_walk_cg(pred, visited, irg_set, pre, post, env);
if (get_interprocedural_view()) {
/* Only Filter and Block nodes can have predecessors in other graphs. */
- if (get_irn_op(n) == op_Filter)
+ if (is_Filter(n))
n = get_nodes_block(n);
- if (get_irn_op(n) == op_Block) {
+ if (is_Block(n)) {
ir_node *cfop = skip_Proj(get_Block_cfgpred(n, index));
if (is_ip_cfop(cfop)) {
current_ir_graph = get_irn_irg(cfop);
ir_reserve_resources(irg, IR_RESOURCE_BLOCK_VISITED);
inc_irg_block_visited(irg);
block = is_Block(node) ? node : get_nodes_block(node);
- assert(get_irn_op(block) == op_Block);
+ assert(is_Block(block));
irg_block_walk_2(block, pre, post, env);
/* keepalive: the endless loops ... */
- if (get_irn_op(node) == op_End) {
+ if (is_End(node)) {
int arity = get_irn_arity(node);
for (i = 0; i < arity; i++) {
pred = get_irn_n(node, i);
- if (get_irn_op(pred) == op_Block)
+ if (is_Block(pred))
irg_block_walk_2(pred, pre, post, env);
}
/* Sometimes the blocks died, but are still reachable through Phis.
void set_irn_pinned(ir_node *node, op_pin_state state) {
/* due to optimization an opt may be turned into a Tuple */
- if (get_irn_op(node) == op_Tuple)
+ if (is_Tuple(node))
return;
assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
node = get_irn_n(node, -1);
if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
node = get_irn_n(node, -1);
- assert(get_irn_op(node) == op_Block);
+ assert(is_Block(node));
return node->attr.block.irg;
}
/* returns operand of node if node is a Confirm */
ir_node *skip_Confirm(ir_node *node) {
- if (get_irn_op(node) == op_Confirm)
+ if (is_Confirm(node))
return get_Confirm_value(node);
return node;
}
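/* Usage sketch (hypothetical call site): normalize operands before comparing
 * them, so a Confirm guard does not hide an otherwise identical value:
 *
 *     if (skip_Confirm(a) == skip_Confirm(b))
 *         ... treat a and b as the same value ...
 */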
else if (proj_nr == pn_Cmp_Le || proj_nr == pn_Cmp_Lt) {
if (tv != tarval_bad) {
/* c >= 0 : Abs(a) <= c ==> (unsigned)(a + c) <= 2*c */
- if (get_irn_op(left) == op_Abs) { // TODO something is missing here
+ if (is_Abs(left)) { // TODO something is missing here
}
}
}
* @param ptr the node representing the address
*/
static ir_entity *get_ptr_entity(ir_node *ptr) {
- if (get_irn_op(ptr) == op_Sel) {
+ if (is_Sel(ptr)) {
return get_Sel_entity(ptr);
} else if (is_SymConst_addr_ent(ptr)) {
return get_SymConst_entity(ptr);
/* if we have exception flow, we must have a real Memory input */
if (proj == pn_Call_X_regular)
ASSERT_AND_RET(
- get_irn_op(get_Call_mem(n)) != op_NoMem,
+ !is_NoMem(get_Call_mem(n)),
"Regular Proj from FunctionCall", 0);
else if (proj == pn_Call_X_except)
ASSERT_AND_RET(
- get_irn_op(get_Call_mem(n)) != op_NoMem,
+ !is_NoMem(get_Call_mem(n)),
"Exception Proj from FunctionCall", 0);
else if (proj == pn_Call_M_regular || proj == pn_Call_M_except)
ASSERT_AND_RET(
- (get_irn_op(get_Call_mem(n)) != op_NoMem || 1),
+ (!is_NoMem(get_Call_mem(n)) || 1),
"Memory Proj from FunctionCall", 0);
return 1;
}
/* Phi: BB x dataM^n --> dataM */
for (i = get_irn_arity(n) - 1; i >= 0; --i) {
ir_node *pred = get_irn_n(n, i);
- if (!is_Bad(pred) && (get_irn_op(pred) != op_Unknown)) {
+ if (!is_Bad(pred) && !is_Unknown(pred)) {
ASSERT_AND_RET_DBG(
get_irn_mode(pred) == mymode,
"Phi node", 0,
}
if ((venv->flags & TUPLE) == 0) {
- if (get_irn_op(node) == op_Tuple) {
+ if (is_Tuple(node)) {
venv->res |= TUPLE;
if (get_node_verification_mode() == FIRM_VERIFICATION_REPORT) {
int offset, bit_offset, bits, bf_bits, old_cse;
dbg_info *db;
- if (get_irn_op(sel) != op_Sel)
+ if (!is_Sel(sel))
return;
ent = get_Sel_entity(sel);
dbg_info *db;
/* check bitfield access */
- if (get_irn_op(sel) != op_Sel)
+ if (!is_Sel(sel))
return;
ent = get_Sel_entity(sel);
{
long proj = get_Proj_proj(irn);
ir_node *pred = get_Proj_pred(irn);
- ir_op *op = get_irn_op(pred);
- if ((proj == pn_Load_res) && (op == op_Load))
+ if (proj == pn_Load_res && is_Load(pred))
lower_bitfields_loads(irn, pred);
break;
}
set_Tuple_pred(node, i, new_r_Bad(irg));
if (rt->mem_proj_nr >= 0)
set_Tuple_pred(node, rt->mem_proj_nr, new_r_Proj(irg, bl, call, mode_M, pn_Call_M_regular));
- if (get_irn_op(mem) != op_NoMem) {
+ if (!is_NoMem(mem)) {
/* Exceptions can only be handled with real memory */
if (rt->regular_proj_nr >= 0)
set_Tuple_pred(node, rt->regular_proj_nr, new_r_Proj(irg, bl, call, mode_X, pn_Call_X_regular));
ir_node *ka = get_End_keepalive(end, i);
if (irn_not_visited(ka)) {
- ir_op *op = get_irn_op(ka);
-
- if ((op == op_Block) && !Block_block_visited(ka)) {
+ if (is_Block(ka) && !Block_block_visited(ka)) {
/* irg_block_walk() will increase the block visited flag, but we must visit only
these blocks that are not visited yet, so decrease it first. */
set_irg_block_visited(irg, get_irg_block_visited(irg) - 1);
for (i = 0; i < n; ++i) {
ir_node *idx = get_Sel_index(sel, i);
- if (get_irn_op(idx) != op_Const)
+ if (!is_Const(idx))
return 0;
}
return 1;
for (i = 0; i < n; ++i) {
ir_node *succ = get_irn_out(sel, i);
- if (get_irn_op(succ) == op_Sel)
+ if (is_Sel(succ))
link_all_leave_sels(ent, succ);
-
}
/* if Sel nodes with memory inputs are used, an entity can be
for (i = 0; i < n; ++i) {
ir_node *succ = get_irn_out(irg_frame, i);
- if (get_irn_op(succ) == op_Sel) {
+ if (is_Sel(succ)) {
ir_entity *ent = get_Sel_entity(succ);
set_entity_link(ent, NULL);
}
for (i = 0; i < n; ++i) {
ir_node *succ = get_irn_out(irg_frame, i);
- if (get_irn_op(succ) == op_Sel) {
+ if (is_Sel(succ)) {
ir_entity *ent = get_Sel_entity(succ);
ir_type *ent_type;
for(i = get_irn_n_outs(sel) - 1; i >= 0; i--) {
succ = get_irn_out(sel, i);
- if(get_irn_op(succ) == op_Sel)
+ if (is_Sel(succ))
return 0;
}
n = get_Sel_n_indexs(sel);
len += n + 1;
- if (get_irn_op(pred) != op_Sel) {
+ if (!is_Sel(pred)) {
/* we found the root */
res = xmalloc(sizeof(*res) + (len - 1) * sizeof(res->path));
for (i = 0; i < n; ++i) {
ir_node *index = get_Sel_index(sel, i);
- if(get_irn_op(index) == op_Const)
+ if (is_Const(index))
res->path[pos++].tv = get_Const_tarval(index);
}
return res;
/* We must check this because it is possible to get a Bad node
 * from new_r_Sync() when the node can be optimized.
 * In this case we must do nothing. */
- if(get_irn_op(sync) == op_Sync) {
+ if (is_Sync(sync)) {
val_arr[env->gl_mem_vnum].mem_edge_state = sync;
/* We add this sync node to the sync's fix list.*/
add_sync_to_fixlist(val_arr[env->gl_mem_vnum].mem_edge_state, unk_vnum, env);
/* We must check this because it is possible to get a Bad node
 * from new_r_Sync() when the node can be optimized.
 * In this case we must do nothing. */
- if(get_irn_op(sync) == op_Sync) {
-
+ if (is_Sync(sync)) {
set_Call_mem(call, sync);
if(ARR_LEN(accessed_leaves_vnum))
/* We add this sync node to the sync's fix list.*/
/* Calls that have a NoMem input do neither read nor write memory.
We can completely ignore them here. */
- if (get_irn_op(get_Call_mem(irn)) == op_NoMem)
+ if (is_NoMem(get_Call_mem(irn)))
return;
/* We save in this set all entities,
sel = get_Call_param(irn, i);
value_sels = NULL;
- if(get_irn_op(sel) == op_Sel) {
+ if (is_Sel(sel)) {
key_sels.sel = sel;
value_sels = set_find(env->set_sels, &key_sels, sizeof(key_sels), HASH_PTR(key_sels.sel));
pred = get_nodes_block(pred);
/* We first repair the global memory edge at the first position of sync predecessors.*/
- if(get_irn_op(get_irn_n(sync, 0)) == op_Unknown) {
+ if (is_Unknown(get_irn_n(sync, 0))) {
inc_irg_block_visited(current_ir_graph);
val = find_vnum_value(pred, env->gl_mem_vnum);
/* We repair the leaves*/
assert(k <= ARR_LEN(l->accessed_vnum) && "The algorythm for sync repair is wron");
- if(get_irn_op(get_irn_n(sync, i)) == op_Unknown) {
+ if (is_Unknown(get_irn_n(sync, i))) {
inc_irg_block_visited(current_ir_graph);
val = find_vnum_value(pred, l->accessed_vnum[k++]);
int i, vnum;
unsigned int access_type;
ir_node *param, *call_ptr, *blk;
- ir_op *op;
ir_entity *meth_ent;
sels_t key_sels, *value_sels;
call_access_t key_call, *value_call;
env_t *env;
env = ctx;
- if(get_irn_op(irn) != op_Call)
+ if (!is_Call(irn))
return;
/* Calls that have a NoMem input do neither read nor write memory.
We can completely ignore them here. */
- if (get_irn_op(get_Call_mem(irn)) == op_NoMem)
+ if (is_NoMem(get_Call_mem(irn)))
return;
/* We iterate over the parameters of this call nodes.*/
for ( i = get_Call_n_params(irn) - 1; i >= 0; i--) {
param = get_Call_param(irn, i);
- if(get_irn_op(param) == op_Sel) {
+ if (is_Sel(param)) {
/* We have found a parameter with operation sel.*/
key_sels.sel = param;
value_sels = set_find(env->set_sels, &key_sels, sizeof(key_sels), HASH_PTR(key_sels.sel));
/* We have found a call, that have as parameter a sel from our set_sels.*/
call_ptr = get_Call_ptr(irn);
- op = get_irn_op(call_ptr);
- if(op == op_SymConst && get_SymConst_kind(call_ptr) == symconst_addr_ent) {
+ if (is_SymConst(call_ptr) && get_SymConst_kind(call_ptr) == symconst_addr_ent) {
meth_ent = get_SymConst_entity(call_ptr);
/* We get the access type for our Sel. */
acces_type = get_method_param_access(meth_ent, i);
for (i = 0 ; i < get_irn_n_outs(irg_frame); i++) {
ir_node *succ = get_irn_out(irg_frame, i);
- if (get_irn_op(succ) == op_Sel) {
+ if (is_Sel(succ)) {
ir_entity *ent = get_Sel_entity(succ);
if (get_entity_link(ent) == NULL || get_entity_link(ent) == ADDRESS_TAKEN)
static ir_node *is_depend_alloc(ir_node *adr) {
ir_node *alloc;
- if (get_irn_op(adr) != op_Sel)
+ if (!is_Sel(adr))
return NULL;
/* should be a simple Sel */
return NULL;
alloc = skip_Proj(get_Sel_ptr(adr));
- if (get_irn_op(alloc) != op_Alloc)
+ if (!is_Alloc(alloc))
return NULL;
/* hmm, we depend on this Alloc */
mem = get_irn_link(call);
/* beware of calls in the pure call list */
- if (! mem || get_irn_op(mem) == op_Call)
+ if (!mem || is_Call(mem))
continue;
assert(get_irn_mode(mem) == mode_M);
case iro_Call:
/* A call is only tolerable if its either constant or pure. */
ptr = get_Call_ptr(node);
- if (get_irn_op(ptr) == op_SymConst &&
- get_SymConst_kind(ptr) == symconst_addr_ent) {
+ if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
ir_entity *ent = get_SymConst_entity(ptr);
ir_graph *irg = get_entity_irg(ent);
static ir_entity *find_constant_entity(ir_node *ptr)
{
for (;;) {
- ir_op *op = get_irn_op(ptr);
-
- if (op == op_SymConst && (get_SymConst_kind(ptr) == symconst_addr_ent)) {
+ if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
ir_entity *ent = get_SymConst_entity(ptr);
if (variability_constant == get_entity_variability(ent))
return ent;
return NULL;
- } else if (op == op_Sel) {
+ } else if (is_Sel(ptr)) {
ir_entity *ent = get_Sel_entity(ptr);
ir_type *tp = get_entity_owner(ent);
ir_entity *root, *field;
int path_len, pos;
- if (get_irn_op(ptr) == op_SymConst) {
+ if (is_SymConst(ptr)) {
/* a SymConst. If the depth is 0, this is an access to a global
* entity and we don't need a component path, else we know
* at least its length.
root = get_SymConst_entity(ptr);
res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
} else {
- assert(get_irn_op(ptr) == op_Sel);
+ assert(is_Sel(ptr));
/* it's a Sel, go up until we find the root */
res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);
if (rel != ir_no_alias)
break;
pred = skip_Proj(get_Store_mem(pred));
- } else if (get_irn_op(pred) == op_Load) {
+ } else if (is_Load(pred)) {
ir_alias_relation rel = get_alias_relation(
current_ir_graph, get_Load_ptr(pred), get_Load_mode(pred),
ptr, mode);
store = skip_Proj(projM);
old_store = store;
- if (get_irn_op(store) != op_Store)
+ if (!is_Store(store))
return 0;
block = get_nodes_block(store);
}
break;
}
- if (get_irn_op(n) != op_Confirm)
+ if (!is_Confirm(n))
return value_classified_unknown;
tv = value_of(get_Confirm_bound(n));
ir_node *iv, *rc;
ir_node *nleft = NULL, *nright = NULL;
- if (get_irn_op(cmp) != op_Cmp)
+ if (!is_Cmp(cmp))
return;
left = get_Cmp_left(cmp);
*/
ir_type *default_firm_get_Alloc(ir_node *n) {
n = skip_Proj(n);
- if (get_irn_op(n) == op_Alloc) {
+ if (is_Alloc(n)) {
return get_Alloc_type(n);
}
return NULL;
} else {
/* First we set the block our copy if it is not a block.*/
set_nodes_block(irn_copy, get_irn_link(get_nodes_block(irn)));
- if (get_irn_op(irn) == op_End) {
+ if (is_End(irn)) {
/* Handle the keep-alives. This must be done separately, because
the End node was NOT copied */
for (i = 0; i < get_End_n_keepalives(irn); ++i)
/* we know, that a SymConst is here */
ptr = get_Call_ptr(call);
- assert(get_irn_op(ptr) == op_SymConst);
+ assert(is_SymConst(ptr));
callee = get_SymConst_entity(ptr);
if (callee != entry->q.ent) {
for (i = 0; i < n; ++i) {
ir_node *idx = get_Sel_index(sel, i);
- if (get_irn_op(idx) != op_Const)
+ if (!is_Const(idx))
return 0;
}
return 1;
*/
static void topologic_walker(ir_node *node, void *ctx) {
env_t *env = ctx;
- ir_op *op = get_irn_op(node);
ir_node *adr, *block, *mem, *val;
ir_mode *mode;
unsigned vnum;
- if (op == op_Load) {
+ if (is_Load(node)) {
/* a load, check if we can resolve it */
adr = get_Load_ptr(node);
set_Tuple_pred(node, pn_Load_res, val);
set_Tuple_pred(node, pn_Load_X_regular, new_Jmp());
set_Tuple_pred(node, pn_Load_X_except, new_Bad());
- } else if (op == op_Store) {
+ } else if (is_Store(node)) {
DB((dbg, SET_LEVEL_3, " checking %+F for replacement ", node));
/* a Store always can be replaced */
if (op == op_Proj) {
ir_node *start = get_Proj_pred(pred);
- if (get_irn_op(start) == op_Start) {
+ if (is_Start(start)) {
if (get_Proj_proj(pred) == pn_Start_T_args) {
/* found Proj(ProjT(Start)) */
set_irn_link(node, data->proj_data);
ir_type *tp_cast, *tp_pred, *tp_orig;
int ref_depth = 0;
- if (get_irn_op(pred) != op_Cast) return;
+ if (!is_Cast(pred)) return;
orig = get_Cast_op(pred);
tp_cast = get_Cast_type(cast);
sel_ent = get_Sel_entity(sel);
cast = get_Sel_ptr(sel);
- while (get_irn_op(cast) == op_Cast) {
+ while (is_Cast(cast)) {
cast_tp = get_Cast_type(cast);
ptr = get_Cast_op(cast);
orig_tp = get_irn_typeinfo_type(ptr);
if (n_preds == 0) return;
pred[0] = get_Phi_pred(phi, 0);
- if (get_irn_op(pred[0]) != op_Cast) return;
+ if (!is_Cast(pred[0])) return;
if (!is_Cast_upcast(pred[0])) return;
pred[0] = get_Cast_op(pred[0]);
for (i = 1; i < n_preds; ++i) {
pred[i] = get_Phi_pred(phi, i);
- if (get_irn_op(pred[i]) != op_Cast) return;
+ if (!is_Cast(pred[i])) return;
if (get_irn_typeinfo_type(get_Cast_op(pred[i])) != fromtype) return;
pred[i] = get_Cast_op(pred[i]);
}
return 0;
node = get_Proj_pred(node);
- return get_irn_op(node) == op_Start;
+ return is_Start(node);
} /* is_arg */
/**
} /* if */
/* if this option is set, Loads are always leaves */
- if (dag_env->options & FIRMSTAT_LOAD_IS_LEAVE && get_irn_op(node) == op_Load)
+ if (dag_env->options & FIRMSTAT_LOAD_IS_LEAVE && is_Load(node))
return;
- if (dag_env->options & FIRMSTAT_CALL_IS_LEAVE && get_irn_op(node) == op_Call)
+ if (dag_env->options & FIRMSTAT_CALL_IS_LEAVE && is_Call(node))
return;
entry = get_irn_dag_entry(node);
} /* if */
break;
case iro_Sel:
- if (get_irn_op(get_Sel_ptr(node)) == op_Sel) {
+ if (is_Sel(get_Sel_ptr(node))) {
/* special case, a Sel of a Sel, count on extra counter */
op = status->op_SelSel ? status->op_SelSel : op;
- if (get_irn_op(get_Sel_ptr(get_Sel_ptr(node))) == op_Sel) {
+ if (is_Sel(get_Sel_ptr(get_Sel_ptr(node)))) {
/* special case, a Sel of a Sel of a Sel, count on extra counter */
op = status->op_SelSelSel ? status->op_SelSelSel : op;
} /* if */
cnt_inc(&eb_entry->cnt[bcnt_nodes]);
/* don't count keep-alive edges */
- if (get_irn_op(node) == op_End)
+ if (is_End(node))
return;
arity = get_irn_arity(node);
/* found a call, this function is not a leaf */
graph->is_leaf = 0;
- if (get_irn_op(ptr) == op_SymConst) {
+ if (is_SymConst(ptr)) {
if (get_SymConst_kind(ptr) == symconst_addr_ent) {
/* ok, we seem to know the entity */
ent = get_SymConst_entity(ptr);
if (is_Bad(block))
return;
- if (get_irn_op(ptr) == op_SymConst) {
+ if (is_SymConst(ptr)) {
if (get_SymConst_kind(ptr) == symconst_addr_ent) {
/* ok, we seem to know the entity */
ent = get_SymConst_entity(ptr);
ir_type *fromtype, *totype;
int ref_depth = 0;
- if (get_irn_op(n) != op_Cast) return;
+ if (!is_Cast(n)) return;
fromtype = get_irn_typeinfo_type(get_Cast_op(n));
totype = get_Cast_type(n);
int has_array_lower_bound(const ir_type *array, int dimension) {
assert(array && (array->type_op == type_array));
- return (get_irn_op(array->attr.aa.lower_bound[dimension]) != op_Unknown);
+ return !is_Unknown(array->attr.aa.lower_bound[dimension]);
}
long get_array_lower_bound_int(const ir_type *array, int dimension) {
ir_node *node;
assert(array && (array->type_op == type_array));
node = array->attr.aa.lower_bound[dimension];
- assert(get_irn_op(node) == op_Const);
+ assert(is_Const(node));
return get_tarval_long(get_Const_tarval(node));
}
int has_array_upper_bound(const ir_type *array, int dimension) {
assert(array && (array->type_op == type_array));
- return get_irn_op(array->attr.aa.upper_bound[dimension]) != op_Unknown;
+ return !is_Unknown(array->attr.aa.upper_bound[dimension]);
}
long get_array_upper_bound_int(const ir_type *array, int dimension) {
ir_node *node;
assert(array && (array->type_op == type_array));
node = array->attr.aa.upper_bound[dimension];
- assert(get_irn_op(node) == op_Const);
+ assert(is_Const(node));
return get_tarval_long(get_Const_tarval(node));
}
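/* Usage sketch mirroring the callers earlier in this patch: read the integer
 * bounds only when both are known to be Const, since the *_int accessors
 * assert is_Const(). The function name is hypothetical. */
static long sketch_array_extent(const ir_type *tp)
{
	if (is_Const(get_array_lower_bound(tp, 0)) &&
	    is_Const(get_array_upper_bound(tp, 0)))
		return get_array_upper_bound_int(tp, 0) - get_array_lower_bound_int(tp, 0);
	return DEFAULT_N_ARRAY_ELEMENTS; /* fallback used elsewhere in this patch */
}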