ir_node *left = get_Cmp_left(cmp);
ir_node *right = get_Cmp_right(cmp);
ir_node *cond_block;
- ir_op *op;
/* Beware of Bads */
if (is_Bad(left) || is_Bad(right))
return;
- op = get_irn_op(left);
-
/* Do not create Confirm nodes for Cmp(Const, Const) constructs.
These are removed anyway */
- if (op == op_Const && is_Const(right))
+ if (is_Const(left) && is_Const(right))
return;
/* try to place the constant on the right side for a Confirm */
- if (op == op_Const || op == op_SymConst) {
+ if (is_Const(left) || is_SymConst(left)) {
ir_node *t = left;
left = right;
means not reachable.
I.e., with this code, the order on the loop tree is correct. But a
(single) test showed the loop tree is deeper. */
- if (get_irn_op(n) == op_Phi ||
- is_Block(n) ||
+ if (is_Phi(n) ||
+ is_Block(n) ||
(get_irg_pinned(get_irn_irg(n)) == op_pin_state_floats &&
get_irn_pinned(n) == op_pin_state_floats))
// Here we could test for backedge at -1 which is illegal
*/
/**
 * Returns non-zero if the node could be a loop head.
 *
 * Only Block and Phi nodes can head a loop here; everything else
 * is rejected immediately.
 */
static inline int is_possible_loop_head(ir_node *n)
{
	return is_Block(n) || is_Phi(n);
}
/**
void copy_irn_to_irg(ir_node *n, ir_graph *irg)
{
- ir_op *op = get_irn_op(n);
ir_graph *old_irg;
ir_node *nn = NULL;
/* do not copy standard nodes */
- if (op == op_NoMem)
+ switch (get_irn_opcode(n)) {
+ case iro_NoMem:
n = get_irg_no_mem(irg);
- else if (op == op_Block) {
- old_irg = get_irn_irg(n);
+ break;
+ case iro_Block:
+ old_irg = get_irn_irg(n);
if (n == get_irg_start_block(old_irg))
nn = get_irg_start_block(irg);
else if (n == get_irg_end_block(old_irg))
nn = get_irg_end_block(irg);
- }
- else if (op == op_Start)
+ break;
+
+ case iro_Start:
nn = get_irg_start(irg);
- else if (op == op_End)
+ break;
+
+ case iro_End:
nn = get_irg_end(irg);
- else if (op == op_Proj) {
- old_irg = get_irn_irg(n);
+ break;
+ case iro_Proj:
+ old_irg = get_irn_irg(n);
if (n == get_irg_initial_exec(old_irg))
nn = get_irg_initial_exec(irg);
else if (n == get_irg_frame(old_irg))
nn = get_irg_initial_mem(irg);
else if (n == get_irg_args(old_irg))
nn = get_irg_args(irg);
+ break;
}
if (nn) {
nn = new_ir_node(get_irn_dbg_info(n),
irg,
NULL, /* no block yet, will be set later */
- op,
+ get_irn_op(n),
get_irn_mode(n),
get_irn_arity(n),
get_irn_in(n) + 1);
}
/* This is not nice, output it as a marker in the predecessor list. */
- if (is_Block(n) || get_irn_op(n) == op_Phi) {
+ if (is_Block(n) || is_Phi(n)) {
int i;
fprintf(F, " backedges:");
comma = ' ';
pre(node, env);
- if (node->op != op_Block) {
+ if (!is_Block(node)) {
ir_node *pred = get_nodes_block(node);
if (pred->visited < irg->visited)
cnt += irg_walk_2_pre(pred, pre, env);
set_irn_visited(node, irg->visited);
- if (node->op != op_Block) {
+ if (!is_Block(node)) {
ir_node *pred = get_nodes_block(node);
if (pred->visited < irg->visited)
cnt += irg_walk_2_post(pred, post, env);
pre(node, env);
- if (node->op != op_Block) {
+ if (!is_Block(node)) {
ir_node *pred = get_nodes_block(node);
if (pred->visited < irg->visited)
cnt += irg_walk_2_both(pred, pre, post, env);
pre(node, env);
- if (node->op != op_Block) {
+ if (!is_Block(node)) {
ir_node *pred = get_nodes_block(node);
if (pred->visited < irg->visited)
cnt += irg_walk_in_or_dep_2_pre(pred, pre, env);
set_irn_visited(node, irg->visited);
- if (node->op != op_Block) {
+ if (!is_Block(node)) {
ir_node *pred = get_nodes_block(node);
if (pred->visited < irg->visited)
cnt += irg_walk_in_or_dep_2_post(pred, post, env);
pre(node, env);
- if (node->op != op_Block) {
+ if (!is_Block(node)) {
ir_node *pred = get_nodes_block(node);
if (pred->visited < irg->visited)
cnt += irg_walk_in_or_dep_2_both(pred, pre, post, env);
/**
 * Sets the Block a node belongs to.
 *
 * Block nodes have no block of their own, hence the assertion.
 * The block is stored as the implicit predecessor at index -1.
 */
void set_nodes_block(ir_node *node, ir_node *block)
{
	assert(!is_Block(node));
	set_irn_n(node, -1, block);
}
assert(-1 <= n && n < get_irn_arity_(node));
nn = node->in[n + 1];
- if (nn->op != op_Id) return nn;
+ if (!is_Id(nn)) return nn;
return (node->in[n + 1] = skip_Id(nn));
}
/** Returns the target value (tarval) of a Const node. */
static inline ir_tarval *get_Const_tarval_(const ir_node *node)
{
	assert(is_Const(node));
	return node->attr.con.tarval;
}
/** Returns the jump prediction attribute of a Cond node. */
static inline cond_jmp_predicate get_Cond_jmp_pred_(const ir_node *node)
{
	assert(is_Cond(node));
	return node->attr.cond.jmp_pred;
}
/** Sets the jump prediction attribute of a Cond node. */
static inline void set_Cond_jmp_pred_(ir_node *node, cond_jmp_predicate pred)
{
	assert(is_Cond(node));
	node->attr.cond.jmp_pred = pred;
}
*/
static int is_constant_expr(ir_node *irn)
{
- ir_op *op;
-
switch (get_irn_opcode(irn)) {
case iro_Const:
case iro_SymConst:
return 1;
- case iro_Add:
- op = get_irn_op(get_Add_left(irn));
- if (op != op_Const && op != op_SymConst)
+
+ case iro_Add: {
+ ir_node *const l = get_Add_left(irn);
+ if (!is_Const(l) && !is_SymConst(l))
return 0;
- op = get_irn_op(get_Add_right(irn));
- if (op != op_Const && op != op_SymConst)
+ ir_node *const r = get_Add_right(irn);
+ if (!is_Const(r) && !is_SymConst(r))
return 0;
return 1;
+ }
+
default:
return 0;
}
/* do not count Bad / NoMem */
if (l) {
- ir_op *op = get_irn_op(l);
-
- if (op == op_NoMem || op == op_Bad)
+ if (is_NoMem(l) || is_Bad(l))
return DEFAULT_RET;
/* check for additional options */
- op = get_irn_op(n);
-
- if (mark_options & FIRMSTAT_LOAD_IS_LEAVE && op == op_Load)
+ if (mark_options & FIRMSTAT_LOAD_IS_LEAVE && is_Load(n))
return DEFAULT_RET;
- if (mark_options & FIRMSTAT_CALL_IS_LEAVE && op == op_Call)
+ if (mark_options & FIRMSTAT_CALL_IS_LEAVE && is_Call(n))
return DEFAULT_RET;
} /* if */
*/
static void undate_block_info(ir_node *node, graph_entry_t *graph)
{
- ir_op *op = get_irn_op(node);
ir_node *block;
block_entry_t *b_entry;
int i, arity;
/* check for block */
- if (op == op_Block) {
+ if (is_Block(node)) {
arity = get_irn_arity(node);
b_entry = block_get_entry(&graph->recalc_cnts, get_irn_node_nr(node), graph->block_hash);
/* mark start end block to allow to filter them out */
block = get_nodes_block(node);
b_entry = block_get_entry(&graph->recalc_cnts, get_irn_node_nr(block), graph->block_hash);
- if (op == op_Phi && mode_is_datab(get_irn_mode(node))) {
+ if (is_Phi(node) && mode_is_datab(get_irn_mode(node))) {
/* count data Phi per block */
cnt_inc(&b_entry->cnt[bcnt_phi_data]);
} /* if */
/* sometimes we did not detect, that it is replaced by a Const */
if (opt == HOOK_OPT_CONFIRM && new_num_entries == 1) {
- ir_op *op = get_irn_op(new_node_array[0]);
-
- if (op == op_Const || op == op_SymConst)
+ ir_node *const irn = new_node_array[0];
+ if (is_Const(irn) || is_SymConst(irn))
xopt = HOOK_OPT_CONFIRM_C;
} /* if */