}
/*
- * Calculates the negated pnc condition.
+ * Calculates the negated (Complement(R)) pnc condition.
*/
-int
-get_negated_pnc(int pnc) {
- switch (pnc) {
- case pn_Cmp_False: return pn_Cmp_True;
- case pn_Cmp_Eq: return pn_Cmp_Ne;
- case pn_Cmp_Lt: return pn_Cmp_Uge;
- case pn_Cmp_Le: return pn_Cmp_Ug;
- case pn_Cmp_Gt: return pn_Cmp_Ule;
- case pn_Cmp_Ge: return pn_Cmp_Ul;
- case pn_Cmp_Lg: return pn_Cmp_Ue;
- case pn_Cmp_Leg: return pn_Cmp_Uo;
- case pn_Cmp_Uo: return pn_Cmp_Leg;
- case pn_Cmp_Ue: return pn_Cmp_Lg;
- case pn_Cmp_Ul: return pn_Cmp_Ge;
- case pn_Cmp_Ule: return pn_Cmp_Gt;
- case pn_Cmp_Ug: return pn_Cmp_Le;
- case pn_Cmp_Uge: return pn_Cmp_Lt;
- case pn_Cmp_Ne: return pn_Cmp_Eq;
- case pn_Cmp_True: return pn_Cmp_False;
- }
- return 99; /* to shut up gcc */
+int get_negated_pnc(int pnc, ir_mode *mode) {
+ /* XOR with pn_Cmp_True flips every relation bit: the complement relation. */
+ pnc ^= pn_Cmp_True;
+
+ /* do NOT add the Uo bit for non-floating point values */
+ if (! mode_is_float(mode))
+ pnc &= ~pn_Cmp_Uo;
+
+ return pnc;
}
-/* Calculates the swapped pnc condition, i.e., "<" --> ">" */
+/* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
int
-get_swapped_pnc(int pnc) {
+get_inversed_pnc(int pnc) {
int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
int lesser = pnc & pn_Cmp_Lt;
int greater = pnc & pn_Cmp_Gt;
int not_a_block = is_no_Block(res);
INIT_LIST_HEAD(&res->edge_info.outs_head);
+ if(!not_a_block)
+ INIT_LIST_HEAD(&res->attr.block.succ_head);
+
for (i = 0, n = arity + not_a_block; i < n; ++i)
edges_notify_edge(res, i - not_a_block, res->in[i], NULL, irg);
void
set_irn_in (ir_node *node, int arity, ir_node **in) {
+ int i;
ir_node *** arr;
assert(node);
if (get_interprocedural_view()) { /* handle Filter and Block specially */
(*arr)[0] = block;
}
fix_backedges(current_ir_graph->obst, node);
+
+ for (i = 0; i < arity; i++) {
+ edges_notify_edge(node, i, in[i], (*arr)[i+1], current_ir_graph);
+ }
+
memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
}
return _get_irn_pinned(node);
}
+/* Returns whether the node is pinned in its graph; delegates to the
+   _is_irn_pinned_in_irg() implementation (the parenthesized name keeps
+   a same-named function-like macro from expanding here). */
+op_pin_state
+(is_irn_pinned_in_irg) (const ir_node *node) {
+ return _is_irn_pinned_in_irg(node);
+}
+
void set_irn_pinned(ir_node *node, op_pin_state state) {
/* due to optimization an opt may be turned into a Tuple */
if (get_irn_op(node) == op_Tuple)
return node->attr.except;
}
+/* Returns the address of the node's attribute union (untyped access). */
+void *
+get_irn_generic_attr (ir_node *node) {
+ return &node->attr;
+}
+
/** manipulate fields of individual nodes **/
/* this works for all except Block */
ir_node *
get_nodes_block (const ir_node *node) {
assert (!(node->op == op_Block));
+ /* a floating (non-pinned) node may carry stale block info, hence the check */
+ assert (is_irn_pinned_in_irg(node) && "block info may be incorrect");
return get_irn_n(node, -1);
}
return (ir_node **)&(get_irn_in(node)[1]);
}
-
int
-get_Block_n_cfgpreds (ir_node *node) {
- assert ((node->op == op_Block));
- return get_irn_arity(node);
+/* NOTE(review): the unparenthesized call below is intended to expand to the
+   same-named macro from the header, not recurse — confirm against irnode.h. */
+(get_Block_n_cfgpreds)(ir_node *node) {
+ return get_Block_n_cfgpreds(node);
}
ir_node *
-get_Block_cfgpred (ir_node *node, int pos) {
- assert(-1 <= pos && pos < get_irn_arity(node));
- assert(node->op == op_Block);
- return get_irn_n(node, pos);
+/* NOTE(review): the unparenthesized call below is intended to expand to the
+   same-named macro from the header, not recurse — confirm against irnode.h. */
+(get_Block_cfgpred)(ir_node *node, int pos) {
+ return get_Block_cfgpred(node, pos);
}
void
set_irn_n(node, pos, pred);
}
+/* Returns the block of the pos-th control-flow predecessor of a block;
+   out-of-line wrapper for _get_Block_cfgpred_block(). */
+ir_node *
+(get_Block_cfgpred_block)(ir_node *node, int pos) {
+ return _get_Block_cfgpred_block(node, pos);
+}
+
bool
get_Block_matured (ir_node *node) {
assert (node->op == op_Block);
node->attr.block.in_cg[0] = NULL;
node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
{
- /* Fix backedge array. fix_backedges operates depending on
+ /* Fix backedge array. fix_backedges() operates depending on
interprocedural_view. */
int ipv = get_interprocedural_view();
set_interprocedural_view(true);
return _is_Block_dead(block);
}
+/* Returns the extended basic block a block belongs to (attr.block.extblk). */
+ir_extblk *get_Block_extbb(const ir_node *block) {
+ assert(is_Block(block));
+ return block->attr.block.extblk;
+}
+
+/* Sets the extended basic block a block belongs to. */
+void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
+ assert(is_Block(block));
+ block->attr.block.extblk = extblk;
+}
+
void
set_Start_irg(ir_node *node, ir_graph *irg) {
assert(node->op == op_Start);
in array afterwards ... */
}
+/* Returns the target address of an IJmp (its single operand, slot 0). */
+ir_node *get_IJmp_target(ir_node *ijmp) {
+ assert(ijmp->op == op_IJmp);
+ return get_irn_n(ijmp, 0);
+}
+
+/* Sets the target address of an IJmp. */
+void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
+ assert(ijmp->op == op_IJmp);
+ set_irn_n(ijmp, 0, tgt);
+}
/*
> Implementing the case construct (which is where the constant Proj node is
}
+/* Returns the single operand of a Proj node (the multi-value node it selects from). */
ir_node *
-get_Proj_pred (ir_node *node) {
+get_Proj_pred (const ir_node *node) {
assert (is_Proj(node));
return get_irn_n(node, 0);
}
}
long
-get_Proj_proj (ir_node *node) {
+get_Proj_proj (const ir_node *node) {
assert (is_Proj(node));
if (get_irn_opcode(node) == iro_Proj) {
return node->attr.proj;
node->in[3] = ir_true;
}
+/* CopyB support: operand slots are 0 = memory, 1 = destination, 2 = source;
+   the copied type lives in attr.copyb.data_type. */
+ir_node *get_CopyB_mem (ir_node *node) {
+ assert (node->op == op_CopyB);
+ return get_irn_n(node, 0);
+}
+
+void set_CopyB_mem (ir_node *node, ir_node *mem) {
+ assert (node->op == op_CopyB);
+ set_irn_n(node, 0, mem);
+}
+
+ir_node *get_CopyB_dst (ir_node *node) {
+ assert (node->op == op_CopyB);
+ return get_irn_n(node, 1);
+}
+
+void set_CopyB_dst (ir_node *node, ir_node *dst) {
+ assert (node->op == op_CopyB);
+ set_irn_n(node, 1, dst);
+}
+
+ir_node *get_CopyB_src (ir_node *node) {
+ assert (node->op == op_CopyB);
+ return get_irn_n(node, 2);
+}
+
+void set_CopyB_src (ir_node *node, ir_node *src) {
+ assert (node->op == op_CopyB);
+ set_irn_n(node, 2, src);
+}
+
+type *get_CopyB_type(ir_node *node) {
+ assert (node->op == op_CopyB);
+ return node->attr.copyb.data_type;
+}
+
+/* data_type must be non-NULL (asserted). */
+void set_CopyB_type(ir_node *node, type *data_type) {
+ assert (node->op == op_CopyB && data_type);
+ node->attr.copyb.data_type = data_type;
+}
+
+/* Returns the graph a node belongs to, read from its block's attributes. */
ir_graph *
get_irn_irg(const ir_node *node) {
+ /*
+ * Do not use get_nodes_Block() here, because this
+ * will check the pinned state.
+ * However even a 'wrong' block is always in the proper
+ * irg.
+ */
if (! is_Block(node))
- node = get_nodes_block(node);
+ node = get_irn_n(node, -1);
if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
- node = get_nodes_block(node);
+ node = get_irn_n(node, -1);
assert(get_irn_op(node) == op_Block);
return node->attr.block.irg;
}
+/* Skips Tuple nodes: a Proj selecting from a Tuple is replaced by the
+   Tuple operand it selects; the goto re-examines that result so chained
+   Proj/Tuple combinations are resolved as well. */
ir_node *
skip_Tuple (ir_node *node) {
ir_node *pred;
+ ir_op *op;
if (!get_opt_normalize()) return node;
+restart:
node = skip_Id(node);
if (get_irn_op(node) == op_Proj) {
pred = skip_Id(get_Proj_pred(node));
- if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
+ op = get_irn_op(pred);
+
+ /*
+ * Looks strange but calls get_irn_op() only once
+ * in most often cases.
+ */
+ if (op == op_Proj) { /* nested Tuple ? */
pred = skip_Id(skip_Tuple(pred));
- if (get_irn_op(pred) == op_Tuple)
- return get_Tuple_pred(pred, get_Proj_proj(node));
+ op = get_irn_op(pred);
+
+ if (op == op_Tuple) {
+ node = get_Tuple_pred(pred, get_Proj_proj(node));
+ goto restart;
+ }
+ }
+ else if (op == op_Tuple) {
+ node = get_Tuple_pred(pred, get_Proj_proj(node));
+ goto restart;
+ }
}
return node;
}
-/** returns operand of node if node is a Cast */
+/* returns operand of node if node is a Cast, otherwise the node itself;
+   NULL input is passed through unchanged */
ir_node *skip_Cast (ir_node *node) {
- if (node && get_irn_op(node) == op_Cast) {
- return skip_Id(get_irn_n(node, 0));
- } else {
- return node;
- }
+ if (node && get_irn_op(node) == op_Cast)
+ return get_Cast_op(node);
+ return node;
+}
+
+/* returns operand of node if node is a Confirm, otherwise the node itself;
+   NULL input is passed through unchanged */
+ir_node *skip_Confirm (ir_node *node) {
+ if (node && get_irn_op(node) == op_Confirm)
+ return get_Confirm_value(node);
+ return node;
+}
+
+/* skip all high-level ops: if the node's op is flagged highlevel, return its
+   first operand (slot 0 — presumably the wrapped value; verify per op) */
+ir_node *skip_HighLevel(ir_node *node) {
+ if (node && is_op_highlevel(get_irn_op(node)))
+ return get_irn_n(node, 0);
+ return node;
+}
#if 0
}
/* Returns true if the operation is a forking control flow operation. */
-int
-is_forking_op(const ir_node *node) {
- return is_op_forking(get_irn_op(node));
+/* Out-of-line wrapper for _is_irn_forking(). */
+int (is_irn_forking)(const ir_node *node) {
+ return _is_irn_forking(node);
}
+/* Returns the node's type attribute; out-of-line wrapper for _get_irn_type(). */
type *(get_irn_type)(ir_node *node) {
return _get_irn_type(node);
}
+/* Returns non-zero for constant-like nodes; out-of-line wrapper for
+   _is_irn_constlike(). */
+int (is_irn_constlike)(const ir_node *node) {
+ return _is_irn_constlike(node);
+}
+
+/* Gets the string representation of the jump prediction.
+   Unknown values fall through to "no prediction". */
+const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
+{
+ switch (pred) {
+ default:
+ case COND_JMP_PRED_NONE: return "no prediction";
+ case COND_JMP_PRED_TRUE: return "true taken";
+ case COND_JMP_PRED_FALSE: return "false taken";
+ }
+}
+
+/* Returns the conditional jump prediction of a Cond node;
+   out-of-line wrapper for _get_Cond_jmp_pred(). */
+cond_jmp_predicate (get_Cond_jmp_pred)(ir_node *cond) {
+ return _get_Cond_jmp_pred(cond);
+}
+
+/* Sets a new conditional jump prediction;
+   out-of-line wrapper for _set_Cond_jmp_pred(). */
+void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
+ _set_Cond_jmp_pred(cond, pred);
+}
+
/** the get_type operation must be always implemented */
static type *get_Null_type(ir_node *n) {
return NULL;