return pnc_name_arr[pnc];
}
-/**
- * Calculates the negated pnc condition.
+/*
+ * Calculates the negated (Complement(R)) pnc condition.
+ *
+ * XORs pnc against pn_Cmp_True to complement all relation bits;
+ * for non-float modes the Unordered bit is masked out again,
+ * since integer comparisons can never be unordered.
*/
+int get_negated_pnc(int pnc, ir_mode *mode) {
+ pnc ^= pn_Cmp_True;
+
+ /* do NOT add the Uo bit for non-floating point values */
+ if (! mode_is_float(mode))
+ pnc &= ~pn_Cmp_Uo;
+
+ return pnc;
+}
+
+/* Calculates the inverse (R^-1) pnc condition, i.e., "<" --> ">":
+   the Lt and Gt bits are swapped, all other bits (Eq, Uo) stay. */
int
-get_negated_pnc(int pnc) {
- switch (pnc) {
- case pn_Cmp_False: return pn_Cmp_True; break;
- case pn_Cmp_Eq: return pn_Cmp_Ne; break;
- case pn_Cmp_Lt: return pn_Cmp_Uge; break;
- case pn_Cmp_Le: return pn_Cmp_Ug; break;
- case pn_Cmp_Gt: return pn_Cmp_Ule; break;
- case pn_Cmp_Ge: return pn_Cmp_Ul; break;
- case pn_Cmp_Lg: return pn_Cmp_Ue; break;
- case pn_Cmp_Leg: return pn_Cmp_Uo; break;
- case pn_Cmp_Uo: return pn_Cmp_Leg; break;
- case pn_Cmp_Ue: return pn_Cmp_Lg; break;
- case pn_Cmp_Ul: return pn_Cmp_Ge; break;
- case pn_Cmp_Ule: return pn_Cmp_Gt; break;
- case pn_Cmp_Ug: return pn_Cmp_Le; break;
- case pn_Cmp_Uge: return pn_Cmp_Lt; break;
- case pn_Cmp_Ne: return pn_Cmp_Eq; break;
- case pn_Cmp_True: return pn_Cmp_False; break;
- }
- return 99; /* to shut up gcc */
+get_inversed_pnc(int pnc) {
+ int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
+ int lesser = pnc & pn_Cmp_Lt;
+ int greater = pnc & pn_Cmp_Gt;
+
+ /* mirror the ordering bits: Lt becomes Gt and vice versa */
+ code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
+
+ return code;
}
const char *pns_name_arr [] = {
int not_a_block = is_no_Block(res);
INIT_LIST_HEAD(&res->edge_info.outs_head);
+ if(!not_a_block)
+ INIT_LIST_HEAD(&res->attr.block.succ_head);
+
for (i = 0, n = arity + not_a_block; i < n; ++i)
edges_notify_edge(res, i - not_a_block, res->in[i], NULL, irg);
void
set_irn_in (ir_node *node, int arity, ir_node **in) {
+ int i;
ir_node *** arr;
assert(node);
if (get_interprocedural_view()) { /* handle Filter and Block specially */
} else {
arr = &node->in;
}
- if (arity != ARR_LEN(*arr) - 1) {
+
+ /* Notify the edge module about every in-edge change: first the
+    positions that get a (new) predecessor, then any leftover old
+    positions that disappear because the new arity is smaller. */
+ for (i = 0; i < arity; i++) {
+ if (i < ARR_LEN(*arr)-1)
+ edges_notify_edge(node, i, in[i], (*arr)[i+1], current_ir_graph);
+ else
+ edges_notify_edge(node, i, in[i], NULL, current_ir_graph);
+ }
+ for(;i < ARR_LEN(*arr)-1; i++) {
+ edges_notify_edge(node, i, NULL, (*arr)[i+1], current_ir_graph);
+ }
+
+ /* reallocate the in array if the arity changed; slot 0 keeps the block */
+ if (arity != ARR_LEN(*arr) - 1) {
ir_node * block = (*arr)[0];
*arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
(*arr)[0] = block;
}
fix_backedges(current_ir_graph->obst, node);
+
memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
}
return _get_irn_pinned(node);
}
+/* Returns the pinned state of a node inside its graph
+   (out-of-line wrapper; parentheses suppress a macro of the same name). */
+op_pin_state
+(is_irn_pinned_in_irg) (const ir_node *node) {
+ return _is_irn_pinned_in_irg(node);
+}
+
void set_irn_pinned(ir_node *node, op_pin_state state) {
/* due to optimization an opt may be turned into a Tuple */
if (get_irn_op(node) == op_Tuple)
return node->attr.except;
}
+/* Returns a pointer to the opcode-independent attribute union of a node. */
+void *
+get_irn_generic_attr (ir_node *node) {
+ return &node->attr;
+}
+
/** manipulate fields of individual nodes **/
/* this works for all except Block */
ir_node *
get_nodes_block (const ir_node *node) {
assert (!(node->op == op_Block));
+ /* a floating node's stored block may be stale, so demand a pinned node */
+ assert (is_irn_pinned_in_irg(node) && "block info may be incorrect");
return get_irn_n(node, -1);
}
return (ir_node **)&(get_irn_in(node)[1]);
}
-
+/* Returns the number of control-flow predecessors of a Block
+   (now an out-of-line wrapper; parentheses suppress the macro). */
int
-get_Block_n_cfgpreds (ir_node *node) {
- assert ((node->op == op_Block));
- return get_irn_arity(node);
+(get_Block_n_cfgpreds)(ir_node *node) {
+ return get_Block_n_cfgpreds(node);
}
+/* Returns the control-flow predecessor of a Block at position pos
+   (out-of-line wrapper; parentheses suppress the macro). */
ir_node *
-get_Block_cfgpred (ir_node *node, int pos) {
- assert(-1 <= pos && pos < get_irn_arity(node));
- assert(node->op == op_Block);
- return get_irn_n(node, pos);
+(get_Block_cfgpred)(ir_node *node, int pos) {
+ return get_Block_cfgpred(node, pos);
}
void
set_irn_n(node, pos, pred);
}
-bool
+/* Returns the block of the control-flow predecessor at pos. */
+ir_node *
+(get_Block_cfgpred_block)(ir_node *node, int pos) {
+ return _get_Block_cfgpred_block(node, pos);
+}
+
+/* Returns the matured flag of a Block (interface widened from bool to int). */
+int
get_Block_matured (ir_node *node) {
assert (node->op == op_Block);
- return node->attr.block.matured;
+ return (int)node->attr.block.matured;
}
+/* Sets the matured flag of a Block (interface widened from bool to int). */
void
-set_Block_matured (ir_node *node, bool matured) {
+set_Block_matured (ir_node *node, int matured) {
assert (node->op == op_Block);
node->attr.block.matured = matured;
}
+
+/* Returns the block_visited counter of a Block (out-of-line wrapper). */
unsigned long
-get_Block_block_visited (ir_node *node) {
- assert (node->op == op_Block);
- return node->attr.block.block_visited;
+(get_Block_block_visited)(ir_node *node) {
+ return _get_Block_block_visited(node);
}
+/* Sets the block_visited counter of a Block (out-of-line wrapper). */
void
-set_Block_block_visited (ir_node *node, unsigned long visit) {
- assert (node->op == op_Block);
- node->attr.block.block_visited = visit;
+(set_Block_block_visited)(ir_node *node, unsigned long visit) {
+ _set_Block_block_visited(node, visit);
}
/* For this current_ir_graph must be set. */
+/* Marks the Block as visited with the graph's current block_visited
+   counter (behavior inlined in _mark_Block_block_visited). */
void
-mark_Block_block_visited (ir_node *node) {
- assert (node->op == op_Block);
- node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
+(mark_Block_block_visited)(ir_node *node) {
+ _mark_Block_block_visited(node);
}
+/* Returns non-zero if the Block has not yet been visited in the
+   current block_visited traversal (out-of-line wrapper). */
int
-Block_not_block_visited(ir_node *node) {
- assert (node->op == op_Block);
- return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
+(Block_not_block_visited)(ir_node *node) {
+ return _Block_not_block_visited(node);
}
ir_node *
node->attr.block.in_cg[0] = NULL;
node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
{
- /* Fix backedge array. fix_backedges operates depending on
+ /* Fix backedge array. fix_backedges() operates depending on
interprocedural_view. */
int ipv = get_interprocedural_view();
- set_interprocedural_view(true);
+ set_interprocedural_view(1);
fix_backedges(current_ir_graph->obst, node);
set_interprocedural_view(ipv);
}
return _is_Block_dead(block);
}
+/* Returns the extended basic block a Block belongs to (may be unset). */
+ir_extblk *get_Block_extbb(const ir_node *block) {
+ assert(is_Block(block));
+ return block->attr.block.extblk;
+}
+
+/* Assigns a Block to an extended basic block. */
+void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
+ assert(is_Block(block));
+ block->attr.block.extblk = extblk;
+}
+
void
set_Start_irg(ir_node *node, ir_graph *irg) {
assert(node->op == op_Start);
in array afterwards ... */
}
+/* Returns the target address (in 0) of an IJmp */
+ir_node *get_IJmp_target(ir_node *ijmp) {
+ assert(ijmp->op == op_IJmp);
+ return get_irn_n(ijmp, 0);
+}
+
+/* Sets the target address (in 0) of an IJmp */
+void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
+ assert(ijmp->op == op_IJmp);
+ set_irn_n(ijmp, 0, tgt);
+}
/*
> Implementing the case construct (which is where the constant Proj node is
node->attr.cast.totype = to_tp;
}
+
+/* Checks for upcast.
+ *
+ * Returns true if the Cast node casts a class type to a super type.
+ * Requires consistent typeinfo on the graph (asserted below).
+ */
+int is_Cast_upcast(ir_node *node) {
+ type *totype = get_Cast_type(node);
+ type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
+ ir_graph *myirg = get_irn_irg(node);
+
+ assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);
+ assert(fromtype);
+
+ /* strip common pointer levels so the class types can be compared */
+ while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
+ totype = get_pointer_points_to_type(totype);
+ fromtype = get_pointer_points_to_type(fromtype);
+ }
+
+ assert(fromtype);
+
+ if (!is_Class_type(totype)) return 0;
+ return is_subclass_of(fromtype, totype);
+}
+
+/* Checks for downcast.
+ *
+ * Returns true if the Cast node casts a class type to a sub type.
+ * Requires consistent typeinfo on the graph (asserted below).
+ */
+int is_Cast_downcast(ir_node *node) {
+ type *totype = get_Cast_type(node);
+ type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
+
+ assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
+ assert(fromtype);
+
+ /* strip common pointer levels so the class types can be compared */
+ while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
+ totype = get_pointer_points_to_type(totype);
+ fromtype = get_pointer_points_to_type(fromtype);
+ }
+
+ assert(fromtype);
+
+ if (!is_Class_type(totype)) return 0;
+ return is_subclass_of(totype, fromtype);
+}
+
int
(is_unop)(const ir_node *node) {
return _is_unop(node);
set_irn_n(node, pos, pred);
}
+/* Returns the type associated with a Proj node, or NULL if it cannot
+   be determined: parameter types for Proj(Proj(Start)), result types
+   for Proj(Proj(Call)), the entity type for a Load through a Sel. */
+type *get_Proj_type(ir_node *n)
+{
+ type *tp = NULL;
+ ir_node *pred = get_Proj_pred(n);
+
+ switch (get_irn_opcode(pred)) {
+ case iro_Proj: {
+ ir_node *pred_pred;
+ /* Deal with Start / Call here: we need to know the Proj Nr. */
+ assert(get_irn_mode(pred) == mode_T);
+ pred_pred = get_Proj_pred(pred);
+ if (get_irn_op(pred_pred) == op_Start) {
+ type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
+ tp = get_method_param_type(mtp, get_Proj_proj(n));
+ } else if (get_irn_op(pred_pred) == op_Call) {
+ type *mtp = get_Call_type(pred_pred);
+ tp = get_method_res_type(mtp, get_Proj_proj(n));
+ }
+ } break;
+ case iro_Start: break;
+ case iro_Call: break;
+ case iro_Load: {
+ ir_node *a = get_Load_ptr(pred);
+ if (get_irn_op(a) == op_Sel)
+ tp = get_entity_type(get_Sel_entity(a));
+ } break;
+ default:
+ break;
+ }
+ return tp;
+}
+
+/* Returns the predecessor (the projected node) of a Proj; parameter
+   is now const-qualified. */
ir_node *
-get_Proj_pred (ir_node *node) {
+get_Proj_pred (const ir_node *node) {
assert (is_Proj(node));
return get_irn_n(node, 0);
}
}
long
-get_Proj_proj (ir_node *node) {
+get_Proj_proj (const ir_node *node) {
assert (is_Proj(node));
if (get_irn_opcode(node) == iro_Proj) {
return node->attr.proj;
node->in[3] = ir_true;
}
+/* CopyB support: accessors for the memory (in 0), destination (in 1)
+   and source (in 2) operands plus the copied data_type attribute. */
+ir_node *get_CopyB_mem (ir_node *node) {
+ assert (node->op == op_CopyB);
+ return get_irn_n(node, 0);
+}
+
+void set_CopyB_mem (ir_node *node, ir_node *mem) {
+ assert (node->op == op_CopyB);
+ set_irn_n(node, 0, mem);
+}
+
+ir_node *get_CopyB_dst (ir_node *node) {
+ assert (node->op == op_CopyB);
+ return get_irn_n(node, 1);
+}
+
+void set_CopyB_dst (ir_node *node, ir_node *dst) {
+ assert (node->op == op_CopyB);
+ set_irn_n(node, 1, dst);
+}
+
+ir_node *get_CopyB_src (ir_node *node) {
+ assert (node->op == op_CopyB);
+ return get_irn_n(node, 2);
+}
+
+void set_CopyB_src (ir_node *node, ir_node *src) {
+ assert (node->op == op_CopyB);
+ set_irn_n(node, 2, src);
+}
+
+type *get_CopyB_type(ir_node *node) {
+ assert (node->op == op_CopyB);
+ return node->attr.copyb.data_type;
+}
+
+/* data_type must not be NULL */
+void set_CopyB_type(ir_node *node, type *data_type) {
+ assert (node->op == op_CopyB && data_type);
+ node->attr.copyb.data_type = data_type;
+}
+
+/* Returns the ir_graph a node belongs to, read from its block's irg
+   attribute; works even while the node's block info is not yet exact. */
ir_graph *
get_irn_irg(const ir_node *node) {
+ /*
+ * Do not use get_nodes_Block() here, because this
+ * will check the pinned state.
+ * However even a 'wrong' block is always in the proper
+ * irg.
+ */
if (! is_Block(node))
- node = get_nodes_block(node);
+ node = get_irn_n(node, -1);
if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
- node = get_nodes_block(node);
+ node = get_irn_n(node, -1);
assert(get_irn_op(node) == op_Block);
return node->attr.block.irg;
}
+/* Resolves Proj(Tuple) chains: replaces the node by the selected Tuple
+   operand and restarts, so nested Tuples are skipped iteratively. */
ir_node *
skip_Tuple (ir_node *node) {
ir_node *pred;
+ ir_op *op;
if (!get_opt_normalize()) return node;
+restart:
node = skip_Id(node);
if (get_irn_op(node) == op_Proj) {
pred = skip_Id(get_Proj_pred(node));
- if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
+ op = get_irn_op(pred);
+
+ /*
+ * Looks strange but calls get_irn_op() only once
+ * in most often cases.
+ */
+ if (op == op_Proj) { /* nested Tuple ? */
pred = skip_Id(skip_Tuple(pred));
+ op = get_irn_op(pred);
+
+ if (op == op_Tuple) {
+ node = get_Tuple_pred(pred, get_Proj_proj(node));
+ goto restart;
+ }
+ }
+ else if (op == op_Tuple) {
+ node = get_Tuple_pred(pred, get_Proj_proj(node));
+ goto restart;
+ }
- if (get_irn_op(pred) == op_Tuple)
- return get_Tuple_pred(pred, get_Proj_proj(node));
}
return node;
}
-/** returns operand of node if node is a Cast */
+/* returns operand of node if node is a Cast */
+/* NOTE(review): unlike the old version the result is no longer run
+   through skip_Id() -- confirm callers do not depend on that. */
ir_node *skip_Cast (ir_node *node) {
- if (node && get_irn_op(node) == op_Cast) {
- return skip_Id(get_irn_n(node, 0));
- } else {
- return node;
- }
+ if (node && get_irn_op(node) == op_Cast)
+ return get_Cast_op(node);
+ return node;
+}
+
+/* returns operand of node if node is a Confirm; NULL-safe */
+ir_node *skip_Confirm (ir_node *node) {
+ if (node && get_irn_op(node) == op_Confirm)
+ return get_Confirm_value(node);
+ return node;
+}
+
+/* skip all high-level ops: returns operand 0 of a high-level node,
+   the node itself otherwise; NULL-safe */
+ir_node *skip_HighLevel(ir_node *node) {
+ if (node && is_op_highlevel(get_irn_op(node)))
+ return get_irn_n(node, 0);
+ return node;
+}
#if 0
return _is_Bad(node);
}
+/* Returns non-zero if the node is a Const node (out-of-line wrapper). */
+int
+(is_Const)(const ir_node *node) {
+ return _is_Const(node);
+}
+
int
(is_no_Block)(const ir_node *node) {
return _is_no_Block(node);
/* returns true if node is a Unknown node. */
int
-is_Unknown (const ir_node *node) {
- assert(node);
- return (get_irn_op(node) == op_Unknown);
+(is_Unknown)(const ir_node *node) {
+ /* out-of-line wrapper around the inline _is_Unknown */
+ return _is_Unknown(node);
}
int
}
/* Returns true if the operation is a forking control flow operation. */
+/* NOTE(review): this renames the public function is_forking_op() to
+   is_irn_forking() -- make sure all callers are updated. */
-int
-is_forking_op(const ir_node *node) {
- return is_op_forking(get_irn_op(node));
+int (is_irn_forking)(const ir_node *node) {
+ return _is_irn_forking(node);
+}
+
+/* Returns the type attribute attached to a node, if any. */
+type *(get_irn_type)(ir_node *node) {
+ return _get_irn_type(node);
+}
+
+/* Returns non-zero for constant-like nodes. */
+int (is_irn_constlike)(const ir_node *node) {
+ return _is_irn_constlike(node);
+}
+/* Gets the string representation of the jump prediction. */
+const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
+{
+ switch (pred) {
+ default: /* unknown values map to "no prediction" */
+ case COND_JMP_PRED_NONE: return "no prediction";
+ case COND_JMP_PRED_TRUE: return "true taken";
+ case COND_JMP_PRED_FALSE: return "false taken";
+ }
+}
+
+/* Returns the conditional jump prediction of a Cond node. */
+cond_jmp_predicate (get_Cond_jmp_pred)(ir_node *cond) {
+ return _get_Cond_jmp_pred(cond);
+}
+
+/* Sets a new conditional jump prediction. */
+void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
+ _set_Cond_jmp_pred(cond, pred);
+}
+
+/** The get_type operation must always be implemented: default stub
+    returning NULL for opcodes without a type. */
+static type *get_Null_type(ir_node *n) {
+ return NULL;
+}
+
+/* Sets the get_type operation for an ir_op_ops.
+   Known opcodes get their specific accessor; all others keep a
+   previously set callback or fall back to get_Null_type. */
+ir_op_ops *firm_set_default_get_type(opcode code, ir_op_ops *ops)
+{
+ switch (code) {
+ case iro_Const: ops->get_type = get_Const_type; break;
+ case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
+ case iro_Cast: ops->get_type = get_Cast_type; break;
+ case iro_Proj: ops->get_type = get_Proj_type; break;
+ default:
+ /* not allowed to be NULL */
+ if (! ops->get_type)
+ ops->get_type = get_Null_type;
+ break;
+ }
+ return ops;
+}
#ifdef DEBUG_libfirm
void dump_irn (ir_node *n) {