/*
* Calculates the negated (Complement(R)) pnc condition.
*/
-int
-get_negated_pnc(int pnc) {
- switch (pnc) {
- case pn_Cmp_False: return pn_Cmp_True;
- case pn_Cmp_Eq: return pn_Cmp_Ne;
- case pn_Cmp_Lt: return pn_Cmp_Uge;
- case pn_Cmp_Le: return pn_Cmp_Ug;
- case pn_Cmp_Gt: return pn_Cmp_Ule;
- case pn_Cmp_Ge: return pn_Cmp_Ul;
- case pn_Cmp_Lg: return pn_Cmp_Ue;
- case pn_Cmp_Leg: return pn_Cmp_Uo;
- case pn_Cmp_Uo: return pn_Cmp_Leg;
- case pn_Cmp_Ue: return pn_Cmp_Lg;
- case pn_Cmp_Ul: return pn_Cmp_Ge;
- case pn_Cmp_Ule: return pn_Cmp_Gt;
- case pn_Cmp_Ug: return pn_Cmp_Le;
- case pn_Cmp_Uge: return pn_Cmp_Lt;
- case pn_Cmp_Ne: return pn_Cmp_Eq;
- case pn_Cmp_True: return pn_Cmp_False;
- }
- return 99; /* to shut up gcc */
+int get_negated_pnc(int pnc, ir_mode *mode) {
+ pnc ^= pn_Cmp_True;
+
+ /* do NOT add the Uo bit for non-floating point values */
+ if (! mode_is_float(mode))
+ pnc &= ~pn_Cmp_Uo;
+
+ return pnc;
}
/* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
int not_a_block = is_no_Block(res);
INIT_LIST_HEAD(&res->edge_info.outs_head);
+ if(!not_a_block)
+ INIT_LIST_HEAD(&res->attr.block.succ_head);
+
for (i = 0, n = arity + not_a_block; i < n; ++i)
edges_notify_edge(res, i - not_a_block, res->in[i], NULL, irg);
void
set_irn_in (ir_node *node, int arity, ir_node **in) {
+ int i;
ir_node *** arr;
assert(node);
if (get_interprocedural_view()) { /* handle Filter and Block specially */
} else {
arr = &node->in;
}
- if (arity != ARR_LEN(*arr) - 1) {
+
+ for (i = 0; i < arity; i++) {
+ if (i < ARR_LEN(*arr)-1)
+ edges_notify_edge(node, i, in[i], (*arr)[i+1], current_ir_graph);
+ else
+ edges_notify_edge(node, i, in[i], NULL, current_ir_graph);
+ }
+ for(;i < ARR_LEN(*arr)-1; i++) {
+ edges_notify_edge(node, i, NULL, (*arr)[i+1], current_ir_graph);
+ }
+
+ if (arity != ARR_LEN(*arr) - 1) {
ir_node * block = (*arr)[0];
*arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
(*arr)[0] = block;
}
fix_backedges(current_ir_graph->obst, node);
+
memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
}
return _get_irn_pinned(node);
}
+op_pin_state
+(is_irn_pinned_in_irg) (const ir_node *node) {
+ return _is_irn_pinned_in_irg(node);
+}
+
void set_irn_pinned(ir_node *node, op_pin_state state) {
/* due to optimization an opt may be turned into a Tuple */
if (get_irn_op(node) == op_Tuple)
return node->attr.except;
}
+void *
+get_irn_generic_attr (ir_node *node) {
+ return &node->attr;
+}
+
/** manipulate fields of individual nodes **/
/* this works for all except Block */
/* Returns the block a node belongs to; valid for all nodes except Block.
   The second assert documents that an unpinned node's block input may be
   stale — callers must only rely on it for pinned nodes. */
ir_node *
get_nodes_block (const ir_node *node) {
  assert (!(node->op == op_Block));
  assert (is_irn_pinned_in_irg(node) && "block info may be incorrect");
  return get_irn_n(node, -1);
}
}
int
-get_Block_n_cfgpreds (ir_node *node) {
- assert ((node->op == op_Block));
- return get_irn_arity(node);
+(get_Block_n_cfgpreds)(ir_node *node) {
+ return get_Block_n_cfgpreds(node);
}
/* Returns the control flow predecessor of a Block at position pos. */
ir_node *
(get_Block_cfgpred)(ir_node *node, int pos) {
  /* Call the underscore variant directly, matching the other wrappers;
     an unparenthesized self-call would rely on the header macro being
     visible and otherwise recurse forever. */
  return _get_Block_cfgpred(node, pos);
}
void
set_irn_n(node, pos, pred);
}
-bool
+ir_node *
+(get_Block_cfgpred_block)(ir_node *node, int pos) {
+ return _get_Block_cfgpred_block(node, pos);
+}
+
+int
get_Block_matured (ir_node *node) {
assert (node->op == op_Block);
- return node->attr.block.matured;
+ return (int)node->attr.block.matured;
}
void
-set_Block_matured (ir_node *node, bool matured) {
+set_Block_matured (ir_node *node, int matured) {
assert (node->op == op_Block);
node->attr.block.matured = matured;
}
node->attr.block.in_cg[0] = NULL;
node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
{
- /* Fix backedge array. fix_backedges operates depending on
+ /* Fix backedge array. fix_backedges() operates depending on
interprocedural_view. */
int ipv = get_interprocedural_view();
- set_interprocedural_view(true);
+ set_interprocedural_view(1);
fix_backedges(current_ir_graph->obst, node);
set_interprocedural_view(ipv);
}
in array afterwards ... */
}
+/* Return the target address of an IJmp */
+ir_node *get_IJmp_target(ir_node *ijmp) {
+ assert(ijmp->op == op_IJmp);
+ return get_irn_n(ijmp, 0);
+}
+
+/** Sets the target address of an IJmp */
+void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
+ assert(ijmp->op == op_IJmp);
+ set_irn_n(ijmp, 0, tgt);
+}
/*
> Implementing the case construct (which is where the constant Proj node is
assert(fromtype);
- if (!is_Class_type(totype)) return false;
+ if (!is_Class_type(totype)) return 0;
return is_subclass_of(fromtype, totype);
}
assert(fromtype);
- if (!is_Class_type(totype)) return false;
+ if (!is_Class_type(totype)) return 0;
return is_subclass_of(totype, fromtype);
}
node->in[3] = ir_true;
}
+/* CopyB support */
+ir_node *get_CopyB_mem (ir_node *node) {
+ assert (node->op == op_CopyB);
+ return get_irn_n(node, 0);
+}
+
+void set_CopyB_mem (ir_node *node, ir_node *mem) {
+ assert (node->op == op_CopyB);
+ set_irn_n(node, 0, mem);
+}
+
+ir_node *get_CopyB_dst (ir_node *node) {
+ assert (node->op == op_CopyB);
+ return get_irn_n(node, 1);
+}
+
+void set_CopyB_dst (ir_node *node, ir_node *dst) {
+ assert (node->op == op_CopyB);
+ set_irn_n(node, 1, dst);
+}
+
+ir_node *get_CopyB_src (ir_node *node) {
+ assert (node->op == op_CopyB);
+ return get_irn_n(node, 2);
+}
+
+void set_CopyB_src (ir_node *node, ir_node *src) {
+ assert (node->op == op_CopyB);
+ set_irn_n(node, 2, src);
+}
+
+type *get_CopyB_type(ir_node *node) {
+ assert (node->op == op_CopyB);
+ return node->attr.copyb.data_type;
+}
+
+void set_CopyB_type(ir_node *node, type *data_type) {
+ assert (node->op == op_CopyB && data_type);
+ node->attr.copyb.data_type = data_type;
+}
+
/* Returns the irg a node belongs to. */
ir_graph *
get_irn_irg(const ir_node *node) {
  /*
   * Deliberately NOT get_nodes_block() here: that would assert the
   * pinned state, and even a 'wrong' block is always in the proper irg.
   */
  if (!is_Block(node))
    node = get_irn_n(node, -1);
  /* after optimization a Bad may sit where a block is expected */
  if (is_Bad(node))
    node = get_irn_n(node, -1);
  assert(get_irn_op(node) == op_Block);
  return node->attr.block.irg;
}
/* Skips Proj(Tuple) chains: whenever node is a Proj selecting from a
   Tuple, it is replaced by the selected Tuple input, repeatedly. */
ir_node *
skip_Tuple (ir_node *node) {
  if (!get_opt_normalize()) return node;

  for (;;) {
    ir_node *pred;
    ir_op   *op;

    node = skip_Id(node);
    if (get_irn_op(node) != op_Proj)
      return node;

    pred = skip_Id(get_Proj_pred(node));
    op   = get_irn_op(pred);

    if (op == op_Proj) {
      /* nested Tuple? resolve the inner chain first */
      pred = skip_Id(skip_Tuple(pred));
      op   = get_irn_op(pred);
    }
    if (op != op_Tuple)
      return node;

    /* replace Proj(Tuple) by the selected Tuple input and retry */
    node = get_Tuple_pred(pred, get_Proj_proj(node));
  }
}
}
/* Returns true if the operation is a forking control flow operation. */
-int
-is_forking_op(const ir_node *node) {
- return is_op_forking(get_irn_op(node));
+int (is_irn_forking)(const ir_node *node) {
+ return _is_irn_forking(node);
}
/* Returns the type attribute of a node; out-of-line wrapper around
   _get_irn_type() (the parenthesized name presumably bypasses a
   header macro — same pattern as the other wrappers in this file). */
type *(get_irn_type)(ir_node *node) {
  return _get_irn_type(node);
}
+/* Returns non-zero for constant-like nodes. */
+int (is_irn_constlike)(const ir_node *node) {
+ return _is_irn_constlike(node);
+}
+
+/* Gets the string representation of the jump prediction .*/
+const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
+{
+ switch (pred) {
+ default:
+ case COND_JMP_PRED_NONE: return "no prediction";
+ case COND_JMP_PRED_TRUE: return "true taken";
+ case COND_JMP_PRED_FALSE: return "false taken";
+ }
+}
+
+/* Returns the conditional jump prediction of a Cond node. */
+cond_jmp_predicate (get_Cond_jmp_pred)(ir_node *cond) {
+ return _get_Cond_jmp_pred(cond);
+}
+
+/* Sets a new conditional jump prediction. */
+void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
+ _set_Cond_jmp_pred(cond, pred);
+}
+
/** the get_type operation must always be implemented */
/* Default handler: returns NULL for ops that carry no type attribute
   (installed by firm_set_default_get_type's default case). */
static type *get_Null_type(ir_node *n) {
  return NULL;
}
-/* set the get_type operation */
-ir_op *firm_set_default_get_type(ir_op *op)
+/* Sets the get_type operation for an ir_op_ops. */
+ir_op_ops *firm_set_default_get_type(opcode code, ir_op_ops *ops)
{
- switch (op->code) {
- case iro_Const: op->get_type = get_Const_type; break;
- case iro_SymConst: op->get_type = get_SymConst_value_type; break;
- case iro_Cast: op->get_type = get_Cast_type; break;
- case iro_Proj: op->get_type = get_Proj_type; break;
- default: op->get_type = get_Null_type; break;
+ switch (code) {
+ case iro_Const: ops->get_type = get_Const_type; break;
+ case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
+ case iro_Cast: ops->get_type = get_Cast_type; break;
+ case iro_Proj: ops->get_type = get_Proj_type; break;
+ default:
+ /* not allowed to be NULL */
+ if (! ops->get_type)
+ ops->get_type = get_Null_type;
+ break;
}
- return op;
+ return ops;
}
#ifdef DEBUG_libfirm