void
set_irn_in (ir_node *node, int arity, ir_node **in) {
+ int i;
ir_node *** arr;
assert(node);
if (get_interprocedural_view()) { /* handle Filter and Block specially */
(*arr)[0] = block;
}
fix_backedges(current_ir_graph->obst, node);
+
+ for (i = 0; i < arity; i++) {
+ edges_notify_edge(node, i, in[i], (*arr)[i+1], current_ir_graph);
+ }
+
memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
}
return node->attr.except;
}
+void *
+get_irn_generic_attr (ir_node *node) {
+ return &node->attr;
+}
+
/** manipulate fields of individual nodes **/

/* this works for all except Block */
ir_node *
get_nodes_block (const ir_node *node) {
  /* A Block has no block of its own. */
  assert (node->op != op_Block);
  /* Floating (un-pinned) nodes may carry a stale block pointer. */
  assert (is_irn_pinned_in_irg(node) && "block info may be incorrect");
  return get_irn_n(node, -1);
}
set_irn_n(node, pos, pred);
}
+ir_node *
+(get_Block_cfgpred_block)(ir_node *node, int pos) {
+ return _get_Block_cfgpred_block(node, pos);
+}
+
bool
get_Block_matured (ir_node *node) {
assert (node->op == op_Block);
node->attr.block.in_cg[0] = NULL;
node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
{
- /* Fix backedge array. fix_backedges operates depending on
+ /* Fix backedge array. fix_backedges() operates depending on
interprocedural_view. */
int ipv = get_interprocedural_view();
set_interprocedural_view(true);
in array afterwards ... */
}
+/* Return the target address of an IJmp */
+ir_node *get_IJmp_target(ir_node *ijmp) {
+ assert(ijmp->op == op_IJmp);
+ return get_irn_n(ijmp, 0);
+}
+
+/** Sets the target address of an IJmp */
+void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
+ assert(ijmp->op == op_IJmp);
+ set_irn_n(ijmp, 0, tgt);
+}
/*
> Implementing the case construct (which is where the constant Proj node is
node->in[3] = ir_true;
}
+/* CopyB support */
+ir_node *get_CopyB_mem (ir_node *node) {
+ assert (node->op == op_CopyB);
+ return get_irn_n(node, 0);
+}
+
+void set_CopyB_mem (ir_node *node, ir_node *mem) {
+ assert (node->op == op_CopyB);
+ set_irn_n(node, 0, mem);
+}
+
+ir_node *get_CopyB_dst (ir_node *node) {
+ assert (node->op == op_CopyB);
+ return get_irn_n(node, 1);
+}
+
+void set_CopyB_dst (ir_node *node, ir_node *dst) {
+ assert (node->op == op_CopyB);
+ set_irn_n(node, 1, dst);
+}
+
+ir_node *get_CopyB_src (ir_node *node) {
+ assert (node->op == op_CopyB);
+ return get_irn_n(node, 2);
+}
+
+void set_CopyB_src (ir_node *node, ir_node *src) {
+ assert (node->op == op_CopyB);
+ set_irn_n(node, 2, src);
+}
+
+type *get_CopyB_type(ir_node *node) {
+ assert (node->op == op_CopyB);
+ return node->attr.copyb.data_type;
+}
+
+void set_CopyB_type(ir_node *node, type *data_type) {
+ assert (node->op == op_CopyB && data_type);
+ node->attr.copyb.data_type = data_type;
+}
+
ir_graph *
get_irn_irg(const ir_node *node) {
+ /*
+ * Do not use get_nodes_Block() here, because this
+ * will check the pinned state.
+ * However even a 'wrong' block is always in the proper
+ * irg.
+ */
if (! is_Block(node))
- node = get_nodes_block(node);
+ node = get_irn_n(node, -1);
if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
- node = get_nodes_block(node);
+ node = get_irn_n(node, -1);
assert(get_irn_op(node) == op_Block);
return node->attr.block.irg;
}
/* Skips chains of Proj-on-Tuple nodes, returning the real predecessor.
   Also resolves nested Tuples by recursing on the inner one first. */
ir_node *
skip_Tuple (ir_node *node) {
  ir_node *pred;
  ir_op   *op;

  if (!get_opt_normalize()) return node;

  for (;;) {
    node = skip_Id(node);
    if (get_irn_op(node) != op_Proj)
      return node;

    pred = skip_Id(get_Proj_pred(node));
    op   = get_irn_op(pred);

    /* Read the op once and branch on the cached value. */
    if (op == op_Proj) {  /* nested Tuple? peel the inner layer first */
      pred = skip_Id(skip_Tuple(pred));
      op   = get_irn_op(pred);
    }
    if (op != op_Tuple)
      return node;

    /* Replace the Proj by the Tuple operand it selects and iterate. */
    node = get_Tuple_pred(pred, get_Proj_proj(node));
  }
}
}
/* Returns true if the operation is a forking control flow operation. */
-int
-is_forking_op(const ir_node *node) {
- return is_op_forking(get_irn_op(node));
+int (is_irn_forking)(const ir_node *node) {
+ return _is_irn_forking(node);
}
/* Out-of-line version: returns the type attribute of a node. */
type *
(get_irn_type)(ir_node *node) {
  return _get_irn_type(node);
}
+/* Returns non-zero for constant-like nodes. */
+int (is_irn_constlike)(const ir_node *node) {
+ return _is_irn_constlike(node);
+}
+
+/* Gets the string representation of the jump prediction .*/
+const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
+{
+ switch (pred) {
+ default:
+ case COND_JMP_PRED_NONE: return "no prediction";
+ case COND_JMP_PRED_TRUE: return "true taken";
+ case COND_JMP_PRED_FALSE: return "false taken";
+ }
+}
+
+/* Returns the conditional jump prediction of a Cond node. */
+cond_jmp_predicate (get_Cond_jmp_pred)(ir_node *cond) {
+ return _get_Cond_jmp_pred(cond);
+}
+
+/* Sets a new conditional jump prediction. */
+void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
+ _set_Cond_jmp_pred(cond, pred);
+}
+
/** the get_type operation must be always implemented */
static type *get_Null_type(ir_node *n) {
return NULL;