fix_backedges(irg->obst, node);
memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
+
+ /* update irg flags */
+ set_irg_outs_inconsistent(irg);
+ set_irg_loopinfo_inconsistent(irg);
}
ir_node *(get_irn_n)(const ir_node *node, int n)
edges_notify_edge(node, n, in, node->in[n + 1], irg);
node->in[n + 1] = in;
+
+ /* update irg flags */
+ set_irg_outs_inconsistent(irg);
+ set_irg_loopinfo_inconsistent(irg);
}
int add_irn_n(ir_node *node, ir_node *in)
end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
}
+
+ /* update irg flags */
+ set_irg_outs_inconsistent(irg);
}
/* Set new keep-alives from old keep-alives, skipping irn */
}
/* now n - 1 keeps, 1 block input */
ARR_RESIZE(ir_node *, end->in, (n - 1) + 1 + END_KEEPALIVE_OFFSET);
+
+ /* update irg flags */
+ set_irg_outs_inconsistent(irg);
}
/* remove Bads, NoMems and doublets from the keep-alive set */
ir_node *get_memop_mem(const ir_node *node)
{
	assert(is_memop(node));
	/* The memory input of every memop sits at input slot 0; this assert
	 * pins the hard-coded index to the Load/Store input layout. */
	assert(n_Load_mem == 0 && n_Store_mem == 0);
	return get_irn_n(node, 0);
}
void set_memop_mem(ir_node *node, ir_node *mem)
{
	assert(is_memop(node));
	/* Slot 0 is the memory input for both Load and Store; keep the
	 * hard-coded index honest against layout changes. */
	assert(n_Load_mem == 0 && n_Store_mem == 0);
	set_irn_n(node, 0, mem);
}
ir_node *get_memop_ptr(const ir_node *node)
{
	assert(is_memop(node));
	/* The address input of every memop sits at input slot 1; this assert
	 * pins the hard-coded index to the Load/Store input layout. */
	assert(n_Load_mem == 1 && n_Store_mem == 1);
	return get_irn_n(node, 1);
}
void set_memop_ptr(ir_node *node, ir_node *ptr)
{
	assert(is_memop(node));
	/* Slot 1 is the address input for both Load and Store; keep the
	 * hard-coded index honest against layout changes. */
	assert(n_Load_mem == 1 && n_Store_mem == 1);
	set_irn_n(node, 1, ptr);
}
return _is_arg_Proj(node);
}
+int is_x_except_Proj(const ir_node *node)
+{
+ ir_node *pred;
+ if (!is_Proj(node))
+ return false;
+ pred = get_Proj_pred(node);
+ if (!is_fragile_op(pred))
+ return false;
+ return get_Proj_proj(node) == pred->op->pn_x_except;
+}
+
+int is_x_regular_Proj(const ir_node *node)
+{
+ ir_node *pred;
+ if (!is_Proj(node))
+ return false;
+ pred = get_Proj_pred(node);
+ if (!is_fragile_op(pred))
+ return false;
+ return get_Proj_proj(node) == pred->op->pn_x_regular;
+}
+
ir_node **get_Tuple_preds_arr(ir_node *node)
{
assert(is_Tuple(node));
/* Return the memory input of a fragile operation.  Instead of a per-opcode
 * switch, each fragile ir_op records the index of its memory input in
 * fragile_mem_index, so a single table-driven lookup suffices. */
ir_node *get_fragile_op_mem(ir_node *node)
{
	int mem_index;
	assert(node && is_fragile_op(node));
	mem_index = node->op->fragile_mem_index;
	return get_irn_n(node, mem_index);
}
/* Returns true if the operation is a forking control flow operation. */