/* Constants fixing the positions of a node's predecessors
   in the "in" array */
-#define CALL_PARAM_OFFSET 2
-#define SEL_INDEX_OFFSET 2
-#define RETURN_RESULT_OFFSET 1 /* mem is not a result */
-#define END_KEEPALIVE_OFFSET 0
+#define CALL_PARAM_OFFSET 2
+#define FUNCCALL_PARAM_OFFSET 1
+#define SEL_INDEX_OFFSET 2
+#define RETURN_RESULT_OFFSET 1 /* mem is not a result */
+#define END_KEEPALIVE_OFFSET 0
static const char *pnc_name_arr [] = {
"False", "Eq", "Lt", "Le",
*/
ir_node *
new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
- int arity, ir_node **in)
+ int arity, ir_node **in)
{
ir_node *res;
int node_size = offsetof (ir_node, attr) + op->attr_size;
void
set_irn_n (ir_node *node, int n, ir_node *in) {
- assert(node && -1 <= n && n < get_irn_arity(node));
+ assert(node && node->kind == k_ir_node && -1 <= n && n < get_irn_arity(node));
+ assert(in && in->kind == k_ir_node);
if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
/* Change block pred in both views! */
node->in[n + 1] = in;
return __get_irn_link(node);
}
+op_pin_state
+(get_irn_pinned)(const ir_node *node) {
+ return __get_irn_pinned(node);
+}
+
+
#ifdef DO_HEAPANALYSIS
/* Access the abstract interpretation information of a node.
   Returns NULL if no such information is available. */
struct abstval *get_irn_abst_value(ir_node *node) {
  return node->av;
}

/* Set the abstract interpretation information of a node. */
void set_irn_abst_value(ir_node *node, struct abstval *val) {
  node->av = val;
}

/* Access the `section' information attached to a node.
   NOTE(review): semantics of struct section are defined by the heap
   analysis module — not visible here. */
struct section *firm_get_irn_section(ir_node *node) {
  return node->sec;
}

/* Attach `section' information to a node. */
void firm_set_irn_section(ir_node *node, struct section *sec) {
  node->sec = sec;
}
#endif /* DO_HEAPANALYSIS */
+
+
/* Outputs a unique number for this node.  In debug builds this is the
   stable per-node counter; otherwise the node's address serves as an
   identifying number. */
long
get_irn_node_nr(const ir_node *node) {
#ifdef DEBUG_libfirm
  return node->node_nr;
#else
  /* NOTE(review): casting a pointer to long truncates on LLP64
     platforms (e.g. 64-bit Windows) — confirm supported data models. */
  return (long)node;
#endif
}
return node->attr.block;
}
+load_attr
+get_irn_load_attr (ir_node *node)
+{
+ assert (node->op == op_Load);
+ return node->attr.load;
+}
+
+store_attr
+get_irn_store_attr (ir_node *node)
+{
+ assert (node->op == op_Store);
+ return node->attr.store;
+}
+
+except_attr
+get_irn_except_attr (ir_node *node)
+{
+ assert (node->op == op_Div || node->op == op_Quot ||
+ node->op == op_DivMod || node->op == op_Mod);
+ return node->attr.except;
+}
+
/** manipulate fields of individual nodes **/

/* Returns the block a node belongs to (predecessor slot -1).
   Works for every node except Block itself. */
ir_node *
get_nodes_block (ir_node *node) {
  assert (node->op != op_Block);
  return get_irn_n(node, -1);
}

/* Moves a node into the given block.  Not applicable to Block nodes. */
void
set_nodes_block (ir_node *node, ir_node *block) {
  assert (node->op != op_Block);
  set_irn_n(node, -1, block);
}
node->attr.block.graph_arr[pos+1] = value;
}
-/* handler handling for Blocks * /
-void
-set_Block_handler (ir_node *block, ir_node *handler) {
- assert ((block->op == op_Block));
- assert ((handler->op == op_Block));
- block->attr.block.handler_entry = handler;
-}
-
-ir_node *
-get_Block_handler (ir_node *block) {
- assert ((block->op == op_Block));
- return (block->attr.block.handler_entry);
-}
-
-/ * handler handling for Nodes * /
-void
-set_Node_handler (ir_node *node, ir_node *handler) {
- set_Block_handler (get_nodes_Block (node), handler);
-}
-
-ir_node *
-get_Node_handler (ir_node *node) {
- return (get_Block_handler (get_nodes_Block (node)));
-}
-
-/ * exc_t handling for Blocks * /
-void set_Block_exc (ir_node *block, exc_t exc) {
- assert ((block->op == op_Block));
- block->attr.block.exc = exc;
-}
-
-exc_t get_Block_exc (ir_node *block) {
- assert ((block->op == op_Block));
- return (block->attr.block.exc);
-}
-
-/ * exc_t handling for Nodes * /
-void set_Node_exc (ir_node *node, exc_t exc) {
- set_Block_exc (get_nodes_Block (node), exc);
-}
-
-exc_t get_Node_exc (ir_node *node) {
- return (get_Block_exc (get_nodes_Block (node)));
-}
-*/
-
void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
assert(node->op == op_Block);
if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
{
/* Fix backedge array. fix_backedges operates depending on
- interprocedural_view. */
+ interprocedural_view. */
bool ipv = interprocedural_view;
interprocedural_view = true;
fix_backedges(current_ir_graph->obst, node);
void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
assert(node->op == op_Block &&
- node->attr.block.in_cg &&
- 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
+ node->attr.block.in_cg &&
+ 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
node->attr.block.in_cg[pos + 1] = pred;
}
node->attr.block.in_cg = NULL;
}
-/* Start references the irg it is in. */
-ir_graph *
-get_Start_irg(ir_node *node) {
- return get_irn_irg(node);
-}
-
void
set_Start_irg(ir_node *node, ir_graph *irg) {
assert(node->op == op_Start);
end->kind = k_BAD;
DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
end->in = NULL; /* @@@ make sure we get an error if we use the
- in array afterwards ... */
+ in array afterwards ... */
}
-ir_graph *get_EndReg_irg (ir_node *end) {
- return get_irn_irg(end);
-}
-
-ir_graph *get_EndExcept_irg (ir_node *end) {
- return get_irn_irg(end);
-}
/*
> Implementing the case construct (which is where the constant Proj node is
if (tp != unknown_type) {
assert (is_atomic_type(tp));
assert (get_type_mode(tp) == get_irn_mode(node));
- assert (!tarval_is_entity(get_Const_tarval(node)) ||
- (is_pointer_type(tp) &&
- (get_pointer_points_to_type(tp) ==
- get_entity_type(get_tarval_entity(get_Const_tarval(node))))));
}
node->attr.con.tp = tp;
/* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
entity *get_SymConst_entity (ir_node *node) {
assert ( (node->op == op_SymConst)
- && (get_SymConst_kind(node) == symconst_addr_ent));
+ && (get_SymConst_kind (node) == symconst_addr_ent));
return node->attr.i.sym.entity_p;
}
}
-symconst_symbol
-get_SymConst_type_or_id (ir_node *node) {
+union symconst_symbol
+get_SymConst_symbol (ir_node *node) {
assert (node->op == op_SymConst);
return node->attr.i.sym;
}
void
-set_SymConst_type_or_id (ir_node *node, symconst_symbol sym) {
+set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
assert (node->op == op_SymConst);
//memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
node->attr.i.sym = sym;
}
/* Returns non-zero if callee information is computed for this graph
   and an explicit callee array is attached to the Call. */
int Call_has_callees(ir_node *node) {
  assert(node && node->op == op_Call);
  if (get_irg_callee_info_state(get_irn_irg(node)) == irg_callee_info_none)
    return 0;
  return node->attr.call.callee_arr != NULL;
}

/* Returns the number of entries in the callee array. */
int get_Call_n_callees(ir_node * node) {
  assert(node && node->op == op_Call && node->attr.call.callee_arr);
  return ARR_LEN(node->attr.call.callee_arr);
}

/* Returns the pos-th possible callee of this Call. */
entity * get_Call_callee(ir_node * node, int pos) {
  assert(pos >= 0 && pos < get_Call_n_callees(node));
  return node->attr.call.callee_arr[pos];
}
-void set_Call_callee_arr(ir_node * node, int n, entity ** arr) {
+void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
assert(node->op == op_Call);
if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
assert(node->op == op_CallBegin);
set_irn_n(node, 0, ptr);
}
-ir_graph * get_CallBegin_irg (ir_node *node) {
- return get_irn_irg(node);
-}
ir_node * get_CallBegin_call (ir_node *node) {
assert(node->op == op_CallBegin);
return node->attr.callbegin.call;
/* Returns a pointer to the parameter slice of the in array.
   FUNCCALL_PARAM_OFFSET skips the non-parameter predecessors
   (a FuncCall has no memory input). */
ir_node **
get_FuncCall_param_arr (ir_node *node) {
  assert (node->op == op_FuncCall);
  return (ir_node **)&get_irn_in(node)[FUNCCALL_PARAM_OFFSET];
}
/* Returns the number of actual parameters of a FuncCall. */
int
get_FuncCall_n_params (ir_node *node)  {
  assert (node->op == op_FuncCall);
  return get_irn_arity(node) - FUNCCALL_PARAM_OFFSET;
}
int
/* Returns the pos-th actual parameter of a FuncCall. */
ir_node *
get_FuncCall_param (ir_node *node, int pos) {
  assert (node->op == op_FuncCall);
  return get_irn_n(node, pos + FUNCCALL_PARAM_OFFSET);
}
/* Sets the pos-th actual parameter of a FuncCall. */
void
set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
  assert (node->op == op_FuncCall);
  set_irn_n(node, pos + FUNCCALL_PARAM_OFFSET, param);
}
type *
}
/* Returns non-zero if callee information is computed for this graph
   and an explicit callee array is attached to the FuncCall.
   Fix: assert the node kind, mirroring Call_has_callees(); previously
   this dereferenced node->attr.call without checking node at all. */
int FuncCall_has_callees(ir_node *node) {
  assert(node && node->op == op_FuncCall);
  return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
          (node->attr.call.callee_arr != NULL));
}
int get_FuncCall_n_callees(ir_node * node) {
}
/* BINOP(OP): generates the four accessors for the two data operands of
   the binary operation OP.  op_index is the slot of the first data
   operand in the in array; the right operand follows it directly. */
#define BINOP(OP)                                        \
ir_node * get_##OP##_left(ir_node *node) {               \
  assert(node->op == op_##OP);                           \
  return get_irn_n(node, node->op->op_index);            \
}                                                        \
void set_##OP##_left(ir_node *node, ir_node *left) {     \
  assert(node->op == op_##OP);                           \
  set_irn_n(node, node->op->op_index, left);             \
}                                                        \
ir_node *get_##OP##_right(ir_node *node) {               \
  assert(node->op == op_##OP);                           \
  return get_irn_n(node, node->op->op_index + 1);        \
}                                                        \
void set_##OP##_right(ir_node *node, ir_node *right) {   \
  assert(node->op == op_##OP);                           \
  set_irn_n(node, node->op->op_index + 1, right);        \
}

/* UNOP(OP): generates the accessor pair for the single data operand of
   the unary operation OP. */
#define UNOP(OP)                                         \
ir_node *get_##OP##_op(ir_node *node) {                  \
  assert(node->op == op_##OP);                           \
  return get_irn_n(node, node->op->op_index);            \
}                                                        \
void set_##OP##_op (ir_node *node, ir_node *op) {        \
  assert(node->op == op_##OP);                           \
  set_irn_n(node, node->op->op_index, op);               \
}
BINOP(Add)
if (op == op_Phi)
return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
- (get_irn_arity(n) > 0));
+ (get_irn_arity(n) > 0));
return 0;
}
assert(n);
return ((get_irn_op(n) == op_Phi) &&
- (get_irn_arity(n) == 0) &&
- (get_irg_phase_state(get_irn_irg(n)) == phase_building));
+ (get_irn_arity(n) == 0) &&
+ (get_irg_phase_state(get_irn_irg(n)) == phase_building));
}
ir_node **
set_irn_n(node, pos, pred);
}
+
+int is_memop(ir_node *node) {
+ return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
+}
+
+ir_node *get_memop_mem (ir_node *node) {
+ assert(is_memop(node));
+ return get_irn_n(node, 0);
+}
+
+void set_memop_mem (ir_node *node, ir_node *mem) {
+ assert(is_memop(node));
+ set_irn_n(node, 0, mem);
+}
+
+ir_node *get_memop_ptr (ir_node *node) {
+ assert(is_memop(node));
+ return get_irn_n(node, 1);
+}
+
+void set_memop_ptr (ir_node *node, ir_node *ptr) {
+ assert(is_memop(node));
+ set_irn_n(node, 1, ptr);
+}
+
ir_node *
get_Load_mem (ir_node *node) {
assert (node->op == op_Load);
set_irn_n(node, 1, ptr);
}
+ir_mode *
+get_Load_mode (ir_node *node) {
+ assert (node->op == op_Load);
+ return node->attr.load.load_mode;
+}
+
+void
+set_Load_mode (ir_node *node, ir_mode *mode) {
+ assert (node->op == op_Load);
+ node->attr.load.load_mode = mode;
+}
+
+ent_volatility
+get_Load_volatility (ir_node *node) {
+ assert (node->op == op_Load);
+ return node->attr.load.volatility;
+}
+
+void
+set_Load_volatility (ir_node *node, ent_volatility volatility) {
+ assert (node->op == op_Load);
+ node->attr.load.volatility = volatility;
+}
+
ir_node *
get_Store_mem (ir_node *node) {
set_irn_n(node, 2, value);
}
+ent_volatility
+get_Store_volatility (ir_node *node) {
+ assert (node->op == op_Store);
+ return node->attr.store.volatility;
+}
+
+void
+set_Store_volatility (ir_node *node, ent_volatility volatility) {
+ assert (node->op == op_Store);
+ node->attr.store.volatility = volatility;
+}
+
+
ir_node *
get_Alloc_mem (ir_node *node) {
assert (node->op == op_Alloc);
void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
assert(node->op == op_Filter && node->attr.filter.in_cg &&
- 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
+ 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
node->attr.filter.in_cg[pos + 1] = pred;
}
int get_Filter_n_cg_preds(ir_node *node) {
ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
int arity;
assert(node->op == op_Filter && node->attr.filter.in_cg &&
- 0 <= pos);
+ 0 <= pos);
arity = ARR_LEN(node->attr.filter.in_cg);
assert(pos < arity - 1);
return node->attr.filter.in_cg[pos + 1];
if (!get_opt_normalize()) return node;
- node = skip_nop(node);
+ node = skip_Id(node);
if (get_irn_op(node) == op_Proj) {
- pred = skip_nop(get_Proj_pred(node));
+ pred = skip_Id(get_Proj_pred(node));
if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
- pred = skip_nop(skip_Tuple(pred));
+ pred = skip_Id(skip_Tuple(pred));
if (get_irn_op(pred) == op_Tuple)
return get_Tuple_pred(pred, get_Proj_proj(node));
}
/** Returns the operand of node if node is a Cast (with any Id chain on
    the operand resolved); returns node itself otherwise. */
ir_node *skip_Cast (ir_node *node) {
  if (!node || get_irn_op(node) != op_Cast)
    return node;
  return skip_Id(get_irn_n(node, 0));
}
than any other approach, as Id chains are resolved and all point to the real node, or
all id's are self loops. */
ir_node *
-skip_nop (ir_node *node) {
+skip_Id (ir_node *node) {
/* don't assert node !!! */
if (!get_opt_normalize()) return node;
assert (get_irn_arity (node) > 0);
node->in[0+1] = node;
- res = skip_nop(rem_pred);
+ res = skip_Id(rem_pred);
if (res->op == op_Id) /* self-loop */ return node;
node->in[0+1] = res;
than any other approach, as Id chains are resolved and all point to the real node, or
all id's are self loops. */
ir_node *
-skip_nop (ir_node *node) {
+skip_Id (ir_node *node) {
ir_node *pred;
/* don't assert node !!! */
+ if (!node || (node->op != op_Id)) return node;
+
if (!get_opt_normalize()) return node;
/* Don't use get_Id_pred: We get into an endless loop for
self-referencing Ids. */
- if (node && (node->op == op_Id) && (node != (pred = node->in[0+1]))) {
+ pred = node->in[0+1];
+
+ if (pred->op != op_Id) return pred;
+
+ if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
ir_node *rem_pred, *res;
if (pred->op != op_Id) return pred; /* shortcut */
assert (get_irn_arity (node) > 0);
- node->in[0+1] = node;
- res = skip_nop(rem_pred);
+ node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
+ res = skip_Id(rem_pred);
if (res->op == op_Id) /* self-loop */ return node;
- node->in[0+1] = res;
+ node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
return res;
} else {
return node;
}
#endif
-ir_node *
-skip_Id (ir_node *node) {
- return skip_nop(node);
-}
-
int
is_Bad (ir_node *node) {
assert(node);
return is_ip_cfopcode(get_irn_op(node));
}
-ir_graph *get_ip_cfop_irg(ir_node *n) {
- return get_irn_irg(n);
-}
-
/* Returns true if the operation can change the control flow because
of an exception. */
int
}
}
+/* Returns true if the operation is a forking control flow operation. */
+int
+is_forking_op(ir_node *node) {
+ return is_op_forking(get_irn_op(node));
+}
+
#ifdef DEBUG_libfirm
void dump_irn (ir_node *n) {
int i, arity = get_irn_arity(n);
if (!is_Block(n)) {
ir_node *pred = get_irn_n(n, -1);
printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
- get_irn_node_nr(pred), (void *)pred);
+ get_irn_node_nr(pred), (void *)pred);
}
printf(" preds: \n");
for (i = 0; i < arity; ++i) {
ir_node *pred = get_irn_n(n, i);
printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
- get_irn_node_nr(pred), (void *)pred);
+ get_irn_node_nr(pred), (void *)pred);
}
}