*/
#ifdef HAVE_CONFIG_H
-# include <config.h>
+# include "config.h"
+#endif
+
+#ifdef HAVE_STRING_H
+# include <string.h>
#endif
-#include <string.h>
#include "ident.h"
#include "irnode_t.h"
#include "irdump.h"
#include "irop_t.h"
#include "irprog_t.h"
+#include "iredges_t.h"
-#include "firmstat.h"
+#include "irhooks.h"
/* some constants fixing the positions of nodes predecessors
in the in array */
-#define CALL_PARAM_OFFSET 2
-#define SEL_INDEX_OFFSET 2
-#define RETURN_RESULT_OFFSET 1 /* mem is not a result */
-#define END_KEEPALIVE_OFFSET 0
+#define CALL_PARAM_OFFSET 2
+#define FUNCCALL_PARAM_OFFSET 1
+#define SEL_INDEX_OFFSET 2
+#define RETURN_RESULT_OFFSET 1 /* mem is not a result */
+#define END_KEEPALIVE_OFFSET 0
static const char *pnc_name_arr [] = {
"False", "Eq", "Lt", "Le",
"type_tag", "size", "addr_name", "addr_ent"
};
+/**
+ * Indicates whether additional data can be registered to ir nodes.
+ * If set to 1, this is no longer possible.
+ */
+static int forbid_new_data = 0;
+
+/**
+ * The amount of additional space for custom data to be allocated upon
+ * creating a new node.
+ */
+unsigned firm_add_node_size = 0;
+
+
+/* register new space for every node */
+unsigned register_additional_node_data(unsigned size) {
+ assert(!forbid_new_data && "Too late to register additional node data");
+
+ if (forbid_new_data)
+ return 0;
+
+ return firm_add_node_size += size;
+}
+
+
void
-init_irnode (void)
-{
+init_irnode(void) {
+ /* Forbid the addition of new data to an ir node. */
+ forbid_new_data = 1;
}
/*
int arity, ir_node **in)
{
ir_node *res;
- int node_size = offsetof (ir_node, attr) + op->attr_size;
+ size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
+ char *p;
assert(irg && op && mode);
- res = (ir_node *) obstack_alloc (irg->obst, node_size);
- memset((void *)res, 0, node_size);
+ p = obstack_alloc (irg->obst, node_size);
+ memset(p, 0, node_size);
+ res = (ir_node *) (p + firm_add_node_size);
- res->kind = k_ir_node;
- res->op = op;
- res->mode = mode;
+ res->kind = k_ir_node;
+ res->op = op;
+ res->mode = mode;
res->visited = 0;
- res->link = NULL;
+ res->link = NULL;
if (arity < 0) {
res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
} else {
res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
}
+
res->in[0] = block;
set_irn_dbg_info(res, db);
res->out = NULL;
res->node_nr = get_irp_new_node_nr();
#endif
- stat_new_node(res);
+#ifdef FIRM_EDGES_INPLACE
+ {
+ int i, n;
+ int not_a_block = !is_Block(res);
- return res;
-}
+ INIT_LIST_HEAD(&res->edge_info.outs_head);
-/* Copies all attributes stored in the old node to the new node.
- Assumes both have the same opcode and sufficient size. */
-void
-copy_attrs (const ir_node *old_node, ir_node *new_node) {
- assert(get_irn_op(old_node) == get_irn_op(new_node));
- memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
+ for(i = 0, n = arity + not_a_block; i < n; ++i)
+ edges_notify_edge(res, i - not_a_block, res->in[i], NULL, irg);
+ }
+#endif
+
+ hook_new_node(irg, res);
+
+ return res;
}
/*-- getting some parameters from ir_nodes --*/
int
(is_ir_node)(const void *thing) {
- return __is_ir_node(thing);
+ return _is_ir_node(thing);
}
int
(get_irn_intra_arity)(const ir_node *node) {
- return __get_irn_intra_arity(node);
+ return _get_irn_intra_arity(node);
}
int
(get_irn_inter_arity)(const ir_node *node) {
- return __get_irn_inter_arity(node);
+ return _get_irn_inter_arity(node);
}
+int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
+
int
(get_irn_arity)(const ir_node *node) {
- return __get_irn_arity(node);
+ return _get_irn_arity(node);
}
/* Returns the array with ins. This array is shifted with respect to the
ir_node **
get_irn_in (const ir_node *node) {
assert(node);
- if (interprocedural_view) { /* handle Filter and Block specially */
+ if (get_interprocedural_view()) { /* handle Filter and Block specially */
if (get_irn_opcode(node) == iro_Filter) {
assert(node->attr.filter.in_cg);
return node->attr.filter.in_cg;
set_irn_in (ir_node *node, int arity, ir_node **in) {
ir_node *** arr;
assert(node);
- if (interprocedural_view) { /* handle Filter and Block specially */
+ if (get_interprocedural_view()) { /* handle Filter and Block specially */
if (get_irn_opcode(node) == iro_Filter) {
assert(node->attr.filter.in_cg);
arr = &node->attr.filter.in_cg;
}
ir_node *
-(get_irn_intra_n)(ir_node *node, int n) {
- return __get_irn_intra_n (node, n);
+(get_irn_intra_n)(const ir_node *node, int n) {
+ return _get_irn_intra_n (node, n);
}
ir_node *
-(get_irn_inter_n)(ir_node *node, int n) {
- return __get_irn_inter_n (node, n);
+(get_irn_inter_n)(const ir_node *node, int n) {
+ return _get_irn_inter_n (node, n);
}
+ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
+
ir_node *
-(get_irn_n)(ir_node *node, int n) {
- return __get_irn_n (node, n);
+(get_irn_n)(const ir_node *node, int n) {
+ return _get_irn_n(node, n);
}
void
set_irn_n (ir_node *node, int n, ir_node *in) {
- assert(node && -1 <= n && n < get_irn_arity(node));
+ assert(node && node->kind == k_ir_node);
+ assert(-1 <= n);
+ assert(n < get_irn_arity(node));
+ assert(in && in->kind == k_ir_node);
+
if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
/* Change block pred in both views! */
node->in[n + 1] = in;
node->attr.filter.in_cg[n + 1] = in;
return;
}
- if (interprocedural_view) { /* handle Filter and Block specially */
+ if (get_interprocedural_view()) { /* handle Filter and Block specially */
if (get_irn_opcode(node) == iro_Filter) {
assert(node->attr.filter.in_cg);
node->attr.filter.in_cg[n + 1] = in;
}
/* else fall through */
}
+
+ /* Call the hook */
+ hook_set_irn_n(node, n, in, node->in[n + 1]);
+
+#ifdef FIRM_EDGES_INPLACE
+ /* Here, we rely on src and tgt being in the current ir graph */
+ edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
+#endif
+
node->in[n + 1] = in;
}
ir_mode *
(get_irn_mode)(const ir_node *node) {
- return __get_irn_mode(node);
+ return _get_irn_mode(node);
}
void
(set_irn_mode)(ir_node *node, ir_mode *mode)
{
- __set_irn_mode(node, mode);
+ _set_irn_mode(node, mode);
}
modecode
ir_op *
(get_irn_op)(const ir_node *node)
{
- return __get_irn_op(node);
+ return _get_irn_op(node);
}
/* should be private to the library: */
opcode
(get_irn_opcode)(const ir_node *node)
{
- return __get_irn_opcode(node);
+ return _get_irn_opcode(node);
}
const char *
unsigned long
(get_irn_visited)(const ir_node *node)
{
- return __get_irn_visited(node);
+ return _get_irn_visited(node);
}
void
(set_irn_visited)(ir_node *node, unsigned long visited)
{
- __set_irn_visited(node, visited);
+ _set_irn_visited(node, visited);
}
void
(mark_irn_visited)(ir_node *node) {
- __mark_irn_visited(node);
+ _mark_irn_visited(node);
}
int
(irn_not_visited)(const ir_node *node) {
- return __irn_not_visited(node);
+ return _irn_not_visited(node);
}
int
(irn_visited)(const ir_node *node) {
- return __irn_visited(node);
+ return _irn_visited(node);
}
void
(set_irn_link)(ir_node *node, void *link) {
- __set_irn_link(node, link);
+ _set_irn_link(node, link);
}
void *
(get_irn_link)(const ir_node *node) {
- return __get_irn_link(node);
+ return _get_irn_link(node);
}
+op_pin_state
+(get_irn_pinned)(const ir_node *node) {
+ return _get_irn_pinned(node);
+}
+
+void set_irn_pinned(ir_node *node, op_pin_state state) {
+  /* due to optimization an op may be turned into a Tuple */
+ if (get_irn_op(node) == op_Tuple)
+ return;
+
+ assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
+ assert(state == op_pin_state_pinned || state == op_pin_state_floats);
+
+ node->attr.except.pin_state = state;
+}
+
+#ifdef DO_HEAPANALYSIS
+/* Access the abstract interpretation information of a node.
+ Returns NULL if no such information is available. */
+struct abstval *get_irn_abst_value(ir_node *n) {
+ return n->av;
+}
+/* Set the abstract interpretation information of a node. */
+void set_irn_abst_value(ir_node *n, struct abstval *os) {
+ n->av = os;
+}
+struct section *firm_get_irn_section(ir_node *n) {
+ return n->sec;
+}
+void firm_set_irn_section(ir_node *n, struct section *s) {
+ n->sec = s;
+}
+#else
+/* Dummies needed for firmjni. */
+struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
+void set_irn_abst_value(ir_node *n, struct abstval *os) {}
+struct section *firm_get_irn_section(ir_node *n) { return NULL; }
+void firm_set_irn_section(ir_node *n, struct section *s) {}
+#endif /* DO_HEAPANALYSIS */
+
+
/* Outputs a unique number for this node */
long
get_irn_node_nr(const ir_node *node) {
#ifdef DEBUG_libfirm
return node->node_nr;
#else
- return (long)&node;
+ return (long)node;
#endif
}
return node->attr.a;
}
-type *
+free_attr
get_irn_free_attr (ir_node *node)
{
assert (node->op == op_Free);
- return node->attr.f = skip_tid(node->attr.f);
+ return node->attr.f;
}
symconst_attr
return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
}
-type *
-get_irn_funccall_attr (ir_node *node)
-{
- assert (node->op == op_FuncCall);
- return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
-}
-
sel_attr
get_irn_sel_attr (ir_node *node)
{
return node->attr.block;
}
+load_attr
+get_irn_load_attr (ir_node *node)
+{
+ assert (node->op == op_Load);
+ return node->attr.load;
+}
+
+store_attr
+get_irn_store_attr (ir_node *node)
+{
+ assert (node->op == op_Store);
+ return node->attr.store;
+}
+
+except_attr
+get_irn_except_attr (ir_node *node)
+{
+ assert (node->op == op_Div || node->op == op_Quot ||
+ node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
+ return node->attr.except;
+}
+
/** manipulate fields of individual nodes **/
/* this works for all except Block */
ir_node *
-get_nodes_Block (ir_node *node) {
+get_nodes_block (const ir_node *node) {
assert (!(node->op == op_Block));
return get_irn_n(node, -1);
}
void
-set_nodes_Block (ir_node *node, ir_node *block) {
+set_nodes_block (ir_node *node, ir_node *block) {
assert (!(node->op == op_Block));
set_irn_n(node, -1, block);
}
ir_node *
get_Block_cfgpred (ir_node *node, int pos) {
- assert(node);
- assert (node->op == op_Block);
assert(-1 <= pos && pos < get_irn_arity(node));
+ assert(node->op == op_Block);
return get_irn_n(node, pos);
}
node->attr.block.graph_arr[pos+1] = value;
}
-/* handler handling for Blocks * /
-void
-set_Block_handler (ir_node *block, ir_node *handler) {
- assert ((block->op == op_Block));
- assert ((handler->op == op_Block));
- block->attr.block.handler_entry = handler;
-}
-
-ir_node *
-get_Block_handler (ir_node *block) {
- assert ((block->op == op_Block));
- return (block->attr.block.handler_entry);
-}
-
-/ * handler handling for Nodes * /
-void
-set_Node_handler (ir_node *node, ir_node *handler) {
- set_Block_handler (get_nodes_Block (node), handler);
-}
-
-ir_node *
-get_Node_handler (ir_node *node) {
- return (get_Block_handler (get_nodes_Block (node)));
-}
-
-/ * exc_t handling for Blocks * /
-void set_Block_exc (ir_node *block, exc_t exc) {
- assert ((block->op == op_Block));
- block->attr.block.exc = exc;
-}
-
-exc_t get_Block_exc (ir_node *block) {
- assert ((block->op == op_Block));
- return (block->attr.block.exc);
-}
-
-/ * exc_t handling for Nodes * /
-void set_Node_exc (ir_node *node, exc_t exc) {
- set_Block_exc (get_nodes_Block (node), exc);
-}
-
-exc_t get_Node_exc (ir_node *node) {
- return (get_Block_exc (get_nodes_Block (node)));
-}
-*/
-
void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
assert(node->op == op_Block);
if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
{
/* Fix backedge array. fix_backedges operates depending on
interprocedural_view. */
- bool ipv = interprocedural_view;
- interprocedural_view = true;
+ int ipv = get_interprocedural_view();
+ set_interprocedural_view(true);
fix_backedges(current_ir_graph->obst, node);
- interprocedural_view = ipv;
+ set_interprocedural_view(ipv);
}
}
memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
node->attr.block.in_cg = NULL;
}
-/* Start references the irg it is in. */
-ir_graph *
-get_Start_irg(ir_node *node) {
- return get_irn_irg(node);
+ir_node *(set_Block_dead)(ir_node *block) {
+ return _set_Block_dead(block);
+}
+
+int (is_Block_dead)(const ir_node *block) {
+ return _is_Block_dead(block);
}
void
in array afterwards ... */
}
-ir_graph *get_EndReg_irg (ir_node *end) {
- return get_irn_irg(end);
-}
-
-ir_graph *get_EndExcept_irg (ir_node *end) {
- return get_irn_irg(end);
-}
/*
> Implementing the case construct (which is where the constant Proj node is
set_irn_n(node, 1, exo_ptr);
}
-tarval *get_Const_tarval (ir_node *node) {
- assert (node->op == op_Const);
- return node->attr.con.tv;
+tarval *(get_Const_tarval)(ir_node *node) {
+ return _get_Const_tarval(node);
}
void
node->attr.con.tv = con;
}
+cnst_classify_t (classify_Const)(ir_node *node)
+{
+ return _classify_Const(node);
+}
+
/* The source language type. Must be an atomic type. Mode of type must
be mode of node. For tarvals from entities type must be pointer to
void
set_Const_type (ir_node *node, type *tp) {
assert (node->op == op_Const);
- if (tp != unknown_type) {
+ if (tp != firm_unknown_type) {
assert (is_atomic_type(tp));
assert (get_type_mode(tp) == get_irn_mode(node));
- assert (!tarval_is_entity(get_Const_tarval(node)) ||
- (is_pointer_type(tp) &&
- (get_pointer_points_to_type(tp) ==
- get_entity_type(get_tarval_entity(get_Const_tarval(node))))));
}
-
node->attr.con.tp = tp;
}
node->attr.i.sym.entity_p = ent;
}
-
-symconst_symbol
-get_SymConst_type_or_id (ir_node *node) {
+union symconst_symbol
+get_SymConst_symbol (ir_node *node) {
assert (node->op == op_SymConst);
return node->attr.i.sym;
}
void
-set_SymConst_type_or_id (ir_node *node, symconst_symbol sym) {
+set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
assert (node->op == op_SymConst);
//memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
node->attr.i.sym = sym;
}
+type *
+get_SymConst_value_type (ir_node *node) {
+ assert (node->op == op_SymConst);
+ if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
+ return node->attr.i.tp;
+}
+
+void
+set_SymConst_value_type (ir_node *node, type *tp) {
+ assert (node->op == op_SymConst);
+ node->attr.i.tp = tp;
+}
+
ir_node *
get_Sel_mem (ir_node *node) {
assert (node->op == op_Sel);
void
set_Call_type (ir_node *node, type *tp) {
assert (node->op == op_Call);
- assert (is_method_type(tp));
+ assert ((get_unknown_type() == tp) || is_Method_type(tp));
node->attr.call.cld_tp = tp;
}
int Call_has_callees(ir_node *node) {
- return (node->attr.call.callee_arr != NULL);
+ assert(node && node->op == op_Call);
+ return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
+ (node->attr.call.callee_arr != NULL));
}
int get_Call_n_callees(ir_node * node) {
- assert(node->op == op_Call && node->attr.call.callee_arr);
+ assert(node && node->op == op_Call && node->attr.call.callee_arr);
return ARR_LEN(node->attr.call.callee_arr);
}
entity * get_Call_callee(ir_node * node, int pos) {
- assert(node->op == op_Call && node->attr.call.callee_arr);
+ assert(pos >= 0 && pos < get_Call_n_callees(node));
return node->attr.call.callee_arr[pos];
}
-void set_Call_callee_arr(ir_node * node, int n, entity ** arr) {
+void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
assert(node->op == op_Call);
if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
assert(node->op == op_CallBegin);
set_irn_n(node, 0, ptr);
}
-ir_graph * get_CallBegin_irg (ir_node *node) {
- return get_irn_irg(node);
-}
ir_node * get_CallBegin_call (ir_node *node) {
assert(node->op == op_CallBegin);
return node->attr.callbegin.call;
node->attr.callbegin.call = call;
}
-ir_node *
-get_FuncCall_ptr (ir_node *node) {
- assert (node->op == op_FuncCall);
- return get_irn_n(node, 0);
-}
-
-void
-set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
- assert (node->op == op_FuncCall);
- set_irn_n(node, 0, ptr);
-}
-
-ir_node **
-get_FuncCall_param_arr (ir_node *node) {
- assert (node->op == op_FuncCall);
- return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET];
-}
-
-int
-get_FuncCall_n_params (ir_node *node) {
- assert (node->op == op_FuncCall);
- return (get_irn_arity(node) - CALL_PARAM_OFFSET);
-}
-
-int
-get_FuncCall_arity (ir_node *node) {
- assert (node->op == op_FuncCall);
- return get_FuncCall_n_params(node);
-}
-
-/* void
-set_FuncCall_arity (ir_node *node, ir_node *arity) {
- assert (node->op == op_FuncCall);
-}
-*/
-
-ir_node *
-get_FuncCall_param (ir_node *node, int pos) {
- assert (node->op == op_FuncCall);
- return get_irn_n(node, pos + CALL_PARAM_OFFSET);
-}
-
-void
-set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
- assert (node->op == op_FuncCall);
- set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
-}
-
-type *
-get_FuncCall_type (ir_node *node) {
- assert (node->op == op_FuncCall);
- return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
-}
-
-void
-set_FuncCall_type (ir_node *node, type *tp) {
- assert (node->op == op_FuncCall);
- assert (is_method_type(tp));
- node->attr.call.cld_tp = tp;
-}
-
-int FuncCall_has_callees(ir_node *node) {
- return (node->attr.call.callee_arr != NULL);
-}
-
-int get_FuncCall_n_callees(ir_node * node) {
- assert(node->op == op_FuncCall && node->attr.call.callee_arr);
- return ARR_LEN(node->attr.call.callee_arr);
-}
-
-entity * get_FuncCall_callee(ir_node * node, int pos) {
- assert(node->op == op_FuncCall && node->attr.call.callee_arr);
- return node->attr.call.callee_arr[pos];
-}
-
-void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
- assert(node->op == op_FuncCall);
- if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
- node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
- }
- memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
-}
-
-void remove_FuncCall_callee_arr(ir_node * node) {
- assert(node->op == op_FuncCall);
- node->attr.call.callee_arr = NULL;
-}
-
#define BINOP(OP) \
ir_node * get_##OP##_left(ir_node *node) { \
}
int
-is_unop (ir_node *node) {
- return (node->op->opar == oparity_unary);
+(is_unop)(const ir_node *node) {
+ return _is_unop(node);
}
ir_node *
}
int
-is_binop (ir_node *node) {
- return (node->op->opar == oparity_binary);
+(is_binop)(const ir_node *node) {
+ return _is_binop(node);
}
ir_node *
assert (node->op->opar == oparity_binary);
}
-int is_Phi (ir_node *n) {
+int is_Phi (const ir_node *n) {
ir_op *op;
assert(n);
op = get_irn_op(n);
- if (op == op_Filter) return interprocedural_view;
+ if (op == op_Filter) return get_interprocedural_view();
if (op == op_Phi)
return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
return 0;
}
-int is_Phi0 (ir_node *n) {
+int is_Phi0 (const ir_node *n) {
assert(n);
return ((get_irn_op(n) == op_Phi) &&
set_irn_n(node, pos, pred);
}
+
+int is_memop(ir_node *node) {
+ return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
+}
+
+ir_node *get_memop_mem (ir_node *node) {
+ assert(is_memop(node));
+ return get_irn_n(node, 0);
+}
+
+void set_memop_mem (ir_node *node, ir_node *mem) {
+ assert(is_memop(node));
+ set_irn_n(node, 0, mem);
+}
+
+ir_node *get_memop_ptr (ir_node *node) {
+ assert(is_memop(node));
+ return get_irn_n(node, 1);
+}
+
+void set_memop_ptr (ir_node *node, ir_node *ptr) {
+ assert(is_memop(node));
+ set_irn_n(node, 1, ptr);
+}
+
ir_node *
get_Load_mem (ir_node *node) {
assert (node->op == op_Load);
set_irn_n(node, 1, ptr);
}
+ir_mode *
+get_Load_mode (ir_node *node) {
+ assert (node->op == op_Load);
+ return node->attr.load.load_mode;
+}
+
+void
+set_Load_mode (ir_node *node, ir_mode *mode) {
+ assert (node->op == op_Load);
+ node->attr.load.load_mode = mode;
+}
+
+ent_volatility
+get_Load_volatility (ir_node *node) {
+ assert (node->op == op_Load);
+ return node->attr.load.volatility;
+}
+
+void
+set_Load_volatility (ir_node *node, ent_volatility volatility) {
+ assert (node->op == op_Load);
+ node->attr.load.volatility = volatility;
+}
+
ir_node *
get_Store_mem (ir_node *node) {
set_irn_n(node, 2, value);
}
+ent_volatility
+get_Store_volatility (ir_node *node) {
+ assert (node->op == op_Store);
+ return node->attr.store.volatility;
+}
+
+void
+set_Store_volatility (ir_node *node, ent_volatility volatility) {
+ assert (node->op == op_Store);
+ node->attr.store.volatility = volatility;
+}
+
+
ir_node *
get_Alloc_mem (ir_node *node) {
assert (node->op == op_Alloc);
type *
get_Free_type (ir_node *node) {
assert (node->op == op_Free);
- return node->attr.f = skip_tid(node->attr.f);
+ return node->attr.f.type = skip_tid(node->attr.f.type);
}
void
set_Free_type (ir_node *node, type *tp) {
assert (node->op == op_Free);
- node->attr.f = tp;
+ node->attr.f.type = tp;
+}
+
+where_alloc
+get_Free_where (ir_node *node) {
+ assert (node->op == op_Free);
+ return node->attr.f.where;
+}
+
+void
+set_Free_where (ir_node *node, where_alloc where) {
+ assert (node->op == op_Free);
+ node->attr.f.where = where;
}
ir_node **
return node->attr.filter.in_cg[pos + 1];
}
+/* Mux support */
+ir_node *get_Mux_sel (ir_node *node) {
+ assert(node->op == op_Mux);
+ return node->in[1];
+}
+void set_Mux_sel (ir_node *node, ir_node *sel) {
+ assert(node->op == op_Mux);
+ node->in[1] = sel;
+}
+
+ir_node *get_Mux_false (ir_node *node) {
+ assert(node->op == op_Mux);
+ return node->in[2];
+}
+void set_Mux_false (ir_node *node, ir_node *ir_false) {
+ assert(node->op == op_Mux);
+ node->in[2] = ir_false;
+}
+
+ir_node *get_Mux_true (ir_node *node) {
+ assert(node->op == op_Mux);
+ return node->in[3];
+}
+void set_Mux_true (ir_node *node, ir_node *ir_true) {
+ assert(node->op == op_Mux);
+ node->in[3] = ir_true;
+}
+
ir_graph *
-get_irn_irg(ir_node *node) {
- if (get_irn_op(node) != op_Block)
+get_irn_irg(const ir_node *node) {
+ if (! is_Block(node))
node = get_nodes_block(node);
if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
node = get_nodes_block(node);
if (!get_opt_normalize()) return node;
- node = skip_nop(node);
+ node = skip_Id(node);
if (get_irn_op(node) == op_Proj) {
- pred = skip_nop(get_Proj_pred(node));
+ pred = skip_Id(get_Proj_pred(node));
if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
- pred = skip_nop(skip_Tuple(pred));
+ pred = skip_Id(skip_Tuple(pred));
if (get_irn_op(pred) == op_Tuple)
return get_Tuple_pred(pred, get_Proj_proj(node));
}
/** returns operand of node if node is a Cast */
ir_node *skip_Cast (ir_node *node) {
if (node && get_irn_op(node) == op_Cast) {
- return skip_nop(get_irn_n(node, 0));
+ return skip_Id(get_irn_n(node, 0));
} else {
return node;
}
than any other approach, as Id chains are resolved and all point to the real node, or
all id's are self loops. */
ir_node *
-skip_nop (ir_node *node) {
+skip_Id (ir_node *node) {
/* don't assert node !!! */
if (!get_opt_normalize()) return node;
assert (get_irn_arity (node) > 0);
node->in[0+1] = node;
- res = skip_nop(rem_pred);
+ res = skip_Id(rem_pred);
if (res->op == op_Id) /* self-loop */ return node;
node->in[0+1] = res;
than any other approach, as Id chains are resolved and all point to the real node, or
all id's are self loops. */
ir_node *
-skip_nop (ir_node *node) {
+skip_Id (ir_node *node) {
ir_node *pred;
/* don't assert node !!! */
assert (get_irn_arity (node) > 0);
node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
- res = skip_nop(rem_pred);
+ res = skip_Id(rem_pred);
if (res->op == op_Id) /* self-loop */ return node;
node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
}
#endif
-ir_node *
-skip_Id (ir_node *node) {
- return skip_nop(node);
-}
-
int
-is_Bad (ir_node *node) {
- assert(node);
- if ((node) && get_irn_opcode(node) == iro_Bad)
- return 1;
- return 0;
+(is_Bad)(const ir_node *node) {
+ return _is_Bad(node);
}
int
-is_no_Block (ir_node *node) {
- assert(node);
- return (get_irn_opcode(node) != iro_Block);
+(is_no_Block)(const ir_node *node) {
+ return _is_no_Block(node);
}
int
-is_Block (ir_node *node) {
- assert(node);
- return (get_irn_opcode(node) == iro_Block);
+(is_Block)(const ir_node *node) {
+ return _is_Block(node);
}
/* returns true if node is a Unknown node. */
int
-is_Unknown (ir_node *node) {
+is_Unknown (const ir_node *node) {
assert(node);
- return (get_irn_opcode(node) == iro_Unknown);
+ return (get_irn_op(node) == op_Unknown);
}
int
is_Proj (const ir_node *node) {
assert(node);
return node->op == op_Proj
- || (!interprocedural_view && node->op == op_Filter);
+ || (!get_interprocedural_view() && node->op == op_Filter);
}
/* Returns true if the operation manipulates control flow. */
int
-is_cfop(ir_node *node) {
+is_cfop(const ir_node *node) {
return is_cfopcode(get_irn_op(node));
}
/* Returns true if the operation manipulates interprocedural control flow:
CallBegin, EndReg, EndExcept */
-int is_ip_cfop(ir_node *node) {
+int is_ip_cfop(const ir_node *node) {
return is_ip_cfopcode(get_irn_op(node));
}
-ir_graph *get_ip_cfop_irg(ir_node *n) {
- return get_irn_irg(n);
-}
-
/* Returns true if the operation can change the control flow because
of an exception. */
int
-is_fragile_op(ir_node *node) {
+is_fragile_op(const ir_node *node) {
return is_op_fragile(get_irn_op(node));
}
}
}
+/* Returns true if the operation is a forking control flow operation. */
+int
+is_forking_op(const ir_node *node) {
+ return is_op_forking(get_irn_op(node));
+}
+
+
#ifdef DEBUG_libfirm
void dump_irn (ir_node *n) {
int i, arity = get_irn_arity(n);