*/
#ifdef HAVE_CONFIG_H
-# include <config.h>
+# include "config.h"
+#endif
+
+#ifdef HAVE_STRING_H
+# include <string.h>
#endif
-#include <string.h>
#include "ident.h"
#include "irnode_t.h"
"type_tag", "size", "addr_name", "addr_ent"
};
+/**
+ * Indicates whether additional data can still be registered to ir nodes.
+ * If set to 1, registration is no longer possible.
+ */
+static int forbid_new_data = 0;
+
+/**
+ * The amount of additional space for custom data to be allocated upon
+ * creating a new node.
+ */
+unsigned firm_add_node_size = 0;
+
+
+/* Register additional per-node data space; returns the accumulated total extra size. */
+unsigned register_additional_node_data(unsigned size) {
+ assert(!forbid_new_data && "Too late to register additional node data");
+
+ if (forbid_new_data)
+ return 0;
+
+ return firm_add_node_size += size;
+}
+
+
void
-init_irnode (void)
-{
+init_irnode(void) {
+ /* Forbid the addition of new data to an ir node. */
+ forbid_new_data = 1;
}
/*
int arity, ir_node **in)
{
ir_node *res;
- int node_size = offsetof (ir_node, attr) + op->attr_size;
+ size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
+ char *p;
assert(irg && op && mode);
- res = (ir_node *) obstack_alloc (irg->obst, node_size);
- memset((void *)res, 0, node_size);
+ p = obstack_alloc (irg->obst, node_size);
+ memset(p, 0, node_size);
+ res = (ir_node *) (p + firm_add_node_size);
res->kind = k_ir_node;
res->op = op;
return res;
}
-/* Copies all attributes stored in the old node to the new node.
- Assumes both have the same opcode and sufficient size. */
-void
-copy_attrs (const ir_node *old_node, ir_node *new_node) {
- assert(get_irn_op(old_node) == get_irn_op(new_node));
- memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
-}
-
/*-- getting some parameters from ir_nodes --*/
int
}
ir_node *
-(get_irn_intra_n)(ir_node *node, int n) {
+(get_irn_intra_n)(const ir_node *node, int n) {
return __get_irn_intra_n (node, n);
}
ir_node *
-(get_irn_inter_n)(ir_node *node, int n) {
+(get_irn_inter_n)(const ir_node *node, int n) {
return __get_irn_inter_n (node, n);
}
-ir_node *(*__get_irn_n)(ir_node *node, int n) = __get_irn_intra_n;
+ir_node *(*__get_irn_n)(const ir_node *node, int n) = __get_irn_intra_n;
ir_node *
-(get_irn_n)(ir_node *node, int n) {
+(get_irn_n)(const ir_node *node, int n) {
return __get_irn_n(node, n);
}
if (get_irn_op(node) == op_Tuple)
return;
- assert(node && get_op_pinned(get_irn_op(node)) == op_pin_state_exc_pinned);
+ assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
assert(state == op_pin_state_pinned || state == op_pin_state_floats);
node->attr.except.pin_state = state;
void firm_set_irn_section(ir_node *n, struct section *s) {
n->sec = s;
}
+#else
+/* Dummy implementations needed for firmjni when heap analysis is disabled. */
+struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
+void set_irn_abst_value(ir_node *n, struct abstval *os) {}
+struct section *firm_get_irn_section(ir_node *n) { return NULL; }
+void firm_set_irn_section(ir_node *n, struct section *s) {}
#endif /* DO_HEAPANALYSIS */
return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
}
-type *
-get_irn_funccall_attr (ir_node *node)
-{
- assert (node->op == op_FuncCall);
- return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
-}
-
sel_attr
get_irn_sel_attr (ir_node *node)
{
get_irn_except_attr (ir_node *node)
{
assert (node->op == op_Div || node->op == op_Quot ||
- node->op == op_DivMod || node->op == op_Mod);
+ node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
return node->attr.except;
}
/* this works for all except Block */
ir_node *
-get_nodes_block (ir_node *node) {
+get_nodes_block (const ir_node *node) {
assert (!(node->op == op_Block));
return get_irn_n(node, -1);
}
node->attr.block.in_cg = NULL;
}
+ir_node *(set_Block_dead)(ir_node *block) {
+ return __set_Block_dead(block);
+}
+
+int (is_Block_dead)(const ir_node *block) {
+ return __is_Block_dead(block);
+}
+
void
set_Start_irg(ir_node *node, ir_graph *irg) {
assert(node->op == op_Start);
void
set_Const_type (ir_node *node, type *tp) {
assert (node->op == op_Const);
- if (tp != unknown_type) {
+ if (tp != firm_unknown_type) {
assert (is_atomic_type(tp));
assert (get_type_mode(tp) == get_irn_mode(node));
}
node->attr.callbegin.call = call;
}
-ir_node *
-get_FuncCall_ptr (ir_node *node) {
- assert (node->op == op_FuncCall);
- return get_irn_n(node, 0);
-}
-
-void
-set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
- assert (node->op == op_FuncCall);
- set_irn_n(node, 0, ptr);
-}
-
-ir_node **
-get_FuncCall_param_arr (ir_node *node) {
- assert (node->op == op_FuncCall);
- return (ir_node **)&get_irn_in(node)[FUNCCALL_PARAM_OFFSET];
-}
-
-int
-get_FuncCall_n_params (ir_node *node) {
- assert (node->op == op_FuncCall);
- return (get_irn_arity(node) - FUNCCALL_PARAM_OFFSET);
-}
-
-int
-get_FuncCall_arity (ir_node *node) {
- assert (node->op == op_FuncCall);
- return get_FuncCall_n_params(node);
-}
-
-/* void
-set_FuncCall_arity (ir_node *node, ir_node *arity) {
- assert (node->op == op_FuncCall);
-}
-*/
-
-ir_node *
-get_FuncCall_param (ir_node *node, int pos) {
- assert (node->op == op_FuncCall);
- return get_irn_n(node, pos + FUNCCALL_PARAM_OFFSET);
-}
-
-void
-set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
- assert (node->op == op_FuncCall);
- set_irn_n(node, pos + FUNCCALL_PARAM_OFFSET, param);
-}
-
-type *
-get_FuncCall_type (ir_node *node) {
- assert (node->op == op_FuncCall);
- return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
-}
-
-void
-set_FuncCall_type (ir_node *node, type *tp) {
- assert (node->op == op_FuncCall);
- assert (is_method_type(tp));
- node->attr.call.cld_tp = tp;
-}
-
-int FuncCall_has_callees(ir_node *node) {
- return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
- (node->attr.call.callee_arr != NULL));
-}
-
-int get_FuncCall_n_callees(ir_node * node) {
- assert(node->op == op_FuncCall && node->attr.call.callee_arr);
- return ARR_LEN(node->attr.call.callee_arr);
-}
-
-entity * get_FuncCall_callee(ir_node * node, int pos) {
- assert(node->op == op_FuncCall && node->attr.call.callee_arr);
- return node->attr.call.callee_arr[pos];
-}
-
-void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
- assert(node->op == op_FuncCall);
- if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
- node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
- }
- memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
-}
-
-void remove_FuncCall_callee_arr(ir_node * node) {
- assert(node->op == op_FuncCall);
- node->attr.call.callee_arr = NULL;
-}
-
#define BINOP(OP) \
ir_node * get_##OP##_left(ir_node *node) { \
assert (node->op->opar == oparity_binary);
}
-int is_Phi (ir_node *n) {
+int is_Phi (const ir_node *n) {
ir_op *op;
assert(n);
return 0;
}
-int is_Phi0 (ir_node *n) {
+int is_Phi0 (const ir_node *n) {
assert(n);
return ((get_irn_op(n) == op_Phi) &&
return node->attr.filter.in_cg[pos + 1];
}
+/* Mux support */
+ir_node *get_Mux_sel (ir_node *node) {
+ assert(node->op == op_Mux);
+ return node->in[1];
+}
+void set_Mux_sel (ir_node *node, ir_node *sel) {
+ assert(node->op == op_Mux);
+ node->in[1] = sel;
+}
+
+ir_node *get_Mux_false (ir_node *node) {
+ assert(node->op == op_Mux);
+ return node->in[2];
+}
+void set_Mux_false (ir_node *node, ir_node *ir_false) {
+ assert(node->op == op_Mux);
+ node->in[2] = ir_false;
+}
+
+ir_node *get_Mux_true (ir_node *node) {
+ assert(node->op == op_Mux);
+ return node->in[3];
+}
+void set_Mux_true (ir_node *node, ir_node *ir_true) {
+ assert(node->op == op_Mux);
+ node->in[3] = ir_true;
+}
+
ir_graph *
-get_irn_irg(ir_node *node) {
- if (get_irn_op(node) != op_Block)
+get_irn_irg(const ir_node *node) {
+ if (! is_Block(node))
node = get_nodes_block(node);
if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
node = get_nodes_block(node);
return is_op_forking(get_irn_op(node));
}
+
#ifdef DEBUG_libfirm
void dump_irn (ir_node *n) {
int i, arity = get_irn_arity(n);