in the in array */
#define CALL_PARAM_OFFSET (n_Call_max+1)
#define BUILTIN_PARAM_OFFSET (n_Builtin_max+1)
+#define ASM_PARAM_OFFSET (n_ASM_max+1)
#define SEL_INDEX_OFFSET (n_Sel_max+1)
#define RETURN_RESULT_OFFSET (n_Return_max+1)
#define END_KEEPALIVE_OFFSET 0
ir_relation code = relation & ~(ir_relation_less|ir_relation_greater);
bool less = relation & ir_relation_less;
bool greater = relation & ir_relation_greater;
- code |= (less ? ir_relation_greater : 0) | (greater ? ir_relation_less : 0);
+ code |= (less ? ir_relation_greater : ir_relation_false)
+ | (greater ? ir_relation_less : ir_relation_false);
return code;
}
-/**
- * Indicates, whether additional data can be registered to ir nodes.
- * If set to 1, this is not possible anymore.
- */
-static int forbid_new_data = 0;
-
-/**
- * The amount of additional space for custom data to be allocated upon
- * creating a new node.
- */
-unsigned firm_add_node_size = 0;
-
-
-/* register new space for every node */
-unsigned firm_register_additional_node_data(unsigned size)
-{
- assert(!forbid_new_data && "Too late to register additional node data");
-
- if (forbid_new_data)
- return 0;
-
- return firm_add_node_size += size;
-}
-
-
-void init_irnode(void)
-{
- /* Forbid the addition of new data to an ir node. */
- forbid_new_data = 1;
-}
-
-struct struct_align {
- char c;
- struct s {
- int i;
- float f;
- double d;
- } s;
-};
-
-/*
- * irnode constructor.
- * Create a new irnode in irg, with an op, mode, arity and
- * some incoming irnodes.
- * If arity is negative, a node with a dynamic array is created.
- */
ir_node *new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op,
ir_mode *mode, int arity, ir_node *const *in)
{
- ir_node *res;
- unsigned align = offsetof(struct struct_align, s) - 1;
- unsigned add_node_size = (firm_add_node_size + align) & ~align;
- size_t node_size = offsetof(ir_node, attr) + op->attr_size + add_node_size;
- char *p;
int i;
assert(irg);
assert(op);
assert(mode);
- p = (char*)obstack_alloc(irg->obst, node_size);
- memset(p, 0, node_size);
- res = (ir_node *)(p + add_node_size);
+
+ size_t const node_size = offsetof(ir_node, attr) + op->attr_size;
+ ir_node *const res = (ir_node*)OALLOCNZ(irg->obst, char, node_size);
res->kind = k_ir_node;
res->op = op;
res->in[0] = block;
set_irn_dbg_info(res, db);
- res->out = NULL;
res->node_nr = get_irp_new_node_nr();
for (i = 0; i < EDGE_KIND_LAST; ++i) {
edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
hook_new_node(irg, res);
- if (get_irg_phase_state(irg) == phase_backend) {
- be_info_new_node(res);
+ if (irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_BACKEND)) {
+ be_info_new_node(irg, res);
}
return res;
}
-/*-- getting some parameters from ir_nodes --*/
-
int (is_ir_node)(const void *thing)
{
return is_ir_node_(thing);
return get_irn_arity_(node);
}
-/* Returns the array with ins. This array is shifted with respect to the
- array accessed by get_irn_n: The block operand is at position 0 not -1.
- (@@@ This should be changed.)
- The order of the predecessors in this array is not guaranteed, except that
- lists of operands as predecessors of Block or arguments of a Call are
- consecutive. */
ir_node **get_irn_in(const ir_node *node)
{
return node->in;
memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
/* update irg flags */
- clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_OUTS | IR_GRAPH_STATE_CONSISTENT_LOOPINFO);
+ clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
}
ir_node *(get_irn_n)(const ir_node *node, int n)
node->in[n + 1] = in;
/* update irg flags */
- clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_OUTS | IR_GRAPH_STATE_CONSISTENT_LOOPINFO);
+ clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
}
int add_irn_n(ir_node *node, ir_node *in)
return get_irn_dep_(node, pos);
}
-void (set_irn_dep)(ir_node *node, int pos, ir_node *dep)
+void set_irn_dep(ir_node *node, int pos, ir_node *dep)
 {
- set_irn_dep_(node, pos, dep);
+ ir_node *old;
+ ir_graph *irg;
+
+ assert(node->deps && "dependency array not yet allocated. use add_irn_dep()");
+ assert(pos >= 0 && pos < (int)ARR_LEN(node->deps) && "dependency index out of range");
+ assert(dep != NULL);
+ old = node->deps[pos];
+ node->deps[pos] = dep;
+ irg = get_irn_irg(node);
+ if (edges_activated_kind(irg, EDGE_KIND_DEP))
+  edges_notify_edge_kind(node, pos, dep, old, EDGE_KIND_DEP, irg);
 }
-int add_irn_dep(ir_node *node, ir_node *dep)
+void add_irn_dep(ir_node *node, ir_node *dep)
{
- int res = 0;
-
- /* DEP edges are only allowed in backend phase */
- assert(get_irg_phase_state(get_irn_irg(node)) == phase_backend);
+ ir_graph *irg;
+ assert(dep != NULL);
if (node->deps == NULL) {
- node->deps = NEW_ARR_F(ir_node *, 1);
- node->deps[0] = dep;
- } else {
- int i, n;
- int first_zero = -1;
-
- for (i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
- if (node->deps[i] == NULL)
- first_zero = i;
+ node->deps = NEW_ARR_F(ir_node *, 0);
+ }
+ ARR_APP1(ir_node*, node->deps, dep);
+ irg = get_irn_irg(node);
+ if (edges_activated_kind(irg, EDGE_KIND_DEP))
+ edges_notify_edge_kind(node, ARR_LEN(node->deps)-1, dep, NULL, EDGE_KIND_DEP, irg);
+}
- if (node->deps[i] == dep)
- return i;
- }
+void delete_irn_dep(ir_node *node, ir_node *dep)
+{
+ size_t i;
+ size_t n_deps;
+ if (node->deps == NULL)
+ return;
- if (first_zero >= 0) {
- node->deps[first_zero] = dep;
- res = first_zero;
- } else {
- ARR_APP1(ir_node *, node->deps, dep);
- res = n;
+ n_deps = ARR_LEN(node->deps);
+ for (i = 0; i < n_deps; ++i) {
+ if (node->deps[i] == dep) {
+ set_irn_dep(node, i, node->deps[n_deps-1]);
+ edges_notify_edge(node, i, NULL, dep, get_irn_irg(node));
+ ARR_SHRINKLEN(node->deps, n_deps-1);
+ break;
}
}
-
- edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
-
- return res;
}
void add_irn_deps(ir_node *tgt, ir_node *src)
return get_irn_op_(node);
}
-/* should be private to the library: */
void (set_irn_op)(ir_node *node, ir_op *op)
{
set_irn_op_(node, op);
const char *get_irn_opname(const ir_node *node)
{
- assert(node);
- if (is_Phi0(node)) return "Phi0";
return get_id_str(node->op->name);
}
node->attr.except.pin_state = state;
}
-/* Outputs a unique number for this node */
long get_irn_node_nr(const ir_node *node)
{
assert(node);
return -1;
}
-/** manipulate fields of individual nodes **/
-
ir_node *(get_nodes_block)(const ir_node *node)
{
return get_nodes_block_(node);
void set_nodes_block(ir_node *node, ir_node *block)
{
- assert(node->op != op_Block);
+ assert(!is_Block(node));
set_irn_n(node, -1, block);
}
-/* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
- * from Start. If so returns frame type, else Null. */
ir_type *is_frame_pointer(const ir_node *n)
{
if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
return Block_block_visited_(node);
}
-ir_extblk *get_Block_extbb(const ir_node *block)
-{
- ir_extblk *res;
- assert(is_Block(block));
- res = block->attr.block.extblk;
- assert(res == NULL || is_ir_extbb(res));
- return res;
-}
-
-void set_Block_extbb(ir_node *block, ir_extblk *extblk)
-{
- assert(is_Block(block));
- assert(extblk == NULL || is_ir_extbb(extblk));
- block->attr.block.extblk = extblk;
-}
-
-/* returns the graph of a Block. */
ir_graph *(get_Block_irg)(const ir_node *block)
{
return get_Block_irg_(block);
add_Block_phi_(block, phi);
}
-/* Get the Block mark (single bit). */
unsigned (get_Block_mark)(const ir_node *block)
{
return get_Block_mark_(block);
}
-/* Set the Block mark (single bit). */
void (set_Block_mark)(ir_node *block, unsigned mark)
{
set_Block_mark_(block, mark);
set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
}
-/* Set new keep-alives */
void set_End_keepalives(ir_node *end, int n, ir_node *in[])
{
size_t e;
}
/* update irg flags */
- clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_OUTS);
+ clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS);
}
-/* Set new keep-alives from old keep-alives, skipping irn */
void remove_End_keepalive(ir_node *end, ir_node *irn)
{
int n = get_End_n_keepalives(end);
- int i, idx;
ir_graph *irg;
- idx = -1;
- for (i = n -1; i >= 0; --i) {
+ int idx = -1;
+ for (int i = n;;) {
+ if (i-- == 0)
+ return;
+
ir_node *old_ka = end->in[1 + END_KEEPALIVE_OFFSET + i];
/* find irn */
if (old_ka == irn) {
idx = i;
- goto found;
+ break;
}
}
- return;
-found:
irg = get_irn_irg(end);
/* remove the edge */
ARR_RESIZE(ir_node *, end->in, (n - 1) + 1 + END_KEEPALIVE_OFFSET);
/* update irg flags */
- clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_OUTS);
+ clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS);
}
-/* remove Bads, NoMems and doublets from the keep-alive set */
void remove_End_Bads_and_doublets(ir_node *end)
{
pset_new_t keeps;
pset_new_destroy(&keeps);
if (changed) {
- clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_OUTS);
+ clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS);
}
}
node->attr.symc.sym.type_p = tp;
}
-
-/* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
ir_entity *get_SymConst_entity(const ir_node *node)
{
assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
return &get_irn_in(node)[CALL_PARAM_OFFSET + 1];
}
-size_t get_Call_n_params(const ir_node *node)
+int get_Call_n_params(const ir_node *node)
{
assert(is_Call(node));
- return (size_t) (get_irn_arity(node) - CALL_PARAM_OFFSET);
+ return get_irn_arity(node) - CALL_PARAM_OFFSET;
}
ir_node *get_Call_param(const ir_node *node, int pos)
set_irn_n(node, pos + BUILTIN_PARAM_OFFSET, param);
}
-/* Returns a human readable string for the ir_builtin_kind. */
const char *get_builtin_kind_name(ir_builtin_kind kind)
{
#define X(a) case a: return #a
node->attr.call.callee_arr = NULL;
}
-/* Checks for upcast.
- *
- * Returns true if the Cast node casts a class type to a super type.
- */
int is_Cast_upcast(ir_node *node)
{
ir_type *totype = get_Cast_type(node);
return is_SubClass_of(fromtype, totype);
}
-/* Checks for downcast.
- *
- * Returns true if the Cast node casts a class type to a sub type.
- */
int is_Cast_downcast(ir_node *node)
{
ir_type *totype = get_Cast_type(node);
set_irn_n(node, node->op->op_index + 1, right);
}
-int is_Phi0(const ir_node *n)
-{
- assert(n);
-
- return ((get_irn_op(n) == op_Phi) &&
- (get_irn_arity(n) == 0) &&
- (get_irg_phase_state(get_irn_irg(n)) == phase_building));
-}
-
ir_node **get_Phi_preds_arr(ir_node *node)
{
assert(is_Phi(node));
int get_Phi_n_preds(const ir_node *node)
{
- assert(is_Phi(node) || is_Phi0(node));
- return (get_irn_arity(node));
+ assert(is_Phi(node));
+ return get_irn_arity(node);
}
ir_node *get_Phi_pred(const ir_node *node, int pos)
{
- assert(is_Phi(node) || is_Phi0(node));
+ assert(is_Phi(node));
return get_irn_n(node, pos);
}
void set_Phi_pred(ir_node *node, int pos, ir_node *pred)
{
- assert(is_Phi(node) || is_Phi0(node));
+ assert(is_Phi(node));
set_irn_n(node, pos, pred);
}
int is_memop(const ir_node *node)
{
- unsigned code = get_irn_opcode(node);
- return (code == iro_Load || code == iro_Store);
+ return is_op_uses_memory(get_irn_op(node));
}
ir_node *get_memop_mem(const ir_node *node)
{
+ const ir_op *op = get_irn_op(node);
assert(is_memop(node));
- assert(n_Load_mem == 0 && n_Store_mem == 0);
- return get_irn_n(node, 0);
+ return get_irn_n(node, op->memory_index);
}
void set_memop_mem(ir_node *node, ir_node *mem)
{
+ const ir_op *op = get_irn_op(node);
assert(is_memop(node));
- assert(n_Load_mem == 0 && n_Store_mem == 0);
- set_irn_n(node, 0, mem);
-}
-
-ir_node *get_memop_ptr(const ir_node *node)
-{
- assert(is_memop(node));
- assert(n_Load_mem == 1 && n_Store_mem == 1);
- return get_irn_n(node, 1);
-}
-
-void set_memop_ptr(ir_node *node, ir_node *ptr)
-{
- assert(is_memop(node));
- assert(n_Load_mem == 1 && n_Store_mem == 1);
- set_irn_n(node, 1, ptr);
+ set_irn_n(node, op->memory_index, mem);
}
-
ir_node **get_Sync_preds_arr(ir_node *node)
{
assert(is_Sync(node));
return (get_irn_arity(node));
}
-/*
-void set_Sync_n_preds(ir_node *node, int n_preds)
-{
- assert(is_Sync(node));
-}
-*/
-
ir_node *get_Sync_pred(const ir_node *node, int pos)
{
assert(is_Sync(node));
set_irn_n(node, pos, pred);
}
-/* Add a new Sync predecessor */
void add_Sync_pred(ir_node *node, ir_node *pred)
{
assert(is_Sync(node));
set_irn_n(node, pos, pred);
}
-size_t get_ASM_n_input_constraints(const ir_node *node)
+int get_ASM_n_inputs(const ir_node *node)
{
assert(is_ASM(node));
- return ARR_LEN(node->attr.assem.input_constraints);
+ return get_irn_arity(node) - ASM_PARAM_OFFSET;
+}
+
+ir_node *get_ASM_input(const ir_node *node, int pos)
+{
+ return get_irn_n(node, ASM_PARAM_OFFSET + pos);
}
size_t get_ASM_n_output_constraints(const ir_node *node)
return ARR_LEN(node->attr.assem.clobbers);
}
-/* returns the graph of a node */
ir_graph *(get_irn_irg)(const ir_node *node)
{
return get_irn_irg_(node);
}
-
-/*----------------------------------------------------------------*/
-/* Auxiliary routines */
-/*----------------------------------------------------------------*/
-
ir_node *skip_Proj(ir_node *node)
{
/* don't assert node !!! */
return node;
}
-/* returns operand of node if node is a Cast */
ir_node *skip_Cast(ir_node *node)
{
if (is_Cast(node))
return node;
}
-/* returns operand of node if node is a Cast */
const ir_node *skip_Cast_const(const ir_node *node)
{
if (is_Cast(node))
return node;
}
-/* returns operand of node if node is a Pin */
ir_node *skip_Pin(ir_node *node)
{
if (is_Pin(node))
return node;
}
-/* returns operand of node if node is a Confirm */
ir_node *skip_Confirm(ir_node *node)
{
if (is_Confirm(node))
return node;
}
-/* skip all high-level ops */
ir_node *skip_HighLevel_ops(ir_node *node)
{
while (is_op_highlevel(get_irn_op(node))) {
}
-/* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
- * than any other approach, as Id chains are resolved and all point to the real node, or
- * all id's are self loops.
- *
- * Note: This function takes 10% of mostly ANY the compiler run, so it's
- * a little bit "hand optimized".
- */
ir_node *skip_Id(ir_node *node)
{
+ /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
+ * than any other approach, as Id chains are resolved and all point to the real node, or
+ * all id's are self loops.
+ *
+ * Note: This function takes about 10% of almost any compiler run, so it's
+ * a little bit "hand optimized".
+ */
ir_node *pred;
/* don't assert node !!! */
}
}
-int (is_strictConv)(const ir_node *node)
-{
- return is_strictConv_(node);
-}
-
-/* Returns true if node is a SymConst node with kind symconst_addr_ent. */
int (is_SymConst_addr_ent)(const ir_node *node)
{
return is_SymConst_addr_ent_(node);
}
-/* Returns true if the operation manipulates control flow. */
int is_cfop(const ir_node *node)
{
if (is_fragile_op(node) && ir_throws_exception(node))
return is_op_unknown_jump(get_irn_op(node));
}
-/* Returns true if the operation can change the control flow because
- of an exception. */
int is_fragile_op(const ir_node *node)
{
return is_op_fragile(get_irn_op(node));
}
-/* Returns the memory operand of fragile operations. */
-ir_node *get_fragile_op_mem(ir_node *node)
-{
- assert(node && is_fragile_op(node));
- return get_irn_n(node, node->op->fragile_mem_index);
-}
-
-/* Returns true if the operation is a forking control flow operation. */
int (is_irn_forking)(const ir_node *node)
{
return is_irn_forking_(node);
copy_node_attr_(irg, old_node, new_node);
}
-/* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
- Cast) or NULL.*/
ir_type *(get_irn_type_attr)(ir_node *node)
{
return get_irn_type_attr_(node);
}
-/* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
ir_entity *(get_irn_entity_attr)(ir_node *node)
{
return get_irn_entity_attr_(node);
}
-/* Returns non-zero for constant-like nodes. */
int (is_irn_constlike)(const ir_node *node)
{
return is_irn_constlike_(node);
}
-/*
- * Returns non-zero for nodes that are allowed to have keep-alives and
- * are neither Block nor PhiM.
- */
int (is_irn_keep)(const ir_node *node)
{
return is_irn_keep_(node);
}
-/*
- * Returns non-zero for nodes that are always placed in the start block.
- */
int (is_irn_start_block_placed)(const ir_node *node)
{
return is_irn_start_block_placed_(node);
}
-/* Returns non-zero for nodes that are CSE neutral to its users. */
int (is_irn_cse_neutral)(const ir_node *node)
{
return is_irn_cse_neutral_(node);
}
-/* Gets the string representation of the jump prediction .*/
const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
{
#define X(a) case a: return #a
return NULL;
}
-/** the get_type_attr operation must be always implemented */
-static ir_type *get_Null_type(const ir_node *n)
+static void register_get_type_func(ir_op *op, get_type_attr_func func)
{
- (void) n;
- return firm_unknown_type;
+ op->ops.get_type_attr = func;
}
-/* Sets the get_type operation for an ir_op_ops. */
-void firm_set_default_get_type_attr(unsigned code, ir_op_ops *ops)
+static void register_get_entity_func(ir_op *op, get_entity_attr_func func)
{
- switch (code) {
- case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
- case iro_Builtin: ops->get_type_attr = get_Builtin_type; break;
- case iro_Call: ops->get_type_attr = get_Call_type; break;
- case iro_Cast: ops->get_type_attr = get_Cast_type; break;
- case iro_CopyB: ops->get_type_attr = get_CopyB_type; break;
- case iro_Free: ops->get_type_attr = get_Free_type; break;
- case iro_InstOf: ops->get_type_attr = get_InstOf_type; break;
- case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
- default:
- /* not allowed to be NULL */
- if (! ops->get_type_attr)
- ops->get_type_attr = get_Null_type;
- break;
- }
+ op->ops.get_entity_attr = func;
}
-/** the get_entity_attr operation must be always implemented */
-static ir_entity *get_Null_ent(const ir_node *n)
+void ir_register_getter_ops(void)
{
- (void) n;
- return NULL;
-}
+ register_get_type_func(op_Alloc, get_Alloc_type);
+ register_get_type_func(op_Builtin, get_Builtin_type);
+ register_get_type_func(op_Call, get_Call_type);
+ register_get_type_func(op_Cast, get_Cast_type);
+ register_get_type_func(op_CopyB, get_CopyB_type);
+ register_get_type_func(op_Free, get_Free_type);
+ register_get_type_func(op_InstOf, get_InstOf_type);
+ register_get_type_func(op_SymConst, get_SymConst_attr_type);
-/* Sets the get_type operation for an ir_op_ops. */
-void firm_set_default_get_entity_attr(unsigned code, ir_op_ops *ops)
-{
- switch (code) {
- case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
- case iro_Sel: ops->get_entity_attr = get_Sel_entity; break;
- case iro_Block: ops->get_entity_attr = get_Block_entity; break;
- default:
- /* not allowed to be NULL */
- if (! ops->get_entity_attr)
- ops->get_entity_attr = get_Null_ent;
- break;
- }
+ register_get_entity_func(op_SymConst, get_SymConst_attr_entity);
+ register_get_entity_func(op_Sel, get_Sel_entity);
+ register_get_entity_func(op_Block, get_Block_entity);
}
-/* Sets the debug information of a node. */
void (set_irn_dbg_info)(ir_node *n, dbg_info *db)
{
set_irn_dbg_info_(n, db);
}
-/**
- * Returns the debug information of an node.
- *
- * @param n The node.
- */
dbg_info *(get_irn_dbg_info)(const ir_node *n)
{
return get_irn_dbg_info_(n);
return res;
}
-/*
- * Calculate a hash value of a node.
- */
-unsigned firm_default_hash(const ir_node *node)
+bool only_used_by_keepalive(const ir_node *node)
 {
- unsigned h;
- int i, irn_arity;
-
- /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
- h = irn_arity = get_irn_arity(node);
-
- /* consider all in nodes... except the block if not a control flow. */
- for (i = is_cfop(node) ? -1 : 0; i < irn_arity; ++i) {
-  ir_node *pred = get_irn_n(node, i);
-  if (is_irn_cse_neutral(pred))
-   h *= 9;
-  else
-   h = 9*h + HASH_PTR(pred);
+ foreach_out_edge(node, edge) {
+  ir_node *succ = get_edge_src_irn(edge);
+  if (is_End(succ))
+   continue;
+  /* a Proj whose only users are keepalives is no real user either;
+   * keep scanning the remaining out-edges instead of returning early */
+  if (is_Proj(succ) && only_used_by_keepalive(succ))
+   continue;
+  /* found a real user */
+  return false;
 }
-
- /* ...mode,... */
- h = 9*h + HASH_PTR(get_irn_mode(node));
- /* ...and code */
- h = 9*h + HASH_PTR(get_irn_op(node));
-
- return h;
+ return true;
 }
/* include generated code */