* of an unknown_jump */
irop_flag_unknown_jump = 1U << 11,
} irop_flags;
+ENUM_BITSET(irop_flags)
/** Returns the ident for the opcode name */
FIRM_API ident *get_op_ident(const ir_op *op);
equivalent_node_func equivalent_node; /**< Optimizes the node by returning an equivalent one. */
equivalent_node_func equivalent_node_Proj; /**< Optimizes the Proj node by returning an equivalent one. */
transform_node_func transform_node; /**< Optimizes the node by transforming it. */
- equivalent_node_func transform_node_Proj; /**< Optimizes the Proj node by transforming it. */
+ transform_node_func transform_node_Proj; /**< Optimizes the Proj node by transforming it. */
node_cmp_attr_func node_cmp_attr; /**< Compares two node attributes. */
reassociate_func reassociate; /**< Reassociate a tree. */
copy_attr_func copy_attr; /**< Copy node attributes. */
* @param opar the parity of this IR operation
* @param op_index if the parity is oparity_unary, oparity_binary or oparity_trinary the index
* of the left operand
- * @param ops operations for this opcode, iff NULL default operations are used
* @param attr_size attribute size for this IR operation
*
* @return The generated IR operation.
* The behavior of new opcode depends on the operations \c ops and the \c flags.
*/
FIRM_API ir_op *new_ir_op(unsigned code, const char *name, op_pin_state p,
- unsigned flags, op_arity opar, int op_index,
- size_t attr_size, const ir_op_ops *ops);
+ irop_flags flags, op_arity opar, int op_index,
+ size_t attr_size);
/** Returns one more than the highest opcode code in use. */
FIRM_API unsigned ir_get_n_opcodes(void);
}
}
-static const ir_op_ops be_node_op_ops = {
- firm_default_hash,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- copy_attr,
- NULL,
- NULL,
- NULL,
- NULL,
- dump_node,
- NULL,
- &be_node_irn_ops
-};
-
int is_be_node(const ir_node *irn)
{
return get_op_ops(get_irn_op(irn))->be_ops == &be_node_irn_ops;
}
+/**
+ * Creates a backend node opcode: calls new_ir_op() and then installs the
+ * handlers shared by all backend nodes (dump_node, copy_attr, be_ops).
+ */
+static ir_op *new_be_op(unsigned code, const char *name, op_pin_state p,
+                        irop_flags flags, op_arity opar, size_t attr_size)
+{
+	ir_op *res = new_ir_op(code, name, p, flags, opar, 0, attr_size);
+	res->ops.dump_node = dump_node;
+	res->ops.copy_attr = copy_attr;
+	res->ops.be_ops = &be_node_irn_ops;
+	return res;
+}
+
void be_init_op(void)
{
unsigned opc;
assert(op_be_Spill == NULL);
/* Acquire all needed opcodes. */
- op_be_Spill = new_ir_op(beo_Spill, "be_Spill", op_pin_state_exc_pinned, irop_flag_none, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
- op_be_Reload = new_ir_op(beo_Reload, "be_Reload", op_pin_state_exc_pinned, irop_flag_none, oparity_zero, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
- op_be_Perm = new_ir_op(beo_Perm, "be_Perm", op_pin_state_exc_pinned, irop_flag_none, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
- op_be_MemPerm = new_ir_op(beo_MemPerm, "be_MemPerm", op_pin_state_exc_pinned, irop_flag_none, oparity_variable, 0, sizeof(be_memperm_attr_t), &be_node_op_ops);
- op_be_Copy = new_ir_op(beo_Copy, "be_Copy", op_pin_state_exc_pinned, irop_flag_none, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
- op_be_Keep = new_ir_op(beo_Keep, "be_Keep", op_pin_state_exc_pinned, irop_flag_keep, oparity_dynamic, 0, sizeof(be_node_attr_t), &be_node_op_ops);
- op_be_CopyKeep = new_ir_op(beo_CopyKeep, "be_CopyKeep", op_pin_state_exc_pinned, irop_flag_keep, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
- op_be_Call = new_ir_op(beo_Call, "be_Call", op_pin_state_exc_pinned, irop_flag_fragile|irop_flag_uses_memory, oparity_variable, 0, sizeof(be_call_attr_t), &be_node_op_ops);
+ op_be_Spill = new_be_op(beo_Spill, "be_Spill", op_pin_state_exc_pinned, irop_flag_none, oparity_unary, sizeof(be_frame_attr_t));
+ op_be_Reload = new_be_op(beo_Reload, "be_Reload", op_pin_state_exc_pinned, irop_flag_none, oparity_zero, sizeof(be_frame_attr_t));
+ op_be_Perm = new_be_op(beo_Perm, "be_Perm", op_pin_state_exc_pinned, irop_flag_none, oparity_variable, sizeof(be_node_attr_t));
+ op_be_MemPerm = new_be_op(beo_MemPerm, "be_MemPerm", op_pin_state_exc_pinned, irop_flag_none, oparity_variable, sizeof(be_memperm_attr_t));
+ op_be_Copy = new_be_op(beo_Copy, "be_Copy", op_pin_state_exc_pinned, irop_flag_none, oparity_unary, sizeof(be_node_attr_t));
+ op_be_Keep = new_be_op(beo_Keep, "be_Keep", op_pin_state_exc_pinned, irop_flag_keep, oparity_dynamic, sizeof(be_node_attr_t));
+ op_be_CopyKeep = new_be_op(beo_CopyKeep, "be_CopyKeep", op_pin_state_exc_pinned, irop_flag_keep, oparity_variable, sizeof(be_node_attr_t));
+ op_be_Call = new_be_op(beo_Call, "be_Call", op_pin_state_exc_pinned, irop_flag_fragile|irop_flag_uses_memory, oparity_variable, sizeof(be_call_attr_t));
ir_op_set_memory_index(op_be_Call, n_be_Call_mem);
ir_op_set_fragile_indices(op_be_Call, pn_be_Call_X_regular, pn_be_Call_X_except);
- op_be_Return = new_ir_op(beo_Return, "be_Return", op_pin_state_exc_pinned, irop_flag_cfopcode, oparity_variable, 0, sizeof(be_return_attr_t), &be_node_op_ops);
- op_be_AddSP = new_ir_op(beo_AddSP, "be_AddSP", op_pin_state_exc_pinned, irop_flag_none, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
- op_be_SubSP = new_ir_op(beo_SubSP, "be_SubSP", op_pin_state_exc_pinned, irop_flag_none, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
- op_be_IncSP = new_ir_op(beo_IncSP, "be_IncSP", op_pin_state_exc_pinned, irop_flag_none, oparity_unary, 0, sizeof(be_incsp_attr_t), &be_node_op_ops);
- op_be_Start = new_ir_op(beo_Start, "be_Start", op_pin_state_exc_pinned, irop_flag_none, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops);
- op_be_FrameAddr = new_ir_op(beo_FrameAddr, "be_FrameAddr", op_pin_state_exc_pinned, irop_flag_none, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
+ op_be_Return = new_be_op(beo_Return, "be_Return", op_pin_state_exc_pinned, irop_flag_cfopcode, oparity_variable, sizeof(be_return_attr_t));
+ op_be_AddSP = new_be_op(beo_AddSP, "be_AddSP", op_pin_state_exc_pinned, irop_flag_none, oparity_unary, sizeof(be_node_attr_t));
+ op_be_SubSP = new_be_op(beo_SubSP, "be_SubSP", op_pin_state_exc_pinned, irop_flag_none, oparity_unary, sizeof(be_node_attr_t));
+ op_be_IncSP = new_be_op(beo_IncSP, "be_IncSP", op_pin_state_exc_pinned, irop_flag_none, oparity_unary, sizeof(be_incsp_attr_t));
+ op_be_Start = new_be_op(beo_Start, "be_Start", op_pin_state_exc_pinned, irop_flag_none, oparity_zero, sizeof(be_node_attr_t));
+ op_be_FrameAddr = new_be_op(beo_FrameAddr, "be_FrameAddr", op_pin_state_exc_pinned, irop_flag_none, oparity_unary, sizeof(be_frame_attr_t));
op_be_Spill->ops.node_cmp_attr = FrameAddr_cmp_attr;
op_be_Reload->ops.node_cmp_attr = FrameAddr_cmp_attr;
$n{"dump_func"} = "${arch}_dump_node" if (!exists($n{"dump_func"}));
my $dump_func = $n{"dump_func"};
- push(@obst_new_irop, "\n\tmemset(&ops, 0, sizeof(ops));\n");
- push(@obst_new_irop, "\tops.be_ops = be_ops;\n");
- push(@obst_new_irop, "\tops.dump_node = ${dump_func};\n");
-
- if (defined($cmp_attr_func)) {
- push(@obst_new_irop, "\tops.node_cmp_attr = ${cmp_attr_func};\n");
- }
- my $copy_attr_func = $copy_attr{$attr_type};
- if (!defined($copy_attr_func)) {
- if ($attr_type eq "") {
- $copy_attr_func = "NULL";
- } else {
- $copy_attr_func = $default_copy_attr;
- }
- }
- if (defined($copy_attr_func)) {
- push(@obst_new_irop, "\tops.copy_attr = ${copy_attr_func};\n");
- }
- if (defined($hash_func)) {
- push(@obst_new_irop, "\tops.hash = ${hash_func};\n");
- }
-
my %known_flags = map { $_ => 1 } (
"none", "commutative", "cfopcode", "unknown_jump", "fragile",
"forking", "highlevel", "constlike", "keep", "start_block",
$n_opcodes++;
$temp = "\top = new_ir_op(cur_opcode + iro_$op, \"$op\", op_pin_state_".$n{"state"}.", $op_flags";
- $temp .= ", ".translate_arity($arity).", 0, ${attr_size}, &ops);\n";
+ $temp .= ", ".translate_arity($arity).", 0, ${attr_size});\n";
push(@obst_new_irop, $temp);
+ push(@obst_new_irop, "\top->ops.be_ops = be_ops;\n");
+ push(@obst_new_irop, "\top->ops.dump_node = ${dump_func};\n");
+ if (defined($cmp_attr_func)) {
+ push(@obst_new_irop, "\top->ops.node_cmp_attr = ${cmp_attr_func};\n");
+ }
+ my $copy_attr_func = $copy_attr{$attr_type};
+ if (!defined($copy_attr_func)) {
+ if ($attr_type eq "") {
+ $copy_attr_func = "NULL";
+ } else {
+ $copy_attr_func = $default_copy_attr;
+ }
+ }
+ if (defined($copy_attr_func)) {
+ push(@obst_new_irop, "\top->ops.copy_attr = ${copy_attr_func};\n");
+ }
+ if (defined($hash_func)) {
+ push(@obst_new_irop, "\top->ops.hash = ${hash_func};\n");
+ }
+
if ($is_fragile) {
push(@obst_new_irop, "\tir_op_set_memory_index(op, n_${op}_mem);\n");
push(@obst_new_irop, "\tir_op_set_fragile_indices(op, pn_${op}_X_regular, pn_${op}_X_except);\n");
*/
void $arch\_create_opcodes(const arch_irn_ops_t *be_ops)
{
- ir_op_ops ops;
- ir_op *op;
- int cur_opcode = get_next_ir_opcodes(iro_$arch\_last);
+ ir_op *op;
+ int cur_opcode = get_next_ir_opcodes(iro_$arch\_last);
$arch\_opcode_start = cur_opcode;
ENDOFMAIN
return NULL;
}
-/** the get_type_attr operation must be always implemented */
-static ir_type *get_Null_type(const ir_node *n)
-{
- (void) n;
- return get_unknown_type();
-}
-
-void firm_set_default_get_type_attr(unsigned code, ir_op_ops *ops)
-{
- switch (code) {
- case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
- case iro_Builtin: ops->get_type_attr = get_Builtin_type; break;
- case iro_Call: ops->get_type_attr = get_Call_type; break;
- case iro_Cast: ops->get_type_attr = get_Cast_type; break;
- case iro_CopyB: ops->get_type_attr = get_CopyB_type; break;
- case iro_Free: ops->get_type_attr = get_Free_type; break;
- case iro_InstOf: ops->get_type_attr = get_InstOf_type; break;
- case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
- default:
- /* not allowed to be NULL */
- if (! ops->get_type_attr)
- ops->get_type_attr = get_Null_type;
- break;
- }
+static void register_get_type_func(ir_op *op, get_type_attr_func func)
+{
+ op->ops.get_type_attr = func;
}
-/** the get_entity_attr operation must be always implemented */
-static ir_entity *get_Null_ent(const ir_node *n)
+static void register_get_entity_func(ir_op *op, get_entity_attr_func func)
{
- (void) n;
- return NULL;
+ op->ops.get_entity_attr = func;
}
-void firm_set_default_get_entity_attr(unsigned code, ir_op_ops *ops)
+void ir_register_getter_ops(void)
{
- switch (code) {
- case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
- case iro_Sel: ops->get_entity_attr = get_Sel_entity; break;
- case iro_Block: ops->get_entity_attr = get_Block_entity; break;
- default:
- /* not allowed to be NULL */
- if (! ops->get_entity_attr)
- ops->get_entity_attr = get_Null_ent;
- break;
- }
+ register_get_type_func(op_Alloc, get_Alloc_type);
+ register_get_type_func(op_Builtin, get_Builtin_type);
+ register_get_type_func(op_Call, get_Call_type);
+ register_get_type_func(op_Cast, get_Cast_type);
+ register_get_type_func(op_CopyB, get_CopyB_type);
+ register_get_type_func(op_Free, get_Free_type);
+ register_get_type_func(op_InstOf, get_InstOf_type);
+ register_get_type_func(op_SymConst, get_SymConst_attr_type);
+
+ register_get_entity_func(op_SymConst, get_SymConst_attr_entity);
+ register_get_entity_func(op_Sel, get_Sel_entity);
+ register_get_entity_func(op_Block, get_Block_entity);
}
void (set_irn_dbg_info)(ir_node *n, dbg_info *db)
*/
extern unsigned firm_add_node_size;
-/**
- * Sets the get_type_attr operation for an ir_op_ops.
- *
- * @param code the opcode for the default operation
- * @param ops the operations initialized
- *
- * @return
- * The operations.
- */
-void firm_set_default_get_type_attr(unsigned code, ir_op_ops *ops);
-
-/**
- * Sets the get_entity_attr operation for an ir_op_ops.
- *
- * @param code the opcode for the default operation
- * @param ops the operations initialized
- *
- * @return
- * The operations.
- */
-void firm_set_default_get_entity_attr(unsigned code, ir_op_ops *ops);
-
/**
* Returns an array with the predecessors of the Block. Depending on
* the implementation of the graph data structure this can be a copy of
return &table->entries[entry];
}
+void ir_register_getter_ops(void);
+
/** initialize ir_node module */
void init_irnode(void);
/** the available next opcode */
static unsigned next_iro = iro_MaxOpcode;
-void default_copy_attr(ir_graph *irg, const ir_node *old_node,
- ir_node *new_node)
-{
- unsigned size = firm_add_node_size;
- (void) irg;
-
- assert(get_irn_op(old_node) == get_irn_op(new_node));
- memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
-
- if (size > 0) {
- /* copy additional node data */
- memcpy(get_irn_data(new_node, void, size), get_irn_data(old_node, void, size), size);
- }
-}
-
-/**
- * Copies all Call attributes stored in the old node to the new node.
- */
-static void call_copy_attr(ir_graph *irg, const ir_node *old_node,
- ir_node *new_node)
-{
- default_copy_attr(irg, old_node, new_node);
- remove_Call_callee_arr(new_node);
-}
-
-/**
- * Copies all Block attributes stored in the old node to the new node.
- */
-static void block_copy_attr(ir_graph *irg, const ir_node *old_node,
- ir_node *new_node)
-{
- default_copy_attr(irg, old_node, new_node);
- new_node->attr.block.irg.irg = irg;
- new_node->attr.block.phis = NULL;
- new_node->attr.block.backedge = new_backedge_arr(irg->obst, get_irn_arity(new_node));
- new_node->attr.block.block_visited = 0;
- memset(&new_node->attr.block.dom, 0, sizeof(new_node->attr.block.dom));
- memset(&new_node->attr.block.pdom, 0, sizeof(new_node->attr.block.pdom));
- /* It should be safe to copy the entity here, as it has no back-link to the old block.
- * It serves just as a label number, so copying a labeled block results in an exact copy.
- * This is at least what we need for DCE to work. */
- new_node->attr.block.entity = old_node->attr.block.entity;
- new_node->attr.block.phis = NULL;
- INIT_LIST_HEAD(&new_node->attr.block.succ_head);
-}
-
-/**
- * Copies all phi attributes stored in old node to the new node
- */
-static void phi_copy_attr(ir_graph *irg, const ir_node *old_node,
- ir_node *new_node)
-{
- default_copy_attr(irg, old_node, new_node);
- new_node->attr.phi.next = NULL;
- new_node->attr.phi.u.backedge = new_backedge_arr(irg->obst, get_irn_arity(new_node));
-}
-
-/**
- * Copies all ASM attributes stored in old node to the new node
- */
-static void ASM_copy_attr(ir_graph *irg, const ir_node *old_node,
- ir_node *new_node)
-{
- default_copy_attr(irg, old_node, new_node);
- new_node->attr.assem.input_constraints = DUP_ARR_D(ir_asm_constraint, irg->obst, old_node->attr.assem.input_constraints);
- new_node->attr.assem.output_constraints = DUP_ARR_D(ir_asm_constraint, irg->obst, old_node->attr.assem.output_constraints);
- new_node->attr.assem.clobbers = DUP_ARR_D(ident*, irg->obst, old_node->attr.assem.clobbers);
-}
-
-static void switch_copy_attr(ir_graph *irg, const ir_node *old_node,
- ir_node *new_node)
-{
- const ir_switch_table *table = get_Switch_table(old_node);
- new_node->attr.switcha.table = ir_switch_table_duplicate(irg, table);
- new_node->attr.switcha.n_outs = old_node->attr.switcha.n_outs;
-}
-
-/**
- * Sets the default copy_attr operation for an ir_ops
- *
- * @param code the opcode for the default operation
- * @param ops the operations initialized
- *
- * @return
- * The operations.
- */
-static void firm_set_default_copy_attr(unsigned code, ir_op_ops *ops)
-{
- switch (code) {
- case iro_Call: ops->copy_attr = call_copy_attr; break;
- case iro_Block: ops->copy_attr = block_copy_attr; break;
- case iro_Phi: ops->copy_attr = phi_copy_attr; break;
- case iro_ASM: ops->copy_attr = ASM_copy_attr; break;
- case iro_Switch: ops->copy_attr = switch_copy_attr; break;
- default:
- if (ops->copy_attr == NULL)
- ops->copy_attr = default_copy_attr;
- }
-}
-
-/*
- * Sets the default operation for an ir_ops.
- */
-static void set_default_operations(unsigned code, ir_op_ops *ops)
-{
- firm_set_default_hash(code, ops);
- firm_set_default_computed_value(code, ops);
- firm_set_default_equivalent_node(code, ops);
- firm_set_default_transform_node(code, ops);
- firm_set_default_node_cmp_attr(code, ops);
- firm_set_default_get_type_attr(code, ops);
- firm_set_default_get_entity_attr(code, ops);
- firm_set_default_copy_attr(code, ops);
- firm_set_default_verifier(code, ops);
- firm_set_default_reassoc(code, ops);
-}
+static ir_type *default_get_type_attr(const ir_node *node);
+static ir_entity *default_get_entity_attr(const ir_node *node);
+static unsigned default_hash_node(const ir_node *node);
+static void default_copy_attr(ir_graph *irg, const ir_node *old_node,
+ ir_node *new_node);
ir_op *new_ir_op(unsigned code, const char *name, op_pin_state p,
- unsigned flags, op_arity opar, int op_index, size_t attr_size,
- const ir_op_ops *ops)
+ irop_flags flags, op_arity opar, int op_index,
+ size_t attr_size)
{
ir_op *res = XMALLOCZ(ir_op);
res->op_index = op_index;
res->tag = 0;
- if (ops)
- res->ops = *ops;
- else /* no given ops, set all operations to NULL */
- memset(&res->ops, 0, sizeof(res->ops));
-
- set_default_operations(code, &res->ops);
+ memset(&res->ops, 0, sizeof(res->ops));
+ res->ops.hash = default_hash_node;
+ res->ops.copy_attr = default_copy_attr;
+ res->ops.get_type_attr = default_get_type_attr;
+ res->ops.get_entity_attr = default_get_entity_attr;
{
size_t len = ARR_LEN(opcodes);
return (irop_flags)op->flags;
}
+/** Default get_type_attr: ops without a type attribute report the unknown type. */
+static ir_type *default_get_type_attr(const ir_node *node)
+{
+	(void)node;
+	return get_unknown_type();
+}
+
+/** Default get_entity_attr: ops without an entity attribute report NULL. */
+static ir_entity *default_get_entity_attr(const ir_node *node)
+{
+	(void)node;
+	return NULL;
+}
+
+/**
+ * Default hash function used for CSE: mixes the operand pointers, the mode
+ * and the opcode of a node.
+ */
+static unsigned default_hash_node(const ir_node *node)
+{
+	unsigned h;
+	int i, irn_arity;
+
+	/* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
+	h = irn_arity = get_irn_arity(node);
+
+	/* consider all in nodes... except the block if not a control flow. */
+	for (i = is_cfop(node) ? -1 : 0; i < irn_arity; ++i) {
+		ir_node *pred = get_irn_n(node, i);
+		if (is_irn_cse_neutral(pred))
+			h *= 9;
+		else
+			h = 9*h + hash_ptr(pred);
+	}
+
+	/* ...mode,... */
+	h = 9*h + hash_ptr(get_irn_mode(node));
+	/* ...and code */
+	h = 9*h + hash_ptr(get_irn_op(node));
+
+	return h;
+}
+
+/**
+ * Calculate a hash value of a Const node.
+ * Only the tarval distinguishes Const nodes, so hash just that.
+ */
+static unsigned hash_Const(const ir_node *node)
+{
+	unsigned h;
+
+	/* special value for const, as they only differ in their tarval. */
+	h = hash_ptr(node->attr.con.tarval);
+
+	return h;
+}
+
+/**
+ * Calculate a hash value of a SymConst node.
+ * Only the symbol distinguishes SymConst nodes, so hash just that.
+ */
+static unsigned hash_SymConst(const ir_node *node)
+{
+	unsigned h;
+
+	/* all others are pointers */
+	h = hash_ptr(node->attr.symc.sym.type_p);
+
+	return h;
+}
+
+/**
+ * Compares two exception attributes.
+ * Like all node_cmp_attr functions, returns 0 iff the attributes are
+ * equal (i.e. the nodes may be CSEd) and non-zero otherwise.
+ */
+static int node_cmp_exception(const ir_node *a, const ir_node *b)
+{
+	const except_attr *ea = &a->attr.except;
+	const except_attr *eb = &b->attr.except;
+	return ea->pin_state != eb->pin_state;
+}
+
+/** Compares the attributes of two Const nodes; non-zero if they differ. */
+static int node_cmp_attr_Const(const ir_node *a, const ir_node *b)
+{
+	return get_Const_tarval(a) != get_Const_tarval(b);
+}
+
+/** Compares the attributes of two Proj nodes; non-zero if they differ. */
+static int node_cmp_attr_Proj(const ir_node *a, const ir_node *b)
+{
+	return a->attr.proj.proj != b->attr.proj.proj;
+}
+
+/** Compares the attributes of two Alloc nodes, including the exception attributes. */
+static int node_cmp_attr_Alloc(const ir_node *a, const ir_node *b)
+{
+	const alloc_attr *pa = &a->attr.alloc;
+	const alloc_attr *pb = &b->attr.alloc;
+	if (pa->where != pb->where || pa->type != pb->type)
+		return 1;
+	return node_cmp_exception(a, b);
+}
+
+/** Compares the attributes of two Free nodes; non-zero if they differ. */
+static int node_cmp_attr_Free(const ir_node *a, const ir_node *b)
+{
+	const free_attr *pa = &a->attr.free;
+	const free_attr *pb = &b->attr.free;
+	return (pa->where != pb->where) || (pa->type != pb->type);
+}
+
+/** Compares the attributes of two SymConst nodes; non-zero if they differ. */
+static int node_cmp_attr_SymConst(const ir_node *a, const ir_node *b)
+{
+	const symconst_attr *pa = &a->attr.symc;
+	const symconst_attr *pb = &b->attr.symc;
+	return (pa->kind != pb->kind)
+	    || (pa->sym.type_p != pb->sym.type_p);
+}
+
+/** Compares the attributes of two Call nodes, including the exception attributes. */
+static int node_cmp_attr_Call(const ir_node *a, const ir_node *b)
+{
+	const call_attr *pa = &a->attr.call;
+	const call_attr *pb = &b->attr.call;
+	if (pa->type != pb->type)
+		return 1;
+	return node_cmp_exception(a, b);
+}
+
+/** Compares the attributes of two Sel nodes; non-zero if the entities differ. */
+static int node_cmp_attr_Sel(const ir_node *a, const ir_node *b)
+{
+	const ir_entity *a_ent = get_Sel_entity(a);
+	const ir_entity *b_ent = get_Sel_entity(b);
+	return a_ent != b_ent;
+}
+
+/** Compares the attributes of two Phi nodes; non-zero forbids CSE. */
+static int node_cmp_attr_Phi(const ir_node *a, const ir_node *b)
+{
+	(void) b;
+	/* do not CSE Phi-nodes without any inputs when building new graphs */
+	if (get_irn_arity(a) == 0 &&
+	    get_irg_phase_state(get_irn_irg(a)) == phase_building) {
+		return 1;
+	}
+	return 0;
+}
+
+/** Compares the attributes of two Conv nodes; non-zero if they differ. */
+static int node_cmp_attr_Conv(const ir_node *a, const ir_node *b)
+{
+	return get_Conv_strict(a) != get_Conv_strict(b);
+}
+
+/** Compares the attributes of two Cast nodes; non-zero if they differ. */
+static int node_cmp_attr_Cast(const ir_node *a, const ir_node *b)
+{
+	return get_Cast_type(a) != get_Cast_type(b);
+}
+
+/** Compares the attributes of two Load nodes, including the exception attributes. */
+static int node_cmp_attr_Load(const ir_node *a, const ir_node *b)
+{
+	if (get_Load_volatility(a) == volatility_is_volatile ||
+	    get_Load_volatility(b) == volatility_is_volatile)
+		/* NEVER do CSE on volatile Loads */
+		return 1;
+	/* do not CSE Loads with different alignment. Be conservative. */
+	if (get_Load_unaligned(a) != get_Load_unaligned(b))
+		return 1;
+	if (get_Load_mode(a) != get_Load_mode(b))
+		return 1;
+	return node_cmp_exception(a, b);
+}
+
+/** Compares the attributes of two Store nodes, including the exception attributes. */
+static int node_cmp_attr_Store(const ir_node *a, const ir_node *b)
+{
+	/* do not CSE Stores with different alignment. Be conservative. */
+	if (get_Store_unaligned(a) != get_Store_unaligned(b))
+		return 1;
+	/* NEVER do CSE on volatile Stores */
+	if (get_Store_volatility(a) == volatility_is_volatile ||
+	    get_Store_volatility(b) == volatility_is_volatile)
+		return 1;
+	return node_cmp_exception(a, b);
+}
+
+/** Compares the attributes of two CopyB nodes, including the exception attributes. */
+static int node_cmp_attr_CopyB(const ir_node *a, const ir_node *b)
+{
+	if (get_CopyB_type(a) != get_CopyB_type(b))
+		return 1;
+
+	return node_cmp_exception(a, b);
+}
+
+/** Compares the attributes of two Bound nodes (only the exception attributes). */
+static int node_cmp_attr_Bound(const ir_node *a, const ir_node *b)
+{
+	return node_cmp_exception(a, b);
+}
+
+/** Compares the attributes of two Div nodes, including the exception attributes. */
+static int node_cmp_attr_Div(const ir_node *a, const ir_node *b)
+{
+	const div_attr *ma = &a->attr.div;
+	const div_attr *mb = &b->attr.div;
+	if (ma->resmode != mb->resmode || ma->no_remainder != mb->no_remainder)
+		return 1;
+	return node_cmp_exception(a, b);
+}
+
+/** Compares the attributes of two Mod nodes, including the exception attributes. */
+static int node_cmp_attr_Mod(const ir_node *a, const ir_node *b)
+{
+	const mod_attr *ma = &a->attr.mod;
+	const mod_attr *mb = &b->attr.mod;
+	if (ma->resmode != mb->resmode)
+		return 1;
+	return node_cmp_exception(a, b);
+}
+
+/** Compares the attributes of two Cmp nodes; non-zero if the relations differ. */
+static int node_cmp_attr_Cmp(const ir_node *a, const ir_node *b)
+{
+	const cmp_attr *ma = &a->attr.cmp;
+	const cmp_attr *mb = &b->attr.cmp;
+	return ma->relation != mb->relation;
+}
+
+/** Compares the attributes of two Confirm nodes; non-zero if the relations differ. */
+static int node_cmp_attr_Confirm(const ir_node *a, const ir_node *b)
+{
+	const confirm_attr *ma = &a->attr.confirm;
+	const confirm_attr *mb = &b->attr.confirm;
+	return ma->relation != mb->relation;
+}
+
+/** Compares the attributes of two Builtin nodes, including the exception attributes. */
+static int node_cmp_attr_Builtin(const ir_node *a, const ir_node *b)
+{
+	if (get_Builtin_kind(a) != get_Builtin_kind(b))
+		return 1;
+	if (get_Builtin_type(a) != get_Builtin_type(b))
+		return 1;
+	return node_cmp_exception(a, b);
+}
+
+/**
+ * Compares the attributes of two ASM nodes: the assembler text, all input
+ * and output constraints, the clobber list and the exception attributes.
+ */
+static int node_cmp_attr_ASM(const ir_node *a, const ir_node *b)
+{
+	size_t n;
+	size_t i;
+	const ir_asm_constraint *ca;
+	const ir_asm_constraint *cb;
+	ident **cla, **clb;
+
+	if (get_ASM_text(a) != get_ASM_text(b))
+		return 1;
+
+	/* Comparing the constraints is conservative: equal text with different
+	 * constraints must not be CSEd. */
+	n = get_ASM_n_input_constraints(a);
+	if (n != get_ASM_n_input_constraints(b))
+		return 1;
+
+	ca = get_ASM_input_constraints(a);
+	cb = get_ASM_input_constraints(b);
+	for (i = 0; i < n; ++i) {
+		if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint
+		    || ca[i].mode != cb[i].mode)
+			return 1;
+	}
+
+	n = get_ASM_n_output_constraints(a);
+	if (n != get_ASM_n_output_constraints(b))
+		return 1;
+
+	ca = get_ASM_output_constraints(a);
+	cb = get_ASM_output_constraints(b);
+	for (i = 0; i < n; ++i) {
+		if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint
+		    || ca[i].mode != cb[i].mode)
+			return 1;
+	}
+
+	n = get_ASM_n_clobbers(a);
+	if (n != get_ASM_n_clobbers(b))
+		return 1;
+
+	cla = get_ASM_clobbers(a);
+	clb = get_ASM_clobbers(b);
+	for (i = 0; i < n; ++i) {
+		if (cla[i] != clb[i])
+			return 1;
+	}
+
+	return node_cmp_exception(a, b);
+}
+
+/** Compares the inexistent attributes of two Dummy nodes. */
+static int node_cmp_attr_Dummy(const ir_node *a, const ir_node *b)
+{
+	(void) a;
+	(void) b;
+	/* Dummy nodes never equal by definition */
+	return 1;
+}
+
+/** Compares the attributes of two InstOf nodes, including the exception attributes. */
+static int node_cmp_attr_InstOf(const ir_node *a, const ir_node *b)
+{
+	if (get_InstOf_type(a) != get_InstOf_type(b))
+		return 1;
+	return node_cmp_exception(a, b);
+}
+
+/**
+ * Copies simply all attributes stored in the old node to the new node.
+ * Assumes both have the same opcode and sufficient size.
+ */
+static void default_copy_attr(ir_graph *irg, const ir_node *old_node,
+                              ir_node *new_node)
+{
+	unsigned size = firm_add_node_size;
+	(void) irg;
+
+	assert(get_irn_op(old_node) == get_irn_op(new_node));
+	memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
+
+	if (size > 0) {
+		/* copy additional node data */
+		memcpy(get_irn_data(new_node, void, size), get_irn_data(old_node, void, size), size);
+	}
+}
+
+/**
+ * Copies all Call attributes stored in the old node to the new node.
+ */
+static void call_copy_attr(ir_graph *irg, const ir_node *old_node,
+                           ir_node *new_node)
+{
+	default_copy_attr(irg, old_node, new_node);
+	remove_Call_callee_arr(new_node);
+}
+
+/**
+ * Copies all Block attributes stored in the old node to the new node.
+ */
+static void block_copy_attr(ir_graph *irg, const ir_node *old_node,
+                            ir_node *new_node)
+{
+	default_copy_attr(irg, old_node, new_node);
+	new_node->attr.block.irg.irg = irg;
+	new_node->attr.block.phis = NULL;
+	new_node->attr.block.backedge = new_backedge_arr(irg->obst, get_irn_arity(new_node));
+	new_node->attr.block.block_visited = 0;
+	memset(&new_node->attr.block.dom, 0, sizeof(new_node->attr.block.dom));
+	memset(&new_node->attr.block.pdom, 0, sizeof(new_node->attr.block.pdom));
+	/* It should be safe to copy the entity here, as it has no back-link to the old block.
+	 * It serves just as a label number, so copying a labeled block results in an exact copy.
+	 * This is at least what we need for DCE to work. */
+	new_node->attr.block.entity = old_node->attr.block.entity;
+	new_node->attr.block.phis = NULL;
+	INIT_LIST_HEAD(&new_node->attr.block.succ_head);
+}
+
+/**
+ * Copies all phi attributes stored in old node to the new node
+ */
+static void phi_copy_attr(ir_graph *irg, const ir_node *old_node,
+                          ir_node *new_node)
+{
+	default_copy_attr(irg, old_node, new_node);
+	new_node->attr.phi.next = NULL;
+	new_node->attr.phi.u.backedge = new_backedge_arr(irg->obst, get_irn_arity(new_node));
+}
+
+/**
+ * Copies all ASM attributes stored in old node to the new node
+ */
+static void ASM_copy_attr(ir_graph *irg, const ir_node *old_node,
+                          ir_node *new_node)
+{
+	default_copy_attr(irg, old_node, new_node);
+	new_node->attr.assem.input_constraints = DUP_ARR_D(ir_asm_constraint, irg->obst, old_node->attr.assem.input_constraints);
+	new_node->attr.assem.output_constraints = DUP_ARR_D(ir_asm_constraint, irg->obst, old_node->attr.assem.output_constraints);
+	new_node->attr.assem.clobbers = DUP_ARR_D(ident*, irg->obst, old_node->attr.assem.clobbers);
+}
+
+/**
+ * Copies all Switch attributes; the table is duplicated on the graph's obstack.
+ * NOTE(review): unlike the other copy handlers this does not call
+ * default_copy_attr first — presumably intentional; verify against callers.
+ */
+static void switch_copy_attr(ir_graph *irg, const ir_node *old_node,
+                             ir_node *new_node)
+{
+	const ir_switch_table *table = get_Switch_table(old_node);
+	new_node->attr.switcha.table = ir_switch_table_duplicate(irg, table);
+	new_node->attr.switcha.n_outs = old_node->attr.switcha.n_outs;
+}
+
+/** Installs a CSE attribute-compare handler on an opcode. */
+static void register_node_cmp_func(ir_op *op, node_cmp_attr_func func)
+{
+	op->ops.node_cmp_attr = func;
+}
+
+/** Installs a CSE hash handler on an opcode. */
+static void register_node_hash_func(ir_op *op, hash_func func)
+{
+	op->ops.hash = func;
+}
+
+/** Installs an attribute-copy handler on an opcode. */
+static void register_node_copy_attr_func(ir_op *op, copy_attr_func func)
+{
+	op->ops.copy_attr = func;
+}
+
static void generated_init_op(void);
static void generated_finish_op(void);
opcodes = NEW_ARR_F(ir_op*, 0);
generated_init_op();
be_init_op();
+
+	/* Register the per-opcode CSE attribute comparators (alphabetical).
+	 * Note: the Div..Proj entries were previously registered twice by
+	 * accident; the duplicates were redundant and have been removed. */
+	register_node_cmp_func(op_ASM, node_cmp_attr_ASM);
+	register_node_cmp_func(op_Alloc, node_cmp_attr_Alloc);
+	register_node_cmp_func(op_Bound, node_cmp_attr_Bound);
+	register_node_cmp_func(op_Builtin, node_cmp_attr_Builtin);
+	register_node_cmp_func(op_Call, node_cmp_attr_Call);
+	register_node_cmp_func(op_Cast, node_cmp_attr_Cast);
+	register_node_cmp_func(op_Cmp, node_cmp_attr_Cmp);
+	register_node_cmp_func(op_Confirm, node_cmp_attr_Confirm);
+	register_node_cmp_func(op_Const, node_cmp_attr_Const);
+	register_node_cmp_func(op_Conv, node_cmp_attr_Conv);
+	register_node_cmp_func(op_CopyB, node_cmp_attr_CopyB);
+	register_node_cmp_func(op_Div, node_cmp_attr_Div);
+	register_node_cmp_func(op_Dummy, node_cmp_attr_Dummy);
+	register_node_cmp_func(op_Free, node_cmp_attr_Free);
+	register_node_cmp_func(op_InstOf, node_cmp_attr_InstOf);
+	register_node_cmp_func(op_Load, node_cmp_attr_Load);
+	register_node_cmp_func(op_Mod, node_cmp_attr_Mod);
+	register_node_cmp_func(op_Phi, node_cmp_attr_Phi);
+	register_node_cmp_func(op_Proj, node_cmp_attr_Proj);
+	register_node_cmp_func(op_Sel, node_cmp_attr_Sel);
+	register_node_cmp_func(op_Store, node_cmp_attr_Store);
+	register_node_cmp_func(op_SymConst, node_cmp_attr_SymConst);
+
+ register_node_hash_func(op_Const, hash_Const);
+ register_node_hash_func(op_SymConst, hash_SymConst);
+
+ register_node_copy_attr_func(op_Call, call_copy_attr);
+ register_node_copy_attr_func(op_Block, block_copy_attr);
+ register_node_copy_attr_func(op_Phi, phi_copy_attr);
+ register_node_copy_attr_func(op_ASM, ASM_copy_attr);
+ register_node_copy_attr_func(op_Switch, switch_copy_attr);
+
+ ir_register_opt_node_ops();
+ ir_register_reassoc_node_ops();
+ ir_register_verify_node_ops();
}
void firm_finish_op(void)
/** frees memory allocated by irop module */
void firm_finish_op(void);
-/**
- * Copies simply all attributes stored in the old node to the new node.
- * Assumes both have the same opcode and sufficient size.
- *
- * @param old_node the old node from which the attributes are read
- * @param new_node the new node to which the attributes are written
- */
-void default_copy_attr(ir_graph *irg, const ir_node *old_node,
- ir_node *new_node);
-
/**
* Returns the attribute size of nodes of this opcode.
* @note Use not encouraged, internal feature.
return tarval_bad;
}
-void firm_set_default_computed_value(ir_opcode code, ir_op_ops *ops)
-{
-#define CASE(a) \
- case iro_##a: \
- ops->computed_value = computed_value_##a; \
- break
-#define CASE_PROJ(a) \
- case iro_##a: \
- ops->computed_value_Proj = computed_value_Proj_##a; \
- break
-
- switch (code) {
- CASE(Add);
- CASE(And);
- CASE(Borrow);
- CASE(Carry);
- CASE(Cmp);
- CASE(Confirm);
- CASE(Const);
- CASE(Conv);
- CASE(Eor);
- CASE(Minus);
- CASE(Mul);
- CASE(Mux);
- CASE(Not);
- CASE(Or);
- CASE(Proj);
- CASE(Rotl);
- CASE(Shl);
- CASE(Shr);
- CASE(Shrs);
- CASE(Sub);
- CASE(SymConst);
- CASE_PROJ(Div);
- CASE_PROJ(Mod);
- default:
- /* leave NULL */
- break;
- }
-#undef CASE_PROJ
-#undef CASE
-}
-
/**
* Optimize operations that are commutative and have neutral 0,
* so a op 0 = 0 op a = a.
return n;
}
-#define equivalent_node_Shl equivalent_node_left_zero
-#define equivalent_node_Shr equivalent_node_left_zero
-#define equivalent_node_Shrs equivalent_node_left_zero
-#define equivalent_node_Rotl equivalent_node_left_zero
-
/**
* Optimize a - 0 and (a + x) - x (for modes with wrap-around).
*
return n;
}
-/** Optimize Not(Not(x)) == x. */
-#define equivalent_node_Not equivalent_node_idempotent_unop
-
-/** -(-x) == x ??? Is this possible or can --x raise an
- out of bounds exception if min =! max? */
-#define equivalent_node_Minus equivalent_node_idempotent_unop
-
/**
* Optimize a * 1 = 1 * a = a.
*/
return n;
}
-void firm_set_default_equivalent_node(ir_opcode code, ir_op_ops *ops)
-{
-#define CASE(a) \
- case iro_##a: \
- ops->equivalent_node = equivalent_node_##a; \
- break
-#define CASE_PROJ(a) \
- case iro_##a: \
- ops->equivalent_node_Proj = equivalent_node_Proj_##a; \
- break
-
- switch (code) {
- CASE(Eor);
- CASE(Add);
- CASE(Shl);
- CASE(Shr);
- CASE(Shrs);
- CASE(Rotl);
- CASE(Sub);
- CASE(Not);
- CASE(Minus);
- CASE(Mul);
- CASE(Or);
- CASE(And);
- CASE(Conv);
- CASE(Phi);
- CASE_PROJ(Tuple);
- CASE_PROJ(Div);
- CASE_PROJ(CopyB);
- CASE_PROJ(Bound);
- CASE(Proj);
- CASE(Id);
- CASE(Mux);
- CASE(Confirm);
- default:
- /* leave NULL */
- break;
- }
-#undef CASE
-#undef CASE_PROJ
-}
-
/**
* Returns non-zero if a node is a Phi node
* with all predecessors constant.
return res;
}
-void firm_set_default_transform_node(ir_opcode code, ir_op_ops *ops)
-{
-#define CASE(a) \
- case iro_##a: \
- ops->transform_node = transform_node_##a; \
- break
-#define CASE_PROJ(a) \
- case iro_##a: \
- ops->transform_node_Proj = transform_node_Proj_##a; \
- break
-#define CASE_PROJ_EX(a) \
- case iro_##a: \
- ops->transform_node = transform_node_##a; \
- ops->transform_node_Proj = transform_node_Proj_##a; \
- break
-
- switch (code) {
- CASE(Add);
- CASE(And);
- CASE(Block);
- CASE(Call);
- CASE(Cmp);
- CASE(Cond);
- CASE(Conv);
- CASE(End);
- CASE(Eor);
- CASE(Minus);
- CASE(Mul);
- CASE(Mux);
- CASE(Not);
- CASE(Or);
- CASE(Phi);
- CASE(Proj);
- CASE(Rotl);
- CASE(Shl);
- CASE(Shr);
- CASE(Shrs);
- CASE(Sub);
- CASE(Switch);
- CASE(Sync);
- CASE_PROJ(Bound);
- CASE_PROJ(CopyB);
- CASE_PROJ(Store);
- CASE_PROJ_EX(Div);
- CASE_PROJ_EX(Load);
- CASE_PROJ_EX(Mod);
- default:
- break;
- }
-#undef CASE_PROJ_EX
-#undef CASE_PROJ
-#undef CASE
-}
-
/**
* Tries several [inplace] [optimizing] transformations and returns an
* equivalent node. The difference to equivalent_node() is that these
return n;
}
+/** Registers @p func as the computed_value callback of @p op.
+ * Re-registering the same function is tolerated; a conflicting
+ * registration for one opcode trips the assert. */
+static void register_computed_value_func(ir_op *op, computed_value_func func)
+{
+	assert(op->ops.computed_value == NULL || op->ops.computed_value == func);
+	op->ops.computed_value = func;
+}
+
+/** Registers @p func as the computed_value callback for Proj(op)
+ * nodes; conflicting registrations are caught by the assert. */
+static void register_computed_value_func_proj(ir_op *op,
+                                              computed_value_func func)
+{
+	assert(op->ops.computed_value_Proj == NULL
+	    || op->ops.computed_value_Proj == func);
+	op->ops.computed_value_Proj = func;
+}
+
+/** Registers @p func as the equivalent_node callback of @p op;
+ * conflicting registrations are caught by the assert. */
+static void register_equivalent_node_func(ir_op *op, equivalent_node_func func)
+{
+	assert(op->ops.equivalent_node == NULL || op->ops.equivalent_node == func);
+	op->ops.equivalent_node = func;
+}
+
+/** Registers @p func as the equivalent_node callback for Proj(op)
+ * nodes; conflicting registrations are caught by the assert. */
+static void register_equivalent_node_func_proj(ir_op *op,
+                                               equivalent_node_func func)
+{
+	assert(op->ops.equivalent_node_Proj == NULL
+	    || op->ops.equivalent_node_Proj == func);
+	op->ops.equivalent_node_Proj = func;
+}
+
+/** Registers @p func as the transform_node callback of @p op;
+ * conflicting registrations are caught by the assert. */
+static void register_transform_node_func(ir_op *op, transform_node_func func)
+{
+	assert(op->ops.transform_node == NULL || op->ops.transform_node == func);
+	op->ops.transform_node = func;
+}
+
+/** Registers @p func as the transform_node callback for Proj(op)
+ * nodes; conflicting registrations are caught by the assert. */
+static void register_transform_node_func_proj(ir_op *op,
+                                              transform_node_func func)
+{
+	assert(op->ops.transform_node_Proj == NULL
+	    || op->ops.transform_node_Proj == func);
+	op->ops.transform_node_Proj = func;
+}
+
+/** Registers the local-optimization callbacks (computed_value,
+ * equivalent_node, transform_node and their Proj variants) for every
+ * opcode handled by this file.  Replaces the former
+ * firm_set_default_{computed_value,equivalent_node,transform_node}
+ * switch tables.  Registrations are kept in alphabetical order per
+ * group to make omissions easy to spot. */
+void ir_register_opt_node_ops(void)
+{
+	/* constant-folding evaluators */
+	register_computed_value_func(op_Add,      computed_value_Add);
+	register_computed_value_func(op_And,      computed_value_And);
+	register_computed_value_func(op_Borrow,   computed_value_Borrow);
+	register_computed_value_func(op_Carry,    computed_value_Carry);
+	register_computed_value_func(op_Cmp,      computed_value_Cmp);
+	register_computed_value_func(op_Confirm,  computed_value_Confirm);
+	register_computed_value_func(op_Const,    computed_value_Const);
+	register_computed_value_func(op_Conv,     computed_value_Conv);
+	register_computed_value_func(op_Eor,      computed_value_Eor);
+	register_computed_value_func(op_Minus,    computed_value_Minus);
+	register_computed_value_func(op_Mul,      computed_value_Mul);
+	register_computed_value_func(op_Mux,      computed_value_Mux);
+	register_computed_value_func(op_Not,      computed_value_Not);
+	register_computed_value_func(op_Or,       computed_value_Or);
+	register_computed_value_func(op_Proj,     computed_value_Proj);
+	register_computed_value_func(op_Rotl,     computed_value_Rotl);
+	register_computed_value_func(op_Shl,      computed_value_Shl);
+	register_computed_value_func(op_Shr,      computed_value_Shr);
+	register_computed_value_func(op_Shrs,     computed_value_Shrs);
+	register_computed_value_func(op_Sub,      computed_value_Sub);
+	register_computed_value_func(op_SymConst, computed_value_SymConst);
+	register_computed_value_func_proj(op_Div, computed_value_Proj_Div);
+	register_computed_value_func_proj(op_Mod, computed_value_Proj_Mod);
+
+	/* equivalent-node simplifications (x op neutral -> x, etc.) */
+	register_equivalent_node_func(op_Add,     equivalent_node_Add);
+	register_equivalent_node_func(op_And,     equivalent_node_And);
+	register_equivalent_node_func(op_Confirm, equivalent_node_Confirm);
+	register_equivalent_node_func(op_Conv,    equivalent_node_Conv);
+	register_equivalent_node_func(op_Eor,     equivalent_node_Eor);
+	register_equivalent_node_func(op_Id,      equivalent_node_Id);
+	register_equivalent_node_func(op_Minus,   equivalent_node_idempotent_unop);
+	register_equivalent_node_func(op_Mul,     equivalent_node_Mul);
+	register_equivalent_node_func(op_Mux,     equivalent_node_Mux);
+	register_equivalent_node_func(op_Not,     equivalent_node_idempotent_unop);
+	register_equivalent_node_func(op_Or,      equivalent_node_Or);
+	register_equivalent_node_func(op_Phi,     equivalent_node_Phi);
+	register_equivalent_node_func(op_Proj,    equivalent_node_Proj);
+	register_equivalent_node_func(op_Rotl,    equivalent_node_left_zero);
+	register_equivalent_node_func(op_Shl,     equivalent_node_left_zero);
+	register_equivalent_node_func(op_Shr,     equivalent_node_left_zero);
+	register_equivalent_node_func(op_Shrs,    equivalent_node_left_zero);
+	register_equivalent_node_func(op_Sub,     equivalent_node_Sub);
+	register_equivalent_node_func_proj(op_Bound, equivalent_node_Proj_Bound);
+	register_equivalent_node_func_proj(op_CopyB, equivalent_node_Proj_CopyB);
+	register_equivalent_node_func_proj(op_Div,   equivalent_node_Proj_Div);
+	register_equivalent_node_func_proj(op_Tuple, equivalent_node_Proj_Tuple);
+
+	/* node-transforming optimizations */
+	register_transform_node_func(op_Add,    transform_node_Add);
+	register_transform_node_func(op_And,    transform_node_And);
+	register_transform_node_func(op_Block,  transform_node_Block);
+	register_transform_node_func(op_Call,   transform_node_Call);
+	register_transform_node_func(op_Cmp,    transform_node_Cmp);
+	register_transform_node_func(op_Cond,   transform_node_Cond);
+	register_transform_node_func(op_Conv,   transform_node_Conv);
+	register_transform_node_func(op_Div,    transform_node_Div);
+	register_transform_node_func(op_End,    transform_node_End);
+	register_transform_node_func(op_Eor,    transform_node_Eor);
+	register_transform_node_func(op_Load,   transform_node_Load);
+	register_transform_node_func(op_Minus,  transform_node_Minus);
+	register_transform_node_func(op_Mod,    transform_node_Mod);
+	register_transform_node_func(op_Mul,    transform_node_Mul);
+	register_transform_node_func(op_Mux,    transform_node_Mux);
+	register_transform_node_func(op_Not,    transform_node_Not);
+	register_transform_node_func(op_Or,     transform_node_Or);
+	register_transform_node_func(op_Phi,    transform_node_Phi);
+	register_transform_node_func(op_Proj,   transform_node_Proj);
+	register_transform_node_func(op_Rotl,   transform_node_Rotl);
+	register_transform_node_func(op_Shl,    transform_node_Shl);
+	register_transform_node_func(op_Shr,    transform_node_Shr);
+	register_transform_node_func(op_Shrs,   transform_node_Shrs);
+	register_transform_node_func(op_Sub,    transform_node_Sub);
+	register_transform_node_func(op_Switch, transform_node_Switch);
+	register_transform_node_func(op_Sync,   transform_node_Sync);
+	register_transform_node_func_proj(op_Bound, transform_node_Proj_Bound);
+	register_transform_node_func_proj(op_CopyB, transform_node_Proj_CopyB);
+	register_transform_node_func_proj(op_Div,   transform_node_Proj_Div);
+	register_transform_node_func_proj(op_Load,  transform_node_Proj_Load);
+	register_transform_node_func_proj(op_Mod,   transform_node_Proj_Mod);
+	register_transform_node_func_proj(op_Store, transform_node_Proj_Store);
+}
+
/* **************** Common Subexpression Elimination **************** */
/** The size of the hash table used, should estimate the number of nodes
in a graph. */
#define N_IR_NODES 512
-/** Compares two exception attributes */
-static int node_cmp_exception(const ir_node *a, const ir_node *b)
-{
- const except_attr *ea = &a->attr.except;
- const except_attr *eb = &b->attr.except;
- return ea->pin_state != eb->pin_state;
-}
-
-/** Compares the attributes of two Const nodes. */
-static int node_cmp_attr_Const(const ir_node *a, const ir_node *b)
-{
- return get_Const_tarval(a) != get_Const_tarval(b);
-}
-
-/** Compares the attributes of two Proj nodes. */
-static int node_cmp_attr_Proj(const ir_node *a, const ir_node *b)
-{
- return a->attr.proj.proj != b->attr.proj.proj;
-}
-
-/** Compares the attributes of two Alloc nodes. */
-static int node_cmp_attr_Alloc(const ir_node *a, const ir_node *b)
-{
- const alloc_attr *pa = &a->attr.alloc;
- const alloc_attr *pb = &b->attr.alloc;
- if (pa->where != pb->where || pa->type != pb->type)
- return 1;
- return node_cmp_exception(a, b);
-}
-
-/** Compares the attributes of two Free nodes. */
-static int node_cmp_attr_Free(const ir_node *a, const ir_node *b)
-{
- const free_attr *pa = &a->attr.free;
- const free_attr *pb = &b->attr.free;
- return (pa->where != pb->where) || (pa->type != pb->type);
-}
-
-/** Compares the attributes of two SymConst nodes. */
-static int node_cmp_attr_SymConst(const ir_node *a, const ir_node *b)
-{
- const symconst_attr *pa = &a->attr.symc;
- const symconst_attr *pb = &b->attr.symc;
- return (pa->kind != pb->kind)
- || (pa->sym.type_p != pb->sym.type_p);
-}
-
-/** Compares the attributes of two Call nodes. */
-static int node_cmp_attr_Call(const ir_node *a, const ir_node *b)
-{
- const call_attr *pa = &a->attr.call;
- const call_attr *pb = &b->attr.call;
- if (pa->type != pb->type)
- return 1;
- return node_cmp_exception(a, b);
-}
-
-/** Compares the attributes of two Sel nodes. */
-static int node_cmp_attr_Sel(const ir_node *a, const ir_node *b)
-{
- const ir_entity *a_ent = get_Sel_entity(a);
- const ir_entity *b_ent = get_Sel_entity(b);
- return a_ent != b_ent;
-}
-
-/** Compares the attributes of two Phi nodes. */
-static int node_cmp_attr_Phi(const ir_node *a, const ir_node *b)
-{
- (void) b;
- /* do not CSE Phi-nodes without any inputs when building new graphs */
- if (get_irn_arity(a) == 0 &&
- get_irg_phase_state(get_irn_irg(a)) == phase_building) {
- return 1;
- }
- return 0;
-}
-
-/** Compares the attributes of two Conv nodes. */
-static int node_cmp_attr_Conv(const ir_node *a, const ir_node *b)
-{
- return get_Conv_strict(a) != get_Conv_strict(b);
-}
-
-/** Compares the attributes of two Cast nodes. */
-static int node_cmp_attr_Cast(const ir_node *a, const ir_node *b)
-{
- return get_Cast_type(a) != get_Cast_type(b);
-}
-
-/** Compares the attributes of two Load nodes. */
-static int node_cmp_attr_Load(const ir_node *a, const ir_node *b)
-{
- if (get_Load_volatility(a) == volatility_is_volatile ||
- get_Load_volatility(b) == volatility_is_volatile)
- /* NEVER do CSE on volatile Loads */
- return 1;
- /* do not CSE Loads with different alignment. Be conservative. */
- if (get_Load_unaligned(a) != get_Load_unaligned(b))
- return 1;
- if (get_Load_mode(a) != get_Load_mode(b))
- return 1;
- return node_cmp_exception(a, b);
-}
-
-/** Compares the attributes of two Store nodes. */
-static int node_cmp_attr_Store(const ir_node *a, const ir_node *b)
-{
- /* do not CSE Stores with different alignment. Be conservative. */
- if (get_Store_unaligned(a) != get_Store_unaligned(b))
- return 1;
- /* NEVER do CSE on volatile Stores */
- if (get_Store_volatility(a) == volatility_is_volatile ||
- get_Store_volatility(b) == volatility_is_volatile)
- return 1;
- return node_cmp_exception(a, b);
-}
-
-static int node_cmp_attr_CopyB(const ir_node *a, const ir_node *b)
-{
- if (get_CopyB_type(a) != get_CopyB_type(b))
- return 1;
-
- return node_cmp_exception(a, b);
-}
-
-static int node_cmp_attr_Bound(const ir_node *a, const ir_node *b)
-{
- return node_cmp_exception(a, b);
-}
-
-/** Compares the attributes of two Div nodes. */
-static int node_cmp_attr_Div(const ir_node *a, const ir_node *b)
-{
- const div_attr *ma = &a->attr.div;
- const div_attr *mb = &b->attr.div;
- if (ma->resmode != mb->resmode || ma->no_remainder != mb->no_remainder)
- return 1;
- return node_cmp_exception(a, b);
-}
-
-/** Compares the attributes of two Mod nodes. */
-static int node_cmp_attr_Mod(const ir_node *a, const ir_node *b)
-{
- const mod_attr *ma = &a->attr.mod;
- const mod_attr *mb = &b->attr.mod;
- if (ma->resmode != mb->resmode)
- return 1;
- return node_cmp_exception(a, b);
-}
-
-static int node_cmp_attr_Cmp(const ir_node *a, const ir_node *b)
-{
- const cmp_attr *ma = &a->attr.cmp;
- const cmp_attr *mb = &b->attr.cmp;
- return ma->relation != mb->relation;
-}
-
-/** Compares the attributes of two Confirm nodes. */
-static int node_cmp_attr_Confirm(const ir_node *a, const ir_node *b)
-{
- const confirm_attr *ma = &a->attr.confirm;
- const confirm_attr *mb = &b->attr.confirm;
- return ma->relation != mb->relation;
-}
-
-/** Compares the attributes of two Builtin nodes. */
-static int node_cmp_attr_Builtin(const ir_node *a, const ir_node *b)
-{
- if (get_Builtin_kind(a) != get_Builtin_kind(b))
- return 1;
- if (get_Builtin_type(a) != get_Builtin_type(b))
- return 1;
- return node_cmp_exception(a, b);
-}
-
-/** Compares the attributes of two ASM nodes. */
-static int node_cmp_attr_ASM(const ir_node *a, const ir_node *b)
-{
- size_t n;
- size_t i;
- const ir_asm_constraint *ca;
- const ir_asm_constraint *cb;
- ident **cla, **clb;
-
- if (get_ASM_text(a) != get_ASM_text(b))
- return 1;
-
- /* Should we really check the constraints here? Should be better, but is strange. */
- n = get_ASM_n_input_constraints(a);
- if (n != get_ASM_n_input_constraints(b))
- return 1;
-
- ca = get_ASM_input_constraints(a);
- cb = get_ASM_input_constraints(b);
- for (i = 0; i < n; ++i) {
- if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint
- || ca[i].mode != cb[i].mode)
- return 1;
- }
-
- n = get_ASM_n_output_constraints(a);
- if (n != get_ASM_n_output_constraints(b))
- return 1;
-
- ca = get_ASM_output_constraints(a);
- cb = get_ASM_output_constraints(b);
- for (i = 0; i < n; ++i) {
- if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint
- || ca[i].mode != cb[i].mode)
- return 1;
- }
-
- n = get_ASM_n_clobbers(a);
- if (n != get_ASM_n_clobbers(b))
- return 1;
-
- cla = get_ASM_clobbers(a);
- clb = get_ASM_clobbers(b);
- for (i = 0; i < n; ++i) {
- if (cla[i] != clb[i])
- return 1;
- }
-
- return node_cmp_exception(a, b);
-}
-
-/** Compares the inexistent attributes of two Dummy nodes. */
-static int node_cmp_attr_Dummy(const ir_node *a, const ir_node *b)
-{
- (void) a;
- (void) b;
- /* Dummy nodes never equal by definition */
- return 1;
-}
-
-static int node_cmp_attr_InstOf(const ir_node *a, const ir_node *b)
-{
- if (get_InstOf_type(a) != get_InstOf_type(b))
- return 1;
- return node_cmp_exception(a, b);
-}
-
-void firm_set_default_node_cmp_attr(ir_opcode code, ir_op_ops *ops)
-{
-#define CASE(a) \
- case iro_##a: \
- ops->node_cmp_attr = node_cmp_attr_##a; \
- break
-
- switch (code) {
- CASE(ASM);
- CASE(Alloc);
- CASE(Bound);
- CASE(Builtin);
- CASE(Call);
- CASE(Cast);
- CASE(Cmp);
- CASE(Confirm);
- CASE(Const);
- CASE(Conv);
- CASE(CopyB);
- CASE(Div);
- CASE(Dummy);
- CASE(Free);
- CASE(InstOf);
- CASE(Load);
- CASE(Mod);
- CASE(Phi);
- CASE(Proj);
- CASE(Sel);
- CASE(Store);
- CASE(SymConst);
- default:
- /* leave NULL */
- break;
- }
-#undef CASE
-}
-
int identities_cmp(const void *elt, const void *key)
{
ir_node *a = (ir_node *)elt;
clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE);
return optimize_in_place_2(n);
}
-
-/**
- * Calculate a hash value of a Const node.
- */
-static unsigned hash_Const(const ir_node *node)
-{
- unsigned h;
-
- /* special value for const, as they only differ in their tarval. */
- h = hash_ptr(node->attr.con.tarval);
-
- return h;
-}
-
-/**
- * Calculate a hash value of a SymConst node.
- */
-static unsigned hash_SymConst(const ir_node *node)
-{
- unsigned h;
-
- /* all others are pointers */
- h = hash_ptr(node->attr.symc.sym.type_p);
-
- return h;
-}
-
-void firm_set_default_hash(unsigned code, ir_op_ops *ops)
-{
-#define CASE(a) \
- case iro_##a: \
- ops->hash = hash_##a; \
- break
-
- /* hash function already set */
- if (ops->hash != NULL)
- return;
-
- switch (code) {
- CASE(Const);
- CASE(SymConst);
- default:
- /* use input/mode default hash if no function was given */
- ops->hash = firm_default_hash;
- }
-#undef CASE
-}
bool ir_is_optimizable_mux(const ir_node *sel, const ir_node *mux_false,
const ir_node *mux_true);
-/**
- * Set the default hash operation in an ir_op_ops.
- *
- * @param code the opcode for the default operation
- * @param ops the operations initialized
- */
-void firm_set_default_hash(unsigned code, ir_op_ops *ops);
-
-/**
- * Set the default computed_value evaluator in an ir_op_ops.
- *
- * @param code the opcode for the default operation
- * @param ops the operations initialized
- */
-void firm_set_default_computed_value(ir_opcode code, ir_op_ops *ops);
-
-/**
- * Sets the default equivalent node operation for an ir_op_ops.
- *
- * @param code the opcode for the default operation
- * @param ops the operations initialized
- */
-void firm_set_default_equivalent_node(ir_opcode code, ir_op_ops *ops);
-
-/**
- * Sets the default transform node operation for an ir_op_ops.
- *
- * @param code the opcode for the default operation
- * @param ops the operations initialized
- */
-void firm_set_default_transform_node(ir_opcode code, ir_op_ops *ops);
-
-/**
- * Set the default node attribute compare operation for an ir_op_ops.
- *
- * @param code the opcode for the default operation
- * @param ops the operations initialized
- */
-void firm_set_default_node_cmp_attr(ir_opcode code, ir_op_ops *ops);
-
+void ir_register_opt_node_ops(void);
#endif
return env.res;
}
-void firm_set_default_verifier(unsigned code, ir_op_ops *ops)
-{
-#define CASE(a) \
- case iro_##a: \
- ops->verify_node = verify_node_##a; \
- break
-
- switch (code) {
- CASE(Add);
- CASE(Alloc);
- CASE(And);
- CASE(Block);
- CASE(Bound);
- CASE(Call);
- CASE(Cast);
- CASE(Cmp);
- CASE(Cond);
- CASE(Confirm);
- CASE(Const);
- CASE(Conv);
- CASE(CopyB);
- CASE(Div);
- CASE(Eor);
- CASE(Free);
- CASE(IJmp);
- CASE(InstOf);
- CASE(Jmp);
- CASE(Load);
- CASE(Minus);
- CASE(Mod);
- CASE(Mul);
- CASE(Mulh);
- CASE(Mux);
- CASE(Not);
- CASE(Or);
- CASE(Phi);
- CASE(Proj);
- CASE(Raise);
- CASE(Return);
- CASE(Rotl);
- CASE(Sel);
- CASE(Shl);
- CASE(Shr);
- CASE(Shrs);
- CASE(Start);
- CASE(Store);
- CASE(Sub);
- CASE(Switch);
- CASE(SymConst);
- CASE(Sync);
- default:
- break;
- }
-#undef CASE
-
-#define CASE(a) \
- case iro_##a: \
- ops->verify_proj_node = verify_node_Proj_##a; \
- break
-
- switch (code) {
- CASE(Alloc);
- CASE(Bound);
- CASE(Call);
- CASE(Cond);
- CASE(CopyB);
- CASE(Div);
- CASE(InstOf);
- CASE(Load);
- CASE(Mod);
- CASE(Proj);
- CASE(Raise);
- CASE(Start);
- CASE(Store);
- CASE(Switch);
- CASE(Tuple);
- default:
- break;
- }
-#undef CASE
+/* Registers the node verifier callback for @p op.
+ * NOTE(review): unlike the register_* helpers in iropt.c this does not
+ * assert against a conflicting earlier registration — consider adding
+ * the same assert for consistency (confirm assert.h availability). */
+static void register_verify_node_func(ir_op *op, verify_node_func func)
+{
+	op->ops.verify_node = func;
+}
+
+/* Registers the verifier callback for Proj(op) nodes.
+ * NOTE(review): no duplicate-registration assert here either; see
+ * register_verify_node_func. */
+static void register_verify_node_func_proj(ir_op *op, verify_node_func func)
+{
+	op->ops.verify_proj_node = func;
+}
+
+/** Registers verify_node/verify_proj_node callbacks for all Firm
+ * opcodes.  Replaces the former firm_set_default_verifier() switch
+ * tables; entries are kept in alphabetical order per group. */
+void ir_register_verify_node_ops(void)
+{
+	/* per-node verifiers */
+	register_verify_node_func(op_Add,      verify_node_Add);
+	register_verify_node_func(op_Alloc,    verify_node_Alloc);
+	register_verify_node_func(op_And,      verify_node_And);
+	register_verify_node_func(op_Block,    verify_node_Block);
+	register_verify_node_func(op_Bound,    verify_node_Bound);
+	register_verify_node_func(op_Call,     verify_node_Call);
+	register_verify_node_func(op_Cast,     verify_node_Cast);
+	register_verify_node_func(op_Cmp,      verify_node_Cmp);
+	register_verify_node_func(op_Cond,     verify_node_Cond);
+	register_verify_node_func(op_Confirm,  verify_node_Confirm);
+	register_verify_node_func(op_Const,    verify_node_Const);
+	register_verify_node_func(op_Conv,     verify_node_Conv);
+	register_verify_node_func(op_CopyB,    verify_node_CopyB);
+	register_verify_node_func(op_Div,      verify_node_Div);
+	register_verify_node_func(op_Eor,      verify_node_Eor);
+	register_verify_node_func(op_Free,     verify_node_Free);
+	register_verify_node_func(op_IJmp,     verify_node_IJmp);
+	register_verify_node_func(op_InstOf,   verify_node_InstOf);
+	register_verify_node_func(op_Jmp,      verify_node_Jmp);
+	register_verify_node_func(op_Load,     verify_node_Load);
+	register_verify_node_func(op_Minus,    verify_node_Minus);
+	register_verify_node_func(op_Mod,      verify_node_Mod);
+	register_verify_node_func(op_Mul,      verify_node_Mul);
+	register_verify_node_func(op_Mulh,     verify_node_Mulh);
+	register_verify_node_func(op_Mux,      verify_node_Mux);
+	register_verify_node_func(op_Not,      verify_node_Not);
+	register_verify_node_func(op_Or,       verify_node_Or);
+	register_verify_node_func(op_Phi,      verify_node_Phi);
+	register_verify_node_func(op_Proj,     verify_node_Proj);
+	register_verify_node_func(op_Raise,    verify_node_Raise);
+	register_verify_node_func(op_Return,   verify_node_Return);
+	register_verify_node_func(op_Rotl,     verify_node_Rotl);
+	register_verify_node_func(op_Sel,      verify_node_Sel);
+	register_verify_node_func(op_Shl,      verify_node_Shl);
+	register_verify_node_func(op_Shr,      verify_node_Shr);
+	register_verify_node_func(op_Shrs,     verify_node_Shrs);
+	register_verify_node_func(op_Start,    verify_node_Start);
+	register_verify_node_func(op_Store,    verify_node_Store);
+	register_verify_node_func(op_Sub,      verify_node_Sub);
+	register_verify_node_func(op_Switch,   verify_node_Switch);
+	register_verify_node_func(op_SymConst, verify_node_SymConst);
+	register_verify_node_func(op_Sync,     verify_node_Sync);
+
+	/* verifiers for Proj nodes, keyed by the Proj's predecessor opcode */
+	register_verify_node_func_proj(op_Alloc,  verify_node_Proj_Alloc);
+	register_verify_node_func_proj(op_Bound,  verify_node_Proj_Bound);
+	register_verify_node_func_proj(op_Call,   verify_node_Proj_Call);
+	register_verify_node_func_proj(op_Cond,   verify_node_Proj_Cond);
+	register_verify_node_func_proj(op_CopyB,  verify_node_Proj_CopyB);
+	register_verify_node_func_proj(op_Div,    verify_node_Proj_Div);
+	register_verify_node_func_proj(op_InstOf, verify_node_Proj_InstOf);
+	register_verify_node_func_proj(op_Load,   verify_node_Proj_Load);
+	register_verify_node_func_proj(op_Mod,    verify_node_Proj_Mod);
+	register_verify_node_func_proj(op_Proj,   verify_node_Proj_Proj);
+	register_verify_node_func_proj(op_Raise,  verify_node_Proj_Raise);
+	register_verify_node_func_proj(op_Start,  verify_node_Proj_Start);
+	register_verify_node_func_proj(op_Store,  verify_node_Proj_Store);
+	register_verify_node_func_proj(op_Switch, verify_node_Proj_Switch);
+	register_verify_node_func_proj(op_Tuple,  verify_node_Proj_Tuple);
+}
#endif
/**
- * Set the default verify_node and verify_proj_node operation for an ir_op_ops.
+ * Set the default verify_node and verify_proj_node operations.
*/
-void firm_set_default_verifier(unsigned code, ir_op_ops *ops);
+void ir_register_verify_node_ops(void);
#endif
return def_graph_pass(name ? name : "reassoc", optimize_reassociation);
} /* optimize_reassociation_pass */
-/* Sets the default reassociation operation for an ir_op_ops. */
-ir_op_ops *firm_set_default_reassoc(unsigned code, ir_op_ops *ops)
+static void register_node_reassoc_func(ir_op *op, reassociate_func func)
{
-#define CASE(a) case iro_##a: ops->reassociate = reassoc_##a; break
-
- switch (code) {
- CASE(Mul);
- CASE(Add);
- CASE(Sub);
- CASE(And);
- CASE(Or);
- CASE(Eor);
- CASE(Shl);
- default:
- break;
- }
+ op->ops.reassociate = func;
+}
- return ops;
-#undef CASE
-} /* firm_set_default_reassoc */
+/** Registers the reassociation handlers for every reassociable opcode
+ * (alphabetical order, matching the other ir_register_* functions). */
+void ir_register_reassoc_node_ops(void)
+{
+	register_node_reassoc_func(op_Add, reassoc_Add);
+	register_node_reassoc_func(op_And, reassoc_And);
+	register_node_reassoc_func(op_Eor, reassoc_Eor);
+	register_node_reassoc_func(op_Mul, reassoc_Mul);
+	register_node_reassoc_func(op_Or,  reassoc_Or);
+	register_node_reassoc_func(op_Shl, reassoc_Shl);
+	register_node_reassoc_func(op_Sub, reassoc_Sub);
+}
/* initialize the reassociation by adding operations to some opcodes */
void firm_init_reassociation(void)
#ifndef REASSOC_T_H
#define REASSOC_T_H
-/**
- * Sets the default reassociation operation for an ir_op_ops.
- *
- * @param code the opcode for the default operation
- * @param ops the operations initialized
- *
- * @return
- * The operations.
- */
-ir_op_ops *firm_set_default_reassoc(unsigned code, ir_op_ops *ops);
+void ir_register_reassoc_node_ops(void);
-#endif /* _REASSOC_T_H_ */
+#endif
{{node|arity}}
{{node|opindex}}
{{node|attr_size}}
- NULL
{% endfilter %});
{%- if "uses_memory" in node.flags: %}
ir_op_set_memory_index(op_{{node.name}}, n_{{node.name}}_mem);