/*
 * This file is part of libFirm.
 * Copyright (C) 2012 University of Karlsruhe.
 */

/**
 * @file
 * @brief   Representation of opcode of intermediate operation.
 * @author  Christian Schaefer, Goetz Lindenmaier, Michael Beck
 */
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include "irbackedge_t.h"
#include "irverify_t.h"
#include "reassoc_t.h"
28 static ir_op **opcodes;
29 /** the available next opcode */
30 static unsigned next_iro = iro_MaxOpcode;
32 static ir_type *default_get_type_attr(const ir_node *node);
33 static ir_entity *default_get_entity_attr(const ir_node *node);
34 static unsigned default_hash_node(const ir_node *node);
35 static void default_copy_attr(ir_graph *irg, const ir_node *old_node,
38 ir_op *new_ir_op(unsigned code, const char *name, op_pin_state p,
39 irop_flags flags, op_arity opar, int op_index,
42 ir_op *res = XMALLOCZ(ir_op);
45 res->name = new_id_from_chars(name, strlen(name));
47 res->attr_size = attr_size;
50 res->op_index = op_index;
53 memset(&res->ops, 0, sizeof(res->ops));
54 res->ops.hash = default_hash_node;
55 res->ops.copy_attr = default_copy_attr;
56 res->ops.get_type_attr = default_get_type_attr;
57 res->ops.get_entity_attr = default_get_entity_attr;
60 size_t len = ARR_LEN(opcodes);
61 if ((size_t)code >= len) {
62 ARR_RESIZE(ir_op*, opcodes, (size_t)code+1);
63 memset(&opcodes[len], 0, (code-len+1) * sizeof(opcodes[0]));
65 if (opcodes[code] != NULL)
66 panic("opcode registered twice");
74 void free_ir_op(ir_op *code)
76 hook_free_ir_op(code);
78 assert(opcodes[code->code] == code);
79 opcodes[code->code] = NULL;
84 unsigned ir_get_n_opcodes(void)
86 return ARR_LEN(opcodes);
89 ir_op *ir_get_opcode(unsigned code)
91 assert((size_t)code < ARR_LEN(opcodes));
95 void ir_clear_opcodes_generic_func(void)
97 size_t n = ir_get_n_opcodes();
100 for (i = 0; i < n; ++i) {
101 ir_op *op = ir_get_opcode(i);
104 op->ops.generic = (op_func)NULL;
105 op->ops.generic1 = (op_func)NULL;
106 op->ops.generic2 = (op_func)NULL;
110 void ir_op_set_memory_index(ir_op *op, int memory_index)
112 assert(op->flags & irop_flag_uses_memory);
113 op->memory_index = memory_index;
116 void ir_op_set_fragile_indices(ir_op *op, int pn_x_regular, int pn_x_except)
118 assert(op->flags & irop_flag_fragile);
119 op->pn_x_regular = pn_x_regular;
120 op->pn_x_except = pn_x_except;
123 const char *get_op_name (const ir_op *op)
125 return get_id_str(op->name);
128 unsigned (get_op_code)(const ir_op *op)
130 return get_op_code_(op);
133 ident *(get_op_ident)(const ir_op *op)
135 return get_op_ident_(op);
138 const char *get_op_pin_state_name(op_pin_state s)
141 #define XXX(s) case s: return #s
142 XXX(op_pin_state_floats);
143 XXX(op_pin_state_pinned);
144 XXX(op_pin_state_exc_pinned);
145 XXX(op_pin_state_mem_pinned);
151 op_pin_state (get_op_pinned)(const ir_op *op)
153 return get_op_pinned_(op);
156 void set_op_pinned(ir_op *op, op_pin_state pinned)
158 if (op == op_Block || op == op_Phi || is_op_cfopcode(op)) return;
159 op->pin_state = pinned;
162 unsigned get_next_ir_opcode(void)
167 unsigned get_next_ir_opcodes(unsigned num)
169 unsigned base = next_iro;
174 op_func (get_generic_function_ptr)(const ir_op *op)
176 return get_generic_function_ptr_(op);
179 void (set_generic_function_ptr)(ir_op *op, op_func func)
181 set_generic_function_ptr_(op, func);
184 ir_op_ops const *(get_op_ops)(ir_op const *const op)
186 return get_op_ops_(op);
189 irop_flags get_op_flags(const ir_op *op)
191 return (irop_flags)op->flags;
194 static ir_type *default_get_type_attr(const ir_node *node)
197 return get_unknown_type();
200 static ir_entity *default_get_entity_attr(const ir_node *node)
206 static unsigned default_hash_node(const ir_node *node)
211 /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
212 h = irn_arity = get_irn_arity(node);
214 /* consider all in nodes... except the block if not a control flow. */
215 for (i = is_cfop(node) ? -1 : 0; i < irn_arity; ++i) {
216 ir_node *pred = get_irn_n(node, i);
217 if (is_irn_cse_neutral(pred))
220 h = 9*h + hash_ptr(pred);
224 h = 9*h + hash_ptr(get_irn_mode(node));
226 h = 9*h + hash_ptr(get_irn_op(node));
232 * Calculate a hash value of a Const node.
234 static unsigned hash_Const(const ir_node *node)
238 /* special value for const, as they only differ in their tarval. */
239 h = hash_ptr(node->attr.con.tarval);
245 * Calculate a hash value of a SymConst node.
247 static unsigned hash_SymConst(const ir_node *node)
251 /* all others are pointers */
252 h = hash_ptr(node->attr.symc.sym.type_p);
257 /** Compares two exception attributes */
258 static int node_cmp_exception(const ir_node *a, const ir_node *b)
260 const except_attr *ea = &a->attr.except;
261 const except_attr *eb = &b->attr.except;
262 return ea->pin_state != eb->pin_state;
265 /** Compares the attributes of two Const nodes. */
266 static int node_cmp_attr_Const(const ir_node *a, const ir_node *b)
268 return get_Const_tarval(a) != get_Const_tarval(b);
271 /** Compares the attributes of two Proj nodes. */
272 static int node_cmp_attr_Proj(const ir_node *a, const ir_node *b)
274 return a->attr.proj.proj != b->attr.proj.proj;
277 /** Compares the attributes of two Alloc nodes. */
278 static int node_cmp_attr_Alloc(const ir_node *a, const ir_node *b)
280 const alloc_attr *pa = &a->attr.alloc;
281 const alloc_attr *pb = &b->attr.alloc;
282 if (pa->where != pb->where || pa->type != pb->type)
284 return node_cmp_exception(a, b);
287 /** Compares the attributes of two Free nodes. */
288 static int node_cmp_attr_Free(const ir_node *a, const ir_node *b)
290 const free_attr *pa = &a->attr.free;
291 const free_attr *pb = &b->attr.free;
292 return (pa->where != pb->where) || (pa->type != pb->type);
295 /** Compares the attributes of two SymConst nodes. */
296 static int node_cmp_attr_SymConst(const ir_node *a, const ir_node *b)
298 const symconst_attr *pa = &a->attr.symc;
299 const symconst_attr *pb = &b->attr.symc;
300 return (pa->kind != pb->kind)
301 || (pa->sym.type_p != pb->sym.type_p);
304 /** Compares the attributes of two Call nodes. */
305 static int node_cmp_attr_Call(const ir_node *a, const ir_node *b)
307 const call_attr *pa = &a->attr.call;
308 const call_attr *pb = &b->attr.call;
309 if (pa->type != pb->type)
311 return node_cmp_exception(a, b);
314 /** Compares the attributes of two Sel nodes. */
315 static int node_cmp_attr_Sel(const ir_node *a, const ir_node *b)
317 const ir_entity *a_ent = get_Sel_entity(a);
318 const ir_entity *b_ent = get_Sel_entity(b);
319 return a_ent != b_ent;
322 /** Compares the attributes of two Phi nodes. */
323 static int node_cmp_attr_Phi(const ir_node *a, const ir_node *b)
326 /* do not CSE Phi-nodes without any inputs when building new graphs */
327 if (get_irn_arity(a) == 0 &&
328 irg_is_constrained(get_irn_irg(a), IR_GRAPH_CONSTRAINT_CONSTRUCTION)) {
334 /** Compares the attributes of two Load nodes. */
335 static int node_cmp_attr_Load(const ir_node *a, const ir_node *b)
337 if (get_Load_volatility(a) == volatility_is_volatile ||
338 get_Load_volatility(b) == volatility_is_volatile)
339 /* NEVER do CSE on volatile Loads */
341 /* do not CSE Loads with different alignment. Be conservative. */
342 if (get_Load_unaligned(a) != get_Load_unaligned(b))
344 if (get_Load_mode(a) != get_Load_mode(b))
346 return node_cmp_exception(a, b);
349 /** Compares the attributes of two Store nodes. */
350 static int node_cmp_attr_Store(const ir_node *a, const ir_node *b)
352 /* do not CSE Stores with different alignment. Be conservative. */
353 if (get_Store_unaligned(a) != get_Store_unaligned(b))
355 /* NEVER do CSE on volatile Stores */
356 if (get_Store_volatility(a) == volatility_is_volatile ||
357 get_Store_volatility(b) == volatility_is_volatile)
359 return node_cmp_exception(a, b);
362 static int node_cmp_attr_CopyB(const ir_node *a, const ir_node *b)
364 if (get_CopyB_type(a) != get_CopyB_type(b))
367 return node_cmp_exception(a, b);
370 /** Compares the attributes of two Div nodes. */
371 static int node_cmp_attr_Div(const ir_node *a, const ir_node *b)
373 const div_attr *ma = &a->attr.div;
374 const div_attr *mb = &b->attr.div;
375 if (ma->resmode != mb->resmode || ma->no_remainder != mb->no_remainder)
377 return node_cmp_exception(a, b);
380 /** Compares the attributes of two Mod nodes. */
381 static int node_cmp_attr_Mod(const ir_node *a, const ir_node *b)
383 const mod_attr *ma = &a->attr.mod;
384 const mod_attr *mb = &b->attr.mod;
385 if (ma->resmode != mb->resmode)
387 return node_cmp_exception(a, b);
390 static int node_cmp_attr_Cmp(const ir_node *a, const ir_node *b)
392 const cmp_attr *ma = &a->attr.cmp;
393 const cmp_attr *mb = &b->attr.cmp;
394 return ma->relation != mb->relation;
397 /** Compares the attributes of two Confirm nodes. */
398 static int node_cmp_attr_Confirm(const ir_node *a, const ir_node *b)
400 const confirm_attr *ma = &a->attr.confirm;
401 const confirm_attr *mb = &b->attr.confirm;
402 return ma->relation != mb->relation;
405 /** Compares the attributes of two Builtin nodes. */
406 static int node_cmp_attr_Builtin(const ir_node *a, const ir_node *b)
408 if (get_Builtin_kind(a) != get_Builtin_kind(b))
410 if (get_Builtin_type(a) != get_Builtin_type(b))
412 return node_cmp_exception(a, b);
415 /** Compares the attributes of two ASM nodes. */
416 static int node_cmp_attr_ASM(const ir_node *a, const ir_node *b)
418 if (get_ASM_text(a) != get_ASM_text(b))
421 int n_inputs = get_ASM_n_inputs(a);
422 if (n_inputs != get_ASM_n_inputs(b))
425 const ir_asm_constraint *in_a = get_ASM_input_constraints(a);
426 const ir_asm_constraint *in_b = get_ASM_input_constraints(b);
427 for (int i = 0; i < n_inputs; ++i) {
428 if (in_a[i].pos != in_b[i].pos
429 || in_a[i].constraint != in_b[i].constraint
430 || in_a[i].mode != in_b[i].mode)
434 size_t n_outputs = get_ASM_n_output_constraints(a);
435 if (n_outputs != get_ASM_n_output_constraints(b))
438 const ir_asm_constraint *out_a = get_ASM_output_constraints(a);
439 const ir_asm_constraint *out_b = get_ASM_output_constraints(b);
440 for (size_t i = 0; i < n_outputs; ++i) {
441 if (out_a[i].pos != out_b[i].pos
442 || out_a[i].constraint != out_b[i].constraint
443 || out_a[i].mode != out_b[i].mode)
447 size_t n_clobbers = get_ASM_n_clobbers(a);
448 if (n_clobbers != get_ASM_n_clobbers(b))
451 ident **cla = get_ASM_clobbers(a);
452 ident **clb = get_ASM_clobbers(b);
453 for (size_t i = 0; i < n_clobbers; ++i) {
454 if (cla[i] != clb[i])
458 return node_cmp_exception(a, b);
461 /** Compares the inexistent attributes of two Dummy nodes. */
462 static int node_cmp_attr_Dummy(const ir_node *a, const ir_node *b)
466 /* Dummy nodes never equal by definition */
470 static int node_cmp_attr_InstOf(const ir_node *a, const ir_node *b)
472 if (get_InstOf_type(a) != get_InstOf_type(b))
474 return node_cmp_exception(a, b);
477 static void default_copy_attr(ir_graph *irg, const ir_node *old_node,
482 assert(get_irn_op(old_node) == get_irn_op(new_node));
483 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
487 * Copies all Call attributes stored in the old node to the new node.
489 static void call_copy_attr(ir_graph *irg, const ir_node *old_node,
492 default_copy_attr(irg, old_node, new_node);
493 remove_Call_callee_arr(new_node);
497 * Copies all Block attributes stored in the old node to the new node.
499 static void block_copy_attr(ir_graph *irg, const ir_node *old_node,
502 default_copy_attr(irg, old_node, new_node);
503 new_node->attr.block.irg.irg = irg;
504 new_node->attr.block.phis = NULL;
505 new_node->attr.block.backedge = new_backedge_arr(get_irg_obstack(irg), get_irn_arity(new_node));
506 new_node->attr.block.block_visited = 0;
507 memset(&new_node->attr.block.dom, 0, sizeof(new_node->attr.block.dom));
508 memset(&new_node->attr.block.pdom, 0, sizeof(new_node->attr.block.pdom));
509 /* It should be safe to copy the entity here, as it has no back-link to the old block.
510 * It serves just as a label number, so copying a labeled block results in an exact copy.
511 * This is at least what we need for DCE to work. */
512 new_node->attr.block.entity = old_node->attr.block.entity;
513 new_node->attr.block.phis = NULL;
517 * Copies all phi attributes stored in old node to the new node
519 static void phi_copy_attr(ir_graph *irg, const ir_node *old_node,
522 default_copy_attr(irg, old_node, new_node);
523 new_node->attr.phi.next = NULL;
524 new_node->attr.phi.u.backedge = new_backedge_arr(get_irg_obstack(irg), get_irn_arity(new_node));
528 * Copies all ASM attributes stored in old node to the new node
530 static void ASM_copy_attr(ir_graph *irg, const ir_node *old_node,
533 default_copy_attr(irg, old_node, new_node);
534 struct obstack *const obst = get_irg_obstack(irg);
535 new_node->attr.assem.input_constraints = DUP_ARR_D(ir_asm_constraint, obst, old_node->attr.assem.input_constraints);
536 new_node->attr.assem.output_constraints = DUP_ARR_D(ir_asm_constraint, obst, old_node->attr.assem.output_constraints);
537 new_node->attr.assem.clobbers = DUP_ARR_D(ident*, obst, old_node->attr.assem.clobbers);
540 static void switch_copy_attr(ir_graph *irg, const ir_node *old_node,
543 const ir_switch_table *table = get_Switch_table(old_node);
544 new_node->attr.switcha.table = ir_switch_table_duplicate(irg, table);
545 new_node->attr.switcha.n_outs = old_node->attr.switcha.n_outs;
548 static void register_node_cmp_func(ir_op *op, node_cmp_attr_func func)
550 op->ops.node_cmp_attr = func;
553 static void register_node_hash_func(ir_op *op, hash_func func)
558 static void register_node_copy_attr_func(ir_op *op, copy_attr_func func)
560 op->ops.copy_attr = func;
/* Implemented in the generated file included at the bottom. */
static void generated_init_op(void);
static void generated_finish_op(void);
566 void firm_init_op(void)
568 opcodes = NEW_ARR_F(ir_op*, 0);
572 register_node_cmp_func(op_ASM, node_cmp_attr_ASM);
573 register_node_cmp_func(op_Alloc, node_cmp_attr_Alloc);
574 register_node_cmp_func(op_Builtin, node_cmp_attr_Builtin);
575 register_node_cmp_func(op_Call, node_cmp_attr_Call);
576 register_node_cmp_func(op_Cmp, node_cmp_attr_Cmp);
577 register_node_cmp_func(op_Confirm, node_cmp_attr_Confirm);
578 register_node_cmp_func(op_Const, node_cmp_attr_Const);
579 register_node_cmp_func(op_CopyB, node_cmp_attr_CopyB);
580 register_node_cmp_func(op_Div, node_cmp_attr_Div);
581 register_node_cmp_func(op_Dummy, node_cmp_attr_Dummy);
582 register_node_cmp_func(op_Free, node_cmp_attr_Free);
583 register_node_cmp_func(op_InstOf, node_cmp_attr_InstOf);
584 register_node_cmp_func(op_Load, node_cmp_attr_Load);
585 register_node_cmp_func(op_Mod, node_cmp_attr_Mod);
586 register_node_cmp_func(op_Phi, node_cmp_attr_Phi);
587 register_node_cmp_func(op_Proj, node_cmp_attr_Proj);
588 register_node_cmp_func(op_Sel, node_cmp_attr_Sel);
589 register_node_cmp_func(op_Store, node_cmp_attr_Store);
590 register_node_cmp_func(op_SymConst, node_cmp_attr_SymConst);
592 register_node_hash_func(op_Const, hash_Const);
593 register_node_hash_func(op_SymConst, hash_SymConst);
595 register_node_copy_attr_func(op_Call, call_copy_attr);
596 register_node_copy_attr_func(op_Block, block_copy_attr);
597 register_node_copy_attr_func(op_Phi, phi_copy_attr);
598 register_node_copy_attr_func(op_ASM, ASM_copy_attr);
599 register_node_copy_attr_func(op_Switch, switch_copy_attr);
601 ir_register_opt_node_ops();
602 ir_register_reassoc_node_ops();
603 ir_register_verify_node_ops();
606 void firm_finish_op(void)
609 generated_finish_op();
614 #include "gen_irop.c.inl"