2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation of opcode of intermediate operation.
23 * @author Christian Schaefer, Goetz Lindenmaier, Michael Beck
32 #include "irbackedge_t.h"
35 #include "irverify_t.h"
36 #include "reassoc_t.h"
41 static ir_op **opcodes;
42 /** the available next opcode */
43 static unsigned next_iro = iro_MaxOpcode;
45 void default_copy_attr(ir_graph *irg, const ir_node *old_node,
48 unsigned size = firm_add_node_size;
51 assert(get_irn_op(old_node) == get_irn_op(new_node));
52 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
55 /* copy additional node data */
56 memcpy(get_irn_data(new_node, void, size), get_irn_data(old_node, void, size), size);
61 * Copies all Call attributes stored in the old node to the new node.
63 static void call_copy_attr(ir_graph *irg, const ir_node *old_node,
66 default_copy_attr(irg, old_node, new_node);
67 remove_Call_callee_arr(new_node);
71 * Copies all Block attributes stored in the old node to the new node.
73 static void block_copy_attr(ir_graph *irg, const ir_node *old_node,
76 default_copy_attr(irg, old_node, new_node);
77 new_node->attr.block.irg.irg = irg;
78 new_node->attr.block.phis = NULL;
79 new_node->attr.block.cg_backedge = NULL;
80 new_node->attr.block.backedge = new_backedge_arr(irg->obst, get_irn_arity(new_node));
81 new_node->attr.block.block_visited = 0;
82 memset(&new_node->attr.block.dom, 0, sizeof(new_node->attr.block.dom));
83 memset(&new_node->attr.block.pdom, 0, sizeof(new_node->attr.block.pdom));
84 /* It should be safe to copy the entity here, as it has no back-link to the old block.
85 * It serves just as a label number, so copying a labeled block results in an exact copy.
86 * This is at least what we need for DCE to work. */
87 new_node->attr.block.entity = old_node->attr.block.entity;
88 new_node->attr.block.phis = NULL;
89 INIT_LIST_HEAD(&new_node->attr.block.succ_head);
93 * Copies all phi attributes stored in old node to the new node
95 static void phi_copy_attr(ir_graph *irg, const ir_node *old_node,
98 default_copy_attr(irg, old_node, new_node);
99 new_node->attr.phi.next = NULL;
100 new_node->attr.phi.u.backedge = new_backedge_arr(irg->obst, get_irn_arity(new_node));
104 * Copies all ASM attributes stored in old node to the new node
106 static void ASM_copy_attr(ir_graph *irg, const ir_node *old_node,
109 default_copy_attr(irg, old_node, new_node);
110 new_node->attr.assem.input_constraints = DUP_ARR_D(ir_asm_constraint, irg->obst, old_node->attr.assem.input_constraints);
111 new_node->attr.assem.output_constraints = DUP_ARR_D(ir_asm_constraint, irg->obst, old_node->attr.assem.output_constraints);
112 new_node->attr.assem.clobbers = DUP_ARR_D(ident*, irg->obst, old_node->attr.assem.clobbers);
115 static void switch_copy_attr(ir_graph *irg, const ir_node *old_node,
118 const ir_switch_table *table = get_Switch_table(old_node);
119 new_node->attr.switcha.table = ir_switch_table_duplicate(irg, table);
120 new_node->attr.switcha.n_outs = old_node->attr.switcha.n_outs;
124 * Sets the default copy_attr operation for an ir_ops
126 * @param code the opcode for the default operation
127 * @param ops the operations initialized
132 static void firm_set_default_copy_attr(unsigned code, ir_op_ops *ops)
135 case iro_Call: ops->copy_attr = call_copy_attr; break;
136 case iro_Block: ops->copy_attr = block_copy_attr; break;
137 case iro_Phi: ops->copy_attr = phi_copy_attr; break;
138 case iro_ASM: ops->copy_attr = ASM_copy_attr; break;
139 case iro_Switch: ops->copy_attr = switch_copy_attr; break;
141 if (ops->copy_attr == NULL)
142 ops->copy_attr = default_copy_attr;
147 * Sets the default operation for an ir_ops.
149 static void set_default_operations(unsigned code, ir_op_ops *ops)
151 firm_set_default_hash(code, ops);
152 firm_set_default_computed_value(code, ops);
153 firm_set_default_equivalent_node(code, ops);
154 firm_set_default_transform_node(code, ops);
155 firm_set_default_node_cmp_attr(code, ops);
156 firm_set_default_get_type_attr(code, ops);
157 firm_set_default_get_entity_attr(code, ops);
158 firm_set_default_copy_attr(code, ops);
159 firm_set_default_verifier(code, ops);
160 firm_set_default_reassoc(code, ops);
163 ir_op *new_ir_op(unsigned code, const char *name, op_pin_state p,
164 unsigned flags, op_arity opar, int op_index, size_t attr_size,
165 const ir_op_ops *ops)
167 ir_op *res = XMALLOCZ(ir_op);
170 res->name = new_id_from_chars(name, strlen(name));
172 res->attr_size = attr_size;
175 res->op_index = op_index;
180 else /* no given ops, set all operations to NULL */
181 memset(&res->ops, 0, sizeof(res->ops));
183 set_default_operations(code, &res->ops);
186 size_t len = ARR_LEN(opcodes);
187 if ((size_t)code >= len) {
188 ARR_RESIZE(ir_op*, opcodes, (size_t)code+1);
189 memset(&opcodes[len], 0, (code-len+1) * sizeof(opcodes[0]));
191 if (opcodes[code] != NULL)
192 panic("opcode registered twice");
200 void free_ir_op(ir_op *code)
202 hook_free_ir_op(code);
204 assert(opcodes[code->code] == code);
205 opcodes[code->code] = NULL;
210 unsigned ir_get_n_opcodes(void)
212 return ARR_LEN(opcodes);
215 ir_op *ir_get_opcode(unsigned code)
217 assert((size_t)code < ARR_LEN(opcodes));
218 return opcodes[code];
221 void ir_clear_opcodes_generic_func(void)
223 size_t n = ir_get_n_opcodes();
226 for (i = 0; i < n; ++i) {
227 ir_op *op = ir_get_opcode(i);
229 op->ops.generic = (op_func)NULL;
233 void ir_op_set_memory_index(ir_op *op, int memory_index)
235 assert(op->flags & irop_flag_uses_memory);
236 op->memory_index = memory_index;
239 void ir_op_set_fragile_indices(ir_op *op, int pn_x_regular, int pn_x_except)
241 assert(op->flags & irop_flag_fragile);
242 op->pn_x_regular = pn_x_regular;
243 op->pn_x_except = pn_x_except;
246 const char *get_op_name (const ir_op *op)
248 return get_id_str(op->name);
251 unsigned (get_op_code)(const ir_op *op)
253 return get_op_code_(op);
256 ident *(get_op_ident)(const ir_op *op)
258 return get_op_ident_(op);
261 const char *get_op_pin_state_name(op_pin_state s)
264 #define XXX(s) case s: return #s
265 XXX(op_pin_state_floats);
266 XXX(op_pin_state_pinned);
267 XXX(op_pin_state_exc_pinned);
268 XXX(op_pin_state_mem_pinned);
274 op_pin_state (get_op_pinned)(const ir_op *op)
276 return get_op_pinned_(op);
279 void set_op_pinned(ir_op *op, op_pin_state pinned)
281 if (op == op_Block || op == op_Phi || is_op_cfopcode(op)) return;
282 op->pin_state = pinned;
285 unsigned get_next_ir_opcode(void)
290 unsigned get_next_ir_opcodes(unsigned num)
292 unsigned base = next_iro;
297 op_func (get_generic_function_ptr)(const ir_op *op)
299 return get_generic_function_ptr_(op);
302 void (set_generic_function_ptr)(ir_op *op, op_func func)
304 set_generic_function_ptr_(op, func);
307 const ir_op_ops *(get_op_ops)(const ir_op *op)
309 return get_op_ops_(op);
312 irop_flags get_op_flags(const ir_op *op)
314 return (irop_flags)op->flags;
/* implemented by the generated code included at the end of this file */
static void generated_init_op(void);
static void generated_finish_op(void);
320 void firm_init_op(void)
322 opcodes = NEW_ARR_F(ir_op*, 0);
327 void firm_finish_op(void)
330 generated_finish_op();
335 #include "gen_irop.c.inl"