/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Representation of opcode of intermediate operation.
 * @author  Christian Schaefer, Goetz Lindenmaier, Michael Beck
 */
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include "irbackedge_t.h"
#include "irverify_t.h"
#include "reassoc_t.h"
41 static ir_op **opcodes;
42 /** the available next opcode */
43 static unsigned next_iro = iro_MaxOpcode;
45 void default_copy_attr(ir_graph *irg, const ir_node *old_node,
48 unsigned size = firm_add_node_size;
51 assert(get_irn_op(old_node) == get_irn_op(new_node));
52 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
55 /* copy additional node data */
56 memcpy(get_irn_data(new_node, void, size), get_irn_data(old_node, void, size), size);
61 * Copies all Call attributes stored in the old node to the new node.
63 static void call_copy_attr(ir_graph *irg, const ir_node *old_node,
66 default_copy_attr(irg, old_node, new_node);
67 remove_Call_callee_arr(new_node);
71 * Copies all Block attributes stored in the old node to the new node.
73 static void block_copy_attr(ir_graph *irg, const ir_node *old_node,
76 default_copy_attr(irg, old_node, new_node);
77 new_node->attr.block.irg.irg = irg;
78 new_node->attr.block.phis = NULL;
79 new_node->attr.block.backedge = new_backedge_arr(irg->obst, get_irn_arity(new_node));
80 new_node->attr.block.block_visited = 0;
81 memset(&new_node->attr.block.dom, 0, sizeof(new_node->attr.block.dom));
82 memset(&new_node->attr.block.pdom, 0, sizeof(new_node->attr.block.pdom));
83 /* It should be safe to copy the entity here, as it has no back-link to the old block.
84 * It serves just as a label number, so copying a labeled block results in an exact copy.
85 * This is at least what we need for DCE to work. */
86 new_node->attr.block.entity = old_node->attr.block.entity;
87 new_node->attr.block.phis = NULL;
88 INIT_LIST_HEAD(&new_node->attr.block.succ_head);
92 * Copies all phi attributes stored in old node to the new node
94 static void phi_copy_attr(ir_graph *irg, const ir_node *old_node,
97 default_copy_attr(irg, old_node, new_node);
98 new_node->attr.phi.next = NULL;
99 new_node->attr.phi.u.backedge = new_backedge_arr(irg->obst, get_irn_arity(new_node));
103 * Copies all ASM attributes stored in old node to the new node
105 static void ASM_copy_attr(ir_graph *irg, const ir_node *old_node,
108 default_copy_attr(irg, old_node, new_node);
109 new_node->attr.assem.input_constraints = DUP_ARR_D(ir_asm_constraint, irg->obst, old_node->attr.assem.input_constraints);
110 new_node->attr.assem.output_constraints = DUP_ARR_D(ir_asm_constraint, irg->obst, old_node->attr.assem.output_constraints);
111 new_node->attr.assem.clobbers = DUP_ARR_D(ident*, irg->obst, old_node->attr.assem.clobbers);
114 static void switch_copy_attr(ir_graph *irg, const ir_node *old_node,
117 const ir_switch_table *table = get_Switch_table(old_node);
118 new_node->attr.switcha.table = ir_switch_table_duplicate(irg, table);
119 new_node->attr.switcha.n_outs = old_node->attr.switcha.n_outs;
123 * Sets the default copy_attr operation for an ir_ops
125 * @param code the opcode for the default operation
126 * @param ops the operations initialized
131 static void firm_set_default_copy_attr(unsigned code, ir_op_ops *ops)
134 case iro_Call: ops->copy_attr = call_copy_attr; break;
135 case iro_Block: ops->copy_attr = block_copy_attr; break;
136 case iro_Phi: ops->copy_attr = phi_copy_attr; break;
137 case iro_ASM: ops->copy_attr = ASM_copy_attr; break;
138 case iro_Switch: ops->copy_attr = switch_copy_attr; break;
140 if (ops->copy_attr == NULL)
141 ops->copy_attr = default_copy_attr;
146 * Sets the default operation for an ir_ops.
148 static void set_default_operations(unsigned code, ir_op_ops *ops)
150 firm_set_default_hash(code, ops);
151 firm_set_default_computed_value(code, ops);
152 firm_set_default_equivalent_node(code, ops);
153 firm_set_default_transform_node(code, ops);
154 firm_set_default_node_cmp_attr(code, ops);
155 firm_set_default_get_type_attr(code, ops);
156 firm_set_default_get_entity_attr(code, ops);
157 firm_set_default_copy_attr(code, ops);
158 firm_set_default_verifier(code, ops);
159 firm_set_default_reassoc(code, ops);
162 ir_op *new_ir_op(unsigned code, const char *name, op_pin_state p,
163 unsigned flags, op_arity opar, int op_index, size_t attr_size,
164 const ir_op_ops *ops)
166 ir_op *res = XMALLOCZ(ir_op);
169 res->name = new_id_from_chars(name, strlen(name));
171 res->attr_size = attr_size;
174 res->op_index = op_index;
179 else /* no given ops, set all operations to NULL */
180 memset(&res->ops, 0, sizeof(res->ops));
182 set_default_operations(code, &res->ops);
185 size_t len = ARR_LEN(opcodes);
186 if ((size_t)code >= len) {
187 ARR_RESIZE(ir_op*, opcodes, (size_t)code+1);
188 memset(&opcodes[len], 0, (code-len+1) * sizeof(opcodes[0]));
190 if (opcodes[code] != NULL)
191 panic("opcode registered twice");
199 void free_ir_op(ir_op *code)
201 hook_free_ir_op(code);
203 assert(opcodes[code->code] == code);
204 opcodes[code->code] = NULL;
209 unsigned ir_get_n_opcodes(void)
211 return ARR_LEN(opcodes);
214 ir_op *ir_get_opcode(unsigned code)
216 assert((size_t)code < ARR_LEN(opcodes));
217 return opcodes[code];
220 void ir_clear_opcodes_generic_func(void)
222 size_t n = ir_get_n_opcodes();
225 for (i = 0; i < n; ++i) {
226 ir_op *op = ir_get_opcode(i);
228 op->ops.generic = (op_func)NULL;
232 void ir_op_set_memory_index(ir_op *op, int memory_index)
234 assert(op->flags & irop_flag_uses_memory);
235 op->memory_index = memory_index;
238 void ir_op_set_fragile_indices(ir_op *op, int pn_x_regular, int pn_x_except)
240 assert(op->flags & irop_flag_fragile);
241 op->pn_x_regular = pn_x_regular;
242 op->pn_x_except = pn_x_except;
245 const char *get_op_name (const ir_op *op)
247 return get_id_str(op->name);
250 unsigned (get_op_code)(const ir_op *op)
252 return get_op_code_(op);
255 ident *(get_op_ident)(const ir_op *op)
257 return get_op_ident_(op);
260 const char *get_op_pin_state_name(op_pin_state s)
263 #define XXX(s) case s: return #s
264 XXX(op_pin_state_floats);
265 XXX(op_pin_state_pinned);
266 XXX(op_pin_state_exc_pinned);
267 XXX(op_pin_state_mem_pinned);
273 op_pin_state (get_op_pinned)(const ir_op *op)
275 return get_op_pinned_(op);
278 void set_op_pinned(ir_op *op, op_pin_state pinned)
280 if (op == op_Block || op == op_Phi || is_op_cfopcode(op)) return;
281 op->pin_state = pinned;
284 unsigned get_next_ir_opcode(void)
289 unsigned get_next_ir_opcodes(unsigned num)
291 unsigned base = next_iro;
296 op_func (get_generic_function_ptr)(const ir_op *op)
298 return get_generic_function_ptr_(op);
301 void (set_generic_function_ptr)(ir_op *op, op_func func)
303 set_generic_function_ptr_(op, func);
306 const ir_op_ops *(get_op_ops)(const ir_op *op)
308 return get_op_ops_(op);
311 irop_flags get_op_flags(const ir_op *op)
313 return (irop_flags)op->flags;
/* implemented by the generated code included at the end of this file */
static void generated_init_op(void);
static void generated_finish_op(void);
319 void firm_init_op(void)
321 opcodes = NEW_ARR_F(ir_op*, 0);
326 void firm_finish_op(void)
329 generated_finish_op();
334 #include "gen_irop.c.inl"