unsigned flags, op_arity opar, int op_index,
size_t attr_size, const ir_op_ops *ops);
+/** Returns one more than the highest opcode code in use. */
+FIRM_API unsigned ir_get_n_opcodes(void);
+
+/**
+ * Returns the opcode with code @p code.
+ *
+ * @p code has to be smaller than ir_get_n_opcodes(), returns NULL if
+ * no opcode with the code exists.
+ */
+FIRM_API ir_op *ir_get_opcode(unsigned code);
+
+/** Sets the generic function pointer of all opcodes to NULL */
+FIRM_API void ir_clear_opcodes_generic_func(void);
+
/**
* Sets memory input of operation using memory
*/
/** Returns the mode at position pos in the irp. */
FIRM_API ir_mode *get_irp_mode(size_t pos);
-/** Adds opcode to the list of opcodes in irp. */
-FIRM_API void add_irp_opcode(ir_op *opcode);
-
-/** Removes opcode from the list of opcodes, deallocates it and
- shrinks the list by one. */
-FIRM_API void remove_irp_opcode(ir_op *opcode);
-
-/** Returns the number of all opcodes in the irp. */
-FIRM_API size_t get_irp_n_opcodes(void);
-
-/** Returns the opcode at position pos in the irp. */
-FIRM_API ir_op *get_irp_opcode(size_t pos);
-
-/** Sets the generic function pointer of all opcodes to NULL */
-FIRM_API void clear_irp_opcodes_generic_func(void);
-
/** Returns the graph for global constants of the current irp.
*
* Returns an irgraph that only contains constant expressions for
static void TEMPLATE_register_emitters(void)
{
/* first clear the generic function pointer for all ops */
- clear_irp_opcodes_generic_func();
+ ir_clear_opcodes_generic_func();
/* register all emitter functions defined in spec */
TEMPLATE_register_spec_emitters();
TEMPLATE_create_opcodes(&TEMPLATE_irn_ops);
}
+static void TEMPLATE_finish(void)
+{
+ TEMPLATE_free_opcodes();
+}
+
static arch_env_t *TEMPLATE_begin_codegeneration(const be_main_env_t *env)
{
TEMPLATE_isa_t *isa = XMALLOC(TEMPLATE_isa_t);
const arch_isa_if_t TEMPLATE_isa_if = {
TEMPLATE_init,
+ TEMPLATE_finish,
TEMPLATE_get_backend_params,
TEMPLATE_lower_for_target,
TEMPLATE_parse_asm_constraint,
static void amd64_register_emitters(void)
{
/* first clear the generic function pointer for all ops */
- clear_irp_opcodes_generic_func();
+ ir_clear_opcodes_generic_func();
/* register all emitter functions defined in spec */
amd64_register_spec_emitters();
amd64_create_opcodes(&amd64_irn_ops);
}
+static void amd64_finish(void)
+{
+ amd64_free_opcodes();
+}
+
static arch_env_t *amd64_begin_codegeneration(const be_main_env_t *env)
{
amd64_isa_t *isa = XMALLOC(amd64_isa_t);
const arch_isa_if_t amd64_isa_if = {
amd64_init,
+ amd64_finish,
amd64_get_backend_params,
amd64_lower_for_target,
amd64_parse_asm_constraint,
static void arm_register_emitters(void)
{
/* first clear the generic function pointer for all ops */
- clear_irp_opcodes_generic_func();
+ ir_clear_opcodes_generic_func();
/* register all emitter functions defined in spec */
arm_register_spec_emitters();
void arm_peephole_optimization(ir_graph *irg)
{
/* register peephole optimizations */
- clear_irp_opcodes_generic_func();
+ ir_clear_opcodes_generic_func();
register_peephole_optimisation(op_be_IncSP, peephole_be_IncSP);
register_peephole_optimisation(op_arm_Str, peephole_arm_Str_Ldr);
register_peephole_optimisation(op_arm_Ldr, peephole_arm_Str_Ldr);
arm_create_opcodes(&arm_irn_ops);
}
+static void arm_finish(void)
+{
+ arm_free_opcodes();
+}
+
static arch_env_t *arm_begin_codegeneration(const be_main_env_t *env)
{
arm_isa_t *isa = XMALLOC(arm_isa_t);
const arch_isa_if_t arm_isa_if = {
arm_init,
+ arm_finish,
arm_get_libfirm_params,
arm_lower_for_target,
arm_parse_asm_constraint,
*/
void (*init)(void);
+ /**
+	 * Frees resources allocated by this isa interface.
+ */
+ void (*finish)(void);
+
/**
* Returns the frontend settings needed for this backend.
*/
* Called directly before done is called. This should be the last place
* where the irg is modified.
*/
- void (*finish)(ir_graph *irg);
+ void (*finish_graph)(ir_graph *irg);
/**
* Called after everything happened. This call should emit the final
if (isa_initialized)
return;
isa_if->init();
+ isa_initialized = true;
+}
+
+static void finish_isa(void)
+{
+ if (isa_initialized) {
+ isa_if->finish();
+ isa_initialized = false;
+ }
}
void be_init_default_asm_constraint_flags(void)
/* Finalize the Firm backend. */
void firm_be_finish(void)
{
+ finish_isa();
be_quit_modules();
}
dump(DUMP_RA, irg, "ra");
be_timer_push(T_FINISH);
- if (arch_env->impl->finish != NULL)
- arch_env->impl->finish(irg);
+ if (arch_env->impl->finish_graph != NULL)
+ arch_env->impl->finish_graph(irg);
be_timer_pop(T_FINISH);
dump(DUMP_FINAL, irg, "finish");
{
unsigned opc;
+ assert(op_be_Spill == NULL);
+
/* Acquire all needed opcodes. */
op_be_Spill = new_ir_op(beo_Spill, "be_Spill", op_pin_state_exc_pinned, irop_flag_none, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
op_be_Reload = new_ir_op(beo_Reload, "be_Reload", op_pin_state_exc_pinned, irop_flag_none, oparity_zero, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
/* attach out dummy_ops to middle end nodes */
for (opc = iro_First; opc <= iro_Last; ++opc) {
- ir_op *op = get_irp_opcode(opc);
+ ir_op *op = ir_get_opcode(opc);
assert(op->ops.be_ops == NULL);
op->ops.be_ops = &dummy_be_irn_ops;
}
op_Phi->ops.be_ops = &phi_irn_ops;
}
+
+void be_finish_op(void)
+{
+ free_ir_op(op_be_Spill); op_be_Spill = NULL;
+ free_ir_op(op_be_Reload); op_be_Reload = NULL;
+ free_ir_op(op_be_Perm); op_be_Perm = NULL;
+ free_ir_op(op_be_MemPerm); op_be_MemPerm = NULL;
+ free_ir_op(op_be_Copy); op_be_Copy = NULL;
+ free_ir_op(op_be_Keep); op_be_Keep = NULL;
+ free_ir_op(op_be_CopyKeep); op_be_CopyKeep = NULL;
+ free_ir_op(op_be_Call); op_be_Call = NULL;
+ free_ir_op(op_be_Return); op_be_Return = NULL;
+ free_ir_op(op_be_IncSP); op_be_IncSP = NULL;
+ free_ir_op(op_be_AddSP); op_be_AddSP = NULL;
+ free_ir_op(op_be_SubSP); op_be_SubSP = NULL;
+ free_ir_op(op_be_Start); op_be_Start = NULL;
+ free_ir_op(op_be_FrameAddr); op_be_FrameAddr = NULL;
+}
*/
void be_init_op(void);
+void be_finish_op(void);
+
/**
* Position numbers for the be_Spill inputs.
*/
void be_start_transform_setup(void)
{
- clear_irp_opcodes_generic_func();
+ ir_clear_opcodes_generic_func();
be_set_transform_function(op_Bad, be_duplicate_node);
be_set_transform_function(op_be_Copy, be_duplicate_node);
* virtual with real x87 instructions, creating a block schedule and peephole
* optimisations.
*/
-static void ia32_finish(ir_graph *irg)
+static void ia32_finish_graph(ir_graph *irg)
{
ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
init_asm_constraints();
- ia32_register_init();
- ia32_create_opcodes(&ia32_irn_ops);
-
ia32_mode_fpcw = new_int_mode("Fpcw", irma_twos_complement, 16, 0, 0);
/* note mantissa is 64bit but with explicitely encoded 1 so the really
ia32_backend_params.mode_float_arithmetic = ia32_mode_E;
ia32_backend_params.type_long_double = ia32_type_E;
}
+
+ ia32_register_init();
+ ia32_create_opcodes(&ia32_irn_ops);
+}
+
+static void ia32_finish(void)
+{
+ ia32_free_opcodes();
}
/**
&intrinsic_env,
};
- ia32_create_opcodes(&ia32_irn_ops);
-
/* lower compound param handling
* Note: we lower compound arguments ourself, since on ia32 we don't
* have hidden parameters but know where to find the structs on the stack.
const arch_isa_if_t ia32_isa_if = {
ia32_init,
+ ia32_finish,
ia32_get_libfirm_params,
ia32_lower_for_target,
ia32_parse_asm_constraint,
ia32_before_abi, /* before abi introduce hook */
ia32_prepare_graph,
ia32_before_ra, /* before register allocation hook */
- ia32_finish, /* called before codegen */
+ ia32_finish_graph, /* called before codegen */
ia32_emit, /* emit && done */
};
#define BE_IGN(a) op_be_##a->ops.generic = (op_func)emit_Nothing
/* first clear the generic function pointer for all ops */
- clear_irp_opcodes_generic_func();
+ ir_clear_opcodes_generic_func();
/* register all emitter functions defined in spec */
ia32_register_spec_emitters();
static void ia32_register_binary_emitters(void)
{
/* first clear the generic function pointer for all ops */
- clear_irp_opcodes_generic_func();
+ ir_clear_opcodes_generic_func();
/* benode emitter */
register_emitter(op_be_Copy, bemit_copy);
*/
/* pass 1 */
- clear_irp_opcodes_generic_func();
+ ir_clear_opcodes_generic_func();
register_peephole_optimisation(op_ia32_Cmp, peephole_ia32_Cmp);
register_peephole_optimisation(op_ia32_Cmp8Bit, peephole_ia32_Cmp);
register_peephole_optimisation(op_ia32_Lea, peephole_ia32_Lea);
be_peephole_opt(irg);
/* pass 2 */
- clear_irp_opcodes_generic_func();
+ ir_clear_opcodes_generic_func();
register_peephole_optimisation(op_ia32_Const, peephole_ia32_Const);
register_peephole_optimisation(op_be_IncSP, peephole_be_IncSP);
register_peephole_optimisation(op_ia32_Test, peephole_ia32_Test);
"x87 Simulator started for %+F\n", irg));
/* set the generic function pointer of instruction we must simulate */
- clear_irp_opcodes_generic_func();
+ ir_clear_opcodes_generic_func();
register_sim(op_ia32_Call, sim_Call);
register_sim(op_ia32_vfld, sim_fld);
my @obst_get_opvar; # stack for the get_op_<arch>_<op-name>() functions
my $obst_constructor; # stack for node constructor functions
my @obst_new_irop; # stack for the new_ir_op calls
+my @obst_free_irop; # stack for free_ir_op calls
my @obst_enum_op; # stack for creating the <arch>_opcode enum
my $obst_header; # stack for function prototypes
my @obst_is_archirn; # stack for the is_$arch_irn() function
$n_opcodes += $additional_opcodes if (defined($additional_opcodes));
$obst_header .= "void ${arch}_create_opcodes(const arch_irn_ops_t *be_ops);\n";
+$obst_header .= "void ${arch}_free_opcodes(void);\n";
sub create_constructor {
my $op = shift;
push(@obst_new_irop, "\tset_op_attr(op_$op, attr);\n");
}
+ push(@obst_free_irop, "\tfree_ir_op(op_$op); op_$op = NULL;\n");
+
push(@obst_enum_op, "\tiro_$op,\n");
$obst_header .= "\n";
* Creates the $arch specific Firm machine operations
* needed for the assembler irgs.
*/
-void $arch\_create_opcodes(const arch_irn_ops_t *be_ops) {
+void $arch\_create_opcodes(const arch_irn_ops_t *be_ops)
+{
ir_op_ops ops;
int cur_opcode;
- static int run_once = 0;
ENDOFMAIN
if (defined($default_op_attr_type)) {
print OUT<<ENDOFMAIN;
- if (run_once)
- return;
- run_once = 1;
-
cur_opcode = get_next_ir_opcodes(iro_$arch\_last);
$arch\_opcode_start = cur_opcode;
print OUT "\t$arch\_opcode_end = cur_opcode + iro_$arch\_last";
print OUT " + $additional_opcodes" if (defined($additional_opcodes));
print OUT ";\n";
-print OUT "}\n";
+print OUT <<ENDOFMAIN;
+}
+
+void $arch\_free_opcodes(void)
+{
+ENDOFMAIN
+
+print OUT @obst_free_irop;
+
+print OUT <<ENDOFMAIN;
+}
+ENDOFMAIN
close(OUT);
sparc_cconv_init();
}
+static void sparc_finish(void)
+{
+ sparc_free_opcodes();
+}
+
static arch_env_t *sparc_begin_codegeneration(const be_main_env_t *env)
{
sparc_isa_t *isa = XMALLOC(sparc_isa_t);
const arch_isa_if_t sparc_isa_if = {
sparc_init,
+ sparc_finish,
sparc_get_backend_params,
sparc_lower_for_target,
sparc_parse_asm_constraint,
NULL, /* before_abi */
sparc_prepare_graph,
sparc_before_ra,
- sparc_finish,
+ sparc_finish_graph,
sparc_emit_routine,
};
return SPARC_IMMEDIATE_MIN <= value && value <= SPARC_IMMEDIATE_MAX;
}
-void sparc_finish(ir_graph *irg);
+void sparc_finish_graph(ir_graph *irg);
void sparc_introduce_prolog_epilog(ir_graph *irg);
static void sparc_register_emitters(void)
{
/* first clear the generic function pointer for all ops */
- clear_irp_opcodes_generic_func();
+ ir_clear_opcodes_generic_func();
/* register all emitter functions defined in spec */
sparc_register_spec_emitters();
}
}
-void sparc_finish(ir_graph *irg)
+void sparc_finish_graph(ir_graph *irg)
{
be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
bool at_begin = stack_layout->sp_relative ? true : false;
heights = heights_new(irg);
/* perform peephole optimizations */
- clear_irp_opcodes_generic_func();
+ ir_clear_opcodes_generic_func();
register_peephole_optimisation(op_be_IncSP, peephole_be_IncSP);
register_peephole_optimisation(op_sparc_FrameAddr, peephole_sparc_FrameAddr);
register_peephole_optimisation(op_sparc_RestoreZero,
be_peephole_opt(irg);
/* perform legalizations (mostly fix nodes with too big immediates) */
- clear_irp_opcodes_generic_func();
+ ir_clear_opcodes_generic_func();
register_peephole_optimisation(op_be_IncSP, finish_be_IncSP);
register_peephole_optimisation(op_sparc_FrameAddr, finish_sparc_FrameAddr);
register_peephole_optimisation(op_sparc_Ld, finish_sparc_Ld);
init_mode();
/* initialize tarvals, and floating point arithmetic */
init_tarval_2();
+ /* initialize node opcodes */
+ firm_init_op();
/* init graph construction */
firm_init_irgraph();
/* kind of obstack initialization */
firm_init_mangle();
- /* initialize all op codes an irnode can consist of */
- init_op();
/* initialize reassociation */
firm_init_reassociation();
/* initialize function call optimization */
#ifdef DEBUG_libfirm
firm_finish_debugger();
#endif
- free_ir_prog();
+ firm_be_finish();
+ free_ir_prog();
+ firm_finish_op();
finish_tarval();
finish_mode();
finish_tpop();
firm_finish_mangle();
finish_ident();
-
- firm_be_finish();
}
unsigned ir_get_version_major(void)
static void writers_init(void)
{
- clear_irp_opcodes_generic_func();
+ ir_clear_opcodes_generic_func();
register_node_writer(op_Anchor, write_Anchor);
register_node_writer(op_ASM, write_ASM);
register_node_writer(op_Block, write_Block);
#include "reassoc_t.h"
#include "xmalloc.h"
+#include "benode.h"
-void be_init_op(void);
-
+static ir_op **opcodes;
/** the available next opcode */
static unsigned next_iro = iro_MaxOpcode;
set_default_operations(code, &res->ops);
- add_irp_opcode(res);
+ {
+ size_t len = ARR_LEN(opcodes);
+ if ((size_t)code >= len) {
+ ARR_RESIZE(ir_op*, opcodes, (size_t)code+1);
+ memset(&opcodes[len], 0, (code-len+1) * sizeof(opcodes[0]));
+ }
+ if (opcodes[code] != NULL)
+ panic("opcode registered twice");
+ opcodes[code] = res;
+ }
hook_new_ir_op(res);
return res;
{
hook_free_ir_op(code);
- remove_irp_opcode(code);
+ assert(opcodes[code->code] == code);
+ opcodes[code->code] = NULL;
+
free(code);
}
+unsigned ir_get_n_opcodes(void)
+{
+ return ARR_LEN(opcodes);
+}
+
+ir_op *ir_get_opcode(unsigned code)
+{
+ assert((size_t)code < ARR_LEN(opcodes));
+ return opcodes[code];
+}
+
+void ir_clear_opcodes_generic_func(void)
+{
+ size_t n = ir_get_n_opcodes();
+ size_t i;
+
+ for (i = 0; i < n; ++i) {
+ ir_op *op = ir_get_opcode(i);
+ if (op != NULL)
+ op->ops.generic = (op_func)NULL;
+ }
+}
+
void ir_op_set_memory_index(ir_op *op, int memory_index)
{
assert(op->flags & irop_flag_uses_memory);
return (irop_flags)op->flags;
}
+static void generated_init_op(void);
+static void generated_finish_op(void);
+
+void firm_init_op(void)
+{
+ opcodes = NEW_ARR_F(ir_op*, 0);
+ generated_init_op();
+ be_init_op();
+}
+
+void firm_finish_op(void)
+{
+ be_finish_op();
+ generated_finish_op();
+ DEL_ARR_F(opcodes);
+ opcodes = NULL;
+}
+
#include "gen_irop.c.inl"
void free_ir_op(ir_op *code);
/** Initialize the irop module. */
-void init_op(void);
+void firm_init_op(void);
-/** Free memory used by irop module. */
-void finish_op(void);
+/** frees memory allocated by irop module */
+void firm_finish_op(void);
/**
* Copies simply all attributes stored in the old node to the new node.
res->graphs = NEW_ARR_F(ir_graph *, 0);
res->types = NEW_ARR_F(ir_type *, 0);
res->modes = NEW_ARR_F(ir_mode *, 0);
- res->opcodes = NEW_ARR_F(ir_op *, 0);
res->global_asms = NEW_ARR_F(ident *, 0);
res->last_label_nr = 1; /* 0 is reserved as non-label */
res->max_irg_idx = 0;
for (i = get_irp_n_types(); i > 0;)
free_type_entities(get_irp_type(--i));
+ ir_finish_entity(irp);
+
for (i = get_irp_n_types(); i > 0;)
free_type(get_irp_type(--i));
free_ir_graph(irp->const_code_irg);
+
+ ir_finish_type(irp);
+
DEL_ARR_F(irp->graphs);
DEL_ARR_F(irp->types);
DEL_ARR_F(irp->modes);
- finish_op();
- DEL_ARR_F(irp->opcodes);
DEL_ARR_F(irp->global_asms);
irp->name = NULL;
irp->const_code_irg = NULL;
irp->kind = k_BAD;
+ free(irp);
+ irp = NULL;
}
ir_graph *get_irp_main_irg(void)
ARR_APP1(ir_mode *, irp->modes, mode);
}
-void add_irp_opcode(ir_op *opcode)
-{
- size_t len;
- size_t code;
- assert(opcode != NULL);
- assert(irp);
- len = ARR_LEN(irp->opcodes);
- code = opcode->code;
- if (code >= len) {
- ARR_RESIZE(ir_op*, irp->opcodes, code+1);
- memset(&irp->opcodes[len], 0, (code-len+1) * sizeof(irp->opcodes[0]));
- }
-
- assert(irp->opcodes[code] == NULL && "opcode registered twice");
- irp->opcodes[code] = opcode;
-}
-
-void remove_irp_opcode(ir_op *opcode)
-{
- assert(opcode->code < ARR_LEN(irp->opcodes));
- irp->opcodes[opcode->code] = NULL;
-}
-
-size_t (get_irp_n_opcodes)(void)
-{
- return get_irp_n_opcodes_();
-}
-
-ir_op *(get_irp_opcode)(size_t pos)
-{
- return get_irp_opcode_(pos);
-}
-
-void clear_irp_opcodes_generic_func(void)
-{
- size_t i, n;
-
- for (i = 0, n = get_irp_n_opcodes(); i < n; ++i) {
- ir_op *op = get_irp_opcode(i);
- op->ops.generic = (op_func)NULL;
- }
-}
-
void set_irp_prog_name(ident *name)
{
irp->name = name;
return irp->modes[pos];
}
-static inline size_t get_irp_n_opcodes_(void)
-{
- assert(irp && irp->opcodes);
- return ARR_LEN(irp->opcodes);
-}
-
-static inline ir_op *get_irp_opcode_(size_t pos)
-{
- assert(irp && irp->opcodes);
- return irp->opcodes[pos];
-}
-
/** Returns a new, unique number to number nodes or the like. */
static inline long get_irp_new_node_nr(void)
{
#define get_irp_type(pos) get_irp_type_(pos)
#define get_irp_n_modes() get_irp_n_modes_()
#define get_irp_mode(pos) get_irp_mode_(pos)
-#define get_irp_n_opcodes() get_irp_n_opcodes_()
-#define get_irp_opcode(pos) get_irp_opcode_(pos)
#define get_const_code_irg() get_const_code_irg_()
#define get_segment_type(s) get_segment_type_(s)
#define get_glob_type() get_glob_type_()
ir_type *code_type; /**< unique 'code'-type */
ir_type *unknown_type; /**< unique 'unknown'-type */
ir_mode **modes; /**< A list of all modes in the ir. */
- ir_op **opcodes; /**< A list of all opcodes in the ir. */
ident **global_asms; /**< An array of global ASM insertions. */
/* -- states of and access to generated information -- */
param = new_param;
- clear_irp_opcodes_generic_func();
+ ir_clear_opcodes_generic_func();
ir_register_dw_lower_function(op_ASM, lower_ASM);
ir_register_dw_lower_function(op_Add, lower_binop);
ir_register_dw_lower_function(op_And, lower_And);
size_t lower_intrinsics(i_record *list, size_t length, int part_block_used)
{
size_t i, n;
- size_t n_ops = get_irp_n_opcodes();
+ size_t n_ops = ir_get_n_opcodes();
ir_graph *irg;
pmap *c_map = pmap_create_ex(length);
i_instr_record **i_map;
ir_prepare_softfloat_lowering();
- clear_irp_opcodes_generic_func();
+ ir_clear_opcodes_generic_func();
ir_register_softloat_lower_function(op_Add, lower_Add);
ir_register_softloat_lower_function(op_Cmp, lower_Cmp);
ir_register_softloat_lower_function(op_Conv, lower_Conv);
ir_nodeset_destroy(&created_mux_nodes);
}
- clear_irp_opcodes_generic_func();
+ ir_clear_opcodes_generic_func();
ir_register_softloat_lower_function(op_Call, lower_Call);
ir_register_softloat_lower_function(op_Const, lower_Const);
ir_register_softloat_lower_function(op_Div, lower_Div_mode);
size_t i, n;
/* set the default compute function */
- for (i = 0, n = get_irp_n_opcodes(); i < n; ++i) {
- ir_op *op = get_irp_opcode(i);
+ for (i = 0, n = ir_get_n_opcodes(); i < n; ++i) {
+ ir_op *op = ir_get_opcode(i);
op->ops.generic = (op_func)default_compute;
}
ir_op *op_{{node.name}}; ir_op *get_op_{{node.name}}(void) { return op_{{node.name}}; }
{%- endfor %}
-void init_op(void)
+static void generated_init_op(void)
{
- {% for node in nodes %}
+ {%- for node in nodes %}
op_{{node.name}} = new_ir_op(
{%- filter arguments %}
iro_{{node.name}}
ir_op_set_fragile_indices(op_{{node.name}}, pn_{{node.name}}_X_regular, pn_{{node.name}}_X_except);
{%- endif -%}
{%- endfor %}
-
- be_init_op();
}
-void finish_op(void)
+static void generated_finish_op(void)
{
- {% for node in nodes %}
+ {%- for node in nodes %}
free_ir_op(op_{{node.name}}); op_{{node.name}} = NULL;
{%- endfor %}
}