transform_to_Load(&tenv);
}
else if (be_is_Spill(node)) {
+ ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
/* we always spill the whole register */
tenv.dbg = get_irn_dbg_info(node);
tenv.irn = node;
- tenv.mode = fix_spill_mode(cg, get_irn_mode(be_get_Spill_context(node)));
+ tenv.mode = fix_spill_mode(cg, get_irn_mode(spillval));
transform_to_Store(&tenv);
}
}
}
}
+/**
+ * Last touchups for the graph before emit: runs ia32_finish_irg and
+ * optionally dumps the finished graph (when cg->dump is set).
+ */
+static void ia32_finish(void *self) {
+	ia32_code_gen_t *cg = self;
+	ir_graph *irg = cg->irg;
+
+	ia32_finish_irg(irg, cg);
+	if (cg->dump)
+		be_dump(irg, "-finished", dump_ir_block_graph_sched);
+}
/**
* Emits the code, closes the output file and frees
ia32_code_gen_t *cg = self;
ir_graph *irg = cg->irg;
- ia32_finish_irg(irg, cg);
- if (cg->dump)
- be_dump(irg, "-finished", dump_ir_block_graph_sched);
ia32_gen_routine(cg->isa->out, irg, cg);
cur_reg_set = NULL;
/* de-allocate code generator */
del_set(cg->reg_set);
free(self);
-
}
static void *ia32_cg_init(const be_irg_t *birg);
ia32_before_sched, /* before scheduling hook */
ia32_before_ra, /* before register allocation hook */
ia32_after_ra, /* after register allocation hook */
+ ia32_finish, /* called before codegen */
ia32_codegen /* emit && done */
};