/**
 * This is the main ia32 Firm backend driver.
 */

#include "pseudo_irg.h"
#include "iredges_t.h"

#include "../beabi.h"             /* the general register allocator interface */
#include "../benode_t.h"
#include "../belower.h"
#include "../besched_t.h"

#include "bearch_ia32_t.h"

#include "ia32_new_nodes.h"       /* ia32 nodes interface */
#include "gen_ia32_regalloc_if.h" /* the generated interface (register type and class definitions) */
#include "ia32_gen_decls.h"       /* interface declaration emitter */
#include "ia32_transform.h"
#include "ia32_emitter.h"
#include "ia32_map_regs.h"
#include "ia32_optimize.h"

#define DEBUG_MODULE "firm.be.ia32.isa"
static set *cur_reg_set = NULL;

#define is_Start(irn) (get_irn_opcode(irn) == iro_Start)
ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg) {
	return be_abi_get_callee_save_irn(cg->birg->abi, &ia32_gp_regs[REG_GP_NOREG]);
}

ir_node *ia32_new_NoReg_fp(ia32_code_gen_t *cg) {
	return be_abi_get_callee_save_irn(cg->birg->abi,
		USE_SSE2(cg) ? &ia32_xmm_regs[REG_XMM_NOREG] : &ia32_vfp_regs[REG_VFP_NOREG]);
}
/**************************************************
 *                          _ _              _  _
 *                         | | |            (_)/ _|
 *   _ __ ___  __ _    __ _| | | ___   ___   _| |_
 *  | '__/ _ \/ _` |  / _` | | |/ _ \ / __| | |  _|
 *  | | |  __/ (_| | | (_| | | | (_) | (__  | | |
 *  |_|  \___|\__, |  \__,_|_|_|\___/ \___| |_|_|
 *             __/ |
 *            |___/
 **************************************************/
static ir_node *my_skip_proj(const ir_node *n) {
	while (is_Proj(n))
		n = get_Proj_pred(n);
	return (ir_node *)n;
}
/**
 * Returns the register requirements for an ia32 node.
 * If the node returns a tuple (mode_T), the Projs
 * will be asked for this information.
 */
static const arch_register_req_t *ia32_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos) {
	const ia32_irn_ops_t      *ops = self;
	const ia32_register_req_t *irn_req;
	long     node_pos = pos == -1 ? 0 : pos;
	ir_mode *mode     = is_Block(irn) ? NULL : get_irn_mode(irn);
	FIRM_DBG_REGISTER(firm_dbg_module_t *mod, DEBUG_MODULE);

	if (is_Block(irn) || mode == mode_M || mode == mode_X) {
		DBG((mod, LEVEL_1, "ignoring Block, mode_M, mode_X node %+F\n", irn));
		return NULL;
	}

	if (mode == mode_T && pos < 0) {
		DBG((mod, LEVEL_1, "ignoring request for OUT requirements for node %+F\n", irn));
		return NULL;
	}

	DBG((mod, LEVEL_1, "get requirements at pos %d for %+F ... ", pos, irn));

	if (is_Proj(irn)) {
		if (pos == -1)
			node_pos = ia32_translate_proj_pos(irn);

		irn = my_skip_proj(irn);

		DB((mod, LEVEL_1, "skipping Proj, going to %+F at pos %d ... ", irn, node_pos));
	}

	if (is_ia32_irn(irn)) {
		if (pos >= 0)
			irn_req = get_ia32_in_req(irn, pos);
		else
			irn_req = get_ia32_out_req(irn, node_pos);

		DB((mod, LEVEL_1, "returning reqs for %+F at pos %d\n", irn, pos));

		memcpy(req, &(irn_req->req), sizeof(*req));

		if (arch_register_req_is(&(irn_req->req), should_be_same)) {
			assert(irn_req->same_pos >= 0 && "should be same constraint for in -> out NYI");
			req->other_same = get_irn_n(irn, irn_req->same_pos);
		}

		if (arch_register_req_is(&(irn_req->req), should_be_different)) {
			assert(irn_req->different_pos >= 0 && "should be different constraint for in -> out NYI");
			req->other_different = get_irn_n(irn, irn_req->different_pos);
		}
	}
	else if (is_Phi(irn)) {
		/* treat Phi like Const with default requirements */
		DB((mod, LEVEL_1, "returning standard reqs for %+F\n", irn));

		if (mode_is_float(mode)) {
			if (USE_SSE2(ops->cg))
				memcpy(req, &(ia32_default_req_ia32_xmm.req), sizeof(*req));
			else
				memcpy(req, &(ia32_default_req_ia32_vfp.req), sizeof(*req));
		}
		else if (mode_is_int(mode) || mode_is_reference(mode))
			memcpy(req, &(ia32_default_req_ia32_gp.req), sizeof(*req));
		else if (mode == mode_T || mode == mode_M) {
			DBG((mod, LEVEL_1, "ignoring Phi node %+F\n", irn));
			return NULL;
		}
		else
			assert(0 && "unsupported Phi-Mode");
	}
	else {
		DB((mod, LEVEL_1, "returning NULL for %+F (not ia32)\n", irn));
		req = NULL;
	}

	return req;
}
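/*
 * Illustration (a sketch, not part of the driver): the should_be_same
 * requirement models the x86 two-address form. For an ia32 Add whose
 * OUT requirement names in[0] as other_same, the allocator should try
 * to give the result and the first operand the same register, so the
 * node maps to a single instruction:
 *
 *     r1 = Add(r1, r2)      =>      add r1, r2    ; r1 += r2
 *
 * If the registers end up different, ia32_finish_irg_walker below
 * repairs the node with a Copy (or by swapping the ins of a
 * commutative operation).
 */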
static void ia32_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg) {
	int pos = 0;
	const ia32_irn_ops_t *ops = self;

	if (get_irn_mode(irn) == mode_X) {
		return;
	}

	DBG((ops->cg->mod, LEVEL_1, "ia32 assigned register %s to node %+F\n", reg->name, irn));

	if (is_Proj(irn)) {
		pos = ia32_translate_proj_pos(irn);
		irn = my_skip_proj(irn);
	}

	if (is_ia32_irn(irn)) {
		const arch_register_t **slots;

		slots      = get_ia32_slots(irn);
		slots[pos] = reg;
	}
	else {
		ia32_set_firm_reg(irn, reg, cur_reg_set);
	}
}
static const arch_register_t *ia32_get_irn_reg(const void *self, const ir_node *irn) {
	int pos = 0;
	const arch_register_t *reg = NULL;

	if (is_Proj(irn)) {
		if (get_irn_mode(irn) == mode_X) {
			return NULL;
		}

		pos = ia32_translate_proj_pos(irn);
		irn = my_skip_proj(irn);
	}

	if (is_ia32_irn(irn)) {
		const arch_register_t **slots;
		slots = get_ia32_slots(irn);
		reg   = slots[pos];
	}
	else {
		reg = ia32_get_firm_reg(irn, cur_reg_set);
	}

	return reg;
}
static arch_irn_class_t ia32_classify(const void *self, const ir_node *irn) {
	irn = my_skip_proj(irn);
	if (is_cfop(irn))
		return arch_irn_class_branch;
	else if (is_ia32_irn(irn))
		return arch_irn_class_normal;
	else
		return 0;
}
static arch_irn_flags_t ia32_get_flags(const void *self, const ir_node *irn) {
	irn = my_skip_proj(irn);
	if (is_ia32_irn(irn))
		return get_ia32_flags(irn);
	else
		return 0;
}
static entity *ia32_get_frame_entity(const void *self, const ir_node *irn) {
	return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
}
static void ia32_set_stack_bias(const void *self, ir_node *irn, int bias) {
	char buf[64];
	const ia32_irn_ops_t *ops = self;

	if (get_ia32_frame_ent(irn)) {
		ia32_am_flavour_t am_flav = get_ia32_am_flavour(irn);

		DBG((ops->cg->mod, LEVEL_1, "stack biased %+F with %d\n", irn, bias));
		snprintf(buf, sizeof(buf), "%d", bias);

		if (get_ia32_op_type(irn) == ia32_Normal) {
			set_ia32_cnst(irn, buf);
		}
		else {
			add_ia32_am_offs(irn, buf);
			set_ia32_am_flavour(irn, am_flav);
		}
	}
}
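/*
 * Illustration (a sketch): frame entities are addressed relative to the
 * stack pointer, which moves whenever an IncSP is introduced, so every
 * frame access must be corrected by a bias. With a bias of 4 an access
 * like
 *
 *     mov eax, [esp+8]      becomes      mov eax, [esp+12]
 *
 * For address-mode nodes the bias string is appended to the am_offs
 * list; for ia32_Normal nodes it becomes the immediate constant.
 */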
typedef struct ia32_abi_env_t {
	be_abi_call_flags_bits_t flags;
	const arch_isa_t        *isa;
	ir_graph                *irg;
} ia32_abi_env_t;
static void *ia32_abi_init(const be_abi_call_t *call, const arch_env_t *aenv, ir_graph *irg)
{
	ia32_abi_env_t *env    = xmalloc(sizeof(env[0]));
	be_abi_call_flags_t fl = be_abi_call_get_flags(call);
	env->flags = fl.bits;
	env->irg   = irg;
	env->isa   = aenv->isa;
	return env;
}
static void ia32_abi_dont_save_regs(void *self, pset *s)
{
	ia32_abi_env_t *env = self;
	if (env->flags.try_omit_fp)
		pset_insert_ptr(s, env->isa->bp);
}
static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map)
{
	ia32_abi_env_t *env = self;
	const arch_register_t *frame_reg = env->isa->sp;

	if (!env->flags.try_omit_fp) {
		int      reg_size     = get_mode_size_bytes(env->isa->bp->reg_class->mode);
		ir_node *bl           = get_irg_start_block(env->irg);
		ir_node *curr_sp      = be_abi_reg_map_get(reg_map, env->isa->sp);
		ir_node *curr_bp      = be_abi_reg_map_get(reg_map, env->isa->bp);
		ir_node *curr_no_reg  = be_abi_reg_map_get(reg_map, &ia32_gp_regs[REG_GP_NOREG]);
		ir_node *store_bp;

		/* push the old base pointer */
		curr_sp  = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, *mem, reg_size, be_stack_dir_expand);
		store_bp = new_rd_ia32_Store(NULL, env->irg, bl, curr_sp, curr_no_reg, curr_bp, *mem, mode_T);
		set_ia32_am_support(store_bp, ia32_am_Dest);
		set_ia32_am_flavour(store_bp, ia32_B);
		set_ia32_op_type(store_bp, ia32_AddrModeD);
		*mem = new_r_Proj(env->irg, bl, store_bp, mode_M, 0);

		/* move the stack pointer into the base pointer */
		curr_bp = be_new_Copy(env->isa->bp->reg_class, env->irg, bl, curr_sp);
		be_set_constr_single_reg(curr_bp, BE_OUT_POS(0), env->isa->bp);
		be_node_set_flags(curr_bp, BE_OUT_POS(0), arch_irn_flags_ignore);

		be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
		be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);

		frame_reg = env->isa->bp;
	}

	return frame_reg;
}
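/*
 * Sketch of the assembly the prologue built above corresponds to
 * (when the frame pointer is not omitted):
 *
 *     push ebp          ; IncSP + Store of curr_bp through curr_sp
 *     mov  ebp, esp     ; Copy of curr_sp, constrained to EBP
 */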
static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map)
{
	ia32_abi_env_t *env  = self;
	ir_node *curr_sp     = be_abi_reg_map_get(reg_map, env->isa->sp);
	ir_node *curr_bp     = be_abi_reg_map_get(reg_map, env->isa->bp);
	ir_node *curr_no_reg = be_abi_reg_map_get(reg_map, &ia32_gp_regs[REG_GP_NOREG]);

	if (env->flags.try_omit_fp) {
		/* simply remove the stack frame here */
		curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, *mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
	}
	else {
		ir_node *load_bp;
		ir_mode *mode_bp = env->isa->bp->reg_class->mode;

		/* copy the base pointer into the stack pointer, then pop the old base pointer */
		curr_sp = be_new_SetSP(env->isa->sp, env->irg, bl, curr_sp, curr_bp, *mem);
		load_bp = new_rd_ia32_Load(NULL, env->irg, bl, curr_sp, curr_no_reg, *mem, mode_T);
		set_ia32_am_support(load_bp, ia32_am_Source);
		set_ia32_am_flavour(load_bp, ia32_B);
		set_ia32_op_type(load_bp, ia32_AddrModeS);
		set_ia32_ls_mode(load_bp, mode_bp);
		curr_bp = new_r_Proj(env->irg, bl, load_bp, mode_bp, 0);
		*mem    = new_r_Proj(env->irg, bl, load_bp, mode_M, 1);
	}

	be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
	be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);
}
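/*
 * Sketch of the corresponding epilogue assembly:
 *
 *     omit-fp case:      add esp, <framesize>    ; IncSP shrink
 *     frame-ptr case:    mov esp, ebp            ; SetSP from curr_bp
 *                        pop ebp                 ; Load of the saved bp
 */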
/**
 * Produces the type which sits between the stack args and the locals on the stack.
 * It contains the return address and space to store the old base pointer.
 * @return The Firm type modeling the ABI between-type.
 */
static ir_type *ia32_abi_get_between_type(void *self)
{
	static ir_type *omit_fp_between_type = NULL;
	static ir_type *between_type         = NULL;

	ia32_abi_env_t *env = self;

	if (!between_type) {
		entity *old_bp_ent;
		entity *ret_addr_ent;
		entity *omit_fp_ret_addr_ent;

		ir_type *old_bp_type   = new_type_primitive(new_id_from_str("bp"), mode_P);
		ir_type *ret_addr_type = new_type_primitive(new_id_from_str("return_addr"), mode_P);

		between_type = new_type_class(new_id_from_str("ia32_between_type"));
		old_bp_ent   = new_entity(between_type, new_id_from_str("old_bp"), old_bp_type);
		ret_addr_ent = new_entity(between_type, new_id_from_str("ret_addr"), ret_addr_type);

		set_entity_offset_bytes(old_bp_ent, 0);
		set_entity_offset_bytes(ret_addr_ent, get_type_size_bytes(old_bp_type));
		set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));

		omit_fp_between_type = new_type_class(new_id_from_str("ia32_between_type_omit_fp"));
		omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, new_id_from_str("ret_addr"), ret_addr_type);

		set_entity_offset_bytes(omit_fp_ret_addr_ent, 0);
		set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
	}

	return env->flags.try_omit_fp ? omit_fp_between_type : between_type;
}
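/*
 * Resulting stack layout around the between-type (a sketch, higher
 * addresses on top):
 *
 *     stack arguments
 *     return address       \  between_type (old_bp is only present
 *     old base pointer     /  when the frame pointer is not omitted)
 *     locals
 */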
static const be_abi_callbacks_t ia32_abi_callbacks = {
	ia32_abi_init,
	free,
	ia32_abi_get_between_type,
	ia32_abi_dont_save_regs,
	ia32_abi_prologue,
	ia32_abi_epilogue,
};
/* fill register allocator interface */

static const arch_irn_ops_if_t ia32_irn_ops_if = {
	ia32_get_irn_reg_req,
	ia32_set_irn_reg,
	ia32_get_irn_reg,
	ia32_classify,
	ia32_get_flags,
	ia32_get_frame_entity,
	ia32_set_stack_bias
};

ia32_irn_ops_t ia32_irn_ops = {
	&ia32_irn_ops_if,
	NULL
};
/**************************************************
 *                   _                _  _
 *                  | |              (_)/ _|
 *   ___ ___   __| | ___  __ _  ___ _ __   _| |_
 *  / __/ _ \ / _` |/ _ \/ _` |/ _ \ '_ \ | |  _|
 * | (_| (_) | (_| |  __/ (_| |  __/ | | || | |
 *  \___\___/ \__,_|\___|\__, |\___|_| |_||_|_|
 *                        __/ |
 *                       |___/
 **************************************************/
/**
 * Transforms the standard Firm graph into
 * an ia32 Firm graph.
 */
static void ia32_prepare_graph(void *self) {
	ia32_code_gen_t *cg = self;
	DEBUG_ONLY(firm_dbg_module_t *old_mod = cg->mod;)

	FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.transform");
	irg_walk_blkwise_graph(cg->irg, ia32_place_consts_set_modes, ia32_transform_node, cg);
	be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);

	DEBUG_ONLY(cg->mod = old_mod;)

	edges_deactivate(cg->irg);
	//dead_node_elimination(cg->irg);
	edges_activate(cg->irg);

	irg_walk_blkwise_graph(cg->irg, NULL, ia32_optimize_am, cg);
	be_dump(cg->irg, "-am", dump_ir_block_graph_sched);
}
/**
 * Walker: Inserts copies for all ia32 nodes where the should_be_same
 * requirement is not fulfilled.
 * Transforms a Sub into a Neg + Add sequence if IN2 == OUT.
 */
static void ia32_finish_irg_walker(ir_node *irn, void *env) {
	ia32_code_gen_t *cg = env;
	const ia32_register_req_t **reqs;
	const arch_register_t *out_reg, *in_reg, *in2_reg;
	int n_res, i;
	ir_node *copy, *in_node, *block, *in2_node;
	ia32_op_type_t op_tp;

	if (is_ia32_irn(irn)) {
		/* AM Dest nodes don't produce any values */
		op_tp = get_ia32_op_type(irn);
		if (op_tp == ia32_AddrModeD)
			return;

		reqs  = get_ia32_out_req_all(irn);
		n_res = get_ia32_n_res(irn);
		block = get_nodes_block(irn);

		/* check all OUT requirements, if there is a should_be_same */
		if (op_tp == ia32_Normal) {
			for (i = 0; i < n_res; i++) {
				if (arch_register_req_is(&(reqs[i]->req), should_be_same)) {
					/* get in and out register */
					out_reg  = get_ia32_out_reg(irn, i);
					in_node  = get_irn_n(irn, reqs[i]->same_pos);
					in_reg   = arch_get_irn_register(cg->arch_env, in_node);
					in2_node = get_irn_n(irn, reqs[i]->same_pos ^ 1);
					in2_reg  = arch_get_irn_register(cg->arch_env, in2_node);

					/* don't copy ignore nodes */
					if (arch_irn_is(cg->arch_env, in_node, ignore) && is_Proj(in_node))
						continue;

					/* check if in and out register are equal */
					if (! REGS_ARE_EQUAL(out_reg, in_reg)) {
						/* in case of a commutative op: just exchange the ins */
						if (is_ia32_commutative(irn) && REGS_ARE_EQUAL(out_reg, in2_reg)) {
							set_irn_n(irn, reqs[i]->same_pos, in2_node);
							set_irn_n(irn, reqs[i]->same_pos ^ 1, in_node);
						}
						else {
							DBG((cg->mod, LEVEL_1, "inserting copy for %+F in_pos %d\n", irn, reqs[i]->same_pos));
							/* create a copy from the in register */
							copy = be_new_Copy(arch_register_get_class(in_reg), cg->irg, block, in_node);

							/* destination is the out register */
							arch_set_irn_register(cg->arch_env, copy, out_reg);

							/* insert copy before the node into the schedule */
							sched_add_before(irn, copy);

							/* set copy as in */
							set_irn_n(irn, reqs[i]->same_pos, copy);
						}
					}
				}
			}
		}

		/* If we have a CondJmp with immediate, we need to          */
		/* check if it's the right operand, otherwise we have       */
		/* to change it, as CMP doesn't support immediate as        */
		/* left operand.                                            */
		if (is_ia32_CondJmp(irn) && (is_ia32_ImmConst(irn) || is_ia32_ImmSymConst(irn)) && op_tp == ia32_AddrModeS) {
			long pnc = get_negated_pnc(get_ia32_pncode(irn), get_ia32_res_mode(irn));
			set_ia32_op_type(irn, ia32_AddrModeD);
			set_ia32_pncode(irn, pnc);
		}

		/* check if there is a Sub which needs to be transformed */
		ia32_transform_sub_to_neg_add(irn, cg);

		/* transform a LEA into an Add if possible */
		ia32_transform_lea_to_add(irn, cg);
	}

	/* check for peephole optimization */
	ia32_peephole_optimization(irn, cg);
}
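/*
 * Illustration of the Sub transformation mentioned above (a sketch):
 * a Sub cannot satisfy OUT == IN2 directly, because x86 sub overwrites
 * its first operand. If the allocator assigned the result the register
 * of IN2, the Sub is rewritten as a Neg + Add sequence:
 *
 *     r2 = Sub(r1, r2)      =>      neg r2         ; r2 = -r2
 *                                   add r2, r1     ; r2 = r1 - r2_old
 */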
/**
 * Adds Copy nodes for not fulfilled should_be_same constraints.
 */
static void ia32_finish_irg(ir_graph *irg, ia32_code_gen_t *cg) {
	irg_walk_blkwise_graph(irg, NULL, ia32_finish_irg_walker, cg);
}
/**
 * Dummy functions for hooks we don't need but which must be filled.
 */
static void ia32_before_sched(void *self) {
}
/**
 * Called before the register allocator.
 * Calculates a block schedule here. We need it for the x87
 * simulator and the emitter.
 */
static void ia32_before_ra(void *self) {
	ia32_code_gen_t *cg = self;

	cg->blk_sched = sched_create_block_schedule(cg->irg);
}
/**
 * Transforms a be node into a Load.
 */
static void transform_to_Load(ia32_transform_env_t *env) {
	ir_node *irn         = env->irn;
	entity  *ent         = be_get_frame_entity(irn);
	ir_mode *mode        = env->mode;
	ir_node *noreg       = ia32_new_NoReg_gp(env->cg);
	ir_node *nomem       = new_rd_NoMem(env->irg);
	ir_node *sched_point = NULL;
	ir_node *ptr         = get_irn_n(irn, 0);
	ir_node *mem         = be_is_Reload(irn) ? get_irn_n(irn, 1) : nomem;
	ir_node *new_op, *proj;
	const arch_register_t *reg;

	if (sched_is_scheduled(irn)) {
		sched_point = sched_prev(irn);
	}

	if (mode_is_float(mode)) {
		if (USE_SSE2(env->cg))
			new_op = new_rd_ia32_fLoad(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
		else
			new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
	}
	else {
		new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
	}

	set_ia32_am_support(new_op, ia32_am_Source);
	set_ia32_op_type(new_op, ia32_AddrModeS);
	set_ia32_am_flavour(new_op, ia32_B);
	set_ia32_ls_mode(new_op, mode);
	set_ia32_frame_ent(new_op, ent);
	set_ia32_use_frame(new_op);

	proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, pn_Load_res);

	if (sched_point) {
		sched_add_after(sched_point, new_op);
		sched_add_after(new_op, proj);

		sched_remove(irn);
	}

	/* copy the register from the old node to the new Load */
	reg = arch_get_irn_register(env->cg->arch_env, irn);
	arch_set_irn_register(env->cg->arch_env, new_op, reg);

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, new_op));

	exchange(irn, proj);
}
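/*
 * Sketch: after this transformation a Reload of a gp value from a frame
 * entity at, say, offset -8 is an ordinary load, e.g.
 *
 *     mov eax, [ebp-8]    ; or ESP-relative when the frame ptr is omitted
 */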
/**
 * Transforms a be node into a Store.
 */
static void transform_to_Store(ia32_transform_env_t *env) {
	ir_node *irn   = env->irn;
	entity  *ent   = be_get_frame_entity(irn);
	ir_mode *mode  = env->mode;
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *nomem = new_rd_NoMem(env->irg);
	ir_node *ptr   = get_irn_n(irn, 0);
	ir_node *val   = get_irn_n(irn, 1);
	ir_node *new_op, *proj;
	ir_node *sched_point = NULL;

	if (sched_is_scheduled(irn)) {
		sched_point = sched_prev(irn);
	}

	if (mode_is_float(mode)) {
		if (USE_SSE2(env->cg))
			new_op = new_rd_ia32_fStore(env->dbg, env->irg, env->block, ptr, noreg, val, nomem, mode_T);
		else
			new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, nomem, mode_T);
	}
	else if (get_mode_size_bits(mode) == 8) {
		new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, ptr, noreg, val, nomem, mode_T);
	}
	else {
		new_op = new_rd_ia32_Store(env->dbg, env->irg, env->block, ptr, noreg, val, nomem, mode_T);
	}

	set_ia32_am_support(new_op, ia32_am_Dest);
	set_ia32_op_type(new_op, ia32_AddrModeD);
	set_ia32_am_flavour(new_op, ia32_B);
	set_ia32_ls_mode(new_op, mode);
	set_ia32_frame_ent(new_op, ent);
	set_ia32_use_frame(new_op);

	proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode_M, 0);

	if (sched_point) {
		sched_add_after(sched_point, new_op);
		sched_add_after(new_op, proj);

		sched_remove(irn);
	}

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, new_op));

	exchange(irn, proj);
}
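/*
 * Sketch: the Spill counterpart becomes a plain store into the frame
 * entity, e.g.  mov [ebp-8], eax  (8bit values use the Store8Bit node,
 * since not every gp register has an 8bit subregister).
 */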
/**
 * Fixes the mode of a Spill/Reload: float values are always spilled
 * and reloaded in the widest mode of their register class.
 */
static ir_mode *fix_spill_mode(ia32_code_gen_t *cg, ir_mode *mode)
{
	if (mode_is_float(mode)) {
		return USE_SSE2(cg) ? mode_D : mode_E;
	}
	else
		return mode;
}
/**
 * Block-Walker: Calls the transform functions for Spill and Reload.
 */
static void ia32_after_ra_walker(ir_node *block, void *env) {
	ir_node *node, *prev;
	ia32_code_gen_t *cg = env;
	ia32_transform_env_t tenv;

	tenv.block = block;
	tenv.irg   = current_ir_graph;
	tenv.cg    = cg;
	DEBUG_ONLY(tenv.mod = cg->mod;)

	/* beware: the schedule is changed here */
	for (node = sched_last(block); !sched_is_begin(node); node = prev) {
		prev = sched_prev(node);
		if (be_is_Reload(node)) {
			/* we always reload the whole register */
			tenv.dbg  = get_irn_dbg_info(node);
			tenv.irn  = node;
			tenv.mode = fix_spill_mode(cg, get_irn_mode(node));
			transform_to_Load(&tenv);
		}
		else if (be_is_Spill(node)) {
			/* we always spill the whole register */
			tenv.dbg  = get_irn_dbg_info(node);
			tenv.irn  = node;
			tenv.mode = fix_spill_mode(cg, get_irn_mode(be_get_Spill_context(node)));
			transform_to_Store(&tenv);
		}
	}
}
/**
 * We transform Spill and Reload here. This needs to be done before
 * stack biasing, otherwise we would miss the corrected offset for these nodes.
 *
 * If x87 instructions should be emitted, run the x87 simulator and patch
 * the virtual instructions. This must obviously be done after register allocation.
 */
static void ia32_after_ra(void *self) {
	ia32_code_gen_t *cg = self;
	irg_block_walk_graph(cg->irg, NULL, ia32_after_ra_walker, self);

	/* if we do x87 code generation, rewrite all the virtual instructions and registers */
	if (cg->fp_kind == fp_x87) {
		x87_simulate_graph(cg->arch_env, cg->irg, cg->blk_sched);
	}
}
/**
 * Emits the code, closes the output file and frees
 * the code generator interface.
 */
static void ia32_codegen(void *self) {
	ia32_code_gen_t *cg  = self;
	ir_graph        *irg = cg->irg;
	FILE            *out = cg->out;

	if (cg->emit_decls) {
		ia32_gen_decls(cg->out);
		cg->emit_decls = 0;
	}

	ia32_finish_irg(irg, cg);
	be_dump(irg, "-finished", dump_ir_block_graph_sched);
	ia32_gen_routine(out, irg, cg);

	cur_reg_set = NULL;

	pmap_destroy(cg->tv_ent);
	pmap_destroy(cg->types);

	/* de-allocate code generator */
	del_set(cg->reg_set);
	free(self);
}
static void *ia32_cg_init(FILE *F, const be_irg_t *birg);

static const arch_code_generator_if_t ia32_code_gen_if = {
	ia32_cg_init,
	NULL,                /* before abi introduce hook */
	ia32_prepare_graph,
	ia32_before_sched,   /* before scheduling hook */
	ia32_before_ra,      /* before register allocation hook */
	ia32_after_ra,       /* after register allocation hook */
	ia32_codegen         /* emit && done */
};
/**
 * Initializes the code generator.
 */
static void *ia32_cg_init(FILE *F, const be_irg_t *birg) {
	ia32_isa_t      *isa = (ia32_isa_t *)birg->main_env->arch_env->isa;
	ia32_code_gen_t *cg  = xcalloc(1, sizeof(*cg));

	cg->impl      = &ia32_code_gen_if;
	cg->irg       = birg->irg;
	cg->reg_set   = new_set(ia32_cmp_irn_reg_assoc, 1024);
	cg->out       = F;
	cg->arch_env  = birg->main_env->arch_env;
	cg->types     = pmap_create();
	cg->tv_ent    = pmap_create();
	cg->birg      = birg;
	cg->blk_sched = NULL;
	cg->fp_kind   = isa->fp_kind;

	FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.cg");

	/* set optimizations */
	cg->opt.placecnst = 1;

	if (isa->name_obst_size) {
		//printf("freed %d bytes from name obst\n", isa->name_obst_size);
		isa->name_obst_size = 0;
		obstack_free(isa->name_obst, NULL);
		obstack_init(isa->name_obst);
	}

	isa->num_codegens++;

	if (isa->num_codegens > 1)
		cg->emit_decls = 0;
	else
		cg->emit_decls = 1;

	cur_reg_set = cg->reg_set;

	ia32_irn_ops.cg = cg;

	return (arch_code_generator_t *)cg;
}
/*****************************************************************
 *  ____             _                  _   _____  _____
 * |  _ \           | |                | | |_   _|/ ____|  /\
 * | |_) | __ _  ___| | _____ _ __   __| |   | | | (___   /  \
 * |  _ < / _` |/ __| |/ / _ \ '_ \ / _` |   | |  \___ \ / /\ \
 * | |_) | (_| | (__|   <  __/ | | | (_| |  _| |_ ____) / ____ \
 * |____/ \__,_|\___|_|\_\___|_| |_|\__,_| |_____|_____/_/    \_\
 *
 *****************************************************************/
static ia32_isa_t ia32_isa_template = {
	&ia32_isa_if,            /* isa interface implementation */
	&ia32_gp_regs[REG_ESP],  /* stack pointer register */
	&ia32_gp_regs[REG_EBP],  /* base pointer register */
	-1,                      /* stack direction */
	0,                       /* number of code generator objects so far */
	NULL,                    /* 16bit register names */
	NULL,                    /* 8bit register names */
	fp_sse2,                 /* use SSE2 unit for fp operations */
	NULL,                    /* name obstack */
	0                        /* name obst size */
};
/**
 * Initializes the backend ISA.
 */
static void *ia32_init(void) {
	static int inited = 0;
	ia32_isa_t *isa;

	if (inited)
		return NULL;

	isa = xcalloc(1, sizeof(*isa));
	memcpy(isa, &ia32_isa_template, sizeof(*isa));

	ia32_register_init(isa);
	ia32_create_opcodes();
	ia32_register_copy_attr_func();

	isa->regs_16bit = pmap_create();
	isa->regs_8bit  = pmap_create();
	// isa->fp_kind = fp_x87;

	ia32_build_16bit_reg_map(isa->regs_16bit);
	ia32_build_8bit_reg_map(isa->regs_8bit);

	isa->name_obst = xcalloc(1, sizeof(*(isa->name_obst)));
	obstack_init(isa->name_obst);
	isa->name_obst_size = 0;

	inited = 1;

	return isa;
}
/**
 * Closes the output file and frees the ISA structure.
 */
static void ia32_done(void *self) {
	ia32_isa_t *isa = self;

	pmap_destroy(isa->regs_16bit);
	pmap_destroy(isa->regs_8bit);

	//printf("name obst size = %d bytes\n", isa->name_obst_size);
	obstack_free(isa->name_obst, NULL);

	free(self);
}
/**
 * Returns the number of register classes for this architecture.
 * We always report these:
 *  - the general purpose registers
 *  - the floating point register set (depending on the unit used for FP)
 *  - MMX/SSE registers (currently not supported)
 */
static int ia32_get_n_reg_class(const void *self) {
	return 2;
}
/**
 * Returns the register class for index i.
 */
static const arch_register_class_t *ia32_get_reg_class(const void *self, int i) {
	const ia32_isa_t *isa = self;
	assert(i >= 0 && i < 2 && "Invalid ia32 register class requested.");
	if (i == 0)
		return &ia32_reg_classes[CLASS_ia32_gp];
	return USE_SSE2(isa) ? &ia32_reg_classes[CLASS_ia32_xmm] : &ia32_reg_classes[CLASS_ia32_vfp];
}
/**
 * Get the register class which shall be used to store a value of a given mode.
 * @param self The this pointer.
 * @param mode The mode in question.
 * @return A register class which can hold values of the given mode.
 */
const arch_register_class_t *ia32_get_reg_class_for_mode(const void *self, const ir_mode *mode) {
	const ia32_isa_t *isa = self;
	if (mode_is_float(mode)) {
		return USE_SSE2(isa) ? &ia32_reg_classes[CLASS_ia32_xmm] : &ia32_reg_classes[CLASS_ia32_vfp];
	}
	else
		return &ia32_reg_classes[CLASS_ia32_gp];
}
/**
 * Get the ABI restrictions for procedure calls.
 * @param self        The this pointer.
 * @param method_type The type of the method (procedure) in question.
 * @param abi         The abi object to be modified.
 */
static void ia32_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi) {
	const ia32_isa_t *isa = self;
	ir_type  *tp;
	ir_mode  *mode;
	unsigned  cc        = get_method_calling_convention(method_type);
	int       n         = get_method_n_params(method_type);
	int       biggest_n = -1;
	int       stack_idx = 0;
	int       i, ignore_1, ignore_2;
	ir_mode **modes;
	const arch_register_t *reg;
	be_abi_call_flags_t call_flags;

	/* set abi flags for calls */
	call_flags.bits.left_to_right         = 0;
	call_flags.bits.store_args_sequential = 0;
	call_flags.bits.try_omit_fp           = 1;
	call_flags.bits.fp_free               = 0;
	call_flags.bits.call_has_imm          = 1;

	/* set stack parameter passing style */
	be_abi_call_set_flags(abi, call_flags, &ia32_abi_callbacks);

	/* collect the mode for each type */
	modes = alloca(n * sizeof(modes[0]));

	for (i = 0; i < n; i++) {
		tp       = get_method_param_type(method_type, i);
		modes[i] = get_type_mode(tp);
	}

	/* set register parameters */
	if (cc & cc_reg_param) {
		/* determine the number of parameters passed via registers */
		biggest_n = ia32_get_n_regparam_class(n, modes, &ignore_1, &ignore_2);

		/* loop over all parameters and set the register requirements */
		for (i = 0; i <= biggest_n; i++) {
			reg = ia32_get_RegParam_reg(n, modes, i, cc);
			assert(reg && "kaputt");
			be_abi_call_param_reg(abi, i, reg);
		}

		stack_idx = biggest_n + 1;
	}

	/* set stack parameters */
	for (i = stack_idx; i < n; i++) {
		be_abi_call_param_stack(abi, i, 1, 0, 0);
	}

	/* set return registers */
	n = get_method_n_ress(method_type);

	assert(n <= 2 && "more than two results not supported");

	/* In case of 64bit returns, we will have two 32bit values */
	if (n == 2) {
		tp   = get_method_res_type(method_type, 0);
		mode = get_type_mode(tp);

		assert(!mode_is_float(mode) && "two FP results not supported");

		tp   = get_method_res_type(method_type, 1);
		mode = get_type_mode(tp);

		assert(!mode_is_float(mode) && "two FP results not supported");

		be_abi_call_res_reg(abi, 0, &ia32_gp_regs[REG_EAX]);
		be_abi_call_res_reg(abi, 1, &ia32_gp_regs[REG_EDX]);
	}
	else if (n == 1) {
		const arch_register_t *reg;

		tp   = get_method_res_type(method_type, 0);
		assert(is_atomic_type(tp));
		mode = get_type_mode(tp);

		reg = mode_is_float(mode) ?
			(USE_SSE2(isa) ? &ia32_xmm_regs[REG_XMM0] : &ia32_vfp_regs[REG_VF0]) :
			&ia32_gp_regs[REG_EAX];

		be_abi_call_res_reg(abi, 0, reg);
	}
}
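/*
 * Example (a sketch): for  long long f(int a, int b);  under the default
 * calling convention, a and b are passed on the stack, and the 64bit
 * result is returned in the register pair EAX/EDX (by convention EAX
 * holds the low word, EDX the high word), as set up by the n == 2 case
 * above.
 */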
static const void *ia32_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn) {
	return &ia32_irn_ops;
}

const arch_irn_handler_t ia32_irn_handler = {
	ia32_get_irn_ops
};

const arch_irn_handler_t *ia32_get_irn_handler(const void *self) {
	return &ia32_irn_handler;
}

int ia32_to_appear_in_schedule(void *block_env, const ir_node *irn) {
	return is_ia32_irn(irn);
}
/**
 * Returns the code generator interface.
 */
static const arch_code_generator_if_t *ia32_get_code_generator_if(void *self) {
	return &ia32_code_gen_if;
}
list_sched_selector_t ia32_sched_selector;

/**
 * Returns the list scheduler selector (currently the trivial selector;
 * the reg_pressure selector is disabled) with to_appear_in_schedule()
 * overloaded.
 */
static const list_sched_selector_t *ia32_get_list_sched_selector(const void *self) {
	// memcpy(&ia32_sched_selector, reg_pressure_selector, sizeof(list_sched_selector_t));
	memcpy(&ia32_sched_selector, trivial_selector, sizeof(list_sched_selector_t));
	ia32_sched_selector.to_appear_in_schedule = ia32_to_appear_in_schedule;
	return &ia32_sched_selector;
}
/**
 * Returns the necessary byte alignment for storing a register of given class.
 */
static int ia32_get_reg_class_alignment(const void *self, const arch_register_class_t *cls) {
	ir_mode *mode = arch_register_class_mode(cls);
	int bytes     = get_mode_size_bytes(mode);

	/* x87 registers hold 80bit (10 byte) values: align them to 16 bytes */
	if (mode_is_float(mode) && bytes > 8)
		return 16;

	return bytes;
}
#ifdef WITH_LIBCORE
static void ia32_register_options(lc_opt_entry_t *ent)
{
}
#endif /* WITH_LIBCORE */
const arch_isa_if_t ia32_isa_if = {
#ifdef WITH_LIBCORE
	ia32_register_options,
#endif
	ia32_init,
	ia32_done,
	ia32_get_n_reg_class,
	ia32_get_reg_class,
	ia32_get_reg_class_for_mode,
	ia32_get_call_abi,
	ia32_get_irn_handler,
	ia32_get_code_generator_if,
	ia32_get_list_sched_selector,
	ia32_get_reg_class_alignment
};