2 * This is the main ia32 firm backend driver.
3 * @author Christian Wuerdig
20 #include <libcore/lc_opts.h>
21 #include <libcore/lc_opts_enum.h>
22 #endif /* WITH_LIBCORE */
26 #include "pseudo_irg.h"
30 #include "iredges_t.h"
38 #include "../beabi.h" /* the general register allocator interface */
39 #include "../benode_t.h"
40 #include "../belower.h"
41 #include "../besched_t.h"
44 #include "../beirgmod.h"
45 #include "../be_dbgout.h"
46 #include "bearch_ia32_t.h"
48 #include "ia32_new_nodes.h" /* ia32 nodes interface */
49 #include "gen_ia32_regalloc_if.h" /* the generated interface (register type and class defenitions) */
50 #include "ia32_gen_decls.h" /* interface declaration emitter */
51 #include "ia32_transform.h"
52 #include "ia32_emitter.h"
53 #include "ia32_map_regs.h"
54 #include "ia32_optimize.h"
56 #include "ia32_dbg_stat.h"
57 #include "ia32_finish.h"
58 #include "ia32_util.h"
60 #define DEBUG_MODULE "firm.be.ia32.isa"
63 static set *cur_reg_set = NULL;
66 #define is_Start(irn) (get_irn_opcode(irn) == iro_Start)
68 /* Creates the unique per irg GP NoReg node. */
69 ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg) {
70 return be_abi_get_callee_save_irn(cg->birg->abi, &ia32_gp_regs[REG_GP_NOREG]);
73 /* Creates the unique per irg FP NoReg node. */
74 ir_node *ia32_new_NoReg_fp(ia32_code_gen_t *cg) {
75 return be_abi_get_callee_save_irn(cg->birg->abi,
76 USE_SSE2(cg) ? &ia32_xmm_regs[REG_XMM_NOREG] : &ia32_vfp_regs[REG_VFP_NOREG]);
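/* Note (illustrative): these NoReg nodes serve as placeholders for the unused
 * base/index register operands of address mode nodes; see e.g. the noreg
 * inputs passed when Add/Sub/Load nodes are constructed further below. */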
79 /**************************************************
82 * _ __ ___ __ _ __ _| | | ___ ___ _| |_
83 * | '__/ _ \/ _` | / _` | | |/ _ \ / __| | | _|
84 * | | | __/ (_| | | (_| | | | (_) | (__ | | |
85 * |_| \___|\__, | \__,_|_|_|\___/ \___| |_|_|
88 **************************************************/
90 static ir_node *my_skip_proj(const ir_node *n) {
98 * Return register requirements for an ia32 node.
99 * If the node returns a tuple (mode_T) then the Projs
100 * will be asked for this information.
102 static const arch_register_req_t *ia32_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos) {
103 const ia32_irn_ops_t *ops = self;
104 const ia32_register_req_t *irn_req;
105 long node_pos = pos == -1 ? 0 : pos;
106 ir_mode *mode = is_Block(irn) ? NULL : get_irn_mode(irn);
107 FIRM_DBG_REGISTER(firm_dbg_module_t *mod, DEBUG_MODULE);
109 if (is_Block(irn) || mode == mode_M || mode == mode_X) {
110 DBG((mod, LEVEL_1, "ignoring Block, mode_M, mode_X node %+F\n", irn));
114 if (mode == mode_T && pos < 0) {
115 DBG((mod, LEVEL_1, "ignoring request for OUT requirements for node %+F\n", irn));
119 DBG((mod, LEVEL_1, "get requirements at pos %d for %+F ... ", pos, irn));
123 node_pos = ia32_translate_proj_pos(irn);
129 irn = my_skip_proj(irn);
131 DB((mod, LEVEL_1, "skipping Proj, going to %+F at pos %d ... ", irn, node_pos));
134 if (is_ia32_irn(irn)) {
136 irn_req = get_ia32_in_req(irn, pos);
139 irn_req = get_ia32_out_req(irn, node_pos);
142 DB((mod, LEVEL_1, "returning reqs for %+F at pos %d\n", irn, pos));
144 memcpy(req, &(irn_req->req), sizeof(*req));
146 if (arch_register_req_is(&(irn_req->req), should_be_same)) {
147 assert(irn_req->same_pos >= 0 && "should be same constraint for in -> out NYI");
148 req->other_same = get_irn_n(irn, irn_req->same_pos);
151 if (arch_register_req_is(&(irn_req->req), should_be_different)) {
152 assert(irn_req->different_pos >= 0 && "should be different constraint for in -> out NYI");
153 req->other_different = get_irn_n(irn, irn_req->different_pos);
157 /* treat Unknowns like Const with default requirements */
158 if (is_Unknown(irn)) {
159 DB((mod, LEVEL_1, "returning UKNWN reqs for %+F\n", irn));
160 if (mode_is_float(mode)) {
161 if (USE_SSE2(ops->cg))
162 memcpy(req, &(ia32_default_req_ia32_xmm_xmm_UKNWN), sizeof(*req));
164 memcpy(req, &(ia32_default_req_ia32_vfp_vfp_UKNWN), sizeof(*req));
166 else if (mode_is_int(mode) || mode_is_reference(mode))
167 memcpy(req, &(ia32_default_req_ia32_gp_gp_UKNWN), sizeof(*req));
168 else if (mode == mode_T || mode == mode_M) {
169 DBG((mod, LEVEL_1, "ignoring Unknown node %+F\n", irn));
173 assert(0 && "unsupported Unknown-Mode");
176 DB((mod, LEVEL_1, "returning NULL for %+F (not ia32)\n", irn));
184 static void ia32_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg) {
186 const ia32_irn_ops_t *ops = self;
188 if (get_irn_mode(irn) == mode_X) {
192 DBG((ops->cg->mod, LEVEL_1, "ia32 assigned register %s to node %+F\n", reg->name, irn));
195 pos = ia32_translate_proj_pos(irn);
196 irn = my_skip_proj(irn);
199 if (is_ia32_irn(irn)) {
200 const arch_register_t **slots;
202 slots = get_ia32_slots(irn);
206 ia32_set_firm_reg(irn, reg, cur_reg_set);
210 static const arch_register_t *ia32_get_irn_reg(const void *self, const ir_node *irn) {
212 const arch_register_t *reg = NULL;
216 if (get_irn_mode(irn) == mode_X) {
220 pos = ia32_translate_proj_pos(irn);
221 irn = my_skip_proj(irn);
224 if (is_ia32_irn(irn)) {
225 const arch_register_t **slots;
226 slots = get_ia32_slots(irn);
230 reg = ia32_get_firm_reg(irn, cur_reg_set);
236 static arch_irn_class_t ia32_classify(const void *self, const ir_node *irn) {
237 arch_irn_class_t classification = arch_irn_class_normal;
239 irn = my_skip_proj(irn);
242 classification |= arch_irn_class_branch;
244 if (! is_ia32_irn(irn))
245 return classification & ~arch_irn_class_normal;
247 if (is_ia32_Cnst(irn))
248 classification |= arch_irn_class_const;
251 classification |= arch_irn_class_load;
253 if (is_ia32_St(irn) || is_ia32_Store8Bit(irn))
254 classification |= arch_irn_class_store;
256 if (is_ia32_got_reload(irn))
257 classification |= arch_irn_class_reload;
259 return classification;
262 static arch_irn_flags_t ia32_get_flags(const void *self, const ir_node *irn) {
265 ir_node *pred = get_Proj_pred(irn);
266 if(is_ia32_Push(pred) && get_Proj_proj(irn) == pn_ia32_Push_stack) {
267 /* Push always modifies ESP, this cannot be changed */
268 return arch_irn_flags_modify_sp | arch_irn_flags_ignore;
270 if(is_ia32_Pop(pred) && get_Proj_proj(irn) == pn_ia32_Pop_stack) {
271 return arch_irn_flags_modify_sp | arch_irn_flags_ignore;
273 if(is_ia32_AddSP(pred) && get_Proj_proj(irn) == pn_ia32_AddSP_stack) {
274 /* AddSP always modifies ESP, this cannot be changed */
275 return arch_irn_flags_modify_sp | arch_irn_flags_ignore;
277 if(is_ia32_SubSP(pred) && get_Proj_proj(irn) == pn_ia32_SubSP_stack) {
278 /* SubSP always modifies ESP, this cannot be changed */
279 return arch_irn_flags_modify_sp | arch_irn_flags_ignore;
283 irn = my_skip_proj(irn);
284 if (is_ia32_irn(irn))
285 return get_ia32_flags(irn);
288 return arch_irn_flags_ignore;
294 be_abi_call_flags_bits_t flags;
295 const arch_isa_t *isa;
296 const arch_env_t *aenv;
300 static void *ia32_abi_init(const be_abi_call_t *call, const arch_env_t *aenv, ir_graph *irg)
302 ia32_abi_env_t *env = xmalloc(sizeof(env[0]));
303 be_abi_call_flags_t fl = be_abi_call_get_flags(call);
304 env->flags = fl.bits;
307 env->isa = aenv->isa;
311 static entity *ia32_get_frame_entity(const void *self, const ir_node *irn) {
312 return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
315 static void ia32_set_frame_entity(const void *self, ir_node *irn, entity *ent) {
316 set_ia32_frame_ent(irn, ent);
319 static void ia32_set_frame_offset(const void *self, ir_node *irn, int bias) {
321 const ia32_irn_ops_t *ops = self;
323 if (get_ia32_frame_ent(irn)) {
324 ia32_am_flavour_t am_flav = get_ia32_am_flavour(irn);
326 /* Pop nodes modify the stack pointer before calculating the destination
327 * address, so fix this here
329 if(is_ia32_Pop(irn)) {
330 ia32_abi_env_t *cb_env = get_abi_cb(ops->cg->birg->abi);
331 if (cb_env->flags.try_omit_fp)
335 DBG((ops->cg->mod, LEVEL_1, "stack biased %+F with %d\n", irn, bias));
337 snprintf(buf, sizeof(buf), "%d", bias);
339 if (get_ia32_op_type(irn) == ia32_Normal) {
340 set_ia32_cnst(irn, buf);
342 add_ia32_am_offs(irn, buf);
344 set_ia32_am_flavour(irn, am_flav);
349 static int ia32_get_sp_bias(const void *self, const ir_node *irn) {
351 int proj = get_Proj_proj(irn);
352 ir_node *pred = get_Proj_pred(irn);
354 if(is_ia32_Push(pred) && proj == 0)
356 if(is_ia32_Pop(pred) && proj == 1)
364 * Put all registers which are saved by the prologue/epilogue in a set.
366 * @param self The callback object.
367 * @param s The result set.
369 static void ia32_abi_dont_save_regs(void *self, pset *s)
371 ia32_abi_env_t *env = self;
372 if(env->flags.try_omit_fp)
373 pset_insert_ptr(s, env->isa->bp);
377 * Generate the routine prologue.
379 * @param self The callback object.
380 * @param mem A pointer to the mem node. Update this if you define new memory.
381 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
383 * @return The register which shall be used as a stack frame base.
385 * All nodes which define registers in @p reg_map must keep @p reg_map current.
387 static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map)
389 ia32_abi_env_t *env = self;
391 if (! env->flags.try_omit_fp) {
392 ir_node *bl = get_irg_start_block(env->irg);
393 ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
394 ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);
398 push = new_rd_ia32_Push(NULL, env->irg, bl, curr_sp, curr_bp, *mem);
399 curr_sp = new_r_Proj(env->irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
400 *mem = new_r_Proj(env->irg, bl, push, mode_M, pn_ia32_Push_M);
402 /* the push must have the SP out register */
403 arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
404 set_ia32_flags(push, arch_irn_flags_ignore);
406 /* move esp to ebp */
407 curr_bp = be_new_Copy(env->isa->bp->reg_class, env->irg, bl, curr_sp);
408 be_set_constr_single_reg(curr_bp, BE_OUT_POS(0), env->isa->bp);
409 arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
410 be_node_set_flags(curr_bp, BE_OUT_POS(0), arch_irn_flags_ignore);
412 /* beware: the copy must be done before any other sp use */
413 curr_sp = be_new_CopyKeep_single(env->isa->sp->reg_class, env->irg, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
414 be_set_constr_single_reg(curr_sp, BE_OUT_POS(0), env->isa->sp);
415 arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
416 be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore);
418 be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
419 be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);
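/* Illustrative summary of the node sequence built above; the actual
 * instructions are only chosen later by the emitter, but the effect
 * corresponds to the classic ia32 frame setup:
 *
 *     push ebp        ; spill the caller's frame pointer
 *     mov  ebp, esp   ; establish the new frame base
 */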
428 * Generate the routine epilogue.
429 * @param self The callback object.
430 * @param bl The block for the epilogue
431 * @param mem A pointer to the mem node. Update this if you define new memory.
432 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
433 * @return The register which shall be used as a stack frame base.
435 * All nodes which define registers in @p reg_map must keep @p reg_map current.
437 static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map)
439 ia32_abi_env_t *env = self;
440 ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
441 ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);
443 if (env->flags.try_omit_fp) {
444 /* simply remove the stack frame here */
445 curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK);
446 add_irn_dep(curr_sp, *mem);
449 const ia32_isa_t *isa = (ia32_isa_t *)env->isa;
450 ir_mode *mode_bp = env->isa->bp->reg_class->mode;
452 /* gcc always emits a leave at the end of a routine */
453 if (1 || ARCH_AMD(isa->opt_arch)) {
457 leave = new_rd_ia32_Leave(NULL, env->irg, bl, curr_sp, curr_bp);
458 set_ia32_flags(leave, arch_irn_flags_ignore);
459 curr_bp = new_r_Proj(current_ir_graph, bl, leave, mode_bp, pn_ia32_Leave_frame);
460 curr_sp = new_r_Proj(current_ir_graph, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
461 *mem = new_r_Proj(current_ir_graph, bl, leave, mode_M, pn_ia32_Leave_M);
466 /* copy ebp to esp */
467 curr_sp = be_new_SetSP(env->isa->sp, env->irg, bl, curr_sp, curr_bp, *mem);
470 pop = new_rd_ia32_Pop(NULL, env->irg, bl, curr_sp, *mem);
471 set_ia32_flags(pop, arch_irn_flags_ignore);
472 curr_bp = new_r_Proj(current_ir_graph, bl, pop, mode_bp, pn_ia32_Pop_res);
473 curr_sp = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
474 *mem = new_r_Proj(current_ir_graph, bl, pop, mode_M, pn_ia32_Pop_M);
476 arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
477 arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
480 be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
481 be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);
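/* Illustrative summary: the two epilogue variants built above correspond
 * roughly to
 *
 *     leave                ; restores esp and ebp in one instruction
 * vs.
 *     mov esp, ebp         ; copy the frame pointer back to esp
 *     pop ebp              ; reload the caller's frame pointer
 *
 * and in both cases leave esp pointing at the return address.
 */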
485 * Produces the type which sits between the stack args and the locals on the stack.
486 * It will contain the return address and space to store the old base pointer.
487 * @return The Firm type modeling the ABI between type.
489 static ir_type *ia32_abi_get_between_type(void *self)
491 #define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
492 static ir_type *omit_fp_between_type = NULL;
493 static ir_type *between_type = NULL;
495 ia32_abi_env_t *env = self;
497 if (! between_type) {
499 entity *ret_addr_ent;
500 entity *omit_fp_ret_addr_ent;
502 ir_type *old_bp_type = new_type_primitive(IDENT("bp"), mode_P);
503 ir_type *ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_P);
505 between_type = new_type_struct(IDENT("ia32_between_type"));
506 old_bp_ent = new_entity(between_type, IDENT("old_bp"), old_bp_type);
507 ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type);
509 set_entity_offset_bytes(old_bp_ent, 0);
510 set_entity_offset_bytes(ret_addr_ent, get_type_size_bytes(old_bp_type));
511 set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
512 set_type_state(between_type, layout_fixed);
514 omit_fp_between_type = new_type_struct(IDENT("ia32_between_type_omit_fp"));
515 omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type);
517 set_entity_offset_bytes(omit_fp_ret_addr_ent, 0);
518 set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
519 set_type_state(omit_fp_between_type, layout_fixed);
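/* Illustrative layout of the between types constructed above (member offsets
 * within the type, assuming 4 byte pointers on ia32):
 *
 *     offset 0: old_bp    (only present when the frame pointer is kept)
 *     offset 4: ret_addr  (at offset 0 in the omit-fp variant)
 */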
522 return env->flags.try_omit_fp ? omit_fp_between_type : between_type;
527 * Get the estimated cycle count for @p irn.
529 * @param self The this pointer.
530 * @param irn The node.
532 * @return The estimated cycle count for this operation
534 static int ia32_get_op_estimated_cost(const void *self, const ir_node *irn)
537 ia32_op_type_t op_tp;
538 const ia32_irn_ops_t *ops = self;
543 assert(is_ia32_irn(irn));
545 cost = get_ia32_latency(irn);
546 op_tp = get_ia32_op_type(irn);
548 if (is_ia32_CopyB(irn)) {
550 if (ARCH_INTEL(ops->cg->arch))
553 else if (is_ia32_CopyB_i(irn)) {
554 int size = get_tarval_long(get_ia32_Immop_tarval(irn));
555 cost = 20 + (int)ceil((4.0 / 3.0) * size); /* 4/3 in integer arithmetic would truncate to 1 */
556 if (ARCH_INTEL(ops->cg->arch))
559 /* in case of address mode operations add additional cycles */
560 else if (op_tp == ia32_AddrModeD || op_tp == ia32_AddrModeS) {
562 In case of stack access add 5 cycles (we assume stack is in cache),
563 other memory operations cost 20 cycles.
565 cost += is_ia32_use_frame(irn) ? 5 : 20;
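/* example: an Add in source address mode that reads a spill slot is
 * charged its base latency plus 5 cycles, while one reading other
 * memory is charged latency plus 20 cycles */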
572 * Returns the inverse operation of @p irn, recalculating the argument at position @p i.
574 * @param irn The original operation
575 * @param i Index of the argument we want the inverse operation to yield
576 * @param inverse struct to be filled with the resulting inverse op
577 * @param obstack The obstack to use for allocation of the returned nodes array
578 * @return The inverse operation or NULL if the operation is not invertible
580 static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) {
583 ir_node *block, *noreg, *nomem;
586 /* we cannot invert non-ia32 irns */
587 if (! is_ia32_irn(irn))
590 /* operand must always be a real operand (not base, index or mem) */
591 if (i != 2 && i != 3)
594 /* we don't invert address mode operations */
595 if (get_ia32_op_type(irn) != ia32_Normal)
598 irg = get_irn_irg(irn);
599 block = get_nodes_block(irn);
600 mode = get_ia32_res_mode(irn);
601 noreg = get_irn_n(irn, 0);
602 nomem = new_r_NoMem(irg);
604 /* initialize structure */
605 inverse->nodes = obstack_alloc(obst, 2 * sizeof(inverse->nodes[0]));
609 switch (get_ia32_irn_opcode(irn)) {
611 if (get_ia32_immop_type(irn) == ia32_ImmConst) {
612 /* we have an add with a const here */
613 /* inverse == add with negated const */
614 inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
615 pnc = pn_ia32_Add_res;
617 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
618 set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
619 set_ia32_commutative(inverse->nodes[0]);
621 else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
622 /* we have an add with a symconst here */
623 /* inverse == sub with const */
624 inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
625 pnc = pn_ia32_Sub_res;
627 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
630 /* normal add: inverse == sub */
631 ir_node *proj = ia32_get_res_proj(irn);
634 inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, i ^ 1), nomem);
635 pnc = pn_ia32_Sub_res;
640 if (get_ia32_immop_type(irn) != ia32_ImmNone) {
641 /* we have a sub with a const/symconst here */
642 /* inverse == add with this const */
643 inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
644 pnc = pn_ia32_Add_res;
645 inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
646 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
650 ir_node *proj = ia32_get_res_proj(irn);
654 inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, 3), nomem);
657 inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, 2), proj, nomem);
659 pnc = pn_ia32_Sub_res;
664 if (get_ia32_immop_type(irn) != ia32_ImmNone) {
665 /* xor with const: inverse = xor */
666 inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
667 pnc = pn_ia32_Eor_res;
668 inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
669 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
673 inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, (ir_node *)irn, get_irn_n(irn, i), nomem);
674 pnc = pn_ia32_Eor_res;
679 ir_node *proj = ia32_get_res_proj(irn);
682 inverse->nodes[0] = new_rd_ia32_Not(NULL, irg, block, noreg, noreg, proj, nomem);
683 pnc = pn_ia32_Not_res;
687 case iro_ia32_Minus: {
688 ir_node *proj = ia32_get_res_proj(irn);
691 inverse->nodes[0] = new_rd_ia32_Minus(NULL, irg, block, noreg, noreg, proj, nomem);
692 pnc = pn_ia32_Minus_res;
697 /* inverse operation not supported */
701 set_ia32_res_mode(inverse->nodes[0], mode);
702 inverse->nodes[1] = new_r_Proj(irg, block, inverse->nodes[0], mode, pnc);
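/* Worked example (illustrative): for an Add "x = a + 3" carrying an
 * ia32_ImmConst, the inverse built above recomputes the operand as
 * "a = x + (-3)", i.e. an Add with the negated constant; for a plain
 * "x = a + b" it is the Sub "a = x - b". */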
708 * Check if irn can load its operand at position i from memory (source address mode).
709 * @param self Pointer to irn ops itself
710 * @param irn The irn to be checked
711 * @param i The operands position
712 * @return Non-zero if the operand can be loaded
714 static int ia32_possible_memory_operand(const void *self, const ir_node *irn, unsigned int i) {
715 if (! is_ia32_irn(irn) || /* must be an ia32 irn */
716 get_irn_arity(irn) != 5 || /* must be a binary operation */
717 get_ia32_op_type(irn) != ia32_Normal || /* must not already be an address mode irn */
718 ! (get_ia32_am_support(irn) & ia32_am_Source) || /* must be capable of source addressmode */
719 (i != 2 && i != 3) || /* a "real" operand position must be requested */
720 (i == 2 && ! is_ia32_commutative(irn)) || /* if the first operand is requested, the irn must be commutative */
721 is_ia32_use_frame(irn)) /* must not already use frame */
727 static void ia32_perform_memory_operand(const void *self, ir_node *irn, ir_node *spill, unsigned int i) {
728 assert(ia32_possible_memory_operand(self, irn, i) && "Cannot perform memory operand change");
731 ir_node *tmp = get_irn_n(irn, 3);
732 set_irn_n(irn, 3, get_irn_n(irn, 2));
733 set_irn_n(irn, 2, tmp);
736 set_ia32_am_support(irn, ia32_am_Source);
737 set_ia32_op_type(irn, ia32_AddrModeS);
738 set_ia32_am_flavour(irn, ia32_B);
739 set_ia32_ls_mode(irn, get_irn_mode(get_irn_n(irn, i)));
740 //TODO this will fail, if spill is a PhiM (give PhiMs entities?)
741 set_ia32_frame_ent(irn, be_get_frame_entity(spill));
742 set_ia32_use_frame(irn);
743 set_ia32_got_reload(irn);
745 set_irn_n(irn, 0, get_irg_frame(get_irn_irg(irn)));
746 set_irn_n(irn, 4, spill);
749 Input at position one is index register, which is NoReg.
750 We would need cg object to get a real noreg, but we cannot
753 set_irn_n(irn, 3, get_irn_n(irn, 1));
755 //FIXME DBG_OPT_AM_S(reload, irn);
758 static const be_abi_callbacks_t ia32_abi_callbacks = {
761 ia32_abi_get_between_type,
762 ia32_abi_dont_save_regs,
767 /* fill register allocator interface */
769 static const arch_irn_ops_if_t ia32_irn_ops_if = {
770 ia32_get_irn_reg_req,
775 ia32_get_frame_entity,
776 ia32_set_frame_entity,
777 ia32_set_frame_offset,
780 ia32_get_op_estimated_cost,
781 ia32_possible_memory_operand,
782 ia32_perform_memory_operand,
785 ia32_irn_ops_t ia32_irn_ops = {
792 /**************************************************
795 * ___ ___ __| | ___ __ _ ___ _ __ _| |_
796 * / __/ _ \ / _` |/ _ \/ _` |/ _ \ '_ \ | | _|
797 * | (_| (_) | (_| | __/ (_| | __/ | | | | | |
798 * \___\___/ \__,_|\___|\__, |\___|_| |_| |_|_|
801 **************************************************/
803 static void ia32_kill_convs(ia32_code_gen_t *cg) {
806 /* BEWARE: the Projs are inserted in the set */
807 foreach_nodeset(cg->kill_conv, irn) {
808 ir_node *in = get_irn_n(get_Proj_pred(irn), 2);
809 edges_reroute(irn, in, cg->birg->irg);
814 * Transform the Thread Local Storage base.
816 static void transform_tls(ir_graph *irg) {
817 ir_node *irn = get_irg_tls(irg);
820 dbg_info *dbg = get_irn_dbg_info(irn);
821 ir_node *blk = get_nodes_block(irn);
823 newn = new_rd_ia32_LdTls(dbg, irg, blk, get_irn_mode(irn));
830 * Transforms the standard firm graph into an ia32 firm graph.
833 static void ia32_prepare_graph(void *self) {
834 ia32_code_gen_t *cg = self;
835 dom_front_info_t *dom;
836 DEBUG_ONLY(firm_dbg_module_t *old_mod = cg->mod;)
838 FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.transform");
840 /* 1st: transform constants and psi condition trees */
841 ia32_pre_transform_phase(cg);
843 /* 2nd: transform all remaining nodes */
844 ia32_register_transformers();
845 dom = be_compute_dominance_frontiers(cg->irg);
847 cg->kill_conv = new_nodeset(5);
848 transform_tls(cg->irg);
849 irg_walk_blkwise_graph(cg->irg, NULL, ia32_transform_node, cg);
851 del_nodeset(cg->kill_conv);
853 be_free_dominance_frontiers(dom);
856 be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);
858 /* 3rd: optimize address mode */
859 FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.am");
860 ia32_optimize_addressmode(cg);
863 be_dump(cg->irg, "-am", dump_ir_block_graph_sched);
865 DEBUG_ONLY(cg->mod = old_mod;)
869 * Dummy functions for hooks we don't need but which must be filled.
871 static void ia32_before_sched(void *self) {
874 static void remove_unused_nodes(ir_node *irn, bitset_t *already_visited) {
877 ir_node *mem_proj = NULL;
882 mode = get_irn_mode(irn);
884 /* check if we already saw this node or the node has more than one user */
885 if (bitset_contains_irn(already_visited, irn) || get_irn_n_edges(irn) > 1) {
889 /* mark irn visited */
890 bitset_add_irn(already_visited, irn);
892 /* non-Tuple nodes with one user: ok, return */
893 if (get_irn_n_edges(irn) >= 1 && mode != mode_T) {
898 /* tuple node has one user which is not the mem proj -> ok */
898 if (mode == mode_T && get_irn_n_edges(irn) == 1) {
899 mem_proj = ia32_get_proj_for_mode(irn, mode_M);
900 if (mem_proj == NULL) {
905 arity = get_irn_arity(irn);
906 for (i = 0; i < arity; ++i) {
907 ir_node *pred = get_irn_n(irn, i);
909 /* do not follow memory edges or we will accidentally remove stores */
910 if (get_irn_mode(pred) == mode_M) {
911 if(mem_proj != NULL) {
912 edges_reroute(mem_proj, pred, get_irn_irg(mem_proj));
918 set_irn_n(irn, i, new_Bad());
921 The current node is about to be removed: if the predecessor
922 has only this node as user, it needs to be removed as well.
924 if (get_irn_n_edges(pred) <= 1)
925 remove_unused_nodes(pred, already_visited);
928 // we need to set the preds to Bad again to also get the memory edges
929 arity = get_irn_arity(irn);
930 for (i = 0; i < arity; ++i) {
931 set_irn_n(irn, i, new_Bad());
934 if (sched_is_scheduled(irn)) {
939 static void remove_unused_loads_walker(ir_node *irn, void *env) {
940 bitset_t *already_visited = env;
941 if (is_ia32_Ld(irn) && ! bitset_contains_irn(already_visited, irn))
942 remove_unused_nodes(irn, env);
946 * Called before the register allocator.
947 * Calculate a block schedule here. We need it for the x87
948 * simulator and the emitter.
950 static void ia32_before_ra(void *self) {
951 ia32_code_gen_t *cg = self;
952 bitset_t *already_visited = bitset_irg_alloca(cg->irg);
956 There are sometimes unused loads, only pinned by memory.
957 We need to remove those Loads and all other nodes which won't be used
958 after removing the Load from the schedule.
960 irg_walk_graph(cg->irg, NULL, remove_unused_loads_walker, already_visited);
965 * Transforms a be node into a Load.
967 static void transform_to_Load(ia32_transform_env_t *env) {
968 ir_node *irn = env->irn;
969 entity *ent = be_get_frame_entity(irn);
970 ir_mode *mode = env->mode;
971 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
972 ir_node *nomem = new_rd_NoMem(env->irg);
973 ir_node *sched_point = NULL;
974 ir_node *ptr = get_irn_n(irn, 0);
975 ir_node *mem = be_is_Reload(irn) ? get_irn_n(irn, 1) : nomem;
976 ir_node *new_op, *proj;
977 const arch_register_t *reg;
979 if (sched_is_scheduled(irn)) {
980 sched_point = sched_prev(irn);
983 if (mode_is_float(mode)) {
984 if (USE_SSE2(env->cg))
985 new_op = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, ptr, noreg, mem);
987 new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem);
990 new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem);
993 set_ia32_am_support(new_op, ia32_am_Source);
994 set_ia32_op_type(new_op, ia32_AddrModeS);
995 set_ia32_am_flavour(new_op, ia32_B);
996 set_ia32_ls_mode(new_op, mode);
997 set_ia32_frame_ent(new_op, ent);
998 set_ia32_use_frame(new_op);
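/* The Load configured above is eventually emitted as something like
 * "mov <reg>, [<frame base> + <entity offset>]" (or the movss/fld
 * variants for SSE2/x87); illustrative only, the exact syntax comes
 * from the emitter. */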
1000 DBG_OPT_RELOAD2LD(irn, new_op);
1002 proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, pn_Load_res);
1005 sched_add_after(sched_point, new_op);
1006 sched_add_after(new_op, proj);
1011 /* copy the register from the old node to the new Load */
1012 reg = arch_get_irn_register(env->cg->arch_env, irn);
1013 arch_set_irn_register(env->cg->arch_env, new_op, reg);
1015 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, irn));
1017 exchange(irn, proj);
1021 * Transforms a be node into a Store.
1023 static void transform_to_Store(ia32_transform_env_t *env) {
1024 ir_node *irn = env->irn;
1025 entity *ent = be_get_frame_entity(irn);
1026 ir_mode *mode = env->mode;
1027 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
1028 ir_node *nomem = new_rd_NoMem(env->irg);
1029 ir_node *ptr = get_irn_n(irn, 0);
1030 ir_node *val = get_irn_n(irn, 1);
1031 ir_node *new_op, *proj;
1032 ir_node *sched_point = NULL;
1034 if (sched_is_scheduled(irn)) {
1035 sched_point = sched_prev(irn);
1038 if (mode_is_float(mode)) {
1039 if (USE_SSE2(env->cg))
1040 new_op = new_rd_ia32_xStore(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
1042 new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
1044 else if (get_mode_size_bits(mode) == 8) {
1045 new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
1048 new_op = new_rd_ia32_Store(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
1051 set_ia32_am_support(new_op, ia32_am_Dest);
1052 set_ia32_op_type(new_op, ia32_AddrModeD);
1053 set_ia32_am_flavour(new_op, ia32_B);
1054 set_ia32_ls_mode(new_op, mode);
1055 set_ia32_frame_ent(new_op, ent);
1056 set_ia32_use_frame(new_op);
1058 DBG_OPT_SPILL2ST(irn, new_op);
1060 proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode_M, pn_ia32_Store_M);
1063 sched_add_after(sched_point, new_op);
1067 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, irn));
1069 exchange(irn, proj);
1072 static ir_node *create_push(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, ir_node *mem, entity *ent, const char *offset) {
1073 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
1075 ir_node *push = new_rd_ia32_Push(env->dbg, env->irg, env->block, sp, noreg, mem);
1077 set_ia32_frame_ent(push, ent);
1078 set_ia32_use_frame(push);
1079 set_ia32_op_type(push, ia32_AddrModeS);
1080 set_ia32_am_flavour(push, ia32_B);
1081 set_ia32_ls_mode(push, mode_Is);
1083 add_ia32_am_offs(push, offset);
1085 sched_add_before(schedpoint, push);
1089 static ir_node *create_pop(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, entity *ent, const char *offset) {
1090 ir_node *pop = new_rd_ia32_Pop(env->dbg, env->irg, env->block, sp, new_NoMem());
1092 set_ia32_frame_ent(pop, ent);
1093 set_ia32_use_frame(pop);
1094 set_ia32_op_type(pop, ia32_AddrModeD);
1095 set_ia32_am_flavour(pop, ia32_B);
1096 set_ia32_ls_mode(pop, mode_Is);
1098 add_ia32_am_offs(pop, offset);
1100 sched_add_before(schedpoint, pop);
1105 static ir_node* create_spproj(ia32_transform_env_t *env, ir_node *pred, int pos, ir_node *schedpoint, const ir_node *oldsp) {
1106 ir_mode *spmode = get_irn_mode(oldsp);
1107 const arch_register_t *spreg = arch_get_irn_register(env->cg->arch_env, oldsp);
1110 sp = new_rd_Proj(env->dbg, env->irg, env->block, pred, spmode, pos);
1111 arch_set_irn_register(env->cg->arch_env, sp, spreg);
1112 sched_add_before(schedpoint, sp);
1117 static void transform_MemPerm(ia32_transform_env_t *env) {
1119 * Transform memperm, currently we do this the ugly way and produce
1120 * push/pop into/from memory cascades. This is possible without using any registers.
1123 ir_node *node = env->irn;
1125 ir_node *sp = get_irn_n(node, 0);
1126 const ir_edge_t *edge;
1127 const ir_edge_t *next;
1130 arity = be_get_MemPerm_entity_arity(node);
1131 pops = alloca(arity * sizeof(pops[0]));
1134 for(i = 0; i < arity; ++i) {
1135 entity *ent = be_get_MemPerm_in_entity(node, i);
1136 ir_type *enttype = get_entity_type(ent);
1137 int entbits = get_type_size_bits(enttype);
1138 ir_node *mem = get_irn_n(node, i + 1);
1141 assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");
1143 push = create_push(env, node, sp, mem, ent, NULL);
1144 sp = create_spproj(env, push, 0, node, sp);
1146 // add another push after the first one
1147 push = create_push(env, node, sp, mem, ent, "4");
1148 sp = create_spproj(env, push, 0, node, sp);
1151 set_irn_n(node, i, new_Bad());
1155 for(i = arity - 1; i >= 0; --i) {
1156 entity *ent = be_get_MemPerm_out_entity(node, i);
1157 ir_type *enttype = get_entity_type(ent);
1158 int entbits = get_type_size_bits(enttype);
1162 assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");
1164 pop = create_pop(env, node, sp, ent, NULL);
1166 // add another pop after the first one
1167 sp = create_spproj(env, pop, 1, node, sp);
1168 pop = create_pop(env, node, sp, ent, "4");
1171 sp = create_spproj(env, pop, 1, node, sp);
1177 // exchange memprojs
1178 foreach_out_edge_safe(node, edge, next) {
1179 ir_node *proj = get_edge_src_irn(edge);
1180 int p = get_Proj_proj(proj);
1184 set_Proj_pred(proj, pops[p]);
1185 set_Proj_proj(proj, 3);
1189 arity = get_irn_arity(node);
1190 for(i = 0; i < arity; ++i) {
1191 set_irn_n(node, i, new_Bad());
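/* Illustrative example: for a MemPerm swapping two 32 bit spill slots A
 * and B, the cascade built above amounts to
 *
 *     push dword [A]
 *     push dword [B]
 *     pop  dword [A]   ; B's old value ends up in A
 *     pop  dword [B]   ; A's old value ends up in B
 *
 * permuting the slots through the stack without using a register.
 */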
1197 * Fix the mode of Spill/Reload
1199 static ir_mode *fix_spill_mode(ia32_code_gen_t *cg, ir_mode *mode)
1201 if (mode_is_float(mode)) {
1213 * Block-Walker: Calls the transform functions for Spill and Reload.
1215 static void ia32_after_ra_walker(ir_node *block, void *env) {
1216 ir_node *node, *prev;
1217 ia32_code_gen_t *cg = env;
1218 ia32_transform_env_t tenv;
1221 tenv.irg = current_ir_graph;
1223 DEBUG_ONLY(tenv.mod = cg->mod;)
1225 /* beware: the schedule is changed here */
1226 for (node = sched_last(block); !sched_is_begin(node); node = prev) {
1227 prev = sched_prev(node);
1228 if (be_is_Reload(node)) {
1229 /* we always reload the whole register */
1230 tenv.dbg = get_irn_dbg_info(node);
1232 tenv.mode = fix_spill_mode(cg, get_irn_mode(node));
1233 transform_to_Load(&tenv);
1235 else if (be_is_Spill(node)) {
1236 ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
1237 /* we always spill the whole register */
1238 tenv.dbg = get_irn_dbg_info(node);
1240 tenv.mode = fix_spill_mode(cg, get_irn_mode(spillval));
1241 transform_to_Store(&tenv);
1243 else if(be_is_MemPerm(node)) {
1244 tenv.dbg = get_irn_dbg_info(node);
1246 transform_MemPerm(&tenv);
1252 * We transform Spill and Reload here. This needs to be done before
1253 * stack biasing, otherwise we would miss the corrected offset for these nodes.
1255 * If x87 instructions should be emitted, run the x87 simulator and patch
1256 * the virtual instructions. This must obviously be done after register allocation.
1258 static void ia32_after_ra(void *self) {
1259 ia32_code_gen_t *cg = self;
1260 ir_graph *irg = cg->irg;
1262 irg_block_walk_graph(irg, NULL, ia32_after_ra_walker, cg);
1264 ia32_finish_irg(irg, cg);
1268 * Last touchups for the graph before emitting
1270 static void ia32_finish(void *self) {
1271 ia32_code_gen_t *cg = self;
1272 ir_graph *irg = cg->irg;
1274 // Matze: disabled for now, as the irextbb algo sometimes returns extbb in
1275 // the wrong order if the graph has critical edges
1276 be_remove_empty_blocks(irg);
1278 cg->blk_sched = sched_create_block_schedule(cg->irg, cg->birg->execfreqs);
1280 /* if we do x87 code generation, rewrite all the virtual instructions and registers */
1281 if (cg->used_fp == fp_x87 || cg->force_sim) {
1282 x87_simulate_graph(cg->arch_env, irg, cg->blk_sched);
1285 ia32_peephole_optimization(irg, cg);
1289 * Emits the code, closes the output file and frees
1290 * the code generator interface.
1292 static void ia32_codegen(void *self) {
1293 ia32_code_gen_t *cg = self;
1294 ir_graph *irg = cg->irg;
1296 ia32_gen_routine(cg->isa->out, irg, cg);
1300 /* remove it from the isa */
1303 /* de-allocate code generator */
1304 del_set(cg->reg_set);
1308 static void *ia32_cg_init(const be_irg_t *birg);
1310 static const arch_code_generator_if_t ia32_code_gen_if = {
1312 NULL, /* before abi introduce hook */
1314 ia32_before_sched, /* before scheduling hook */
1315 ia32_before_ra, /* before register allocation hook */
1316 ia32_after_ra, /* after register allocation hook */
1317 ia32_finish, /* called before codegen */
1318 ia32_codegen /* emit && done */
1322 * Initializes an IA32 code generator.
1324 static void *ia32_cg_init(const be_irg_t *birg) {
1325 ia32_isa_t *isa = (ia32_isa_t *)birg->main_env->arch_env->isa;
1326 ia32_code_gen_t *cg = xcalloc(1, sizeof(*cg));
1328 cg->impl = &ia32_code_gen_if;
1329 cg->irg = birg->irg;
1330 cg->reg_set = new_set(ia32_cmp_irn_reg_assoc, 1024);
1331 cg->arch_env = birg->main_env->arch_env;
1334 cg->blk_sched = NULL;
1335 cg->fp_to_gp = NULL;
1336 cg->gp_to_fp = NULL;
1337 cg->fp_kind = isa->fp_kind;
1338 cg->used_fp = fp_none;
1339 cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
1341 FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.cg");
1343 /* copy optimizations from isa for easier access */
1345 cg->arch = isa->arch;
1346 cg->opt_arch = isa->opt_arch;
1352 if (isa->name_obst_size) {
1353 //printf("freed %d bytes from name obst\n", isa->name_obst_size);
1354 isa->name_obst_size = 0;
1355 obstack_free(isa->name_obst, NULL);
1356 obstack_init(isa->name_obst);
1360 cur_reg_set = cg->reg_set;
1362 ia32_irn_ops.cg = cg;
1364 return (arch_code_generator_t *)cg;
1369 /*****************************************************************
1370 * ____ _ _ _____ _____
1371 * | _ \ | | | | |_ _|/ ____| /\
1372 * | |_) | __ _ ___| | _____ _ __ __| | | | | (___ / \
1373 * | _ < / _` |/ __| |/ / _ \ '_ \ / _` | | | \___ \ / /\ \
1374 * | |_) | (_| | (__| < __/ | | | (_| | _| |_ ____) / ____ \
1375 * |____/ \__,_|\___|_|\_\___|_| |_|\__,_| |_____|_____/_/ \_\
1377 *****************************************************************/
1380 * Set output modes for GCC
1382 static const tarval_mode_info mo_integer = {
1389 * set the tarval output mode of all integer modes to decimal
1391 static void set_tarval_output_modes(void)
1395 for (i = get_irp_n_modes() - 1; i >= 0; --i) {
1396 ir_mode *mode = get_irp_mode(i);
1398 if (mode_is_int(mode))
1399 set_tarval_mode_output_option(mode, &mo_integer);
1405 * The template that generates a new ISA object.
1406 * Note that this template can be changed by command line arguments.
1409 static ia32_isa_t ia32_isa_template = {
1411 &ia32_isa_if, /* isa interface implementation */
1412 &ia32_gp_regs[REG_ESP], /* stack pointer register */
1413 &ia32_gp_regs[REG_EBP], /* base pointer register */
1414 -1, /* stack direction */
1415 NULL, /* main environment */
1417 NULL, /* 16bit register names */
1418 NULL, /* 8bit register names */
1422 IA32_OPT_INCDEC | /* optimize add 1, sub 1 into inc/dec default: on */
1423 IA32_OPT_DOAM | /* optimize address mode default: on */
1424 IA32_OPT_LEA | /* optimize for LEAs default: on */
1425 IA32_OPT_PLACECNST | /* place constants immediately before instructions, default: on */
1426 IA32_OPT_IMMOPS | /* operations can use immediates, default: on */
1427 IA32_OPT_EXTBB), /* use extended basic block scheduling, default: on */
1428 arch_pentium_4, /* instruction architecture */
1429 arch_pentium_4, /* optimize for architecture */
1430 fp_sse2, /* use sse2 unit */
1431 NULL, /* current code generator */
1432 NULL, /* output file */
1434 NULL, /* name obstack */
1435 0 /* name obst size */
1440 * Initializes the backend ISA.
1442 static void *ia32_init(FILE *file_handle) {
1443 static int inited = 0;
1449 set_tarval_output_modes();
1451 isa = xmalloc(sizeof(*isa));
1452 memcpy(isa, &ia32_isa_template, sizeof(*isa));
1454 ia32_register_init(isa);
1455 ia32_create_opcodes();
1457 if ((ARCH_INTEL(isa->arch) && isa->arch < arch_pentium_4) ||
1458 (ARCH_AMD(isa->arch) && isa->arch < arch_athlon))
1459 /* no SSE2 for these CPUs */
1460 isa->fp_kind = fp_x87;
1462 if (ARCH_INTEL(isa->opt_arch) && isa->opt_arch >= arch_pentium_4) {
1463 /* the Pentium 4 doesn't like inc and dec instructions */
1464 isa->opt &= ~IA32_OPT_INCDEC;
1467 isa->regs_16bit = pmap_create();
1468 isa->regs_8bit = pmap_create();
1469 isa->types = pmap_create();
1470 isa->tv_ent = pmap_create();
1471 isa->out = file_handle;
1473 ia32_build_16bit_reg_map(isa->regs_16bit);
1474 ia32_build_8bit_reg_map(isa->regs_8bit);
1476 /* patch register names of x87 registers */
1478 ia32_st_regs[0].name = "st";
1479 ia32_st_regs[1].name = "st(1)";
1480 ia32_st_regs[2].name = "st(2)";
1481 ia32_st_regs[3].name = "st(3)";
1482 ia32_st_regs[4].name = "st(4)";
1483 ia32_st_regs[5].name = "st(5)";
1484 ia32_st_regs[6].name = "st(6)";
1485 ia32_st_regs[7].name = "st(7)";
1489 isa->name_obst = xmalloc(sizeof(*isa->name_obst));
1490 obstack_init(isa->name_obst);
1491 isa->name_obst_size = 0;
1494 ia32_handle_intrinsics();
1495 ia32_switch_section(isa->out, NO_SECTION);
1496 fprintf(isa->out, "\t.intel_syntax\n");
1498 /* needed for the debug support */
1499 ia32_switch_section(isa->out, SECTION_TEXT);
1500 fprintf(isa->out, ".Ltext0:\n");
1510 * Closes the output file and frees the ISA structure.
1512 static void ia32_done(void *self) {
1513 ia32_isa_t *isa = self;
1515 /* now emit all global declarations */
1516 ia32_gen_decls(isa->out, isa->arch_isa.main_env);
1518 pmap_destroy(isa->regs_16bit);
1519 pmap_destroy(isa->regs_8bit);
1520 pmap_destroy(isa->tv_ent);
1521 pmap_destroy(isa->types);
1524 //printf("name obst size = %d bytes\n", isa->name_obst_size);
1525 obstack_free(isa->name_obst, NULL);
1533 * Return the number of register classes for this architecture.
1534 * We always report these:
1535 * - the general purpose registers
1536 * - the SSE floating point register set
1537 * - the virtual floating point registers
1539 static int ia32_get_n_reg_class(const void *self) {
1544 * Return the register class for index i.
1546 static const arch_register_class_t *ia32_get_reg_class(const void *self, int i) {
1547 assert(i >= 0 && i < 3 && "Invalid ia32 register class requested.");
1549 return &ia32_reg_classes[CLASS_ia32_gp];
1551 return &ia32_reg_classes[CLASS_ia32_xmm];
1553 return &ia32_reg_classes[CLASS_ia32_vfp];
1557 * Get the register class which shall be used to store a value of a given mode.
1558 * @param self The this pointer.
1559 * @param mode The mode in question.
1560 * @return A register class which can hold values of the given mode.
1562 const arch_register_class_t *ia32_get_reg_class_for_mode(const void *self, const ir_mode *mode) {
1563 const ia32_isa_t *isa = self;
1564 if (mode_is_float(mode)) {
1565 return USE_SSE2(isa) ? &ia32_reg_classes[CLASS_ia32_xmm] : &ia32_reg_classes[CLASS_ia32_vfp];
1568 return &ia32_reg_classes[CLASS_ia32_gp];
1572 * Get the ABI restrictions for procedure calls.
1573 * @param self The this pointer.
1574 * @param method_type The type of the method (procedure) in question.
1575 * @param abi The abi object to be modified
1577 static void ia32_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi) {
1578 const ia32_isa_t *isa = self;
1581 unsigned cc = get_method_calling_convention(method_type);
1582 int n = get_method_n_params(method_type);
1585 int i, ignore_1, ignore_2;
1587 const arch_register_t *reg;
1588 be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);
1590 unsigned use_push = !IS_P6_ARCH(isa->opt_arch);
1592 /* set abi flags for calls */
1593 call_flags.bits.left_to_right = 0; /* always last arg first on stack */
1594 call_flags.bits.store_args_sequential = use_push;
1595 /* call_flags.bits.try_omit_fp not changed: can handle both settings */
1596 call_flags.bits.fp_free = 0; /* the frame pointer is fixed in IA32 */
1597 call_flags.bits.call_has_imm = 1; /* IA32 calls can have immediate address */
1599 /* set stack parameter passing style */
1600 be_abi_call_set_flags(abi, call_flags, &ia32_abi_callbacks);
1602 /* collect the mode for each type */
1603 modes = alloca(n * sizeof(modes[0]));
1605 for (i = 0; i < n; i++) {
1606 tp = get_method_param_type(method_type, i);
1607 modes[i] = get_type_mode(tp);
1610 /* set register parameters */
1611 if (cc & cc_reg_param) {
1612 /* determine the number of parameters passed via registers */
1613 biggest_n = ia32_get_n_regparam_class(n, modes, &ignore_1, &ignore_2);
1615 /* loop over all parameters and set the register requirements */
1616 for (i = 0; i <= biggest_n; i++) {
1617 reg = ia32_get_RegParam_reg(n, modes, i, cc);
1618 assert(reg && "kaputt");
1619 be_abi_call_param_reg(abi, i, reg);
1626 /* set stack parameters */
1627 for (i = stack_idx; i < n; i++) {
1628 /* parameters on the stack are 32 bit aligned */
1629 be_abi_call_param_stack(abi, i, 4, 0, 0);
1633 /* set return registers */
1634 n = get_method_n_ress(method_type);
1636 assert(n <= 2 && "more than two results not supported");
1638 /* In case of 64bit returns, we will have two 32bit values */
1640 tp = get_method_res_type(method_type, 0);
1641 mode = get_type_mode(tp);
1643 assert(!mode_is_float(mode) && "two FP results not supported");
1645 tp = get_method_res_type(method_type, 1);
1646 mode = get_type_mode(tp);
1648 assert(!mode_is_float(mode) && "two FP results not supported");
1650 be_abi_call_res_reg(abi, 0, &ia32_gp_regs[REG_EAX]);
1651 be_abi_call_res_reg(abi, 1, &ia32_gp_regs[REG_EDX]);
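/* e.g. a function returning a 64 bit integer delivers one half in EAX
 * and the other in EDX (typically low word in EAX, high word in EDX);
 * the exact mapping is determined by the dword lowering */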
1654 const arch_register_t *reg;
1656 tp = get_method_res_type(method_type, 0);
1657 assert(is_atomic_type(tp));
1658 mode = get_type_mode(tp);
1660 reg = mode_is_float(mode) ?
1661 (USE_SSE2(isa) ? &ia32_xmm_regs[REG_XMM0] : &ia32_vfp_regs[REG_VF0]) :
1662 &ia32_gp_regs[REG_EAX];
1664 be_abi_call_res_reg(abi, 0, reg);
1669 static const void *ia32_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn) {
1670 return &ia32_irn_ops;
1673 const arch_irn_handler_t ia32_irn_handler = {
1677 const arch_irn_handler_t *ia32_get_irn_handler(const void *self) {
1678 return &ia32_irn_handler;
1681 int ia32_to_appear_in_schedule(void *block_env, const ir_node *irn) {
1682 return is_ia32_irn(irn) ? 1 : -1;
1686 * Returns the code generator interface.
1688 static const arch_code_generator_if_t *ia32_get_code_generator_if(void *self) {
1689 return &ia32_code_gen_if;
1693 * Returns the estimated execution time of an ia32 irn.
1695 static sched_timestep_t ia32_sched_exectime(void *env, const ir_node *irn) {
1696 const arch_env_t *arch_env = env;
1697 return is_ia32_irn(irn) ? ia32_get_op_estimated_cost(arch_get_irn_ops(arch_env, irn), irn) : 1;
1700 list_sched_selector_t ia32_sched_selector;
1703 * Returns the reg_pressure scheduler with to_appear_in_schedule() overloaded
1705 static const list_sched_selector_t *ia32_get_list_sched_selector(const void *self, list_sched_selector_t *selector) {
1706 memcpy(&ia32_sched_selector, selector, sizeof(ia32_sched_selector));
1707 ia32_sched_selector.exectime = ia32_sched_exectime;
1708 ia32_sched_selector.to_appear_in_schedule = ia32_to_appear_in_schedule;
1709 return &ia32_sched_selector;
1713 * Returns the necessary byte alignment for storing a register of a given class.
1715 static int ia32_get_reg_class_alignment(const void *self, const arch_register_class_t *cls) {
1716 ir_mode *mode = arch_register_class_mode(cls);
1717 int bytes = get_mode_size_bytes(mode);
1719 if (mode_is_float(mode) && bytes > 8)
1724 static ia32_intrinsic_env_t intrinsic_env = { NULL, NULL };
1727 * Returns the libFirm configuration parameters for this backend.
1729 static const backend_params *ia32_get_libfirm_params(void) {
1730 static const arch_dep_params_t ad = {
1731 1, /* also use subs */
1732 4, /* maximum shifts */
1733 31, /* maximum shift amount */
1735 1, /* allow Mulhs */
1736 1, /* allow Mulus */
1737 32 /* Mulh allowed up to 32 bit */
1739 static backend_params p = {
1740 NULL, /* no additional opcodes */
1741 NULL, /* will be set later */
1742 1, /* need dword lowering */
1743 ia32_create_intrinsic_fkt,
1744 &intrinsic_env, /* context for ia32_create_intrinsic_fkt */
1752 /* instruction set architectures. */
1753 static const lc_opt_enum_int_items_t arch_items[] = {
1754 { "386", arch_i386, },
1755 { "486", arch_i486, },
1756 { "pentium", arch_pentium, },
1757 { "586", arch_pentium, },
1758 { "pentiumpro", arch_pentium_pro, },
1759 { "686", arch_pentium_pro, },
1760 { "pentiummmx", arch_pentium_mmx, },
1761 { "pentium2", arch_pentium_2, },
1762 { "p2", arch_pentium_2, },
1763 { "pentium3", arch_pentium_3, },
1764 { "p3", arch_pentium_3, },
1765 { "pentium4", arch_pentium_4, },
1766 { "p4", arch_pentium_4, },
1767 { "pentiumm", arch_pentium_m, },
1768 { "pm", arch_pentium_m, },
1769 { "core", arch_core, },
1771 { "athlon", arch_athlon, },
1772 { "athlon64", arch_athlon_64, },
1773 { "opteron", arch_opteron, },
1777 static lc_opt_enum_int_var_t arch_var = {
1778 &ia32_isa_template.arch, arch_items
1781 static lc_opt_enum_int_var_t opt_arch_var = {
1782 &ia32_isa_template.opt_arch, arch_items
1785 static const lc_opt_enum_int_items_t fp_unit_items[] = {
1787 { "sse2", fp_sse2 },
1791 static lc_opt_enum_int_var_t fp_unit_var = {
1792 &ia32_isa_template.fp_kind, fp_unit_items
1795 static const lc_opt_enum_int_items_t gas_items[] = {
1796 { "linux", ASM_LINUX_GAS },
1797 { "mingw", ASM_MINGW_GAS },
1801 static lc_opt_enum_int_var_t gas_var = {
1802 (int *)&asm_flavour, gas_items
1805 static const lc_opt_table_entry_t ia32_options[] = {
1806 LC_OPT_ENT_ENUM_INT("arch", "select the instruction architecture", &arch_var),
1807 LC_OPT_ENT_ENUM_INT("opt", "optimize for instruction architecture", &opt_arch_var),
1808 LC_OPT_ENT_ENUM_INT("fpunit", "select the floating point unit", &fp_unit_var),
1809 LC_OPT_ENT_NEGBIT("noaddrmode", "do not use address mode", &ia32_isa_template.opt, IA32_OPT_DOAM),
1810 LC_OPT_ENT_NEGBIT("nolea", "do not optimize for LEAs", &ia32_isa_template.opt, IA32_OPT_LEA),
1811 LC_OPT_ENT_NEGBIT("noplacecnst", "do not place constants", &ia32_isa_template.opt, IA32_OPT_PLACECNST),
1812 LC_OPT_ENT_NEGBIT("noimmop", "no operations with immediates", &ia32_isa_template.opt, IA32_OPT_IMMOPS),
1813 LC_OPT_ENT_NEGBIT("noextbb", "do not use extended basic block scheduling", &ia32_isa_template.opt, IA32_OPT_EXTBB),
1814 LC_OPT_ENT_ENUM_INT("gasmode", "set the GAS compatibility mode", &gas_var),
1819 * Register command line options for the ia32 backend.
1823 * ia32-arch=arch create instruction for arch
1824 * ia32-opt=arch optimize for run on arch
1825 * ia32-fpunit=unit select floating point unit (x87 or SSE2)
1826 * ia32-incdec optimize for inc/dec
1827 * ia32-noaddrmode do not use address mode
1828 * ia32-nolea do not optimize for LEAs
1829 * ia32-noplacecnst do not place constants
1830 * ia32-noimmop no operations with immediates
1831 * ia32-noextbb do not use extended basic block scheduling
1832 * ia32-gasmode set the GAS compatibility mode
1834 static void ia32_register_options(lc_opt_entry_t *ent)
1836 lc_opt_entry_t *be_grp_ia32 = lc_opt_get_grp(ent, "ia32");
1837 lc_opt_add_table(be_grp_ia32, ia32_options);
1839 #endif /* WITH_LIBCORE */
1841 const arch_isa_if_t ia32_isa_if = {
1844 ia32_get_n_reg_class,
1846 ia32_get_reg_class_for_mode,
1848 ia32_get_irn_handler,
1849 ia32_get_code_generator_if,
1850 ia32_get_list_sched_selector,
1851 ia32_get_reg_class_alignment,
1852 ia32_get_libfirm_params,
1854 ia32_register_options