/**
 * This is the main ia32 firm backend driver.
 * @author Christian Wuerdig
 */

#ifdef WITH_LIBCORE
#include <libcore/lc_opts.h>
#include <libcore/lc_opts_enum.h>
#endif /* WITH_LIBCORE */
#include "pseudo_irg.h"
#include "iredges_t.h"

#include "../beabi.h" /* the general register allocator interface */
#include "../benode_t.h"
#include "../belower.h"
#include "../besched_t.h"

#include "bearch_ia32_t.h"

#include "ia32_new_nodes.h" /* ia32 nodes interface */
47 #include "gen_ia32_regalloc_if.h" /* the generated interface (register type and class defenitions) */
#include "ia32_gen_decls.h" /* interface declaration emitter */
#include "ia32_transform.h"
#include "ia32_emitter.h"
#include "ia32_map_regs.h"
#include "ia32_optimize.h"
#include "ia32_dbg_stat.h"
#include "ia32_finish.h"
#include "ia32_util.h"

#define DEBUG_MODULE "firm.be.ia32.isa"

static set *cur_reg_set = NULL;

#define is_Start(irn) (get_irn_opcode(irn) == iro_Start)
/* Creates the unique per irg GP NoReg node. */
ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg) {
    return be_abi_get_callee_save_irn(cg->birg->abi, &ia32_gp_regs[REG_GP_NOREG]);
}

/* Creates the unique per irg FP NoReg node. */
ir_node *ia32_new_NoReg_fp(ia32_code_gen_t *cg) {
    return be_abi_get_callee_save_irn(cg->birg->abi,
        USE_SSE2(cg) ? &ia32_xmm_regs[REG_XMM_NOREG] : &ia32_vfp_regs[REG_VFP_NOREG]);
}
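/*
 * Background sketch: ia32 nodes carry a full address mode operand skeleton
 * (base, index, left/right operand, memory), and the NoReg pseudo registers
 * are used to fill the slots an instruction does not actually use, so every
 * input still has a well-defined register.
 */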
/**************************************************
 *          register allocator interface
 **************************************************/
/**
 * Skips all Proj nodes towards their predecessor.
 */
static ir_node *my_skip_proj(const ir_node *n) {
    while (is_Proj(n))
        n = get_Proj_pred(n);
    return (ir_node *)n;
}

/**
 * Return register requirements for an ia32 node.
 * If the node returns a tuple (mode_T) then the Projs
 * will be asked for this information.
 */
static const arch_register_req_t *ia32_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos) {
    const ia32_irn_ops_t *ops = self;
    const ia32_register_req_t *irn_req;
    long node_pos = pos == -1 ? 0 : pos;
    ir_mode *mode = is_Block(irn) ? NULL : get_irn_mode(irn);
    FIRM_DBG_REGISTER(firm_dbg_module_t *mod, DEBUG_MODULE);

    if (is_Block(irn) || mode == mode_M || mode == mode_X) {
        DBG((mod, LEVEL_1, "ignoring Block, mode_M, mode_X node %+F\n", irn));
        return NULL;
    }

    if (mode == mode_T && pos < 0) {
        DBG((mod, LEVEL_1, "ignoring request for OUT requirements for node %+F\n", irn));
        return NULL;
    }

    DBG((mod, LEVEL_1, "get requirements at pos %d for %+F ... ", pos, irn));

    if (is_Proj(irn)) {
        node_pos = ia32_translate_proj_pos(irn);
        irn = my_skip_proj(irn);

        DB((mod, LEVEL_1, "skipping Proj, going to %+F at pos %d ... ", irn, node_pos));
    }

    if (is_ia32_irn(irn)) {
        if (pos >= 0) {
            irn_req = get_ia32_in_req(irn, pos);
        }
        else {
            irn_req = get_ia32_out_req(irn, node_pos);
        }

        DB((mod, LEVEL_1, "returning reqs for %+F at pos %d\n", irn, pos));

        memcpy(req, &(irn_req->req), sizeof(*req));

        if (arch_register_req_is(&(irn_req->req), should_be_same)) {
            assert(irn_req->same_pos >= 0 && "should be same constraint for in -> out NYI");
            req->other_same = get_irn_n(irn, irn_req->same_pos);
        }

        if (arch_register_req_is(&(irn_req->req), should_be_different)) {
            assert(irn_req->different_pos >= 0 && "should be different constraint for in -> out NYI");
            req->other_different = get_irn_n(irn, irn_req->different_pos);
        }
    }
    else {
        /* treat Unknowns like Const with default requirements */
        if (is_Unknown(irn)) {
            DB((mod, LEVEL_1, "returning UKNWN reqs for %+F\n", irn));
            if (mode_is_float(mode)) {
                if (USE_SSE2(ops->cg))
                    memcpy(req, &(ia32_default_req_ia32_xmm_xmm_UKNWN), sizeof(*req));
                else
                    memcpy(req, &(ia32_default_req_ia32_vfp_vfp_UKNWN), sizeof(*req));
            }
            else if (mode_is_int(mode) || mode_is_reference(mode))
                memcpy(req, &(ia32_default_req_ia32_gp_gp_UKNWN), sizeof(*req));
            else if (mode == mode_T || mode == mode_M) {
                DBG((mod, LEVEL_1, "ignoring Unknown node %+F\n", irn));
                return NULL;
            }
            else
                assert(0 && "unsupported Unknown-Mode");
        }
        else {
            DB((mod, LEVEL_1, "returning NULL for %+F (not ia32)\n", irn));
            return NULL;
        }
    }

    return req;
}
static void ia32_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg) {
    int pos = 0;
    const ia32_irn_ops_t *ops = self;

    if (get_irn_mode(irn) == mode_X) {
        return;
    }

    DBG((ops->cg->mod, LEVEL_1, "ia32 assigned register %s to node %+F\n", reg->name, irn));

    if (is_Proj(irn)) {
        pos = ia32_translate_proj_pos(irn);
        irn = my_skip_proj(irn);
    }

    if (is_ia32_irn(irn)) {
        const arch_register_t **slots;

        slots      = get_ia32_slots(irn);
        slots[pos] = reg;
    }
    else {
        ia32_set_firm_reg(irn, reg, cur_reg_set);
    }
}
static const arch_register_t *ia32_get_irn_reg(const void *self, const ir_node *irn) {
    int pos = 0;
    const arch_register_t *reg = NULL;

    if (is_Proj(irn)) {
        if (get_irn_mode(irn) == mode_X) {
            return NULL;
        }

        pos = ia32_translate_proj_pos(irn);
        irn = my_skip_proj(irn);
    }

    if (is_ia32_irn(irn)) {
        const arch_register_t **slots;
        slots = get_ia32_slots(irn);
        reg   = slots[pos];
    }
    else {
        reg = ia32_get_firm_reg(irn, cur_reg_set);
    }

    return reg;
}
static arch_irn_class_t ia32_classify(const void *self, const ir_node *irn) {
    arch_irn_class_t classification = arch_irn_class_normal;

    irn = my_skip_proj(irn);

    if (is_cfop(irn))
        classification |= arch_irn_class_branch;

    if (! is_ia32_irn(irn))
        return classification & ~arch_irn_class_normal;

    if (is_ia32_Cnst(irn))
        classification |= arch_irn_class_const;

    if (is_ia32_Ld(irn))
        classification |= arch_irn_class_load;

    if (is_ia32_St(irn) || is_ia32_Store8Bit(irn))
        classification |= arch_irn_class_store;

    if (is_ia32_got_reload(irn))
        classification |= arch_irn_class_reload;

    return classification;
}
static arch_irn_flags_t ia32_get_flags(const void *self, const ir_node *irn) {
    if (is_Proj(irn)) {
        ir_node *pred = get_Proj_pred(irn);
        if (is_ia32_Push(pred) && get_Proj_proj(irn) == 0) {
            return arch_irn_flags_modify_sp;
        }
        if (is_ia32_Pop(pred) && get_Proj_proj(irn) == 1) {
            return arch_irn_flags_modify_sp;
        }
    }

    irn = my_skip_proj(irn);
    if (is_ia32_irn(irn))
        return get_ia32_flags(irn);

    return arch_irn_flags_ignore;
}
static entity *ia32_get_frame_entity(const void *self, const ir_node *irn) {
    return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
}

static void ia32_set_frame_entity(const void *self, ir_node *irn, entity *ent) {
    set_ia32_frame_ent(irn, ent);
}
static void ia32_set_frame_offset(const void *self, ir_node *irn, int bias) {
    char buf[64];
    const ia32_irn_ops_t *ops = self;

    if (get_ia32_frame_ent(irn)) {
        ia32_am_flavour_t am_flav = get_ia32_am_flavour(irn);

        /* Pop nodes modify the stack pointer before reading the destination
           address, so fix this here */
        if (is_ia32_Pop(irn)) {
            bias -= 4;
        }

        DBG((ops->cg->mod, LEVEL_1, "stack biased %+F with %d\n", irn, bias));

        snprintf(buf, sizeof(buf), "%d", bias);

        if (get_ia32_op_type(irn) == ia32_Normal) {
            set_ia32_cnst(irn, buf);
        }
        else {
            add_ia32_am_offs(irn, buf);
            am_flav |= ia32_O;
            set_ia32_am_flavour(irn, am_flav);
        }
    }
}
static int ia32_get_sp_bias(const void *self, const ir_node *irn) {
    if (is_Proj(irn)) {
        int proj      = get_Proj_proj(irn);
        ir_node *pred = get_Proj_pred(irn);

        if (is_ia32_Push(pred) && proj == 0)
            return 4;
        else if (is_ia32_Pop(pred) && proj == 1)
            return -4;
    }

    return 0;
}
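/*
 * Worked example (a sketch): for  sp' = Proj(Push, pn_ia32_Push_stack)  the
 * stack pointer has moved down by one 32 bit word when sp' becomes live, so
 * a bias of +4 is reported; the matching Pop Proj yields -4 and shrinks the
 * frame again.
 */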
typedef struct ia32_abi_env_t {
    be_abi_call_flags_bits_t flags;
    const arch_isa_t *isa;
    const arch_env_t *aenv;
    ir_graph *irg;
} ia32_abi_env_t;

static void *ia32_abi_init(const be_abi_call_t *call, const arch_env_t *aenv, ir_graph *irg)
{
    ia32_abi_env_t *env    = xmalloc(sizeof(env[0]));
    be_abi_call_flags_t fl = be_abi_call_get_flags(call);
    env->flags = fl.bits;
    env->irg   = irg;
    env->aenv  = aenv;
    env->isa   = aenv->isa;
    return env;
}
/**
 * Put all registers which are saved by the prologue/epilogue in a set.
 *
 * @param self The callback object.
 * @param s    The result set.
 */
static void ia32_abi_dont_save_regs(void *self, pset *s)
{
    ia32_abi_env_t *env = self;
    if (env->flags.try_omit_fp)
        pset_insert_ptr(s, env->isa->bp);
}
/**
 * Generate the routine prologue.
 *
 * @param self    The callback object.
 * @param mem     A pointer to the mem node. Update this if you define new memory.
 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
 *
 * @return The register which shall be used as a stack frame base.
 *
 * All nodes which define registers in @p reg_map must keep @p reg_map current.
 */
static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map)
{
    ia32_abi_env_t *env = self;

    if (! env->flags.try_omit_fp) {
        ir_node *bl      = get_irg_start_block(env->irg);
        ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
        ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);
        ir_node *push;

        /* push ebp */
        push    = new_rd_ia32_Push(NULL, env->irg, bl, curr_sp, curr_bp, *mem);
        curr_sp = new_r_Proj(env->irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
        *mem    = new_r_Proj(env->irg, bl, push, mode_M, pn_ia32_Push_M);

        /* the push must have SP out register */
        arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
        set_ia32_flags(push, arch_irn_flags_ignore);

        /* move esp to ebp */
        curr_bp = be_new_Copy(env->isa->bp->reg_class, env->irg, bl, curr_sp);
        be_set_constr_single_reg(curr_bp, BE_OUT_POS(0), env->isa->bp);
        arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
        be_node_set_flags(curr_bp, BE_OUT_POS(0), arch_irn_flags_ignore);

        /* beware: the copy must be done before any other sp use */
        curr_sp = be_new_CopyKeep_single(env->isa->sp->reg_class, env->irg, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
        be_set_constr_single_reg(curr_sp, BE_OUT_POS(0), env->isa->sp);
        arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
        be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore);

        be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
        be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);

        return env->isa->bp;
    }

    return env->isa->sp;
}
/**
 * Generate the routine epilogue.
 *
 * @param self    The callback object.
 * @param bl      The block for the epilogue
 * @param mem     A pointer to the mem node. Update this if you define new memory.
 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
 *
 * All nodes which define registers in @p reg_map must keep @p reg_map current.
 */
static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map)
{
    ia32_abi_env_t *env = self;
    ir_node *curr_sp    = be_abi_reg_map_get(reg_map, env->isa->sp);
    ir_node *curr_bp    = be_abi_reg_map_get(reg_map, env->isa->bp);

    if (env->flags.try_omit_fp) {
        /* simply remove the stack frame here */
        curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, *mem, BE_STACK_FRAME_SIZE_SHRINK);
    }
    else {
        const ia32_isa_t *isa = (ia32_isa_t *)env->isa;
        ir_mode *mode_bp      = env->isa->bp->reg_class->mode;

        /* gcc always emits a leave at the end of a routine */
        if (1 || ARCH_AMD(isa->opt_arch)) {
            ir_node *leave;

            /* leave */
            leave   = new_rd_ia32_Leave(NULL, env->irg, bl, curr_sp, *mem);
            set_ia32_flags(leave, arch_irn_flags_ignore);
            curr_bp = new_r_Proj(current_ir_graph, bl, leave, mode_bp, pn_ia32_Leave_frame);
            curr_sp = new_r_Proj(current_ir_graph, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
            *mem    = new_r_Proj(current_ir_graph, bl, leave, mode_M, pn_ia32_Leave_M);
        }
        else {
            ir_node *pop;

            /* copy ebp to esp */
            curr_sp = be_new_SetSP(env->isa->sp, env->irg, bl, curr_sp, curr_bp, *mem);

            /* pop ebp */
            pop     = new_rd_ia32_Pop(NULL, env->irg, bl, curr_sp, *mem);
            set_ia32_flags(pop, arch_irn_flags_ignore);
            curr_bp = new_r_Proj(current_ir_graph, bl, pop, mode_bp, pn_ia32_Pop_res);
            curr_sp = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
            *mem    = new_r_Proj(current_ir_graph, bl, pop, mode_M, pn_ia32_Pop_M);
        }
        arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
        arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
    }

    be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
    be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);
}
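/*
 * A rough sketch of the instruction sequences the two callbacks above
 * produce (exact operand forms are up to the emitter):
 *
 *   prologue:  push ebp           ; the ia32_Push of the old base pointer
 *              mov  ebp, esp      ; the Copy/CopyKeep pair
 *
 *   epilogue:  leave              ; currently always, see the "1 ||" above
 *      or:     mov  esp, ebp      ; be_SetSP
 *              pop  ebp           ; ia32_Pop
 */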
/**
 * Produces the type which sits between the stack args and the locals on the stack.
 * It will contain the return address and space to store the old base pointer.
 *
 * @return The Firm type modeling the ABI between type.
 */
static ir_type *ia32_abi_get_between_type(void *self)
{
#define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
    static ir_type *omit_fp_between_type = NULL;
    static ir_type *between_type         = NULL;

    ia32_abi_env_t *env = self;

    if (! between_type) {
        entity *old_bp_ent;
        entity *ret_addr_ent;
        entity *omit_fp_ret_addr_ent;

        ir_type *old_bp_type   = new_type_primitive(IDENT("bp"), mode_P);
        ir_type *ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_P);

        between_type = new_type_struct(IDENT("ia32_between_type"));
        old_bp_ent   = new_entity(between_type, IDENT("old_bp"), old_bp_type);
        ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type);

        set_entity_offset_bytes(old_bp_ent, 0);
        set_entity_offset_bytes(ret_addr_ent, get_type_size_bytes(old_bp_type));
        set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
        set_type_state(between_type, layout_fixed);

        omit_fp_between_type = new_type_struct(IDENT("ia32_between_type_omit_fp"));
        omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type);

        set_entity_offset_bytes(omit_fp_ret_addr_ent, 0);
        set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
        set_type_state(omit_fp_between_type, layout_fixed);
    }

    return env->flags.try_omit_fp ? omit_fp_between_type : between_type;
}
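/*
 * Illustration of the resulting frame layout (higher addresses on top):
 *
 *   ... stack arguments ...
 *   return address          <- always part of the between type
 *   old base pointer        <- only present if the frame pointer is used
 *   ... local variables ...
 */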
/**
 * Get the estimated cycle count for @p irn.
 *
 * @param self The this pointer.
 * @param irn  The node.
 *
 * @return The estimated cycle count for this operation
 */
static int ia32_get_op_estimated_cost(const void *self, const ir_node *irn)
{
    int cost;
    ia32_op_type_t op_tp;
    const ia32_irn_ops_t *ops = self;

    if (is_Proj(irn))
        return 0;

    assert(is_ia32_irn(irn));

    cost  = get_ia32_latency(irn);
    op_tp = get_ia32_op_type(irn);

    if (is_ia32_CopyB(irn)) {
        cost = 250;
        if (ARCH_INTEL(ops->cg->arch))
            cost += 150;
    }
    else if (is_ia32_CopyB_i(irn)) {
        int size = get_tarval_long(get_ia32_Immop_tarval(irn));
        cost     = 20 + (int)ceil((4.0 / 3.0) * size); /* must be float division, 4/3 would truncate to 1 */
        if (ARCH_INTEL(ops->cg->arch))
            cost += 150;
    }
    /* in case of address mode operations add additional cycles */
    else if (op_tp == ia32_AddrModeD || op_tp == ia32_AddrModeS) {
        /*
            In case of stack access add 5 cycles (we assume stack is in cache),
            other memory operations cost 20 cycles.
        */
        cost += is_ia32_use_frame(irn) ? 5 : 20;
    }

    return cost;
}
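/*
 * Worked example (numbers as encoded above): an immediate CopyB of 16 bytes
 * costs roughly 20 + ceil(4/3 * 16) = 42 cycles, plus 150 on Intel cores,
 * and a source/destination address mode operation pays an extra 5 cycles
 * for a frame access or 20 cycles for any other memory access.
 */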
/**
 * Returns the inverse operation of @p irn, recalculating the argument at position @p i.
 *
 * @param irn     The original operation
 * @param i       Index of the argument we want the inverse operation to yield
 * @param inverse struct to be filled with the resulting inverse op
 * @param obstack The obstack to use for allocation of the returned nodes array
 * @return The inverse operation or NULL if the operation is not invertible
 */
static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) {
    ir_node *block, *noreg, *nomem;
    ir_graph *irg;
    ir_mode *mode;
    int pnc;

    /* we cannot invert non-ia32 irns */
    if (! is_ia32_irn(irn))
        return NULL;

    /* operand must always be a real operand (not base, index or mem) */
    if (i != 2 && i != 3)
        return NULL;

    /* we don't invert address mode operations */
    if (get_ia32_op_type(irn) != ia32_Normal)
        return NULL;

    irg   = get_irn_irg(irn);
    block = get_nodes_block(irn);
    mode  = get_ia32_res_mode(irn);
    noreg = get_irn_n(irn, 0);
    nomem = new_r_NoMem(irg);

    /* initialize structure */
    inverse->nodes = obstack_alloc(obst, 2 * sizeof(inverse->nodes[0]));
    inverse->costs = 0;
    inverse->n     = 2;

    switch (get_ia32_irn_opcode(irn)) {
        case iro_ia32_Add:
            if (get_ia32_immop_type(irn) == ia32_ImmConst) {
                /* we have an add with a const here */
                /* inverse == add with negated const */
                inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
                pnc               = pn_ia32_Add_res;
                inverse->costs   += 1;
                copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
                set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
                set_ia32_commutative(inverse->nodes[0]);
            }
            else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
                /* we have an add with a symconst here */
                /* inverse == sub with const */
                inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
                pnc               = pn_ia32_Sub_res;
                inverse->costs   += 2;
                copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
            }
            else {
                /* normal add: inverse == sub */
                ir_node *proj = ia32_get_res_proj(irn);
                assert(proj);

                inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, i ^ 1), nomem);
                pnc               = pn_ia32_Sub_res;
                inverse->costs   += 2;
            }
            break;
        case iro_ia32_Sub:
            if (get_ia32_immop_type(irn) != ia32_ImmNone) {
                /* we have a sub with a const/symconst here */
                /* inverse == add with this const */
                inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
                pnc               = pn_ia32_Add_res;
                inverse->costs   += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
                copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
            }
            else {
                /* normal sub */
                ir_node *proj = ia32_get_res_proj(irn);
                assert(proj);

                if (i == 2) {
                    inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, 3), nomem);
                }
                else {
                    inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, 2), proj, nomem);
                }
                pnc             = pn_ia32_Sub_res;
                inverse->costs += 1;
            }
            break;
        case iro_ia32_Eor:
            if (get_ia32_immop_type(irn) != ia32_ImmNone) {
                /* xor with const: inverse = xor */
                inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
                pnc               = pn_ia32_Eor_res;
                inverse->costs   += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
                copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
            }
            else {
                /* normal xor */
                inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, (ir_node *)irn, get_irn_n(irn, i), nomem);
                pnc               = pn_ia32_Eor_res;
                inverse->costs   += 1;
            }
            break;
        case iro_ia32_Not: {
            ir_node *proj = ia32_get_res_proj(irn);
            assert(proj);

            inverse->nodes[0] = new_rd_ia32_Not(NULL, irg, block, noreg, noreg, proj, nomem);
            pnc               = pn_ia32_Not_res;
            inverse->costs   += 1;
            break;
        }
        case iro_ia32_Minus: {
            ir_node *proj = ia32_get_res_proj(irn);
            assert(proj);

            inverse->nodes[0] = new_rd_ia32_Minus(NULL, irg, block, noreg, noreg, proj, nomem);
            pnc               = pn_ia32_Minus_res;
            inverse->costs   += 1;
            break;
        }
        default:
            /* inverse operation not supported */
            return NULL;
    }

    set_ia32_res_mode(inverse->nodes[0], mode);
    inverse->nodes[1] = new_r_Proj(irg, block, inverse->nodes[0], mode, pnc);

    return inverse;
}
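/*
 * Example (a sketch): for irn = Add(x, Const 7), recomputing x succeeds via
 * Add(result, -7) with the negated tarval; for a plain Add(x, y) the inverse
 * is Sub(result, y), where "result" is the res Proj of the original node.
 */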
/**
 * Check if irn can load its operand at position i from memory (source address mode).
 *
 * @param self Pointer to irn ops itself
 * @param irn  The irn to be checked
 * @param i    The operand's position
 * @return     Non-zero if the operand can be loaded
 */
static int ia32_possible_memory_operand(const void *self, const ir_node *irn, unsigned int i) {
    if (! is_ia32_irn(irn)                            ||  /* must be an ia32 irn */
        get_irn_arity(irn) != 5                       ||  /* must be a binary operation */
        get_ia32_op_type(irn) != ia32_Normal          ||  /* must not already be an address mode irn */
        ! (get_ia32_am_support(irn) & ia32_am_Source) ||  /* must be capable of source address mode */
        (i != 2 && i != 3)                            ||  /* a "real" operand position must be requested */
        (i == 2 && ! is_ia32_commutative(irn))        ||  /* if the first operand is requested, the irn must be commutative */
        is_ia32_use_frame(irn))                           /* must not already use the frame */
        return 0;

    return 1;
}
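/*
 * Reminder of the binary ia32 operand layout assumed above (hence the
 * arity == 5 check): in 0 = base, in 1 = index, in 2/3 = left/right operand,
 * in 4 = memory. Only positions 2 and 3 are "real" operands, and position 2
 * can only be folded if the operation commutes (the operands get swapped).
 */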
static void ia32_perform_memory_operand(const void *self, ir_node *irn, ir_node *reload, unsigned int i) {
    assert(ia32_possible_memory_operand(self, irn, i) && "Cannot perform memory operand change");
    assert(get_nodes_block(reload) == get_nodes_block(irn) && "Reload must be in same block as irn.");

    if (get_irn_n_edges(reload) > 1)
        return;

    if (i == 2) {
        ir_node *tmp = get_irn_n(irn, 3);
        set_irn_n(irn, 3, get_irn_n(irn, 2));
        set_irn_n(irn, 2, tmp);
    }

    set_ia32_am_support(irn, ia32_am_Source);
    set_ia32_op_type(irn, ia32_AddrModeS);
    set_ia32_am_flavour(irn, ia32_B);
    set_ia32_ls_mode(irn, get_irn_mode(reload));
    set_ia32_frame_ent(irn, be_get_frame_entity(reload));
    set_ia32_use_frame(irn);
    set_ia32_got_reload(irn);

    set_irn_n(irn, 0, be_get_Reload_frame(reload));
    set_irn_n(irn, 4, be_get_Reload_mem(reload));

    /*
        Input at position one is index register, which is NoReg.
        We would need cg object to get a real noreg, but we cannot
        access it from here; so reuse the NoReg that is already in
        the index input (position 1).
    */
    set_irn_n(irn, 3, get_irn_n(irn, 1));

    DBG_OPT_AM_S(reload, irn);
}
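/*
 * In effect this folds a Reload into its single user, e.g. (a sketch,
 * offsets illustrative):
 *
 *   v = Reload(frame)                      add eax, ebx
 *   r = Add(x, v)          which emits as  add eax, [ebp + ent_offs]
 *
 * so the spilled value is read directly from its spill slot via source
 * address mode instead of being loaded into a register first.
 */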
static const be_abi_callbacks_t ia32_abi_callbacks = {
    ia32_abi_init,
    free,
    ia32_abi_get_between_type,
    ia32_abi_dont_save_regs,
    ia32_abi_prologue,
    ia32_abi_epilogue,
};

/* fill register allocator interface */

static const arch_irn_ops_if_t ia32_irn_ops_if = {
    ia32_get_irn_reg_req,
    ia32_set_irn_reg,
    ia32_get_irn_reg,
    ia32_classify,
    ia32_get_flags,
    ia32_get_frame_entity,
    ia32_set_frame_entity,
    ia32_set_frame_offset,
    ia32_get_sp_bias,
    ia32_get_inverse,
    ia32_get_op_estimated_cost,
    ia32_possible_memory_operand,
    ia32_perform_memory_operand,
};

ia32_irn_ops_t ia32_irn_ops = {
    &ia32_irn_ops_if,
    NULL
};
/**************************************************
 *          code generator interface
 **************************************************/
static void ia32_kill_convs(ia32_code_gen_t *cg) {
    ir_node *irn;

    /* BEWARE: the Projs are inserted in the set */
    foreach_nodeset(cg->kill_conv, irn) {
        ir_node *in = get_irn_n(get_Proj_pred(irn), 2);
        edges_reroute(irn, in, cg->birg->irg);
    }
}
/**
 * Transforms the standard firm graph into
 * an ia32 firm graph
 */
static void ia32_prepare_graph(void *self) {
    ia32_code_gen_t *cg = self;
    dom_front_info_t *dom;
    DEBUG_ONLY(firm_dbg_module_t *old_mod = cg->mod;)

    FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.transform");

    /* 1st: transform constants and psi condition trees */
    ia32_pre_transform_phase(cg);

    /* 2nd: transform all remaining nodes */
    ia32_register_transformers();
    dom = be_compute_dominance_frontiers(cg->irg);

    cg->kill_conv = new_nodeset(5);
    irg_walk_blkwise_graph(cg->irg, NULL, ia32_transform_node, cg);
    ia32_kill_convs(cg);
    del_nodeset(cg->kill_conv);

    be_free_dominance_frontiers(dom);

    if (cg->dump)
        be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);

    /* 3rd: optimize address mode */
    FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.am");
    ia32_optimize_addressmode(cg);

    if (cg->dump)
        be_dump(cg->irg, "-am", dump_ir_block_graph_sched);

    DEBUG_ONLY(cg->mod = old_mod;)
}
/**
 * Dummy functions for hooks we don't need but which must be filled.
 */
static void ia32_before_sched(void *self) {
}
static void remove_unused_nodes(ir_node *irn, bitset_t *already_visited) {
    int i;
    ir_mode *mode;
    ir_node *mem_proj = NULL;

    if (is_Block(irn))
        return;

    mode = get_irn_mode(irn);

    /* check if we already saw this node or the node has more than one user */
    if (bitset_contains_irn(already_visited, irn) || get_irn_n_edges(irn) > 1) {
        return;
    }

    /* mark irn visited */
    bitset_add_irn(already_visited, irn);

    /* non-Tuple nodes with one user: ok, return */
    if (get_irn_n_edges(irn) >= 1 && mode != mode_T)
        return;

    /* tuple node has one user which is not the mem proj -> ok */
    if (mode == mode_T && get_irn_n_edges(irn) == 1) {
        mem_proj = ia32_get_proj_for_mode(irn, mode_M);
        if (! mem_proj)
            return;
    }

    for (i = get_irn_arity(irn) - 1; i >= 0; i--) {
        ir_node *pred = get_irn_n(irn, i);

        /* do not follow memory edges or we will accidentally remove stores */
        if (is_Proj(pred) && get_irn_mode(pred) == mode_M)
            continue;

        set_irn_n(irn, i, new_Bad());

        /*
            The current node is about to be removed: if the predecessor
            has only this node as user, it needs to be removed as well.
        */
        if (get_irn_n_edges(pred) <= 1)
            remove_unused_nodes(pred, already_visited);
    }

    /* remove the node from the schedule */
    if (sched_is_scheduled(irn))
        sched_remove(irn);
}

static void remove_unused_loads_walker(ir_node *irn, void *env) {
    bitset_t *already_visited = env;
    if (is_ia32_Ld(irn) && ! bitset_contains_irn(already_visited, irn))
        remove_unused_nodes(irn, env);
}
/**
 * Called before the register allocator.
 * Calculate a block schedule here. We need it for the x87
 * simulator and the emitter.
 */
static void ia32_before_ra(void *self) {
    ia32_code_gen_t *cg       = self;
    bitset_t *already_visited = bitset_irg_malloc(cg->irg);

    cg->blk_sched = sched_create_block_schedule(cg->irg);

    /*
        There are sometimes unused loads, only pinned by memory.
        We need to remove those Loads and all other nodes which won't be used
        after removing the Load from the schedule.
    */
    irg_walk_graph(cg->irg, remove_unused_loads_walker, NULL, already_visited);
    bitset_free(already_visited);
}
/**
 * Transforms a be node into a Load.
 */
static void transform_to_Load(ia32_transform_env_t *env) {
    ir_node *irn         = env->irn;
    entity  *ent         = be_get_frame_entity(irn);
    ir_mode *mode        = env->mode;
    ir_node *noreg       = ia32_new_NoReg_gp(env->cg);
    ir_node *nomem       = new_rd_NoMem(env->irg);
    ir_node *sched_point = NULL;
    ir_node *ptr         = get_irn_n(irn, 0);
    ir_node *mem         = be_is_Reload(irn) ? get_irn_n(irn, 1) : nomem;
    ir_node *new_op, *proj;
    const arch_register_t *reg;

    if (sched_is_scheduled(irn)) {
        sched_point = sched_prev(irn);
    }

    if (mode_is_float(mode)) {
        if (USE_SSE2(env->cg))
            new_op = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, ptr, noreg, mem);
        else
            new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem);
    }
    else {
        new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem);
    }

    set_ia32_am_support(new_op, ia32_am_Source);
    set_ia32_op_type(new_op, ia32_AddrModeS);
    set_ia32_am_flavour(new_op, ia32_B);
    set_ia32_ls_mode(new_op, mode);
    set_ia32_frame_ent(new_op, ent);
    set_ia32_use_frame(new_op);

    DBG_OPT_RELOAD2LD(irn, new_op);

    proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, pn_Load_res);

    if (sched_point) {
        sched_add_after(sched_point, new_op);
        sched_add_after(new_op, proj);

        sched_remove(irn);
    }

    /* copy the register from the old node to the new Load */
    reg = arch_get_irn_register(env->cg->arch_env, irn);
    arch_set_irn_register(env->cg->arch_env, new_op, reg);

    SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, irn));

    exchange(irn, proj);
}
/**
 * Transforms a be node into a Store.
 */
static void transform_to_Store(ia32_transform_env_t *env) {
    ir_node *irn         = env->irn;
    entity  *ent         = be_get_frame_entity(irn);
    ir_mode *mode        = env->mode;
    ir_node *noreg       = ia32_new_NoReg_gp(env->cg);
    ir_node *nomem       = new_rd_NoMem(env->irg);
    ir_node *ptr         = get_irn_n(irn, 0);
    ir_node *val         = get_irn_n(irn, 1);
    ir_node *new_op, *proj;
    ir_node *sched_point = NULL;

    if (sched_is_scheduled(irn)) {
        sched_point = sched_prev(irn);
    }

    if (mode_is_float(mode)) {
        if (USE_SSE2(env->cg))
            new_op = new_rd_ia32_xStore(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
        else
            new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
    }
    else if (get_mode_size_bits(mode) == 8) {
        new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
    }
    else {
        new_op = new_rd_ia32_Store(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
    }

    set_ia32_am_support(new_op, ia32_am_Dest);
    set_ia32_op_type(new_op, ia32_AddrModeD);
    set_ia32_am_flavour(new_op, ia32_B);
    set_ia32_ls_mode(new_op, mode);
    set_ia32_frame_ent(new_op, ent);
    set_ia32_use_frame(new_op);

    DBG_OPT_SPILL2ST(irn, new_op);

    proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode_M, pn_ia32_Store_M);

    if (sched_point) {
        sched_add_after(sched_point, new_op);
        sched_remove(irn);
    }

    SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, irn));

    exchange(irn, proj);
}
static ir_node *create_push(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, ir_node *mem, entity *ent, const char *offset) {
    ir_node *noreg = ia32_new_NoReg_gp(env->cg);

    ir_node *push = new_rd_ia32_Push(env->dbg, env->irg, env->block, sp, noreg, mem);

    set_ia32_frame_ent(push, ent);
    set_ia32_use_frame(push);
    set_ia32_op_type(push, ia32_AddrModeS);
    set_ia32_am_flavour(push, ia32_B);
    set_ia32_ls_mode(push, mode_Is);
    if (offset)
        add_ia32_am_offs(push, offset);

    sched_add_before(schedpoint, push);
    return push;
}

static ir_node *create_pop(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, entity *ent, const char *offset) {
    ir_node *pop = new_rd_ia32_Pop(env->dbg, env->irg, env->block, sp, new_NoMem());

    set_ia32_frame_ent(pop, ent);
    set_ia32_use_frame(pop);
    set_ia32_op_type(pop, ia32_AddrModeD);
    set_ia32_am_flavour(pop, ia32_B);
    set_ia32_ls_mode(pop, mode_Is);
    if (offset)
        add_ia32_am_offs(pop, offset);

    sched_add_before(schedpoint, pop);
    return pop;
}
static ir_node* create_spproj(ia32_transform_env_t *env, ir_node *pred, int pos, ir_node *schedpoint, const ir_node *oldsp) {
    ir_mode *spmode = get_irn_mode(oldsp);
    const arch_register_t *spreg = arch_get_irn_register(env->cg->arch_env, oldsp);
    ir_node *sp;

    sp = new_rd_Proj(env->dbg, env->irg, env->block, pred, spmode, pos);
    arch_set_irn_register(env->cg->arch_env, sp, spreg);
    sched_add_before(schedpoint, sp);
    return sp;
}
static void transform_MemPerm(ia32_transform_env_t *env) {
    /*
     * Transform memperm, currently we do this the ugly way and produce
     * push/pop into/from memory cascades. This is possible without using
     * any registers.
     */
    ir_node *node = env->irn;
    ir_node *sp = get_irn_n(node, 0);
    const ir_edge_t *edge;
    const ir_edge_t *next;
    ir_node **pops;
    int i, arity;

    arity = be_get_MemPerm_entity_arity(node);
    pops = alloca(arity * sizeof(pops[0]));

    // create pushs
    for(i = 0; i < arity; ++i) {
        entity *ent = be_get_MemPerm_in_entity(node, i);
        ir_type *enttype = get_entity_type(ent);
        int entbits = get_type_size_bits(enttype);
        ir_node *mem = get_irn_n(node, i + 1);
        ir_node *push;

        assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");

        push = create_push(env, node, sp, mem, ent, NULL);
        sp = create_spproj(env, push, 0, node, sp);
        if (entbits == 64) {
            // add another push after the first one
            push = create_push(env, node, sp, mem, ent, "4");
            sp = create_spproj(env, push, 0, node, sp);
        }

        set_irn_n(node, i, new_Bad());
    }

    // create pops
    for(i = arity - 1; i >= 0; --i) {
        entity *ent = be_get_MemPerm_out_entity(node, i);
        ir_type *enttype = get_entity_type(ent);
        int entbits = get_type_size_bits(enttype);
        ir_node *pop;

        assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");

        pop = create_pop(env, node, sp, ent, NULL);
        if (entbits == 64) {
            // add another pop after the first one
            sp = create_spproj(env, pop, 1, node, sp);
            pop = create_pop(env, node, sp, ent, "4");
        }
        sp = create_spproj(env, pop, 1, node, sp);

        pops[i] = pop;
    }

    // exchange memprojs
    foreach_out_edge_safe(node, edge, next) {
        ir_node *proj = get_edge_src_irn(edge);
        int p = get_Proj_proj(proj);

        assert(p < arity);

        set_Proj_pred(proj, pops[p]);
        set_Proj_proj(proj, 3);
    }
}
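/*
 * Sketch of the cascade built above for a two-entity MemPerm that swaps the
 * spill slots A and B (each push/pop is doubled for 64 bit entities):
 *
 *   push [A]     ; save both values on the stack
 *   push [B]
 *   pop  [A]     ; write them back in permuted order: A gets B's value,
 *   pop  [B]     ; B gets A's value
 */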
/**
 * Fix the mode of Spill/Reload
 */
static ir_mode *fix_spill_mode(ia32_code_gen_t *cg, ir_mode *mode)
{
    if (mode_is_float(mode)) {
        /* the spill mode depends on the FP unit in use (x87 vs. SSE2) */
        return USE_SSE2(cg) ? mode_D : mode_E;
    }

    return mode;
}
/**
 * Block-Walker: Calls the transform functions Spill and Reload.
 */
static void ia32_after_ra_walker(ir_node *block, void *env) {
    ir_node *node, *prev;
    ia32_code_gen_t *cg = env;
    ia32_transform_env_t tenv;

    tenv.block = block;
    tenv.irg   = current_ir_graph;
    tenv.cg    = cg;
    DEBUG_ONLY(tenv.mod = cg->mod;)

    /* beware: the schedule is changed here */
    for (node = sched_last(block); !sched_is_begin(node); node = prev) {
        prev = sched_prev(node);
        if (be_is_Reload(node)) {
            /* we always reload the whole register */
            tenv.dbg  = get_irn_dbg_info(node);
            tenv.irn  = node;
            tenv.mode = fix_spill_mode(cg, get_irn_mode(node));
            transform_to_Load(&tenv);
        }
        else if (be_is_Spill(node)) {
            ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
            /* we always spill the whole register */
            tenv.dbg  = get_irn_dbg_info(node);
            tenv.irn  = node;
            tenv.mode = fix_spill_mode(cg, get_irn_mode(spillval));
            transform_to_Store(&tenv);
        }
        else if (be_is_MemPerm(node)) {
            tenv.dbg = get_irn_dbg_info(node);
            tenv.irn = node;
            transform_MemPerm(&tenv);
        }
    }
}
/**
 * We transform Spill and Reload here. This needs to be done before
 * stack biasing, otherwise we would miss the corrected offset for these nodes.
 *
 * If x87 instructions should be emitted, run the x87 simulator and patch
 * the virtual instructions. This must obviously be done after register allocation.
 */
static void ia32_after_ra(void *self) {
    ia32_code_gen_t *cg = self;

    irg_block_walk_graph(cg->irg, NULL, ia32_after_ra_walker, self);

    /* if we do x87 code generation, rewrite all the virtual instructions and registers */
    if (cg->used_fp == fp_x87 || cg->force_sim) {
        x87_simulate_graph(cg->arch_env, cg->irg, cg->blk_sched);
    }
}
/**
 * Last touchups for the graph before emit
 */
static void ia32_finish(void *self) {
    ia32_code_gen_t *cg = self;
    ir_graph *irg = cg->irg;

    ia32_finish_irg(irg, cg);
}

/**
 * Emits the code, closes the output file and frees
 * the code generator interface.
 */
static void ia32_codegen(void *self) {
    ia32_code_gen_t *cg = self;
    ir_graph *irg = cg->irg;

    ia32_gen_routine(cg->isa->out, irg, cg);

    cur_reg_set = NULL;

    /* remove it from the isa */
    cg->isa->cg = NULL;

    /* de-allocate code generator */
    del_set(cg->reg_set);
    free(cg);
}
static void *ia32_cg_init(const be_irg_t *birg);

static const arch_code_generator_if_t ia32_code_gen_if = {
    ia32_cg_init,
    NULL,               /* before abi introduce hook */
    ia32_prepare_graph,
    ia32_before_sched,  /* before scheduling hook */
    ia32_before_ra,     /* before register allocation hook */
    ia32_after_ra,      /* after register allocation hook */
    ia32_finish,        /* called before codegen */
    ia32_codegen        /* emit && done */
};
/**
 * Initializes an IA32 code generator.
 */
static void *ia32_cg_init(const be_irg_t *birg) {
    ia32_isa_t      *isa = (ia32_isa_t *)birg->main_env->arch_env->isa;
    ia32_code_gen_t *cg  = xcalloc(1, sizeof(*cg));

    cg->impl      = &ia32_code_gen_if;
    cg->irg       = birg->irg;
    cg->reg_set   = new_set(ia32_cmp_irn_reg_assoc, 1024);
    cg->arch_env  = birg->main_env->arch_env;
    cg->isa       = isa;
    cg->birg      = birg;
    cg->blk_sched = NULL;
    cg->fp_to_gp  = NULL;
    cg->gp_to_fp  = NULL;
    cg->fp_kind   = isa->fp_kind;
    cg->used_fp   = fp_none;
    cg->dump      = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;

    FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.cg");

    /* copy optimizations from isa for easier access */
    cg->opt      = isa->opt;
    cg->arch     = isa->arch;
    cg->opt_arch = isa->opt_arch;

    /* enter it */
    isa->cg = cg;

    if (isa->name_obst_size) {
        //printf("freed %d bytes from name obst\n", isa->name_obst_size);
        isa->name_obst_size = 0;
        obstack_free(isa->name_obst, NULL);
        obstack_init(isa->name_obst);
    }

    cur_reg_set = cg->reg_set;

    ia32_irn_ops.cg = cg;

    return (arch_code_generator_t *)cg;
}
/*****************************************************************
 *                        Backend ISA
 *****************************************************************/
/**
 * Set output modes for GCC
 */
static const tarval_mode_info mo_integer = {
    TVO_DECIMAL,
    NULL,
    NULL,
};

/*
 * set the tarval output mode to C-semantics
 */
static void set_tarval_output_modes(void)
{
    set_tarval_mode_output_option(get_modeLs(), &mo_integer);
    set_tarval_mode_output_option(get_modeLu(), &mo_integer);
    set_tarval_mode_output_option(get_modeIs(), &mo_integer);
    set_tarval_mode_output_option(get_modeIu(), &mo_integer);
    set_tarval_mode_output_option(get_modeHs(), &mo_integer);
    set_tarval_mode_output_option(get_modeHu(), &mo_integer);
    set_tarval_mode_output_option(get_modeBs(), &mo_integer);
    set_tarval_mode_output_option(get_modeBu(), &mo_integer);
    set_tarval_mode_output_option(get_modeC(),  &mo_integer);
    set_tarval_mode_output_option(get_modeU(),  &mo_integer);
    set_tarval_mode_output_option(get_modeIu(), &mo_integer);
}
/**
 * The template that generates a new ISA object.
 * Note that this template can be changed by command line
 * arguments.
 */
static ia32_isa_t ia32_isa_template = {
    {
        &ia32_isa_if,            /* isa interface implementation */
        &ia32_gp_regs[REG_ESP],  /* stack pointer register */
        &ia32_gp_regs[REG_EBP],  /* base pointer register */
        -1,                      /* stack direction */
    },
    NULL,                        /* 16bit register names */
    NULL,                        /* 8bit register names */
    NULL,                        /* types */
    NULL,                        /* tv_ents */
    (IA32_OPT_INCDEC |           /* optimize add 1, sub 1 into inc/dec               default: on */
    IA32_OPT_DOAM |              /* optimize address mode                            default: on */
    IA32_OPT_LEA |               /* optimize for LEAs                                default: on */
    IA32_OPT_PLACECNST |         /* place constants immediately before instructions, default: on */
    IA32_OPT_IMMOPS |            /* operations can use immediates,                   default: on */
    IA32_OPT_EXTBB),             /* use extended basic block scheduling,             default: on */
    arch_pentium_4,              /* instruction architecture */
    arch_pentium_4,              /* optimize for architecture */
    fp_sse2,                     /* use sse2 unit */
    NULL,                        /* current code generator */
    NULL,                        /* name obstack */
    0                            /* name obst size */
};
/**
 * Initializes the backend ISA.
 */
static void *ia32_init(FILE *file_handle) {
    static int inited = 0;
    ia32_isa_t *isa;

    if (inited)
        return NULL;

    set_tarval_output_modes();

    isa = xmalloc(sizeof(*isa));
    memcpy(isa, &ia32_isa_template, sizeof(*isa));

    ia32_register_init(isa);
    ia32_create_opcodes();

    if ((ARCH_INTEL(isa->arch) && isa->arch < arch_pentium_4) ||
        (ARCH_AMD(isa->arch) && isa->arch < arch_athlon))
        /* no SSE2 for these CPUs */
        isa->fp_kind = fp_x87;

    if (ARCH_INTEL(isa->opt_arch) && isa->opt_arch >= arch_pentium_4) {
        /* the Pentium 4 doesn't like inc and dec instructions */
        isa->opt &= ~IA32_OPT_INCDEC;
    }

    isa->regs_16bit = pmap_create();
    isa->regs_8bit  = pmap_create();
    isa->types      = pmap_create();
    isa->tv_ent     = pmap_create();
    isa->out        = file_handle;

    ia32_build_16bit_reg_map(isa->regs_16bit);
    ia32_build_8bit_reg_map(isa->regs_8bit);

    /* patch register names of x87 registers */
    ia32_st_regs[0].name = "st";
    ia32_st_regs[1].name = "st(1)";
    ia32_st_regs[2].name = "st(2)";
    ia32_st_regs[3].name = "st(3)";
    ia32_st_regs[4].name = "st(4)";
    ia32_st_regs[5].name = "st(5)";
    ia32_st_regs[6].name = "st(6)";
    ia32_st_regs[7].name = "st(7)";

#ifndef NDEBUG
    isa->name_obst = xmalloc(sizeof(*isa->name_obst));
    obstack_init(isa->name_obst);
    isa->name_obst_size = 0;
#endif /* NDEBUG */

    ia32_handle_intrinsics();
    ia32_switch_section(NULL, NO_SECTION);
    fprintf(isa->out, "\t.intel_syntax\n");

    inited = 1;

    return isa;
}
/**
 * Closes the output file and frees the ISA structure.
 */
static void ia32_done(void *self) {
    ia32_isa_t *isa = self;

    /* emit now all global declarations */
    ia32_gen_decls(isa->out);

    pmap_destroy(isa->regs_16bit);
    pmap_destroy(isa->regs_8bit);
    pmap_destroy(isa->tv_ent);
    pmap_destroy(isa->types);

#ifndef NDEBUG
    //printf("name obst size = %d bytes\n", isa->name_obst_size);
    obstack_free(isa->name_obst, NULL);
#endif /* NDEBUG */

    free(self);
}
/**
 * Return the number of register classes for this architecture.
 * We always report these:
 *  - the general purpose registers
 *  - the SSE floating point register set
 *  - the virtual floating point registers
 */
static int ia32_get_n_reg_class(const void *self) {
    return 3;
}

/**
 * Return the register class for index i.
 */
static const arch_register_class_t *ia32_get_reg_class(const void *self, int i) {
    assert(i >= 0 && i < 3 && "Invalid ia32 register class requested.");
    if (i == 0)
        return &ia32_reg_classes[CLASS_ia32_gp];
    else if (i == 1)
        return &ia32_reg_classes[CLASS_ia32_xmm];
    else
        return &ia32_reg_classes[CLASS_ia32_vfp];
}
/**
 * Get the register class which shall be used to store a value of a given mode.
 * @param self The this pointer.
 * @param mode The mode in question.
 * @return A register class which can hold values of the given mode.
 */
const arch_register_class_t *ia32_get_reg_class_for_mode(const void *self, const ir_mode *mode) {
    const ia32_isa_t *isa = self;
    if (mode_is_float(mode)) {
        return USE_SSE2(isa) ? &ia32_reg_classes[CLASS_ia32_xmm] : &ia32_reg_classes[CLASS_ia32_vfp];
    }
    else
        return &ia32_reg_classes[CLASS_ia32_gp];
}
/**
 * Get the ABI restrictions for procedure calls.
 * @param self        The this pointer.
 * @param method_type The type of the method (procedure) in question.
 * @param abi         The abi object to be modified
 */
static void ia32_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi) {
    const ia32_isa_t *isa = self;
    ir_type *tp;
    ir_mode *mode;
    unsigned cc           = get_method_calling_convention(method_type);
    int n                 = get_method_n_params(method_type);
    int biggest_n         = -1;
    int stack_idx         = 0;
    int i, ignore_1, ignore_2;
    ir_mode **modes;
    const arch_register_t *reg;
    be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);

    unsigned use_push = !IS_P6_ARCH(isa->opt_arch);

    /* set abi flags for calls */
    call_flags.bits.left_to_right         = 0;        /* always last arg first on stack */
    call_flags.bits.store_args_sequential = use_push;
    /* call_flags.bits.try_omit_fp     not changed: can handle both settings */
    call_flags.bits.fp_free               = 0;        /* the frame pointer is fixed in IA32 */
    call_flags.bits.call_has_imm          = 1;        /* IA32 calls can have immediate address */

    /* set stack parameter passing style */
    be_abi_call_set_flags(abi, call_flags, &ia32_abi_callbacks);

    /* collect the mode for each type */
    modes = alloca(n * sizeof(modes[0]));

    for (i = 0; i < n; i++) {
        tp       = get_method_param_type(method_type, i);
        modes[i] = get_type_mode(tp);
    }

    /* set register parameters */
    if (cc & cc_reg_param) {
        /* determine the number of parameters passed via registers */
        biggest_n = ia32_get_n_regparam_class(n, modes, &ignore_1, &ignore_2);

        /* loop over all parameters and set the register requirements */
        for (i = 0; i <= biggest_n; i++) {
            reg = ia32_get_RegParam_reg(n, modes, i, cc);
            assert(reg && "broken");
            be_abi_call_param_reg(abi, i, reg);
        }

        stack_idx = i;
    }

    /* set stack parameters */
    for (i = stack_idx; i < n; i++) {
        be_abi_call_param_stack(abi, i, 1, 0, 0);
    }

    /* set return registers */
    n = get_method_n_ress(method_type);

    assert(n <= 2 && "more than two results not supported");

    /* In case of 64bit returns, we will have two 32bit values */
    if (n == 2) {
        tp   = get_method_res_type(method_type, 0);
        mode = get_type_mode(tp);

        assert(!mode_is_float(mode) && "two FP results not supported");

        tp   = get_method_res_type(method_type, 1);
        mode = get_type_mode(tp);

        assert(!mode_is_float(mode) && "two FP results not supported");

        be_abi_call_res_reg(abi, 0, &ia32_gp_regs[REG_EAX]);
        be_abi_call_res_reg(abi, 1, &ia32_gp_regs[REG_EDX]);
    }
    else if (n == 1) {
        const arch_register_t *reg;

        tp   = get_method_res_type(method_type, 0);
        assert(is_atomic_type(tp));
        mode = get_type_mode(tp);

        reg = mode_is_float(mode) ?
            (USE_SSE2(isa) ? &ia32_xmm_regs[REG_XMM0] : &ia32_vfp_regs[REG_VF0]) :
            &ia32_gp_regs[REG_EAX];

        be_abi_call_res_reg(abi, 0, reg);
    }
}
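/*
 * Resulting result-register conventions (a sketch):
 *
 *   int       f(...)  ->  eax
 *   long long f(...)  ->  eax (low word) and edx (high word)
 *   double    f(...)  ->  vf0/st(0) with x87, xmm0 with SSE2
 */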
static const void *ia32_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn) {
    return &ia32_irn_ops;
}

const arch_irn_handler_t ia32_irn_handler = {
    ia32_get_irn_ops
};

const arch_irn_handler_t *ia32_get_irn_handler(const void *self) {
    return &ia32_irn_handler;
}

int ia32_to_appear_in_schedule(void *block_env, const ir_node *irn) {
    return is_ia32_irn(irn) ? 1 : -1;
}
/**
 * Initializes the code generator interface.
 */
static const arch_code_generator_if_t *ia32_get_code_generator_if(void *self) {
    return &ia32_code_gen_if;
}

/**
 * Returns the estimated execution time of an ia32 irn.
 */
static sched_timestep_t ia32_sched_exectime(void *env, const ir_node *irn) {
    const arch_env_t *arch_env = env;
    return is_ia32_irn(irn) ? ia32_get_op_estimated_cost(arch_get_irn_ops(arch_env, irn), irn) : 1;
}
list_sched_selector_t ia32_sched_selector;

/**
 * Returns the reg_pressure scheduler with to_appear_in_schedule() overloaded
 */
static const list_sched_selector_t *ia32_get_list_sched_selector(const void *self) {
//  memcpy(&ia32_sched_selector, reg_pressure_selector, sizeof(list_sched_selector_t));
    memcpy(&ia32_sched_selector, trivial_selector, sizeof(list_sched_selector_t));
    ia32_sched_selector.exectime              = ia32_sched_exectime;
    ia32_sched_selector.to_appear_in_schedule = ia32_to_appear_in_schedule;
    return &ia32_sched_selector;
}
/**
 * Returns the necessary byte alignment for storing a register of given class.
 */
static int ia32_get_reg_class_alignment(const void *self, const arch_register_class_t *cls) {
    ir_mode *mode = arch_register_class_mode(cls);
    int bytes     = get_mode_size_bytes(mode);

    if (mode_is_float(mode) && bytes > 8)
        return 16;
    return bytes;
}
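/* (xmm registers are 16 bytes wide; giving their spill slots 16 byte
   alignment presumably allows the aligned SSE load/store forms to be used) */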
static ia32_intrinsic_env_t intrinsic_env = { NULL, NULL };

/**
 * Returns the libFirm configuration parameter for this backend.
 */
static const backend_params *ia32_get_libfirm_params(void) {
    static const arch_dep_params_t ad = {
        1,  /* also use subs */
        4,  /* maximum shifts */
        31, /* maximum shift amount */

        1,  /* allow Mulhs */
        1,  /* allow Mulus */
        32  /* Mulh allowed up to 32 bit */
    };
    static backend_params p = {
        NULL,  /* no additional opcodes */
        NULL,  /* will be set later */
        1,     /* need dword lowering */
        ia32_create_intrinsic_fkt,
        &intrinsic_env,  /* context for ia32_create_intrinsic_fkt */
    };

    p.dep_param = &ad;
    return &p;
}
#ifdef WITH_LIBCORE

/* instruction set architectures. */
static const lc_opt_enum_int_items_t arch_items[] = {
    { "386",        arch_i386, },
    { "486",        arch_i486, },
    { "pentium",    arch_pentium, },
    { "586",        arch_pentium, },
    { "pentiumpro", arch_pentium_pro, },
    { "686",        arch_pentium_pro, },
    { "pentiummmx", arch_pentium_mmx, },
    { "pentium2",   arch_pentium_2, },
    { "p2",         arch_pentium_2, },
    { "pentium3",   arch_pentium_3, },
    { "p3",         arch_pentium_3, },
    { "pentium4",   arch_pentium_4, },
    { "p4",         arch_pentium_4, },
    { "pentiumm",   arch_pentium_m, },
    { "pm",         arch_pentium_m, },
    { "core",       arch_core, },
    { "athlon",     arch_athlon, },
    { "athlon64",   arch_athlon_64, },
    { "opteron",    arch_opteron, },
    { NULL,         0 }
};

static lc_opt_enum_int_var_t arch_var = {
    &ia32_isa_template.arch, arch_items
};

static lc_opt_enum_int_var_t opt_arch_var = {
    &ia32_isa_template.opt_arch, arch_items
};

static const lc_opt_enum_int_items_t fp_unit_items[] = {
    { "x87",  fp_x87 },
    { "sse2", fp_sse2 },
    { NULL,   0 }
};

static lc_opt_enum_int_var_t fp_unit_var = {
    &ia32_isa_template.fp_kind, fp_unit_items
};

static const lc_opt_enum_int_items_t gas_items[] = {
    { "linux", ASM_LINUX_GAS },
    { "mingw", ASM_MINGW_GAS },
    { NULL,    0 }
};

static lc_opt_enum_int_var_t gas_var = {
    (int *)&asm_flavour, gas_items
};

static const lc_opt_table_entry_t ia32_options[] = {
    LC_OPT_ENT_ENUM_INT("arch",      "select the instruction architecture",        &arch_var),
    LC_OPT_ENT_ENUM_INT("opt",       "optimize for instruction architecture",      &opt_arch_var),
    LC_OPT_ENT_ENUM_INT("fpunit",    "select the floating point unit",             &fp_unit_var),
    LC_OPT_ENT_NEGBIT("noaddrmode",  "do not use address mode",                    &ia32_isa_template.opt, IA32_OPT_DOAM),
    LC_OPT_ENT_NEGBIT("nolea",       "do not optimize for LEAs",                   &ia32_isa_template.opt, IA32_OPT_LEA),
    LC_OPT_ENT_NEGBIT("noplacecnst", "do not place constants",                     &ia32_isa_template.opt, IA32_OPT_PLACECNST),
    LC_OPT_ENT_NEGBIT("noimmop",     "no operations with immediates",              &ia32_isa_template.opt, IA32_OPT_IMMOPS),
    LC_OPT_ENT_NEGBIT("noextbb",     "do not use extended basic block scheduling", &ia32_isa_template.opt, IA32_OPT_EXTBB),
    LC_OPT_ENT_ENUM_INT("gasmode",   "set the GAS compatibility mode",             &gas_var),
    { NULL }
};
/**
 * Register command line options for the ia32 backend.
 *
 * Options so far:
 *
 * ia32-arch=arch    create instructions for arch
 * ia32-opt=arch     optimize for run on arch
 * ia32-fpunit=unit  select floating point unit (x87 or SSE2)
 * ia32-incdec       optimize for inc/dec
 * ia32-noaddrmode   do not use address mode
 * ia32-nolea        do not optimize for LEAs
 * ia32-noplacecnst  do not place constants
 * ia32-noimmop      no operations with immediates
 * ia32-noextbb      do not use extended basic block scheduling
 * ia32-gasmode      set the GAS compatibility mode
 */
static void ia32_register_options(lc_opt_entry_t *ent)
{
    lc_opt_entry_t *be_grp_ia32 = lc_opt_get_grp(ent, "ia32");
    lc_opt_add_table(be_grp_ia32, ia32_options);
}
#endif /* WITH_LIBCORE */
const arch_isa_if_t ia32_isa_if = {
    ia32_init,
    ia32_done,
    ia32_get_n_reg_class,
    ia32_get_reg_class,
    ia32_get_reg_class_for_mode,
    ia32_get_call_abi,
    ia32_get_irn_handler,
    ia32_get_code_generator_if,
    ia32_get_list_sched_selector,
    ia32_get_reg_class_alignment,
    ia32_get_libfirm_params,
#ifdef WITH_LIBCORE
    ia32_register_options
#endif
};