 * This is the main ia32 firm backend driver.
 * @author Christian Wuerdig
 */

#ifdef WITH_LIBCORE
#include <libcore/lc_opts.h>
#include <libcore/lc_opts_enum.h>
#endif /* WITH_LIBCORE */
#include "pseudo_irg.h"
#include "iredges_t.h"

#include "../beabi.h"             /* the general register allocator interface */
#include "../benode_t.h"
#include "../belower.h"
#include "../besched_t.h"

#include "bearch_ia32_t.h"

#include "ia32_new_nodes.h"       /* ia32 nodes interface */
#include "gen_ia32_regalloc_if.h" /* the generated interface (register type and class definitions) */
#include "ia32_gen_decls.h"       /* interface declaration emitter */
#include "ia32_transform.h"
#include "ia32_emitter.h"
#include "ia32_map_regs.h"
#include "ia32_optimize.h"
#include "ia32_dbg_stat.h"
#include "ia32_finish.h"
#include "ia32_util.h"

#define DEBUG_MODULE "firm.be.ia32.isa"
static set *cur_reg_set = NULL;

#define is_Start(irn) (get_irn_opcode(irn) == iro_Start)
/* Creates the unique per irg GP NoReg node. */
ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg) {
    return be_abi_get_callee_save_irn(cg->birg->abi, &ia32_gp_regs[REG_GP_NOREG]);
}

/* Creates the unique per irg FP NoReg node. */
ir_node *ia32_new_NoReg_fp(ia32_code_gen_t *cg) {
    return be_abi_get_callee_save_irn(cg->birg->abi,
        USE_SSE2(cg) ? &ia32_xmm_regs[REG_XMM_NOREG] : &ia32_vfp_regs[REG_VFP_NOREG]);
}
/**************************************************
 *  _ __ ___  __ _    __ _| | | ___   ___   _| |_
 * | '__/ _ \/ _` |  / _` | | |/ _ \ / __| | |  _|
 * | |   |  __/ (_| | (_| | | | (_) | (__  | | |
 * |_|    \___|\__, | \__,_|_|_|\___/ \___| |_|_|
 **************************************************/
static ir_node *my_skip_proj(const ir_node *n) {
    while (is_Proj(n))
        n = get_Proj_pred(n);
    return (ir_node *)n;
}
/**
 * Return register requirements for an ia32 node.
 * If the node returns a tuple (mode_T) then the Projs
 * will be asked for this information.
 */
static const arch_register_req_t *ia32_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos) {
    const ia32_irn_ops_t      *ops      = self;
    const ia32_register_req_t *irn_req;
    long                       node_pos = pos == -1 ? 0 : pos;
    ir_mode                   *mode     = is_Block(irn) ? NULL : get_irn_mode(irn);
    FIRM_DBG_REGISTER(firm_dbg_module_t *mod, DEBUG_MODULE);
    if (is_Block(irn) || mode == mode_M || mode == mode_X) {
        DBG((mod, LEVEL_1, "ignoring Block, mode_M, mode_X node %+F\n", irn));
        return NULL;
    }

    if (mode == mode_T && pos < 0) {
        DBG((mod, LEVEL_1, "ignoring request for OUT requirements for node %+F\n", irn));
        return NULL;
    }

    DBG((mod, LEVEL_1, "get requirements at pos %d for %+F ... ", pos, irn));
    if (is_Proj(irn)) {
        if (pos == -1)
            node_pos = ia32_translate_proj_pos(irn);
        else
            node_pos = pos;

        irn = my_skip_proj(irn);

        DB((mod, LEVEL_1, "skipping Proj, going to %+F at pos %d ... ", irn, node_pos));
    }
    if (is_ia32_irn(irn)) {
        if (pos >= 0)
            irn_req = get_ia32_in_req(irn, pos);
        else
            irn_req = get_ia32_out_req(irn, node_pos);

        DB((mod, LEVEL_1, "returning reqs for %+F at pos %d\n", irn, pos));

        memcpy(req, &(irn_req->req), sizeof(*req));

        if (arch_register_req_is(&(irn_req->req), should_be_same)) {
            assert(irn_req->same_pos >= 0 && "should be same constraint for in -> out NYI");
            req->other_same = get_irn_n(irn, irn_req->same_pos);
        }

        if (arch_register_req_is(&(irn_req->req), should_be_different)) {
            assert(irn_req->different_pos >= 0 && "should be different constraint for in -> out NYI");
            req->other_different = get_irn_n(irn, irn_req->different_pos);
        }
    }
    /* treat Unknowns like Const with default requirements */
    else if (is_Unknown(irn)) {
        DB((mod, LEVEL_1, "returning UKNWN reqs for %+F\n", irn));
        if (mode_is_float(mode)) {
            if (USE_SSE2(ops->cg))
                memcpy(req, &(ia32_default_req_ia32_xmm_xmm_UKNWN), sizeof(*req));
            else
                memcpy(req, &(ia32_default_req_ia32_vfp_vfp_UKNWN), sizeof(*req));
        }
        else if (mode_is_int(mode) || mode_is_reference(mode))
            memcpy(req, &(ia32_default_req_ia32_gp_gp_UKNWN), sizeof(*req));
        else if (mode == mode_T || mode == mode_M) {
            DBG((mod, LEVEL_1, "ignoring Unknown node %+F\n", irn));
            return NULL;
        }
        else
            assert(0 && "unsupported Unknown-Mode");
    }
    else {
        DB((mod, LEVEL_1, "returning NULL for %+F (not ia32)\n", irn));
        return NULL;
    }

    return req;
}
static void ia32_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg) {
    int                   pos = 0;
    const ia32_irn_ops_t *ops = self;

    if (get_irn_mode(irn) == mode_X) {
        return;
    }

    DBG((ops->cg->mod, LEVEL_1, "ia32 assigned register %s to node %+F\n", reg->name, irn));

    if (is_Proj(irn)) {
        pos = ia32_translate_proj_pos(irn);
        irn = my_skip_proj(irn);
    }

    if (is_ia32_irn(irn)) {
        const arch_register_t **slots;
        slots      = get_ia32_slots(irn);
        slots[pos] = reg;
    }
    else {
        ia32_set_firm_reg(irn, reg, cur_reg_set);
    }
}
static const arch_register_t *ia32_get_irn_reg(const void *self, const ir_node *irn) {
    int                    pos = 0;
    const arch_register_t *reg = NULL;

    if (is_Proj(irn)) {
        if (get_irn_mode(irn) == mode_X) {
            return NULL;
        }

        pos = ia32_translate_proj_pos(irn);
        irn = my_skip_proj(irn);
    }

    if (is_ia32_irn(irn)) {
        const arch_register_t **slots;
        slots = get_ia32_slots(irn);
        reg   = slots[pos];
    }
    else {
        reg = ia32_get_firm_reg(irn, cur_reg_set);
    }

    return reg;
}
static arch_irn_class_t ia32_classify(const void *self, const ir_node *irn) {
    arch_irn_class_t classification = arch_irn_class_normal;

    irn = my_skip_proj(irn);

    if (is_cfop(irn))
        classification |= arch_irn_class_branch;

    if (! is_ia32_irn(irn))
        return classification & ~arch_irn_class_normal;

    if (is_ia32_Cnst(irn))
        classification |= arch_irn_class_const;

    if (is_ia32_Ld(irn))
        classification |= arch_irn_class_load;

    if (is_ia32_St(irn) || is_ia32_Store8Bit(irn))
        classification |= arch_irn_class_store;

    if (is_ia32_got_reload(irn))
        classification |= arch_irn_class_reload;

    return classification;
}
static arch_irn_flags_t ia32_get_flags(const void *self, const ir_node *irn) {
    irn = my_skip_proj(irn);

    if (is_ia32_irn(irn))
        return get_ia32_flags(irn);
    else if (is_Unknown(irn))
        return arch_irn_flags_ignore;

    return 0;
}
static entity *ia32_get_frame_entity(const void *self, const ir_node *irn) {
    return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
}

static void ia32_set_frame_entity(const void *self, ir_node *irn, entity *ent) {
    set_ia32_frame_ent(irn, ent);
}
static void ia32_set_stack_bias(const void *self, ir_node *irn, int bias) {
    char buf[64];
    const ia32_irn_ops_t *ops = self;

    if (get_ia32_frame_ent(irn)) {
        ia32_am_flavour_t am_flav = get_ia32_am_flavour(irn);

        DBG((ops->cg->mod, LEVEL_1, "stack biased %+F with %d\n", irn, bias));
        snprintf(buf, sizeof(buf), "%d", bias);

        if (get_ia32_op_type(irn) == ia32_Normal) {
            set_ia32_cnst(irn, buf);
        }
        else {
            add_ia32_am_offs(irn, buf);
            am_flav |= ia32_O;
            set_ia32_am_flavour(irn, am_flav);
        }
    }
}
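/*
 * Illustration (hypothetical numbers): a frame-based access at entity
 * offset -8 that is reached while the stack pointer is displaced by 4
 * (e.g. one push in flight) gets the bias "4" appended to its address
 * mode offsets, so the emitter produces an effective offset of -8+4
 * relative to the current stack pointer.
 */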
typedef struct ia32_abi_env_t {
    be_abi_call_flags_bits_t flags;
    const arch_isa_t        *isa;
    const arch_env_t        *aenv;
    ir_graph                *irg;
} ia32_abi_env_t;

static void *ia32_abi_init(const be_abi_call_t *call, const arch_env_t *aenv, ir_graph *irg)
{
    ia32_abi_env_t *env    = xmalloc(sizeof(env[0]));
    be_abi_call_flags_t fl = be_abi_call_get_flags(call);
    env->flags = fl.bits;
    env->irg   = irg;
    env->aenv  = aenv;
    env->isa   = aenv->isa;
    return env;
}
/**
 * Put all registers which are saved by the prologue/epilogue in a set.
 *
 * @param self The callback object.
 * @param s    The result set.
 */
static void ia32_abi_dont_save_regs(void *self, pset *s)
{
    ia32_abi_env_t *env = self;
    if (env->flags.try_omit_fp)
        pset_insert_ptr(s, env->isa->bp);
}
/**
 * Generate the routine prologue.
 *
 * @param self    The callback object.
 * @param mem     A pointer to the mem node. Update this if you define new memory.
 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
 *
 * @return        The register which shall be used as a stack frame base.
 *
 * All nodes which define registers in @p reg_map must keep @p reg_map current.
 */
static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map)
{
    ia32_abi_env_t *env = self;

    if (! env->flags.try_omit_fp) {
        ir_node *bl      = get_irg_start_block(env->irg);
        ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
        ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);
        ir_node *push;

        /* push ebp */
        push    = new_rd_ia32_Push(NULL, env->irg, bl, curr_sp, curr_bp, *mem);
        curr_sp = new_r_Proj(env->irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
        *mem    = new_r_Proj(env->irg, bl, push, mode_M, pn_ia32_Push_M);

        /* the push must have SP out register */
        arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
        set_ia32_flags(push, arch_irn_flags_ignore);

        /* move esp to ebp */
        curr_bp = be_new_Copy(env->isa->bp->reg_class, env->irg, bl, curr_sp);
        be_set_constr_single_reg(curr_bp, BE_OUT_POS(0), env->isa->bp);
        arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
        be_node_set_flags(curr_bp, BE_OUT_POS(0), arch_irn_flags_ignore);

        /* beware: the copy must be done before any other sp use */
        curr_sp = be_new_CopyKeep_single(env->isa->sp->reg_class, env->irg, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
        be_set_constr_single_reg(curr_sp, BE_OUT_POS(0), env->isa->sp);
        arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
        be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore);

        be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
        be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);

        return env->isa->bp;
    }

    return env->isa->sp;
}
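/*
 * Seen as assembly, the non-omit-fp path above builds the classic prologue
 *
 *     push ebp
 *     mov  ebp, esp
 *
 * expressed as backend nodes, so that the register allocator sees the
 * esp/ebp definitions and keeps them pinned to their fixed registers.
 */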
/**
 * Generate the routine epilogue.
 *
 * @param self    The callback object.
 * @param bl      The block for the epilogue.
 * @param mem     A pointer to the mem node. Update this if you define new memory.
 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
 *
 * All nodes which define registers in @p reg_map must keep @p reg_map current.
 */
static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map)
{
    ia32_abi_env_t *env     = self;
    ir_node        *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
    ir_node        *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);

    if (env->flags.try_omit_fp) {
        /* simply remove the stack frame here */
        curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, *mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
    }
    else {
        const ia32_isa_t *isa     = (ia32_isa_t *)env->isa;
        ir_mode          *mode_bp = env->isa->bp->reg_class->mode;

        /* gcc always emits a leave at the end of a routine */
        if (1 || ARCH_AMD(isa->opt_arch)) {
            ir_node *leave;

            /* leave */
            leave   = new_rd_ia32_Leave(NULL, env->irg, bl, curr_sp, *mem);
            set_ia32_flags(leave, arch_irn_flags_ignore);
            curr_bp = new_r_Proj(current_ir_graph, bl, leave, mode_bp, pn_ia32_Leave_frame);
            curr_sp = new_r_Proj(current_ir_graph, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
            *mem    = new_r_Proj(current_ir_graph, bl, leave, mode_M, pn_ia32_Leave_M);
        }
        else {
            ir_node *pop;

            /* copy ebp to esp */
            curr_sp = be_new_SetSP(env->isa->sp, env->irg, bl, curr_sp, curr_bp, *mem);

            /* pop ebp */
            pop     = new_rd_ia32_Pop(NULL, env->irg, bl, curr_sp, *mem);
            set_ia32_flags(pop, arch_irn_flags_ignore);
            curr_bp = new_r_Proj(current_ir_graph, bl, pop, mode_bp, pn_ia32_Pop_res);
            curr_sp = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
            *mem    = new_r_Proj(current_ir_graph, bl, pop, mode_M, pn_ia32_Pop_M);
        }
        arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
        arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
    }

    be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
    be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);
}
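/*
 * The epilogue above corresponds roughly to one of
 *
 *     leave                     ; compact bp-based teardown
 * or
 *     mov esp, ebp
 *     pop ebp                   ; the explicit two-instruction variant
 * or
 *     add esp, <framesize>      ; when the frame pointer is omitted
 */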
/**
 * Produces the type which sits between the stack args and the locals on the stack.
 * It will contain the return address and space to store the old base pointer.
 *
 * @return The Firm type modeling the ABI between type.
 */
static ir_type *ia32_abi_get_between_type(void *self)
{
#define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
    static ir_type *omit_fp_between_type = NULL;
    static ir_type *between_type         = NULL;

    ia32_abi_env_t *env = self;

    if (! between_type) {
        entity *old_bp_ent;
        entity *ret_addr_ent;
        entity *omit_fp_ret_addr_ent;

        ir_type *old_bp_type   = new_type_primitive(IDENT("bp"), mode_P);
        ir_type *ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_P);

        between_type = new_type_struct(IDENT("ia32_between_type"));
        old_bp_ent   = new_entity(between_type, IDENT("old_bp"), old_bp_type);
        ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type);

        set_entity_offset_bytes(old_bp_ent, 0);
        set_entity_offset_bytes(ret_addr_ent, get_type_size_bytes(old_bp_type));
        set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
        set_type_state(between_type, layout_fixed);

        omit_fp_between_type = new_type_struct(IDENT("ia32_between_type_omit_fp"));
        omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type);

        set_entity_offset_bytes(omit_fp_ret_addr_ent, 0);
        set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
        set_type_state(omit_fp_between_type, layout_fixed);
    }

    return env->flags.try_omit_fp ? omit_fp_between_type : between_type;
#undef IDENT
}
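/*
 * Roughly, the between type models this slice of the stack frame
 * (the stack grows downwards):
 *
 *     ...        <- caller's frame / stack arguments
 *     ret_addr   <- pushed by the call instruction
 *     old_bp     <- only present when the frame pointer is not omitted
 *     ...        <- callee's locals
 */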
/**
 * Get the estimated cycle count for @p irn.
 *
 * @param self The this pointer.
 * @param irn  The node.
 *
 * @return     The estimated cycle count for this operation
 */
static int ia32_get_op_estimated_cost(const void *self, const ir_node *irn)
{
    int cost;
    ia32_op_type_t op_tp;
    const ia32_irn_ops_t *ops = self;

    if (is_Proj(irn))
        return 0;

    assert(is_ia32_irn(irn));

    cost  = get_ia32_latency(irn);
    op_tp = get_ia32_op_type(irn);
    if (is_ia32_CopyB(irn)) {
        cost = 250;
        if (ARCH_INTEL(ops->cg->arch))
            cost += 150;
    }
    else if (is_ia32_CopyB_i(irn)) {
        int size = get_tarval_long(get_ia32_Immop_tarval(irn));
        cost = 20 + (int)ceil((4.0 / 3.0) * size);
        if (ARCH_INTEL(ops->cg->arch))
            cost += 150;
    }
    /* in case of address mode operations add additional cycles */
    else if (op_tp == ia32_AddrModeD || op_tp == ia32_AddrModeS) {
        /*
            In case of stack access add 5 cycles (we assume the stack is in cache),
            other memory operations cost 20 cycles.
        */
        cost += is_ia32_use_frame(irn) ? 5 : 20;
    }

    return cost;
}
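/*
 * Worked example (illustrative numbers): an immediate CopyB of 30 bytes
 * yields 20 + ceil(4/3 * 30) = 60 cycles, plus the Intel penalty where
 * applicable; a normal instruction with a frame-based memory operand gets
 * its base latency plus 5.
 */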
/**
 * Returns the inverse operation of @p irn, recalculating the argument at position @p i.
 *
 * @param irn     The original operation
 * @param i       Index of the argument we want the inverse operation to yield
 * @param inverse struct to be filled with the resulting inverse op
 * @param obst    The obstack to use for allocation of the returned nodes array
 * @return        The inverse operation, or NULL if the operation is not invertible
 */
static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) {
    ir_mode  *mode;
    ir_graph *irg;
    ir_node  *block, *noreg, *nomem;
    int       pnc;

    /* we cannot invert non-ia32 irns */
    if (! is_ia32_irn(irn))
        return NULL;

    /* operand must always be a real operand (not base, index or mem) */
    if (i != 2 && i != 3)
        return NULL;

    /* we don't invert address mode operations */
    if (get_ia32_op_type(irn) != ia32_Normal)
        return NULL;

    irg   = get_irn_irg(irn);
    block = get_nodes_block(irn);
    mode  = get_ia32_res_mode(irn);
    noreg = get_irn_n(irn, 0);
    nomem = new_r_NoMem(irg);

    /* initialize structure */
    inverse->nodes = obstack_alloc(obst, 2 * sizeof(inverse->nodes[0]));
    inverse->costs = 0;
    inverse->n     = 2;
    switch (get_ia32_irn_opcode(irn)) {
        case iro_ia32_Add:
            if (get_ia32_immop_type(irn) == ia32_ImmConst) {
                /* we have an add with a const here */
                /* inverse == add with negated const */
                inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
                pnc               = pn_ia32_Add_res;
                inverse->costs   += 1;
                copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
                set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
                set_ia32_commutative(inverse->nodes[0]);
            }
            else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
                /* we have an add with a symconst here */
                /* inverse == sub with const */
                inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
                pnc               = pn_ia32_Sub_res;
                inverse->costs   += 2;
                copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
            }
            else {
                /* normal add: inverse == sub */
                ir_node *proj = ia32_get_res_proj(irn);

                assert(proj);

                inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, i ^ 1), nomem);
                pnc               = pn_ia32_Sub_res;
                inverse->costs   += 2;
            }
            break;
        case iro_ia32_Sub:
            if (get_ia32_immop_type(irn) != ia32_ImmNone) {
                /* we have a sub with a const/symconst here */
                /* inverse == add with this const */
                inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
                pnc               = pn_ia32_Add_res;
                inverse->costs   += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
                copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
            }
            else {
                /* normal sub */
                ir_node *proj = ia32_get_res_proj(irn);

                assert(proj);

                if (i == 2)
                    inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, 3), nomem);
                else
                    inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, 2), proj, nomem);
                pnc             = pn_ia32_Sub_res;
                inverse->costs += 1;
            }
            break;
        case iro_ia32_Eor:
            if (get_ia32_immop_type(irn) != ia32_ImmNone) {
                /* xor with const: inverse = xor */
                inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
                pnc               = pn_ia32_Eor_res;
                inverse->costs   += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
                copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
            }
            else {
                /* normal xor */
                inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, (ir_node *)irn, get_irn_n(irn, i), nomem);
                pnc               = pn_ia32_Eor_res;
                inverse->costs   += 1;
            }
            break;
        case iro_ia32_Not: {
            ir_node *proj = ia32_get_res_proj(irn);

            assert(proj);

            inverse->nodes[0] = new_rd_ia32_Not(NULL, irg, block, noreg, noreg, proj, nomem);
            pnc               = pn_ia32_Not_res;
            inverse->costs   += 1;
            break;
        }
        case iro_ia32_Minus: {
            ir_node *proj = ia32_get_res_proj(irn);

            assert(proj);

            inverse->nodes[0] = new_rd_ia32_Minus(NULL, irg, block, noreg, noreg, proj, nomem);
            pnc               = pn_ia32_Minus_res;
            inverse->costs   += 1;
            break;
        }
        default:
            /* inverse operation not supported */
            return NULL;
    }

    set_ia32_res_mode(inverse->nodes[0], mode);
    inverse->nodes[1] = new_r_Proj(irg, block, inverse->nodes[0], mode, pnc);

    return inverse;
}
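/*
 * Example: for x = a + 3, operand a is recomputed as x + (-3) (an Add with
 * the negated immediate); for x = a + b, operand a is recomputed as x - b,
 * taking x from the result Proj of the original Add.
 */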
/**
 * Check if irn can load its operand at position i from memory (source address mode).
 *
 * @param self Pointer to irn ops itself
 * @param irn  The irn to be checked
 * @param i    The operand's position
 * @return     Non-zero if the operand can be loaded
 */
static int ia32_possible_memory_operand(const void *self, const ir_node *irn, unsigned int i) {
    if (! is_ia32_irn(irn)                            ||  /* must be an ia32 irn */
        get_irn_arity(irn) != 5                       ||  /* must be a binary operation */
        get_ia32_op_type(irn) != ia32_Normal          ||  /* must not already be an address mode irn */
        ! (get_ia32_am_support(irn) & ia32_am_Source) ||  /* must be capable of source address mode */
        (i != 2 && i != 3)                            ||  /* a "real" operand position must be requested */
        (i == 2 && ! is_ia32_commutative(irn))        ||  /* if the first operand is requested, irn must be commutative */
        is_ia32_use_frame(irn))                           /* must not already use the frame */
        return 0;

    return 1;
}
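/*
 * When this returns non-zero, a Reload feeding operand i can be folded
 * into the operation itself, e.g. (illustrative)
 *
 *     mov eax, [ebp-8]
 *     add ecx, eax         ==>   add ecx, [ebp-8]
 *
 * saving both an instruction and the register holding the reloaded value.
 */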
static void ia32_perform_memory_operand(const void *self, ir_node *irn, ir_node *reload, unsigned int i) {
    assert(ia32_possible_memory_operand(self, irn, i) && "Cannot perform memory operand change");
    assert(get_nodes_block(reload) == get_nodes_block(irn) && "Reload must be in same block as irn.");

    if (get_irn_n_edges(reload) > 1)
        return;

    if (i == 2) {
        ir_node *tmp = get_irn_n(irn, 3);
        set_irn_n(irn, 3, get_irn_n(irn, 2));
        set_irn_n(irn, 2, tmp);
    }

    set_ia32_am_support(irn, ia32_am_Source);
    set_ia32_op_type(irn, ia32_AddrModeS);
    set_ia32_am_flavour(irn, ia32_B);
    set_ia32_ls_mode(irn, get_irn_mode(reload));
    set_ia32_frame_ent(irn, be_get_frame_entity(reload));
    set_ia32_use_frame(irn);
    set_ia32_got_reload(irn);

    set_irn_n(irn, 0, be_get_Reload_frame(reload));
    set_irn_n(irn, 4, be_get_Reload_mem(reload));

    /*
        Input at position one is the index register, which is NoReg.
        We would need the cg object to get a real noreg, but we cannot
        do that here, so we take the index input (guaranteed to be
        NoReg) as the second operand.
    */
    set_irn_n(irn, 3, get_irn_n(irn, 1));

    DBG_OPT_AM_S(reload, irn);
}
static const be_abi_callbacks_t ia32_abi_callbacks = {
    ia32_abi_init,
    free,
    ia32_abi_get_between_type,
    ia32_abi_dont_save_regs,
    ia32_abi_prologue,
    ia32_abi_epilogue,
};

/* fill register allocator interface */

static const arch_irn_ops_if_t ia32_irn_ops_if = {
    ia32_get_irn_reg_req,
    ia32_set_irn_reg,
    ia32_get_irn_reg,
    ia32_classify,
    ia32_get_flags,
    ia32_get_frame_entity,
    ia32_set_frame_entity,
    ia32_set_stack_bias,
    ia32_get_inverse,
    ia32_get_op_estimated_cost,
    ia32_possible_memory_operand,
    ia32_perform_memory_operand,
};

ia32_irn_ops_t ia32_irn_ops = {
    &ia32_irn_ops_if,
    NULL
};
/**************************************************
 *   ___ ___   __| | ___   __ _  ___ _ __   _| |_
 *  / __/ _ \ / _` |/ _ \ / _` |/ _ \ '_ \ | |  _|
 * | (_| (_) | (_| |  __/ (_| |  __/ | | | | | |
 *  \___\___/ \__,_|\___| \__, |\___|_| |_| |_|_|
 **************************************************/
static void ia32_kill_convs(ia32_code_gen_t *cg) {
    ir_node *irn;

    /* BEWARE: the Projs are inserted in the set */
    foreach_nodeset(cg->kill_conv, irn) {
        ir_node *in = get_irn_n(get_Proj_pred(irn), 2);
        edges_reroute(irn, in, cg->birg->irg);
    }
}
/**
 * Transforms the standard firm graph into
 * an ia32 firm graph.
 */
static void ia32_prepare_graph(void *self) {
    ia32_code_gen_t *cg = self;
    dom_front_info_t *dom;
    DEBUG_ONLY(firm_dbg_module_t *old_mod = cg->mod;)

    FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.transform");

    /* 1st: transform constants and psi condition trees */
    ia32_pre_transform_phase(cg);

    /* 2nd: transform all remaining nodes */
    ia32_register_transformers();
    dom = be_compute_dominance_frontiers(cg->irg);

    cg->kill_conv = new_nodeset(5);
    irg_walk_blkwise_graph(cg->irg, NULL, ia32_transform_node, cg);
    ia32_kill_convs(cg);
    del_nodeset(cg->kill_conv);

    be_free_dominance_frontiers(dom);

    if (cg->dump)
        be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);

    /* 3rd: optimize address mode */
    FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.am");
    ia32_optimize_addressmode(cg);

    if (cg->dump)
        be_dump(cg->irg, "-am", dump_ir_block_graph_sched);

    DEBUG_ONLY(cg->mod = old_mod;)
}
/**
 * Dummy functions for hooks we don't need but which must be filled.
 */
static void ia32_before_sched(void *self) {
}
static void remove_unused_nodes(ir_node *irn, bitset_t *already_visited) {
    int      i;
    ir_mode *mode;
    ir_node *mem_proj;

    mode = get_irn_mode(irn);

    /* check if we already saw this node or the node has more than one user */
    if (bitset_contains_irn(already_visited, irn) || get_irn_n_edges(irn) > 1)
        return;

    /* mark irn visited */
    bitset_add_irn(already_visited, irn);

    /* non-Tuple nodes with one user: ok, return */
    if (get_irn_n_edges(irn) >= 1 && mode != mode_T)
        return;

    /* tuple node has one user which is not the mem proj -> ok */
    if (mode == mode_T && get_irn_n_edges(irn) == 1) {
        mem_proj = ia32_get_proj_for_mode(irn, mode_M);
        if (! mem_proj)
            return;
    }

    for (i = get_irn_arity(irn) - 1; i >= 0; i--) {
        ir_node *pred = get_irn_n(irn, i);

        /* do not follow memory edges or we will accidentally remove stores */
        if (is_Proj(pred) && get_irn_mode(pred) == mode_M)
            continue;

        set_irn_n(irn, i, new_Bad());

        /*
            The current node is about to be removed: if the predecessor
            has only this node as user, it needs to be removed as well.
        */
        if (get_irn_n_edges(pred) <= 1)
            remove_unused_nodes(pred, already_visited);
    }

    if (sched_is_scheduled(irn))
        sched_remove(irn);
}
static void remove_unused_loads_walker(ir_node *irn, void *env) {
    bitset_t *already_visited = env;
    if (is_ia32_Ld(irn) && ! bitset_contains_irn(already_visited, irn))
        remove_unused_nodes(irn, env);
}
/**
 * Called before the register allocator.
 * Calculate a block schedule here. We need it for the x87
 * simulator and the emitter.
 */
static void ia32_before_ra(void *self) {
    ia32_code_gen_t *cg              = self;
    bitset_t        *already_visited = bitset_irg_malloc(cg->irg);

    cg->blk_sched = sched_create_block_schedule(cg->irg);

    /*
        There are sometimes unused loads, only pinned by memory.
        We need to remove those Loads and all other nodes which won't be used
        after removing the Load from the schedule.
    */
    irg_walk_graph(cg->irg, remove_unused_loads_walker, NULL, already_visited);
    bitset_free(already_visited);
}
/**
 * Transforms a be node into a Load.
 */
static void transform_to_Load(ia32_transform_env_t *env) {
    ir_node *irn         = env->irn;
    entity  *ent         = be_get_frame_entity(irn);
    ir_mode *mode        = env->mode;
    ir_node *noreg       = ia32_new_NoReg_gp(env->cg);
    ir_node *nomem       = new_rd_NoMem(env->irg);
    ir_node *sched_point = NULL;
    ir_node *ptr         = get_irn_n(irn, 0);
    ir_node *mem         = be_is_Reload(irn) ? get_irn_n(irn, 1) : nomem;
    ir_node *new_op, *proj;
    const arch_register_t *reg;

    if (sched_is_scheduled(irn)) {
        sched_point = sched_prev(irn);
    }

    if (mode_is_float(mode)) {
        if (USE_SSE2(env->cg))
            new_op = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, ptr, noreg, mem);
        else
            new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem);
    }
    else {
        new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem);
    }

    set_ia32_am_support(new_op, ia32_am_Source);
    set_ia32_op_type(new_op, ia32_AddrModeS);
    set_ia32_am_flavour(new_op, ia32_B);
    set_ia32_ls_mode(new_op, mode);
    set_ia32_frame_ent(new_op, ent);
    set_ia32_use_frame(new_op);

    DBG_OPT_RELOAD2LD(irn, new_op);

    proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, pn_Load_res);

    if (sched_point) {
        sched_add_after(sched_point, new_op);
        sched_add_after(new_op, proj);

        sched_remove(irn);
    }

    /* copy the register from the old node to the new Load */
    reg = arch_get_irn_register(env->cg->arch_env, irn);
    arch_set_irn_register(env->cg->arch_env, new_op, reg);

    SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, irn));

    exchange(irn, proj);
}
/**
 * Transforms a be node into a Store.
 */
static void transform_to_Store(ia32_transform_env_t *env) {
    ir_node *irn         = env->irn;
    entity  *ent         = be_get_frame_entity(irn);
    ir_mode *mode        = env->mode;
    ir_node *noreg       = ia32_new_NoReg_gp(env->cg);
    ir_node *nomem       = new_rd_NoMem(env->irg);
    ir_node *ptr         = get_irn_n(irn, 0);
    ir_node *val         = get_irn_n(irn, 1);
    ir_node *new_op, *proj;
    ir_node *sched_point = NULL;

    if (sched_is_scheduled(irn)) {
        sched_point = sched_prev(irn);
    }

    if (mode_is_float(mode)) {
        if (USE_SSE2(env->cg))
            new_op = new_rd_ia32_xStore(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
        else
            new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
    }
    else if (get_mode_size_bits(mode) == 8) {
        new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
    }
    else {
        new_op = new_rd_ia32_Store(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
    }

    set_ia32_am_support(new_op, ia32_am_Dest);
    set_ia32_op_type(new_op, ia32_AddrModeD);
    set_ia32_am_flavour(new_op, ia32_B);
    set_ia32_ls_mode(new_op, mode);
    set_ia32_frame_ent(new_op, ent);
    set_ia32_use_frame(new_op);

    DBG_OPT_SPILL2ST(irn, new_op);

    proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode_M, pn_ia32_Store_M);

    if (sched_point) {
        sched_add_after(sched_point, new_op);
        sched_remove(irn);
    }

    SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, irn));

    exchange(irn, proj);
}
static ir_node *create_push(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, ir_node *mem, entity *ent, const char *offset) {
    ir_node *noreg = ia32_new_NoReg_gp(env->cg);

    ir_node *push = new_rd_ia32_Push(env->dbg, env->irg, env->block, sp, noreg, mem);

    set_ia32_frame_ent(push, ent);
    set_ia32_use_frame(push);
    set_ia32_op_type(push, ia32_AddrModeS);
    set_ia32_am_flavour(push, ia32_B);
    set_ia32_ls_mode(push, mode_Is);
    if (offset)
        add_ia32_am_offs(push, offset);

    sched_add_before(schedpoint, push);
    return push;
}
static ir_node *create_pop(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, entity *ent, const char *offset) {
    ir_node *pop = new_rd_ia32_Pop(env->dbg, env->irg, env->block, sp, new_NoMem());

    set_ia32_frame_ent(pop, ent);
    set_ia32_use_frame(pop);
    set_ia32_op_type(pop, ia32_AddrModeD);
    set_ia32_am_flavour(pop, ia32_B);
    set_ia32_ls_mode(pop, mode_Is);
    if (offset)
        add_ia32_am_offs(pop, offset);

    sched_add_before(schedpoint, pop);
    return pop;
}
static ir_node* create_spproj(ia32_transform_env_t *env, ir_node *pred, ir_node *schedpoint, const ir_node *oldsp) {
    ir_mode *spmode = get_irn_mode(oldsp);
    const arch_register_t *spreg = arch_get_irn_register(env->cg->arch_env, oldsp);
    ir_node *sp;

    sp = new_rd_Proj(env->dbg, env->irg, env->block, pred, spmode, 0);
    arch_set_irn_register(env->cg->arch_env, sp, spreg);
    sched_add_before(schedpoint, sp);
    return sp;
}
static void transform_MemPerm(ia32_transform_env_t *env) {
    /*
     * Transform memperm, currently we do this the ugly way and produce
     * push/pop into/from memory cascades. This is possible without using
     * any registers.
     */
    ir_node *node = env->irn;
    ir_node *sp   = get_irn_n(node, 0);
    const ir_edge_t *edge;
    const ir_edge_t *next;
    ir_node **pops;
    int i, arity;

    arity = be_get_MemPerm_entity_arity(node);
    pops  = alloca(arity * sizeof(pops[0]));

    // create pushs
    for (i = 0; i < arity; ++i) {
        entity  *ent     = be_get_MemPerm_in_entity(node, i);
        ir_type *enttype = get_entity_type(ent);
        int      entbits = get_type_size_bits(enttype);
        ir_node *mem     = get_irn_n(node, i + 1);
        ir_node *push;

        assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");

        push = create_push(env, node, sp, mem, ent, NULL);
        sp   = create_spproj(env, push, node, sp);
        if (entbits == 64) {
            // add another push after the first one
            push = create_push(env, node, sp, mem, ent, "4");
            sp   = create_spproj(env, push, node, sp);
        }

        set_irn_n(node, i, new_Bad());
    }

    // create pops
    for (i = arity - 1; i >= 0; --i) {
        entity  *ent     = be_get_MemPerm_out_entity(node, i);
        ir_type *enttype = get_entity_type(ent);
        int      entbits = get_type_size_bits(enttype);
        ir_node *pop;

        assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");

        pop = create_pop(env, node, sp, ent, NULL);
        if (entbits == 64) {
            // add another pop after the first one
            sp  = create_spproj(env, pop, node, sp);
            pop = create_pop(env, node, sp, ent, "4");
        }
        sp = create_spproj(env, pop, node, sp);

        pops[i] = pop;
    }

    // exchange memprojs
    foreach_out_edge_safe(node, edge, next) {
        ir_node *proj = get_edge_src_irn(edge);
        int p = get_Proj_proj(proj);

        assert(p < arity);

        set_Proj_pred(proj, pops[p]);
        set_Proj_proj(proj, 3);
    }

    sched_remove(node);
}
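/*
 * For two 32-bit spill slots this cascade produces, roughly:
 *
 *     push dword [slot_in_0]
 *     push dword [slot_in_1]
 *     pop  dword [slot_out_1]
 *     pop  dword [slot_out_0]
 *
 * i.e. the stack itself serves as the temporary, so the permutation needs
 * no free register. (Slot names are illustrative.)
 */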
/**
 * Fix the mode of Spill/Reload.
 */
static ir_mode *fix_spill_mode(ia32_code_gen_t *cg, ir_mode *mode)
{
    if (mode_is_float(mode)) {
        return USE_SSE2(cg) ? mode_D : mode_E;
    }
    else
        return mode_Is;
}
/**
 * Block-Walker: Calls the transform functions for Spill and Reload.
 */
static void ia32_after_ra_walker(ir_node *block, void *env) {
    ir_node *node, *prev;
    ia32_code_gen_t *cg = env;
    ia32_transform_env_t tenv;

    tenv.block = block;
    tenv.irg   = current_ir_graph;
    tenv.cg    = cg;
    DEBUG_ONLY(tenv.mod = cg->mod;)

    /* beware: the schedule is changed here */
    for (node = sched_last(block); !sched_is_begin(node); node = prev) {
        prev = sched_prev(node);
        if (be_is_Reload(node)) {
            /* we always reload the whole register */
            tenv.dbg  = get_irn_dbg_info(node);
            tenv.irn  = node;
            tenv.mode = fix_spill_mode(cg, get_irn_mode(node));
            transform_to_Load(&tenv);
        }
        else if (be_is_Spill(node)) {
            ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
            /* we always spill the whole register */
            tenv.dbg  = get_irn_dbg_info(node);
            tenv.irn  = node;
            tenv.mode = fix_spill_mode(cg, get_irn_mode(spillval));
            transform_to_Store(&tenv);
        }
        else if (be_is_MemPerm(node)) {
            tenv.dbg = get_irn_dbg_info(node);
            tenv.irn = node;
            transform_MemPerm(&tenv);
        }
    }
}
/**
 * We transform Spill and Reload here. This needs to be done before
 * stack biasing, otherwise we would miss the corrected offset for these nodes.
 *
 * If x87 instructions should be emitted, run the x87 simulator and patch
 * the virtual instructions. This must obviously be done after register allocation.
 */
static void ia32_after_ra(void *self) {
    ia32_code_gen_t *cg = self;

    irg_block_walk_graph(cg->irg, NULL, ia32_after_ra_walker, self);

    /* if we do x87 code generation, rewrite all the virtual instructions and registers */
    if (cg->used_fp == fp_x87 || cg->force_sim) {
        x87_simulate_graph(cg->arch_env, cg->irg, cg->blk_sched);
    }
}
/**
 * Last touchups for the graph before emit.
 */
static void ia32_finish(void *self) {
    ia32_code_gen_t *cg  = self;
    ir_graph        *irg = cg->irg;

    ia32_finish_irg(irg, cg);
}
/**
 * Emits the code, closes the output file and frees
 * the code generator interface.
 */
static void ia32_codegen(void *self) {
    ia32_code_gen_t *cg  = self;
    ir_graph        *irg = cg->irg;

    ia32_gen_routine(cg->isa->out, irg, cg);

    cur_reg_set = NULL;

    /* remove it from the isa */
    cg->isa->cg = NULL;

    /* de-allocate code generator */
    del_set(cg->reg_set);
    free(cg);
}
static void *ia32_cg_init(const be_irg_t *birg);

static const arch_code_generator_if_t ia32_code_gen_if = {
    ia32_cg_init,
    NULL,                /* before abi introduce hook */
    ia32_prepare_graph,
    ia32_before_sched,   /* before scheduling hook */
    ia32_before_ra,      /* before register allocation hook */
    ia32_after_ra,       /* after register allocation hook */
    ia32_finish,         /* called before codegen */
    ia32_codegen         /* emit && done */
};
/**
 * Initializes an IA32 code generator.
 */
static void *ia32_cg_init(const be_irg_t *birg) {
    ia32_isa_t      *isa = (ia32_isa_t *)birg->main_env->arch_env->isa;
    ia32_code_gen_t *cg  = xcalloc(1, sizeof(*cg));

    cg->impl      = &ia32_code_gen_if;
    cg->irg       = birg->irg;
    cg->reg_set   = new_set(ia32_cmp_irn_reg_assoc, 1024);
    cg->arch_env  = birg->main_env->arch_env;
    cg->isa       = isa;
    cg->birg      = birg;
    cg->blk_sched = NULL;
    cg->fp_to_gp  = NULL;
    cg->gp_to_fp  = NULL;
    cg->fp_kind   = isa->fp_kind;
    cg->used_fp   = fp_none;
    cg->dump      = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;

    FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.cg");

    /* copy optimizations from isa for easier access */
    cg->opt      = isa->opt;
    cg->arch     = isa->arch;
    cg->opt_arch = isa->opt_arch;

    /* enter the code generator into the isa */
    isa->cg = cg;

#ifndef NDEBUG
    if (isa->name_obst_size) {
        //printf("freed %d bytes from name obst\n", isa->name_obst_size);
        isa->name_obst_size = 0;
        obstack_free(isa->name_obst, NULL);
        obstack_init(isa->name_obst);
    }
#endif /* NDEBUG */

    cur_reg_set = cg->reg_set;

    ia32_irn_ops.cg = cg;

    return (arch_code_generator_t *)cg;
}
/*****************************************************************
 *  ____             _                 _   _____  _____
 * | |_) | __ _  ___| | _____ _ __   __| | | | | (___    / \
 * | |_ < / _` |/ __| |/ / _ \ '_ \ / _` | | |  \___ \  / /\ \
 * | |_) | (_| | (__|   <  __/ | | | (_| |_| |_ ____) |/ ____ \
 * |____/ \__,_|\___|_|\_\___|_| |_|\__,_|_____|_____//_/   \_\
 *****************************************************************/
/**
 * Set output modes for GCC.
 */
static const tarval_mode_info mo_integer = {
    TVO_DECIMAL,
    NULL,
    NULL,
};

/*
 * set the tarval output mode to C-semantics
 */
static void set_tarval_output_modes(void)
{
    set_tarval_mode_output_option(get_modeLs(), &mo_integer);
    set_tarval_mode_output_option(get_modeLu(), &mo_integer);
    set_tarval_mode_output_option(get_modeIs(), &mo_integer);
    set_tarval_mode_output_option(get_modeIu(), &mo_integer);
    set_tarval_mode_output_option(get_modeHs(), &mo_integer);
    set_tarval_mode_output_option(get_modeHu(), &mo_integer);
    set_tarval_mode_output_option(get_modeBs(), &mo_integer);
    set_tarval_mode_output_option(get_modeBu(), &mo_integer);
    set_tarval_mode_output_option(get_modeC(),  &mo_integer);
    set_tarval_mode_output_option(get_modeU(),  &mo_integer);
}
/**
 * The template that generates a new ISA object.
 * Note that this template can be changed by command line
 * arguments.
 */
static ia32_isa_t ia32_isa_template = {
    {
        &ia32_isa_if,           /* isa interface implementation */
        &ia32_gp_regs[REG_ESP], /* stack pointer register */
        &ia32_gp_regs[REG_EBP], /* base pointer register */
        -1,                     /* stack direction */
    },
    NULL,                   /* 16bit register names */
    NULL,                   /* 8bit register names */
    NULL,                   /* types */
    NULL,                   /* tv_ents */
    (IA32_OPT_INCDEC   |    /* optimize add 1, sub 1 into inc/dec               default: on */
    IA32_OPT_DOAM      |    /* optimize address mode                            default: on */
    IA32_OPT_LEA       |    /* optimize for LEAs                                default: on */
    IA32_OPT_PLACECNST |    /* place constants immediately before instructions, default: on */
    IA32_OPT_IMMOPS    |    /* operations can use immediates,                   default: on */
    IA32_OPT_EXTBB),        /* use extended basic block scheduling,             default: on */
    arch_pentium_4,         /* instruction architecture */
    arch_pentium_4,         /* optimize for architecture */
    fp_sse2,                /* use sse2 unit */
    NULL,                   /* current code generator */
    NULL,                   /* output file */
    NULL,                   /* name obstack */
    0                       /* name obst size */
};
/**
 * Initializes the backend ISA.
 */
static void *ia32_init(FILE *file_handle) {
    static int inited = 0;
    ia32_isa_t *isa;

    if (inited)
        return NULL;

    set_tarval_output_modes();

    isa = xmalloc(sizeof(*isa));
    memcpy(isa, &ia32_isa_template, sizeof(*isa));

    ia32_register_init(isa);
    ia32_create_opcodes();

    if ((ARCH_INTEL(isa->arch) && isa->arch < arch_pentium_4) ||
        (ARCH_AMD(isa->arch) && isa->arch < arch_athlon))
        /* no SSE2 for these CPUs */
        isa->fp_kind = fp_x87;

    if (ARCH_INTEL(isa->opt_arch) && isa->opt_arch >= arch_pentium_4) {
        /* the Pentium 4 doesn't like inc and dec instructions */
        isa->opt &= ~IA32_OPT_INCDEC;
    }

    isa->regs_16bit = pmap_create();
    isa->regs_8bit  = pmap_create();
    isa->types      = pmap_create();
    isa->tv_ent     = pmap_create();
    isa->out        = file_handle;

    ia32_build_16bit_reg_map(isa->regs_16bit);
    ia32_build_8bit_reg_map(isa->regs_8bit);

    /* patch register names of x87 registers */
    if (USE_x87(isa)) {
        ia32_st_regs[0].name = "st";
        ia32_st_regs[1].name = "st(1)";
        ia32_st_regs[2].name = "st(2)";
        ia32_st_regs[3].name = "st(3)";
        ia32_st_regs[4].name = "st(4)";
        ia32_st_regs[5].name = "st(5)";
        ia32_st_regs[6].name = "st(6)";
        ia32_st_regs[7].name = "st(7)";
    }

#ifndef NDEBUG
    isa->name_obst = xmalloc(sizeof(*isa->name_obst));
    obstack_init(isa->name_obst);
    isa->name_obst_size = 0;
#endif /* NDEBUG */

    ia32_handle_intrinsics();
    ia32_switch_section(NULL, NO_SECTION);
    fprintf(isa->out, "\t.intel_syntax\n");

    inited = 1;

    return isa;
}
/**
 * Closes the output file and frees the ISA structure.
 */
static void ia32_done(void *self) {
    ia32_isa_t *isa = self;

    /* emit now all global declarations */
    ia32_gen_decls(isa->out);

    pmap_destroy(isa->regs_16bit);
    pmap_destroy(isa->regs_8bit);
    pmap_destroy(isa->tv_ent);
    pmap_destroy(isa->types);

#ifndef NDEBUG
    //printf("name obst size = %d bytes\n", isa->name_obst_size);
    obstack_free(isa->name_obst, NULL);
#endif /* NDEBUG */

    free(self);
}
/**
 * Return the number of register classes for this architecture.
 * We always report these three:
 *  - the general purpose registers
 *  - the SSE floating point register set
 *  - the virtual floating point registers
 */
static int ia32_get_n_reg_class(const void *self) {
    return 3;
}

/**
 * Return the register class for index i.
 */
static const arch_register_class_t *ia32_get_reg_class(const void *self, int i) {
    assert(i >= 0 && i < 3 && "Invalid ia32 register class requested.");
    if (i == 0)
        return &ia32_reg_classes[CLASS_ia32_gp];
    else if (i == 1)
        return &ia32_reg_classes[CLASS_ia32_xmm];
    else
        return &ia32_reg_classes[CLASS_ia32_vfp];
}
/**
 * Get the register class which shall be used to store a value of a given mode.
 *
 * @param self The this pointer.
 * @param mode The mode in question.
 * @return A register class which can hold values of the given mode.
 */
const arch_register_class_t *ia32_get_reg_class_for_mode(const void *self, const ir_mode *mode) {
    const ia32_isa_t *isa = self;
    if (mode_is_float(mode)) {
        return USE_SSE2(isa) ? &ia32_reg_classes[CLASS_ia32_xmm] : &ia32_reg_classes[CLASS_ia32_vfp];
    }
    else
        return &ia32_reg_classes[CLASS_ia32_gp];
}
/**
 * Get the ABI restrictions for procedure calls.
 *
 * @param self        The this pointer.
 * @param method_type The type of the method (procedure) in question.
 * @param abi         The abi object to be modified
 */
static void ia32_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi) {
    const ia32_isa_t *isa = self;
    ir_type  *tp;
    ir_mode  *mode;
    unsigned  cc        = get_method_calling_convention(method_type);
    int       n         = get_method_n_params(method_type);
    int       biggest_n = -1;
    int       stack_idx = 0;
    int       i, ignore_1, ignore_2;
    ir_mode **modes;
    const arch_register_t *reg;
    be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);

    unsigned use_push = !IS_P6_ARCH(isa->opt_arch);

    /* set abi flags for calls */
    call_flags.bits.left_to_right         = 0;        /* always last arg first on stack */
    call_flags.bits.store_args_sequential = use_push;
    /* call_flags.bits.try_omit_fp not changed: can handle both settings */
    call_flags.bits.fp_free               = 0;        /* the frame pointer is fixed in IA32 */
    call_flags.bits.call_has_imm          = 1;        /* IA32 calls can have immediate address */

    /* set stack parameter passing style */
    be_abi_call_set_flags(abi, call_flags, &ia32_abi_callbacks);

    /* collect the mode for each type */
    modes = alloca(n * sizeof(modes[0]));

    for (i = 0; i < n; i++) {
        tp       = get_method_param_type(method_type, i);
        modes[i] = get_type_mode(tp);
    }

    /* set register parameters */
    if (cc & cc_reg_param) {
        /* determine the number of parameters passed via registers */
        biggest_n = ia32_get_n_regparam_class(n, modes, &ignore_1, &ignore_2);

        /* loop over all parameters and set the register requirements */
        for (i = 0; i <= biggest_n; i++) {
            reg = ia32_get_RegParam_reg(n, modes, i, cc);
            assert(reg && "no register found for parameter");
            be_abi_call_param_reg(abi, i, reg);
        }

        stack_idx = i;
    }

    /* set stack parameters */
    for (i = stack_idx; i < n; i++) {
        be_abi_call_param_stack(abi, i, 1, 0, 0);
    }

    /* set return registers */
    n = get_method_n_ress(method_type);

    assert(n <= 2 && "more than two results not supported");

    /* In case of 64bit returns, we will have two 32bit values */
    if (n == 2) {
        tp   = get_method_res_type(method_type, 0);
        mode = get_type_mode(tp);

        assert(!mode_is_float(mode) && "two FP results not supported");

        tp   = get_method_res_type(method_type, 1);
        mode = get_type_mode(tp);

        assert(!mode_is_float(mode) && "two FP results not supported");

        be_abi_call_res_reg(abi, 0, &ia32_gp_regs[REG_EAX]);
        be_abi_call_res_reg(abi, 1, &ia32_gp_regs[REG_EDX]);
    }
    else if (n == 1) {
        const arch_register_t *reg;

        tp   = get_method_res_type(method_type, 0);
        assert(is_atomic_type(tp));
        mode = get_type_mode(tp);

        reg = mode_is_float(mode) ?
            (USE_SSE2(isa) ? &ia32_xmm_regs[REG_XMM0] : &ia32_vfp_regs[REG_VF0]) :
            &ia32_gp_regs[REG_EAX];

        be_abi_call_res_reg(abi, 0, reg);
    }
}
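/*
 * In effect this pins down a cdecl-like convention: arguments go on the
 * stack last-argument-first (or into registers if the calling convention
 * requests register parameters), 32-bit integer results come back in eax,
 * 64-bit integer results in the eax/edx pair, and floating point results
 * in st(0) or xmm0 depending on the selected FP unit.
 */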
static const void *ia32_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn) {
    return &ia32_irn_ops;
}

const arch_irn_handler_t ia32_irn_handler = {
    ia32_get_irn_ops
};

const arch_irn_handler_t *ia32_get_irn_handler(const void *self) {
    return &ia32_irn_handler;
}

int ia32_to_appear_in_schedule(void *block_env, const ir_node *irn) {
    return is_ia32_irn(irn) ? 1 : -1;
}

/**
 * Initializes the code generator interface.
 */
static const arch_code_generator_if_t *ia32_get_code_generator_if(void *self) {
    return &ia32_code_gen_if;
}
/**
 * Returns the estimated execution time of an ia32 irn.
 */
static sched_timestep_t ia32_sched_exectime(void *env, const ir_node *irn) {
    const arch_env_t *arch_env = env;
    return is_ia32_irn(irn) ? ia32_get_op_estimated_cost(arch_get_irn_ops(arch_env, irn), irn) : 1;
}

list_sched_selector_t ia32_sched_selector;

/**
 * Returns the trivial scheduler with exectime() and to_appear_in_schedule() overloaded.
 */
static const list_sched_selector_t *ia32_get_list_sched_selector(const void *self) {
//	memcpy(&ia32_sched_selector, reg_pressure_selector, sizeof(list_sched_selector_t));
    memcpy(&ia32_sched_selector, trivial_selector, sizeof(list_sched_selector_t));
    ia32_sched_selector.exectime              = ia32_sched_exectime;
    ia32_sched_selector.to_appear_in_schedule = ia32_to_appear_in_schedule;
    return &ia32_sched_selector;
}
/**
 * Returns the necessary byte alignment for storing a register of given class.
 */
static int ia32_get_reg_class_alignment(const void *self, const arch_register_class_t *cls) {
    ir_mode *mode = arch_register_class_mode(cls);
    int bytes     = get_mode_size_bytes(mode);

    if (mode_is_float(mode) && bytes > 8)
        return 16;
    return bytes;
}
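/*
 * E.g. floating point values wider than 8 bytes (80-bit x87 / 128-bit SSE)
 * get 16-byte aligned spill slots; everything else is aligned to its
 * natural size.
 */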
static ia32_intrinsic_env_t intrinsic_env = { NULL, NULL };
/**
 * Returns the libFirm configuration parameter for this backend.
 */
static const backend_params *ia32_get_libfirm_params(void) {
    static const arch_dep_params_t ad = {
        1,  /* also use subs */
        4,  /* maximum shifts */
        31, /* maximum shift amount */

        1,  /* allow Mulhs */
        1,  /* allow Mulus */
        32  /* Mulh allowed up to 32 bit */
    };
    static backend_params p = {
        NULL, /* no additional opcodes */
        NULL, /* will be set later */
        1,    /* need dword lowering */
        ia32_create_intrinsic_fkt,
        &intrinsic_env, /* context for ia32_create_intrinsic_fkt */
    };

    p.dep_param = &ad;
    return &p;
}
#ifdef WITH_LIBCORE

/* instruction set architectures. */
static const lc_opt_enum_int_items_t arch_items[] = {
    { "386",        arch_i386, },
    { "486",        arch_i486, },
    { "pentium",    arch_pentium, },
    { "586",        arch_pentium, },
    { "pentiumpro", arch_pentium_pro, },
    { "686",        arch_pentium_pro, },
    { "pentiummmx", arch_pentium_mmx, },
    { "pentium2",   arch_pentium_2, },
    { "p2",         arch_pentium_2, },
    { "pentium3",   arch_pentium_3, },
    { "p3",         arch_pentium_3, },
    { "pentium4",   arch_pentium_4, },
    { "p4",         arch_pentium_4, },
    { "pentiumm",   arch_pentium_m, },
    { "pm",         arch_pentium_m, },
    { "core",       arch_core, },
    { "k6",         arch_k6, },
    { "athlon",     arch_athlon, },
    { "athlon64",   arch_athlon_64, },
    { "opteron",    arch_opteron, },
    { NULL,         0 }
};
static lc_opt_enum_int_var_t arch_var = {
    &ia32_isa_template.arch, arch_items
};

static lc_opt_enum_int_var_t opt_arch_var = {
    &ia32_isa_template.opt_arch, arch_items
};

static const lc_opt_enum_int_items_t fp_unit_items[] = {
    { "x87",  fp_x87 },
    { "sse2", fp_sse2 },
    { NULL,   0 }
};

static lc_opt_enum_int_var_t fp_unit_var = {
    &ia32_isa_template.fp_kind, fp_unit_items
};

static const lc_opt_enum_int_items_t gas_items[] = {
    { "linux", ASM_LINUX_GAS },
    { "mingw", ASM_MINGW_GAS },
    { NULL,    0 }
};

static lc_opt_enum_int_var_t gas_var = {
    (int *)&asm_flavour, gas_items
};

static const lc_opt_table_entry_t ia32_options[] = {
    LC_OPT_ENT_ENUM_INT("arch",      "select the instruction architecture", &arch_var),
    LC_OPT_ENT_ENUM_INT("opt",       "optimize for instruction architecture", &opt_arch_var),
    LC_OPT_ENT_ENUM_INT("fpunit",    "select the floating point unit", &fp_unit_var),
    LC_OPT_ENT_NEGBIT("noaddrmode",  "do not use address mode", &ia32_isa_template.opt, IA32_OPT_DOAM),
    LC_OPT_ENT_NEGBIT("nolea",       "do not optimize for LEAs", &ia32_isa_template.opt, IA32_OPT_LEA),
    LC_OPT_ENT_NEGBIT("noplacecnst", "do not place constants", &ia32_isa_template.opt, IA32_OPT_PLACECNST),
    LC_OPT_ENT_NEGBIT("noimmop",     "no operations with immediates", &ia32_isa_template.opt, IA32_OPT_IMMOPS),
    LC_OPT_ENT_NEGBIT("noextbb",     "do not use extended basic block scheduling", &ia32_isa_template.opt, IA32_OPT_EXTBB),
    LC_OPT_ENT_ENUM_INT("gasmode",   "set the GAS compatibility mode", &gas_var),
    { NULL }
};
/**
 * Register command line options for the ia32 backend.
 *
 * Options so far:
 *
 * ia32-arch=arch    create instructions for arch
 * ia32-opt=arch     optimize for run on arch
 * ia32-fpunit=unit  select floating point unit (x87 or SSE2)
 * ia32-noaddrmode   do not use address mode
 * ia32-nolea        do not optimize for LEAs
 * ia32-noplacecnst  do not place constants
 * ia32-noimmop      no operations with immediates
 * ia32-noextbb      do not use extended basic block scheduling
 * ia32-gasmode      set the GAS compatibility mode
 */
static void ia32_register_options(lc_opt_entry_t *ent)
{
    lc_opt_entry_t *be_grp_ia32 = lc_opt_get_grp(ent, "ia32");
    lc_opt_add_table(be_grp_ia32, ia32_options);
}
#endif /* WITH_LIBCORE */
const arch_isa_if_t ia32_isa_if = {
    ia32_init,
    ia32_done,
    ia32_get_n_reg_class,
    ia32_get_reg_class,
    ia32_get_reg_class_for_mode,
    ia32_get_call_abi,
    ia32_get_irn_handler,
    ia32_get_code_generator_if,
    ia32_get_list_sched_selector,
    ia32_get_reg_class_alignment,
    ia32_get_libfirm_params,
#ifdef WITH_LIBCORE
    ia32_register_options
#endif
};