2 * This is the main ia32 firm backend driver.
3 * @author Christian Wuerdig
20 #include <libcore/lc_opts.h>
21 #include <libcore/lc_opts_enum.h>
22 #endif /* WITH_LIBCORE */
24 #include "pseudo_irg.h"
28 #include "iredges_t.h"
36 #include "../beabi.h" /* the general ABI interface */
37 #include "../benode_t.h"
38 #include "../belower.h"
39 #include "../besched_t.h"
41 #include "bearch_ia32_t.h"
43 #include "ia32_new_nodes.h" /* ia32 nodes interface */
44 #include "gen_ia32_regalloc_if.h" /* the generated interface (register type and class definitions) */
45 #include "ia32_gen_decls.h" /* interface declaration emitter */
46 #include "ia32_transform.h"
47 #include "ia32_emitter.h"
48 #include "ia32_map_regs.h"
49 #include "ia32_optimize.h"
51 #include "ia32_dbg_stat.h"
52 #include "ia32_finish.h"
53 #include "ia32_util.h"
55 #define DEBUG_MODULE "firm.be.ia32.isa"
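/* Register set of the currently processed code generator, used by the
   register getter/setter callbacks below for non-ia32 (generic be) nodes. */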
58 static set *cur_reg_set = NULL;
61 #define is_Start(irn) (get_irn_opcode(irn) == iro_Start)
63 /* Creates the unique per irg GP NoReg node. */
64 ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg) {
65 return be_abi_get_callee_save_irn(cg->birg->abi, &ia32_gp_regs[REG_GP_NOREG]);
68 /* Creates the unique per irg FP NoReg node. */
69 ir_node *ia32_new_NoReg_fp(ia32_code_gen_t *cg) {
70 return be_abi_get_callee_save_irn(cg->birg->abi,
71 USE_SSE2(cg) ? &ia32_xmm_regs[REG_XMM_NOREG] : &ia32_vfp_regs[REG_VFP_NOREG]);
74 /**************************************************
77 * _ __ ___ __ _ __ _| | | ___ ___ _| |_
78 * | '__/ _ \/ _` | / _` | | |/ _ \ / __| | | _|
79 * | | | __/ (_| | | (_| | | | (_) | (__ | | |
80 * |_| \___|\__, | \__,_|_|_|\___/ \___| |_|_|
83 **************************************************/
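/* Skips all Proj nodes and returns the first non-Proj predecessor. */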
85 static ir_node *my_skip_proj(const ir_node *n) {
93 * Return register requirements for an ia32 node.
94 * If the node returns a tuple (mode_T) then the Projs
95 * will be asked for this information.
97 static const arch_register_req_t *ia32_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos) {
98 const ia32_irn_ops_t *ops = self;
99 const ia32_register_req_t *irn_req;
100 long node_pos = pos == -1 ? 0 : pos;
101 ir_mode *mode = is_Block(irn) ? NULL : get_irn_mode(irn);
102 FIRM_DBG_REGISTER(firm_dbg_module_t *mod, DEBUG_MODULE);
104 if (is_Block(irn) || mode == mode_M || mode == mode_X) {
105 DBG((mod, LEVEL_1, "ignoring Block, mode_M, mode_X node %+F\n", irn));
109 if (mode == mode_T && pos < 0) {
110 DBG((mod, LEVEL_1, "ignoring request for OUT requirements for node %+F\n", irn));
114 DBG((mod, LEVEL_1, "get requirements at pos %d for %+F ... ", pos, irn));
118 node_pos = ia32_translate_proj_pos(irn);
124 irn = my_skip_proj(irn);
126 DB((mod, LEVEL_1, "skipping Proj, going to %+F at pos %d ... ", irn, node_pos));
129 if (is_ia32_irn(irn)) {
131 irn_req = get_ia32_in_req(irn, pos);
134 irn_req = get_ia32_out_req(irn, node_pos);
137 DB((mod, LEVEL_1, "returning reqs for %+F at pos %d\n", irn, pos));
139 memcpy(req, &(irn_req->req), sizeof(*req));
141 if (arch_register_req_is(&(irn_req->req), should_be_same)) {
142 assert(irn_req->same_pos >= 0 && "should be same constraint for in -> out NYI");
143 req->other_same = get_irn_n(irn, irn_req->same_pos);
146 if (arch_register_req_is(&(irn_req->req), should_be_different)) {
147 assert(irn_req->different_pos >= 0 && "should be different constraint for in -> out NYI");
148 req->other_different = get_irn_n(irn, irn_req->different_pos);
152 /* treat Unknowns like Const with default requirements */
153 if (is_Unknown(irn)) {
154 DB((mod, LEVEL_1, "returning UKNWN reqs for %+F\n", irn));
155 if (mode_is_float(mode)) {
156 if (USE_SSE2(ops->cg))
157 memcpy(req, &(ia32_default_req_ia32_xmm_xmm_UKNWN), sizeof(*req));
159 memcpy(req, &(ia32_default_req_ia32_vfp_vfp_UKNWN), sizeof(*req));
161 else if (mode_is_int(mode) || mode_is_reference(mode))
162 memcpy(req, &(ia32_default_req_ia32_gp_gp_UKNWN), sizeof(*req));
163 else if (mode == mode_T || mode == mode_M) {
164 DBG((mod, LEVEL_1, "ignoring Unknown node %+F\n", irn));
168 assert(0 && "unsupported Unknown-Mode");
171 DB((mod, LEVEL_1, "returning NULL for %+F (not ia32)\n", irn));
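/**
 * Sets the register of a node. The Proj (if any) is skipped and the
 * register is stored at the corresponding output slot of the ia32 node.
 */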
179 static void ia32_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg) {
181 const ia32_irn_ops_t *ops = self;
183 if (get_irn_mode(irn) == mode_X) {
187 DBG((ops->cg->mod, LEVEL_1, "ia32 assigned register %s to node %+F\n", reg->name, irn));
190 pos = ia32_translate_proj_pos(irn);
191 irn = my_skip_proj(irn);
194 if (is_ia32_irn(irn)) {
195 const arch_register_t **slots;
197 slots = get_ia32_slots(irn);
201 ia32_set_firm_reg(irn, reg, cur_reg_set);
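/**
 * Returns the register assigned to a node, again skipping a Proj to
 * look up the corresponding output slot of the ia32 node.
 */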
205 static const arch_register_t *ia32_get_irn_reg(const void *self, const ir_node *irn) {
207 const arch_register_t *reg = NULL;
211 if (get_irn_mode(irn) == mode_X) {
215 pos = ia32_translate_proj_pos(irn);
216 irn = my_skip_proj(irn);
219 if (is_ia32_irn(irn)) {
220 const arch_register_t **slots;
221 slots = get_ia32_slots(irn);
225 reg = ia32_get_firm_reg(irn, cur_reg_set);
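/**
 * Classifies a node for the register allocator (normal, branch, const,
 * load, store, reload).
 */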
231 static arch_irn_class_t ia32_classify(const void *self, const ir_node *irn) {
232 arch_irn_class_t classification = arch_irn_class_normal;
234 irn = my_skip_proj(irn);
237 classification |= arch_irn_class_branch;
239 if (! is_ia32_irn(irn))
240 return classification & ~arch_irn_class_normal;
242 if (is_ia32_Cnst(irn))
243 classification |= arch_irn_class_const;
246 classification |= arch_irn_class_load;
248 if (is_ia32_St(irn) || is_ia32_Store8Bit(irn))
249 classification |= arch_irn_class_store;
251 if (is_ia32_got_reload(irn))
252 classification |= arch_irn_class_reload;
254 return classification;
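/**
 * Returns the register allocation flags (e.g. ignore, rematerializable)
 * stored at an ia32 node.
 */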
257 static arch_irn_flags_t ia32_get_flags(const void *self, const ir_node *irn) {
258 irn = my_skip_proj(irn);
259 if (is_ia32_irn(irn))
260 return get_ia32_flags(irn);
263 return arch_irn_flags_ignore;
268 static entity *ia32_get_frame_entity(const void *self, const ir_node *irn) {
269 return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
272 static void ia32_set_frame_entity(const void *self, ir_node *irn, entity *ent) {
273 set_ia32_frame_ent(irn, ent);
276 static void ia32_set_stack_bias(const void *self, ir_node *irn, int bias) {
278 const ia32_irn_ops_t *ops = self;
280 if (get_ia32_frame_ent(irn)) {
281 ia32_am_flavour_t am_flav = get_ia32_am_flavour(irn);
283 DBG((ops->cg->mod, LEVEL_1, "stack biased %+F with %d\n", irn, bias));
284 snprintf(buf, sizeof(buf), "%d", bias);
286 if (get_ia32_op_type(irn) == ia32_Normal) {
287 set_ia32_cnst(irn, buf);
290 add_ia32_am_offs(irn, buf);
292 set_ia32_am_flavour(irn, am_flav);
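/**
 * Environment that is passed to the ABI callbacks below (prologue,
 * epilogue, between type, ...).
 */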
298 be_abi_call_flags_bits_t flags;
299 const arch_isa_t *isa;
300 const arch_env_t *aenv;
304 static void *ia32_abi_init(const be_abi_call_t *call, const arch_env_t *aenv, ir_graph *irg)
306 ia32_abi_env_t *env = xmalloc(sizeof(env[0]));
307 be_abi_call_flags_t fl = be_abi_call_get_flags(call);
308 env->flags = fl.bits;
311 env->isa = aenv->isa;
316 * Put all registers which are saved by the prologue/epilogue in a set.
318 * @param self The callback object.
319 * @param s The result set.
321 static void ia32_abi_dont_save_regs(void *self, pset *s)
323 ia32_abi_env_t *env = self;
324 if(env->flags.try_omit_fp)
325 pset_insert_ptr(s, env->isa->bp);
329 * Generate the routine prologue.
331 * @param self The callback object.
332 * @param mem A pointer to the mem node. Update this if you define new memory.
333 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
335 * @return The register which shall be used as a stack frame base.
337 * All nodes which define registers in @p reg_map must keep @p reg_map current.
339 static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map)
341 ia32_abi_env_t *env = self;
343 if (!env->flags.try_omit_fp) {
344 ir_node *bl = get_irg_start_block(env->irg);
345 ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
346 ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);
350 push = new_rd_ia32_Push(NULL, env->irg, bl, curr_sp, curr_bp, *mem);
351 curr_sp = new_r_Proj(env->irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
352 *mem = new_r_Proj(env->irg, bl, push, mode_M, pn_ia32_Push_M);
354 /* the Push must have the SP as its output register */
355 arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
356 set_ia32_flags(push, arch_irn_flags_ignore);
358 /* move esp to ebp */
359 curr_bp = be_new_Copy(env->isa->bp->reg_class, env->irg, bl, curr_sp);
360 be_set_constr_single_reg(curr_bp, BE_OUT_POS(0), env->isa->bp);
361 arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
362 be_node_set_flags(curr_bp, BE_OUT_POS(0), arch_irn_flags_ignore);
364 /* beware: the copy must be done before any other sp use */
365 curr_sp = be_new_CopyKeep_single(env->isa->sp->reg_class, env->irg, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
366 be_set_constr_single_reg(curr_sp, BE_OUT_POS(0), env->isa->sp);
367 arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
368 be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore);
370 be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
371 be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);
380 * Generate the routine epilogue.
381 * @param self The callback object.
382 * @param bl The block for the epilogue
383 * @param mem A pointer to the mem node. Update this if you define new memory.
384 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
387 * All nodes which define registers in @p reg_map must keep @p reg_map current.
389 static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map)
391 ia32_abi_env_t *env = self;
392 ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
393 ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);
395 if (env->flags.try_omit_fp) {
396 /* simply remove the stack frame here */
397 curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, *mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
400 const ia32_isa_t *isa = (ia32_isa_t *)env->isa;
401 ir_mode *mode_bp = env->isa->bp->reg_class->mode;
403 /* gcc always emits a leave at the end of a routine */
404 if (1 || ARCH_AMD(isa->opt_arch)) {
408 leave = new_rd_ia32_Leave(NULL, env->irg, bl, curr_sp, *mem);
409 set_ia32_flags(leave, arch_irn_flags_ignore);
410 curr_bp = new_r_Proj(current_ir_graph, bl, leave, mode_bp, pn_ia32_Leave_frame);
411 curr_sp = new_r_Proj(current_ir_graph, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
412 *mem = new_r_Proj(current_ir_graph, bl, leave, mode_M, pn_ia32_Leave_M);
417 /* copy ebp to esp */
418 curr_sp = be_new_SetSP(env->isa->sp, env->irg, bl, curr_sp, curr_bp, *mem);
421 pop = new_rd_ia32_Pop(NULL, env->irg, bl, curr_sp, *mem);
422 set_ia32_flags(pop, arch_irn_flags_ignore);
423 curr_bp = new_r_Proj(current_ir_graph, bl, pop, mode_bp, pn_ia32_Pop_res);
424 curr_sp = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
425 *mem = new_r_Proj(current_ir_graph, bl, pop, mode_M, pn_ia32_Pop_M);
427 arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
428 arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
431 be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
432 be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);
436 * Produces the type which sits between the stack args and the locals on the stack.
437 * It will contain the return address and space to store the old base pointer.
438 * @return The Firm type modeling the ABI between type.
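 *
 * Roughly, the relevant part of the stack frame looks like this
 * (higher addresses on top):
 *
 *        | stack arguments   |
 *        | return address    |  <- always part of the between type
 *        | old base pointer  |  <- only if the frame pointer is not omitted
 *        | locals            |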
440 static ir_type *ia32_abi_get_between_type(void *self)
442 #define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
443 static ir_type *omit_fp_between_type = NULL;
444 static ir_type *between_type = NULL;
446 ia32_abi_env_t *env = self;
448 if ( !between_type) {
450 entity *ret_addr_ent;
451 entity *omit_fp_ret_addr_ent;
453 ir_type *old_bp_type = new_type_primitive(IDENT("bp"), mode_P);
454 ir_type *ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_P);
456 between_type = new_type_struct(IDENT("ia32_between_type"));
457 old_bp_ent = new_entity(between_type, IDENT("old_bp"), old_bp_type);
458 ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type);
460 set_entity_offset_bytes(old_bp_ent, 0);
461 set_entity_offset_bytes(ret_addr_ent, get_type_size_bytes(old_bp_type));
462 set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
463 set_type_state(between_type, layout_fixed);
465 omit_fp_between_type = new_type_struct(IDENT("ia32_between_type_omit_fp"));
466 omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type);
468 set_entity_offset_bytes(omit_fp_ret_addr_ent, 0);
469 set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
470 set_type_state(omit_fp_between_type, layout_fixed);
473 return env->flags.try_omit_fp ? omit_fp_between_type : between_type;
478 * Get the estimated cycle count for @p irn.
480 * @param self The this pointer.
481 * @param irn The node.
483 * @return The estimated cycle count for this operation
485 static int ia32_get_op_estimated_cost(const void *self, const ir_node *irn)
492 switch (get_ia32_irn_opcode(irn)) {
494 case iro_ia32_DivMod:
499 case iro_ia32_l_Load:
509 case iro_ia32_xStore:
510 case iro_ia32_l_Store:
512 case iro_ia32_Store8Bit:
520 case iro_ia32_l_MulS:
533 * Returns the inverse operation of @p irn, recalculating the argument at position @p i.
535 * @param irn The original operation
536 * @param i Index of the argument we want the inverse operation to yield
537 * @param inverse struct to be filled with the resulting inverse op
538 * @param obstack The obstack to use for allocation of the returned nodes array
539 * @return The inverse operation or NULL if the operation is not invertible
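 *
 * Example (illustrative): for an Add with an immediate, x = a + 3, the
 * argument a can be recomputed from the result as x + (-3).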
541 static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) {
544 ir_node *block, *noreg, *nomem;
547 /* we cannot invert non-ia32 irns */
548 if (! is_ia32_irn(irn))
551 /* operand must always be a real operand (not base, index or mem) */
552 if (i != 2 && i != 3)
555 /* we don't invert address mode operations */
556 if (get_ia32_op_type(irn) != ia32_Normal)
559 irg = get_irn_irg(irn);
560 block = get_nodes_block(irn);
561 mode = get_ia32_res_mode(irn);
562 noreg = get_irn_n(irn, 0);
563 nomem = new_r_NoMem(irg);
565 /* initialize structure */
566 inverse->nodes = obstack_alloc(obst, 2 * sizeof(inverse->nodes[0]));
570 switch (get_ia32_irn_opcode(irn)) {
572 if (get_ia32_immop_type(irn) == ia32_ImmConst) {
573 /* we have an add with a const here */
574 /* inverse == add with negated const */
575 inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
576 pnc = pn_ia32_Add_res;
578 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
579 set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
580 set_ia32_commutative(inverse->nodes[0]);
582 else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
583 /* we have an add with a symconst here */
584 /* inverse == sub with const */
585 inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
586 pnc = pn_ia32_Sub_res;
588 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
591 /* normal add: inverse == sub */
592 ir_node *proj = ia32_get_res_proj(irn);
595 inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, i ^ 1), nomem);
596 pnc = pn_ia32_Sub_res;
601 if (get_ia32_immop_type(irn) != ia32_ImmNone) {
602 /* we have a sub with a const/symconst here */
603 /* inverse == add with this const */
604 inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
605 pnc = pn_ia32_Add_res;
606 inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
607 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
611 ir_node *proj = ia32_get_res_proj(irn);
615 inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, 3), nomem);
618 inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, 2), proj, nomem);
620 pnc = pn_ia32_Sub_res;
625 if (get_ia32_immop_type(irn) != ia32_ImmNone) {
626 /* xor with const: inverse = xor */
627 inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
628 pnc = pn_ia32_Eor_res;
629 inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
630 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
634 inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, (ir_node *)irn, get_irn_n(irn, i), nomem);
635 pnc = pn_ia32_Eor_res;
640 ir_node *proj = ia32_get_res_proj(irn);
643 inverse->nodes[0] = new_rd_ia32_Not(NULL, irg, block, noreg, noreg, proj, nomem);
644 pnc = pn_ia32_Not_res;
648 case iro_ia32_Minus: {
649 ir_node *proj = ia32_get_res_proj(irn);
652 inverse->nodes[0] = new_rd_ia32_Minus(NULL, irg, block, noreg, noreg, proj, nomem);
653 pnc = pn_ia32_Minus_res;
658 /* inverse operation not supported */
662 set_ia32_res_mode(inverse->nodes[0], mode);
663 inverse->nodes[1] = new_r_Proj(irg, block, inverse->nodes[0], mode, pnc);
669 * Check if irn can load its operand at position i from memory (source address mode).
670 * @param self Pointer to irn ops itself
671 * @param irn The irn to be checked
672 * @param i The operands position
673 * @return Non-Zero if operand can be loaded
675 static int ia32_possible_memory_operand(const void *self, const ir_node *irn, unsigned int i) {
676 if (! is_ia32_irn(irn) || /* must be an ia32 irn */
677 get_irn_arity(irn) != 5 || /* must be a binary operation */
678 get_ia32_op_type(irn) != ia32_Normal || /* must not already be an address mode irn */
679 ! (get_ia32_am_support(irn) & ia32_am_Source) || /* must be capable of source address mode */
680 (i != 2 && i != 3) || /* a "real" operand position must be requested */
681 (i == 2 && ! is_ia32_commutative(irn)) || /* if the first operand is requested, the irn must be commutative */
682 is_ia32_use_frame(irn)) /* must not already use frame */
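/**
 * Folds a Reload into the given node: the node is switched to source
 * address mode and loads operand @p i directly from the Reload's spill slot.
 */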
688 static void ia32_perform_memory_operand(const void *self, ir_node *irn, ir_node *reload, unsigned int i) {
689 assert(ia32_possible_memory_operand(self, irn, i) && "Cannot perform memory operand change");
690 assert(get_nodes_block(reload) == get_nodes_block(irn) && "Reload must be in same block as irn.");
692 if (get_irn_n_edges(reload) > 1)
696 ir_node *tmp = get_irn_n(irn, 3);
697 set_irn_n(irn, 3, get_irn_n(irn, 2));
698 set_irn_n(irn, 2, tmp);
701 set_ia32_am_support(irn, ia32_am_Source);
702 set_ia32_op_type(irn, ia32_AddrModeS);
703 set_ia32_am_flavour(irn, ia32_B);
704 set_ia32_ls_mode(irn, get_irn_mode(reload));
705 set_ia32_frame_ent(irn, be_get_frame_entity(reload));
706 set_ia32_use_frame(irn);
707 set_ia32_got_reload(irn);
709 set_irn_n(irn, 0, be_get_Reload_frame(reload));
710 set_irn_n(irn, 4, be_get_Reload_mem(reload));
713 Input at position one is the index register, which is NoReg.
714 We would need the cg object to get a real NoReg, but we cannot
717 set_irn_n(irn, 3, get_irn_n(irn, 1));
719 DBG_OPT_AM_S(reload, irn);
722 static const be_abi_callbacks_t ia32_abi_callbacks = {
725 ia32_abi_get_between_type,
726 ia32_abi_dont_save_regs,
731 /* fill register allocator interface */
733 static const arch_irn_ops_if_t ia32_irn_ops_if = {
734 ia32_get_irn_reg_req,
739 ia32_get_frame_entity,
740 ia32_set_frame_entity,
743 ia32_get_op_estimated_cost,
744 ia32_possible_memory_operand,
745 ia32_perform_memory_operand,
748 ia32_irn_ops_t ia32_irn_ops = {
755 /**************************************************
758 * ___ ___ __| | ___ __ _ ___ _ __ _| |_
759 * / __/ _ \ / _` |/ _ \/ _` |/ _ \ '_ \ | | _|
760 * | (_| (_) | (_| | __/ (_| | __/ | | | | | |
761 * \___\___/ \__,_|\___|\__, |\___|_| |_| |_|_|
764 **************************************************/
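/**
 * Removes the superfluous Convs collected during transformation: every
 * Proj in cg->kill_conv is rerouted to the converted value (input 2 of
 * its Conv), making the Conv dead.
 */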
766 static void ia32_kill_convs(ia32_code_gen_t *cg) {
769 /* BEWARE: the Projs are inserted in the set */
770 foreach_nodeset(cg->kill_conv, irn) {
771 ir_node *in = get_irn_n(get_Proj_pred(irn), 2);
772 edges_reroute(irn, in, cg->birg->irg);
777 * Transforms the standard Firm graph into an ia32 Firm graph.
780 static void ia32_prepare_graph(void *self) {
781 ia32_code_gen_t *cg = self;
782 dom_front_info_t *dom;
783 DEBUG_ONLY(firm_dbg_module_t *old_mod = cg->mod;)
785 FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.transform");
787 /* 1st: transform constants and psi condition trees */
788 ia32_pre_transform_phase(cg);
790 /* 2nd: transform all remaining nodes */
791 ia32_register_transformers();
792 dom = be_compute_dominance_frontiers(cg->irg);
794 cg->kill_conv = new_nodeset(5);
795 irg_walk_blkwise_graph(cg->irg, NULL, ia32_transform_node, cg);
797 del_nodeset(cg->kill_conv);
799 be_free_dominance_frontiers(dom);
802 be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);
804 /* 3rd: optimize address mode */
805 FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.am");
806 ia32_optimize_addressmode(cg);
809 be_dump(cg->irg, "-am", dump_ir_block_graph_sched);
811 DEBUG_ONLY(cg->mod = old_mod;)
815 * Dummy functions for hooks we don't need but which must be filled.
817 static void ia32_before_sched(void *self) {
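/**
 * Disconnects an unused node from its predecessors, recursively removes
 * predecessors that thereby become unused and takes the node out of the
 * schedule (helper for the dead Load removal below).
 */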
820 static void remove_unused_nodes(ir_node *irn, bitset_t *already_visited) {
828 mode = get_irn_mode(irn);
830 /* check if we already saw this node or the node has more than one user */
831 if (bitset_contains_irn(already_visited, irn) || get_irn_n_edges(irn) > 1)
834 /* mark irn visited */
835 bitset_add_irn(already_visited, irn);
837 /* non-Tuple nodes with one user: ok, return */
838 if (get_irn_n_edges(irn) >= 1 && mode != mode_T)
842 /* tuple node has one user which is not the mem proj -> ok */
842 if (mode == mode_T && get_irn_n_edges(irn) == 1) {
843 mem_proj = ia32_get_proj_for_mode(irn, mode_M);
848 for (i = get_irn_arity(irn) - 1; i >= 0; i--) {
849 ir_node *pred = get_irn_n(irn, i);
851 /* do not follow memory edges or we will accidentally remove stores */
852 if (is_Proj(pred) && get_irn_mode(pred) == mode_M)
855 set_irn_n(irn, i, new_Bad());
858 The current node is about to be removed: if the predecessor
859 has only this node as user, it needs to be removed as well.
861 if (get_irn_n_edges(pred) <= 1)
862 remove_unused_nodes(pred, already_visited);
865 if (sched_is_scheduled(irn))
869 static void remove_unused_loads_walker(ir_node *irn, void *env) {
870 bitset_t *already_visited = env;
871 if (is_ia32_Ld(irn) && ! bitset_contains_irn(already_visited, irn))
872 remove_unused_nodes(irn, env);
876 * Called before the register allocator.
877 * Calculate a block schedule here. We need it for the x87
878 * simulator and the emitter.
880 static void ia32_before_ra(void *self) {
881 ia32_code_gen_t *cg = self;
882 bitset_t *already_visited = bitset_irg_malloc(cg->irg);
884 cg->blk_sched = sched_create_block_schedule(cg->irg);
888 There are sometimes unused loads, only pinned by memory.
889 We need to remove those Loads and all other nodes which won't be used
890 after removing the Load from the schedule.
892 irg_walk_graph(cg->irg, remove_unused_loads_walker, NULL, already_visited);
893 bitset_free(already_visited);
898 * Transforms a be node into a Load.
900 static void transform_to_Load(ia32_transform_env_t *env) {
901 ir_node *irn = env->irn;
902 entity *ent = arch_get_frame_entity(env->cg->arch_env, irn);
903 ir_mode *mode = env->mode;
904 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
905 ir_node *nomem = new_rd_NoMem(env->irg);
906 ir_node *sched_point = NULL;
907 ir_node *ptr = get_irn_n(irn, 0);
908 ir_node *mem = be_is_Reload(irn) ? get_irn_n(irn, 1) : nomem;
909 ir_node *new_op, *proj;
910 const arch_register_t *reg;
912 if (sched_is_scheduled(irn)) {
913 sched_point = sched_prev(irn);
916 if (mode_is_float(mode)) {
917 if (USE_SSE2(env->cg))
918 new_op = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, ptr, noreg, mem);
920 new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem);
923 new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem);
926 set_ia32_am_support(new_op, ia32_am_Source);
927 set_ia32_op_type(new_op, ia32_AddrModeS);
928 set_ia32_am_flavour(new_op, ia32_B);
929 set_ia32_ls_mode(new_op, mode);
930 set_ia32_frame_ent(new_op, ent);
931 set_ia32_use_frame(new_op);
933 DBG_OPT_RELOAD2LD(irn, new_op);
935 proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, pn_Load_res);
938 sched_add_after(sched_point, new_op);
939 sched_add_after(new_op, proj);
944 /* copy the register from the old node to the new Load */
945 reg = arch_get_irn_register(env->cg->arch_env, irn);
946 arch_set_irn_register(env->cg->arch_env, new_op, reg);
948 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, irn));
954 * Transforms a be node into a Store.
956 static void transform_to_Store(ia32_transform_env_t *env) {
957 ir_node *irn = env->irn;
958 entity *ent = arch_get_frame_entity(env->cg->arch_env, irn);
959 ir_mode *mode = env->mode;
960 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
961 ir_node *nomem = new_rd_NoMem(env->irg);
962 ir_node *ptr = get_irn_n(irn, 0);
963 ir_node *val = get_irn_n(irn, 1);
964 ir_node *new_op, *proj;
965 ir_node *sched_point = NULL;
967 if (sched_is_scheduled(irn)) {
968 sched_point = sched_prev(irn);
971 if (mode_is_float(mode)) {
972 if (USE_SSE2(env->cg))
973 new_op = new_rd_ia32_xStore(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
975 new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
977 else if (get_mode_size_bits(mode) == 8) {
978 new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
981 new_op = new_rd_ia32_Store(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
984 set_ia32_am_support(new_op, ia32_am_Dest);
985 set_ia32_op_type(new_op, ia32_AddrModeD);
986 set_ia32_am_flavour(new_op, ia32_B);
987 set_ia32_ls_mode(new_op, mode);
988 set_ia32_frame_ent(new_op, ent);
989 set_ia32_use_frame(new_op);
991 DBG_OPT_SPILL2ST(irn, new_op);
993 proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode_M, pn_ia32_Store_M);
996 sched_add_after(sched_point, new_op);
1000 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, irn));
1002 exchange(irn, proj);
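/**
 * Creates a Push addressing the given frame entity and schedules it
 * before @p schedpoint (helper for the MemPerm lowering below).
 */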
1005 static ir_node *create_push(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, ir_node *mem, entity *ent, const char *offset) {
1006 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
1008 ir_node *push = new_rd_ia32_Push(env->dbg, env->irg, env->block, sp, noreg, mem);
1010 set_ia32_frame_ent(push, ent);
1011 set_ia32_use_frame(push);
1012 set_ia32_op_type(push, ia32_AddrModeS);
1013 set_ia32_am_flavour(push, ia32_B);
1014 set_ia32_ls_mode(push, mode_Is);
1016 add_ia32_am_offs(push, offset);
1018 sched_add_before(schedpoint, push);
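/**
 * Creates a Pop writing to the given frame entity and schedules it
 * before @p schedpoint.
 */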
1022 static ir_node *create_pop(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, entity *ent, const char *offset) {
1023 ir_node *pop = new_rd_ia32_Pop(env->dbg, env->irg, env->block, sp, new_NoMem());
1025 set_ia32_frame_ent(pop, ent);
1026 set_ia32_use_frame(pop);
1027 set_ia32_op_type(pop, ia32_AddrModeD);
1028 set_ia32_am_flavour(pop, ia32_B);
1029 set_ia32_ls_mode(pop, mode_Is);
1031 add_ia32_am_offs(pop, offset);
1033 sched_add_before(schedpoint, pop);
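/**
 * Creates a stack pointer Proj of @p pred, assigns it the register of
 * the old stack pointer and schedules it before @p schedpoint.
 */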
1038 static ir_node* create_spproj(ia32_transform_env_t *env, ir_node *pred, ir_node *schedpoint, const ir_node *oldsp) {
1039 ir_mode *spmode = get_irn_mode(oldsp);
1040 const arch_register_t *spreg = arch_get_irn_register(env->cg->arch_env, oldsp);
1043 sp = new_rd_Proj(env->dbg, env->irg, env->block, pred, spmode, 0);
1044 arch_set_irn_register(env->cg->arch_env, sp, spreg);
1045 sched_add_before(schedpoint, sp);
1050 static void transform_MemPerm(ia32_transform_env_t *env) {
1052 * Transforms a MemPerm. Currently we do this the ugly way and produce
1053 * push/pop cascades into/from memory. This is possible without using
1056 ir_node *node = env->irn;
1058 ir_node *sp = get_irn_n(node, 0);
1059 const ir_edge_t *edge;
1060 const ir_edge_t *next;
1063 arity = be_get_MemPerm_entity_arity(node);
1064 pops = alloca(arity * sizeof(pops[0]));
1067 for(i = 0; i < arity; ++i) {
1068 entity *ent = be_get_MemPerm_in_entity(node, i);
1069 ir_type *enttype = get_entity_type(ent);
1070 int entbits = get_type_size_bits(enttype);
1071 ir_node *mem = get_irn_n(node, i + 1);
1074 assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");
1076 push = create_push(env, node, sp, mem, ent, NULL);
1077 sp = create_spproj(env, push, node, sp);
1079 // add another push after the first one
1080 push = create_push(env, node, sp, mem, ent, "4");
1081 sp = create_spproj(env, push, node, sp);
1084 set_irn_n(node, i, new_Bad());
1088 for(i = arity - 1; i >= 0; --i) {
1089 entity *ent = be_get_MemPerm_out_entity(node, i);
1090 ir_type *enttype = get_entity_type(ent);
1091 int entbits = get_type_size_bits(enttype);
1095 assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");
1097 pop = create_pop(env, node, sp, ent, NULL);
1099 // add another pop after the first one
1100 sp = create_spproj(env, pop, node, sp);
1101 pop = create_pop(env, node, sp, ent, "4");
1104 sp = create_spproj(env, pop, node, sp);
1110 // exchange memprojs
1111 foreach_out_edge_safe(node, edge, next) {
1112 ir_node *proj = get_edge_src_irn(edge);
1113 int p = get_Proj_proj(proj);
1117 set_Proj_pred(proj, pops[p]);
1118 set_Proj_proj(proj, 3);
1125 * Fix the mode of Spill/Reload
1127 static ir_mode *fix_spill_mode(ia32_code_gen_t *cg, ir_mode *mode)
1129 if (mode_is_float(mode)) {
1141 * Block-Walker: Calls the transform functions for Spill, Reload and MemPerm nodes.
1143 static void ia32_after_ra_walker(ir_node *block, void *env) {
1144 ir_node *node, *prev;
1145 ia32_code_gen_t *cg = env;
1146 ia32_transform_env_t tenv;
1149 tenv.irg = current_ir_graph;
1151 DEBUG_ONLY(tenv.mod = cg->mod;)
1153 /* beware: the schedule is changed here */
1154 for (node = sched_last(block); !sched_is_begin(node); node = prev) {
1155 prev = sched_prev(node);
1156 if (be_is_Reload(node)) {
1157 /* we always reload the whole register */
1158 tenv.dbg = get_irn_dbg_info(node);
1160 tenv.mode = fix_spill_mode(cg, get_irn_mode(node));
1161 transform_to_Load(&tenv);
1163 else if (be_is_Spill(node)) {
1164 ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
1165 /* we always spill the whole register */
1166 tenv.dbg = get_irn_dbg_info(node);
1168 tenv.mode = fix_spill_mode(cg, get_irn_mode(spillval));
1169 transform_to_Store(&tenv);
1171 else if(be_is_MemPerm(node)) {
1172 tenv.dbg = get_irn_dbg_info(node);
1174 transform_MemPerm(&tenv);
1180 * We transform Spill and Reload here. This needs to be done before
1181 * stack biasing, otherwise we would miss the corrected offset for these nodes.
1183 * If x87 instructions should be emitted, run the x87 simulator and patch
1184 * the virtual instructions. This must obviously be done after register allocation.
1186 static void ia32_after_ra(void *self) {
1187 ia32_code_gen_t *cg = self;
1189 irg_block_walk_graph(cg->irg, NULL, ia32_after_ra_walker, self);
1191 /* if we do x87 code generation, rewrite all the virtual instructions and registers */
1192 if (cg->used_fp == fp_x87 || cg->force_sim) {
1193 x87_simulate_graph(cg->arch_env, cg->irg, cg->blk_sched);
1198 * Last touchups for the graph before code emission.
1200 static void ia32_finish(void *self) {
1201 ia32_code_gen_t *cg = self;
1202 ir_graph *irg = cg->irg;
1204 ia32_finish_irg(irg, cg);
1208 * Emits the code, closes the output file and frees
1209 * the code generator interface.
1211 static void ia32_codegen(void *self) {
1212 ia32_code_gen_t *cg = self;
1213 ir_graph *irg = cg->irg;
1215 ia32_gen_routine(cg->isa->out, irg, cg);
1219 /* remove it from the isa */
1222 /* de-allocate code generator */
1223 del_set(cg->reg_set);
1227 static void *ia32_cg_init(const be_irg_t *birg);
1229 static const arch_code_generator_if_t ia32_code_gen_if = {
1231 NULL, /* before abi introduce hook */
1233 ia32_before_sched, /* before scheduling hook */
1234 ia32_before_ra, /* before register allocation hook */
1235 ia32_after_ra, /* after register allocation hook */
1236 ia32_finish, /* called before codegen */
1237 ia32_codegen /* emit && done */
1241 * Initializes an IA32 code generator.
1243 static void *ia32_cg_init(const be_irg_t *birg) {
1244 ia32_isa_t *isa = (ia32_isa_t *)birg->main_env->arch_env->isa;
1245 ia32_code_gen_t *cg = xcalloc(1, sizeof(*cg));
1247 cg->impl = &ia32_code_gen_if;
1248 cg->irg = birg->irg;
1249 cg->reg_set = new_set(ia32_cmp_irn_reg_assoc, 1024);
1250 cg->arch_env = birg->main_env->arch_env;
1253 cg->blk_sched = NULL;
1254 cg->fp_to_gp = NULL;
1255 cg->gp_to_fp = NULL;
1256 cg->fp_kind = isa->fp_kind;
1257 cg->used_fp = fp_none;
1258 cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
1260 FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.cg");
1262 /* copy optimizations from isa for easier access */
1264 cg->arch = isa->arch;
1265 cg->opt_arch = isa->opt_arch;
1271 if (isa->name_obst_size) {
1272 //printf("freed %d bytes from name obst\n", isa->name_obst_size);
1273 isa->name_obst_size = 0;
1274 obstack_free(isa->name_obst, NULL);
1275 obstack_init(isa->name_obst);
1279 cur_reg_set = cg->reg_set;
1281 ia32_irn_ops.cg = cg;
1283 return (arch_code_generator_t *)cg;
1288 /*****************************************************************
1289 * ____ _ _ _____ _____
1290 * | _ \ | | | | |_ _|/ ____| /\
1291 * | |_) | __ _ ___| | _____ _ __ __| | | | | (___ / \
1292 * | _ < / _` |/ __| |/ / _ \ '_ \ / _` | | | \___ \ / /\ \
1293 * | |_) | (_| | (__| < __/ | | | (_| | _| |_ ____) / ____ \
1294 * |____/ \__,_|\___|_|\_\___|_| |_|\__,_| |_____|_____/_/ \_\
1296 *****************************************************************/
1299 * Set output modes for GCC
1301 static const tarval_mode_info mo_integer = {
1308 * Sets the tarval output mode to C semantics.
1310 static void set_tarval_output_modes(void)
1312 set_tarval_mode_output_option(get_modeLs(), &mo_integer);
1313 set_tarval_mode_output_option(get_modeLu(), &mo_integer);
1314 set_tarval_mode_output_option(get_modeIs(), &mo_integer);
1315 set_tarval_mode_output_option(get_modeIu(), &mo_integer);
1316 set_tarval_mode_output_option(get_modeHs(), &mo_integer);
1317 set_tarval_mode_output_option(get_modeHu(), &mo_integer);
1318 set_tarval_mode_output_option(get_modeBs(), &mo_integer);
1319 set_tarval_mode_output_option(get_modeBu(), &mo_integer);
1320 set_tarval_mode_output_option(get_modeC(), &mo_integer);
1321 set_tarval_mode_output_option(get_modeU(), &mo_integer);
1322 set_tarval_mode_output_option(get_modeIu(), &mo_integer);
1327 * The template that generates a new ISA object.
1328 * Note that this template can be changed by command line arguments.
1331 static ia32_isa_t ia32_isa_template = {
1333 &ia32_isa_if, /* isa interface implementation */
1334 &ia32_gp_regs[REG_ESP], /* stack pointer register */
1335 &ia32_gp_regs[REG_EBP], /* base pointer register */
1336 -1, /* stack direction */
1338 NULL, /* 16bit register names */
1339 NULL, /* 8bit register names */
1343 IA32_OPT_INCDEC | /* optimize add 1, sub 1 into inc/dec default: on */
1344 IA32_OPT_DOAM | /* optimize address mode default: on */
1345 IA32_OPT_LEA | /* optimize for LEAs default: on */
1346 IA32_OPT_PLACECNST | /* place constants immediately before instructions, default: on */
1347 IA32_OPT_IMMOPS | /* operations can use immediates, default: on */
1348 IA32_OPT_EXTBB), /* use extended basic block scheduling, default: on */
1349 arch_pentium_4, /* instruction architecture */
1350 arch_pentium_4, /* optimize for architecture */
1351 fp_sse2, /* use sse2 unit */
1352 NULL, /* current code generator */
1354 NULL, /* name obstack */
1355 0 /* name obst size */
1360 * Initializes the backend ISA.
1362 static void *ia32_init(FILE *file_handle) {
1363 static int inited = 0;
1369 set_tarval_output_modes();
1371 isa = xmalloc(sizeof(*isa));
1372 memcpy(isa, &ia32_isa_template, sizeof(*isa));
1374 ia32_register_init(isa);
1375 ia32_create_opcodes();
1377 if ((ARCH_INTEL(isa->arch) && isa->arch < arch_pentium_4) ||
1378 (ARCH_AMD(isa->arch) && isa->arch < arch_athlon))
1379 /* no SSE2 for these CPUs */
1380 isa->fp_kind = fp_x87;
1382 if (ARCH_INTEL(isa->opt_arch) && isa->opt_arch >= arch_pentium_4) {
1383 /* the Pentium 4 doesn't like inc and dec instructions */
1384 isa->opt &= ~IA32_OPT_INCDEC;
1387 isa->regs_16bit = pmap_create();
1388 isa->regs_8bit = pmap_create();
1389 isa->types = pmap_create();
1390 isa->tv_ent = pmap_create();
1391 isa->out = file_handle;
1393 ia32_build_16bit_reg_map(isa->regs_16bit);
1394 ia32_build_8bit_reg_map(isa->regs_8bit);
1396 /* patch register names of x87 registers */
1398 ia32_st_regs[0].name = "st";
1399 ia32_st_regs[1].name = "st(1)";
1400 ia32_st_regs[2].name = "st(2)";
1401 ia32_st_regs[3].name = "st(3)";
1402 ia32_st_regs[4].name = "st(4)";
1403 ia32_st_regs[5].name = "st(5)";
1404 ia32_st_regs[6].name = "st(6)";
1405 ia32_st_regs[7].name = "st(7)";
1409 isa->name_obst = xmalloc(sizeof(*isa->name_obst));
1410 obstack_init(isa->name_obst);
1411 isa->name_obst_size = 0;
1414 ia32_handle_intrinsics();
1415 ia32_switch_section(NULL, NO_SECTION);
1416 fprintf(isa->out, "\t.intel_syntax\n");
1426 * Closes the output file and frees the ISA structure.
1428 static void ia32_done(void *self) {
1429 ia32_isa_t *isa = self;
1431 /* now emit all global declarations */
1432 ia32_gen_decls(isa->out);
1434 pmap_destroy(isa->regs_16bit);
1435 pmap_destroy(isa->regs_8bit);
1436 pmap_destroy(isa->tv_ent);
1437 pmap_destroy(isa->types);
1440 //printf("name obst size = %d bytes\n", isa->name_obst_size);
1441 obstack_free(isa->name_obst, NULL);
1449 * Return the number of register classes for this architecture.
1450 * We always report these:
1451 * - the general purpose registers
1452 * - the SSE floating point register set
1453 * - the virtual floating point registers
1455 static int ia32_get_n_reg_class(const void *self) {
1460 * Return the register class for index i.
1462 static const arch_register_class_t *ia32_get_reg_class(const void *self, int i) {
1463 assert(i >= 0 && i < 3 && "Invalid ia32 register class requested.");
1465 return &ia32_reg_classes[CLASS_ia32_gp];
1467 return &ia32_reg_classes[CLASS_ia32_xmm];
1469 return &ia32_reg_classes[CLASS_ia32_vfp];
1473 * Get the register class which shall be used to store a value of a given mode.
1474 * @param self The this pointer.
1475 * @param mode The mode in question.
1476 * @return A register class which can hold values of the given mode.
1478 const arch_register_class_t *ia32_get_reg_class_for_mode(const void *self, const ir_mode *mode) {
1479 const ia32_isa_t *isa = self;
1480 if (mode_is_float(mode)) {
1481 return USE_SSE2(isa) ? &ia32_reg_classes[CLASS_ia32_xmm] : &ia32_reg_classes[CLASS_ia32_vfp];
1484 return &ia32_reg_classes[CLASS_ia32_gp];
1488 * Get the ABI restrictions for procedure calls.
1489 * @param self The this pointer.
1490 * @param method_type The type of the method (procedure) in question.
1491 * @param abi The abi object to be modified
1493 static void ia32_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi) {
1494 const ia32_isa_t *isa = self;
1497 unsigned cc = get_method_calling_convention(method_type);
1498 int n = get_method_n_params(method_type);
1501 int i, ignore_1, ignore_2;
1503 const arch_register_t *reg;
1504 be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);
1506 unsigned use_push = !IS_P6_ARCH(isa->opt_arch);
1508 /* set abi flags for calls */
1509 call_flags.bits.left_to_right = 0; /* always last arg first on stack */
1510 call_flags.bits.store_args_sequential = use_push;
1511 /* call_flags.bits.try_omit_fp not changed: can handle both settings */
1512 call_flags.bits.fp_free = 0; /* the frame pointer is fixed in IA32 */
1513 call_flags.bits.call_has_imm = 1; /* IA32 calls can have immediate address */
1515 /* set stack parameter passing style */
1516 be_abi_call_set_flags(abi, call_flags, &ia32_abi_callbacks);
1518 /* collect the mode for each type */
1519 modes = alloca(n * sizeof(modes[0]));
1521 for (i = 0; i < n; i++) {
1522 tp = get_method_param_type(method_type, i);
1523 modes[i] = get_type_mode(tp);
1526 /* set register parameters */
1527 if (cc & cc_reg_param) {
1528 /* determine the number of parameters passed via registers */
1529 biggest_n = ia32_get_n_regparam_class(n, modes, &ignore_1, &ignore_2);
1531 /* loop over all parameters and set the register requirements */
1532 for (i = 0; i <= biggest_n; i++) {
1533 reg = ia32_get_RegParam_reg(n, modes, i, cc);
1534 assert(reg && "no register found for parameter");
1535 be_abi_call_param_reg(abi, i, reg);
1542 /* set stack parameters */
1543 for (i = stack_idx; i < n; i++) {
1544 be_abi_call_param_stack(abi, i, 1, 0, 0);
1548 /* set return registers */
1549 n = get_method_n_ress(method_type);
1551 assert(n <= 2 && "more than two results not supported");
1553 /* In case of 64bit returns, we will have two 32bit values */
1555 tp = get_method_res_type(method_type, 0);
1556 mode = get_type_mode(tp);
1558 assert(!mode_is_float(mode) && "two FP results not supported");
1560 tp = get_method_res_type(method_type, 1);
1561 mode = get_type_mode(tp);
1563 assert(!mode_is_float(mode) && "two FP results not supported");
1565 be_abi_call_res_reg(abi, 0, &ia32_gp_regs[REG_EAX]);
1566 be_abi_call_res_reg(abi, 1, &ia32_gp_regs[REG_EDX]);
1569 const arch_register_t *reg;
1571 tp = get_method_res_type(method_type, 0);
1572 assert(is_atomic_type(tp));
1573 mode = get_type_mode(tp);
1575 reg = mode_is_float(mode) ?
1576 (USE_SSE2(isa) ? &ia32_xmm_regs[REG_XMM0] : &ia32_vfp_regs[REG_VF0]) :
1577 &ia32_gp_regs[REG_EAX];
1579 be_abi_call_res_reg(abi, 0, reg);
1584 static const void *ia32_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn) {
1585 return &ia32_irn_ops;
1588 const arch_irn_handler_t ia32_irn_handler = {
1592 const arch_irn_handler_t *ia32_get_irn_handler(const void *self) {
1593 return &ia32_irn_handler;
1596 int ia32_to_appear_in_schedule(void *block_env, const ir_node *irn) {
1597 return is_ia32_irn(irn) ? 1 : -1;
1601 * Initializes the code generator interface.
1603 static const arch_code_generator_if_t *ia32_get_code_generator_if(void *self) {
1604 return &ia32_code_gen_if;
1607 list_sched_selector_t ia32_sched_selector;
1610 * Returns the list scheduler selector (currently the trivial selector) with to_appear_in_schedule() overloaded
1612 static const list_sched_selector_t *ia32_get_list_sched_selector(const void *self) {
1613 // memcpy(&ia32_sched_selector, reg_pressure_selector, sizeof(list_sched_selector_t));
1614 memcpy(&ia32_sched_selector, trivial_selector, sizeof(list_sched_selector_t));
1615 ia32_sched_selector.to_appear_in_schedule = ia32_to_appear_in_schedule;
1616 return &ia32_sched_selector;
1620 * Returns the necessary byte alignment for storing a register of the given class.
1622 static int ia32_get_reg_class_alignment(const void *self, const arch_register_class_t *cls) {
1623 ir_mode *mode = arch_register_class_mode(cls);
1624 int bytes = get_mode_size_bytes(mode);
1626 if (mode_is_float(mode) && bytes > 8)
1631 static ia32_intrinsic_env_t intrinsic_env = { NULL, NULL };
1634 * Returns the libFirm configuration parameter for this backend.
1636 static const backend_params *ia32_get_libfirm_params(void) {
1637 static const arch_dep_params_t ad = {
1638 1, /* also use subs */
1639 4, /* maximum shifts */
1640 31, /* maximum shift amount */
1642 1, /* allow Mulhs */
1643 1, /* allow Mulus */
1644 32 /* Mulh allowed up to 32 bit */
1646 static backend_params p = {
1647 NULL, /* no additional opcodes */
1648 NULL, /* will be set later */
1649 1, /* need dword lowering */
1650 ia32_create_intrinsic_fkt,
1651 &intrinsic_env, /* context for ia32_create_intrinsic_fkt */
1659 /* instruction set architectures. */
1660 static const lc_opt_enum_int_items_t arch_items[] = {
1661 { "386", arch_i386, },
1662 { "486", arch_i486, },
1663 { "pentium", arch_pentium, },
1664 { "586", arch_pentium, },
1665 { "pentiumpro", arch_pentium_pro, },
1666 { "686", arch_pentium_pro, },
1667 { "pentiummmx", arch_pentium_mmx, },
1668 { "pentium2", arch_pentium_2, },
1669 { "p2", arch_pentium_2, },
1670 { "pentium3", arch_pentium_3, },
1671 { "p3", arch_pentium_3, },
1672 { "pentium4", arch_pentium_4, },
1673 { "p4", arch_pentium_4, },
1674 { "pentiumm", arch_pentium_m, },
1675 { "pm", arch_pentium_m, },
1676 { "core", arch_core, },
1678 { "athlon", arch_athlon, },
1679 { "athlon64", arch_athlon_64, },
1680 { "opteron", arch_opteron, },
1684 static lc_opt_enum_int_var_t arch_var = {
1685 &ia32_isa_template.arch, arch_items
1688 static lc_opt_enum_int_var_t opt_arch_var = {
1689 &ia32_isa_template.opt_arch, arch_items
1692 static const lc_opt_enum_int_items_t fp_unit_items[] = {
1694 { "sse2", fp_sse2 },
1698 static lc_opt_enum_int_var_t fp_unit_var = {
1699 &ia32_isa_template.fp_kind, fp_unit_items
1702 static const lc_opt_enum_int_items_t gas_items[] = {
1703 { "linux", ASM_LINUX_GAS },
1704 { "mingw", ASM_MINGW_GAS },
1708 static lc_opt_enum_int_var_t gas_var = {
1709 (int *)&asm_flavour, gas_items
1712 static const lc_opt_table_entry_t ia32_options[] = {
1713 LC_OPT_ENT_ENUM_INT("arch", "select the instruction architecture", &arch_var),
1714 LC_OPT_ENT_ENUM_INT("opt", "optimize for instruction architecture", &opt_arch_var),
1715 LC_OPT_ENT_ENUM_INT("fpunit", "select the floating point unit", &fp_unit_var),
1716 LC_OPT_ENT_NEGBIT("noaddrmode", "do not use address mode", &ia32_isa_template.opt, IA32_OPT_DOAM),
1717 LC_OPT_ENT_NEGBIT("nolea", "do not optimize for LEAs", &ia32_isa_template.opt, IA32_OPT_LEA),
1718 LC_OPT_ENT_NEGBIT("noplacecnst", "do not place constants", &ia32_isa_template.opt, IA32_OPT_PLACECNST),
1719 LC_OPT_ENT_NEGBIT("noimmop", "no operations with immediates", &ia32_isa_template.opt, IA32_OPT_IMMOPS),
1720 LC_OPT_ENT_NEGBIT("noextbb", "do not use extended basic block scheduling", &ia32_isa_template.opt, IA32_OPT_EXTBB),
1721 LC_OPT_ENT_ENUM_INT("gasmode", "set the GAS compatibility mode", &gas_var),
1726 * Register command line options for the ia32 backend.
1730 * ia32-arch=arch create instruction for arch
1731 * ia32-opt=arch optimize for run on arch
1732 * ia32-fpunit=unit select floating point unit (x87 or SSE2)
1733 * ia32-incdec optimize for inc/dec
1734 * ia32-noaddrmode do not use address mode
1735 * ia32-nolea do not optimize for LEAs
1736 * ia32-noplacecnst do not place constants
1737 * ia32-noimmop no operations with immediates
1738 * ia32-noextbb do not use extended basic block scheduling
1739 * ia32-gasmode set the GAS compatibility mode
1741 static void ia32_register_options(lc_opt_entry_t *ent)
1743 lc_opt_entry_t *be_grp_ia32 = lc_opt_get_grp(ent, "ia32");
1744 lc_opt_add_table(be_grp_ia32, ia32_options);
1746 #endif /* WITH_LIBCORE */
1748 const arch_isa_if_t ia32_isa_if = {
1751 ia32_get_n_reg_class,
1753 ia32_get_reg_class_for_mode,
1755 ia32_get_irn_handler,
1756 ia32_get_code_generator_if,
1757 ia32_get_list_sched_selector,
1758 ia32_get_reg_class_alignment,
1759 ia32_get_libfirm_params,
1761 ia32_register_options