2 * This is the main ia32 firm backend driver.
3 * @author Christian Wuerdig
20 #include <libcore/lc_opts.h>
21 #include <libcore/lc_opts_enum.h>
22 #endif /* WITH_LIBCORE */
24 #include "pseudo_irg.h"
28 #include "iredges_t.h"
36 #include "../beabi.h" /* the general register allocator interface */
37 #include "../benode_t.h"
38 #include "../belower.h"
39 #include "../besched_t.h"
41 #include "bearch_ia32_t.h"
43 #include "ia32_new_nodes.h" /* ia32 nodes interface */
44 #include "gen_ia32_regalloc_if.h" /* the generated interface (register type and class defenitions) */
45 #include "ia32_gen_decls.h" /* interface declaration emitter */
46 #include "ia32_transform.h"
47 #include "ia32_emitter.h"
48 #include "ia32_map_regs.h"
49 #include "ia32_optimize.h"
51 #include "ia32_dbg_stat.h"
52 #include "ia32_finish.h"
53 #include "ia32_util.h"
55 #define DEBUG_MODULE "firm.be.ia32.isa"
58 static set *cur_reg_set = NULL;
61 #define is_Start(irn) (get_irn_opcode(irn) == iro_Start)
63 /* Returns the unique per-irg GP NoReg node. */
64 ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg) {
65 return be_abi_get_callee_save_irn(cg->birg->abi, &ia32_gp_regs[REG_GP_NOREG]);
68 /* Returns the unique per-irg FP NoReg node. */
69 ir_node *ia32_new_NoReg_fp(ia32_code_gen_t *cg) {
70 return be_abi_get_callee_save_irn(cg->birg->abi,
71 USE_SSE2(cg) ? &ia32_xmm_regs[REG_XMM_NOREG] : &ia32_vfp_regs[REG_VFP_NOREG]);
74 /**************************************************
77 * _ __ ___ __ _ __ _| | | ___ ___ _| |_
78 * | '__/ _ \/ _` | / _` | | |/ _ \ / __| | | _|
79 * | | | __/ (_| | | (_| | | | (_) | (__ | | |
80 * |_| \___|\__, | \__,_|_|_|\___/ \___| |_|_|
83 **************************************************/
85 static ir_node *my_skip_proj(const ir_node *n) {
93 * Return register requirements for an ia32 node.
94 * If the node returns a tuple (mode_T), then its Projs
95 * will be asked for this information.
97 static const arch_register_req_t *ia32_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos) {
98 const ia32_irn_ops_t *ops = self;
99 const ia32_register_req_t *irn_req;
100 long node_pos = pos == -1 ? 0 : pos;
101 ir_mode *mode = is_Block(irn) ? NULL : get_irn_mode(irn);
102 FIRM_DBG_REGISTER(firm_dbg_module_t *mod, DEBUG_MODULE);
104 if (is_Block(irn) || mode == mode_M || mode == mode_X) {
105 DBG((mod, LEVEL_1, "ignoring Block, mode_M, mode_X node %+F\n", irn));
109 if (mode == mode_T && pos < 0) {
110 DBG((mod, LEVEL_1, "ignoring request OUT requirements for node %+F\n", irn));
114 DBG((mod, LEVEL_1, "get requirements at pos %d for %+F ... ", pos, irn));
118 node_pos = ia32_translate_proj_pos(irn);
124 irn = my_skip_proj(irn);
126 DB((mod, LEVEL_1, "skipping Proj, going to %+F at pos %d ... ", irn, node_pos));
129 if (is_ia32_irn(irn)) {
131 irn_req = get_ia32_in_req(irn, pos);
134 irn_req = get_ia32_out_req(irn, node_pos);
137 DB((mod, LEVEL_1, "returning reqs for %+F at pos %d\n", irn, pos));
139 memcpy(req, &(irn_req->req), sizeof(*req));
141 if (arch_register_req_is(&(irn_req->req), should_be_same)) {
142 assert(irn_req->same_pos >= 0 && "should be same constraint for in -> out NYI");
143 req->other_same = get_irn_n(irn, irn_req->same_pos);
146 if (arch_register_req_is(&(irn_req->req), should_be_different)) {
147 assert(irn_req->different_pos >= 0 && "should be different constraint for in -> out NYI");
148 req->other_different = get_irn_n(irn, irn_req->different_pos);
152 /* treat Unknowns like Const with default requirements */
153 if (is_Unknown(irn)) {
154 DB((mod, LEVEL_1, "returning UKNWN reqs for %+F\n", irn));
155 if (mode_is_float(mode)) {
156 if (USE_SSE2(ops->cg))
157 memcpy(req, &(ia32_default_req_ia32_xmm_xmm_UKNWN), sizeof(*req));
159 memcpy(req, &(ia32_default_req_ia32_vfp_vfp_UKNWN), sizeof(*req));
161 else if (mode_is_int(mode) || mode_is_reference(mode))
162 memcpy(req, &(ia32_default_req_ia32_gp_gp_UKNWN), sizeof(*req));
163 else if (mode == mode_T || mode == mode_M) {
164 DBG((mod, LEVEL_1, "ignoring Unknown node %+F\n", irn));
168 assert(0 && "unsupported Unknown-Mode");
171 DB((mod, LEVEL_1, "returning NULL for %+F (not ia32)\n", irn));
179 static void ia32_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg) {
181 const ia32_irn_ops_t *ops = self;
183 if (get_irn_mode(irn) == mode_X) {
187 DBG((ops->cg->mod, LEVEL_1, "ia32 assigned register %s to node %+F\n", reg->name, irn));
190 pos = ia32_translate_proj_pos(irn);
191 irn = my_skip_proj(irn);
194 if (is_ia32_irn(irn)) {
195 const arch_register_t **slots;
197 slots = get_ia32_slots(irn);
201 ia32_set_firm_reg(irn, reg, cur_reg_set);
205 static const arch_register_t *ia32_get_irn_reg(const void *self, const ir_node *irn) {
207 const arch_register_t *reg = NULL;
211 if (get_irn_mode(irn) == mode_X) {
215 pos = ia32_translate_proj_pos(irn);
216 irn = my_skip_proj(irn);
219 if (is_ia32_irn(irn)) {
220 const arch_register_t **slots;
221 slots = get_ia32_slots(irn);
225 reg = ia32_get_firm_reg(irn, cur_reg_set);
231 static arch_irn_class_t ia32_classify(const void *self, const ir_node *irn) {
232 irn = my_skip_proj(irn);
234 return arch_irn_class_branch;
235 else if (is_ia32_Cnst(irn))
236 return arch_irn_class_const;
237 else if (is_ia32_Ld(irn))
238 return arch_irn_class_load;
239 else if (is_ia32_St(irn) || is_ia32_Store8Bit(irn))
240 return arch_irn_class_store;
241 else if (is_ia32_irn(irn))
242 return arch_irn_class_normal;
247 static arch_irn_flags_t ia32_get_flags(const void *self, const ir_node *irn) {
248 irn = my_skip_proj(irn);
249 if (is_ia32_irn(irn))
250 return get_ia32_flags(irn);
253 return arch_irn_flags_ignore;
258 static entity *ia32_get_frame_entity(const void *self, const ir_node *irn) {
259 return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
262 static void ia32_set_stack_bias(const void *self, ir_node *irn, int bias) {
264 const ia32_irn_ops_t *ops = self;
266 if (get_ia32_frame_ent(irn)) {
267 ia32_am_flavour_t am_flav = get_ia32_am_flavour(irn);
269 DBG((ops->cg->mod, LEVEL_1, "stack biased %+F with %d\n", irn, bias));
270 snprintf(buf, sizeof(buf), "%d", bias);
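/* a Normal operation carries the bias as its immediate constant, while an
   address mode operation gets it added to its AM offset (the two cases below) */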
272 if (get_ia32_op_type(irn) == ia32_Normal) {
273 set_ia32_cnst(irn, buf);
276 add_ia32_am_offs(irn, buf);
278 set_ia32_am_flavour(irn, am_flav);
284 be_abi_call_flags_bits_t flags;
285 const arch_isa_t *isa;
286 const arch_env_t *aenv;
290 static void *ia32_abi_init(const be_abi_call_t *call, const arch_env_t *aenv, ir_graph *irg)
292 ia32_abi_env_t *env = xmalloc(sizeof(env[0]));
293 be_abi_call_flags_t fl = be_abi_call_get_flags(call);
294 env->flags = fl.bits;
297 env->isa = aenv->isa;
302 * Put all registers which should not be saved by the prologue/epilogue into a set.
304 * @param self The callback object.
305 * @param s The result set.
307 static void ia32_abi_dont_save_regs(void *self, pset *s)
309 ia32_abi_env_t *env = self;
310 if(env->flags.try_omit_fp)
311 pset_insert_ptr(s, env->isa->bp);
315 * Generate the routine prologue.
317 * @param self The callback object.
318 * @param mem A pointer to the mem node. Update this if you define new memory.
319 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
321 * @return The register which shall be used as a stack frame base.
323 * All nodes which define registers in @p reg_map must keep @p reg_map current.
325 static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map)
327 ia32_abi_env_t *env = self;
329 if (!env->flags.try_omit_fp) {
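/* build the classic frame setup, roughly "push ebp; mov ebp, esp", expressed
   below as an ia32 Push plus Copy/CopyKeep nodes on the be level */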
330 ir_node *bl = get_irg_start_block(env->irg);
331 ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
332 ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);
336 push = new_rd_ia32_Push(NULL, env->irg, bl, curr_sp, curr_bp, *mem);
337 curr_sp = new_r_Proj(env->irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
338 *mem = new_r_Proj(env->irg, bl, push, mode_M, pn_ia32_Push_M);
340 /* the push must have SP out register */
341 arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
342 set_ia32_flags(push, arch_irn_flags_ignore);
344 /* move esp to ebp */
345 curr_bp = be_new_Copy(env->isa->bp->reg_class, env->irg, bl, curr_sp);
346 be_set_constr_single_reg(curr_bp, BE_OUT_POS(0), env->isa->bp);
347 arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
348 be_node_set_flags(curr_bp, BE_OUT_POS(0), arch_irn_flags_ignore);
350 /* beware: the copy must be done before any other sp use */
351 curr_sp = be_new_CopyKeep_single(env->isa->sp->reg_class, env->irg, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
352 be_set_constr_single_reg(curr_sp, BE_OUT_POS(0), env->isa->sp);
353 arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
354 be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore);
356 be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
357 be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);
366 * Generate the routine epilogue.
367 * @param self The callback object.
368 * @param bl The block for the epilogue
369 * @param mem A pointer to the mem node. Update this if you define new memory.
370 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
371 * @return The register which shall be used as a stack frame base.
373 * All nodes which define registers in @p reg_map must keep @p reg_map current.
375 static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map)
377 ia32_abi_env_t *env = self;
378 ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
379 ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);
381 if (env->flags.try_omit_fp) {
382 /* simply remove the stack frame here */
383 curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, *mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
386 const ia32_isa_t *isa = (ia32_isa_t *)env->isa;
387 ir_mode *mode_bp = env->isa->bp->reg_class->mode;
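/* tear the frame down again: currently always a single "leave" (see the
   condition below), which is equivalent to "mov esp, ebp; pop ebp" */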
389 /* gcc always emits a leave at the end of a routine */
390 if (1 || ARCH_AMD(isa->opt_arch)) {
394 leave = new_rd_ia32_Leave(NULL, env->irg, bl, curr_sp, *mem);
395 set_ia32_flags(leave, arch_irn_flags_ignore);
396 curr_bp = new_r_Proj(current_ir_graph, bl, leave, mode_bp, pn_ia32_Leave_frame);
397 curr_sp = new_r_Proj(current_ir_graph, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
398 *mem = new_r_Proj(current_ir_graph, bl, leave, mode_M, pn_ia32_Leave_M);
403 /* copy ebp to esp */
404 curr_sp = be_new_SetSP(env->isa->sp, env->irg, bl, curr_sp, curr_bp, *mem);
407 pop = new_rd_ia32_Pop(NULL, env->irg, bl, curr_sp, *mem);
408 set_ia32_flags(pop, arch_irn_flags_ignore);
409 curr_bp = new_r_Proj(current_ir_graph, bl, pop, mode_bp, pn_ia32_Pop_res);
410 curr_sp = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
411 *mem = new_r_Proj(current_ir_graph, bl, pop, mode_M, pn_ia32_Pop_M);
413 arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
414 arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
417 be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
418 be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);
422 * Produces the type which sits between the stack args and the locals on the stack.
423 * It will contain the return address and space to store the old base pointer.
424 * @return The Firm type modeling the ABI between-type.
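 * Assumed layout (from the entity offsets set below): the old base pointer at
 * offset 0 with the return address above it; when the frame pointer is omitted,
 * only the return address remains.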
426 static ir_type *ia32_abi_get_between_type(void *self)
428 #define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
429 static ir_type *omit_fp_between_type = NULL;
430 static ir_type *between_type = NULL;
432 ia32_abi_env_t *env = self;
434 if ( !between_type) {
436 entity *ret_addr_ent;
437 entity *omit_fp_ret_addr_ent;
439 ir_type *old_bp_type = new_type_primitive(IDENT("bp"), mode_P);
440 ir_type *ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_P);
442 between_type = new_type_struct(IDENT("ia32_between_type"));
443 old_bp_ent = new_entity(between_type, IDENT("old_bp"), old_bp_type);
444 ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type);
446 set_entity_offset_bytes(old_bp_ent, 0);
447 set_entity_offset_bytes(ret_addr_ent, get_type_size_bytes(old_bp_type));
448 set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
449 set_type_state(between_type, layout_fixed);
451 omit_fp_between_type = new_type_struct(IDENT("ia32_between_type_omit_fp"));
452 omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type);
454 set_entity_offset_bytes(omit_fp_ret_addr_ent, 0);
455 set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
456 set_type_state(omit_fp_between_type, layout_fixed);
459 return env->flags.try_omit_fp ? omit_fp_between_type : between_type;
464 * Get the estimated cycle count for @p irn.
466 * @param self The this pointer.
467 * @param irn The node.
469 * @return The estimated cycle count for this operation
471 static int ia32_get_op_estimated_cost(const void *self, const ir_node *irn)
478 switch (get_ia32_irn_opcode(irn)) {
480 case iro_ia32_DivMod:
485 case iro_ia32_l_Load:
495 case iro_ia32_xStore:
496 case iro_ia32_l_Store:
498 case iro_ia32_Store8Bit:
506 case iro_ia32_l_MulS:
519 * Returns the inverse operation of @p irn, recalculating the argument at position @p i.
521 * @param irn The original operation
522 * @param i Index of the argument we want the inverse operation to yield
523 * @param inverse struct to be filled with the resulting inverse op
524 * @param obstack The obstack to use for allocation of the returned nodes array
525 * @return The inverse operation, or NULL if the operation is not invertible
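 * For example (see the cases below): an Add with an immediate constant is
 * inverted by an Add with the negated constant, while a plain Add is inverted
 * by a Sub of the result and the other operand.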
527 static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) {
530 ir_node *block, *noreg, *nomem;
533 /* we cannot invert non-ia32 irns */
534 if (! is_ia32_irn(irn))
537 /* operand must always be a real operand (not base, index or mem) */
538 if (i != 2 && i != 3)
541 /* we don't invert address mode operations */
542 if (get_ia32_op_type(irn) != ia32_Normal)
545 irg = get_irn_irg(irn);
546 block = get_nodes_block(irn);
547 mode = get_ia32_res_mode(irn);
548 noreg = get_irn_n(irn, 0);
549 nomem = new_r_NoMem(irg);
551 /* initialize structure */
552 inverse->nodes = obstack_alloc(obst, 2 * sizeof(inverse->nodes[0]));
556 switch (get_ia32_irn_opcode(irn)) {
558 if (get_ia32_immop_type(irn) == ia32_ImmConst) {
559 /* we have an add with a const here */
560 /* inverse == add with negated const */
561 inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
562 pnc = pn_ia32_Add_res;
564 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
565 set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
566 set_ia32_commutative(inverse->nodes[0]);
568 else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
569 /* we have an add with a symconst here */
570 /* inverse == sub with const */
571 inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
572 pnc = pn_ia32_Sub_res;
574 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
577 /* normal add: inverse == sub */
578 ir_node *proj = ia32_get_res_proj(irn);
581 inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, i ^ 1), nomem);
582 pnc = pn_ia32_Sub_res;
587 if (get_ia32_immop_type(irn) != ia32_ImmNone) {
588 /* we have a sub with a const/symconst here */
589 /* inverse == add with this const */
590 inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
591 pnc = pn_ia32_Add_res;
592 inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
593 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
597 ir_node *proj = ia32_get_res_proj(irn);
601 inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, 3), nomem);
604 inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, 2), proj, nomem);
606 pnc = pn_ia32_Sub_res;
611 if (get_ia32_immop_type(irn) != ia32_ImmNone) {
612 /* xor with const: inverse = xor */
613 inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
614 pnc = pn_ia32_Eor_res;
615 inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
616 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
620 inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, (ir_node *)irn, get_irn_n(irn, i), nomem);
621 pnc = pn_ia32_Eor_res;
626 ir_node *proj = ia32_get_res_proj(irn);
629 inverse->nodes[0] = new_rd_ia32_Not(NULL, irg, block, noreg, noreg, proj, nomem);
630 pnc = pn_ia32_Not_res;
634 case iro_ia32_Minus: {
635 ir_node *proj = ia32_get_res_proj(irn);
638 inverse->nodes[0] = new_rd_ia32_Minus(NULL, irg, block, noreg, noreg, proj, nomem);
639 pnc = pn_ia32_Minus_res;
644 /* inverse operation not supported */
648 set_ia32_res_mode(inverse->nodes[0], mode);
649 inverse->nodes[1] = new_r_Proj(irg, block, inverse->nodes[0], mode, pnc);
655 * Check if irn can load its operand at position i from memory (source address mode).
656 * @param self Pointer to irn ops itself
657 * @param irn The irn to be checked
658 * @param i The operands position
659 * @return Non-zero if the operand can be loaded
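 * Note: the usual ia32 input layout is assumed here: 0 = base, 1 = index,
 * 2 and 3 = the real operands, 4 = memory.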
661 static int ia32_possible_memory_operand(const void *self, const ir_node *irn, unsigned int i) {
662 if (! is_ia32_irn(irn) || /* must be an ia32 irn */
663 get_irn_arity(irn) != 5 || /* must be a binary operation */
664 get_ia32_op_type(irn) != ia32_Normal || /* must not already be an address mode irn */
665 ! (get_ia32_am_support(irn) & ia32_am_Source) || /* must be capable of source address mode */
666 (i != 2 && i != 3) || /* a "real" operand position must be requested */
667 (i == 2 && ! is_ia32_commutative(irn)) || /* if the first operand is requested, the irn must be commutative */
668 is_ia32_use_frame(irn)) /* must not already use frame */
674 static void ia32_perform_memory_operand(const void *self, ir_node *irn, ir_node *reload, unsigned int i) {
675 assert(ia32_possible_memory_operand(self, irn, i) && "Cannot perform memory operand change");
676 assert(get_nodes_block(reload) == get_nodes_block(irn) && "Reload must be in same block as irn.");
678 if (get_irn_n_edges(reload) > 1)
682 ir_node *tmp = get_irn_n(irn, 3);
683 set_irn_n(irn, 3, get_irn_n(irn, 2));
684 set_irn_n(irn, 2, tmp);
687 set_ia32_am_support(irn, ia32_am_Source);
688 set_ia32_op_type(irn, ia32_AddrModeS);
689 set_ia32_am_flavour(irn, ia32_B);
690 set_ia32_ls_mode(irn, get_irn_mode(reload));
691 set_ia32_frame_ent(irn, be_get_frame_entity(reload));
692 set_ia32_use_frame(irn);
694 set_irn_n(irn, 0, be_get_Reload_frame(reload));
695 set_irn_n(irn, 4, be_get_Reload_mem(reload));
698 The input at position one is the index register, which is NoReg.
699 We would need cg object to get a real noreg, but we cannot
702 set_irn_n(irn, 3, get_irn_n(irn, 1));
704 DBG_OPT_AM_S(reload, irn);
707 static const be_abi_callbacks_t ia32_abi_callbacks = {
710 ia32_abi_get_between_type,
711 ia32_abi_dont_save_regs,
716 /* fill register allocator interface */
718 static const arch_irn_ops_if_t ia32_irn_ops_if = {
719 ia32_get_irn_reg_req,
724 ia32_get_frame_entity,
727 ia32_get_op_estimated_cost,
728 ia32_possible_memory_operand,
729 ia32_perform_memory_operand,
732 ia32_irn_ops_t ia32_irn_ops = {
739 /**************************************************
742 * ___ ___ __| | ___ __ _ ___ _ __ _| |_
743 * / __/ _ \ / _` |/ _ \/ _` |/ _ \ '_ \ | | _|
744 * | (_| (_) | (_| | __/ (_| | __/ | | | | | |
745 * \___\___/ \__,_|\___|\__, |\___|_| |_| |_|_|
748 **************************************************/
751 * Transforms the standard Firm graph into an ia32 Firm graph.
754 static void ia32_prepare_graph(void *self) {
755 ia32_code_gen_t *cg = self;
756 dom_front_info_t *dom;
757 DEBUG_ONLY(firm_dbg_module_t *old_mod = cg->mod;)
759 FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.transform");
761 /* 1st: transform constants and psi condition trees */
762 ia32_pre_transform_phase(cg);
764 /* 2nd: transform all remaining nodes */
765 ia32_register_transformers();
766 dom = be_compute_dominance_frontiers(cg->irg);
767 irg_walk_blkwise_graph(cg->irg, NULL, ia32_transform_node, cg);
768 be_free_dominance_frontiers(dom);
771 be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);
773 /* 3rd: optimize address mode */
774 FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.am");
775 ia32_optimize_addressmode(cg);
778 be_dump(cg->irg, "-am", dump_ir_block_graph_sched);
780 DEBUG_ONLY(cg->mod = old_mod;)
784 * Dummy functions for hooks we don't need but which must be filled.
786 static void ia32_before_sched(void *self) {
789 static void remove_unused_nodes(ir_node *irn, bitset_t *already_visited) {
797 mode = get_irn_mode(irn);
799 /* check if we already saw this node or the node has more than one user */
800 if (bitset_contains_irn(already_visited, irn) || get_irn_n_edges(irn) > 1)
803 /* mark irn visited */
804 bitset_add_irn(already_visited, irn);
806 /* non-Tuple nodes with one user: ok, return */
807 if (get_irn_n_edges(irn) >= 1 && mode != mode_T)
810 /* tuple node has one user which is not the mem proj -> ok */
811 if (mode == mode_T && get_irn_n_edges(irn) == 1) {
812 mem_proj = ia32_get_proj_for_mode(irn, mode_M);
817 for (i = get_irn_arity(irn) - 1; i >= 0; i--) {
818 ir_node *pred = get_irn_n(irn, i);
820 /* do not follow memory edges or we will accidentally remove stores */
821 if (is_Proj(pred) && get_irn_mode(pred) == mode_M)
824 set_irn_n(irn, i, new_Bad());
827 The current node is about to be removed: if the predecessor
828 has only this node as user, it needs to be removed as well.
830 if (get_irn_n_edges(pred) <= 1)
831 remove_unused_nodes(pred, already_visited);
834 if (sched_is_scheduled(irn))
838 static void remove_unused_loads_walker(ir_node *irn, void *env) {
839 bitset_t *already_visited = env;
840 if (is_ia32_Ld(irn) && ! bitset_contains_irn(already_visited, irn))
841 remove_unused_nodes(irn, env);
845 * Called before the register allocator.
846 * Calculate a block schedule here. We need it for the x87
847 * simulator and the emitter.
849 static void ia32_before_ra(void *self) {
850 ia32_code_gen_t *cg = self;
851 bitset_t *already_visited = bitset_irg_malloc(cg->irg);
853 cg->blk_sched = sched_create_block_schedule(cg->irg);
857 Sometimes there are unused Loads, pinned only by memory.
858 We need to remove those Loads and all other nodes which won't be used
859 after removing the Load from the schedule.
861 irg_walk_graph(cg->irg, remove_unused_loads_walker, NULL, already_visited);
862 bitset_free(already_visited);
867 * Transforms a be node into a Load.
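 * (A be Reload becomes an ia32 Load, vfld or xLoad, depending on mode and FP unit.)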
869 static void transform_to_Load(ia32_transform_env_t *env) {
870 ir_node *irn = env->irn;
871 entity *ent = be_get_frame_entity(irn);
872 ir_mode *mode = env->mode;
873 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
874 ir_node *nomem = new_rd_NoMem(env->irg);
875 ir_node *sched_point = NULL;
876 ir_node *ptr = get_irn_n(irn, 0);
877 ir_node *mem = be_is_Reload(irn) ? get_irn_n(irn, 1) : nomem;
878 ir_node *new_op, *proj;
879 const arch_register_t *reg;
881 if (sched_is_scheduled(irn)) {
882 sched_point = sched_prev(irn);
885 if (mode_is_float(mode)) {
886 if (USE_SSE2(env->cg))
887 new_op = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, ptr, noreg, mem);
889 new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem);
892 new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem);
895 set_ia32_am_support(new_op, ia32_am_Source);
896 set_ia32_op_type(new_op, ia32_AddrModeS);
897 set_ia32_am_flavour(new_op, ia32_B);
898 set_ia32_ls_mode(new_op, mode);
899 set_ia32_frame_ent(new_op, ent);
900 set_ia32_use_frame(new_op);
902 DBG_OPT_RELOAD2LD(irn, new_op);
904 proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, pn_Load_res);
907 sched_add_after(sched_point, new_op);
908 sched_add_after(new_op, proj);
913 /* copy the register from the old node to the new Load */
914 reg = arch_get_irn_register(env->cg->arch_env, irn);
915 arch_set_irn_register(env->cg->arch_env, new_op, reg);
917 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, irn));
923 * Transforms a be node into a Store.
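 * (A be Spill becomes an ia32 Store, Store8Bit, vfst or xStore, depending on mode and FP unit.)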
925 static void transform_to_Store(ia32_transform_env_t *env) {
926 ir_node *irn = env->irn;
927 entity *ent = be_get_frame_entity(irn);
928 ir_mode *mode = env->mode;
929 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
930 ir_node *nomem = new_rd_NoMem(env->irg);
931 ir_node *ptr = get_irn_n(irn, 0);
932 ir_node *val = get_irn_n(irn, 1);
933 ir_node *new_op, *proj;
934 ir_node *sched_point = NULL;
936 if (sched_is_scheduled(irn)) {
937 sched_point = sched_prev(irn);
940 if (mode_is_float(mode)) {
941 if (USE_SSE2(env->cg))
942 new_op = new_rd_ia32_xStore(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
944 new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
946 else if (get_mode_size_bits(mode) == 8) {
947 new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
950 new_op = new_rd_ia32_Store(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
953 set_ia32_am_support(new_op, ia32_am_Dest);
954 set_ia32_op_type(new_op, ia32_AddrModeD);
955 set_ia32_am_flavour(new_op, ia32_B);
956 set_ia32_ls_mode(new_op, mode);
957 set_ia32_frame_ent(new_op, ent);
958 set_ia32_use_frame(new_op);
960 DBG_OPT_SPILL2ST(irn, new_op);
962 proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode_M, pn_ia32_Store_M);
965 sched_add_after(sched_point, new_op);
969 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, irn));
974 static ir_node *create_push(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, ir_node *mem, entity *ent, const char *offset) {
975 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
977 ir_node *push = new_rd_ia32_Push(env->dbg, env->irg, env->block, sp, noreg, mem);
979 set_ia32_frame_ent(push, ent);
980 set_ia32_use_frame(push);
981 set_ia32_op_type(push, ia32_AddrModeS);
982 set_ia32_am_flavour(push, ia32_B);
983 set_ia32_ls_mode(push, mode_Is);
985 add_ia32_am_offs(push, offset);
987 sched_add_before(schedpoint, push);
991 static ir_node *create_pop(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, entity *ent, const char *offset) {
992 ir_node *pop = new_rd_ia32_Pop(env->dbg, env->irg, env->block, sp, new_NoMem());
994 set_ia32_frame_ent(pop, ent);
995 set_ia32_use_frame(pop);
996 set_ia32_op_type(pop, ia32_AddrModeD);
997 set_ia32_am_flavour(pop, ia32_B);
998 set_ia32_ls_mode(pop, mode_Is);
1000 add_ia32_am_offs(pop, offset);
1002 sched_add_before(schedpoint, pop);
1007 static ir_node* create_spproj(ia32_transform_env_t *env, ir_node *pred, ir_node *schedpoint, const ir_node *oldsp) {
1008 ir_mode *spmode = get_irn_mode(oldsp);
1009 const arch_register_t *spreg = arch_get_irn_register(env->cg->arch_env, oldsp);
1012 sp = new_rd_Proj(env->dbg, env->irg, env->block, pred, spmode, 0);
1013 arch_set_irn_register(env->cg->arch_env, sp, spreg);
1014 sched_add_before(schedpoint, sp);
1019 static void transform_MemPerm(ia32_transform_env_t *env) {
1021 * Transform a MemPerm. Currently we do this the ugly way and produce
1022 * push/pop cascades into/from memory, which is possible without using any register.
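 * Roughly: all input entities are pushed in order and then popped into the
 * output entities in reverse order, so every value ends up in its permuted slot.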
1025 ir_node *node = env->irn;
1027 ir_node *sp = get_irn_n(node, 0);
1028 const ir_edge_t *edge;
1029 const ir_edge_t *next;
1032 arity = be_get_MemPerm_entity_arity(node);
1033 pops = alloca(arity * sizeof(pops[0]));
1036 for(i = 0; i < arity; ++i) {
1037 entity *ent = be_get_MemPerm_in_entity(node, i);
1038 ir_type *enttype = get_entity_type(ent);
1039 int entbits = get_type_size_bits(enttype);
1040 ir_node *mem = get_irn_n(node, i + 1);
1043 assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");
1045 push = create_push(env, node, sp, mem, ent, NULL);
1046 sp = create_spproj(env, push, node, sp);
1048 // add another push after the first one
1049 push = create_push(env, node, sp, mem, ent, "4");
1050 sp = create_spproj(env, push, node, sp);
1053 set_irn_n(node, i, new_Bad());
1057 for(i = arity - 1; i >= 0; --i) {
1058 entity *ent = be_get_MemPerm_out_entity(node, i);
1059 ir_type *enttype = get_entity_type(ent);
1060 int entbits = get_type_size_bits(enttype);
1064 assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");
1066 pop = create_pop(env, node, sp, ent, NULL);
1068 // add another pop after the first one
1069 sp = create_spproj(env, pop, node, sp);
1070 pop = create_pop(env, node, sp, ent, "4");
1073 sp = create_spproj(env, pop, node, sp);
1079 // exchange memprojs
1080 foreach_out_edge_safe(node, edge, next) {
1081 ir_node *proj = get_edge_src_irn(edge);
1082 int p = get_Proj_proj(proj);
1086 set_Proj_pred(proj, pops[p]);
1087 set_Proj_proj(proj, 3);
1094 * Fix the mode of Spill/Reload
1096 static ir_mode *fix_spill_mode(ia32_code_gen_t *cg, ir_mode *mode)
1098 if (mode_is_float(mode)) {
1110 * Block walker: calls the transform functions for Spill and Reload nodes.
1112 static void ia32_after_ra_walker(ir_node *block, void *env) {
1113 ir_node *node, *prev;
1114 ia32_code_gen_t *cg = env;
1115 ia32_transform_env_t tenv;
1118 tenv.irg = current_ir_graph;
1120 DEBUG_ONLY(tenv.mod = cg->mod;)
1122 /* beware: the schedule is changed here */
1123 for (node = sched_last(block); !sched_is_begin(node); node = prev) {
1124 prev = sched_prev(node);
1125 if (be_is_Reload(node)) {
1126 /* we always reload the whole register */
1127 tenv.dbg = get_irn_dbg_info(node);
1129 tenv.mode = fix_spill_mode(cg, get_irn_mode(node));
1130 transform_to_Load(&tenv);
1132 else if (be_is_Spill(node)) {
1133 ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
1134 /* we always spill the whole register */
1135 tenv.dbg = get_irn_dbg_info(node);
1137 tenv.mode = fix_spill_mode(cg, get_irn_mode(spillval));
1138 transform_to_Store(&tenv);
1140 else if(be_is_MemPerm(node)) {
1141 tenv.dbg = get_irn_dbg_info(node);
1143 transform_MemPerm(&tenv);
1149 * We transform Spill and Reload here. This needs to be done before
1150 * stack biasing, otherwise we would miss the corrected offset for these nodes.
1152 * If x87 instructions should be emitted, run the x87 simulator and patch
1153 * the virtual instructions. This must obviously be done after register allocation.
1155 static void ia32_after_ra(void *self) {
1156 ia32_code_gen_t *cg = self;
1158 irg_block_walk_graph(cg->irg, NULL, ia32_after_ra_walker, self);
1160 /* if we do x87 code generation, rewrite all the virtual instructions and registers */
1161 if (cg->used_fp == fp_x87 || cg->force_sim) {
1162 x87_simulate_graph(cg->arch_env, cg->irg, cg->blk_sched);
1167 * Last touchups for the graph before emitting.
1169 static void ia32_finish(void *self) {
1170 ia32_code_gen_t *cg = self;
1171 ir_graph *irg = cg->irg;
1173 ia32_finish_irg(irg, cg);
1177 * Emits the code, closes the output file and frees
1178 * the code generator interface.
1180 static void ia32_codegen(void *self) {
1181 ia32_code_gen_t *cg = self;
1182 ir_graph *irg = cg->irg;
1184 ia32_gen_routine(cg->isa->out, irg, cg);
1188 /* remove it from the isa */
1191 /* de-allocate code generator */
1192 del_set(cg->reg_set);
1196 static void *ia32_cg_init(const be_irg_t *birg);
1198 static const arch_code_generator_if_t ia32_code_gen_if = {
1200 NULL, /* before abi introduce hook */
1202 ia32_before_sched, /* before scheduling hook */
1203 ia32_before_ra, /* before register allocation hook */
1204 ia32_after_ra, /* after register allocation hook */
1205 ia32_finish, /* called before codegen */
1206 ia32_codegen /* emit && done */
1210 * Initializes an ia32 code generator.
1212 static void *ia32_cg_init(const be_irg_t *birg) {
1213 ia32_isa_t *isa = (ia32_isa_t *)birg->main_env->arch_env->isa;
1214 ia32_code_gen_t *cg = xcalloc(1, sizeof(*cg));
1216 cg->impl = &ia32_code_gen_if;
1217 cg->irg = birg->irg;
1218 cg->reg_set = new_set(ia32_cmp_irn_reg_assoc, 1024);
1219 cg->arch_env = birg->main_env->arch_env;
1222 cg->blk_sched = NULL;
1223 cg->fp_to_gp = NULL;
1224 cg->gp_to_fp = NULL;
1225 cg->fp_kind = isa->fp_kind;
1226 cg->used_fp = fp_none;
1227 cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
1229 FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.cg");
1231 /* copy optimizations from isa for easier access */
1233 cg->arch = isa->arch;
1234 cg->opt_arch = isa->opt_arch;
1240 if (isa->name_obst_size) {
1241 //printf("freed %d bytes from name obst\n", isa->name_obst_size);
1242 isa->name_obst_size = 0;
1243 obstack_free(isa->name_obst, NULL);
1244 obstack_init(isa->name_obst);
1248 cur_reg_set = cg->reg_set;
1250 ia32_irn_ops.cg = cg;
1252 return (arch_code_generator_t *)cg;
1257 /*****************************************************************
1258 * ____ _ _ _____ _____
1259 * | _ \ | | | | |_ _|/ ____| /\
1260 * | |_) | __ _ ___| | _____ _ __ __| | | | | (___ / \
1261 * | _ < / _` |/ __| |/ / _ \ '_ \ / _` | | | \___ \ / /\ \
1262 * | |_) | (_| | (__| < __/ | | | (_| | _| |_ ____) / ____ \
1263 * |____/ \__,_|\___|_|\_\___|_| |_|\__,_| |_____|_____/_/ \_\
1265 *****************************************************************/
1268 * Set output modes for GCC
1270 static const tarval_mode_info mo_integer = {
1277 * Set the tarval output mode to C semantics.
1279 static void set_tarval_output_modes(void)
1281 set_tarval_mode_output_option(get_modeLs(), &mo_integer);
1282 set_tarval_mode_output_option(get_modeLu(), &mo_integer);
1283 set_tarval_mode_output_option(get_modeIs(), &mo_integer);
1284 set_tarval_mode_output_option(get_modeIu(), &mo_integer);
1285 set_tarval_mode_output_option(get_modeHs(), &mo_integer);
1286 set_tarval_mode_output_option(get_modeHu(), &mo_integer);
1287 set_tarval_mode_output_option(get_modeBs(), &mo_integer);
1288 set_tarval_mode_output_option(get_modeBu(), &mo_integer);
1289 set_tarval_mode_output_option(get_modeC(), &mo_integer);
1290 set_tarval_mode_output_option(get_modeU(), &mo_integer);
1291 set_tarval_mode_output_option(get_modeIu(), &mo_integer);
1296 * The template that generates a new ISA object.
1297 * Note that this template can be changed by command line arguments.
1300 static ia32_isa_t ia32_isa_template = {
1302 &ia32_isa_if, /* isa interface implementation */
1303 &ia32_gp_regs[REG_ESP], /* stack pointer register */
1304 &ia32_gp_regs[REG_EBP], /* base pointer register */
1305 -1, /* stack direction */
1307 NULL, /* 16bit register names */
1308 NULL, /* 8bit register names */
1312 IA32_OPT_INCDEC | /* optimize add 1, sub 1 into inc/dec default: on */
1313 IA32_OPT_DOAM | /* optimize address mode default: on */
1314 IA32_OPT_LEA | /* optimize for LEAs default: on */
1315 IA32_OPT_PLACECNST | /* place constants immediately before instructions, default: on */
1316 IA32_OPT_IMMOPS | /* operations can use immediates, default: on */
1317 IA32_OPT_EXTBB), /* use extended basic block scheduling, default: on */
1318 arch_pentium_4, /* instruction architecture */
1319 arch_pentium_4, /* optimize for architecture */
1320 fp_sse2, /* use sse2 unit */
1321 NULL, /* current code generator */
1323 NULL, /* name obstack */
1324 0 /* name obst size */
1329 * Initializes the backend ISA.
1331 static void *ia32_init(FILE *file_handle) {
1332 static int inited = 0;
1338 set_tarval_output_modes();
1340 isa = xmalloc(sizeof(*isa));
1341 memcpy(isa, &ia32_isa_template, sizeof(*isa));
1343 ia32_register_init(isa);
1344 ia32_create_opcodes();
1346 if ((ARCH_INTEL(isa->arch) && isa->arch < arch_pentium_4) ||
1347 (ARCH_AMD(isa->arch) && isa->arch < arch_athlon))
1348 /* no SSE2 for these CPUs */
1349 isa->fp_kind = fp_x87;
1351 if (ARCH_INTEL(isa->opt_arch) && isa->opt_arch >= arch_pentium_4) {
1352 /* the Pentium 4 doesn't like inc and dec instructions */
1353 isa->opt &= ~IA32_OPT_INCDEC;
1356 isa->regs_16bit = pmap_create();
1357 isa->regs_8bit = pmap_create();
1358 isa->types = pmap_create();
1359 isa->tv_ent = pmap_create();
1360 isa->out = file_handle;
1362 ia32_build_16bit_reg_map(isa->regs_16bit);
1363 ia32_build_8bit_reg_map(isa->regs_8bit);
1365 /* patch register names of x87 registers */
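/* (the assembler presumably expects the "st"/"st(N)" spelling rather than the
   default st0..st7 names) */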
1367 ia32_st_regs[0].name = "st";
1368 ia32_st_regs[1].name = "st(1)";
1369 ia32_st_regs[2].name = "st(2)";
1370 ia32_st_regs[3].name = "st(3)";
1371 ia32_st_regs[4].name = "st(4)";
1372 ia32_st_regs[5].name = "st(5)";
1373 ia32_st_regs[6].name = "st(6)";
1374 ia32_st_regs[7].name = "st(7)";
1378 isa->name_obst = xmalloc(sizeof(*isa->name_obst));
1379 obstack_init(isa->name_obst);
1380 isa->name_obst_size = 0;
1383 ia32_handle_intrinsics();
1384 ia32_switch_section(NULL, NO_SECTION);
1385 fprintf(isa->out, "\t.intel_syntax\n");
1395 * Closes the output file and frees the ISA structure.
1397 static void ia32_done(void *self) {
1398 ia32_isa_t *isa = self;
1400 /* now emit all global declarations */
1401 ia32_gen_decls(isa->out);
1403 pmap_destroy(isa->regs_16bit);
1404 pmap_destroy(isa->regs_8bit);
1405 pmap_destroy(isa->tv_ent);
1406 pmap_destroy(isa->types);
1409 //printf("name obst size = %d bytes\n", isa->name_obst_size);
1410 obstack_free(isa->name_obst, NULL);
1418 * Return the number of register classes for this architecture.
1419 * We always report these:
1420 * - the general purpose registers
1421 * - the SSE floating point register set
1422 * - the virtual floating point registers
1424 static int ia32_get_n_reg_class(const void *self) {
1429 * Return the register class for index i.
1431 static const arch_register_class_t *ia32_get_reg_class(const void *self, int i) {
1432 assert(i >= 0 && i < 3 && "Invalid ia32 register class requested.");
1434 return &ia32_reg_classes[CLASS_ia32_gp];
1436 return &ia32_reg_classes[CLASS_ia32_xmm];
1438 return &ia32_reg_classes[CLASS_ia32_vfp];
1442 * Get the register class which shall be used to store a value of a given mode.
1443 * @param self The this pointer.
1444 * @param mode The mode in question.
1445 * @return A register class which can hold values of the given mode.
1447 const arch_register_class_t *ia32_get_reg_class_for_mode(const void *self, const ir_mode *mode) {
1448 const ia32_isa_t *isa = self;
1449 if (mode_is_float(mode)) {
1450 return USE_SSE2(isa) ? &ia32_reg_classes[CLASS_ia32_xmm] : &ia32_reg_classes[CLASS_ia32_vfp];
1453 return &ia32_reg_classes[CLASS_ia32_gp];
1457 * Get the ABI restrictions for procedure calls.
1458 * @param self The this pointer.
1459 * @param method_type The type of the method (procedure) in question.
1460 * @param abi The abi object to be modified
1462 static void ia32_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi) {
1463 const ia32_isa_t *isa = self;
1466 unsigned cc = get_method_calling_convention(method_type);
1467 int n = get_method_n_params(method_type);
1470 int i, ignore_1, ignore_2;
1472 const arch_register_t *reg;
1473 be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);
1475 unsigned use_push = !IS_P6_ARCH(isa->opt_arch);
1477 /* set abi flags for calls */
1478 call_flags.bits.left_to_right = 0; /* always last arg first on stack */
1479 call_flags.bits.store_args_sequential = use_push;
1480 /* call_flags.bits.try_omit_fp not changed: can handle both settings */
1481 call_flags.bits.fp_free = 0; /* the frame pointer is fixed in IA32 */
1482 call_flags.bits.call_has_imm = 1; /* IA32 calls can have immediate address */
1484 /* set stack parameter passing style */
1485 be_abi_call_set_flags(abi, call_flags, &ia32_abi_callbacks);
1487 /* collect the mode for each type */
1488 modes = alloca(n * sizeof(modes[0]));
1490 for (i = 0; i < n; i++) {
1491 tp = get_method_param_type(method_type, i);
1492 modes[i] = get_type_mode(tp);
1495 /* set register parameters */
1496 if (cc & cc_reg_param) {
1497 /* determine the number of parameters passed via registers */
1498 biggest_n = ia32_get_n_regparam_class(n, modes, &ignore_1, &ignore_2);
1500 /* loop over all parameters and set the register requirements */
1501 for (i = 0; i <= biggest_n; i++) {
1502 reg = ia32_get_RegParam_reg(n, modes, i, cc);
1503 assert(reg && "kaputt");
1504 be_abi_call_param_reg(abi, i, reg);
1511 /* set stack parameters */
1512 for (i = stack_idx; i < n; i++) {
1513 be_abi_call_param_stack(abi, i, 1, 0, 0);
1517 /* set return registers */
1518 n = get_method_n_ress(method_type);
1520 assert(n <= 2 && "more than two results not supported");
1522 /* In case of 64bit returns, we will have two 32bit values */
1524 tp = get_method_res_type(method_type, 0);
1525 mode = get_type_mode(tp);
1527 assert(!mode_is_float(mode) && "two FP results not supported");
1529 tp = get_method_res_type(method_type, 1);
1530 mode = get_type_mode(tp);
1532 assert(!mode_is_float(mode) && "two FP results not supported");
1534 be_abi_call_res_reg(abi, 0, &ia32_gp_regs[REG_EAX]);
1535 be_abi_call_res_reg(abi, 1, &ia32_gp_regs[REG_EDX]);
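/* i.e. the two halves of a 64bit result are returned in the EAX/EDX register pair */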
1538 const arch_register_t *reg;
1540 tp = get_method_res_type(method_type, 0);
1541 assert(is_atomic_type(tp));
1542 mode = get_type_mode(tp);
1544 reg = mode_is_float(mode) ?
1545 (USE_SSE2(isa) ? &ia32_xmm_regs[REG_XMM0] : &ia32_vfp_regs[REG_VF0]) :
1546 &ia32_gp_regs[REG_EAX];
1548 be_abi_call_res_reg(abi, 0, reg);
1553 static const void *ia32_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn) {
1554 return &ia32_irn_ops;
1557 const arch_irn_handler_t ia32_irn_handler = {
1561 const arch_irn_handler_t *ia32_get_irn_handler(const void *self) {
1562 return &ia32_irn_handler;
1565 int ia32_to_appear_in_schedule(void *block_env, const ir_node *irn) {
1566 return is_ia32_irn(irn) ? 1 : -1;
1570 * Initializes the code generator interface.
1572 static const arch_code_generator_if_t *ia32_get_code_generator_if(void *self) {
1573 return &ia32_code_gen_if;
1576 list_sched_selector_t ia32_sched_selector;
1579 * Returns a list scheduler selector (currently the trivial selector) with to_appear_in_schedule() overloaded
1581 static const list_sched_selector_t *ia32_get_list_sched_selector(const void *self) {
1582 // memcpy(&ia32_sched_selector, reg_pressure_selector, sizeof(list_sched_selector_t));
1583 memcpy(&ia32_sched_selector, trivial_selector, sizeof(list_sched_selector_t));
1584 ia32_sched_selector.to_appear_in_schedule = ia32_to_appear_in_schedule;
1585 return &ia32_sched_selector;
1589 * Returns the necessary byte alignment for storing a register of a given class.
1591 static int ia32_get_reg_class_alignment(const void *self, const arch_register_class_t *cls) {
1592 ir_mode *mode = arch_register_class_mode(cls);
1593 int bytes = get_mode_size_bytes(mode);
1595 if (mode_is_float(mode) && bytes > 8)
1600 static ia32_intrinsic_env_t intrinsic_env = { NULL, NULL };
1603 * Returns the libFirm configuration parameters for this backend.
1605 static const backend_params *ia32_get_libfirm_params(void) {
1606 static const arch_dep_params_t ad = {
1607 1, /* also use subs */
1608 4, /* maximum shifts */
1609 31, /* maximum shift amount */
1611 1, /* allow Mulhs */
1612 1, /* allow Mulus */
1613 32 /* Mulh allowed up to 32 bit */
1615 static backend_params p = {
1616 NULL, /* no additional opcodes */
1617 NULL, /* will be set later */
1618 1, /* need dword lowering */
1619 ia32_create_intrinsic_fkt,
1620 &intrinsic_env, /* context for ia32_create_intrinsic_fkt */
1628 /* instruction set architectures. */
1629 static const lc_opt_enum_int_items_t arch_items[] = {
1630 { "386", arch_i386, },
1631 { "486", arch_i486, },
1632 { "pentium", arch_pentium, },
1633 { "586", arch_pentium, },
1634 { "pentiumpro", arch_pentium_pro, },
1635 { "686", arch_pentium_pro, },
1636 { "pentiummmx", arch_pentium_mmx, },
1637 { "pentium2", arch_pentium_2, },
1638 { "p2", arch_pentium_2, },
1639 { "pentium3", arch_pentium_3, },
1640 { "p3", arch_pentium_3, },
1641 { "pentium4", arch_pentium_4, },
1642 { "p4", arch_pentium_4, },
1643 { "pentiumm", arch_pentium_m, },
1644 { "pm", arch_pentium_m, },
1645 { "core", arch_core, },
1647 { "athlon", arch_athlon, },
1648 { "athlon64", arch_athlon_64, },
1649 { "opteron", arch_opteron, },
1653 static lc_opt_enum_int_var_t arch_var = {
1654 &ia32_isa_template.arch, arch_items
1657 static lc_opt_enum_int_var_t opt_arch_var = {
1658 &ia32_isa_template.opt_arch, arch_items
1661 static const lc_opt_enum_int_items_t fp_unit_items[] = {
1663 { "sse2", fp_sse2 },
1667 static lc_opt_enum_int_var_t fp_unit_var = {
1668 &ia32_isa_template.fp_kind, fp_unit_items
1671 static const lc_opt_enum_int_items_t gas_items[] = {
1672 { "linux", ASM_LINUX_GAS },
1673 { "mingw", ASM_MINGW_GAS },
1677 static lc_opt_enum_int_var_t gas_var = {
1678 (int *)&asm_flavour, gas_items
1681 static const lc_opt_table_entry_t ia32_options[] = {
1682 LC_OPT_ENT_ENUM_INT("arch", "select the instruction architecture", &arch_var),
1683 LC_OPT_ENT_ENUM_INT("opt", "optimize for instruction architecture", &opt_arch_var),
1684 LC_OPT_ENT_ENUM_INT("fpunit", "select the floating point unit", &fp_unit_var),
1685 LC_OPT_ENT_NEGBIT("noaddrmode", "do not use address mode", &ia32_isa_template.opt, IA32_OPT_DOAM),
1686 LC_OPT_ENT_NEGBIT("nolea", "do not optimize for LEAs", &ia32_isa_template.opt, IA32_OPT_LEA),
1687 LC_OPT_ENT_NEGBIT("noplacecnst", "do not place constants", &ia32_isa_template.opt, IA32_OPT_PLACECNST),
1688 LC_OPT_ENT_NEGBIT("noimmop", "no operations with immediates", &ia32_isa_template.opt, IA32_OPT_IMMOPS),
1689 LC_OPT_ENT_NEGBIT("noextbb", "do not use extended basic block scheduling", &ia32_isa_template.opt, IA32_OPT_EXTBB),
1690 LC_OPT_ENT_ENUM_INT("gasmode", "set the GAS compatibility mode", &gas_var),
1695 * Register command line options for the ia32 backend.
1699 * ia32-arch=arch generate instructions for arch
1700 * ia32-opt=arch optimize for running on arch
1701 * ia32-fpunit=unit select floating point unit (x87 or SSE2)
1702 * ia32-incdec optimize for inc/dec
1703 * ia32-noaddrmode do not use address mode
1704 * ia32-nolea do not optimize for LEAs
1705 * ia32-noplacecnst do not place constants
1706 * ia32-noimmop no operations with immediates
1707 * ia32-noextbb do not use extended basic block scheduling
1708 * ia32-gasmode set the GAS compatibility mode
1710 static void ia32_register_options(lc_opt_entry_t *ent)
1712 lc_opt_entry_t *be_grp_ia32 = lc_opt_get_grp(ent, "ia32");
1713 lc_opt_add_table(be_grp_ia32, ia32_options);
1715 #endif /* WITH_LIBCORE */
1717 const arch_isa_if_t ia32_isa_if = {
1720 ia32_get_n_reg_class,
1722 ia32_get_reg_class_for_mode,
1724 ia32_get_irn_handler,
1725 ia32_get_code_generator_if,
1726 ia32_get_list_sched_selector,
1727 ia32_get_reg_class_alignment,
1728 ia32_get_libfirm_params,
1730 ia32_register_options