/**
 * This is the main ia32 firm backend driver.
 * @author Christian Wuerdig
 */
#ifdef WITH_LIBCORE
#include <libcore/lc_opts.h>
#include <libcore/lc_opts_enum.h>
#endif /* WITH_LIBCORE */
#include "pseudo_irg.h"
#include "iredges_t.h"

#include "../beabi.h"             /* the general register allocator interface */
#include "../benode_t.h"
#include "../belower.h"
#include "../besched_t.h"
#include "../beirgmod.h"
#include "../be_dbgout.h"
#include "../beblocksched.h"
#include "bearch_ia32_t.h"

#include "ia32_new_nodes.h"       /* ia32 nodes interface */
#include "gen_ia32_regalloc_if.h" /* the generated interface (register type and class definitions) */
#include "ia32_gen_decls.h"       /* interface declaration emitter */
#include "ia32_transform.h"
#include "ia32_emitter.h"
#include "ia32_map_regs.h"
#include "ia32_optimize.h"
#include "ia32_dbg_stat.h"
#include "ia32_finish.h"
#include "ia32_util.h"
#define DEBUG_MODULE "firm.be.ia32.isa"

static set *cur_reg_set = NULL;
/* Creates the unique per irg GP NoReg node. */
ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg) {
	return be_abi_get_callee_save_irn(cg->birg->abi, &ia32_gp_regs[REG_GP_NOREG]);
}
/* Creates the unique per irg FP NoReg node. */
ir_node *ia32_new_NoReg_fp(ia32_code_gen_t *cg) {
	return be_abi_get_callee_save_irn(cg->birg->abi,
		USE_SSE2(cg) ? &ia32_xmm_regs[REG_XMM_NOREG] : &ia32_vfp_regs[REG_VFP_NOREG]);
}
/**
 * Returns gp_noreg or fp_noreg, depending on the input requirements.
 */
ir_node *ia32_get_admissible_noreg(ia32_code_gen_t *cg, ir_node *irn, int pos) {
	arch_register_req_t req;
	const arch_register_req_t *p_req;

	p_req = arch_get_register_req(cg->arch_env, &req, irn, pos);
	assert(p_req && "Missing register requirements");
	if (p_req->cls == &ia32_reg_classes[CLASS_ia32_gp])
		return ia32_new_NoReg_gp(cg);
	else
		return ia32_new_NoReg_fp(cg);
}
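/* Usage sketch (illustrative only, not code from this file): a transformer
 * that must leave an address operand empty plugs in the admissible NoReg,
 *
 *     set_irn_n(irn, pos, ia32_get_admissible_noreg(cg, irn, pos));
 *
 * so the register allocator sees a dummy value that is compatible with the
 * operand's register class.
 */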
/**************************************************
 *  _ __ ___  __ _   __ _| | | ___   ___  _| |_
 * | '__/ _ \/ _` | / _` | | |/ _ \ / __| | | _|
 * | |  |  __/ (_| | | (_| | | | (_) | (__ | | |
 * |_|   \___|\__, | \__,_|_|_|\___/ \___| |_|_|
 **************************************************/
/**
 * Return register requirements for an ia32 node.
 * If the node returns a tuple (mode_T) then the Projs
 * will be asked for this information.
 */
static const arch_register_req_t *ia32_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos) {
	const ia32_irn_ops_t      *ops = self;
	const ia32_register_req_t *irn_req;
	long                       node_pos = pos == -1 ? 0 : pos;
	ir_mode                   *mode     = is_Block(irn) ? NULL : get_irn_mode(irn);
	FIRM_DBG_REGISTER(firm_dbg_module_t *mod, DEBUG_MODULE);

	if (is_Block(irn) || mode == mode_M || mode == mode_X) {
		DBG((mod, LEVEL_1, "ignoring Block, mode_M, mode_X node %+F\n", irn));
		return NULL;
	}

	if (mode == mode_T && pos < 0) {
		DBG((mod, LEVEL_1, "ignoring request OUT requirements for node %+F\n", irn));
		return NULL;
	}

	DBG((mod, LEVEL_1, "get requirements at pos %d for %+F ... ", pos, irn));

	if (is_Proj(irn)) {
		if (pos >= 0) {
			DBG((mod, LEVEL_1, "ignoring request IN requirements for node %+F\n", irn));
			return NULL;
		}

		node_pos = (pos == -1) ? get_Proj_proj(irn) : pos;
		irn      = skip_Proj(irn);

		DB((mod, LEVEL_1, "skipping Proj, going to %+F at pos %d ... ", irn, node_pos));
	}

	if (is_ia32_irn(irn)) {
		irn_req = (pos >= 0) ? get_ia32_in_req(irn, pos) : get_ia32_out_req(irn, node_pos);

		DB((mod, LEVEL_1, "returning reqs for %+F at pos %d\n", irn, pos));

		memcpy(req, &(irn_req->req), sizeof(*req));

		if (arch_register_req_is(&(irn_req->req), should_be_same)) {
			assert(irn_req->same_pos >= 0 && "should be same constraint for in -> out NYI");
			req->other_same = get_irn_n(irn, irn_req->same_pos);
		}

		if (arch_register_req_is(&(irn_req->req), should_be_different)) {
			assert(irn_req->different_pos >= 0 && "should be different constraint for in -> out NYI");
			req->other_different = get_irn_n(irn, irn_req->different_pos);
		}
	}
	else {
		/* treat Unknowns like Const with default requirements */
		if (is_Unknown(irn)) {
			DB((mod, LEVEL_1, "returning UKNWN reqs for %+F\n", irn));
			if (mode_is_float(mode)) {
				if (USE_SSE2(ops->cg))
					memcpy(req, &(ia32_default_req_ia32_xmm_xmm_UKNWN), sizeof(*req));
				else
					memcpy(req, &(ia32_default_req_ia32_vfp_vfp_UKNWN), sizeof(*req));
			}
			else if (mode_is_int(mode) || mode_is_reference(mode))
				memcpy(req, &(ia32_default_req_ia32_gp_gp_UKNWN), sizeof(*req));
			else if (mode == mode_T || mode == mode_M) {
				DBG((mod, LEVEL_1, "ignoring Unknown node %+F\n", irn));
				return NULL;
			}
			else
				assert(0 && "unsupported Unknown-Mode");
		}
		else {
			DB((mod, LEVEL_1, "returning NULL for %+F (not ia32)\n", irn));
			req = NULL;
		}
	}

	return req;
}
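/* The should_be_same constraint handled above mirrors the two-address nature
 * of most ia32 instructions: "add eax, ebx" overwrites its first operand, so
 * the out requirement carries other_same pointing back at that input and the
 * allocator tries to give both the same register.
 */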
static void ia32_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg) {
	int                   pos = 0;
	const ia32_irn_ops_t *ops = self;

	if (get_irn_mode(irn) == mode_X) {
		return;
	}

	DBG((ops->cg->mod, LEVEL_1, "ia32 assigned register %s to node %+F\n", reg->name, irn));

	if (is_Proj(irn)) {
		pos = get_Proj_proj(irn);
		irn = skip_Proj(irn);
	}

	if (is_ia32_irn(irn)) {
		const arch_register_t **slots;

		slots      = get_ia32_slots(irn);
		slots[pos] = reg;
	}
	else {
		ia32_set_firm_reg(irn, reg, cur_reg_set);
	}
}
static const arch_register_t *ia32_get_irn_reg(const void *self, const ir_node *irn) {
	int                    pos = 0;
	const arch_register_t *reg = NULL;

	if (get_irn_mode(irn) == mode_X) {
		return NULL;
	}

	if (is_Proj(irn)) {
		pos = get_Proj_proj(irn);
		irn = skip_Proj(irn);
	}

	if (is_ia32_irn(irn)) {
		/* retrieve "real" x87 register */
		if (ia32_has_x87_register(irn))
			reg = get_ia32_attr(irn)->x87[pos + 2];
		else {
			const arch_register_t **slots;
			slots = get_ia32_slots(irn);
			reg   = slots[pos];
		}
	}
	else {
		reg = ia32_get_firm_reg(irn, cur_reg_set);
	}

	return reg;
}
static arch_irn_class_t ia32_classify(const void *self, const ir_node *irn) {
	arch_irn_class_t classification = arch_irn_class_normal;

	irn = skip_Proj(irn);

	if (is_cfop(irn))
		classification |= arch_irn_class_branch;

	if (! is_ia32_irn(irn))
		return classification & ~arch_irn_class_normal;

	if (is_ia32_Cnst(irn))
		classification |= arch_irn_class_const;

	if (is_ia32_Ld(irn))
		classification |= arch_irn_class_load;

	if (is_ia32_St(irn) || is_ia32_Store8Bit(irn))
		classification |= arch_irn_class_store;

	if (is_ia32_got_reload(irn))
		classification |= arch_irn_class_reload;

	return classification;
}
static arch_irn_flags_t ia32_get_flags(const void *self, const ir_node *irn) {
	if (is_Proj(irn)) {
		ir_node *pred    = get_Proj_pred(irn);
		int      ia32_op = get_ia32_irn_opcode(pred);
		long     proj    = get_Proj_proj(irn);
		if (iro_ia32_Push == ia32_op && proj == pn_ia32_Push_stack) {
			/* Push always modifies ESP, this cannot be changed */
			return arch_irn_flags_modify_sp | arch_irn_flags_ignore;
		}
		if (iro_ia32_Pop == ia32_op && proj == pn_ia32_Pop_stack) {
			/* Pop always modifies ESP, this cannot be changed */
			return arch_irn_flags_modify_sp | arch_irn_flags_ignore;
		}
		if (iro_ia32_AddSP == ia32_op && proj == pn_ia32_AddSP_stack) {
			/* AddSP always modifies ESP, this cannot be changed */
			return arch_irn_flags_modify_sp | arch_irn_flags_ignore;
		}
		if (iro_ia32_SubSP == ia32_op && proj == pn_ia32_SubSP_stack) {
			/* SubSP always modifies ESP, this cannot be changed */
			return arch_irn_flags_modify_sp | arch_irn_flags_ignore;
		}
	}

	irn = skip_Proj(irn);
	if (is_ia32_irn(irn))
		return get_ia32_flags(irn);
	else {
		if (is_Unknown(irn))
			return arch_irn_flags_ignore;
		return 0;
	}
}
/**
 * The IA32 ABI callback object.
 */
typedef struct {
	be_abi_call_flags_bits_t flags;  /**< The call flags. */
	const arch_isa_t        *isa;    /**< The ISA handle. */
	const arch_env_t        *aenv;   /**< The architecture environment. */
	ir_graph                *irg;    /**< The associated graph. */
} ia32_abi_env_t;
static entity *ia32_get_frame_entity(const void *self, const ir_node *irn) {
	return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
}

static void ia32_set_frame_entity(const void *self, ir_node *irn, entity *ent) {
	set_ia32_frame_ent(irn, ent);
}
static void ia32_set_frame_offset(const void *self, ir_node *irn, int bias) {
	char buf[64];
	const ia32_irn_ops_t *ops = self;

	if (get_ia32_frame_ent(irn)) {
		ia32_am_flavour_t am_flav = get_ia32_am_flavour(irn);

		if (is_ia32_Pop(irn)) {
			int omit_fp = be_abi_omit_fp(ops->cg->birg->abi);
			if (omit_fp) {
				/* Pop nodes modify the stack pointer before calculating the
				 * destination address, so fix this here
				 */
				bias -= 4;
			}
		}

		DBG((ops->cg->mod, LEVEL_1, "stack biased %+F with %d\n", irn, bias));

		snprintf(buf, sizeof(buf), "%d", bias);

		if (get_ia32_op_type(irn) == ia32_Normal) {
			set_ia32_cnst(irn, buf);
		}
		else {
			add_ia32_am_offs(irn, buf);
			am_flav |= ia32_O;
			set_ia32_am_flavour(irn, am_flav);
		}
	}
}
static int ia32_get_sp_bias(const void *self, const ir_node *irn) {
	if (is_Proj(irn)) {
		long     proj = get_Proj_proj(irn);
		ir_node *pred = get_Proj_pred(irn);

		if (proj == pn_ia32_Push_stack && is_ia32_Push(pred))
			return 4;
		if (proj == pn_ia32_Pop_stack && is_ia32_Pop(pred))
			return -4;
	}

	return 0;
}
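/* Sign convention (assumed from the be framework): a positive bias means the
 * node moves esp downwards (Push reserves 4 bytes), a negative one that it
 * releases stack space (Pop frees 4 bytes); all other nodes report 0.
 */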
/**
 * Put all registers which are saved by the prologue/epilogue in a set.
 *
 * @param self The callback object.
 * @param s    The result set.
 */
static void ia32_abi_dont_save_regs(void *self, pset *s)
{
	ia32_abi_env_t *env = self;
	if (env->flags.try_omit_fp)
		pset_insert_ptr(s, env->isa->bp);
}
/**
 * Generate the routine prologue.
 *
 * @param self    The callback object.
 * @param mem     A pointer to the mem node. Update this if you define new memory.
 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
 *
 * @return The register which shall be used as a stack frame base.
 *
 * All nodes which define registers in @p reg_map must keep @p reg_map current.
 */
static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map)
{
	ia32_abi_env_t *env = self;

	if (! env->flags.try_omit_fp) {
		ir_node *bl      = get_irg_start_block(env->irg);
		ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
		ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);
		ir_node *noreg   = be_abi_reg_map_get(reg_map, &ia32_gp_regs[REG_GP_NOREG]);
		ir_node *push;

		/* push ebp */
		push    = new_rd_ia32_Push(NULL, env->irg, bl, noreg, noreg, curr_bp, curr_sp, *mem);
		curr_sp = new_r_Proj(env->irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
		*mem    = new_r_Proj(env->irg, bl, push, mode_M, pn_ia32_Push_M);

		/* the push must have SP out register */
		arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
		set_ia32_flags(push, arch_irn_flags_ignore);

		/* move esp to ebp */
		curr_bp = be_new_Copy(env->isa->bp->reg_class, env->irg, bl, curr_sp);
		be_set_constr_single_reg(curr_bp, BE_OUT_POS(0), env->isa->bp);
		arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
		be_node_set_flags(curr_bp, BE_OUT_POS(0), arch_irn_flags_ignore);

		/* beware: the copy must be done before any other sp use */
		curr_sp = be_new_CopyKeep_single(env->isa->sp->reg_class, env->irg, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
		be_set_constr_single_reg(curr_sp, BE_OUT_POS(0), env->isa->sp);
		arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
		be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore);

		be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
		be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);

		return env->isa->bp;
	}

	return env->isa->sp;
}
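/* The node sequence built in ia32_abi_prologue corresponds to the classic
 * frame setup (machine code sketch, the emitter picks the final encoding):
 *
 *     push ebp          ; save the caller's frame pointer
 *     mov  ebp, esp     ; establish the new frame base
 *
 * The CopyKeep ties the new esp value to curr_bp so that no esp user can be
 * scheduled before the copy.
 */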
/**
 * Generate the routine epilogue.
 * @param self    The callback object.
 * @param bl      The block for the epilogue.
 * @param mem     A pointer to the mem node. Update this if you define new memory.
 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
 *
 * All nodes which define registers in @p reg_map must keep @p reg_map current.
 */
static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map)
{
	ia32_abi_env_t *env     = self;
	ir_node        *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
	ir_node        *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);

	if (env->flags.try_omit_fp) {
		/* simply remove the stack frame here */
		curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK);
		add_irn_dep(curr_sp, *mem);
	}
	else {
		const ia32_isa_t *isa     = (ia32_isa_t *)env->isa;
		ir_mode          *mode_bp = env->isa->bp->reg_class->mode;

		/* gcc always emits a leave at the end of a routine */
		if (1 || ARCH_AMD(isa->opt_arch)) {
			ir_node *leave;

			/* leave */
			leave   = new_rd_ia32_Leave(NULL, env->irg, bl, curr_sp, curr_bp);
			set_ia32_flags(leave, arch_irn_flags_ignore);
			curr_bp = new_r_Proj(current_ir_graph, bl, leave, mode_bp, pn_ia32_Leave_frame);
			curr_sp = new_r_Proj(current_ir_graph, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
			*mem    = new_r_Proj(current_ir_graph, bl, leave, mode_M, pn_ia32_Leave_M);
		}
		else {
			ir_node *noreg = be_abi_reg_map_get(reg_map, &ia32_gp_regs[REG_GP_NOREG]);
			ir_node *pop;

			/* copy ebp to esp */
			curr_sp = be_new_SetSP(env->isa->sp, env->irg, bl, curr_sp, curr_bp, *mem);

			/* pop ebp */
			pop     = new_rd_ia32_Pop(NULL, env->irg, bl, noreg, noreg, curr_sp, *mem);
			set_ia32_flags(pop, arch_irn_flags_ignore);
			curr_bp = new_r_Proj(current_ir_graph, bl, pop, mode_bp, pn_ia32_Pop_res);
			curr_sp = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
			*mem    = new_r_Proj(current_ir_graph, bl, pop, mode_M, pn_ia32_Pop_M);
		}
		arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
		arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
	}

	be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
	be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);
}
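/* Epilogue shape (sketch): with a frame pointer the graph models either
 *
 *     leave             ; equals mov esp, ebp / pop ebp
 * or
 *     mov  esp, ebp
 *     pop  ebp
 *
 * while the omit-fp variant only shrinks the stack with an IncSP node.
 */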
/**
 * Initialize the callback object.
 * @param call The call object.
 * @param aenv The architecture environment.
 * @param irg  The graph with the method.
 * @return Some pointer. This pointer is passed to all other callback functions as self object.
 */
static void *ia32_abi_init(const be_abi_call_t *call, const arch_env_t *aenv, ir_graph *irg)
{
	ia32_abi_env_t *env    = xmalloc(sizeof(env[0]));
	be_abi_call_flags_t fl = be_abi_call_get_flags(call);
	env->flags = fl.bits;
	env->irg   = irg;
	env->aenv  = aenv;
	env->isa   = aenv->isa;
	return env;
}
/**
 * Destroy the callback object.
 * @param self The callback object.
 */
static void ia32_abi_done(void *self) {
	free(self);
}
/**
 * Produces the type which sits between the stack args and the locals on the stack.
 * It will contain the return address and space to store the old base pointer.
 * @return The Firm type modeling the ABI between type.
 */
static ir_type *ia32_abi_get_between_type(void *self)
{
#define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
	static ir_type *omit_fp_between_type = NULL;
	static ir_type *between_type         = NULL;

	ia32_abi_env_t *env = self;

	if (!between_type) {
		entity *old_bp_ent;
		entity *ret_addr_ent;
		entity *omit_fp_ret_addr_ent;

		ir_type *old_bp_type   = new_type_primitive(IDENT("bp"), mode_P);
		ir_type *ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_P);

		between_type = new_type_struct(IDENT("ia32_between_type"));
		old_bp_ent   = new_entity(between_type, IDENT("old_bp"), old_bp_type);
		ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type);

		set_entity_offset_bytes(old_bp_ent, 0);
		set_entity_offset_bytes(ret_addr_ent, get_type_size_bytes(old_bp_type));
		set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
		set_type_state(between_type, layout_fixed);

		omit_fp_between_type = new_type_struct(IDENT("ia32_between_type_omit_fp"));
		omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type);

		set_entity_offset_bytes(omit_fp_ret_addr_ent, 0);
		set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
		set_type_state(omit_fp_between_type, layout_fixed);
	}

	return env->flags.try_omit_fp ? omit_fp_between_type : between_type;
}
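/* Resulting layout of the between type (offsets as set above):
 *
 *     +----------+  offset 0: old_bp   (saved caller frame pointer, 4 bytes)
 *     +----------+  offset 4: ret_addr (pushed by the call instruction, 4 bytes)
 *
 * The omit-fp variant contains only the ret_addr slot.
 */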
/**
 * Get the estimated cycle count for @p irn.
 *
 * @param self The this pointer.
 * @param irn  The node.
 *
 * @return The estimated cycle count for this operation.
 */
static int ia32_get_op_estimated_cost(const void *self, const ir_node *irn)
{
	int cost;
	ia32_op_type_t op_tp;
	const ia32_irn_ops_t *ops = self;

	if (is_Proj(irn))
		return 0;

	assert(is_ia32_irn(irn));

	cost  = get_ia32_latency(irn);
	op_tp = get_ia32_op_type(irn);

	if (is_ia32_CopyB(irn)) {
		cost = 250;
		if (ARCH_INTEL(ops->cg->arch))
			cost += 150;
	}
	else if (is_ia32_CopyB_i(irn)) {
		int size = get_tarval_long(get_ia32_Immop_tarval(irn));
		cost     = 20 + (int)ceil((4.0 / 3.0) * size);
		if (ARCH_INTEL(ops->cg->arch))
			cost += 150;
	}
	/* in case of address mode operations add additional cycles */
	else if (op_tp == ia32_AddrModeD || op_tp == ia32_AddrModeS) {
		/*
			In case of stack access add 5 cycles (we assume stack is in cache),
			other memory operations cost 20 cycles.
		 */
		cost += is_ia32_use_frame(irn) ? 5 : 20;
	}

	return cost;
}
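/* Worked example for the immediate CopyB case above: a 24 byte copy costs
 * 20 + ceil(4/3 * 24) = 20 + 32 = 52 cycles (plus the extra penalty on Intel
 * CPUs), while folding a frame access via address mode only adds 5 cycles.
 */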
/**
 * Returns the inverse operation of @p irn, recalculating the argument at position @p i.
 *
 * @param irn     The original operation
 * @param i       Index of the argument we want the inverse operation to yield
 * @param inverse struct to be filled with the resulting inverse op
 * @param obstack The obstack to use for allocation of the returned nodes array
 * @return The inverse operation or NULL if the operation is not invertible
 */
static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) {
	ir_graph *irg;
	ir_mode  *mode;
	ir_node  *block, *noreg, *nomem;
	int       pnc;

	/* we cannot invert non-ia32 irns */
	if (! is_ia32_irn(irn))
		return NULL;

	/* operand must always be a real operand (not base, index or mem) */
	if (i != 2 && i != 3)
		return NULL;

	/* we don't invert address mode operations */
	if (get_ia32_op_type(irn) != ia32_Normal)
		return NULL;

	irg   = get_irn_irg(irn);
	block = get_nodes_block(irn);
	mode  = get_ia32_res_mode(irn);
	noreg = get_irn_n(irn, 0);
	nomem = new_r_NoMem(irg);

	/* initialize structure */
	inverse->nodes = obstack_alloc(obst, 2 * sizeof(inverse->nodes[0]));
	inverse->costs = 0;
	inverse->n     = 2;

	switch (get_ia32_irn_opcode(irn)) {
		case iro_ia32_Add:
			if (get_ia32_immop_type(irn) == ia32_ImmConst) {
				/* we have an add with a const here */
				/* inverse == add with negated const */
				inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
				pnc               = pn_ia32_Add_res;

				copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
				set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
				set_ia32_commutative(inverse->nodes[0]);
			}
			else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
				/* we have an add with a symconst here */
				/* inverse == sub with const */
				inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
				pnc               = pn_ia32_Sub_res;

				copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
			}
			else {
				/* normal add: inverse == sub */
				ir_node *proj = ia32_get_res_proj(irn);
				assert(proj);

				inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, i ^ 1), nomem);
				pnc               = pn_ia32_Sub_res;
			}
			break;
		case iro_ia32_Sub:
			if (get_ia32_immop_type(irn) != ia32_ImmNone) {
				/* we have a sub with a const/symconst here */
				/* inverse == add with this const */
				inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
				pnc               = pn_ia32_Add_res;
				inverse->costs    += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
				copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
			}
			else {
				/* normal sub */
				ir_node *proj = ia32_get_res_proj(irn);
				assert(proj);

				if (i == 2) {
					inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, 3), nomem);
				}
				else {
					inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, 2), proj, nomem);
				}
				pnc = pn_ia32_Sub_res;
			}
			break;
		case iro_ia32_Eor:
			if (get_ia32_immop_type(irn) != ia32_ImmNone) {
				/* xor with const: inverse == xor */
				inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
				pnc               = pn_ia32_Eor_res;
				inverse->costs    += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
				copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
			}
			else {
				/* normal xor */
				inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, (ir_node *)irn, get_irn_n(irn, i), nomem);
				pnc               = pn_ia32_Eor_res;
			}
			break;
		case iro_ia32_Not: {
			ir_node *proj = ia32_get_res_proj(irn);
			assert(proj);

			inverse->nodes[0] = new_rd_ia32_Not(NULL, irg, block, noreg, noreg, proj, nomem);
			pnc               = pn_ia32_Not_res;
			break;
		}
		case iro_ia32_Minus: {
			ir_node *proj = ia32_get_res_proj(irn);
			assert(proj);

			inverse->nodes[0] = new_rd_ia32_Minus(NULL, irg, block, noreg, noreg, proj, nomem);
			pnc               = pn_ia32_Minus_res;
			break;
		}
		default:
			/* inverse operation not supported */
			return NULL;
	}

	set_ia32_res_mode(inverse->nodes[0], mode);
	inverse->nodes[1] = new_r_Proj(irg, block, inverse->nodes[0], mode, pnc);

	return inverse;
}
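/* Example of what an inverse is good for (sketch): given x = a + 4, the
 * iro_ia32_Add/ia32_ImmConst case lets the backend recompute a as x + (-4),
 * i.e. an Add with the negated immediate, instead of keeping a alive.
 */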
/**
 * Check if irn can load its operand at position i from memory (source addressmode).
 * @param self Pointer to irn ops itself
 * @param irn  The irn to be checked
 * @param i    The operand's position
 * @return Non-zero if the operand can be loaded
 */
static int ia32_possible_memory_operand(const void *self, const ir_node *irn, unsigned int i) {
	if (! is_ia32_irn(irn)                            ||  /* must be an ia32 irn */
		get_irn_arity(irn) != 5                       ||  /* must be a binary operation */
		get_ia32_op_type(irn) != ia32_Normal          ||  /* must not already be an addressmode irn */
		! (get_ia32_am_support(irn) & ia32_am_Source) ||  /* must be capable of source addressmode */
		(i != 2 && i != 3)                            ||  /* a "real" operand position must be requested */
		(i == 2 && ! is_ia32_commutative(irn))        ||  /* if the first operand is requested, irn must be commutative */
		is_ia32_use_frame(irn))                           /* must not already use the frame */
		return 0;

	return 1;
}
static void ia32_perform_memory_operand(const void *self, ir_node *irn, ir_node *spill, unsigned int i) {
	const ia32_irn_ops_t *ops = self;
	ia32_code_gen_t      *cg  = ops->cg;

	assert(ia32_possible_memory_operand(self, irn, i) && "Cannot perform memory operand change");

	if (i == 2) {
		/* irn is commutative here, move the value to be loaded to position 3 */
		ir_node *tmp = get_irn_n(irn, 3);
		set_irn_n(irn, 3, get_irn_n(irn, 2));
		set_irn_n(irn, 2, tmp);
	}

	set_ia32_am_support(irn, ia32_am_Source);
	set_ia32_op_type(irn, ia32_AddrModeS);
	set_ia32_am_flavour(irn, ia32_B);
	set_ia32_ls_mode(irn, get_irn_mode(get_irn_n(irn, i)));
	set_ia32_use_frame(irn);
	set_ia32_got_reload(irn);

	set_irn_n(irn, 0, get_irg_frame(get_irn_irg(irn)));
	set_irn_n(irn, 4, spill);

	/* the now unused value input must be a NoReg that is admissible for the
	   operand's register class; fetch it via the helper above */
	set_irn_n(irn, 3, ia32_get_admissible_noreg(cg, irn, 3));

	//FIXME DBG_OPT_AM_S(reload, irn);
}
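/* Effect (sketch): the reloaded operand is folded into the instruction, so
 * instead of "mov eax, [ebp-8]" followed by "add ebx, eax" the emitter can
 * produce "add ebx, [ebp-8]" directly, saving one instruction and one
 * register.
 */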
static const be_abi_callbacks_t ia32_abi_callbacks = {
	ia32_abi_init,
	ia32_abi_done,
	ia32_abi_get_between_type,
	ia32_abi_dont_save_regs,
	ia32_abi_prologue,
	ia32_abi_epilogue,
};
799 static const arch_irn_ops_if_t ia32_irn_ops_if = {
800 ia32_get_irn_reg_req,
805 ia32_get_frame_entity,
806 ia32_set_frame_entity,
807 ia32_set_frame_offset,
810 ia32_get_op_estimated_cost,
811 ia32_possible_memory_operand,
812 ia32_perform_memory_operand,
815 ia32_irn_ops_t ia32_irn_ops = {
/**************************************************
 *   ___ ___   __| | ___  __ _  ___ _ __   _| |_
 *  / __/ _ \ / _` |/ _ \/ _` |/ _ \ '_ \ | | _|
 * | (_| (_) | (_| |  __/ (_| |  __/ | | || | |
 *  \___\___/ \__,_|\___|\__, |\___|_| |_||_|_|
 **************************************************/
static void ia32_kill_convs(ia32_code_gen_t *cg) {
	ir_node *irn;

	/* BEWARE: the Projs are inserted in the set */
	foreach_nodeset(cg->kill_conv, irn) {
		ir_node *in = get_irn_n(get_Proj_pred(irn), 2);
		edges_reroute(irn, in, cg->birg->irg);
	}
}
/**
 * Transform the Thread Local Storage base.
 */
static void transform_tls(ir_graph *irg) {
	ir_node *irn = get_irg_tls(irg);
	if (irn) {
		dbg_info *dbg = get_irn_dbg_info(irn);
		ir_node  *blk = get_nodes_block(irn);
		ir_node  *newn;

		newn = new_rd_ia32_LdTls(dbg, irg, blk, get_irn_mode(irn));

		exchange(irn, newn);
	}
}
/**
 * Transforms the standard firm graph into
 * an ia32 firm graph.
 */
static void ia32_prepare_graph(void *self) {
	ia32_code_gen_t *cg = self;
	dom_front_info_t *dom;
	DEBUG_ONLY(firm_dbg_module_t *old_mod = cg->mod;)

	FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.transform");

	/* 1st: transform constants and psi condition trees */
	ia32_pre_transform_phase(cg);

	/* 2nd: transform all remaining nodes */
	ia32_register_transformers();
	dom = be_compute_dominance_frontiers(cg->irg);

	cg->kill_conv = new_nodeset(5);
	transform_tls(cg->irg);
	irg_walk_blkwise_graph(cg->irg, NULL, ia32_transform_node, cg);
	ia32_kill_convs(cg);
	del_nodeset(cg->kill_conv);

	be_free_dominance_frontiers(dom);

	if (cg->dump)
		be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);

	/* 3rd: optimize address mode */
	FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.am");
	ia32_optimize_addressmode(cg);

	if (cg->dump)
		be_dump(cg->irg, "-am", dump_ir_block_graph_sched);

	DEBUG_ONLY(cg->mod = old_mod;)
}
/**
 * Dummy functions for hooks we don't need but which must be filled.
 */
static void ia32_before_sched(void *self) {
}
static void remove_unused_nodes(ir_node *irn, bitset_t *already_visited) {
	int      i, arity;
	ir_mode *mode;
	ir_node *mem_proj = NULL;

	if (is_Block(irn))
		return;

	mode = get_irn_mode(irn);

	/* check if we already saw this node or the node has more than one user */
	if (bitset_contains_irn(already_visited, irn) || get_irn_n_edges(irn) > 1) {
		return;
	}

	/* mark irn visited */
	bitset_add_irn(already_visited, irn);

	/* non-Tuple nodes with one user: ok, return */
	if (get_irn_n_edges(irn) >= 1 && mode != mode_T) {
		return;
	}

	/* tuple node has one user which is not the mem proj -> ok */
	if (mode == mode_T && get_irn_n_edges(irn) == 1) {
		mem_proj = ia32_get_proj_for_mode(irn, mode_M);
		if (mem_proj == NULL) {
			return;
		}
	}

	arity = get_irn_arity(irn);
	for (i = 0; i < arity; ++i) {
		ir_node *pred = get_irn_n(irn, i);

		/* do not follow memory edges or we will accidentally remove stores */
		if (get_irn_mode(pred) == mode_M) {
			if (mem_proj != NULL) {
				edges_reroute(mem_proj, pred, get_irn_irg(mem_proj));
				mem_proj = NULL;
			}
			continue;
		}

		set_irn_n(irn, i, new_Bad());

		/*
			The current node is about to be removed: if the predecessor
			has only this node as user, it needs to be removed as well.
		 */
		if (get_irn_n_edges(pred) <= 1)
			remove_unused_nodes(pred, already_visited);
	}

	// we need to set the preds to Bad again to also get the memory edges
	arity = get_irn_arity(irn);
	for (i = 0; i < arity; ++i) {
		set_irn_n(irn, i, new_Bad());
	}

	if (sched_is_scheduled(irn)) {
		sched_remove(irn);
	}
}
static void remove_unused_loads_walker(ir_node *irn, void *env) {
	bitset_t *already_visited = env;
	if (is_ia32_Ld(irn) && ! bitset_contains_irn(already_visited, irn))
		remove_unused_nodes(irn, env);
}
/**
 * Called before the register allocator.
 * Calculate a block schedule here. We need it for the x87
 * simulator and the emitter.
 */
static void ia32_before_ra(void *self) {
	ia32_code_gen_t *cg              = self;
	bitset_t        *already_visited = bitset_irg_alloca(cg->irg);

	/*
		There are sometimes unused loads, only pinned by memory.
		We need to remove those Loads and all other nodes which won't be used
		after removing the Load from the schedule.
	 */
	irg_walk_graph(cg->irg, NULL, remove_unused_loads_walker, already_visited);
}
/**
 * Transforms a be node into a Load.
 */
static void transform_to_Load(ia32_transform_env_t *env) {
	ir_node *irn         = env->irn;
	entity  *ent         = be_get_frame_entity(irn);
	ir_mode *mode        = env->mode;
	ir_node *noreg       = ia32_new_NoReg_gp(env->cg);
	ir_node *nomem       = new_rd_NoMem(env->irg);
	ir_node *sched_point = NULL;
	ir_node *ptr         = get_irn_n(irn, 0);
	ir_node *mem         = be_is_Reload(irn) ? get_irn_n(irn, 1) : nomem;
	ir_node *new_op, *proj;
	const arch_register_t *reg;

	if (sched_is_scheduled(irn)) {
		sched_point = sched_prev(irn);
	}

	if (mode_is_float(mode)) {
		if (USE_SSE2(env->cg))
			new_op = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, ptr, noreg, mem);
		else
			new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem);
	}
	else {
		new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem);
	}

	set_ia32_am_support(new_op, ia32_am_Source);
	set_ia32_op_type(new_op, ia32_AddrModeS);
	set_ia32_am_flavour(new_op, ia32_B);
	set_ia32_ls_mode(new_op, mode);
	set_ia32_frame_ent(new_op, ent);
	set_ia32_use_frame(new_op);

	DBG_OPT_RELOAD2LD(irn, new_op);

	proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, pn_ia32_Load_res);

	if (sched_point) {
		sched_add_after(sched_point, new_op);
		sched_add_after(new_op, proj);

		sched_remove(irn);
	}

	/* copy the register from the old node to the new Load */
	reg = arch_get_irn_register(env->cg->arch_env, irn);
	arch_set_irn_register(env->cg->arch_env, new_op, reg);

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, irn));

	exchange(irn, proj);
}
/**
 * Transforms a be node into a Store.
 */
static void transform_to_Store(ia32_transform_env_t *env) {
	ir_node *irn   = env->irn;
	entity  *ent   = be_get_frame_entity(irn);
	ir_mode *mode  = env->mode;
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *nomem = new_rd_NoMem(env->irg);
	ir_node *ptr   = get_irn_n(irn, 0);
	ir_node *val   = get_irn_n(irn, 1);
	ir_node *new_op, *proj;
	ir_node *sched_point = NULL;

	if (sched_is_scheduled(irn)) {
		sched_point = sched_prev(irn);
	}

	if (mode_is_float(mode)) {
		if (USE_SSE2(env->cg))
			new_op = new_rd_ia32_xStore(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
		else
			new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
	}
	else if (get_mode_size_bits(mode) == 8) {
		new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
	}
	else {
		new_op = new_rd_ia32_Store(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
	}

	set_ia32_am_support(new_op, ia32_am_Dest);
	set_ia32_op_type(new_op, ia32_AddrModeD);
	set_ia32_am_flavour(new_op, ia32_B);
	set_ia32_ls_mode(new_op, mode);
	set_ia32_frame_ent(new_op, ent);
	set_ia32_use_frame(new_op);

	DBG_OPT_SPILL2ST(irn, new_op);

	proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode_M, pn_ia32_Store_M);

	if (sched_point) {
		sched_add_after(sched_point, new_op);
		sched_remove(irn);
	}

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, irn));

	exchange(irn, proj);
}
static ir_node *create_push(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, ir_node *mem, entity *ent) {
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *frame = get_irg_frame(env->irg);

	ir_node *push = new_rd_ia32_Push(env->dbg, env->irg, env->block, frame, noreg, noreg, sp, mem);

	set_ia32_frame_ent(push, ent);
	set_ia32_use_frame(push);
	set_ia32_op_type(push, ia32_AddrModeS);
	set_ia32_am_flavour(push, ia32_B);
	set_ia32_ls_mode(push, mode_Is);

	sched_add_before(schedpoint, push);
	return push;
}
static ir_node *create_pop(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, entity *ent) {
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *frame = get_irg_frame(env->irg);

	ir_node *pop = new_rd_ia32_Pop(env->dbg, env->irg, env->block, frame, noreg, sp, new_NoMem());

	set_ia32_frame_ent(pop, ent);
	set_ia32_use_frame(pop);
	set_ia32_op_type(pop, ia32_AddrModeD);
	set_ia32_am_flavour(pop, ia32_B);
	set_ia32_ls_mode(pop, mode_Is);

	sched_add_before(schedpoint, pop);
	return pop;
}
static ir_node *create_spproj(ia32_transform_env_t *env, ir_node *pred, int pos, ir_node *schedpoint) {
	ir_mode *spmode = mode_Iu;
	const arch_register_t *spreg = &ia32_gp_regs[REG_ESP];
	ir_node *sp;

	sp = new_rd_Proj(env->dbg, env->irg, env->block, pred, spmode, pos);
	arch_set_irn_register(env->cg->arch_env, sp, spreg);
	sched_add_before(schedpoint, sp);
	return sp;
}
/**
 * Transform MemPerm, currently we do this the ugly way and produce
 * push/pop into/from memory cascades. This is possible without using
 * any registers.
 */
static void transform_MemPerm(ia32_transform_env_t *env) {
	ir_node *node = env->irn;
	int      i, arity;
	ir_node *sp = be_abi_get_ignore_irn(env->cg->birg->abi, &ia32_gp_regs[REG_ESP]);
	const ir_edge_t *edge;
	const ir_edge_t *next;
	ir_node **pops;

	arity = be_get_MemPerm_entity_arity(node);
	pops  = alloca(arity * sizeof(pops[0]));

	// create pushs
	for (i = 0; i < arity; ++i) {
		entity  *ent     = be_get_MemPerm_in_entity(node, i);
		ir_type *enttype = get_entity_type(ent);
		int      entbits = get_type_size_bits(enttype);
		ir_node *mem     = get_irn_n(node, i + 1);
		ir_node *push;

		assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");

		push = create_push(env, node, sp, mem, ent);
		sp   = create_spproj(env, push, 0, node);
		if (entbits == 64) {
			// add another push after the first one
			push = create_push(env, node, sp, mem, ent);
			add_ia32_am_offs_int(push, 4);
			sp = create_spproj(env, push, 0, node);
		}

		set_irn_n(node, i, new_Bad());
	}

	// create pops
	for (i = arity - 1; i >= 0; --i) {
		entity  *ent     = be_get_MemPerm_out_entity(node, i);
		ir_type *enttype = get_entity_type(ent);
		int      entbits = get_type_size_bits(enttype);
		ir_node *pop;

		assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");

		pop = create_pop(env, node, sp, ent);
		if (entbits == 64) {
			// add another pop after the first one
			sp  = create_spproj(env, pop, 1, node);
			pop = create_pop(env, node, sp, ent);
			add_ia32_am_offs_int(pop, 4);
		}
		sp = create_spproj(env, pop, 1, node);

		pops[i] = pop;
	}

	// exchange memprojs
	foreach_out_edge_safe(node, edge, next) {
		ir_node *proj = get_edge_src_irn(edge);
		int p = get_Proj_proj(proj);

		assert(p < arity);

		set_Proj_pred(proj, pops[p]);
		set_Proj_proj(proj, 3);
	}

	// remove memperm
	arity = get_irn_arity(node);
	for (i = 0; i < arity; ++i) {
		set_irn_n(node, i, new_Bad());
	}
	sched_remove(node);
}
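/* Example (sketch): a MemPerm swapping two 32 bit spill slots A and B becomes
 * the cascade
 *
 *     push [A]
 *     push [B]
 *     pop  [A]
 *     pop  [B]
 *
 * which permutes the slots through the stack without clobbering a register.
 */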
/**
 * Fix the mode of Spill/Reload.
 */
static ir_mode *fix_spill_mode(ia32_code_gen_t *cg, ir_mode *mode)
{
	if (mode_is_float(mode)) {
		return USE_SSE2(cg) ? mode_D : mode_E;
	}
	else
		return mode_Is;
}
/**
 * Block-Walker: Calls the transform functions Spill and Reload.
 */
static void ia32_after_ra_walker(ir_node *block, void *env) {
	ir_node *node, *prev;
	ia32_code_gen_t *cg = env;
	ia32_transform_env_t tenv;

	tenv.block = block;
	tenv.irg   = current_ir_graph;
	tenv.cg    = cg;
	DEBUG_ONLY(tenv.mod = cg->mod;)

	/* beware: the schedule is changed here */
	for (node = sched_last(block); !sched_is_begin(node); node = prev) {
		prev = sched_prev(node);
		if (be_is_Reload(node)) {
			/* we always reload the whole register */
			tenv.dbg  = get_irn_dbg_info(node);
			tenv.irn  = node;
			tenv.mode = fix_spill_mode(cg, get_irn_mode(node));
			transform_to_Load(&tenv);
		}
		else if (be_is_Spill(node)) {
			ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
			/* we always spill the whole register */
			tenv.dbg  = get_irn_dbg_info(node);
			tenv.irn  = node;
			tenv.mode = fix_spill_mode(cg, get_irn_mode(spillval));
			transform_to_Store(&tenv);
		}
		else if (be_is_MemPerm(node)) {
			tenv.dbg = get_irn_dbg_info(node);
			tenv.irn = node;
			transform_MemPerm(&tenv);
		}
	}
}
/**
 * We transform Spill and Reload here. This needs to be done before
 * stack biasing, otherwise we would miss the corrected offset for these nodes.
 *
 * If x87 instructions should be emitted, run the x87 simulator and patch
 * the virtual instructions. This must obviously be done after register allocation.
 */
static void ia32_after_ra(void *self) {
	ia32_code_gen_t *cg = self;
	ir_graph *irg = cg->irg;

	irg_block_walk_graph(irg, NULL, ia32_after_ra_walker, cg);

	ia32_finish_irg(irg, cg);
}
/**
 * Last touchups for the graph before emit.
 */
static void ia32_finish(void *self) {
	ia32_code_gen_t *cg = self;
	ir_graph *irg = cg->irg;

	//be_remove_empty_blocks(irg);
	cg->blk_sched = be_create_block_schedule(irg, cg->birg->execfreqs);

	//cg->blk_sched = sched_create_block_schedule(cg->irg, cg->birg->execfreqs);

	/* if we do x87 code generation, rewrite all the virtual instructions and registers */
	if (cg->used_fp == fp_x87 || cg->force_sim) {
		x87_simulate_graph(cg->arch_env, irg, cg->blk_sched);
	}

	ia32_peephole_optimization(irg, cg);
}
/**
 * Emits the code, closes the output file and frees
 * the code generator interface.
 */
static void ia32_codegen(void *self) {
	ia32_code_gen_t *cg = self;
	ir_graph *irg = cg->irg;

	ia32_gen_routine(cg->isa->out, irg, cg);

	cur_reg_set = NULL;

	/* remove it from the isa */
	cg->isa->cg = NULL;

	/* de-allocate code generator */
	del_set(cg->reg_set);
	free(cg);
}
static void *ia32_cg_init(const be_irg_t *birg);

static const arch_code_generator_if_t ia32_code_gen_if = {
	ia32_cg_init,
	NULL,                /* before abi introduce hook */
	ia32_prepare_graph,
	ia32_before_sched,   /* before scheduling hook */
	ia32_before_ra,      /* before register allocation hook */
	ia32_after_ra,       /* after register allocation hook */
	ia32_finish,         /* called before codegen */
	ia32_codegen         /* emit && done */
};
/**
 * Initializes an IA32 code generator.
 */
static void *ia32_cg_init(const be_irg_t *birg) {
	ia32_isa_t      *isa = (ia32_isa_t *)birg->main_env->arch_env->isa;
	ia32_code_gen_t *cg  = xcalloc(1, sizeof(*cg));

	cg->impl      = &ia32_code_gen_if;
	cg->irg       = birg->irg;
	cg->reg_set   = new_set(ia32_cmp_irn_reg_assoc, 1024);
	cg->arch_env  = birg->main_env->arch_env;
	cg->isa       = isa;
	cg->birg      = birg;
	cg->blk_sched = NULL;
	cg->fp_to_gp  = NULL;
	cg->gp_to_fp  = NULL;
	cg->fp_kind   = isa->fp_kind;
	cg->used_fp   = fp_none;
	cg->dump      = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;

	FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.cg");

	/* copy optimizations from isa for easier access */
	cg->opt      = isa->opt;
	cg->arch     = isa->arch;
	cg->opt_arch = isa->opt_arch;

	/* enter it into the isa */
	isa->cg = cg;

#ifndef NDEBUG
	if (isa->name_obst_size) {
		//printf("freed %d bytes from name obst\n", isa->name_obst_size);
		isa->name_obst_size = 0;
		obstack_free(isa->name_obst, NULL);
		obstack_init(isa->name_obst);
	}
#endif /* NDEBUG */

	cur_reg_set = cg->reg_set;

	ia32_irn_ops.cg = cg;

	return (arch_code_generator_t *)cg;
}
/*****************************************************************
 *  ____             _                  _____  _____
 * |  _ \           | |                |_   _|/ ____|  /\
 * | |_) | __ _  ___| | _____ _ __   __| | |  | (___   /  \
 * |  _ < / _` |/ __| |/ / _ \ '_ \ / _` | |   \___ \ / /\ \
 * | |_) | (_| | (__|   <  __/ | | | (_| |_| |_ ____) / ____ \
 * |____/ \__,_|\___|_|\_\___|_| |_|\__,_|_____|_____/_/    \_\
 *****************************************************************/
/**
 * Set output modes for GCC
 */
static const tarval_mode_info mo_integer = {
	TVO_DECIMAL,
	NULL,
	NULL,
};

/*
 * set the tarval output mode of all integer modes to decimal
 */
static void set_tarval_output_modes(void)
{
	int i;

	for (i = get_irp_n_modes() - 1; i >= 0; --i) {
		ir_mode *mode = get_irp_mode(i);

		if (mode_is_int(mode))
			set_tarval_mode_output_option(mode, &mo_integer);
	}
}
/**
 * The template that generates a new ISA object.
 * Note that this template can be changed by command line
 * arguments.
 */
static ia32_isa_t ia32_isa_template = {
	{
		&ia32_isa_if,            /* isa interface implementation */
		&ia32_gp_regs[REG_ESP],  /* stack pointer register */
		&ia32_gp_regs[REG_EBP],  /* base pointer register */
		-1,                      /* stack direction */
		NULL,                    /* main environment */
	},
	NULL,                    /* 16bit register names */
	NULL,                    /* 8bit register names */
	NULL,                    /* types */
	NULL,                    /* tv_ents */
	(IA32_OPT_INCDEC |       /* optimize add 1, sub 1 into inc/dec               default: on */
	IA32_OPT_DOAM    |       /* optimize address mode                            default: on */
	IA32_OPT_LEA     |       /* optimize for LEAs                                default: on */
	IA32_OPT_PLACECNST |     /* place constants immediately before instructions, default: on */
	IA32_OPT_IMMOPS  |       /* operations can use immediates,                   default: on */
	IA32_OPT_EXTBB),         /* use extended basic block scheduling,             default: on */
	arch_pentium_4,          /* instruction architecture */
	arch_pentium_4,          /* optimize for architecture */
	fp_sse2,                 /* use sse2 unit */
	NULL,                    /* current code generator */
	NULL,                    /* output file */
#ifndef NDEBUG
	NULL,                    /* name obstack */
	0                        /* name obst size */
#endif
};
/**
 * Initializes the backend ISA.
 */
static void *ia32_init(FILE *file_handle) {
	static int inited = 0;
	ia32_isa_t *isa;

	if (inited)
		return NULL;

	set_tarval_output_modes();

	isa = xmalloc(sizeof(*isa));
	memcpy(isa, &ia32_isa_template, sizeof(*isa));

	ia32_register_init(isa);
	ia32_create_opcodes();

	if ((ARCH_INTEL(isa->arch) && isa->arch < arch_pentium_4) ||
	    (ARCH_AMD(isa->arch) && isa->arch < arch_athlon))
		/* no SSE2 for these CPUs */
		isa->fp_kind = fp_x87;

	if (ARCH_INTEL(isa->opt_arch) && isa->opt_arch >= arch_pentium_4) {
		/* the Pentium 4 doesn't like inc and dec instructions */
		isa->opt &= ~IA32_OPT_INCDEC;
	}

	isa->regs_16bit = pmap_create();
	isa->regs_8bit  = pmap_create();
	isa->types      = pmap_create();
	isa->tv_ent     = pmap_create();
	isa->out        = file_handle;

	ia32_build_16bit_reg_map(isa->regs_16bit);
	ia32_build_8bit_reg_map(isa->regs_8bit);

	/* patch register names of x87 registers */
	ia32_st_regs[0].name = "st";
	ia32_st_regs[1].name = "st(1)";
	ia32_st_regs[2].name = "st(2)";
	ia32_st_regs[3].name = "st(3)";
	ia32_st_regs[4].name = "st(4)";
	ia32_st_regs[5].name = "st(5)";
	ia32_st_regs[6].name = "st(6)";
	ia32_st_regs[7].name = "st(7)";

#ifndef NDEBUG
	isa->name_obst = xmalloc(sizeof(*isa->name_obst));
	obstack_init(isa->name_obst);
	isa->name_obst_size = 0;
#endif /* NDEBUG */

	ia32_handle_intrinsics();
	ia32_switch_section(isa->out, NO_SECTION);
	fprintf(isa->out, "\t.intel_syntax\n");

	/* needed for the debug support */
	ia32_switch_section(isa->out, SECTION_TEXT);
	fprintf(isa->out, ".Ltext0:\n");

	inited = 1;

	return isa;
}
/**
 * Closes the output file and frees the ISA structure.
 */
static void ia32_done(void *self) {
	ia32_isa_t *isa = self;

	/* emit now all global declarations */
	ia32_gen_decls(isa->out, isa->arch_isa.main_env);

	pmap_destroy(isa->regs_16bit);
	pmap_destroy(isa->regs_8bit);
	pmap_destroy(isa->tv_ent);
	pmap_destroy(isa->types);

#ifndef NDEBUG
	//printf("name obst size = %d bytes\n", isa->name_obst_size);
	obstack_free(isa->name_obst, NULL);
#endif /* NDEBUG */

	free(self);
}
/**
 * Return the number of register classes for this architecture.
 * We always report these:
 *  - the general purpose registers
 *  - the SSE floating point register set
 *  - the virtual floating point registers
 */
static int ia32_get_n_reg_class(const void *self) {
	return 3;
}

/**
 * Return the register class for index i.
 */
static const arch_register_class_t *ia32_get_reg_class(const void *self, int i) {
	assert(i >= 0 && i < 3 && "Invalid ia32 register class requested.");
	if (i == 0)
		return &ia32_reg_classes[CLASS_ia32_gp];
	else if (i == 1)
		return &ia32_reg_classes[CLASS_ia32_xmm];
	else
		return &ia32_reg_classes[CLASS_ia32_vfp];
}
/**
 * Get the register class which shall be used to store a value of a given mode.
 * @param self The this pointer.
 * @param mode The mode in question.
 * @return A register class which can hold values of the given mode.
 */
const arch_register_class_t *ia32_get_reg_class_for_mode(const void *self, const ir_mode *mode) {
	const ia32_isa_t *isa = self;
	if (mode_is_float(mode)) {
		return USE_SSE2(isa) ? &ia32_reg_classes[CLASS_ia32_xmm] : &ia32_reg_classes[CLASS_ia32_vfp];
	}
	else
		return &ia32_reg_classes[CLASS_ia32_gp];
}
/**
 * Get the ABI restrictions for procedure calls.
 * @param self        The this pointer.
 * @param method_type The type of the method (procedure) in question.
 * @param abi         The abi object to be modified
 */
static void ia32_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi) {
	const ia32_isa_t *isa = self;
	ir_type  *tp;
	ir_mode  *mode;
	unsigned  cc        = get_method_calling_convention(method_type);
	int       n         = get_method_n_params(method_type);
	int       biggest_n = -1;
	int       stack_idx = 0;
	int       i, ignore_1, ignore_2;
	ir_mode **modes;
	const arch_register_t *reg;
	be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);

	unsigned use_push = !IS_P6_ARCH(isa->opt_arch);

	/* set abi flags for calls */
	call_flags.bits.left_to_right         = 0;  /* always last arg first on stack */
	call_flags.bits.store_args_sequential = use_push;
	/* call_flags.bits.try_omit_fp                 not changed: can handle both settings */
	call_flags.bits.fp_free               = 0;  /* the frame pointer is fixed in IA32 */
	call_flags.bits.call_has_imm          = 1;  /* IA32 calls can have immediate address */

	/* set stack parameter passing style */
	be_abi_call_set_flags(abi, call_flags, &ia32_abi_callbacks);

	/* collect the mode for each type */
	modes = alloca(n * sizeof(modes[0]));

	for (i = 0; i < n; i++) {
		tp       = get_method_param_type(method_type, i);
		modes[i] = get_type_mode(tp);
	}

	/* set register parameters */
	if (cc & cc_reg_param) {
		/* determine the number of parameters passed via registers */
		biggest_n = ia32_get_n_regparam_class(n, modes, &ignore_1, &ignore_2);

		/* loop over all parameters and set the register requirements */
		for (i = 0; i <= biggest_n; i++) {
			reg = ia32_get_RegParam_reg(n, modes, i, cc);
			assert(reg && "kaputt");
			be_abi_call_param_reg(abi, i, reg);
		}

		stack_idx = i;
	}

	/* set stack parameters */
	for (i = stack_idx; i < n; i++) {
		/* parameters on the stack are 32 bit aligned */
		be_abi_call_param_stack(abi, i, 4, 0, 0);
	}

	/* set return registers */
	n = get_method_n_ress(method_type);

	assert(n <= 2 && "more than two results not supported");

	/* In case of 64bit returns, we will have two 32bit values */
	if (n == 2) {
		tp   = get_method_res_type(method_type, 0);
		mode = get_type_mode(tp);

		assert(!mode_is_float(mode) && "two FP results not supported");

		tp   = get_method_res_type(method_type, 1);
		mode = get_type_mode(tp);

		assert(!mode_is_float(mode) && "mixed INT, FP results not supported");

		be_abi_call_res_reg(abi, 0, &ia32_gp_regs[REG_EAX]);
		be_abi_call_res_reg(abi, 1, &ia32_gp_regs[REG_EDX]);
	}
	else if (n == 1) {
		const arch_register_t *reg;

		tp   = get_method_res_type(method_type, 0);
		assert(is_atomic_type(tp));
		mode = get_type_mode(tp);

		reg = mode_is_float(mode) ? &ia32_vfp_regs[REG_VF0] : &ia32_gp_regs[REG_EAX];

		be_abi_call_res_reg(abi, 0, reg);
	}
}
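/* Note: the two-result case above models 64 bit integer returns, which ia32
 * conventionally passes in the register pair edx:eax (low word in eax as
 * result 0, high word in edx as result 1).
 */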
static const void *ia32_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn) {
	return &ia32_irn_ops;
}

const arch_irn_handler_t ia32_irn_handler = {
	ia32_get_irn_ops
};

const arch_irn_handler_t *ia32_get_irn_handler(const void *self) {
	return &ia32_irn_handler;
}

int ia32_to_appear_in_schedule(void *block_env, const ir_node *irn) {
	return is_ia32_irn(irn) ? 1 : -1;
}
/**
 * Initializes the code generator interface.
 */
static const arch_code_generator_if_t *ia32_get_code_generator_if(void *self) {
	return &ia32_code_gen_if;
}

/**
 * Returns the estimated execution time of an ia32 irn.
 */
static sched_timestep_t ia32_sched_exectime(void *env, const ir_node *irn) {
	const arch_env_t *arch_env = env;
	return is_ia32_irn(irn) ? ia32_get_op_estimated_cost(arch_get_irn_ops(arch_env, irn), irn) : 1;
}

list_sched_selector_t ia32_sched_selector;

/**
 * Returns the reg_pressure scheduler with to_appear_in_schedule() overloaded.
 */
static const list_sched_selector_t *ia32_get_list_sched_selector(const void *self, list_sched_selector_t *selector) {
	memcpy(&ia32_sched_selector, selector, sizeof(ia32_sched_selector));
	ia32_sched_selector.exectime              = ia32_sched_exectime;
	ia32_sched_selector.to_appear_in_schedule = ia32_to_appear_in_schedule;
	return &ia32_sched_selector;
}
/**
 * Returns the necessary byte alignment for storing a register of given class.
 */
static int ia32_get_reg_class_alignment(const void *self, const arch_register_class_t *cls) {
	ir_mode *mode = arch_register_class_mode(cls);
	int bytes     = get_mode_size_bytes(mode);

	if (mode_is_float(mode) && bytes > 8)
		return 16;
	return bytes;
}

static ia32_intrinsic_env_t intrinsic_env = { NULL, NULL };
/**
 * Returns the libFirm configuration parameter for this backend.
 */
static const backend_params *ia32_get_libfirm_params(void) {
	static const arch_dep_params_t ad = {
		1,  /* also use subs */
		4,  /* maximum shifts */
		31, /* maximum shift amount */

		1,  /* allow Mulhs */
		1,  /* allow Mulus */
		32  /* Mulh allowed up to 32 bit */
	};
	static backend_params p = {
		NULL,  /* no additional opcodes */
		NULL,  /* will be set later */
		1,     /* need dword lowering */
		ia32_create_intrinsic_fkt,
		&intrinsic_env,  /* context for ia32_create_intrinsic_fkt */
	};

	p.dep_param = &ad;
	return &p;
}
#ifdef WITH_LIBCORE

/* instruction set architectures. */
static const lc_opt_enum_int_items_t arch_items[] = {
	{ "386",        arch_i386, },
	{ "486",        arch_i486, },
	{ "pentium",    arch_pentium, },
	{ "586",        arch_pentium, },
	{ "pentiumpro", arch_pentium_pro, },
	{ "686",        arch_pentium_pro, },
	{ "pentiummmx", arch_pentium_mmx, },
	{ "pentium2",   arch_pentium_2, },
	{ "p2",         arch_pentium_2, },
	{ "pentium3",   arch_pentium_3, },
	{ "p3",         arch_pentium_3, },
	{ "pentium4",   arch_pentium_4, },
	{ "p4",         arch_pentium_4, },
	{ "pentiumm",   arch_pentium_m, },
	{ "pm",         arch_pentium_m, },
	{ "core",       arch_core, },
	{ "athlon",     arch_athlon, },
	{ "athlon64",   arch_athlon_64, },
	{ "opteron",    arch_opteron, },
	{ NULL,         0 }
};

static lc_opt_enum_int_var_t arch_var = {
	&ia32_isa_template.arch, arch_items
};

static lc_opt_enum_int_var_t opt_arch_var = {
	&ia32_isa_template.opt_arch, arch_items
};

static const lc_opt_enum_int_items_t fp_unit_items[] = {
	{ "x87" , fp_x87 },
	{ "sse2", fp_sse2 },
	{ NULL,   0 }
};

static lc_opt_enum_int_var_t fp_unit_var = {
	&ia32_isa_template.fp_kind, fp_unit_items
};

static const lc_opt_enum_int_items_t gas_items[] = {
	{ "linux", ASM_LINUX_GAS },
	{ "mingw", ASM_MINGW_GAS },
	{ NULL,    0 }
};

static lc_opt_enum_int_var_t gas_var = {
	(int *)&asm_flavour, gas_items
};

static const lc_opt_table_entry_t ia32_options[] = {
	LC_OPT_ENT_ENUM_INT("arch",      "select the instruction architecture", &arch_var),
	LC_OPT_ENT_ENUM_INT("opt",       "optimize for instruction architecture", &opt_arch_var),
	LC_OPT_ENT_ENUM_INT("fpunit",    "select the floating point unit", &fp_unit_var),
	LC_OPT_ENT_NEGBIT("noaddrmode",  "do not use address mode", &ia32_isa_template.opt, IA32_OPT_DOAM),
	LC_OPT_ENT_NEGBIT("nolea",       "do not optimize for LEAs", &ia32_isa_template.opt, IA32_OPT_LEA),
	LC_OPT_ENT_NEGBIT("noplacecnst", "do not place constants", &ia32_isa_template.opt, IA32_OPT_PLACECNST),
	LC_OPT_ENT_NEGBIT("noimmop",     "no operations with immediates", &ia32_isa_template.opt, IA32_OPT_IMMOPS),
	LC_OPT_ENT_NEGBIT("noextbb",     "do not use extended basic block scheduling", &ia32_isa_template.opt, IA32_OPT_EXTBB),
	LC_OPT_ENT_ENUM_INT("gasmode",   "set the GAS compatibility mode", &gas_var),
	{ NULL }
};
/**
 * Register command line options for the ia32 backend.
 *
 * ia32-arch=arch        create instructions for arch
 * ia32-opt=arch         optimize for run on arch
 * ia32-fpunit=unit      select floating point unit (x87 or SSE2)
 * ia32-incdec           optimize for inc/dec
 * ia32-noaddrmode       do not use address mode
 * ia32-nolea            do not optimize for LEAs
 * ia32-noplacecnst      do not place constants
 * ia32-noimmop          no operations with immediates
 * ia32-noextbb          do not use extended basic block scheduling
 * ia32-gasmode          set the GAS compatibility mode
 */
static void ia32_register_options(lc_opt_entry_t *ent)
{
	lc_opt_entry_t *be_grp_ia32 = lc_opt_get_grp(ent, "ia32");
	lc_opt_add_table(be_grp_ia32, ia32_options);
}
#endif /* WITH_LIBCORE */
const arch_isa_if_t ia32_isa_if = {
	ia32_init,
	ia32_done,
	ia32_get_n_reg_class,
	ia32_get_reg_class,
	ia32_get_reg_class_for_mode,
	ia32_get_call_abi,
	ia32_get_irn_handler,
	ia32_get_code_generator_if,
	ia32_get_list_sched_selector,
	ia32_get_reg_class_alignment,
	ia32_get_libfirm_params,
#ifdef WITH_LIBCORE
	ia32_register_options
#endif
};