2 * This is the main ia32 firm backend driver.
3 * @author Christian Wuerdig
20 #include <libcore/lc_opts.h>
21 #include <libcore/lc_opts_enum.h>
22 #endif /* WITH_LIBCORE */
26 #include "pseudo_irg.h"
30 #include "iredges_t.h"
38 #include "../beabi.h" /* the general register allocator interface */
39 #include "../benode_t.h"
40 #include "../belower.h"
41 #include "../besched_t.h"
44 #include "../beirgmod.h"
45 #include "../be_dbgout.h"
46 #include "bearch_ia32_t.h"
48 #include "ia32_new_nodes.h" /* ia32 nodes interface */
49 #include "gen_ia32_regalloc_if.h" /* the generated interface (register type and class defenitions) */
50 #include "ia32_gen_decls.h" /* interface declaration emitter */
51 #include "ia32_transform.h"
52 #include "ia32_emitter.h"
53 #include "ia32_map_regs.h"
54 #include "ia32_optimize.h"
56 #include "ia32_dbg_stat.h"
57 #include "ia32_finish.h"
58 #include "ia32_util.h"
60 #define DEBUG_MODULE "firm.be.ia32.isa"
63 static set *cur_reg_set = NULL;
65 /* Creates the unique per irg GP NoReg node. */
66 ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg) {
67 return be_abi_get_callee_save_irn(cg->birg->abi, &ia32_gp_regs[REG_GP_NOREG]);
70 /* Creates the unique per irg FP NoReg node. */
71 ir_node *ia32_new_NoReg_fp(ia32_code_gen_t *cg) {
72 return be_abi_get_callee_save_irn(cg->birg->abi,
73 USE_SSE2(cg) ? &ia32_xmm_regs[REG_XMM_NOREG] : &ia32_vfp_regs[REG_VFP_NOREG]);
77 * Returns gp_noreg or fp_noreg, depending on the input requirements.
79 ir_node *ia32_get_admissible_noreg(ia32_code_gen_t *cg, ir_node *irn, int pos) {
80 arch_register_req_t req;
81 const arch_register_req_t *p_req;
83 p_req = arch_get_register_req(cg->arch_env, &req, irn, pos);
84 assert(p_req && "Missing register requirements");
85 if (p_req->cls == &ia32_reg_classes[CLASS_ia32_gp])
86 return ia32_new_NoReg_gp(cg);
88 return ia32_new_NoReg_fp(cg);
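/*
 * Hedged usage sketch (not part of the original backend): a transformation
 * that wants to clear an operand slot can ask for the admissible NoReg of
 * that slot and wire it in. The helper name clear_operand_sketch and the
 * call site are illustrative assumptions; ia32_get_admissible_noreg and
 * set_irn_n are used exactly as elsewhere in this file.
 */
static void clear_operand_sketch(ia32_code_gen_t *cg, ir_node *irn, int pos) {
	/* gp_noreg or fp_noreg, depending on the register class required at pos */
	ir_node *noreg = ia32_get_admissible_noreg(cg, irn, pos);
	set_irn_n(irn, pos, noreg);
}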
91 /**************************************************
94 * _ __ ___ __ _ __ _| | | ___ ___ _| |_
95 * | '__/ _ \/ _` | / _` | | |/ _ \ / __| | | _|
96 * | | | __/ (_| | | (_| | | | (_) | (__ | | |
97 * |_| \___|\__, | \__,_|_|_|\___/ \___| |_|_|
100 **************************************************/
103 * Return register requirements for an ia32 node.
104 * If the node returns a tuple (mode_T) then the Projs
105 * will be asked for this information.
107 static const arch_register_req_t *ia32_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos) {
108 const ia32_irn_ops_t *ops = self;
109 const ia32_register_req_t *irn_req;
110 long node_pos = pos == -1 ? 0 : pos;
111 ir_mode *mode = is_Block(irn) ? NULL : get_irn_mode(irn);
112 FIRM_DBG_REGISTER(firm_dbg_module_t *mod, DEBUG_MODULE);
114 if (is_Block(irn) || mode == mode_M || mode == mode_X) {
115 DBG((mod, LEVEL_1, "ignoring Block, mode_M, mode_X node %+F\n", irn));
119 if (mode == mode_T && pos < 0) {
120 DBG((mod, LEVEL_1, "ignoring request OUT requirements for node %+F\n", irn));
124 DBG((mod, LEVEL_1, "get requirements at pos %d for %+F ... ", pos, irn));
128 DBG((mod, LEVEL_1, "ignoring request IN requirements for node %+F\n", irn));
133 node_pos = ia32_translate_proj_pos(irn);
139 irn = skip_Proj(irn);
141 DB((mod, LEVEL_1, "skipping Proj, going to %+F at pos %d ... ", irn, node_pos));
144 if (is_ia32_irn(irn)) {
146 irn_req = get_ia32_in_req(irn, pos);
149 irn_req = get_ia32_out_req(irn, node_pos);
152 DB((mod, LEVEL_1, "returning reqs for %+F at pos %d\n", irn, pos));
154 memcpy(req, &(irn_req->req), sizeof(*req));
156 if (arch_register_req_is(&(irn_req->req), should_be_same)) {
157 assert(irn_req->same_pos >= 0 && "should be same constraint for in -> out NYI");
158 req->other_same = get_irn_n(irn, irn_req->same_pos);
161 if (arch_register_req_is(&(irn_req->req), should_be_different)) {
162 assert(irn_req->different_pos >= 0 && "should be different constraint for in -> out NYI");
163 req->other_different = get_irn_n(irn, irn_req->different_pos);
167 /* treat Unknowns like Const with default requirements */
168 if (is_Unknown(irn)) {
169 DB((mod, LEVEL_1, "returning UKNWN reqs for %+F\n", irn));
170 if (mode_is_float(mode)) {
171 if (USE_SSE2(ops->cg))
172 memcpy(req, &(ia32_default_req_ia32_xmm_xmm_UKNWN), sizeof(*req));
174 memcpy(req, &(ia32_default_req_ia32_vfp_vfp_UKNWN), sizeof(*req));
176 else if (mode_is_int(mode) || mode_is_reference(mode))
177 memcpy(req, &(ia32_default_req_ia32_gp_gp_UKNWN), sizeof(*req));
178 else if (mode == mode_T || mode == mode_M) {
179 DBG((mod, LEVEL_1, "ignoring Unknown node %+F\n", irn));
183 assert(0 && "unsupported Unknown-Mode");
186 DB((mod, LEVEL_1, "returning NULL for %+F (not ia32)\n", irn));
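/*
 * Hedged sketch (illustration only): this is roughly how code built on the
 * generic arch interface ends up in ia32_get_irn_reg_req. The wrapper name
 * wants_gp_register_sketch is an assumption; arch_get_register_req is called
 * with the same signature as in ia32_get_admissible_noreg above.
 */
static int wants_gp_register_sketch(ia32_code_gen_t *cg, const ir_node *irn, int pos) {
	arch_register_req_t        req;
	const arch_register_req_t *p_req = arch_get_register_req(cg->arch_env, &req, irn, pos);

	/* pos >= 0 queries IN requirements, pos == -1 queries OUT requirements */
	return p_req != NULL && p_req->cls == &ia32_reg_classes[CLASS_ia32_gp];
}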
194 static void ia32_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg) {
196 const ia32_irn_ops_t *ops = self;
198 if (get_irn_mode(irn) == mode_X) {
202 DBG((ops->cg->mod, LEVEL_1, "ia32 assigned register %s to node %+F\n", reg->name, irn));
205 pos = ia32_translate_proj_pos(irn);
206 irn = skip_Proj(irn);
209 if (is_ia32_irn(irn)) {
210 const arch_register_t **slots;
212 slots = get_ia32_slots(irn);
216 ia32_set_firm_reg(irn, reg, cur_reg_set);
220 static const arch_register_t *ia32_get_irn_reg(const void *self, const ir_node *irn) {
222 const arch_register_t *reg = NULL;
226 if (get_irn_mode(irn) == mode_X) {
230 pos = ia32_translate_proj_pos(irn);
231 irn = skip_Proj(irn);
234 if (is_ia32_irn(irn)) {
235 const arch_register_t **slots;
236 slots = get_ia32_slots(irn);
240 reg = ia32_get_firm_reg(irn, cur_reg_set);
246 static arch_irn_class_t ia32_classify(const void *self, const ir_node *irn) {
247 arch_irn_class_t classification = arch_irn_class_normal;
249 irn = skip_Proj(irn);
252 classification |= arch_irn_class_branch;
254 if (! is_ia32_irn(irn))
255 return classification & ~arch_irn_class_normal;
257 if (is_ia32_Cnst(irn))
258 classification |= arch_irn_class_const;
261 classification |= arch_irn_class_load;
263 if (is_ia32_St(irn) || is_ia32_Store8Bit(irn))
264 classification |= arch_irn_class_store;
266 if (is_ia32_got_reload(irn))
267 classification |= arch_irn_class_reload;
269 return classification;
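/*
 * Hedged sketch: the classification returned above is a bit set, so callers
 * test individual bits rather than comparing for equality. The helper name
 * touches_memory_sketch is illustrative only.
 */
static int touches_memory_sketch(const void *self, const ir_node *irn) {
	arch_irn_class_t cls = ia32_classify(self, irn);
	return (cls & (arch_irn_class_load | arch_irn_class_store)) != 0;
}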
272 static arch_irn_flags_t ia32_get_flags(const void *self, const ir_node *irn) {
275 ir_node *pred = get_Proj_pred(irn);
276 int ia32_op = get_ia32_irn_opcode(pred);
277 long proj = get_Proj_proj(irn);
278 if (iro_ia32_Push == ia32_op && proj == pn_ia32_Push_stack) {
279 /* Push always modifies ESP; this cannot be changed */
280 return arch_irn_flags_modify_sp | arch_irn_flags_ignore;
282 if (iro_ia32_Pop == ia32_op && proj == pn_ia32_Pop_stack) {
283 /* Pop always modifies ESP; this cannot be changed */
284 return arch_irn_flags_modify_sp | arch_irn_flags_ignore;
286 if (iro_ia32_AddSP == ia32_op && proj == pn_ia32_AddSP_stack) {
287 /* AddSP always modifies ESP; this cannot be changed */
288 return arch_irn_flags_modify_sp | arch_irn_flags_ignore;
290 if (iro_ia32_SubSP == ia32_op && proj == pn_ia32_SubSP_stack) {
291 /* SubSP always modifies ESP; this cannot be changed */
292 return arch_irn_flags_modify_sp | arch_irn_flags_ignore;
296 irn = skip_Proj(irn);
297 if (is_ia32_irn(irn))
298 return get_ia32_flags(irn);
301 return arch_irn_flags_ignore;
307 * The IA32 ABI callback object.
310 be_abi_call_flags_bits_t flags; /**< The call flags. */
311 const arch_isa_t *isa; /**< The ISA handle. */
312 const arch_env_t *aenv; /**< The architecture environment. */
313 ir_graph *irg; /**< The associated graph. */
316 static entity *ia32_get_frame_entity(const void *self, const ir_node *irn) {
317 return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
320 static void ia32_set_frame_entity(const void *self, ir_node *irn, entity *ent) {
321 set_ia32_frame_ent(irn, ent);
324 static void ia32_set_frame_offset(const void *self, ir_node *irn, int bias) {
326 const ia32_irn_ops_t *ops = self;
328 if (get_ia32_frame_ent(irn)) {
329 ia32_am_flavour_t am_flav = get_ia32_am_flavour(irn);
331 if(is_ia32_Pop(irn)) {
332 int omit_fp = be_abi_omit_fp(ops->cg->birg->abi);
334 /* Pop nodes modify the stack pointer before calculating the destination
335 * address, so fix this here
341 DBG((ops->cg->mod, LEVEL_1, "stack biased %+F with %d\n", irn, bias));
343 snprintf(buf, sizeof(buf), "%d", bias);
345 if (get_ia32_op_type(irn) == ia32_Normal) {
346 set_ia32_cnst(irn, buf);
348 add_ia32_am_offs(irn, buf);
350 set_ia32_am_flavour(irn, am_flav);
355 static int ia32_get_sp_bias(const void *self, const ir_node *irn) {
357 long proj = get_Proj_proj(irn);
358 ir_node *pred = get_Proj_pred(irn);
360 if (proj == pn_ia32_Push_stack && is_ia32_Push(pred))
362 if (proj == pn_ia32_Pop_stack && is_ia32_Pop(pred))
370 * Put all registers which are saved by the prologue/epilogue in a set.
372 * @param self The callback object.
373 * @param s The result set.
375 static void ia32_abi_dont_save_regs(void *self, pset *s)
377 ia32_abi_env_t *env = self;
378 if(env->flags.try_omit_fp)
379 pset_insert_ptr(s, env->isa->bp);
383 * Generate the routine prologue.
385 * @param self The callback object.
386 * @param mem A pointer to the mem node. Update this if you define new memory.
387 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
389 * @return The register which shall be used as a stack frame base.
391 * All nodes which define registers in @p reg_map must keep @p reg_map current.
393 static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map)
395 ia32_abi_env_t *env = self;
397 if (! env->flags.try_omit_fp) {
398 ir_node *bl = get_irg_start_block(env->irg);
399 ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
400 ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);
401 ir_node *noreg = be_abi_reg_map_get(reg_map, &ia32_gp_regs[REG_GP_NOREG]);
405 push = new_rd_ia32_Push(NULL, env->irg, bl, noreg, noreg, curr_bp, curr_sp, *mem);
406 curr_sp = new_r_Proj(env->irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
407 *mem = new_r_Proj(env->irg, bl, push, mode_M, pn_ia32_Push_M);
409 /* the push must have SP out register */
410 arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
411 set_ia32_flags(push, arch_irn_flags_ignore);
413 /* move esp to ebp */
414 curr_bp = be_new_Copy(env->isa->bp->reg_class, env->irg, bl, curr_sp);
415 be_set_constr_single_reg(curr_bp, BE_OUT_POS(0), env->isa->bp);
416 arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
417 be_node_set_flags(curr_bp, BE_OUT_POS(0), arch_irn_flags_ignore);
419 /* beware: the copy must be done before any other sp use */
420 curr_sp = be_new_CopyKeep_single(env->isa->sp->reg_class, env->irg, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
421 be_set_constr_single_reg(curr_sp, BE_OUT_POS(0), env->isa->sp);
422 arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
423 be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore);
425 be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
426 be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);
435 * Generate the routine epilogue.
436 * @param self The callback object.
437 * @param bl The block for the epilogue
438 * @param mem A pointer to the mem node. Update this if you define new memory.
439 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
440 * @return The register which shall be used as a stack frame base.
442 * All nodes which define registers in @p reg_map must keep @p reg_map current.
444 static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map)
446 ia32_abi_env_t *env = self;
447 ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
448 ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);
450 if (env->flags.try_omit_fp) {
451 /* simply remove the stack frame here */
452 curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK);
453 add_irn_dep(curr_sp, *mem);
456 const ia32_isa_t *isa = (ia32_isa_t *)env->isa;
457 ir_mode *mode_bp = env->isa->bp->reg_class->mode;
459 /* gcc always emits a leave at the end of a routine */
460 if (1 || ARCH_AMD(isa->opt_arch)) {
464 leave = new_rd_ia32_Leave(NULL, env->irg, bl, curr_sp, curr_bp);
465 set_ia32_flags(leave, arch_irn_flags_ignore);
466 curr_bp = new_r_Proj(current_ir_graph, bl, leave, mode_bp, pn_ia32_Leave_frame);
467 curr_sp = new_r_Proj(current_ir_graph, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
468 *mem = new_r_Proj(current_ir_graph, bl, leave, mode_M, pn_ia32_Leave_M);
471 ir_node *noreg = be_abi_reg_map_get(reg_map, &ia32_gp_regs[REG_GP_NOREG]);
474 /* copy ebp to esp */
475 curr_sp = be_new_SetSP(env->isa->sp, env->irg, bl, curr_sp, curr_bp, *mem);
478 pop = new_rd_ia32_Pop(NULL, env->irg, bl, noreg, noreg, curr_sp, *mem);
479 set_ia32_flags(pop, arch_irn_flags_ignore);
480 curr_bp = new_r_Proj(current_ir_graph, bl, pop, mode_bp, pn_ia32_Pop_res);
481 curr_sp = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
482 *mem = new_r_Proj(current_ir_graph, bl, pop, mode_M, pn_ia32_Pop_M);
484 arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
485 arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
488 be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
489 be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);
493 * Initialize the callback object.
494 * @param call The call object.
495 * @param aenv The architecture environment.
496 * @param irg The graph with the method.
497 * @return Some pointer. This pointer is passed to all other callback functions as self object.
499 static void *ia32_abi_init(const be_abi_call_t *call, const arch_env_t *aenv, ir_graph *irg)
501 ia32_abi_env_t *env = xmalloc(sizeof(env[0]));
502 be_abi_call_flags_t fl = be_abi_call_get_flags(call);
503 env->flags = fl.bits;
506 env->isa = aenv->isa;
511 * Destroy the callback object.
512 * @param self The callback object.
514 static void ia32_abi_done(void *self) {
519 * Produces the type which sits between the stack args and the locals on the stack.
520 * It will contain the return address and space to store the old base pointer.
521 * @return The Firm type modeling the ABI between-type.
523 static ir_type *ia32_abi_get_between_type(void *self)
525 #define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
526 static ir_type *omit_fp_between_type = NULL;
527 static ir_type *between_type = NULL;
529 ia32_abi_env_t *env = self;
531 if ( !between_type) {
533 entity *ret_addr_ent;
534 entity *omit_fp_ret_addr_ent;
536 ir_type *old_bp_type = new_type_primitive(IDENT("bp"), mode_P);
537 ir_type *ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_P);
539 between_type = new_type_struct(IDENT("ia32_between_type"));
540 old_bp_ent = new_entity(between_type, IDENT("old_bp"), old_bp_type);
541 ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type);
543 set_entity_offset_bytes(old_bp_ent, 0);
544 set_entity_offset_bytes(ret_addr_ent, get_type_size_bytes(old_bp_type));
545 set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
546 set_type_state(between_type, layout_fixed);
548 omit_fp_between_type = new_type_struct(IDENT("ia32_between_type_omit_fp"));
549 omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type);
551 set_entity_offset_bytes(omit_fp_ret_addr_ent, 0);
552 set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
553 set_type_state(omit_fp_between_type, layout_fixed);
556 return env->flags.try_omit_fp ? omit_fp_between_type : between_type;
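/*
 * Illustration (assuming 4-byte ia32 pointers, which this file does not state
 * explicitly): with a frame pointer, the between type built above is laid out as
 *
 *     offset 0: old_bp    (saved base pointer)
 *     offset 4: ret_addr  (return address)
 *
 * while the omit-fp variant only contains ret_addr at offset 0.
 */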
561 * Get the estimated cycle count for @p irn.
563 * @param self The this pointer.
564 * @param irn The node.
566 * @return The estimated cycle count for this operation
568 static int ia32_get_op_estimated_cost(const void *self, const ir_node *irn)
571 ia32_op_type_t op_tp;
572 const ia32_irn_ops_t *ops = self;
577 assert(is_ia32_irn(irn));
579 cost = get_ia32_latency(irn);
580 op_tp = get_ia32_op_type(irn);
582 if (is_ia32_CopyB(irn)) {
584 if (ARCH_INTEL(ops->cg->arch))
587 else if (is_ia32_CopyB_i(irn)) {
588 int size = get_tarval_long(get_ia32_Immop_tarval(irn));
589 cost = 20 + (int)ceil((4.0 / 3.0) * size); /* ~4/3 cycles per byte; the integer expression 4/3 would truncate to 1 */
590 if (ARCH_INTEL(ops->cg->arch))
593 /* in case of address mode operations add additional cycles */
594 else if (op_tp == ia32_AddrModeD || op_tp == ia32_AddrModeS) {
596 In case of stack access add 5 cycles (we assume stack is in cache),
597 other memory operations cost 20 cycles.
599 cost += is_ia32_use_frame(irn) ? 5 : 20;
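/*
 * Worked example (illustrative): with the formula above, an ia32_CopyB_i
 * copying size = 30 bytes is estimated at 20 + ceil(4/3 * 30) = 60 cycles,
 * while a source/destination address mode operation costs its base latency
 * plus 5 cycles for frame (stack) accesses or 20 cycles for other memory.
 */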
606 * Returns the inverse operation of @p irn, recalculating the argument at position @p i.
608 * @param irn The original operation
609 * @param i Index of the argument we want the inverse operation to yield
610 * @param inverse struct to be filled with the resulting inverse op
611 * @param obstack The obstack to use for allocation of the returned nodes array
612 * @return The inverse operation or NULL if the operation is not invertible
614 static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) {
617 ir_node *block, *noreg, *nomem;
620 /* we cannot invert non-ia32 irns */
621 if (! is_ia32_irn(irn))
624 /* operand must always be a real operand (not base, index or mem) */
625 if (i != 2 && i != 3)
628 /* we don't invert address mode operations */
629 if (get_ia32_op_type(irn) != ia32_Normal)
632 irg = get_irn_irg(irn);
633 block = get_nodes_block(irn);
634 mode = get_ia32_res_mode(irn);
635 noreg = get_irn_n(irn, 0);
636 nomem = new_r_NoMem(irg);
638 /* initialize structure */
639 inverse->nodes = obstack_alloc(obst, 2 * sizeof(inverse->nodes[0]));
643 switch (get_ia32_irn_opcode(irn)) {
645 if (get_ia32_immop_type(irn) == ia32_ImmConst) {
646 /* we have an add with a const here */
647 /* inverse == add with negated const */
648 inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
649 pnc = pn_ia32_Add_res;
651 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
652 set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
653 set_ia32_commutative(inverse->nodes[0]);
655 else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
656 /* we have an add with a symconst here */
657 /* inverse == sub with const */
658 inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
659 pnc = pn_ia32_Sub_res;
661 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
664 /* normal add: inverse == sub */
665 ir_node *proj = ia32_get_res_proj(irn);
668 inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, i ^ 1), nomem);
669 pnc = pn_ia32_Sub_res;
674 if (get_ia32_immop_type(irn) != ia32_ImmNone) {
675 /* we have a sub with a const/symconst here */
676 /* inverse == add with this const */
677 inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
678 pnc = pn_ia32_Add_res;
679 inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
680 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
684 ir_node *proj = ia32_get_res_proj(irn);
688 inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, 3), nomem);
691 inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, 2), proj, nomem);
693 pnc = pn_ia32_Sub_res;
698 if (get_ia32_immop_type(irn) != ia32_ImmNone) {
699 /* xor with const: inverse = xor */
700 inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
701 pnc = pn_ia32_Eor_res;
702 inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
703 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
707 inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, (ir_node *)irn, get_irn_n(irn, i), nomem);
708 pnc = pn_ia32_Eor_res;
713 ir_node *proj = ia32_get_res_proj(irn);
716 inverse->nodes[0] = new_rd_ia32_Not(NULL, irg, block, noreg, noreg, proj, nomem);
717 pnc = pn_ia32_Not_res;
721 case iro_ia32_Minus: {
722 ir_node *proj = ia32_get_res_proj(irn);
725 inverse->nodes[0] = new_rd_ia32_Minus(NULL, irg, block, noreg, noreg, proj, nomem);
726 pnc = pn_ia32_Minus_res;
731 /* inverse operation not supported */
735 set_ia32_res_mode(inverse->nodes[0], mode);
736 inverse->nodes[1] = new_r_Proj(irg, block, inverse->nodes[0], mode, pnc);
742 * Check if irn can load its operand at position i from memory (source address mode).
743 * @param self Pointer to irn ops itself
744 * @param irn The irn to be checked
745 * @param i The operands position
746 * @return Non-zero if the operand can be loaded
748 static int ia32_possible_memory_operand(const void *self, const ir_node *irn, unsigned int i) {
749 if (! is_ia32_irn(irn) || /* must be an ia32 irn */
750 get_irn_arity(irn) != 5 || /* must be a binary operation */
751 get_ia32_op_type(irn) != ia32_Normal || /* must not already be an address mode irn */
752 ! (get_ia32_am_support(irn) & ia32_am_Source) || /* must be capable of source address mode */
753 (i != 2 && i != 3) || /* a "real" operand position must be requested */
754 (i == 2 && ! is_ia32_commutative(irn)) || /* if the first operand is requested, irn must be commutative */
755 is_ia32_use_frame(irn)) /* must not already use frame */
761 static void ia32_perform_memory_operand(const void *self, ir_node *irn, ir_node *spill, unsigned int i) {
762 const ia32_irn_ops_t *ops = self;
763 ia32_code_gen_t *cg = ops->cg;
765 assert(ia32_possible_memory_operand(self, irn, i) && "Cannot perform memory operand change");
768 ir_node *tmp = get_irn_n(irn, 3);
769 set_irn_n(irn, 3, get_irn_n(irn, 2));
770 set_irn_n(irn, 2, tmp);
773 set_ia32_am_support(irn, ia32_am_Source);
774 set_ia32_op_type(irn, ia32_AddrModeS);
775 set_ia32_am_flavour(irn, ia32_B);
776 set_ia32_ls_mode(irn, get_irn_mode(get_irn_n(irn, i)));
777 set_ia32_use_frame(irn);
778 set_ia32_got_reload(irn);
780 set_irn_n(irn, 0, get_irg_frame(get_irn_irg(irn)));
781 set_irn_n(irn, 4, spill);
784 The input at position one is the index register, which is NoReg.
785 We use the cg object (taken from the irn ops above) to get an admissible NoReg here.
788 set_irn_n(irn, 3, ia32_get_admissible_noreg(cg, irn, 3));
790 //FIXME DBG_OPT_AM_S(reload, irn);
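/*
 * Hedged sketch of how the two hooks above are meant to be used by a spiller:
 * first ask whether operand i of irn can be folded, then rewrite the node.
 * The function name fold_reload_sketch and its call site are assumptions.
 */
static void fold_reload_sketch(const void *self, ir_node *irn, ir_node *spill, unsigned int i) {
	if (ia32_possible_memory_operand(self, irn, i)) {
		/* turn operand i into a load from the spill slot folded into irn */
		ia32_perform_memory_operand(self, irn, spill, i);
	}
}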
793 static const be_abi_callbacks_t ia32_abi_callbacks = {
796 ia32_abi_get_between_type,
797 ia32_abi_dont_save_regs,
802 /* fill register allocator interface */
804 static const arch_irn_ops_if_t ia32_irn_ops_if = {
805 ia32_get_irn_reg_req,
810 ia32_get_frame_entity,
811 ia32_set_frame_entity,
812 ia32_set_frame_offset,
815 ia32_get_op_estimated_cost,
816 ia32_possible_memory_operand,
817 ia32_perform_memory_operand,
820 ia32_irn_ops_t ia32_irn_ops = {
827 /**************************************************
830 * ___ ___ __| | ___ __ _ ___ _ __ _| |_
831 * / __/ _ \ / _` |/ _ \/ _` |/ _ \ '_ \ | | _|
832 * | (_| (_) | (_| | __/ (_| | __/ | | | | | |
833 * \___\___/ \__,_|\___|\__, |\___|_| |_| |_|_|
836 **************************************************/
838 static void ia32_kill_convs(ia32_code_gen_t *cg) {
841 /* BEWARE: the Projs are inserted in the set */
842 foreach_nodeset(cg->kill_conv, irn) {
843 ir_node *in = get_irn_n(get_Proj_pred(irn), 2);
844 edges_reroute(irn, in, cg->birg->irg);
849 * Transform the Thread Local Storage base.
851 static void transform_tls(ir_graph *irg) {
852 ir_node *irn = get_irg_tls(irg);
855 dbg_info *dbg = get_irn_dbg_info(irn);
856 ir_node *blk = get_nodes_block(irn);
858 newn = new_rd_ia32_LdTls(dbg, irg, blk, get_irn_mode(irn));
865 * Transforms the standard Firm graph into an ia32 Firm graph.
868 static void ia32_prepare_graph(void *self) {
869 ia32_code_gen_t *cg = self;
870 dom_front_info_t *dom;
871 DEBUG_ONLY(firm_dbg_module_t *old_mod = cg->mod;)
873 FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.transform");
875 /* 1st: transform constants and psi condition trees */
876 ia32_pre_transform_phase(cg);
878 /* 2nd: transform all remaining nodes */
879 ia32_register_transformers();
880 dom = be_compute_dominance_frontiers(cg->irg);
882 cg->kill_conv = new_nodeset(5);
883 transform_tls(cg->irg);
884 irg_walk_blkwise_graph(cg->irg, NULL, ia32_transform_node, cg);
886 del_nodeset(cg->kill_conv);
888 be_free_dominance_frontiers(dom);
891 be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);
893 /* 3rd: optimize address mode */
894 FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.am");
895 ia32_optimize_addressmode(cg);
898 be_dump(cg->irg, "-am", dump_ir_block_graph_sched);
900 DEBUG_ONLY(cg->mod = old_mod;)
904 * Dummy functions for hooks we don't need but which must be filled.
906 static void ia32_before_sched(void *self) {
909 static void remove_unused_nodes(ir_node *irn, bitset_t *already_visited) {
912 ir_node *mem_proj = NULL;
917 mode = get_irn_mode(irn);
919 /* check if we already saw this node or the node has more than one user */
920 if (bitset_contains_irn(already_visited, irn) || get_irn_n_edges(irn) > 1) {
924 /* mark irn visited */
925 bitset_add_irn(already_visited, irn);
927 /* non-Tuple nodes with one user: ok, return */
928 if (get_irn_n_edges(irn) >= 1 && mode != mode_T) {
933 /* tuple node has one user which is not the mem proj -> ok */
933 if (mode == mode_T && get_irn_n_edges(irn) == 1) {
934 mem_proj = ia32_get_proj_for_mode(irn, mode_M);
935 if (mem_proj == NULL) {
940 arity = get_irn_arity(irn);
941 for (i = 0; i < arity; ++i) {
942 ir_node *pred = get_irn_n(irn, i);
944 /* do not follow memory edges or we will accidentally remove stores */
945 if (get_irn_mode(pred) == mode_M) {
946 if(mem_proj != NULL) {
947 edges_reroute(mem_proj, pred, get_irn_irg(mem_proj));
953 set_irn_n(irn, i, new_Bad());
956 The current node is about to be removed: if the predecessor
957 has only this node as user, it needs to be removed as well.
959 if (get_irn_n_edges(pred) <= 1)
960 remove_unused_nodes(pred, already_visited);
963 // we need to set the preds to Bad again to also get the memory edges
964 arity = get_irn_arity(irn);
965 for (i = 0; i < arity; ++i) {
966 set_irn_n(irn, i, new_Bad());
969 if (sched_is_scheduled(irn)) {
974 static void remove_unused_loads_walker(ir_node *irn, void *env) {
975 bitset_t *already_visited = env;
976 if (is_ia32_Ld(irn) && ! bitset_contains_irn(already_visited, irn))
977 remove_unused_nodes(irn, env);
981 * Called before the register allocator.
982 * Calculate a block schedule here. We need it for the x87
983 * simulator and the emitter.
985 static void ia32_before_ra(void *self) {
986 ia32_code_gen_t *cg = self;
987 bitset_t *already_visited = bitset_irg_alloca(cg->irg);
991 There are sometimes unused loads, only pinned by memory.
992 We need to remove those Loads and all other nodes which won't be used
993 after removing the Load from schedule.
995 irg_walk_graph(cg->irg, NULL, remove_unused_loads_walker, already_visited);
1000 * Transforms a be node into a Load.
1002 static void transform_to_Load(ia32_transform_env_t *env) {
1003 ir_node *irn = env->irn;
1004 entity *ent = be_get_frame_entity(irn);
1005 ir_mode *mode = env->mode;
1006 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
1007 ir_node *nomem = new_rd_NoMem(env->irg);
1008 ir_node *sched_point = NULL;
1009 ir_node *ptr = get_irn_n(irn, 0);
1010 ir_node *mem = be_is_Reload(irn) ? get_irn_n(irn, 1) : nomem;
1011 ir_node *new_op, *proj;
1012 const arch_register_t *reg;
1014 if (sched_is_scheduled(irn)) {
1015 sched_point = sched_prev(irn);
1018 if (mode_is_float(mode)) {
1019 if (USE_SSE2(env->cg))
1020 new_op = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, ptr, noreg, mem);
1022 new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem);
1025 new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem);
1028 set_ia32_am_support(new_op, ia32_am_Source);
1029 set_ia32_op_type(new_op, ia32_AddrModeS);
1030 set_ia32_am_flavour(new_op, ia32_B);
1031 set_ia32_ls_mode(new_op, mode);
1032 set_ia32_frame_ent(new_op, ent);
1033 set_ia32_use_frame(new_op);
1035 DBG_OPT_RELOAD2LD(irn, new_op);
1037 proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, pn_Load_res);
1040 sched_add_after(sched_point, new_op);
1041 sched_add_after(new_op, proj);
1046 /* copy the register from the old node to the new Load */
1047 reg = arch_get_irn_register(env->cg->arch_env, irn);
1048 arch_set_irn_register(env->cg->arch_env, new_op, reg);
1050 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, irn));
1052 exchange(irn, proj);
1056 * Transforms a be node into a Store.
1058 static void transform_to_Store(ia32_transform_env_t *env) {
1059 ir_node *irn = env->irn;
1060 entity *ent = be_get_frame_entity(irn);
1061 ir_mode *mode = env->mode;
1062 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
1063 ir_node *nomem = new_rd_NoMem(env->irg);
1064 ir_node *ptr = get_irn_n(irn, 0);
1065 ir_node *val = get_irn_n(irn, 1);
1066 ir_node *new_op, *proj;
1067 ir_node *sched_point = NULL;
1069 if (sched_is_scheduled(irn)) {
1070 sched_point = sched_prev(irn);
1073 if (mode_is_float(mode)) {
1074 if (USE_SSE2(env->cg))
1075 new_op = new_rd_ia32_xStore(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
1077 new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
1079 else if (get_mode_size_bits(mode) == 8) {
1080 new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
1083 new_op = new_rd_ia32_Store(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
1086 set_ia32_am_support(new_op, ia32_am_Dest);
1087 set_ia32_op_type(new_op, ia32_AddrModeD);
1088 set_ia32_am_flavour(new_op, ia32_B);
1089 set_ia32_ls_mode(new_op, mode);
1090 set_ia32_frame_ent(new_op, ent);
1091 set_ia32_use_frame(new_op);
1093 DBG_OPT_SPILL2ST(irn, new_op);
1095 proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode_M, pn_ia32_Store_M);
1098 sched_add_after(sched_point, new_op);
1102 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, irn));
1104 exchange(irn, proj);
1107 static ir_node *create_push(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, ir_node *mem, entity *ent) {
1108 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
1109 ir_node *frame = get_irg_frame(env->irg);
1111 ir_node *push = new_rd_ia32_Push(env->dbg, env->irg, env->block, frame, noreg, noreg, sp, mem);
1113 set_ia32_frame_ent(push, ent);
1114 set_ia32_use_frame(push);
1115 set_ia32_op_type(push, ia32_AddrModeS);
1116 set_ia32_am_flavour(push, ia32_B);
1117 set_ia32_ls_mode(push, mode_Is);
1119 sched_add_before(schedpoint, push);
1123 static ir_node *create_pop(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, entity *ent) {
1124 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
1125 ir_node *frame = get_irg_frame(env->irg);
1127 ir_node *pop = new_rd_ia32_Pop(env->dbg, env->irg, env->block, frame, noreg, sp, new_NoMem());
1129 set_ia32_frame_ent(pop, ent);
1130 set_ia32_use_frame(pop);
1131 set_ia32_op_type(pop, ia32_AddrModeD);
1132 set_ia32_am_flavour(pop, ia32_B);
1133 set_ia32_ls_mode(pop, mode_Is);
1135 sched_add_before(schedpoint, pop);
1140 static ir_node* create_spproj(ia32_transform_env_t *env, ir_node *pred, int pos, ir_node *schedpoint) {
1141 ir_mode *spmode = mode_Iu;
1142 const arch_register_t *spreg = &ia32_gp_regs[REG_ESP];
1145 sp = new_rd_Proj(env->dbg, env->irg, env->block, pred, spmode, pos);
1146 arch_set_irn_register(env->cg->arch_env, sp, spreg);
1147 sched_add_before(schedpoint, sp);
1153 * Transform MemPerm. Currently we do this the ugly way and produce
1154 * push/pop into/from memory cascades. This is possible without using any registers.
1157 static void transform_MemPerm(ia32_transform_env_t *env) {
1158 ir_node *node = env->irn;
1160 ir_node *sp = be_abi_get_ignore_irn(env->cg->birg->abi, &ia32_gp_regs[REG_ESP]);
1161 const ir_edge_t *edge;
1162 const ir_edge_t *next;
1165 arity = be_get_MemPerm_entity_arity(node);
1166 pops = alloca(arity * sizeof(pops[0]));
1169 for(i = 0; i < arity; ++i) {
1170 entity *ent = be_get_MemPerm_in_entity(node, i);
1171 ir_type *enttype = get_entity_type(ent);
1172 int entbits = get_type_size_bits(enttype);
1173 ir_node *mem = get_irn_n(node, i + 1);
1176 assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");
1178 push = create_push(env, node, sp, mem, ent);
1179 sp = create_spproj(env, push, 0, node);
1181 // add another push after the first one
1182 push = create_push(env, node, sp, mem, ent);
1183 add_ia32_am_offs_int(push, 4);
1184 sp = create_spproj(env, push, 0, node);
1187 set_irn_n(node, i, new_Bad());
1191 for(i = arity - 1; i >= 0; --i) {
1192 entity *ent = be_get_MemPerm_out_entity(node, i);
1193 ir_type *enttype = get_entity_type(ent);
1194 int entbits = get_type_size_bits(enttype);
1198 assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");
1200 pop = create_pop(env, node, sp, ent);
1202 // add another pop after the first one
1203 sp = create_spproj(env, pop, 1, node);
1204 pop = create_pop(env, node, sp, ent);
1205 add_ia32_am_offs_int(pop, 4);
1207 sp = create_spproj(env, pop, 1, node);
1212 // exchange memprojs
1213 foreach_out_edge_safe(node, edge, next) {
1214 ir_node *proj = get_edge_src_irn(edge);
1215 int p = get_Proj_proj(proj);
1219 set_Proj_pred(proj, pops[p]);
1220 set_Proj_proj(proj, 3);
1224 arity = get_irn_arity(node);
1225 for(i = 0; i < arity; ++i) {
1226 set_irn_n(node, i, new_Bad());
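/*
 * Worked example (illustrative): for a MemPerm over two 32 bit spill slots the
 * loops above push the contents of all input slots and pop them into the
 * output slots in reverse index order, so the LIFO stack realises the
 * permutation without using a register:
 *
 *     push dword [in0]
 *     push dword [in1]
 *     pop  dword [out1]
 *     pop  dword [out0]
 *
 * 64 bit entities additionally get a second push/pop at offset +4, as above.
 */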
1232 * Fix the mode of Spill/Reload
1234 static ir_mode *fix_spill_mode(ia32_code_gen_t *cg, ir_mode *mode)
1236 if (mode_is_float(mode)) {
1248 * Block-Walker: Calls the transform functions Spill and Reload.
1250 static void ia32_after_ra_walker(ir_node *block, void *env) {
1251 ir_node *node, *prev;
1252 ia32_code_gen_t *cg = env;
1253 ia32_transform_env_t tenv;
1256 tenv.irg = current_ir_graph;
1258 DEBUG_ONLY(tenv.mod = cg->mod;)
1260 /* beware: the schedule is changed here */
1261 for (node = sched_last(block); !sched_is_begin(node); node = prev) {
1262 prev = sched_prev(node);
1263 if (be_is_Reload(node)) {
1264 /* we always reload the whole register */
1265 tenv.dbg = get_irn_dbg_info(node);
1267 tenv.mode = fix_spill_mode(cg, get_irn_mode(node));
1268 transform_to_Load(&tenv);
1270 else if (be_is_Spill(node)) {
1271 ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
1272 /* we always spill the whole register */
1273 tenv.dbg = get_irn_dbg_info(node);
1275 tenv.mode = fix_spill_mode(cg, get_irn_mode(spillval));
1276 transform_to_Store(&tenv);
1278 else if(be_is_MemPerm(node)) {
1279 tenv.dbg = get_irn_dbg_info(node);
1281 transform_MemPerm(&tenv);
1287 * We transform Spill and Reload here. This needs to be done before
1288 * stack biasing otherwise we would miss the corrected offset for these nodes.
1290 * If x87 instructions should be emitted, run the x87 simulator and patch
1291 * the virtual instructions. This must obviously be done after register allocation.
1293 static void ia32_after_ra(void *self) {
1294 ia32_code_gen_t *cg = self;
1295 ir_graph *irg = cg->irg;
1297 irg_block_walk_graph(irg, NULL, ia32_after_ra_walker, cg);
1299 ia32_finish_irg(irg, cg);
1303 * Last touchups for the graph before emit
1305 static void ia32_finish(void *self) {
1306 ia32_code_gen_t *cg = self;
1307 ir_graph *irg = cg->irg;
1309 // Matze: disabled for now, as the irextbb algo sometimes returns extbb in
1310 // the wrong order if the graph has critical edges
1311 be_remove_empty_blocks(irg);
1313 cg->blk_sched = sched_create_block_schedule(cg->irg, cg->birg->execfreqs);
1315 /* if we do x87 code generation, rewrite all the virtual instructions and registers */
1316 if (cg->used_fp == fp_x87 || cg->force_sim) {
1317 x87_simulate_graph(cg->arch_env, irg, cg->blk_sched);
1320 ia32_peephole_optimization(irg, cg);
1324 * Emits the code, closes the output file and frees
1325 * the code generator interface.
1327 static void ia32_codegen(void *self) {
1328 ia32_code_gen_t *cg = self;
1329 ir_graph *irg = cg->irg;
1331 ia32_gen_routine(cg->isa->out, irg, cg);
1335 /* remove it from the isa */
1338 /* de-allocate code generator */
1339 del_set(cg->reg_set);
1343 static void *ia32_cg_init(const be_irg_t *birg);
1345 static const arch_code_generator_if_t ia32_code_gen_if = {
1347 NULL, /* before abi introduce hook */
1349 ia32_before_sched, /* before scheduling hook */
1350 ia32_before_ra, /* before register allocation hook */
1351 ia32_after_ra, /* after register allocation hook */
1352 ia32_finish, /* called before codegen */
1353 ia32_codegen /* emit && done */
1357 * Initializes an IA32 code generator.
1359 static void *ia32_cg_init(const be_irg_t *birg) {
1360 ia32_isa_t *isa = (ia32_isa_t *)birg->main_env->arch_env->isa;
1361 ia32_code_gen_t *cg = xcalloc(1, sizeof(*cg));
1363 cg->impl = &ia32_code_gen_if;
1364 cg->irg = birg->irg;
1365 cg->reg_set = new_set(ia32_cmp_irn_reg_assoc, 1024);
1366 cg->arch_env = birg->main_env->arch_env;
1369 cg->blk_sched = NULL;
1370 cg->fp_to_gp = NULL;
1371 cg->gp_to_fp = NULL;
1372 cg->fp_kind = isa->fp_kind;
1373 cg->used_fp = fp_none;
1374 cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
1376 FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.cg");
1378 /* copy optimizations from isa for easier access */
1380 cg->arch = isa->arch;
1381 cg->opt_arch = isa->opt_arch;
1387 if (isa->name_obst_size) {
1388 //printf("freed %d bytes from name obst\n", isa->name_obst_size);
1389 isa->name_obst_size = 0;
1390 obstack_free(isa->name_obst, NULL);
1391 obstack_init(isa->name_obst);
1395 cur_reg_set = cg->reg_set;
1397 ia32_irn_ops.cg = cg;
1399 return (arch_code_generator_t *)cg;
1404 /*****************************************************************
1405 * ____ _ _ _____ _____
1406 * | _ \ | | | | |_ _|/ ____| /\
1407 * | |_) | __ _ ___| | _____ _ __ __| | | | | (___ / \
1408 * | _ < / _` |/ __| |/ / _ \ '_ \ / _` | | | \___ \ / /\ \
1409 * | |_) | (_| | (__| < __/ | | | (_| | _| |_ ____) / ____ \
1410 * |____/ \__,_|\___|_|\_\___|_| |_|\__,_| |_____|_____/_/ \_\
1412 *****************************************************************/
1415 * Set output modes for GCC
1417 static const tarval_mode_info mo_integer = {
1424 * set the tarval output mode of all integer modes to decimal
1426 static void set_tarval_output_modes(void)
1430 for (i = get_irp_n_modes() - 1; i >= 0; --i) {
1431 ir_mode *mode = get_irp_mode(i);
1433 if (mode_is_int(mode))
1434 set_tarval_mode_output_option(mode, &mo_integer);
1440 * The template that generates a new ISA object.
1441 * Note that this template can be changed by command line arguments.
1444 static ia32_isa_t ia32_isa_template = {
1446 &ia32_isa_if, /* isa interface implementation */
1447 &ia32_gp_regs[REG_ESP], /* stack pointer register */
1448 &ia32_gp_regs[REG_EBP], /* base pointer register */
1449 -1, /* stack direction */
1450 NULL, /* main environment */
1452 NULL, /* 16bit register names */
1453 NULL, /* 8bit register names */
1457 IA32_OPT_INCDEC | /* optimize add 1, sub 1 into inc/dec default: on */
1458 IA32_OPT_DOAM | /* optimize address mode default: on */
1459 IA32_OPT_LEA | /* optimize for LEAs default: on */
1460 IA32_OPT_PLACECNST | /* place constants immediately before instructions, default: on */
1461 IA32_OPT_IMMOPS | /* operations can use immediates, default: on */
1462 IA32_OPT_EXTBB), /* use extended basic block scheduling, default: on */
1463 arch_pentium_4, /* instruction architecture */
1464 arch_pentium_4, /* optimize for architecture */
1465 fp_sse2, /* use sse2 unit */
1466 NULL, /* current code generator */
1467 NULL, /* output file */
1469 NULL, /* name obstack */
1470 0 /* name obst size */
1475 * Initializes the backend ISA.
1477 static void *ia32_init(FILE *file_handle) {
1478 static int inited = 0;
1484 set_tarval_output_modes();
1486 isa = xmalloc(sizeof(*isa));
1487 memcpy(isa, &ia32_isa_template, sizeof(*isa));
1489 ia32_register_init(isa);
1490 ia32_create_opcodes();
1492 if ((ARCH_INTEL(isa->arch) && isa->arch < arch_pentium_4) ||
1493 (ARCH_AMD(isa->arch) && isa->arch < arch_athlon))
1494 /* no SSE2 for these CPUs */
1495 isa->fp_kind = fp_x87;
1497 if (ARCH_INTEL(isa->opt_arch) && isa->opt_arch >= arch_pentium_4) {
1498 /* the Pentium 4 doesn't like inc and dec instructions */
1499 isa->opt &= ~IA32_OPT_INCDEC;
1502 isa->regs_16bit = pmap_create();
1503 isa->regs_8bit = pmap_create();
1504 isa->types = pmap_create();
1505 isa->tv_ent = pmap_create();
1506 isa->out = file_handle;
1508 ia32_build_16bit_reg_map(isa->regs_16bit);
1509 ia32_build_8bit_reg_map(isa->regs_8bit);
1511 /* patch register names of x87 registers */
1513 ia32_st_regs[0].name = "st";
1514 ia32_st_regs[1].name = "st(1)";
1515 ia32_st_regs[2].name = "st(2)";
1516 ia32_st_regs[3].name = "st(3)";
1517 ia32_st_regs[4].name = "st(4)";
1518 ia32_st_regs[5].name = "st(5)";
1519 ia32_st_regs[6].name = "st(6)";
1520 ia32_st_regs[7].name = "st(7)";
1524 isa->name_obst = xmalloc(sizeof(*isa->name_obst));
1525 obstack_init(isa->name_obst);
1526 isa->name_obst_size = 0;
1529 ia32_handle_intrinsics();
1530 ia32_switch_section(isa->out, NO_SECTION);
1531 fprintf(isa->out, "\t.intel_syntax\n");
1533 /* needed for the debug support */
1534 ia32_switch_section(isa->out, SECTION_TEXT);
1535 fprintf(isa->out, ".Ltext0:\n");
1545 * Closes the output file and frees the ISA structure.
1547 static void ia32_done(void *self) {
1548 ia32_isa_t *isa = self;
1550 /* now emit all global declarations */
1551 ia32_gen_decls(isa->out, isa->arch_isa.main_env);
1553 pmap_destroy(isa->regs_16bit);
1554 pmap_destroy(isa->regs_8bit);
1555 pmap_destroy(isa->tv_ent);
1556 pmap_destroy(isa->types);
1559 //printf("name obst size = %d bytes\n", isa->name_obst_size);
1560 obstack_free(isa->name_obst, NULL);
1568 * Return the number of register classes for this architecture.
1569 * We always report these:
1570 * - the general purpose registers
1571 * - the SSE floating point register set
1572 * - the virtual floating point registers
1574 static int ia32_get_n_reg_class(const void *self) {
1579 * Return the register class for index i.
1581 static const arch_register_class_t *ia32_get_reg_class(const void *self, int i) {
1582 assert(i >= 0 && i < 3 && "Invalid ia32 register class requested.");
1584 return &ia32_reg_classes[CLASS_ia32_gp];
1586 return &ia32_reg_classes[CLASS_ia32_xmm];
1588 return &ia32_reg_classes[CLASS_ia32_vfp];
1592 * Get the register class which shall be used to store a value of a given mode.
1593 * @param self The this pointer.
1594 * @param mode The mode in question.
1595 * @return A register class which can hold values of the given mode.
1597 const arch_register_class_t *ia32_get_reg_class_for_mode(const void *self, const ir_mode *mode) {
1598 const ia32_isa_t *isa = self;
1599 if (mode_is_float(mode)) {
1600 return USE_SSE2(isa) ? &ia32_reg_classes[CLASS_ia32_xmm] : &ia32_reg_classes[CLASS_ia32_vfp];
1603 return &ia32_reg_classes[CLASS_ia32_gp];
1607 * Get the ABI restrictions for procedure calls.
1608 * @param self The this pointer.
1609 * @param method_type The type of the method (procedure) in question.
1610 * @param abi The abi object to be modified
1612 static void ia32_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi) {
1613 const ia32_isa_t *isa = self;
1616 unsigned cc = get_method_calling_convention(method_type);
1617 int n = get_method_n_params(method_type);
1620 int i, ignore_1, ignore_2;
1622 const arch_register_t *reg;
1623 be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);
1625 unsigned use_push = !IS_P6_ARCH(isa->opt_arch);
1627 /* set abi flags for calls */
1628 call_flags.bits.left_to_right = 0; /* always last arg first on stack */
1629 call_flags.bits.store_args_sequential = use_push;
1630 /* call_flags.bits.try_omit_fp not changed: can handle both settings */
1631 call_flags.bits.fp_free = 0; /* the frame pointer is fixed in IA32 */
1632 call_flags.bits.call_has_imm = 1; /* IA32 calls can have immediate address */
1634 /* set stack parameter passing style */
1635 be_abi_call_set_flags(abi, call_flags, &ia32_abi_callbacks);
1637 /* collect the mode for each type */
1638 modes = alloca(n * sizeof(modes[0]));
1640 for (i = 0; i < n; i++) {
1641 tp = get_method_param_type(method_type, i);
1642 modes[i] = get_type_mode(tp);
1645 /* set register parameters */
1646 if (cc & cc_reg_param) {
1647 /* determine the number of parameters passed via registers */
1648 biggest_n = ia32_get_n_regparam_class(n, modes, &ignore_1, &ignore_2);
1650 /* loop over all parameters and set the register requirements */
1651 for (i = 0; i <= biggest_n; i++) {
1652 reg = ia32_get_RegParam_reg(n, modes, i, cc);
1653 assert(reg && "kaputt");
1654 be_abi_call_param_reg(abi, i, reg);
1661 /* set stack parameters */
1662 for (i = stack_idx; i < n; i++) {
1663 /* parameters on the stack are 32 bit aligned */
1664 be_abi_call_param_stack(abi, i, 4, 0, 0);
1668 /* set return registers */
1669 n = get_method_n_ress(method_type);
1671 assert(n <= 2 && "more than two results not supported");
1673 /* In case of 64bit returns, we will have two 32bit values */
1675 tp = get_method_res_type(method_type, 0);
1676 mode = get_type_mode(tp);
1678 assert(!mode_is_float(mode) && "two FP results not supported");
1680 tp = get_method_res_type(method_type, 1);
1681 mode = get_type_mode(tp);
1683 assert(!mode_is_float(mode) && "mixed INT, FP results not supported");
1685 be_abi_call_res_reg(abi, 0, &ia32_gp_regs[REG_EAX]);
1686 be_abi_call_res_reg(abi, 1, &ia32_gp_regs[REG_EDX]);
1689 const arch_register_t *reg;
1691 tp = get_method_res_type(method_type, 0);
1692 assert(is_atomic_type(tp));
1693 mode = get_type_mode(tp);
1695 reg = mode_is_float(mode) ? &ia32_vfp_regs[REG_VF0] : &ia32_gp_regs[REG_EAX];
1697 be_abi_call_res_reg(abi, 0, reg);
1702 static const void *ia32_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn) {
1703 return &ia32_irn_ops;
1706 const arch_irn_handler_t ia32_irn_handler = {
1710 const arch_irn_handler_t *ia32_get_irn_handler(const void *self) {
1711 return &ia32_irn_handler;
1714 int ia32_to_appear_in_schedule(void *block_env, const ir_node *irn) {
1715 return is_ia32_irn(irn) ? 1 : -1;
1719 * Returns the code generator interface.
1721 static const arch_code_generator_if_t *ia32_get_code_generator_if(void *self) {
1722 return &ia32_code_gen_if;
1726 * Returns the estimated execution time of an ia32 irn.
1728 static sched_timestep_t ia32_sched_exectime(void *env, const ir_node *irn) {
1729 const arch_env_t *arch_env = env;
1730 return is_ia32_irn(irn) ? ia32_get_op_estimated_cost(arch_get_irn_ops(arch_env, irn), irn) : 1;
1733 list_sched_selector_t ia32_sched_selector;
1736 * Returns the reg_pressure scheduler with exectime() and to_appear_in_schedule() overloaded
1738 static const list_sched_selector_t *ia32_get_list_sched_selector(const void *self, list_sched_selector_t *selector) {
1739 memcpy(&ia32_sched_selector, selector, sizeof(ia32_sched_selector));
1740 ia32_sched_selector.exectime = ia32_sched_exectime;
1741 ia32_sched_selector.to_appear_in_schedule = ia32_to_appear_in_schedule;
1742 return &ia32_sched_selector;
1746 * Returns the necessary byte alignment for storing a register of a given class.
1748 static int ia32_get_reg_class_alignment(const void *self, const arch_register_class_t *cls) {
1749 ir_mode *mode = arch_register_class_mode(cls);
1750 int bytes = get_mode_size_bytes(mode);
1752 if (mode_is_float(mode) && bytes > 8)
1757 static ia32_intrinsic_env_t intrinsic_env = { NULL, NULL };
1760 * Returns the libFirm configuration parameters for this backend.
1762 static const backend_params *ia32_get_libfirm_params(void) {
1763 static const arch_dep_params_t ad = {
1764 1, /* also use subs */
1765 4, /* maximum shifts */
1766 31, /* maximum shift amount */
1768 1, /* allow Mulhs */
1769 1, /* allow Mulus */
1770 32 /* Mulh allowed up to 32 bit */
1772 static backend_params p = {
1773 NULL, /* no additional opcodes */
1774 NULL, /* will be set later */
1775 1, /* need dword lowering */
1776 ia32_create_intrinsic_fkt,
1777 &intrinsic_env, /* context for ia32_create_intrinsic_fkt */
1785 /* instruction set architectures. */
1786 static const lc_opt_enum_int_items_t arch_items[] = {
1787 { "386", arch_i386, },
1788 { "486", arch_i486, },
1789 { "pentium", arch_pentium, },
1790 { "586", arch_pentium, },
1791 { "pentiumpro", arch_pentium_pro, },
1792 { "686", arch_pentium_pro, },
1793 { "pentiummmx", arch_pentium_mmx, },
1794 { "pentium2", arch_pentium_2, },
1795 { "p2", arch_pentium_2, },
1796 { "pentium3", arch_pentium_3, },
1797 { "p3", arch_pentium_3, },
1798 { "pentium4", arch_pentium_4, },
1799 { "p4", arch_pentium_4, },
1800 { "pentiumm", arch_pentium_m, },
1801 { "pm", arch_pentium_m, },
1802 { "core", arch_core, },
1804 { "athlon", arch_athlon, },
1805 { "athlon64", arch_athlon_64, },
1806 { "opteron", arch_opteron, },
1810 static lc_opt_enum_int_var_t arch_var = {
1811 &ia32_isa_template.arch, arch_items
1814 static lc_opt_enum_int_var_t opt_arch_var = {
1815 &ia32_isa_template.opt_arch, arch_items
1818 static const lc_opt_enum_int_items_t fp_unit_items[] = {
1820 { "sse2", fp_sse2 },
1824 static lc_opt_enum_int_var_t fp_unit_var = {
1825 &ia32_isa_template.fp_kind, fp_unit_items
1828 static const lc_opt_enum_int_items_t gas_items[] = {
1829 { "linux", ASM_LINUX_GAS },
1830 { "mingw", ASM_MINGW_GAS },
1834 static lc_opt_enum_int_var_t gas_var = {
1835 (int *)&asm_flavour, gas_items
1838 static const lc_opt_table_entry_t ia32_options[] = {
1839 LC_OPT_ENT_ENUM_INT("arch", "select the instruction architecture", &arch_var),
1840 LC_OPT_ENT_ENUM_INT("opt", "optimize for instruction architecture", &opt_arch_var),
1841 LC_OPT_ENT_ENUM_INT("fpunit", "select the floating point unit", &fp_unit_var),
1842 LC_OPT_ENT_NEGBIT("noaddrmode", "do not use address mode", &ia32_isa_template.opt, IA32_OPT_DOAM),
1843 LC_OPT_ENT_NEGBIT("nolea", "do not optimize for LEAs", &ia32_isa_template.opt, IA32_OPT_LEA),
1844 LC_OPT_ENT_NEGBIT("noplacecnst", "do not place constants", &ia32_isa_template.opt, IA32_OPT_PLACECNST),
1845 LC_OPT_ENT_NEGBIT("noimmop", "no operations with immediates", &ia32_isa_template.opt, IA32_OPT_IMMOPS),
1846 LC_OPT_ENT_NEGBIT("noextbb", "do not use extended basic block scheduling", &ia32_isa_template.opt, IA32_OPT_EXTBB),
1847 LC_OPT_ENT_ENUM_INT("gasmode", "set the GAS compatibility mode", &gas_var),
1852 * Register command line options for the ia32 backend.
1856 * ia32-arch=arch create instruction for arch
1857 * ia32-opt=arch optimize for run on arch
1858 * ia32-fpunit=unit select floating point unit (x87 or SSE2)
1859 * ia32-incdec optimize for inc/dec
1860 * ia32-noaddrmode do not use address mode
1861 * ia32-nolea do not optimize for LEAs
1862 * ia32-noplacecnst do not place constants,
1863 * ia32-noimmop no operations with immediates
1864 * ia32-noextbb do not use extended basic block scheduling
1865 * ia32-gasmode set the GAS compatibility mode
1867 static void ia32_register_options(lc_opt_entry_t *ent)
1869 lc_opt_entry_t *be_grp_ia32 = lc_opt_get_grp(ent, "ia32");
1870 lc_opt_add_table(be_grp_ia32, ia32_options);
1872 #endif /* WITH_LIBCORE */
1874 const arch_isa_if_t ia32_isa_if = {
1877 ia32_get_n_reg_class,
1879 ia32_get_reg_class_for_mode,
1881 ia32_get_irn_handler,
1882 ia32_get_code_generator_if,
1883 ia32_get_list_sched_selector,
1884 ia32_get_reg_class_alignment,
1885 ia32_get_libfirm_params,
1887 ia32_register_options