2 * This is the main ia32 firm backend driver.
3 * @author Christian Wuerdig
10 #include <libcore/lc_opts.h>
11 #include <libcore/lc_opts_enum.h>
15 #include "pseudo_irg.h"
19 #include "iredges_t.h"
32 #include "../beirg_t.h"
33 #include "../benode_t.h"
34 #include "../belower.h"
35 #include "../besched_t.h"
38 #include "../beirgmod.h"
39 #include "../be_dbgout.h"
40 #include "../beblocksched.h"
41 #include "../bemachine.h"
42 #include "../beilpsched.h"
43 #include "../bespillslots.h"
44 #include "../bemodule.h"
45 #include "../begnuas.h"
46 #include "../bestate.h"
48 #include "bearch_ia32_t.h"
50 #include "ia32_new_nodes.h"
51 #include "gen_ia32_regalloc_if.h"
52 #include "gen_ia32_machine.h"
53 #include "ia32_transform.h"
54 #include "ia32_emitter.h"
55 #include "ia32_map_regs.h"
56 #include "ia32_optimize.h"
58 #include "ia32_dbg_stat.h"
59 #include "ia32_finish.h"
60 #include "ia32_util.h"
63 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
66 static set *cur_reg_set = NULL;
68 ir_mode *mode_fpcw = NULL;
70 typedef ir_node *(*create_const_node_func) (dbg_info *dbg, ir_graph *irg, ir_node *block);
72 static INLINE ir_node *create_const(ia32_code_gen_t *cg, ir_node **place,
73 create_const_node_func func,
74 const arch_register_t* reg)
81 block = get_irg_start_block(cg->irg);
82 res = func(NULL, cg->irg, block);
83 arch_set_irn_register(cg->arch_env, res, reg);
87 /* keep the node so it isn't accidentally removed when unused ... */
89 keep = be_new_Keep(arch_register_get_class(reg), cg->irg, block, 1, in);
91 add_irn_dep(get_irg_end(cg->irg), res);
92 /* add_irn_dep(get_irg_start(cg->irg), res); */
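	/* Both techniques keep the constant alive: the be_Keep node gives it an
	 * explicit user, and the dependency edge anchors it at the graph's End
	 * node, so dead node elimination cannot remove it. */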
97 /* Creates the unique per irg GP NoReg node. */
98 ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg) {
99 return create_const(cg, &cg->noreg_gp, new_rd_ia32_NoReg_GP,
100 &ia32_gp_regs[REG_GP_NOREG]);
103 ir_node *ia32_new_NoReg_vfp(ia32_code_gen_t *cg) {
104 return create_const(cg, &cg->noreg_vfp, new_rd_ia32_NoReg_VFP,
105 &ia32_vfp_regs[REG_VFP_NOREG]);
108 ir_node *ia32_new_NoReg_xmm(ia32_code_gen_t *cg) {
109 return create_const(cg, &cg->noreg_xmm, new_rd_ia32_NoReg_XMM,
110 &ia32_xmm_regs[REG_XMM_NOREG]);
113 /* Creates the unique per irg FP NoReg node. */
114 ir_node *ia32_new_NoReg_fp(ia32_code_gen_t *cg) {
115 return USE_SSE2(cg) ? ia32_new_NoReg_xmm(cg) : ia32_new_NoReg_vfp(cg);
118 ir_node *ia32_new_Unknown_gp(ia32_code_gen_t *cg) {
119 return create_const(cg, &cg->unknown_gp, new_rd_ia32_Unknown_GP,
120 &ia32_gp_regs[REG_GP_UKNWN]);
123 ir_node *ia32_new_Unknown_vfp(ia32_code_gen_t *cg) {
124 return create_const(cg, &cg->unknown_vfp, new_rd_ia32_Unknown_VFP,
125 &ia32_vfp_regs[REG_VFP_UKNWN]);
128 ir_node *ia32_new_Unknown_xmm(ia32_code_gen_t *cg) {
129 return create_const(cg, &cg->unknown_xmm, new_rd_ia32_Unknown_XMM,
130 &ia32_xmm_regs[REG_XMM_UKNWN]);
133 ir_node *ia32_new_Fpu_truncate(ia32_code_gen_t *cg) {
134 return create_const(cg, &cg->fpu_trunc_mode, new_rd_ia32_ChangeCW,
135 &ia32_fp_cw_regs[REG_FPCW]);
140 * Returns gp_noreg or fp_noreg, depending on the input requirements.
142 ir_node *ia32_get_admissible_noreg(ia32_code_gen_t *cg, ir_node *irn, int pos) {
143 const arch_register_req_t *req;
145 req = arch_get_register_req(cg->arch_env, irn, pos);
146 assert(req != NULL && "Missing register requirements");
147 if (req->cls == &ia32_reg_classes[CLASS_ia32_gp])
148 return ia32_new_NoReg_gp(cg);
150 return ia32_new_NoReg_fp(cg);
153 /**************************************************
156 * _ __ ___ __ _ __ _| | | ___ ___ _| |_
157 * | '__/ _ \/ _` | / _` | | |/ _ \ / __| | | _|
158 * | | | __/ (_| | | (_| | | | (_) | (__ | | |
159 * |_| \___|\__, | \__,_|_|_|\___/ \___| |_|_|
162 **************************************************/
165 * Return register requirements for an ia32 node.
166 * If the node returns a tuple (mode_T) then the Projs
167 * are asked for this information.
169 static const arch_register_req_t *ia32_get_irn_reg_req(const void *self,
172 long node_pos = pos == -1 ? 0 : pos;
173 ir_mode *mode = is_Block(node) ? NULL : get_irn_mode(node);
175 if (is_Block(node) || mode == mode_X) {
176 return arch_no_register_req;
179 if (mode == mode_T && pos < 0) {
180 return arch_no_register_req;
185 return arch_no_register_req;
188 return arch_no_register_req;
191 node_pos = (pos == -1) ? get_Proj_proj(node) : pos;
192 node = skip_Proj_const(node);
195 if (is_ia32_irn(node)) {
196 const arch_register_req_t *req;
198 req = get_ia32_in_req(node, pos);
200 req = get_ia32_out_req(node, node_pos);
207 /* unknowns should be transformed already */
208 assert(!is_Unknown(node));
210 return arch_no_register_req;
213 static void ia32_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg) {
216 if (get_irn_mode(irn) == mode_X) {
221 pos = get_Proj_proj(irn);
222 irn = skip_Proj(irn);
225 if (is_ia32_irn(irn)) {
226 const arch_register_t **slots;
228 slots = get_ia32_slots(irn);
231 ia32_set_firm_reg(irn, reg, cur_reg_set);
235 static const arch_register_t *ia32_get_irn_reg(const void *self, const ir_node *irn) {
237 const arch_register_t *reg = NULL;
241 if (get_irn_mode(irn) == mode_X) {
245 pos = get_Proj_proj(irn);
246 irn = skip_Proj_const(irn);
249 if (is_ia32_irn(irn)) {
250 const arch_register_t **slots;
251 slots = get_ia32_slots(irn);
254 reg = ia32_get_firm_reg(irn, cur_reg_set);
260 static arch_irn_class_t ia32_classify(const void *self, const ir_node *irn) {
261 arch_irn_class_t classification = arch_irn_class_normal;
263 irn = skip_Proj_const(irn);
266 classification |= arch_irn_class_branch;
268 if (! is_ia32_irn(irn))
269 return classification & ~arch_irn_class_normal;
271 if (is_ia32_Cnst(irn))
272 classification |= arch_irn_class_const;
275 classification |= arch_irn_class_load;
277 if (is_ia32_St(irn) || is_ia32_Store8Bit(irn))
278 classification |= arch_irn_class_store;
280 if (is_ia32_need_stackent(irn))
281 classification |= arch_irn_class_reload;
283 return classification;
286 static arch_irn_flags_t ia32_get_flags(const void *self, const ir_node *irn) {
287 arch_irn_flags_t flags = arch_irn_flags_none;
290 return arch_irn_flags_ignore;
292 if(is_Proj(irn) && mode_is_datab(get_irn_mode(irn))) {
293 ir_node *pred = get_Proj_pred(irn);
295 if(is_ia32_irn(pred)) {
296 flags = get_ia32_out_flags(pred, get_Proj_proj(irn));
302 if (is_ia32_irn(irn)) {
303 flags |= get_ia32_flags(irn);
310 * The IA32 ABI callback object.
313 be_abi_call_flags_bits_t flags; /**< The call flags. */
314 const arch_isa_t *isa; /**< The ISA handle. */
315 const arch_env_t *aenv; /**< The architecture environment. */
316 ir_graph *irg; /**< The associated graph. */
319 static ir_entity *ia32_get_frame_entity(const void *self, const ir_node *irn) {
320 return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
323 static void ia32_set_frame_entity(const void *self, ir_node *irn, ir_entity *ent) {
324 set_ia32_frame_ent(irn, ent);
327 static void ia32_set_frame_offset(const void *self, ir_node *irn, int bias) {
328 const ia32_irn_ops_t *ops = self;
330 if (get_ia32_frame_ent(irn)) {
331 ia32_am_flavour_t am_flav;
333 if (is_ia32_Pop(irn)) {
334 int omit_fp = be_abi_omit_fp(ops->cg->birg->abi);
336 /* Pop nodes modify the stack pointer before calculating the destination
337 * address, so fix this here
343 am_flav = get_ia32_am_flavour(irn);
345 set_ia32_am_flavour(irn, am_flav);
347 add_ia32_am_offs_int(irn, bias);
351 static int ia32_get_sp_bias(const void *self, const ir_node *irn) {
353 long proj = get_Proj_proj(irn);
354 ir_node *pred = get_Proj_pred(irn);
356 if (is_ia32_Push(pred) && proj == pn_ia32_Push_stack)
358 if (is_ia32_Pop(pred) && proj == pn_ia32_Pop_stack)
366 * Put all registers which are saved by the prologue/epilogue in a set.
368 * @param self The callback object.
369 * @param s The result set.
371 static void ia32_abi_dont_save_regs(void *self, pset *s)
373 ia32_abi_env_t *env = self;
374 if(env->flags.try_omit_fp)
375 pset_insert_ptr(s, env->isa->bp);
379 * Generate the routine prologue.
381 * @param self The callback object.
382 * @param mem A pointer to the mem node. Update this if you define new memory.
383 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
385 * @return The register which shall be used as a stack frame base.
387 * All nodes which define registers in @p reg_map must keep @p reg_map current.
389 static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map)
391 ia32_abi_env_t *env = self;
392 const ia32_isa_t *isa = (ia32_isa_t *)env->isa;
393 ia32_code_gen_t *cg = isa->cg;
395 if (! env->flags.try_omit_fp) {
396 ir_node *bl = get_irg_start_block(env->irg);
397 ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
398 ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);
399 ir_node *noreg = ia32_new_NoReg_gp(cg);
403 push = new_rd_ia32_Push(NULL, env->irg, bl, noreg, noreg, curr_bp, curr_sp, *mem);
404 curr_sp = new_r_Proj(env->irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
405 *mem = new_r_Proj(env->irg, bl, push, mode_M, pn_ia32_Push_M);
407 /* the push must have SP out register */
408 arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
409 set_ia32_flags(push, arch_irn_flags_ignore);
411 /* move esp to ebp */
412 curr_bp = be_new_Copy(env->isa->bp->reg_class, env->irg, bl, curr_sp);
413 be_set_constr_single_reg(curr_bp, BE_OUT_POS(0), env->isa->bp);
414 arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
415 be_node_set_flags(curr_bp, BE_OUT_POS(0), arch_irn_flags_ignore);
417 /* beware: the copy must be done before any other sp use */
418 curr_sp = be_new_CopyKeep_single(env->isa->sp->reg_class, env->irg, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
419 be_set_constr_single_reg(curr_sp, BE_OUT_POS(0), env->isa->sp);
420 arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
421 be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore);
423 be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
424 be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);
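		/* The net effect of the code above is the classic ia32 frame setup
		 * (a sketch; register names as finally assigned):
		 *
		 *     push ebp          ; spill the old frame pointer
		 *     mov  ebp, esp     ; let ebp point into the new frame
		 */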
433 * Generate the routine epilogue.
434 * @param self The callback object.
435 * @param bl The block for the epilog
436 * @param mem A pointer to the mem node. Update this if you define new memory.
437 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
438 * @return The register which shall be used as a stack frame base.
440 * All nodes which define registers in @p reg_map must keep @p reg_map current.
442 static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map)
444 ia32_abi_env_t *env = self;
445 ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
446 ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);
448 if (env->flags.try_omit_fp) {
449 /* simply remove the stack frame here */
450 curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK);
451 add_irn_dep(curr_sp, *mem);
453 const ia32_isa_t *isa = (ia32_isa_t *)env->isa;
454 ia32_code_gen_t *cg = isa->cg;
455 ir_mode *mode_bp = env->isa->bp->reg_class->mode;
457 /* gcc always emits a leave at the end of a routine */
458 if (1 || ARCH_AMD(isa->opt_arch)) {
462 leave = new_rd_ia32_Leave(NULL, env->irg, bl, curr_sp, curr_bp);
463 set_ia32_flags(leave, arch_irn_flags_ignore);
464 curr_bp = new_r_Proj(current_ir_graph, bl, leave, mode_bp, pn_ia32_Leave_frame);
465 curr_sp = new_r_Proj(current_ir_graph, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
467 ir_node *noreg = ia32_new_NoReg_gp(cg);
470 /* copy ebp to esp */
471 curr_sp = be_new_SetSP(env->isa->sp, env->irg, bl, curr_sp, curr_bp, *mem);
474 pop = new_rd_ia32_Pop(NULL, env->irg, bl, noreg, noreg, curr_sp, *mem);
475 set_ia32_flags(pop, arch_irn_flags_ignore);
476 curr_bp = new_r_Proj(current_ir_graph, bl, pop, mode_bp, pn_ia32_Pop_res);
477 curr_sp = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
479 *mem = new_r_Proj(current_ir_graph, bl, pop, mode_M, pn_ia32_Pop_M);
481 arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
482 arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
485 be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
486 be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);
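	/* Summary (a sketch): with a frame pointer the epilogue boils down to
	 * either a single "leave" or the equivalent "mov esp, ebp; pop ebp"
	 * sequence; without a frame pointer an IncSP simply shrinks the frame. */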
490 * Initialize the callback object.
491 * @param call The call object.
492 * @param aenv The architecture environment.
493 * @param irg The graph with the method.
494 * @return Some pointer. This pointer is passed to all other callback functions as self object.
496 static void *ia32_abi_init(const be_abi_call_t *call, const arch_env_t *aenv, ir_graph *irg)
498 ia32_abi_env_t *env = xmalloc(sizeof(env[0]));
499 be_abi_call_flags_t fl = be_abi_call_get_flags(call);
500 env->flags = fl.bits;
503 env->isa = aenv->isa;
508 * Destroy the callback object.
509 * @param self The callback object.
511 static void ia32_abi_done(void *self) {
516 * Produces the type which sits between the stack args and the locals on the stack.
517 * It will contain the return address and space to store the old base pointer.
518 * @return The Firm type modeling the ABI between type.
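 *
 * A sketch of the resulting stack layout (the stack grows downwards):
 *
 *     ... stack arguments ...
 *     return address      \  between type
 *     old base pointer    /  (the old bp entity is dropped if try_omit_fp)
 *     ... locals ...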
520 static ir_type *ia32_abi_get_between_type(void *self)
522 #define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
523 static ir_type *omit_fp_between_type = NULL;
524 static ir_type *between_type = NULL;
526 ia32_abi_env_t *env = self;
528 if (! between_type) {
529 ir_entity *old_bp_ent;
530 ir_entity *ret_addr_ent;
531 ir_entity *omit_fp_ret_addr_ent;
533 ir_type *old_bp_type = new_type_primitive(IDENT("bp"), mode_Iu);
534 ir_type *ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_Iu);
536 between_type = new_type_struct(IDENT("ia32_between_type"));
537 old_bp_ent = new_entity(between_type, IDENT("old_bp"), old_bp_type);
538 ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type);
540 set_entity_offset(old_bp_ent, 0);
541 set_entity_offset(ret_addr_ent, get_type_size_bytes(old_bp_type));
542 set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
543 set_type_state(between_type, layout_fixed);
545 omit_fp_between_type = new_type_struct(IDENT("ia32_between_type_omit_fp"));
546 omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type);
548 set_entity_offset(omit_fp_ret_addr_ent, 0);
549 set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
550 set_type_state(omit_fp_between_type, layout_fixed);
553 return env->flags.try_omit_fp ? omit_fp_between_type : between_type;
558 * Get the estimated cycle count for @p irn.
560 * @param self The this pointer.
561 * @param irn The node.
563 * @return The estimated cycle count for this operation
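 *
 * A sketch of the model: the base latency from get_ia32_latency() plus memory
 * penalties for address mode operations, e.g. an operation touching the stack
 * frame costs its base latency plus 5 cycles.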
565 static int ia32_get_op_estimated_cost(const void *self, const ir_node *irn)
568 ia32_op_type_t op_tp;
569 const ia32_irn_ops_t *ops = self;
573 if (!is_ia32_irn(irn))
576 assert(is_ia32_irn(irn));
578 cost = get_ia32_latency(irn);
579 op_tp = get_ia32_op_type(irn);
581 if (is_ia32_CopyB(irn)) {
583 if (ARCH_INTEL(ops->cg->arch))
586 else if (is_ia32_CopyB_i(irn)) {
587 int size = get_tarval_long(get_ia32_Immop_tarval(irn));
588 cost = 20 + (int)ceil((4.0 / 3.0) * size); /* note: integer 4/3 would truncate to 1 */
589 if (ARCH_INTEL(ops->cg->arch))
592 /* in case of address mode operations add additional cycles */
593 else if (op_tp == ia32_AddrModeD || op_tp == ia32_AddrModeS) {
595 In case of a stack access we add 5 cycles (we assume the stack is in the cache);
596 other memory operations cost 20 cycles.
598 cost += is_ia32_use_frame(irn) ? 5 : 20;
605 * Returns the inverse operation of @p irn, recalculating the argument at position @p i.
607 * @param irn The original operation
608 * @param i Index of the argument we want the inverse operation to yield
609 * @param inverse struct to be filled with the resulting inverse op
610 * @param obstack The obstack to use for allocation of the returned nodes array
611 * @return The inverse operation or NULL if the operation is not invertible
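 *
 * Example (a sketch): for x = a + Const the inverse recomputing a is an Add
 * with the negated constant, a = x + (-Const); for a plain x = a + b it is
 * the Sub a = x - b.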
613 static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) {
617 ir_node *block, *noreg, *nomem;
620 /* we cannot invert non-ia32 irns */
621 if (! is_ia32_irn(irn))
624 /* operand must always be a real operand (not base, index or mem) */
625 if (i != 2 && i != 3)
628 /* we don't invert address mode operations */
629 if (get_ia32_op_type(irn) != ia32_Normal)
632 irg = get_irn_irg(irn);
633 block = get_nodes_block(irn);
634 mode = get_irn_mode(irn);
635 irn_mode = get_irn_mode(irn);
636 noreg = get_irn_n(irn, 0);
637 nomem = new_r_NoMem(irg);
638 dbg = get_irn_dbg_info(irn);
640 /* initialize structure */
641 inverse->nodes = obstack_alloc(obst, 2 * sizeof(inverse->nodes[0]));
645 switch (get_ia32_irn_opcode(irn)) {
647 if (get_ia32_immop_type(irn) == ia32_ImmConst) {
648 /* we have an add with a const here */
649 /* inverse == add with negated const */
650 inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
652 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
653 set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
654 set_ia32_commutative(inverse->nodes[0]);
656 else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
657 /* we have an add with a symconst here */
658 /* inverse == sub with const */
659 inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
661 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
664 /* normal add: inverse == sub */
665 inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, (ir_node*) irn, get_irn_n(irn, i ^ 1), nomem);
670 if (get_ia32_immop_type(irn) != ia32_ImmNone) {
671 /* we have a sub with a const/symconst here */
672 /* inverse == add with this const */
673 inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
674 inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
675 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
680 inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, (ir_node*) irn, get_irn_n(irn, 3), nomem);
683 inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, get_irn_n(irn, 2), (ir_node*) irn, nomem);
689 if (get_ia32_immop_type(irn) != ia32_ImmNone) {
690 /* xor with const: inverse = xor */
691 inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
692 inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
693 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
697 inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, (ir_node *) irn, get_irn_n(irn, i), nomem);
702 inverse->nodes[0] = new_rd_ia32_Not(dbg, irg, block, noreg, noreg, (ir_node*) irn, nomem);
707 inverse->nodes[0] = new_rd_ia32_Neg(dbg, irg, block, noreg, noreg, (ir_node*) irn, nomem);
712 /* inverse operation not supported */
719 static ir_mode *get_spill_mode_mode(const ir_mode *mode)
721 if(mode_is_float(mode))
728 * Get the mode that should be used for spilling value node
730 static ir_mode *get_spill_mode(const ir_node *node)
732 ir_mode *mode = get_irn_mode(node);
733 return get_spill_mode_mode(mode);
737 * Checks whether an address mode reload for a node with mode @p mode is
738 * compatible with a spill slot of mode @p spillmode.
740 static int ia32_is_spillmode_compatible(const ir_mode *mode, const ir_mode *spillmode)
742 if(mode_is_float(mode)) {
743 return mode == spillmode;
750 * Check if irn can load its operand at position i from memory (source address mode).
751 * @param self Pointer to irn ops itself
752 * @param irn The irn to be checked
753 * @param i The operands position
754 * @return Non-zero if the operand can be loaded
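 *
 * A qualifying case (sketch): the value operand of a commutative Add whose
 * value lives in a spill slot can be folded into source address mode, turning
 * "add eax, ebx" into "add eax, DWORD PTR [ebp-8]".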
756 static int ia32_possible_memory_operand(const void *self, const ir_node *irn, unsigned int i) {
757 ir_node *op = get_irn_n(irn, i);
758 const ir_mode *mode = get_irn_mode(op);
759 const ir_mode *spillmode = get_spill_mode(op);
761 if (! is_ia32_irn(irn) || /* must be an ia32 irn */
762 get_irn_arity(irn) != 5 || /* must be a binary operation */
763 get_ia32_op_type(irn) != ia32_Normal || /* must not already be an address mode irn */
764 ! (get_ia32_am_support(irn) & ia32_am_Source) || /* must be capable of source address mode */
765 ! ia32_is_spillmode_compatible(mode, spillmode) ||
766 (i != 2 && i != 3) || /* a "real" operand position must be requested */
767 (i == 2 && ! is_ia32_commutative(irn)) || /* if the first operand is requested, the irn must be commutative */
768 is_ia32_use_frame(irn)) /* must not already use frame */
774 static void ia32_perform_memory_operand(const void *self, ir_node *irn, ir_node *spill, unsigned int i) {
775 const ia32_irn_ops_t *ops = self;
776 ia32_code_gen_t *cg = ops->cg;
778 assert(ia32_possible_memory_operand(self, irn, i) && "Cannot perform memory operand change");
781 ir_node *tmp = get_irn_n(irn, 3);
782 set_irn_n(irn, 3, get_irn_n(irn, 2));
783 set_irn_n(irn, 2, tmp);
786 set_ia32_am_support(irn, ia32_am_Source);
787 set_ia32_op_type(irn, ia32_AddrModeS);
788 set_ia32_am_flavour(irn, ia32_B);
789 set_ia32_ls_mode(irn, get_irn_mode(get_irn_n(irn, i)));
790 set_ia32_use_frame(irn);
791 set_ia32_need_stackent(irn);
793 set_irn_n(irn, 0, get_irg_frame(get_irn_irg(irn)));
794 set_irn_n(irn, 3, ia32_get_admissible_noreg(cg, irn, 3));
795 set_irn_n(irn, 4, spill);
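	/* After the rewiring above (a sketch): in[0] is the frame pointer serving
	 * as the base address, in[3] is NoReg in place of the folded operand and
	 * in[4] is the memory value of the spill slot. */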
797 //FIXME DBG_OPT_AM_S(reload, irn);
800 static const be_abi_callbacks_t ia32_abi_callbacks = {
803 ia32_abi_get_between_type,
804 ia32_abi_dont_save_regs,
809 /* fill register allocator interface */
811 static const arch_irn_ops_if_t ia32_irn_ops_if = {
812 ia32_get_irn_reg_req,
817 ia32_get_frame_entity,
818 ia32_set_frame_entity,
819 ia32_set_frame_offset,
822 ia32_get_op_estimated_cost,
823 ia32_possible_memory_operand,
824 ia32_perform_memory_operand,
827 ia32_irn_ops_t ia32_irn_ops = {
834 /**************************************************
837 * ___ ___ __| | ___ __ _ ___ _ __ _| |_
838 * / __/ _ \ / _` |/ _ \/ _` |/ _ \ '_ \ | | _|
839 * | (_| (_) | (_| | __/ (_| | __/ | | | | | |
840 * \___\___/ \__,_|\___|\__, |\___|_| |_| |_|_|
843 **************************************************/
846 * Transforms the standard firm graph into an ia32 firm graph.
849 static void ia32_prepare_graph(void *self) {
850 ia32_code_gen_t *cg = self;
852 /* transform psi condition trees */
853 ia32_pre_transform_phase(cg);
855 /* transform all remaining nodes */
856 ia32_transform_graph(cg);
857 //add_fpu_edges(cg->birg);
859 // Matze: disabled for now, because after the transformation the start block
860 // has no self-loop anymore, so it might be merged with its successor block.
861 // This would bring several nodes into the start block, which sometimes get
862 // scheduled before the initial IncSP/Barrier.
863 local_optimize_graph(cg->irg);
866 be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);
868 /* optimize address mode */
869 ia32_optimize_addressmode(cg);
872 be_dump(cg->irg, "-am", dump_ir_block_graph_sched);
874 /* do code placement, to optimize the position of constants */
878 be_dump(cg->irg, "-place", dump_ir_block_graph_sched);
882 * Dummy functions for hooks we don't need but which must be filled.
884 static void ia32_before_sched(void *self) {
887 static void remove_unused_nodes(ir_node *irn, bitset_t *already_visited) {
890 ir_node *mem_proj = NULL;
895 mode = get_irn_mode(irn);
897 /* check if we already saw this node or the node has more than one user */
898 if (bitset_contains_irn(already_visited, irn) || get_irn_n_edges(irn) > 1) {
902 /* mark irn visited */
903 bitset_add_irn(already_visited, irn);
905 /* non-Tuple nodes with one user: ok, return */
906 if (get_irn_n_edges(irn) >= 1 && mode != mode_T) {
910 /* tuple node has one user which is not the mem proj -> ok */
911 if (mode == mode_T && get_irn_n_edges(irn) == 1) {
912 mem_proj = ia32_get_proj_for_mode(irn, mode_M);
913 if (mem_proj == NULL) {
918 arity = get_irn_arity(irn);
919 for (i = 0; i < arity; ++i) {
920 ir_node *pred = get_irn_n(irn, i);
922 /* do not follow memory edges or we will accidentally remove stores */
923 if (get_irn_mode(pred) == mode_M) {
924 if(mem_proj != NULL) {
925 edges_reroute(mem_proj, pred, get_irn_irg(mem_proj));
931 set_irn_n(irn, i, new_Bad());
934 The current node is about to be removed: if the predecessor
935 has only this node as user, it needs to be removed as well.
937 if (get_irn_n_edges(pred) <= 1)
938 remove_unused_nodes(pred, already_visited);
941 // we need to set the preds to Bad again to also get the memory edges
942 arity = get_irn_arity(irn);
943 for (i = 0; i < arity; ++i) {
944 set_irn_n(irn, i, new_Bad());
947 if (sched_is_scheduled(irn)) {
952 static void remove_unused_loads_walker(ir_node *irn, void *env) {
953 bitset_t *already_visited = env;
954 if (is_ia32_Ld(irn) && ! bitset_contains_irn(already_visited, irn))
955 remove_unused_nodes(irn, env);
959 * Called before the register allocator.
960 * Calculate a block schedule here. We need it for the x87
961 * simulator and the emitter.
963 static void ia32_before_ra(void *self) {
964 ia32_code_gen_t *cg = self;
965 bitset_t *already_visited = bitset_irg_alloca(cg->irg);
969 There are sometimes unused loads, only pinned by memory.
970 We need to remove those Loads and all other nodes which won't be used
971 after removing the Load from the schedule.
973 irg_walk_graph(cg->irg, NULL, remove_unused_loads_walker, already_visited);
975 /* setup fpu rounding modes */
976 ia32_setup_fpu_mode(cg);
981 * Transforms a be_Reload into an ia32 Load.
983 static void transform_to_Load(ia32_code_gen_t *cg, ir_node *node) {
984 ir_graph *irg = get_irn_irg(node);
985 dbg_info *dbg = get_irn_dbg_info(node);
986 ir_node *block = get_nodes_block(node);
987 ir_entity *ent = be_get_frame_entity(node);
988 ir_mode *mode = get_irn_mode(node);
989 ir_mode *spillmode = get_spill_mode(node);
990 ir_node *noreg = ia32_new_NoReg_gp(cg);
991 ir_node *sched_point = NULL;
992 ir_node *ptr = get_irg_frame(irg);
993 ir_node *mem = get_irn_n(node, be_pos_Reload_mem);
994 ir_node *new_op, *proj;
995 const arch_register_t *reg;
997 if (sched_is_scheduled(node)) {
998 sched_point = sched_prev(node);
1001 if (mode_is_float(spillmode)) {
1003 new_op = new_rd_ia32_xLoad(dbg, irg, block, ptr, noreg, mem);
1005 new_op = new_rd_ia32_vfld(dbg, irg, block, ptr, noreg, mem);
1007 else if (get_mode_size_bits(spillmode) == 128) {
1008 // Reload 128 bit SSE registers
1009 new_op = new_rd_ia32_xxLoad(dbg, irg, block, ptr, noreg, mem);
1012 new_op = new_rd_ia32_Load(dbg, irg, block, ptr, noreg, mem);
1014 set_ia32_am_support(new_op, ia32_am_Source);
1015 set_ia32_op_type(new_op, ia32_AddrModeS);
1016 set_ia32_am_flavour(new_op, ia32_B);
1017 set_ia32_ls_mode(new_op, spillmode);
1018 set_ia32_frame_ent(new_op, ent);
1019 set_ia32_use_frame(new_op);
1021 DBG_OPT_RELOAD2LD(node, new_op);
1023 proj = new_rd_Proj(dbg, irg, block, new_op, mode, pn_ia32_Load_res);
1026 sched_add_after(sched_point, new_op);
1027 sched_add_after(new_op, proj);
1032 /* copy the register from the old node to the new Load */
1033 reg = arch_get_irn_register(cg->arch_env, node);
1034 arch_set_irn_register(cg->arch_env, new_op, reg);
1036 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node));
1038 exchange(node, proj);
1042 * Transforms a be_Spill node into an ia32 Store.
1044 static void transform_to_Store(ia32_code_gen_t *cg, ir_node *node) {
1045 ir_graph *irg = get_irn_irg(node);
1046 dbg_info *dbg = get_irn_dbg_info(node);
1047 ir_node *block = get_nodes_block(node);
1048 ir_entity *ent = be_get_frame_entity(node);
1049 const ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
1050 ir_mode *mode = get_spill_mode(spillval);
1051 ir_node *noreg = ia32_new_NoReg_gp(cg);
1052 ir_node *nomem = new_rd_NoMem(irg);
1053 ir_node *ptr = get_irg_frame(irg);
1054 ir_node *val = get_irn_n(node, be_pos_Spill_val);
1056 ir_node *sched_point = NULL;
1058 if (sched_is_scheduled(node)) {
1059 sched_point = sched_prev(node);
1062 /* No need to spill unknown values... */
1063 if(is_ia32_Unknown_GP(val) ||
1064 is_ia32_Unknown_VFP(val) ||
1065 is_ia32_Unknown_XMM(val)) {
1070 exchange(node, store);
1074 if (mode_is_float(mode)) {
1076 store = new_rd_ia32_xStore(dbg, irg, block, ptr, noreg, val, nomem);
1078 store = new_rd_ia32_vfst(dbg, irg, block, ptr, noreg, val, nomem);
1079 } else if (get_mode_size_bits(mode) == 128) {
1080 // Spill 128 bit SSE registers
1081 store = new_rd_ia32_xxStore(dbg, irg, block, ptr, noreg, val, nomem);
1082 } else if (get_mode_size_bits(mode) == 8) {
1083 store = new_rd_ia32_Store8Bit(dbg, irg, block, ptr, noreg, val, nomem);
1085 store = new_rd_ia32_Store(dbg, irg, block, ptr, noreg, val, nomem);
1088 set_ia32_am_support(store, ia32_am_Dest);
1089 set_ia32_op_type(store, ia32_AddrModeD);
1090 set_ia32_am_flavour(store, ia32_B);
1091 set_ia32_ls_mode(store, mode);
1092 set_ia32_frame_ent(store, ent);
1093 set_ia32_use_frame(store);
1094 SET_IA32_ORIG_NODE(store, ia32_get_old_node_name(cg, node));
1095 DBG_OPT_SPILL2ST(node, store);
1098 sched_add_after(sched_point, store);
1102 exchange(node, store);
1105 static ir_node *create_push(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent) {
1106 ir_graph *irg = get_irn_irg(node);
1107 dbg_info *dbg = get_irn_dbg_info(node);
1108 ir_node *block = get_nodes_block(node);
1109 ir_node *noreg = ia32_new_NoReg_gp(cg);
1110 ir_node *frame = get_irg_frame(irg);
1112 ir_node *push = new_rd_ia32_Push(dbg, irg, block, frame, noreg, noreg, sp, mem);
1114 set_ia32_frame_ent(push, ent);
1115 set_ia32_use_frame(push);
1116 set_ia32_op_type(push, ia32_AddrModeS);
1117 set_ia32_am_flavour(push, ia32_B);
1118 set_ia32_ls_mode(push, mode_Is);
1120 sched_add_before(schedpoint, push);
1124 static ir_node *create_pop(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_entity *ent) {
1125 ir_graph *irg = get_irn_irg(node);
1126 dbg_info *dbg = get_irn_dbg_info(node);
1127 ir_node *block = get_nodes_block(node);
1128 ir_node *noreg = ia32_new_NoReg_gp(cg);
1129 ir_node *frame = get_irg_frame(irg);
1131 ir_node *pop = new_rd_ia32_Pop(dbg, irg, block, frame, noreg, sp, new_NoMem());
1133 set_ia32_frame_ent(pop, ent);
1134 set_ia32_use_frame(pop);
1135 set_ia32_op_type(pop, ia32_AddrModeD);
1136 set_ia32_am_flavour(pop, ia32_am_OB);
1137 set_ia32_ls_mode(pop, mode_Is);
1139 sched_add_before(schedpoint, pop);
1144 static ir_node* create_spproj(ia32_code_gen_t *cg, ir_node *node, ir_node *pred, int pos, ir_node *schedpoint) {
1145 ir_graph *irg = get_irn_irg(node);
1146 dbg_info *dbg = get_irn_dbg_info(node);
1147 ir_node *block = get_nodes_block(node);
1148 ir_mode *spmode = mode_Iu;
1149 const arch_register_t *spreg = &ia32_gp_regs[REG_ESP];
1152 sp = new_rd_Proj(dbg, irg, block, pred, spmode, pos);
1153 arch_set_irn_register(cg->arch_env, sp, spreg);
1154 sched_add_before(schedpoint, sp);
1160 * Transform memperm, currently we do this the ugly way and produce
1161 * push/pop into/from memory cascades. This is possible without using any registers.
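 *
 * A sketch for a two-entity MemPerm swapping the frame slots A and B:
 *
 *     push dword [A]    ; save the value of A
 *     push dword [B]    ; save the value of B
 *     pop  dword [A]    ; A := old value of B (pops run in reverse order)
 *     pop  dword [B]    ; B := old value of A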
1164 static void transform_MemPerm(ia32_code_gen_t *cg, ir_node *node) {
1165 ir_graph *irg = get_irn_irg(node);
1166 ir_node *block = get_nodes_block(node);
1170 ir_node *sp = be_abi_get_ignore_irn(cg->birg->abi, &ia32_gp_regs[REG_ESP]);
1171 const ir_edge_t *edge;
1172 const ir_edge_t *next;
1175 arity = be_get_MemPerm_entity_arity(node);
1176 pops = alloca(arity * sizeof(pops[0]));
1179 for(i = 0; i < arity; ++i) {
1180 ir_entity *ent = be_get_MemPerm_in_entity(node, i);
1181 ir_type *enttype = get_entity_type(ent);
1182 int entbits = get_type_size_bits(enttype);
1183 ir_node *mem = get_irn_n(node, i + 1);
1186 assert(get_entity_type(be_get_MemPerm_out_entity(node, i)) == enttype);
1187 assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");
1189 push = create_push(cg, node, node, sp, mem, ent);
1190 sp = create_spproj(cg, node, push, pn_ia32_Push_stack, node);
1192 // add another push after the first one
1193 push = create_push(cg, node, node, sp, mem, ent);
1194 add_ia32_am_offs_int(push, 4);
1195 sp = create_spproj(cg, node, push, pn_ia32_Push_stack, node);
1198 set_irn_n(node, i, new_Bad());
1202 for(i = arity - 1; i >= 0; --i) {
1203 ir_entity *ent = be_get_MemPerm_out_entity(node, i);
1204 ir_type *enttype = get_entity_type(ent);
1205 int entbits = get_type_size_bits(enttype);
1208 assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");
1210 pop = create_pop(cg, node, node, sp, ent);
1211 sp = create_spproj(cg, node, pop, pn_ia32_Pop_stack, node);
1213 add_ia32_am_offs_int(pop, 4);
1215 // add another pop after the first one
1216 pop = create_pop(cg, node, node, sp, ent);
1217 sp = create_spproj(cg, node, pop, pn_ia32_Pop_stack, node);
1224 keep = be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in);
1225 sched_add_before(node, keep);
1227 // exchange memprojs
1228 foreach_out_edge_safe(node, edge, next) {
1229 ir_node *proj = get_edge_src_irn(edge);
1230 int p = get_Proj_proj(proj);
1234 set_Proj_pred(proj, pops[p]);
1235 set_Proj_proj(proj, 3);
1239 arity = get_irn_arity(node);
1240 for(i = 0; i < arity; ++i) {
1241 set_irn_n(node, i, new_Bad());
1247 * Block walker: calls the transform functions for Reload, Spill and MemPerm nodes.
1249 static void ia32_after_ra_walker(ir_node *block, void *env) {
1250 ir_node *node, *prev;
1251 ia32_code_gen_t *cg = env;
1253 /* beware: the schedule is changed here */
1254 for (node = sched_last(block); !sched_is_begin(node); node = prev) {
1255 prev = sched_prev(node);
1257 if (be_is_Reload(node)) {
1258 transform_to_Load(cg, node);
1259 } else if (be_is_Spill(node)) {
1260 transform_to_Store(cg, node);
1261 } else if(be_is_MemPerm(node)) {
1262 transform_MemPerm(cg, node);
1268 * Collects nodes that need frame entities assigned.
1270 static void ia32_collect_frame_entity_nodes(ir_node *node, void *data)
1272 be_fec_env_t *env = data;
1274 if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
1275 const ir_mode *mode = get_spill_mode_mode(get_irn_mode(node));
1276 int align = get_mode_size_bytes(mode);
1277 be_node_needs_frame_entity(env, node, mode, align);
1278 } else if(is_ia32_irn(node) && get_ia32_frame_ent(node) == NULL
1279 && is_ia32_use_frame(node)) {
1280 if (is_ia32_need_stackent(node) || is_ia32_Load(node)) {
1281 const ir_mode *mode = get_ia32_ls_mode(node);
1282 int align = get_mode_size_bytes(mode);
1283 be_node_needs_frame_entity(env, node, mode, align);
1284 } else if (is_ia32_vfild(node) || is_ia32_xLoad(node)) {
1285 const ir_mode *mode = get_ia32_ls_mode(node);
1287 be_node_needs_frame_entity(env, node, mode, align);
1288 } else if(is_ia32_FldCW(node)) {
1289 const ir_mode *mode = ia32_reg_classes[CLASS_ia32_fp_cw].mode;
1291 be_node_needs_frame_entity(env, node, mode, align);
1292 } else if (is_ia32_SetST0(node)) {
1293 const ir_mode *mode = get_ia32_ls_mode(node);
1295 be_node_needs_frame_entity(env, node, mode, align);
1298 if(!is_ia32_Store(node)
1299 && !is_ia32_xStore(node)
1300 && !is_ia32_xStoreSimple(node)
1301 && !is_ia32_vfist(node)
1302 && !is_ia32_GetST0(node)
1303 && !is_ia32_FnstCW(node)) {
1312 * We transform Spill and Reload here. This needs to be done before
1313 * stack biasing, otherwise we would miss the corrected offset for these nodes.
1315 static void ia32_after_ra(void *self) {
1316 ia32_code_gen_t *cg = self;
1317 ir_graph *irg = cg->irg;
1318 be_fec_env_t *fec_env = be_new_frame_entity_coalescer(cg->birg);
1320 /* create and coalesce frame entities */
1321 irg_walk_graph(irg, NULL, ia32_collect_frame_entity_nodes, fec_env);
1322 be_assign_entities(fec_env);
1323 be_free_frame_entity_coalescer(fec_env);
1325 irg_block_walk_graph(irg, NULL, ia32_after_ra_walker, cg);
1327 ia32_finish_irg(irg, cg);
1331 * Last touchups for the graph before emit: x87 simulation to replace the
1332 * virtual with real x87 instructions, creating a block schedule and peephole optimisations.
1335 static void ia32_finish(void *self) {
1336 ia32_code_gen_t *cg = self;
1337 ir_graph *irg = cg->irg;
1339 /* if we do x87 code generation, rewrite all the virtual instructions and registers */
1340 if (cg->used_fp == fp_x87 || cg->force_sim) {
1341 x87_simulate_graph(cg->arch_env, cg->birg);
1344 /* create block schedule, this also removes empty blocks which might
1345 * produce critical edges */
1346 cg->blk_sched = be_create_block_schedule(irg, cg->birg->exec_freq);
1348 /* do peephole optimisations */
1349 ia32_peephole_optimization(irg, cg);
1353 * Emits the code, closes the output file and frees
1354 * the code generator interface.
1356 static void ia32_codegen(void *self) {
1357 ia32_code_gen_t *cg = self;
1358 ir_graph *irg = cg->irg;
1360 ia32_gen_routine(cg, irg);
1364 /* remove it from the isa */
1367 /* de-allocate code generator */
1368 del_set(cg->reg_set);
1372 static void *ia32_cg_init(be_irg_t *birg);
1374 static const arch_code_generator_if_t ia32_code_gen_if = {
1376 NULL, /* before abi introduce hook */
1379 ia32_before_sched, /* before scheduling hook */
1380 ia32_before_ra, /* before register allocation hook */
1381 ia32_after_ra, /* after register allocation hook */
1382 ia32_finish, /* called before codegen */
1383 ia32_codegen /* emit && done */
1387 * Initializes an IA32 code generator.
1389 static void *ia32_cg_init(be_irg_t *birg) {
1390 ia32_isa_t *isa = (ia32_isa_t *)birg->main_env->arch_env->isa;
1391 ia32_code_gen_t *cg = xcalloc(1, sizeof(*cg));
1393 cg->impl = &ia32_code_gen_if;
1394 cg->irg = birg->irg;
1395 cg->reg_set = new_set(ia32_cmp_irn_reg_assoc, 1024);
1396 cg->arch_env = birg->main_env->arch_env;
1399 cg->blk_sched = NULL;
1400 cg->fp_kind = isa->fp_kind;
1401 cg->used_fp = fp_none;
1402 cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
1404 /* copy optimizations from isa for easier access */
1406 cg->arch = isa->arch;
1407 cg->opt_arch = isa->opt_arch;
1413 if (isa->name_obst) {
1414 obstack_free(isa->name_obst, NULL);
1415 obstack_init(isa->name_obst);
1419 cur_reg_set = cg->reg_set;
1421 ia32_irn_ops.cg = cg;
1423 return (arch_code_generator_t *)cg;
1428 /*****************************************************************
1429 * ____ _ _ _____ _____
1430 * | _ \ | | | | |_ _|/ ____| /\
1431 * | |_) | __ _ ___| | _____ _ __ __| | | | | (___ / \
1432 * | _ < / _` |/ __| |/ / _ \ '_ \ / _` | | | \___ \ / /\ \
1433 * | |_) | (_| | (__| < __/ | | | (_| | _| |_ ____) / ____ \
1434 * |____/ \__,_|\___|_|\_\___|_| |_|\__,_| |_____|_____/_/ \_\
1436 *****************************************************************/
1439 * Set output modes for GCC
1441 static const tarval_mode_info mo_integer = {
1448 * set the tarval output mode of all integer modes to decimal
1450 static void set_tarval_output_modes(void)
1454 for (i = get_irp_n_modes() - 1; i >= 0; --i) {
1455 ir_mode *mode = get_irp_mode(i);
1457 if (mode_is_int(mode))
1458 set_tarval_mode_output_option(mode, &mo_integer);
1462 const arch_isa_if_t ia32_isa_if;
1465 * The template that generates a new ISA object.
1466 * Note that this template can be changed by command line arguments.
1469 static ia32_isa_t ia32_isa_template = {
1471 &ia32_isa_if, /* isa interface implementation */
1472 &ia32_gp_regs[REG_ESP], /* stack pointer register */
1473 &ia32_gp_regs[REG_EBP], /* base pointer register */
1474 -1, /* stack direction */
1475 NULL, /* main environment */
1477 { NULL, }, /* emitter environment */
1478 NULL, /* 16bit register names */
1479 NULL, /* 8bit register names */
1483 IA32_OPT_INCDEC | /* optimize add 1, sub 1 into inc/dec default: on */
1484 IA32_OPT_DOAM | /* optimize address mode default: on */
1485 IA32_OPT_LEA | /* optimize for LEAs default: on */
1486 IA32_OPT_PLACECNST | /* place constants immediately before instructions, default: on */
1487 IA32_OPT_IMMOPS | /* operations can use immediates, default: on */
1488 IA32_OPT_PUSHARGS), /* create pushs for function argument passing, default: on */
1489 arch_pentium_4, /* instruction architecture */
1490 arch_pentium_4, /* optimize for architecture */
1491 fp_sse2, /* use sse2 unit */
1492 NULL, /* current code generator */
1494 NULL, /* name obstack */
1495 0 /* name obst size */
1500 * Initializes the backend ISA.
1502 static void *ia32_init(FILE *file_handle) {
1503 static int inited = 0;
1510 set_tarval_output_modes();
1512 isa = xmalloc(sizeof(*isa));
1513 memcpy(isa, &ia32_isa_template, sizeof(*isa));
1515 if(mode_fpcw == NULL) {
1516 mode_fpcw = new_ir_mode("Fpcw", irms_int_number, 16, 0, irma_none, 0);
1519 ia32_register_init(isa);
1520 ia32_create_opcodes();
1521 ia32_register_copy_attr_func();
1523 if ((ARCH_INTEL(isa->arch) && isa->arch < arch_pentium_4) ||
1524 (ARCH_AMD(isa->arch) && isa->arch < arch_athlon))
1525 /* no SSE2 for these CPUs */
1526 isa->fp_kind = fp_x87;
1528 if (ARCH_INTEL(isa->opt_arch) && isa->opt_arch >= arch_pentium_4) {
1529 /* the Pentium 4 doesn't like inc and dec instructions */
1530 isa->opt &= ~IA32_OPT_INCDEC;
1533 be_emit_init_env(&isa->emit, file_handle);
1534 isa->regs_16bit = pmap_create();
1535 isa->regs_8bit = pmap_create();
1536 isa->types = pmap_create();
1537 isa->tv_ent = pmap_create();
1538 isa->cpu = ia32_init_machine_description();
1540 ia32_build_16bit_reg_map(isa->regs_16bit);
1541 ia32_build_8bit_reg_map(isa->regs_8bit);
1544 isa->name_obst = xmalloc(sizeof(*isa->name_obst));
1545 obstack_init(isa->name_obst);
1548 ia32_handle_intrinsics();
1550 /* needed for the debug support */
1551 be_gas_emit_switch_section(&isa->emit, GAS_SECTION_TEXT);
1552 be_emit_cstring(&isa->emit, ".Ltext0:\n");
1553 be_emit_write_line(&isa->emit);
1555 /* we mark referenced global entities, so we can only emit those which
1556 * are actually referenced. (Note: you mustn't use the type visited flag
1557 * elsewhere in the backend)
1559 inc_master_type_visited();
1567 * Closes the output file and frees the ISA structure.
1569 static void ia32_done(void *self) {
1570 ia32_isa_t *isa = self;
1572 /* emit now all global declarations */
1573 be_gas_emit_decls(&isa->emit, isa->arch_isa.main_env, 1);
1575 pmap_destroy(isa->regs_16bit);
1576 pmap_destroy(isa->regs_8bit);
1577 pmap_destroy(isa->tv_ent);
1578 pmap_destroy(isa->types);
1581 obstack_free(isa->name_obst, NULL);
1584 be_emit_destroy_env(&isa->emit);
1591 * Return the number of register classes for this architecture.
1592 * We always report these:
1593 * - the general purpose registers
1594 * - the SSE floating point register set
1595 * - the virtual floating point registers
1596 * - the SSE vector register set
1598 static int ia32_get_n_reg_class(const void *self) {
1603 * Return the register class for index i.
1605 static const arch_register_class_t *ia32_get_reg_class(const void *self, int i)
1607 assert(i >= 0 && i < N_CLASSES);
1608 return &ia32_reg_classes[i];
1612 * Get the register class which shall be used to store a value of a given mode.
1613 * @param self The this pointer.
1614 * @param mode The mode in question.
1615 * @return A register class which can hold values of the given mode.
1617 const arch_register_class_t *ia32_get_reg_class_for_mode(const void *self, const ir_mode *mode) {
1618 const ia32_isa_t *isa = self;
1619 if (mode_is_float(mode)) {
1620 return USE_SSE2(isa) ? &ia32_reg_classes[CLASS_ia32_xmm] : &ia32_reg_classes[CLASS_ia32_vfp];
1623 return &ia32_reg_classes[CLASS_ia32_gp];
1627 * Get the ABI restrictions for procedure calls.
1628 * @param self The this pointer.
1629 * @param method_type The type of the method (procedure) in question.
1630 * @param abi The abi object to be modified
1632 static void ia32_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi) {
1633 const ia32_isa_t *isa = self;
1636 unsigned cc = get_method_calling_convention(method_type);
1637 int n = get_method_n_params(method_type);
1640 int i, ignore_1, ignore_2;
1642 const arch_register_t *reg;
1643 be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);
1645 unsigned use_push = !IS_P6_ARCH(isa->opt_arch);
1647 /* set abi flags for calls */
1648 call_flags.bits.left_to_right = 0; /* always last arg first on stack */
1649 call_flags.bits.store_args_sequential = use_push;
1650 /* call_flags.bits.try_omit_fp not changed: can handle both settings */
1651 call_flags.bits.fp_free = 0; /* the frame pointer is fixed in IA32 */
1652 call_flags.bits.call_has_imm = 1; /* IA32 calls can have immediate address */
1654 /* set stack parameter passing style */
1655 be_abi_call_set_flags(abi, call_flags, &ia32_abi_callbacks);
1657 /* collect the mode for each type */
1658 modes = alloca(n * sizeof(modes[0]));
1660 for (i = 0; i < n; i++) {
1661 tp = get_method_param_type(method_type, i);
1662 modes[i] = get_type_mode(tp);
1665 /* set register parameters */
1666 if (cc & cc_reg_param) {
1667 /* determine the number of parameters passed via registers */
1668 biggest_n = ia32_get_n_regparam_class(n, modes, &ignore_1, &ignore_2);
1670 /* loop over all parameters and set the register requirements */
1671 for (i = 0; i <= biggest_n; i++) {
1672 reg = ia32_get_RegParam_reg(n, modes, i, cc);
1673 assert(reg && "no register found for parameter");
1674 be_abi_call_param_reg(abi, i, reg);
1681 /* set stack parameters */
1682 for (i = stack_idx; i < n; i++) {
1683 /* parameters on the stack are 32 bit aligned */
1684 be_abi_call_param_stack(abi, i, 4, 0, 0);
1688 /* set return registers */
1689 n = get_method_n_ress(method_type);
1691 assert(n <= 2 && "more than two results not supported");
1693 /* In case of 64bit returns, we will have two 32bit values */
1695 tp = get_method_res_type(method_type, 0);
1696 mode = get_type_mode(tp);
1698 assert(!mode_is_float(mode) && "two FP results not supported");
1700 tp = get_method_res_type(method_type, 1);
1701 mode = get_type_mode(tp);
1703 assert(!mode_is_float(mode) && "mixed INT, FP results not supported");
1705 be_abi_call_res_reg(abi, 0, &ia32_gp_regs[REG_EAX]);
1706 be_abi_call_res_reg(abi, 1, &ia32_gp_regs[REG_EDX]);
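		/* Sketch of the convention: a 64 bit result is returned with the low
		 * 32 bits in EAX (result 0) and the high 32 bits in EDX (result 1). */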
1709 const arch_register_t *reg;
1711 tp = get_method_res_type(method_type, 0);
1712 assert(is_atomic_type(tp));
1713 mode = get_type_mode(tp);
1715 reg = mode_is_float(mode) ? &ia32_vfp_regs[REG_VF0] : &ia32_gp_regs[REG_EAX];
1717 be_abi_call_res_reg(abi, 0, reg);
1722 static const void *ia32_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn) {
1723 return &ia32_irn_ops;
1726 const arch_irn_handler_t ia32_irn_handler = {
1730 const arch_irn_handler_t *ia32_get_irn_handler(const void *self) {
1731 return &ia32_irn_handler;
1734 int ia32_to_appear_in_schedule(void *block_env, const ir_node *irn) {
1735 if(!is_ia32_irn(irn))
1738 if(is_ia32_NoReg_GP(irn) || is_ia32_NoReg_VFP(irn) || is_ia32_NoReg_XMM(irn)
1739 || is_ia32_Unknown_GP(irn) || is_ia32_Unknown_XMM(irn)
1740 || is_ia32_Unknown_VFP(irn) || is_ia32_ChangeCW(irn))
1747 * Initializes the code generator interface.
1749 static const arch_code_generator_if_t *ia32_get_code_generator_if(void *self) {
1750 return &ia32_code_gen_if;
1754 * Returns the estimated execution time of an ia32 irn.
1756 static sched_timestep_t ia32_sched_exectime(void *env, const ir_node *irn) {
1757 const arch_env_t *arch_env = env;
1758 return is_ia32_irn(irn) ? ia32_get_op_estimated_cost(arch_get_irn_ops(arch_env, irn), irn) : 1;
1761 list_sched_selector_t ia32_sched_selector;
1764 * Returns the reg_pressure scheduler with to_appear_in_schedule() overloaded
1766 static const list_sched_selector_t *ia32_get_list_sched_selector(const void *self, list_sched_selector_t *selector) {
1767 memcpy(&ia32_sched_selector, selector, sizeof(ia32_sched_selector));
1768 ia32_sched_selector.exectime = ia32_sched_exectime;
1769 ia32_sched_selector.to_appear_in_schedule = ia32_to_appear_in_schedule;
1770 return &ia32_sched_selector;
1773 static const ilp_sched_selector_t *ia32_get_ilp_sched_selector(const void *self) {
1780 * Returns the necessary byte alignment for storing a register of a given class.
1780 static int ia32_get_reg_class_alignment(const void *self, const arch_register_class_t *cls) {
1781 ir_mode *mode = arch_register_class_mode(cls);
1782 int bytes = get_mode_size_bytes(mode);
1784 if (mode_is_float(mode) && bytes > 8)
1789 static const be_execution_unit_t ***ia32_get_allowed_execution_units(const void *self, const ir_node *irn) {
1790 static const be_execution_unit_t *_allowed_units_BRANCH[] = {
1791 &ia32_execution_units_BRANCH[IA32_EXECUNIT_TP_BRANCH_BRANCH1],
1792 &ia32_execution_units_BRANCH[IA32_EXECUNIT_TP_BRANCH_BRANCH2],
1795 static const be_execution_unit_t *_allowed_units_GP[] = {
1796 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EAX],
1797 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EBX],
1798 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_ECX],
1799 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EDX],
1800 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_ESI],
1801 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EDI],
1802 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EBP],
1805 static const be_execution_unit_t *_allowed_units_DUMMY[] = {
1806 &be_machine_execution_units_DUMMY[0],
1809 static const be_execution_unit_t **_units_callret[] = {
1810 _allowed_units_BRANCH,
1813 static const be_execution_unit_t **_units_other[] = {
1817 static const be_execution_unit_t **_units_dummy[] = {
1818 _allowed_units_DUMMY,
1821 const be_execution_unit_t ***ret;
1823 if (is_ia32_irn(irn)) {
1824 ret = get_ia32_exec_units(irn);
1826 else if (is_be_node(irn)) {
1827 if (be_is_Call(irn) || be_is_Return(irn)) {
1828 ret = _units_callret;
1830 else if (be_is_Barrier(irn)) {
1845 * Return the abstract ia32 machine.
1847 static const be_machine_t *ia32_get_machine(const void *self) {
1848 const ia32_isa_t *isa = self;
1853 * Return irp irgs in the desired order.
1855 static ir_graph **ia32_get_irg_list(const void *self, ir_graph ***irg_list) {
1860 * Allows or disallows the creation of Psi nodes for the given Phi nodes.
1861 * @return 1 if allowed, 0 otherwise
1863 static int ia32_is_psi_allowed(ir_node *sel, ir_node *phi_list, int i, int j)
1865 ir_node *cmp, *cmp_a, *phi;
1868 /* we don't want long long and floating point Psis */
1869 #define IS_BAD_PSI_MODE(mode) (mode_is_float(mode) || get_mode_size_bits(mode) > 32)
1871 if (get_irn_mode(sel) != mode_b)
1874 cmp = get_Proj_pred(sel);
1875 cmp_a = get_Cmp_left(cmp);
1876 mode = get_irn_mode(cmp_a);
1878 if (IS_BAD_PSI_MODE(mode))
1881 /* check the Phi nodes */
1882 for (phi = phi_list; phi; phi = get_irn_link(phi)) {
1883 ir_node *pred_i = get_irn_n(phi, i);
1884 ir_node *pred_j = get_irn_n(phi, j);
1885 ir_mode *mode_i = get_irn_mode(pred_i);
1886 ir_mode *mode_j = get_irn_mode(pred_j);
1888 if (IS_BAD_PSI_MODE(mode_i) || IS_BAD_PSI_MODE(mode_j))
1892 #undef IS_BAD_PSI_MODE
1897 static ia32_intrinsic_env_t intrinsic_env = {
1898 NULL, /**< the irg, these entities belong to */
1899 NULL, /**< entity for first div operand (move into FPU) */
1900 NULL, /**< entity for second div operand (move into FPU) */
1901 NULL, /**< entity for converts ll -> d */
1902 NULL, /**< entity for converts d -> ll */
1906 * Returns the libFirm configuration parameter for this backend.
1908 static const backend_params *ia32_get_libfirm_params(void) {
1909 static const opt_if_conv_info_t ifconv = {
1910 4, /* maxdepth, doesn't matter for Psi-conversion */
1911 ia32_is_psi_allowed /* allows or disallows Psi creation for given selector */
1913 static const arch_dep_params_t ad = {
1914 1, /* also use subs */
1915 4, /* maximum shifts */
1916 31, /* maximum shift amount */
1918 1, /* allow Mulhs */
1919 1, /* allow Mulus */
1920 32 /* Mulh allowed up to 32 bit */
1922 static backend_params p = {
1923 NULL, /* no additional opcodes */
1924 NULL, /* will be set later */
1925 1, /* need dword lowering */
1926 ia32_create_intrinsic_fkt,
1927 &intrinsic_env, /* context for ia32_create_intrinsic_fkt */
1928 NULL, /* will be set later */
1932 p.if_conv_info = &ifconv;
1936 /* instruction set architectures. */
1937 static const lc_opt_enum_int_items_t arch_items[] = {
1938 { "386", arch_i386, },
1939 { "486", arch_i486, },
1940 { "pentium", arch_pentium, },
1941 { "586", arch_pentium, },
1942 { "pentiumpro", arch_pentium_pro, },
1943 { "686", arch_pentium_pro, },
1944 { "pentiummmx", arch_pentium_mmx, },
1945 { "pentium2", arch_pentium_2, },
1946 { "p2", arch_pentium_2, },
1947 { "pentium3", arch_pentium_3, },
1948 { "p3", arch_pentium_3, },
1949 { "pentium4", arch_pentium_4, },
1950 { "p4", arch_pentium_4, },
1951 { "pentiumm", arch_pentium_m, },
1952 { "pm", arch_pentium_m, },
1953 { "core", arch_core, },
1955 { "athlon", arch_athlon, },
1956 { "athlon64", arch_athlon_64, },
1957 { "opteron", arch_opteron, },
1961 static lc_opt_enum_int_var_t arch_var = {
1962 &ia32_isa_template.arch, arch_items
1965 static lc_opt_enum_int_var_t opt_arch_var = {
1966 &ia32_isa_template.opt_arch, arch_items
1969 static const lc_opt_enum_int_items_t fp_unit_items[] = {
1971 { "sse2", fp_sse2 },
1975 static lc_opt_enum_int_var_t fp_unit_var = {
1976 &ia32_isa_template.fp_kind, fp_unit_items
1979 static const lc_opt_enum_int_items_t gas_items[] = {
1980 { "normal", GAS_FLAVOUR_NORMAL },
1981 { "mingw", GAS_FLAVOUR_MINGW },
1985 static lc_opt_enum_int_var_t gas_var = {
1986 (int*) &be_gas_flavour, gas_items
1989 static const lc_opt_table_entry_t ia32_options[] = {
1990 LC_OPT_ENT_ENUM_INT("arch", "select the instruction architecture", &arch_var),
1991 LC_OPT_ENT_ENUM_INT("opt", "optimize for instruction architecture", &opt_arch_var),
1992 LC_OPT_ENT_ENUM_INT("fpunit", "select the floating point unit", &fp_unit_var),
1993 LC_OPT_ENT_NEGBIT("noaddrmode", "do not use address mode", &ia32_isa_template.opt, IA32_OPT_DOAM),
1994 LC_OPT_ENT_NEGBIT("nolea", "do not optimize for LEAs", &ia32_isa_template.opt, IA32_OPT_LEA),
1995 LC_OPT_ENT_NEGBIT("noplacecnst", "do not place constants", &ia32_isa_template.opt, IA32_OPT_PLACECNST),
1996 LC_OPT_ENT_NEGBIT("noimmop", "no operations with immediates", &ia32_isa_template.opt, IA32_OPT_IMMOPS),
1997 LC_OPT_ENT_NEGBIT("nopushargs", "do not create pushs for function arguments", &ia32_isa_template.opt, IA32_OPT_PUSHARGS),
1998 LC_OPT_ENT_ENUM_INT("gasmode", "set the GAS compatibility mode", &gas_var),
2002 const arch_isa_if_t ia32_isa_if = {
2005 ia32_get_n_reg_class,
2007 ia32_get_reg_class_for_mode,
2009 ia32_get_irn_handler,
2010 ia32_get_code_generator_if,
2011 ia32_get_list_sched_selector,
2012 ia32_get_ilp_sched_selector,
2013 ia32_get_reg_class_alignment,
2014 ia32_get_libfirm_params,
2015 ia32_get_allowed_execution_units,
2020 void ia32_init_emitter(void);
2021 void ia32_init_finish(void);
2022 void ia32_init_optimize(void);
2023 void ia32_init_transform(void);
2024 void ia32_init_x87(void);
2026 void be_init_arch_ia32(void)
2028 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
2029 lc_opt_entry_t *ia32_grp = lc_opt_get_grp(be_grp, "ia32");
2031 lc_opt_add_table(ia32_grp, ia32_options);
2032 be_register_isa_if("ia32", &ia32_isa_if);
2034 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.cg");
2036 ia32_init_emitter();
2038 ia32_init_optimize();
2039 ia32_init_transform();
2043 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_ia32);