/*
 * Copyright (C) 1995-2007 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @brief   This is the main ia32 firm backend driver.
 * @author  Christian Wuerdig
 */
#include <libcore/lc_opts.h>
#include <libcore/lc_opts_enum.h>

#include "pseudo_irg.h"
#include "iredges_t.h"

#include "../beirg_t.h"
#include "../benode_t.h"
#include "../belower.h"
#include "../besched_t.h"
#include "../beirgmod.h"
#include "../be_dbgout.h"
#include "../beblocksched.h"
#include "../bemachine.h"
#include "../beilpsched.h"
#include "../bespillslots.h"
#include "../bemodule.h"
#include "../begnuas.h"
#include "../bestate.h"

#include "bearch_ia32_t.h"

#include "ia32_new_nodes.h"
#include "gen_ia32_regalloc_if.h"
#include "gen_ia32_machine.h"
#include "ia32_transform.h"
#include "ia32_emitter.h"
#include "ia32_map_regs.h"
#include "ia32_optimize.h"
#include "ia32_dbg_stat.h"
#include "ia32_finish.h"
#include "ia32_util.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

static set *cur_reg_set = NULL;

ir_mode *mode_fpcw = NULL;

typedef ir_node *(*create_const_node_func) (dbg_info *dbg, ir_graph *irg, ir_node *block);
static INLINE ir_node *create_const(ia32_code_gen_t *cg, ir_node **place,
                                    create_const_node_func func,
                                    const arch_register_t* reg)
{
    ir_node *block, *res;
    ir_node *in[1];
    ir_node *keep;

    if (*place != NULL)
        return *place;

    block = get_irg_start_block(cg->irg);
    res   = func(NULL, cg->irg, block);
    arch_set_irn_register(cg->arch_env, res, reg);
    *place = res;

    /* keep the node so it isn't accidentally removed when unused ... */
    in[0] = res;
    keep  = be_new_Keep(arch_register_get_class(reg), cg->irg, block, 1, in);
    (void) keep;

    add_irn_dep(get_irg_end(cg->irg), res);
    /* add_irn_dep(get_irg_start(cg->irg), res); */

    return res;
}
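/*
 * Illustrative sketch (not generated code): create_const implements a lazy,
 * per-irg singleton. A wrapper such as ia32_new_NoReg_gp() below behaves
 * like
 *
 *     if (cg->noreg_gp == NULL)
 *         cg->noreg_gp = <fresh NoReg_GP in the start block, pinned by a Keep>;
 *     return cg->noreg_gp;
 *
 * so repeated queries for the same constant always yield the same node.
 */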
/* Creates the unique per irg GP NoReg node. */
ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg) {
    return create_const(cg, &cg->noreg_gp, new_rd_ia32_NoReg_GP,
                        &ia32_gp_regs[REG_GP_NOREG]);
}

ir_node *ia32_new_NoReg_vfp(ia32_code_gen_t *cg) {
    return create_const(cg, &cg->noreg_vfp, new_rd_ia32_NoReg_VFP,
                        &ia32_vfp_regs[REG_VFP_NOREG]);
}

ir_node *ia32_new_NoReg_xmm(ia32_code_gen_t *cg) {
    return create_const(cg, &cg->noreg_xmm, new_rd_ia32_NoReg_XMM,
                        &ia32_xmm_regs[REG_XMM_NOREG]);
}

/* Creates the unique per irg FP NoReg node. */
ir_node *ia32_new_NoReg_fp(ia32_code_gen_t *cg) {
    return USE_SSE2(cg) ? ia32_new_NoReg_xmm(cg) : ia32_new_NoReg_vfp(cg);
}

ir_node *ia32_new_Unknown_gp(ia32_code_gen_t *cg) {
    return create_const(cg, &cg->unknown_gp, new_rd_ia32_Unknown_GP,
                        &ia32_gp_regs[REG_GP_UKNWN]);
}

ir_node *ia32_new_Unknown_vfp(ia32_code_gen_t *cg) {
    return create_const(cg, &cg->unknown_vfp, new_rd_ia32_Unknown_VFP,
                        &ia32_vfp_regs[REG_VFP_UKNWN]);
}

ir_node *ia32_new_Unknown_xmm(ia32_code_gen_t *cg) {
    return create_const(cg, &cg->unknown_xmm, new_rd_ia32_Unknown_XMM,
                        &ia32_xmm_regs[REG_XMM_UKNWN]);
}

ir_node *ia32_new_Fpu_truncate(ia32_code_gen_t *cg) {
    return create_const(cg, &cg->fpu_trunc_mode, new_rd_ia32_ChangeCW,
                        &ia32_fp_cw_regs[REG_FPCW]);
}
/**
 * Returns gp_noreg or fp_noreg, depending on input requirements.
 */
ir_node *ia32_get_admissible_noreg(ia32_code_gen_t *cg, ir_node *irn, int pos) {
    const arch_register_req_t *req;

    req = arch_get_register_req(cg->arch_env, irn, pos);
    assert(req != NULL && "Missing register requirements");
    if (req->cls == &ia32_reg_classes[CLASS_ia32_gp])
        return ia32_new_NoReg_gp(cg);

    return ia32_new_NoReg_fp(cg);
}
/**************************************************
 *  _ __ ___  __ _    __ _| | | ___   ___   _| |_
 * | '__/ _ \/ _` |  / _` | | |/ _ \ / __| | | _|
 * | |  |  __/ (_| | | (_| | | | (_) | (__  | | |
 * |_|  \___|\__, |  \__,_|_|_|\___/ \___| |_|_|
 **************************************************/
/**
 * Return register requirements for an ia32 node.
 * If the node returns a tuple (mode_T) then the Projs
 * will be asked for this information.
 */
static const arch_register_req_t *ia32_get_irn_reg_req(const void *self,
                                                       const ir_node *node,
                                                       int pos)
{
    long     node_pos = pos == -1 ? 0 : pos;
    ir_mode *mode     = is_Block(node) ? NULL : get_irn_mode(node);

    if (is_Block(node) || mode == mode_X) {
        return arch_no_register_req;
    }

    if (mode == mode_T && pos < 0) {
        return arch_no_register_req;
    }

    if (is_Proj(node)) {
        if (mode == mode_M)
            return arch_no_register_req;

        if (pos >= 0) {
            return arch_no_register_req;
        }

        node_pos = (pos == -1) ? get_Proj_proj(node) : pos;
        node     = skip_Proj_const(node);
    }

    if (is_ia32_irn(node)) {
        const arch_register_req_t *req;
        if (pos >= 0)
            req = get_ia32_in_req(node, pos);
        else
            req = get_ia32_out_req(node, node_pos);

        assert(req != NULL);
        return req;
    }

    /* unknowns should be transformed already */
    assert(!is_Unknown(node));

    return arch_no_register_req;
}
static void ia32_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg) {
    int pos = 0;

    if (get_irn_mode(irn) == mode_X) {
        return;
    }
    if (is_Proj(irn)) {
        pos = get_Proj_proj(irn);
        irn = skip_Proj(irn);
    }
    if (is_ia32_irn(irn)) {
        const arch_register_t **slots;

        slots      = get_ia32_slots(irn);
        slots[pos] = reg;
    } else {
        ia32_set_firm_reg(irn, reg, cur_reg_set);
    }
}
static const arch_register_t *ia32_get_irn_reg(const void *self, const ir_node *irn) {
    int pos = 0;
    const arch_register_t *reg = NULL;

    if (is_Proj(irn)) {
        if (get_irn_mode(irn) == mode_X) {
            return NULL;
        }
        pos = get_Proj_proj(irn);
        irn = skip_Proj_const(irn);
    }
    if (is_ia32_irn(irn)) {
        const arch_register_t **slots;
        slots = get_ia32_slots(irn);
        reg   = slots[pos];
    } else {
        reg = ia32_get_firm_reg(irn, cur_reg_set);
    }

    return reg;
}
static arch_irn_class_t ia32_classify(const void *self, const ir_node *irn) {
    arch_irn_class_t classification = arch_irn_class_normal;

    irn = skip_Proj_const(irn);

    if (is_cfop(irn))
        classification |= arch_irn_class_branch;

    if (! is_ia32_irn(irn))
        return classification & ~arch_irn_class_normal;

    if (is_ia32_Cnst(irn))
        classification |= arch_irn_class_const;

    if (is_ia32_Ld(irn))
        classification |= arch_irn_class_load;

    if (is_ia32_St(irn) || is_ia32_Store8Bit(irn))
        classification |= arch_irn_class_store;

    if (is_ia32_need_stackent(irn))
        classification |= arch_irn_class_reload;

    return classification;
}
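/*
 * Note: the classification is a bit set, so combinations are possible; a
 * load that refills a value from a spill slot, for example, reports both
 * arch_irn_class_load and arch_irn_class_reload to its callers.
 */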
static arch_irn_flags_t ia32_get_flags(const void *self, const ir_node *irn) {
    arch_irn_flags_t flags = arch_irn_flags_none;

    if (is_Unknown(irn))
        return arch_irn_flags_ignore;

    if (is_Proj(irn) && mode_is_datab(get_irn_mode(irn))) {
        ir_node *pred = get_Proj_pred(irn);

        if (is_ia32_irn(pred)) {
            flags = get_ia32_out_flags(pred, get_Proj_proj(irn));
        }

        irn = pred;
    }

    if (is_ia32_irn(irn)) {
        flags |= get_ia32_flags(irn);
    }

    return flags;
}
/**
 * The IA32 ABI callback object.
 */
typedef struct {
    be_abi_call_flags_bits_t flags;  /**< The call flags. */
    const arch_isa_t *isa;           /**< The ISA handle. */
    const arch_env_t *aenv;          /**< The architecture environment. */
    ir_graph *irg;                   /**< The associated graph. */
} ia32_abi_env_t;
static ir_entity *ia32_get_frame_entity(const void *self, const ir_node *irn) {
    return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
}

static void ia32_set_frame_entity(const void *self, ir_node *irn, ir_entity *ent) {
    set_ia32_frame_ent(irn, ent);
}
static void ia32_set_frame_offset(const void *self, ir_node *irn, int bias) {
    const ia32_irn_ops_t *ops = self;

    if (get_ia32_frame_ent(irn)) {
        ia32_am_flavour_t am_flav;

        if (is_ia32_Pop(irn)) {
            int omit_fp = be_abi_omit_fp(ops->cg->birg->abi);

            /* Pop nodes modify the stack pointer before calculating the
             * destination address, so fix this here */
            if (omit_fp)
                bias -= 4;
        }

        am_flav  = get_ia32_am_flavour(irn);
        am_flav |= ia32_O;
        set_ia32_am_flavour(irn, am_flav);

        add_ia32_am_offs_int(irn, bias);
    }
}
static int ia32_get_sp_bias(const void *self, const ir_node *irn) {
    if (is_Proj(irn)) {
        long     proj = get_Proj_proj(irn);
        ir_node *pred = get_Proj_pred(irn);

        if (is_ia32_Push(pred) && proj == pn_ia32_Push_stack)
            return 4;
        if (is_ia32_Pop(pred) && proj == pn_ia32_Pop_stack)
            return -4;
    }

    return 0;
}
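/*
 * Example (illustrative): a Push moves esp down by 4 bytes, so its stack
 * Proj reports a bias of +4 and every esp-relative offset computed after
 * the Push has to be corrected by that amount; a Pop reports -4
 * accordingly. The generic stack-bias pass sums these values per block.
 */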
/**
 * Put all registers which are saved by the prologue/epilogue in a set.
 *
 * @param self The callback object.
 * @param s    The result set.
 */
static void ia32_abi_dont_save_regs(void *self, pset *s)
{
    ia32_abi_env_t *env = self;
    if (env->flags.try_omit_fp)
        pset_insert_ptr(s, env->isa->bp);
}
/**
 * Generate the routine prologue.
 *
 * @param self    The callback object.
 * @param mem     A pointer to the mem node. Update this if you define new memory.
 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
 *
 * @return        The register which shall be used as a stack frame base.
 *
 * All nodes which define registers in @p reg_map must keep @p reg_map current.
 */
static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map)
{
    ia32_abi_env_t   *env = self;
    const ia32_isa_t *isa = (ia32_isa_t *)env->isa;
    ia32_code_gen_t  *cg  = isa->cg;

    if (! env->flags.try_omit_fp) {
        ir_node *bl      = get_irg_start_block(env->irg);
        ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
        ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);
        ir_node *noreg   = ia32_new_NoReg_gp(cg);
        ir_node *push;

        /* push ebp */
        push    = new_rd_ia32_Push(NULL, env->irg, bl, noreg, noreg, curr_bp, curr_sp, *mem);
        curr_sp = new_r_Proj(env->irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
        *mem    = new_r_Proj(env->irg, bl, push, mode_M, pn_ia32_Push_M);

        /* the push must have SP out register */
        arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
        set_ia32_flags(push, arch_irn_flags_ignore);

        /* move esp to ebp */
        curr_bp = be_new_Copy(env->isa->bp->reg_class, env->irg, bl, curr_sp);
        be_set_constr_single_reg(curr_bp, BE_OUT_POS(0), env->isa->bp);
        arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
        be_node_set_flags(curr_bp, BE_OUT_POS(0), arch_irn_flags_ignore);

        /* beware: the copy must be done before any other sp use */
        curr_sp = be_new_CopyKeep_single(env->isa->sp->reg_class, env->irg, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
        be_set_constr_single_reg(curr_sp, BE_OUT_POS(0), env->isa->sp);
        arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
        be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore);

        be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
        be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);

        return env->isa->bp;
    }

    return env->isa->sp;
}
/**
 * Generate the routine epilogue.
 * @param self    The callback object.
 * @param bl      The block for the epilogue
 * @param mem     A pointer to the mem node. Update this if you define new memory.
 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
 *
 * All nodes which define registers in @p reg_map must keep @p reg_map current.
 */
static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map)
{
    ia32_abi_env_t *env     = self;
    ir_node        *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
    ir_node        *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);

    if (env->flags.try_omit_fp) {
        /* simply remove the stack frame here */
        curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK);
        add_irn_dep(curr_sp, *mem);
    } else {
        const ia32_isa_t *isa     = (ia32_isa_t *)env->isa;
        ia32_code_gen_t  *cg      = isa->cg;
        ir_mode          *mode_bp = env->isa->bp->reg_class->mode;

        /* gcc always emits a leave at the end of a routine */
        if (1 || ARCH_AMD(isa->opt_arch)) {
            ir_node *leave;

            /* leave */
            leave   = new_rd_ia32_Leave(NULL, env->irg, bl, curr_sp, curr_bp);
            set_ia32_flags(leave, arch_irn_flags_ignore);
            curr_bp = new_r_Proj(current_ir_graph, bl, leave, mode_bp, pn_ia32_Leave_frame);
            curr_sp = new_r_Proj(current_ir_graph, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
        } else {
            ir_node *noreg = ia32_new_NoReg_gp(cg);
            ir_node *pop;

            /* copy ebp to esp */
            curr_sp = be_new_SetSP(env->isa->sp, env->irg, bl, curr_sp, curr_bp, *mem);

            /* pop ebp */
            pop     = new_rd_ia32_Pop(NULL, env->irg, bl, noreg, noreg, curr_sp, *mem);
            set_ia32_flags(pop, arch_irn_flags_ignore);
            curr_bp = new_r_Proj(current_ir_graph, bl, pop, mode_bp, pn_ia32_Pop_res);
            curr_sp = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);

            *mem = new_r_Proj(current_ir_graph, bl, pop, mode_M, pn_ia32_Pop_M);
        }
        arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
        arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
    }

    be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
    be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);
}
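/*
 * The two epilogue variants above are equivalent in effect: "leave" is x86
 * shorthand for
 *
 *     mov esp, ebp
 *     pop ebp
 *
 * which is exactly what the SetSP/Pop pair in the else branch reconstructs.
 */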
/**
 * Initialize the callback object.
 * @param call The call object.
 * @param aenv The architecture environment.
 * @param irg  The graph with the method.
 * @return     Some pointer. This pointer is passed to all other callback functions as self object.
 */
static void *ia32_abi_init(const be_abi_call_t *call, const arch_env_t *aenv, ir_graph *irg)
{
    ia32_abi_env_t      *env = xmalloc(sizeof(env[0]));
    be_abi_call_flags_t  fl  = be_abi_call_get_flags(call);
    env->flags = fl.bits;
    env->irg   = irg;
    env->aenv  = aenv;
    env->isa   = aenv->isa;
    return env;
}
/**
 * Destroy the callback object.
 * @param self The callback object.
 */
static void ia32_abi_done(void *self) {
    free(self);
}
/**
 * Produces the type which sits between the stack args and the locals on the stack.
 * It will contain the return address and space to store the old base pointer.
 * @return The Firm type modeling the ABI between type.
 */
static ir_type *ia32_abi_get_between_type(void *self)
{
#define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
    static ir_type *omit_fp_between_type = NULL;
    static ir_type *between_type         = NULL;

    ia32_abi_env_t *env = self;

    if (! between_type) {
        ir_entity *old_bp_ent;
        ir_entity *ret_addr_ent;
        ir_entity *omit_fp_ret_addr_ent;

        ir_type *old_bp_type   = new_type_primitive(IDENT("bp"), mode_Iu);
        ir_type *ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_Iu);

        between_type = new_type_struct(IDENT("ia32_between_type"));
        old_bp_ent   = new_entity(between_type, IDENT("old_bp"), old_bp_type);
        ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type);

        set_entity_offset(old_bp_ent, 0);
        set_entity_offset(ret_addr_ent, get_type_size_bytes(old_bp_type));
        set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
        set_type_state(between_type, layout_fixed);

        omit_fp_between_type = new_type_struct(IDENT("ia32_between_type_omit_fp"));
        omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type);

        set_entity_offset(omit_fp_ret_addr_ent, 0);
        set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
        set_type_state(omit_fp_between_type, layout_fixed);
    }

    return env->flags.try_omit_fp ? omit_fp_between_type : between_type;
}
#undef IDENT
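/*
 * Resulting stack layout around the between type (frame pointer setup,
 * illustrative):
 *
 *     ...               <- caller's argument area
 *     return address    <- pushed by the call
 *     old ebp           <- pushed by the prologue; ebp points here
 *     locals/spills ... <- the callee's frame entities
 *
 * With try_omit_fp only the return address slot remains, which is what the
 * smaller omit_fp_between_type models.
 */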
/**
 * Get the estimated cycle count for @p irn.
 *
 * @param self The this pointer.
 * @param irn  The node.
 *
 * @return     The estimated cycle count for this operation
 */
static int ia32_get_op_estimated_cost(const void *self, const ir_node *irn)
{
    int                   cost;
    ia32_op_type_t        op_tp;
    const ia32_irn_ops_t *ops = self;

    if (!is_ia32_irn(irn))
        return 0;

    assert(is_ia32_irn(irn));

    cost  = get_ia32_latency(irn);
    op_tp = get_ia32_op_type(irn);

    if (is_ia32_CopyB(irn)) {
        cost = 250;
        if (ARCH_INTEL(ops->cg->arch))
            cost += 150;
    }
    else if (is_ia32_CopyB_i(irn)) {
        int size = get_tarval_long(get_ia32_Immop_tarval(irn));
        /* 4/3 cycles per byte; the constant must be floating point,
         * an integer 4/3 would truncate to 1 */
        cost = 20 + (int)ceil((4.0 / 3.0) * size);
        if (ARCH_INTEL(ops->cg->arch))
            cost += 150;
    }
    /* in case of address mode operations add additional cycles */
    else if (op_tp == ia32_AddrModeD || op_tp == ia32_AddrModeS) {
        /*
            In case of stack access, add 5 cycles (we assume the stack is in cache),
            other memory operations cost 20 cycles.
        */
        cost += is_ia32_use_frame(irn) ? 5 : 20;
    }

    return cost;
}
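/*
 * Worked example (illustrative): an immediate CopyB of size 30 bytes costs
 * 20 + ceil(4/3 * 30) = 60 cycles by the formula above, plus the extra
 * penalty on Intel architectures; a simple ALU op reading a frame entity in
 * source address mode costs its base latency plus 5 cycles.
 */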
/**
 * Returns the inverse operation of @p irn, recalculating the argument at position @p i.
 *
 * @param irn     The original operation
 * @param i       Index of the argument we want the inverse operation to yield
 * @param inverse struct to be filled with the resulting inverse op
 * @param obst    The obstack to use for allocation of the returned nodes array
 * @return        The inverse operation or NULL if the operation is not invertible
 */
static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) {
    ir_graph *irg;
    ir_mode  *mode;
    ir_mode  *irn_mode;
    ir_node  *block, *noreg, *nomem;
    dbg_info *dbg;

    /* we cannot invert non-ia32 irns */
    if (! is_ia32_irn(irn))
        return NULL;

    /* operand must always be a real operand (not base, index or mem) */
    if (i != 2 && i != 3)
        return NULL;

    /* we don't invert address mode operations */
    if (get_ia32_op_type(irn) != ia32_Normal)
        return NULL;

    irg      = get_irn_irg(irn);
    block    = get_nodes_block(irn);
    mode     = get_irn_mode(irn);
    irn_mode = get_irn_mode(irn);
    noreg    = get_irn_n(irn, 0);
    nomem    = new_r_NoMem(irg);
    dbg      = get_irn_dbg_info(irn);

    /* initialize structure */
    inverse->nodes = obstack_alloc(obst, 2 * sizeof(inverse->nodes[0]));
    inverse->costs = 0;
    inverse->n     = 1;

    switch (get_ia32_irn_opcode(irn)) {
        case iro_ia32_Add:
            if (get_ia32_immop_type(irn) == ia32_ImmConst) {
                /* we have an add with a const here */
                /* inverse == add with negated const */
                inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
                inverse->costs   += 1;
                copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
                set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
                set_ia32_commutative(inverse->nodes[0]);
            }
            else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
                /* we have an add with a symconst here */
                /* inverse == sub with const */
                inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
                inverse->costs   += 2;
                copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
            }
            else {
                /* normal add: inverse == sub */
                inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, (ir_node*) irn, get_irn_n(irn, i ^ 1), nomem);
                inverse->costs   += 2;
            }
            break;
        case iro_ia32_Sub:
            if (get_ia32_immop_type(irn) != ia32_ImmNone) {
                /* we have a sub with a const/symconst here */
                /* inverse == add with this const */
                inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
                inverse->costs   += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
                copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
            }
            else {
                /* normal sub */
                if (i == 2) {
                    inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, (ir_node*) irn, get_irn_n(irn, 3), nomem);
                }
                else {
                    inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, get_irn_n(irn, 2), (ir_node*) irn, nomem);
                }
                inverse->costs += 1;
            }
            break;
        case iro_ia32_Xor:
            if (get_ia32_immop_type(irn) != ia32_ImmNone) {
                /* xor with const: inverse = xor */
                inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
                inverse->costs   += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
                copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
            }
            else {
                /* normal xor */
                inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, (ir_node *) irn, get_irn_n(irn, i), nomem);
                inverse->costs   += 1;
            }
            break;
        case iro_ia32_Not:
            inverse->nodes[0] = new_rd_ia32_Not(dbg, irg, block, noreg, noreg, (ir_node*) irn, nomem);
            inverse->costs   += 1;
            break;
        case iro_ia32_Neg:
            inverse->nodes[0] = new_rd_ia32_Neg(dbg, irg, block, noreg, noreg, (ir_node*) irn, nomem);
            inverse->costs   += 1;
            break;
        default:
            /* inverse operation not supported */
            return NULL;
    }

    return inverse;
}
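/*
 * Example (illustrative): if irn computes x = a + 5 and operand a (position
 * i) must be recovered from x, the inverse built above is an Add of x and
 * the negated constant, i.e. a = x + (-5). The costs field lets callers
 * weigh such rematerialization against a spill/reload.
 */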
static ir_mode *get_spill_mode_mode(const ir_mode *mode)
{
    if (mode_is_float(mode))
        return mode_E;

    return mode_Iu;
}

/**
 * Get the mode that should be used for spilling the given value node
 */
static ir_mode *get_spill_mode(const ir_node *node)
{
    ir_mode *mode = get_irn_mode(node);
    return get_spill_mode_mode(mode);
}

/**
 * Checks whether an addressmode reload for a node with mode mode is compatible
 * with a spillslot of mode spill_mode
 */
static int ia32_is_spillmode_compatible(const ir_mode *mode, const ir_mode *spillmode)
{
    if (mode_is_float(mode)) {
        return mode == spillmode;
    } else {
        return get_mode_size_bits(mode) <= get_mode_size_bits(spillmode);
    }
}
/**
 * Check if irn can load its operand at position i from memory (source addressmode).
 * @param self Pointer to irn ops itself
 * @param irn  The irn to be checked
 * @param i    The operands position
 * @return     Non-Zero if operand can be loaded
 */
static int ia32_possible_memory_operand(const void *self, const ir_node *irn, unsigned int i) {
    ir_node       *op        = get_irn_n(irn, i);
    const ir_mode *mode      = get_irn_mode(op);
    const ir_mode *spillmode = get_spill_mode(op);

    if (! is_ia32_irn(irn)                              ||  /* must be an ia32 irn */
        get_irn_arity(irn) != 5                         ||  /* must be a binary operation */
        get_ia32_op_type(irn) != ia32_Normal            ||  /* must not already be an addressmode irn */
        ! (get_ia32_am_support(irn) & ia32_am_Source)   ||  /* must be capable of source addressmode */
        ! ia32_is_spillmode_compatible(mode, spillmode) ||
        (i != 2 && i != 3)                              ||  /* a "real" operand position must be requested */
        (i == 2 && ! is_ia32_commutative(irn))          ||  /* if first operand requested irn must be commutative */
        is_ia32_use_frame(irn))                             /* must not already use frame */
        return 0;

    return 1;
}
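/*
 * Example (illustrative): folding a reload into its user via source address
 * mode turns
 *
 *     mov eax, DWORD [ebp-8]   ; reload
 *     add ecx, eax
 *
 * into a single "add ecx, DWORD [ebp-8]". Only operand position 3 can be
 * read from memory, which is why position 2 is admissible solely for
 * commutative nodes: the operands are swapped first, see
 * ia32_perform_memory_operand() below.
 */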
static void ia32_perform_memory_operand(const void *self, ir_node *irn, ir_node *spill, unsigned int i) {
    const ia32_irn_ops_t *ops = self;
    ia32_code_gen_t      *cg  = ops->cg;

    assert(ia32_possible_memory_operand(self, irn, i) && "Cannot perform memory operand change");

    if (i == 2) {
        ir_node *tmp = get_irn_n(irn, 3);
        set_irn_n(irn, 3, get_irn_n(irn, 2));
        set_irn_n(irn, 2, tmp);
    }

    set_ia32_am_support(irn, ia32_am_Source);
    set_ia32_op_type(irn, ia32_AddrModeS);
    set_ia32_am_flavour(irn, ia32_B);
    set_ia32_ls_mode(irn, get_irn_mode(get_irn_n(irn, i)));
    set_ia32_use_frame(irn);
    set_ia32_need_stackent(irn);

    set_irn_n(irn, 0, get_irg_frame(get_irn_irg(irn)));
    set_irn_n(irn, 3, ia32_get_admissible_noreg(cg, irn, 3));
    set_irn_n(irn, 4, spill);

    //FIXME DBG_OPT_AM_S(reload, irn);
}
static const be_abi_callbacks_t ia32_abi_callbacks = {
    ia32_abi_init,
    ia32_abi_done,
    ia32_abi_get_between_type,
    ia32_abi_dont_save_regs,
    ia32_abi_prologue,
    ia32_abi_epilogue,
};

/* fill register allocator interface */

static const arch_irn_ops_if_t ia32_irn_ops_if = {
    ia32_get_irn_reg_req,
    ia32_set_irn_reg,
    ia32_get_irn_reg,
    ia32_classify,
    ia32_get_flags,
    ia32_get_frame_entity,
    ia32_set_frame_entity,
    ia32_set_frame_offset,
    ia32_get_sp_bias,
    ia32_get_inverse,
    ia32_get_op_estimated_cost,
    ia32_possible_memory_operand,
    ia32_perform_memory_operand,
};

ia32_irn_ops_t ia32_irn_ops = {
    &ia32_irn_ops_if,
    NULL
};
/**************************************************
 *   ___ ___   __| | ___   __ _  ___ _ __    _| |_
 *  / __/ _ \ / _` |/ _ \ / _` |/ _ \ '_ \  | | _|
 * | (_| (_) | (_| |  __/| (_| |  __/ | | | | | |
 *  \___\___/ \__,_|\___| \__, |\___|_| |_| |_|_|
 **************************************************/
/**
 * Transforms the standard firm graph into
 * an ia32 firm graph
 */
static void ia32_prepare_graph(void *self) {
    ia32_code_gen_t *cg = self;

    /* transform psi condition trees */
    ia32_pre_transform_phase(cg);

    /* transform all remaining nodes */
    ia32_transform_graph(cg);
    //add_fpu_edges(cg->birg);

    // Matze: disabled for now. Because after transformation start block has no
    // self-loop anymore so it might be merged with its successor block. This
    // will bring several nodes to the startblock which sometimes get scheduled
    // before the initial IncSP/Barrier
    local_optimize_graph(cg->irg);

    if (cg->dump)
        be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);

    /* optimize address mode */
    ia32_optimize_graph(cg);

    if (cg->dump)
        be_dump(cg->irg, "-am", dump_ir_block_graph_sched);

    /* do code placement, to optimize the position of constants */
    place_code(cg->irg);

    if (cg->dump)
        be_dump(cg->irg, "-place", dump_ir_block_graph_sched);
}
/**
 * Dummy functions for hooks we don't need but which must be filled.
 */
static void ia32_before_sched(void *self) {
}

static void remove_unused_nodes(ir_node *irn, bitset_t *already_visited) {
    int      i, arity;
    ir_mode *mode;
    ir_node *mem_proj = NULL;

    mode = get_irn_mode(irn);

    /* check if we already saw this node or the node has more than one user */
    if (bitset_contains_irn(already_visited, irn) || get_irn_n_edges(irn) > 1) {
        return;
    }

    /* mark irn visited */
    bitset_add_irn(already_visited, irn);

    /* non-Tuple nodes with one user: ok, return */
    if (get_irn_n_edges(irn) >= 1 && mode != mode_T) {
        return;
    }

    /* tuple node has one user which is not the mem proj-> ok */
    if (mode == mode_T && get_irn_n_edges(irn) == 1) {
        mem_proj = ia32_get_proj_for_mode(irn, mode_M);
        if (mem_proj == NULL) {
            return;
        }
    }

    arity = get_irn_arity(irn);
    for (i = 0; i < arity; ++i) {
        ir_node *pred = get_irn_n(irn, i);

        /* do not follow memory edges or we will accidentally remove stores */
        if (get_irn_mode(pred) == mode_M) {
            if (mem_proj != NULL) {
                edges_reroute(mem_proj, pred, get_irn_irg(mem_proj));
                mem_proj = NULL;
            }
            continue;
        }

        set_irn_n(irn, i, new_Bad());

        /*
            The current node is about to be removed: if the predecessor
            has only this node as user, it needs to be removed as well.
        */
        if (get_irn_n_edges(pred) <= 1)
            remove_unused_nodes(pred, already_visited);
    }

    // we need to set the preds to Bad again to also get the memory edges
    arity = get_irn_arity(irn);
    for (i = 0; i < arity; ++i) {
        set_irn_n(irn, i, new_Bad());
    }

    if (sched_is_scheduled(irn)) {
        sched_remove(irn);
    }
}

static void remove_unused_loads_walker(ir_node *irn, void *env) {
    bitset_t *already_visited = env;
    if (is_ia32_Ld(irn) && ! bitset_contains_irn(already_visited, irn))
        remove_unused_nodes(irn, env);
}
/**
 * Called before the register allocator.
 * Calculate a block schedule here. We need it for the x87
 * simulator and the emitter.
 */
static void ia32_before_ra(void *self) {
    ia32_code_gen_t *cg              = self;
    bitset_t        *already_visited = bitset_irg_alloca(cg->irg);

    /*
        There are sometimes unused loads, only pinned by memory.
        We need to remove those Loads and all other nodes which won't be used
        after removing the Load from schedule.
    */
    irg_walk_graph(cg->irg, NULL, remove_unused_loads_walker, already_visited);

    /* setup fpu rounding modes */
    ia32_setup_fpu_mode(cg);
}
/**
 * Transforms a be_Reload into an ia32 Load.
 */
static void transform_to_Load(ia32_code_gen_t *cg, ir_node *node) {
    ir_graph  *irg         = get_irn_irg(node);
    dbg_info  *dbg         = get_irn_dbg_info(node);
    ir_node   *block       = get_nodes_block(node);
    ir_entity *ent         = be_get_frame_entity(node);
    ir_mode   *mode        = get_irn_mode(node);
    ir_mode   *spillmode   = get_spill_mode(node);
    ir_node   *noreg       = ia32_new_NoReg_gp(cg);
    ir_node   *sched_point = NULL;
    ir_node   *ptr         = get_irg_frame(irg);
    ir_node   *mem         = get_irn_n(node, be_pos_Reload_mem);
    ir_node   *new_op, *proj;
    const arch_register_t *reg;

    if (sched_is_scheduled(node)) {
        sched_point = sched_prev(node);
    }

    if (mode_is_float(spillmode)) {
        if (USE_SSE2(cg))
            new_op = new_rd_ia32_xLoad(dbg, irg, block, ptr, noreg, mem);
        else
            new_op = new_rd_ia32_vfld(dbg, irg, block, ptr, noreg, mem);
    }
    else if (get_mode_size_bits(spillmode) == 128) {
        // Reload 128 bit sse registers
        new_op = new_rd_ia32_xxLoad(dbg, irg, block, ptr, noreg, mem);
    }
    else
        new_op = new_rd_ia32_Load(dbg, irg, block, ptr, noreg, mem);

    set_ia32_am_support(new_op, ia32_am_Source);
    set_ia32_op_type(new_op, ia32_AddrModeS);
    set_ia32_am_flavour(new_op, ia32_B);
    set_ia32_ls_mode(new_op, spillmode);
    set_ia32_frame_ent(new_op, ent);
    set_ia32_use_frame(new_op);

    DBG_OPT_RELOAD2LD(node, new_op);

    proj = new_rd_Proj(dbg, irg, block, new_op, mode, pn_ia32_Load_res);

    if (sched_point) {
        sched_add_after(sched_point, new_op);
        sched_add_after(new_op, proj);

        sched_remove(node);
    }

    /* copy the register from the old node to the new Load */
    reg = arch_get_irn_register(cg->arch_env, node);
    arch_set_irn_register(cg->arch_env, new_op, reg);

    SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node));

    exchange(node, proj);
}
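/*
 * Net effect (illustrative): a be_Reload of a 32 bit gp value becomes
 * "mov <reg>, DWORD [ebp + <entity offset>]", while float values are
 * refilled with vfld (virtual x87) or the SSE load, chosen by the spill
 * mode above.
 */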
/**
 * Transforms a be_Spill node into an ia32 Store.
 */
static void transform_to_Store(ia32_code_gen_t *cg, ir_node *node) {
    ir_graph  *irg   = get_irn_irg(node);
    dbg_info  *dbg   = get_irn_dbg_info(node);
    ir_node   *block = get_nodes_block(node);
    ir_entity *ent   = be_get_frame_entity(node);
    const ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
    ir_mode   *mode  = get_spill_mode(spillval);
    ir_node   *noreg = ia32_new_NoReg_gp(cg);
    ir_node   *nomem = new_rd_NoMem(irg);
    ir_node   *ptr   = get_irg_frame(irg);
    ir_node   *val   = get_irn_n(node, be_pos_Spill_val);
    ir_node   *store;
    ir_node   *sched_point = NULL;

    if (sched_is_scheduled(node)) {
        sched_point = sched_prev(node);
    }

    /* No need to spill unknown values... */
    if (is_ia32_Unknown_GP(val) ||
        is_ia32_Unknown_VFP(val) ||
        is_ia32_Unknown_XMM(val)) {
        store = nomem;
        if (sched_point)
            sched_remove(node);

        exchange(node, store);
        return;
    }

    if (mode_is_float(mode)) {
        if (USE_SSE2(cg))
            store = new_rd_ia32_xStore(dbg, irg, block, ptr, noreg, val, nomem);
        else
            store = new_rd_ia32_vfst(dbg, irg, block, ptr, noreg, val, nomem);
    } else if (get_mode_size_bits(mode) == 128) {
        // Spill 128 bit SSE registers
        store = new_rd_ia32_xxStore(dbg, irg, block, ptr, noreg, val, nomem);
    } else if (get_mode_size_bits(mode) == 8) {
        store = new_rd_ia32_Store8Bit(dbg, irg, block, ptr, noreg, val, nomem);
    } else {
        store = new_rd_ia32_Store(dbg, irg, block, ptr, noreg, val, nomem);
    }

    set_ia32_am_support(store, ia32_am_Dest);
    set_ia32_op_type(store, ia32_AddrModeD);
    set_ia32_am_flavour(store, ia32_B);
    set_ia32_ls_mode(store, mode);
    set_ia32_frame_ent(store, ent);
    set_ia32_use_frame(store);
    SET_IA32_ORIG_NODE(store, ia32_get_old_node_name(cg, node));
    DBG_OPT_SPILL2ST(node, store);

    if (sched_point) {
        sched_add_after(sched_point, store);
        sched_remove(node);
    }

    exchange(node, store);
}
static ir_node *create_push(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent) {
    ir_graph *irg   = get_irn_irg(node);
    dbg_info *dbg   = get_irn_dbg_info(node);
    ir_node  *block = get_nodes_block(node);
    ir_node  *noreg = ia32_new_NoReg_gp(cg);
    ir_node  *frame = get_irg_frame(irg);

    ir_node *push = new_rd_ia32_Push(dbg, irg, block, frame, noreg, noreg, sp, mem);

    set_ia32_frame_ent(push, ent);
    set_ia32_use_frame(push);
    set_ia32_op_type(push, ia32_AddrModeS);
    set_ia32_am_flavour(push, ia32_B);
    set_ia32_ls_mode(push, mode_Is);

    sched_add_before(schedpoint, push);
    return push;
}

static ir_node *create_pop(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_entity *ent) {
    ir_graph *irg   = get_irn_irg(node);
    dbg_info *dbg   = get_irn_dbg_info(node);
    ir_node  *block = get_nodes_block(node);
    ir_node  *noreg = ia32_new_NoReg_gp(cg);
    ir_node  *frame = get_irg_frame(irg);

    ir_node *pop = new_rd_ia32_Pop(dbg, irg, block, frame, noreg, sp, new_NoMem());

    set_ia32_frame_ent(pop, ent);
    set_ia32_use_frame(pop);
    set_ia32_op_type(pop, ia32_AddrModeD);
    set_ia32_am_flavour(pop, ia32_am_OB);
    set_ia32_ls_mode(pop, mode_Is);

    sched_add_before(schedpoint, pop);
    return pop;
}

static ir_node* create_spproj(ia32_code_gen_t *cg, ir_node *node, ir_node *pred, int pos, ir_node *schedpoint) {
    ir_graph *irg    = get_irn_irg(node);
    dbg_info *dbg    = get_irn_dbg_info(node);
    ir_node  *block  = get_nodes_block(node);
    ir_mode  *spmode = mode_Iu;
    const arch_register_t *spreg = &ia32_gp_regs[REG_ESP];
    ir_node  *sp;

    sp = new_rd_Proj(dbg, irg, block, pred, spmode, pos);
    arch_set_irn_register(cg->arch_env, sp, spreg);
    sched_add_before(schedpoint, sp);

    return sp;
}
/**
 * Transform memperm, currently we do this the ugly way and produce
 * push/pop into/from memory cascades. This is possible without using
 * any registers.
 */
static void transform_MemPerm(ia32_code_gen_t *cg, ir_node *node) {
    ir_graph        *irg   = get_irn_irg(node);
    ir_node         *block = get_nodes_block(node);
    ir_node         *in[1];
    ir_node         *keep;
    ir_node         *sp    = be_abi_get_ignore_irn(cg->birg->abi, &ia32_gp_regs[REG_ESP]);
    const ir_edge_t *edge;
    const ir_edge_t *next;
    ir_node        **pops;
    int              arity;
    int              i;

    arity = be_get_MemPerm_entity_arity(node);
    pops  = alloca(arity * sizeof(pops[0]));

    // create pushs
    for (i = 0; i < arity; ++i) {
        ir_entity *ent     = be_get_MemPerm_in_entity(node, i);
        ir_type   *enttype = get_entity_type(ent);
        int        entbits = get_type_size_bits(enttype);
        ir_node   *mem     = get_irn_n(node, i + 1);
        ir_node   *push;

        assert(
            get_type_size_bits(get_entity_type(be_get_MemPerm_out_entity(node, i))) == get_type_size_bits(enttype));
        assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");

        push = create_push(cg, node, node, sp, mem, ent);
        sp = create_spproj(cg, node, push, pn_ia32_Push_stack, node);
        if (entbits == 64) {
            // add another push after the first one
            push = create_push(cg, node, node, sp, mem, ent);
            add_ia32_am_offs_int(push, 4);
            sp = create_spproj(cg, node, push, pn_ia32_Push_stack, node);
        }

        set_irn_n(node, i, new_Bad());
    }

    // create pops
    for (i = arity - 1; i >= 0; --i) {
        ir_entity *ent     = be_get_MemPerm_out_entity(node, i);
        ir_type   *enttype = get_entity_type(ent);
        int        entbits = get_type_size_bits(enttype);
        ir_node   *pop;

        assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");

        pop = create_pop(cg, node, node, sp, ent);
        sp  = create_spproj(cg, node, pop, pn_ia32_Pop_stack, node);
        if (entbits == 64) {
            add_ia32_am_offs_int(pop, 4);

            // add another pop after the first one
            pop = create_pop(cg, node, node, sp, ent);
            sp  = create_spproj(cg, node, pop, pn_ia32_Pop_stack, node);
        }

        pops[i] = pop;
    }

    in[0] = sp;
    keep  = be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in);
    sched_add_before(node, keep);

    // exchange memprojs
    foreach_out_edge_safe(node, edge, next) {
        ir_node *proj = get_edge_src_irn(edge);
        int p = get_Proj_proj(proj);

        assert(p < arity);

        set_Proj_pred(proj, pops[p]);
        set_Proj_proj(proj, 3);  /* the memory result of the Pop */
    }

    // remove memperm
    arity = get_irn_arity(node);
    for (i = 0; i < arity; ++i) {
        set_irn_n(node, i, new_Bad());
    }
    sched_remove(node);
}
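/*
 * Sketch of the resulting code for a swap of two 32 bit spill slots A and B
 * (illustrative only):
 *
 *     push DWORD [<frame> + A]   ; save all source slots on the stack ...
 *     push DWORD [<frame> + B]
 *     pop  DWORD [<frame> + A]   ; ... then pop into the destination slots
 *     pop  DWORD [<frame> + B]   ; in reverse order
 *
 * The LIFO order of push/pop realizes the permutation without needing a
 * free register; 64 bit entities simply use two push/pop pairs each,
 * offset by 4 bytes.
 */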
/**
 * Block-Walker: Calls the transform functions Spill and Reload.
 */
static void ia32_after_ra_walker(ir_node *block, void *env) {
    ir_node *node, *prev;
    ia32_code_gen_t *cg = env;

    /* beware: the schedule is changed here */
    for (node = sched_last(block); !sched_is_begin(node); node = prev) {
        prev = sched_prev(node);

        if (be_is_Reload(node)) {
            transform_to_Load(cg, node);
        } else if (be_is_Spill(node)) {
            transform_to_Store(cg, node);
        } else if (be_is_MemPerm(node)) {
            transform_MemPerm(cg, node);
        }
    }
}
/**
 * Collects nodes that need frame entities assigned.
 */
static void ia32_collect_frame_entity_nodes(ir_node *node, void *data)
{
    be_fec_env_t *env = data;

    if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
        const ir_mode *mode = get_spill_mode_mode(get_irn_mode(node));
        int align = get_mode_size_bytes(mode);
        be_node_needs_frame_entity(env, node, mode, align);
    } else if (is_ia32_irn(node) && get_ia32_frame_ent(node) == NULL
               && is_ia32_use_frame(node)) {
        if (is_ia32_need_stackent(node) || is_ia32_Load(node)) {
            const ir_mode *mode = get_ia32_ls_mode(node);
            int align = get_mode_size_bytes(mode);
            be_node_needs_frame_entity(env, node, mode, align);
        } else if (is_ia32_vfild(node) || is_ia32_xLoad(node)) {
            const ir_mode *mode = get_ia32_ls_mode(node);
            int align = 4;
            be_node_needs_frame_entity(env, node, mode, align);
        } else if (is_ia32_FldCW(node)) {
            const ir_mode *mode = ia32_reg_classes[CLASS_ia32_fp_cw].mode;
            int align = 4;
            be_node_needs_frame_entity(env, node, mode, align);
        } else if (is_ia32_SetST0(node)) {
            const ir_mode *mode = get_ia32_ls_mode(node);
            int align = 4;
            be_node_needs_frame_entity(env, node, mode, align);
        } else {
#ifndef NDEBUG
            if (!is_ia32_Store(node)
                    && !is_ia32_xStore(node)
                    && !is_ia32_xStoreSimple(node)
                    && !is_ia32_vfist(node)
                    && !is_ia32_GetST0(node)
                    && !is_ia32_FnstCW(node)) {
                assert(0);
            }
#endif
        }
    }
}
/**
 * We transform Spill and Reload here. This needs to be done before
 * stack biasing otherwise we would miss the corrected offset for these nodes.
 */
static void ia32_after_ra(void *self) {
    ia32_code_gen_t *cg      = self;
    ir_graph        *irg     = cg->irg;
    be_fec_env_t    *fec_env = be_new_frame_entity_coalescer(cg->birg);

    /* create and coalesce frame entities */
    irg_walk_graph(irg, NULL, ia32_collect_frame_entity_nodes, fec_env);
    be_assign_entities(fec_env);
    be_free_frame_entity_coalescer(fec_env);

    irg_block_walk_graph(irg, NULL, ia32_after_ra_walker, cg);

    ia32_finish_irg(irg, cg);
}
/**
 * Last touchups for the graph before emit: x87 simulation to replace the
 * virtual with real x87 instructions, creating a block schedule and peephole
 * optimisations.
 */
static void ia32_finish(void *self) {
    ia32_code_gen_t *cg  = self;
    ir_graph        *irg = cg->irg;

    /* if we do x87 code generation, rewrite all the virtual instructions and registers */
    if (cg->used_fp == fp_x87 || cg->force_sim) {
        x87_simulate_graph(cg->arch_env, cg->birg);
    }

    /* create block schedule, this also removes empty blocks which might
     * produce critical edges */
    cg->blk_sched = be_create_block_schedule(irg, cg->birg->exec_freq);

    /* do peephole optimisations */
    ia32_peephole_optimization(irg, cg);
}
/**
 * Emits the code, closes the output file and frees
 * the code generator interface.
 */
static void ia32_codegen(void *self) {
    ia32_code_gen_t *cg  = self;
    ir_graph        *irg = cg->irg;

    ia32_gen_routine(cg, irg);

    cur_reg_set = NULL;

    /* remove it from the isa */
    cg->isa->cg = NULL;

    /* de-allocate code generator */
    del_set(cg->reg_set);
    free(cg);
}
static void *ia32_cg_init(be_irg_t *birg);

static const arch_code_generator_if_t ia32_code_gen_if = {
    ia32_cg_init,
    NULL,                /* before abi introduce hook */
    ia32_prepare_graph,
    NULL,                /* spill */
    ia32_before_sched,   /* before scheduling hook */
    ia32_before_ra,      /* before register allocation hook */
    ia32_after_ra,       /* after register allocation hook */
    ia32_finish,         /* called before codegen */
    ia32_codegen         /* emit && done */
};
1410 static void *ia32_cg_init(be_irg_t *birg) {
1411 ia32_isa_t *isa = (ia32_isa_t *)birg->main_env->arch_env->isa;
1412 ia32_code_gen_t *cg = xcalloc(1, sizeof(*cg));
1414 cg->impl = &ia32_code_gen_if;
1415 cg->irg = birg->irg;
1416 cg->reg_set = new_set(ia32_cmp_irn_reg_assoc, 1024);
1417 cg->arch_env = birg->main_env->arch_env;
1420 cg->blk_sched = NULL;
1421 cg->fp_kind = isa->fp_kind;
1422 cg->used_fp = fp_none;
1423 cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
1425 /* copy optimizations from isa for easier access */
1427 cg->arch = isa->arch;
1428 cg->opt_arch = isa->opt_arch;
1434 if (isa->name_obst) {
1435 obstack_free(isa->name_obst, NULL);
1436 obstack_init(isa->name_obst);
1440 cur_reg_set = cg->reg_set;
1442 ia32_irn_ops.cg = cg;
1444 return (arch_code_generator_t *)cg;
/*****************************************************************
 *  ____             _                  _   _____  _____
 * |  _ \           | |                | | |_   _|/ ____|  /\
 * | |_) | __ _  ___| | _____ _ __   __| |   | | | (___   /  \
 * |  _ < / _` |/ __| |/ / _ \ '_ \ / _` |   | |  \___ \ / /\ \
 * | |_) | (_| | (__|   <  __/ | | | (_| |  _| |_ ____) / ____ \
 * |____/ \__,_|\___|_|\_\___|_| |_|\__,_| |_____|_____/_/    \_\
 *****************************************************************/
/**
 * Set output modes for GCC
 */
static const tarval_mode_info mo_integer = {
    TVO_DECIMAL,
    NULL,
    NULL,
};

/*
 * set the tarval output mode of all integer modes to decimal
 */
static void set_tarval_output_modes(void)
{
    int i;

    for (i = get_irp_n_modes() - 1; i >= 0; --i) {
        ir_mode *mode = get_irp_mode(i);

        if (mode_is_int(mode))
            set_tarval_mode_output_option(mode, &mo_integer);
    }
}
const arch_isa_if_t ia32_isa_if;

/**
 * The template that generates a new ISA object.
 * Note that this template can be changed by command line
 * arguments.
 */
static ia32_isa_t ia32_isa_template = {
    {
        &ia32_isa_if,            /* isa interface implementation */
        &ia32_gp_regs[REG_ESP],  /* stack pointer register */
        &ia32_gp_regs[REG_EBP],  /* base pointer register */
        -1,                      /* stack direction */
        NULL,                    /* main environment */
    },
    { NULL, },                   /* emitter environment */
    NULL,                        /* 16bit register names */
    NULL,                        /* 8bit register names */
    NULL,                        /* types */
    NULL,                        /* tv_ent */
    (0                 |
    IA32_OPT_INCDEC    |         /* optimize add 1, sub 1 into inc/dec       default: on */
    IA32_OPT_DOAM      |         /* optimize address mode                    default: on */
    IA32_OPT_LEA       |         /* optimize for LEAs                        default: on */
    IA32_OPT_PLACECNST |         /* place constants immediately before instructions, default: on */
    IA32_OPT_IMMOPS    |         /* operations can use immediates,           default: on */
    IA32_OPT_PUSHARGS),          /* create pushs for function argument passing, default: on */
    arch_pentium_4,              /* instruction architecture */
    arch_pentium_4,              /* optimize for architecture */
    fp_sse2,                     /* use sse2 unit */
    NULL,                        /* current code generator */
    NULL,                        /* name obstack */
    0                            /* name obst size */
};
/**
 * Initializes the backend ISA.
 */
static void *ia32_init(FILE *file_handle) {
    static int inited = 0;
    ia32_isa_t *isa;

    if (inited)
        return NULL;
    inited = 1;

    set_tarval_output_modes();

    isa = xmalloc(sizeof(*isa));
    memcpy(isa, &ia32_isa_template, sizeof(*isa));

    if (mode_fpcw == NULL) {
        mode_fpcw = new_ir_mode("Fpcw", irms_int_number, 16, 0, irma_none, 0);
    }

    ia32_register_init(isa);
    ia32_create_opcodes();
    ia32_register_copy_attr_func();

    if ((ARCH_INTEL(isa->arch) && isa->arch < arch_pentium_4) ||
        (ARCH_AMD(isa->arch) && isa->arch < arch_athlon))
        /* no SSE2 for these CPUs */
        isa->fp_kind = fp_x87;

    if (ARCH_INTEL(isa->opt_arch) && isa->opt_arch >= arch_pentium_4) {
        /* the Pentium 4 doesn't like inc and dec instructions */
        isa->opt &= ~IA32_OPT_INCDEC;
    }

    be_emit_init_env(&isa->emit, file_handle);
    isa->regs_16bit = pmap_create();
    isa->regs_8bit  = pmap_create();
    isa->types      = pmap_create();
    isa->tv_ent     = pmap_create();
    isa->cpu        = ia32_init_machine_description();

    ia32_build_16bit_reg_map(isa->regs_16bit);
    ia32_build_8bit_reg_map(isa->regs_8bit);

#ifndef NDEBUG
    isa->name_obst = xmalloc(sizeof(*isa->name_obst));
    obstack_init(isa->name_obst);
#endif /* NDEBUG */

    ia32_handle_intrinsics();

    /* needed for the debug support */
    be_gas_emit_switch_section(&isa->emit, GAS_SECTION_TEXT);
    be_emit_cstring(&isa->emit, ".Ltext0:\n");
    be_emit_write_line(&isa->emit);

    /* we mark referenced global entities, so we can only emit those which
     * are actually referenced. (Note: you mustn't use the type visited flag
     * elsewhere in the backend)
     */
    inc_master_type_visited();

    return isa;
}
/**
 * Closes the output file and frees the ISA structure.
 */
static void ia32_done(void *self) {
    ia32_isa_t *isa = self;

    /* emit now all global declarations */
    be_gas_emit_decls(&isa->emit, isa->arch_isa.main_env, 1);

    pmap_destroy(isa->regs_16bit);
    pmap_destroy(isa->regs_8bit);
    pmap_destroy(isa->tv_ent);
    pmap_destroy(isa->types);

#ifndef NDEBUG
    obstack_free(isa->name_obst, NULL);
#endif /* NDEBUG */

    be_emit_destroy_env(&isa->emit);

    free(self);
}
/**
 * Return the number of register classes for this architecture.
 * We always report these:
 *  - the general purpose registers
 *  - the SSE floating point register set
 *  - the virtual floating point registers
 *  - the SSE vector register set
 */
static int ia32_get_n_reg_class(const void *self) {
    return N_CLASSES;
}

/**
 * Return the register class for index i.
 */
static const arch_register_class_t *ia32_get_reg_class(const void *self, int i)
{
    assert(i >= 0 && i < N_CLASSES);
    return &ia32_reg_classes[i];
}

/**
 * Get the register class which shall be used to store a value of a given mode.
 * @param self The this pointer.
 * @param mode The mode in question.
 * @return A register class which can hold values of the given mode.
 */
const arch_register_class_t *ia32_get_reg_class_for_mode(const void *self, const ir_mode *mode) {
    const ia32_isa_t *isa = self;
    if (mode_is_float(mode)) {
        return USE_SSE2(isa) ? &ia32_reg_classes[CLASS_ia32_xmm] : &ia32_reg_classes[CLASS_ia32_vfp];
    }

    return &ia32_reg_classes[CLASS_ia32_gp];
}
/**
 * Get the ABI restrictions for procedure calls.
 * @param self        The this pointer.
 * @param method_type The type of the method (procedure) in question.
 * @param abi         The abi object to be modified
 */
static void ia32_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi) {
    const ia32_isa_t *isa = self;
    ir_type  *tp;
    ir_mode  *mode;
    unsigned  cc        = get_method_calling_convention(method_type);
    int       n         = get_method_n_params(method_type);
    int       biggest_n = -1;
    int       stack_idx = 0;
    int       i, ignore_1, ignore_2;
    ir_mode **modes;
    const arch_register_t *reg;
    be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);

    unsigned use_push = !IS_P6_ARCH(isa->opt_arch);

    /* set abi flags for calls */
    call_flags.bits.left_to_right         = 0;  /* always last arg first on stack */
    call_flags.bits.store_args_sequential = use_push;
    /* call_flags.bits.try_omit_fp not changed: can handle both settings */
    call_flags.bits.fp_free               = 0;  /* the frame pointer is fixed in IA32 */
    call_flags.bits.call_has_imm          = 1;  /* IA32 calls can have immediate address */

    /* set stack parameter passing style */
    be_abi_call_set_flags(abi, call_flags, &ia32_abi_callbacks);

    /* collect the mode for each type */
    modes = alloca(n * sizeof(modes[0]));

    for (i = 0; i < n; i++) {
        tp       = get_method_param_type(method_type, i);
        modes[i] = get_type_mode(tp);
    }

    /* set register parameters */
    if (cc & cc_reg_param) {
        /* determine the number of parameters passed via registers */
        biggest_n = ia32_get_n_regparam_class(n, modes, &ignore_1, &ignore_2);

        /* loop over all parameters and set the register requirements */
        for (i = 0; i <= biggest_n; i++) {
            reg = ia32_get_RegParam_reg(n, modes, i, cc);
            assert(reg && "kaputt");
            be_abi_call_param_reg(abi, i, reg);
        }

        stack_idx = i;
    }

    /* set stack parameters */
    for (i = stack_idx; i < n; i++) {
        /* parameters on the stack are 32 bit aligned */
        be_abi_call_param_stack(abi, i, 4, 0, 0);
    }

    /* set return registers */
    n = get_method_n_ress(method_type);

    assert(n <= 2 && "more than two results not supported");

    /* In case of 64bit returns, we will have two 32bit values */
    if (n == 2) {
        tp   = get_method_res_type(method_type, 0);
        mode = get_type_mode(tp);

        assert(!mode_is_float(mode) && "two FP results not supported");

        tp   = get_method_res_type(method_type, 1);
        mode = get_type_mode(tp);

        assert(!mode_is_float(mode) && "mixed INT, FP results not supported");

        be_abi_call_res_reg(abi, 0, &ia32_gp_regs[REG_EAX]);
        be_abi_call_res_reg(abi, 1, &ia32_gp_regs[REG_EDX]);
    }
    else if (n == 1) {
        const arch_register_t *reg;

        tp   = get_method_res_type(method_type, 0);
        assert(is_atomic_type(tp));
        mode = get_type_mode(tp);

        reg = mode_is_float(mode) ? &ia32_vfp_regs[REG_VF0] : &ia32_gp_regs[REG_EAX];

        be_abi_call_res_reg(abi, 0, reg);
    }
}
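/*
 * Example (illustrative): for "long long f(int a, int b)" under the default
 * calling convention the code above requests both parameters on the stack,
 * pushed right to left with 4 byte alignment, and assigns the 64 bit result
 * to the register pair EDX:EAX; a float result would be returned in the
 * virtual st(0) register (REG_VF0) instead.
 */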
static const void *ia32_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn) {
    return &ia32_irn_ops;
}

const arch_irn_handler_t ia32_irn_handler = {
    ia32_get_irn_ops
};

const arch_irn_handler_t *ia32_get_irn_handler(const void *self) {
    return &ia32_irn_handler;
}

int ia32_to_appear_in_schedule(void *block_env, const ir_node *irn) {
    if (!is_ia32_irn(irn))
        return -1;

    if (is_ia32_NoReg_GP(irn) || is_ia32_NoReg_VFP(irn) || is_ia32_NoReg_XMM(irn)
            || is_ia32_Unknown_GP(irn) || is_ia32_Unknown_XMM(irn)
            || is_ia32_Unknown_VFP(irn) || is_ia32_ChangeCW(irn))
        return 0;

    return 1;
}
/**
 * Initializes the code generator interface.
 */
static const arch_code_generator_if_t *ia32_get_code_generator_if(void *self) {
    return &ia32_code_gen_if;
}

/**
 * Returns the estimated execution time of an ia32 irn.
 */
static sched_timestep_t ia32_sched_exectime(void *env, const ir_node *irn) {
    const arch_env_t *arch_env = env;
    return is_ia32_irn(irn) ? ia32_get_op_estimated_cost(arch_get_irn_ops(arch_env, irn), irn) : 1;
}

list_sched_selector_t ia32_sched_selector;

/**
 * Returns the reg_pressure scheduler with to_appear_in_schedule() overloaded
 */
static const list_sched_selector_t *ia32_get_list_sched_selector(const void *self, list_sched_selector_t *selector) {
    memcpy(&ia32_sched_selector, selector, sizeof(ia32_sched_selector));
    ia32_sched_selector.exectime              = ia32_sched_exectime;
    ia32_sched_selector.to_appear_in_schedule = ia32_to_appear_in_schedule;
    return &ia32_sched_selector;
}
static const ilp_sched_selector_t *ia32_get_ilp_sched_selector(const void *self) {
    return NULL;
}

/**
 * Returns the necessary byte alignment for storing a register of given class.
 */
static int ia32_get_reg_class_alignment(const void *self, const arch_register_class_t *cls) {
    ir_mode *mode = arch_register_class_mode(cls);
    int bytes     = get_mode_size_bytes(mode);

    /* SSE registers (128 bit) need 16 byte alignment */
    if (mode_is_float(mode) && bytes > 8)
        return 16;

    return bytes;
}
static const be_execution_unit_t ***ia32_get_allowed_execution_units(const void *self, const ir_node *irn) {
    static const be_execution_unit_t *_allowed_units_BRANCH[] = {
        &ia32_execution_units_BRANCH[IA32_EXECUNIT_TP_BRANCH_BRANCH1],
        &ia32_execution_units_BRANCH[IA32_EXECUNIT_TP_BRANCH_BRANCH2],
        NULL,
    };

    static const be_execution_unit_t *_allowed_units_GP[] = {
        &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EAX],
        &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EBX],
        &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_ECX],
        &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EDX],
        &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_ESI],
        &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EDI],
        &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EBP],
        NULL,
    };

    static const be_execution_unit_t *_allowed_units_DUMMY[] = {
        &be_machine_execution_units_DUMMY[0],
        NULL,
    };

    static const be_execution_unit_t **_units_callret[] = {
        _allowed_units_BRANCH,
        NULL,
    };

    static const be_execution_unit_t **_units_other[] = {
        _allowed_units_GP,
        NULL,
    };

    static const be_execution_unit_t **_units_dummy[] = {
        _allowed_units_DUMMY,
        NULL,
    };

    const be_execution_unit_t ***ret;

    if (is_ia32_irn(irn)) {
        ret = get_ia32_exec_units(irn);
    }
    else if (is_be_node(irn)) {
        if (be_is_Call(irn) || be_is_Return(irn)) {
            ret = _units_callret;
        }
        else if (be_is_Barrier(irn)) {
            ret = _units_dummy;
        }
        else {
            ret = _units_other;
        }
    }
    else {
        ret = _units_dummy;
    }

    return ret;
}
/**
 * Return the abstract ia32 machine.
 */
static const be_machine_t *ia32_get_machine(const void *self) {
    const ia32_isa_t *isa = self;
    return isa->cpu;
}

/**
 * Return irp irgs in the desired order.
 */
static ir_graph **ia32_get_irg_list(const void *self, ir_graph ***irg_list) {
    return NULL;
}
/**
 * Allows or disallows the creation of Psi nodes for the given Phi nodes.
 * @return 1 if allowed, 0 otherwise
 */
static int ia32_is_psi_allowed(ir_node *sel, ir_node *phi_list, int i, int j)
{
    ir_node *cmp, *cmp_a, *phi;
    ir_mode *mode;

/* we don't want long long and floating point Psi */
#define IS_BAD_PSI_MODE(mode) (mode_is_float(mode) || get_mode_size_bits(mode) > 32)

    if (get_irn_mode(sel) != mode_b)
        return 0;

    cmp   = get_Proj_pred(sel);
    cmp_a = get_Cmp_left(cmp);
    mode  = get_irn_mode(cmp_a);

    if (IS_BAD_PSI_MODE(mode))
        return 0;

    /* check the Phi nodes */
    for (phi = phi_list; phi; phi = get_irn_link(phi)) {
        ir_node *pred_i = get_irn_n(phi, i);
        ir_node *pred_j = get_irn_n(phi, j);
        ir_mode *mode_i = get_irn_mode(pred_i);
        ir_mode *mode_j = get_irn_mode(pred_j);

        if (IS_BAD_PSI_MODE(mode_i) || IS_BAD_PSI_MODE(mode_j))
            return 0;
    }

#undef IS_BAD_PSI_MODE

    return 1;
}
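/*
 * Background for the mode restriction above: Psi nodes are meant to become
 * conditional-move style code, and the ia32 cmov only operates on integer
 * registers of at most 32 bits, so float and long long selects are left as
 * ordinary control flow.
 */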
static ia32_intrinsic_env_t intrinsic_env = {
    NULL,  /**< the irg, these entities belong to */
    NULL,  /**< entity for first div operand (move into FPU) */
    NULL,  /**< entity for second div operand (move into FPU) */
    NULL,  /**< entity for converts ll -> d */
    NULL,  /**< entity for converts d -> ll */
};

/**
 * Returns the libFirm configuration parameter for this backend.
 */
static const backend_params *ia32_get_libfirm_params(void) {
    static const opt_if_conv_info_t ifconv = {
        4,                   /* maxdepth, doesn't matter for Psi-conversion */
        ia32_is_psi_allowed  /* allows or disallows Psi creation for given selector */
    };
    static const arch_dep_params_t ad = {
        1,  /* also use subs */
        4,  /* maximum shifts */
        31, /* maximum shift amount */

        1,  /* allow Mulhs */
        1,  /* allow Mulus */
        32  /* Mulh allowed up to 32 bit */
    };
    static backend_params p = {
        NULL,  /* no additional opcodes */
        NULL,  /* will be set later */
        1,     /* need dword lowering */
        ia32_create_intrinsic_fkt,
        &intrinsic_env,  /* context for ia32_create_intrinsic_fkt */
        NULL,  /* will be set later */
    };

    p.dep_param    = &ad;
    p.if_conv_info = &ifconv;
    return &p;
}
/* instruction set architectures. */
static const lc_opt_enum_int_items_t arch_items[] = {
    { "386",        arch_i386, },
    { "486",        arch_i486, },
    { "pentium",    arch_pentium, },
    { "586",        arch_pentium, },
    { "pentiumpro", arch_pentium_pro, },
    { "686",        arch_pentium_pro, },
    { "pentiummmx", arch_pentium_mmx, },
    { "pentium2",   arch_pentium_2, },
    { "p2",         arch_pentium_2, },
    { "pentium3",   arch_pentium_3, },
    { "p3",         arch_pentium_3, },
    { "pentium4",   arch_pentium_4, },
    { "p4",         arch_pentium_4, },
    { "pentiumm",   arch_pentium_m, },
    { "pm",         arch_pentium_m, },
    { "core",       arch_core, },
    { "athlon",     arch_athlon, },
    { "athlon64",   arch_athlon_64, },
    { "opteron",    arch_opteron, },
    { NULL,         0 }
};

static lc_opt_enum_int_var_t arch_var = {
    &ia32_isa_template.arch, arch_items
};

static lc_opt_enum_int_var_t opt_arch_var = {
    &ia32_isa_template.opt_arch, arch_items
};

static const lc_opt_enum_int_items_t fp_unit_items[] = {
    { "x87",  fp_x87 },
    { "sse2", fp_sse2 },
    { NULL,   0 }
};

static lc_opt_enum_int_var_t fp_unit_var = {
    &ia32_isa_template.fp_kind, fp_unit_items
};

static const lc_opt_enum_int_items_t gas_items[] = {
    { "normal", GAS_FLAVOUR_NORMAL },
    { "mingw",  GAS_FLAVOUR_MINGW  },
    { NULL,     0 }
};

static lc_opt_enum_int_var_t gas_var = {
    (int*) &be_gas_flavour, gas_items
};

static const lc_opt_table_entry_t ia32_options[] = {
    LC_OPT_ENT_ENUM_INT("arch",      "select the instruction architecture", &arch_var),
    LC_OPT_ENT_ENUM_INT("opt",       "optimize for instruction architecture", &opt_arch_var),
    LC_OPT_ENT_ENUM_INT("fpunit",    "select the floating point unit", &fp_unit_var),
    LC_OPT_ENT_NEGBIT("noaddrmode",  "do not use address mode", &ia32_isa_template.opt, IA32_OPT_DOAM),
    LC_OPT_ENT_NEGBIT("nolea",       "do not optimize for LEAs", &ia32_isa_template.opt, IA32_OPT_LEA),
    LC_OPT_ENT_NEGBIT("noplacecnst", "do not place constants", &ia32_isa_template.opt, IA32_OPT_PLACECNST),
    LC_OPT_ENT_NEGBIT("noimmop",     "no operations with immediates", &ia32_isa_template.opt, IA32_OPT_IMMOPS),
    LC_OPT_ENT_NEGBIT("nopushargs",  "do not create pushs for function arguments", &ia32_isa_template.opt, IA32_OPT_PUSHARGS),
    LC_OPT_ENT_ENUM_INT("gasmode",   "set the GAS compatibility mode", &gas_var),
    { NULL }
};
const arch_isa_if_t ia32_isa_if = {
    ia32_init,
    ia32_done,
    ia32_get_n_reg_class,
    ia32_get_reg_class,
    ia32_get_reg_class_for_mode,
    ia32_get_call_abi,
    ia32_get_irn_handler,
    ia32_get_code_generator_if,
    ia32_get_list_sched_selector,
    ia32_get_ilp_sched_selector,
    ia32_get_reg_class_alignment,
    ia32_get_libfirm_params,
    ia32_get_allowed_execution_units,
    ia32_get_machine,
    ia32_get_irg_list,
};
void ia32_init_emitter(void);
void ia32_init_finish(void);
void ia32_init_optimize(void);
void ia32_init_transform(void);
void ia32_init_x87(void);

void be_init_arch_ia32(void)
{
    lc_opt_entry_t *be_grp   = lc_opt_get_grp(firm_opt_get_root(), "be");
    lc_opt_entry_t *ia32_grp = lc_opt_get_grp(be_grp, "ia32");

    lc_opt_add_table(ia32_grp, ia32_options);
    be_register_isa_if("ia32", &ia32_isa_if);

    FIRM_DBG_REGISTER(dbg, "firm.be.ia32.cg");

    ia32_init_emitter();
    ia32_init_finish();
    ia32_init_optimize();
    ia32_init_transform();
    ia32_init_x87();
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_ia32);