/*
 * Copyright (C) 1995-2007 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */
/**
 * @brief   This is the main ia32 firm backend driver.
 * @author  Christian Wuerdig
 */
#include <math.h>

#include <libcore/lc_opts.h>
#include <libcore/lc_opts_enum.h>

#include "pseudo_irg.h"
#include "iredges_t.h"

#include "../beirg_t.h"
#include "../benode_t.h"
#include "../belower.h"
#include "../besched_t.h"
#include "../beirgmod.h"
#include "../be_dbgout.h"
#include "../beblocksched.h"
#include "../bemachine.h"
#include "../beilpsched.h"
#include "../bespillslots.h"
#include "../bemodule.h"
#include "../begnuas.h"
#include "../bestate.h"

#include "bearch_ia32_t.h"

#include "ia32_new_nodes.h"
#include "gen_ia32_regalloc_if.h"
#include "gen_ia32_machine.h"
#include "ia32_transform.h"
#include "ia32_emitter.h"
#include "ia32_map_regs.h"
#include "ia32_optimize.h"
#include "ia32_dbg_stat.h"
#include "ia32_finish.h"
#include "ia32_util.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

static set *cur_reg_set = NULL;

ir_mode *mode_fpcw = NULL;

typedef ir_node *(*create_const_node_func) (dbg_info *dbg, ir_graph *irg, ir_node *block);
static INLINE ir_node *create_const(ia32_code_gen_t *cg, ir_node **place,
                                    create_const_node_func func,
                                    const arch_register_t* reg)
{
    ir_node *block, *res;

    if (*place != NULL)
        return *place;

    block = get_irg_start_block(cg->irg);
    res   = func(NULL, cg->irg, block);
    arch_set_irn_register(cg->arch_env, res, reg);
    *place = res;

    /* keep the node alive: add a dependency from the End node */
    add_irn_dep(get_irg_end(cg->irg), res);
    /* add_irn_dep(get_irg_start(cg->irg), res); */

    return res;
}
/* Creates the unique per irg GP NoReg node. */
ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg) {
    return create_const(cg, &cg->noreg_gp, new_rd_ia32_NoReg_GP,
                        &ia32_gp_regs[REG_GP_NOREG]);
}

ir_node *ia32_new_NoReg_vfp(ia32_code_gen_t *cg) {
    return create_const(cg, &cg->noreg_vfp, new_rd_ia32_NoReg_VFP,
                        &ia32_vfp_regs[REG_VFP_NOREG]);
}

ir_node *ia32_new_NoReg_xmm(ia32_code_gen_t *cg) {
    return create_const(cg, &cg->noreg_xmm, new_rd_ia32_NoReg_XMM,
                        &ia32_xmm_regs[REG_XMM_NOREG]);
}

/* Creates the unique per irg FP NoReg node. */
ir_node *ia32_new_NoReg_fp(ia32_code_gen_t *cg) {
    return USE_SSE2(cg) ? ia32_new_NoReg_xmm(cg) : ia32_new_NoReg_vfp(cg);
}

ir_node *ia32_new_Unknown_gp(ia32_code_gen_t *cg) {
    return create_const(cg, &cg->unknown_gp, new_rd_ia32_Unknown_GP,
                        &ia32_gp_regs[REG_GP_UKNWN]);
}

ir_node *ia32_new_Unknown_vfp(ia32_code_gen_t *cg) {
    return create_const(cg, &cg->unknown_vfp, new_rd_ia32_Unknown_VFP,
                        &ia32_vfp_regs[REG_VFP_UKNWN]);
}

ir_node *ia32_new_Unknown_xmm(ia32_code_gen_t *cg) {
    return create_const(cg, &cg->unknown_xmm, new_rd_ia32_Unknown_XMM,
                        &ia32_xmm_regs[REG_XMM_UKNWN]);
}

ir_node *ia32_new_Fpu_truncate(ia32_code_gen_t *cg) {
    return create_const(cg, &cg->fpu_trunc_mode, new_rd_ia32_ChangeCW,
                        &ia32_fp_cw_regs[REG_FPCW]);
}
/**
 * Returns gp_noreg or fp_noreg, depending on the input requirements.
 */
ir_node *ia32_get_admissible_noreg(ia32_code_gen_t *cg, ir_node *irn, int pos) {
    const arch_register_req_t *req;

    req = arch_get_register_req(cg->arch_env, irn, pos);
    assert(req != NULL && "Missing register requirements");
    if (req->cls == &ia32_reg_classes[CLASS_ia32_gp])
        return ia32_new_NoReg_gp(cg);

    return ia32_new_NoReg_fp(cg);
}
/**************************************************
 *  _ __ ___  __ _    __ _| | | ___   ___   _| |_
 * | '__/ _ \/ _` |  / _` | | |/ _ \ / __| | |  _|
 * | | |  __/ (_| | | (_| | | | (_) | (__  | | |
 * |_|  \___|\__, |  \__,_|_|_|\___/ \___| |_|_|
 **************************************************/
/**
 * Return register requirements for an ia32 node.
 * If the node returns a tuple (mode_T) then the Projs
 * will be asked for this information.
 */
static const arch_register_req_t *ia32_get_irn_reg_req(const void *self,
                                                       const ir_node *node,
                                                       int pos)
{
    long     node_pos = pos == -1 ? 0 : pos;
    ir_mode *mode     = is_Block(node) ? NULL : get_irn_mode(node);

    if (is_Block(node) || mode == mode_X) {
        return arch_no_register_req;
    }

    if (mode == mode_T && pos < 0) {
        return arch_no_register_req;
    }

    if (is_Proj(node)) {
        if (mode == mode_M)
            return arch_no_register_req;

        if (pos >= 0)
            return arch_no_register_req;

        node_pos = (pos == -1) ? get_Proj_proj(node) : pos;
        node     = skip_Proj_const(node);
    }

    if (is_ia32_irn(node)) {
        const arch_register_req_t *req;
        if (pos >= 0)
            req = get_ia32_in_req(node, pos);
        else
            req = get_ia32_out_req(node, node_pos);

        assert(req != NULL);
        return req;
    }

    /* unknowns should be transformed already */
    assert(!is_Unknown(node));

    return arch_no_register_req;
}
static void ia32_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg) {
    int pos = 0;

    if (get_irn_mode(irn) == mode_X) {
        return;
    }

    if (is_Proj(irn)) {
        pos = get_Proj_proj(irn);
        irn = skip_Proj(irn);
    }

    if (is_ia32_irn(irn)) {
        const arch_register_t **slots;

        slots      = get_ia32_slots(irn);
        slots[pos] = reg;
    } else {
        ia32_set_firm_reg(irn, reg, cur_reg_set);
    }
}
static const arch_register_t *ia32_get_irn_reg(const void *self, const ir_node *irn) {
    int pos = 0;
    const arch_register_t *reg = NULL;

    if (is_Proj(irn)) {
        if (get_irn_mode(irn) == mode_X) {
            return NULL;
        }

        pos = get_Proj_proj(irn);
        irn = skip_Proj_const(irn);
    }

    if (is_ia32_irn(irn)) {
        const arch_register_t **slots;
        slots = get_ia32_slots(irn);
        reg   = slots[pos];
    } else {
        reg = ia32_get_firm_reg(irn, cur_reg_set);
    }

    return reg;
}
static arch_irn_class_t ia32_classify(const void *self, const ir_node *irn) {
    arch_irn_class_t classification = arch_irn_class_normal;

    irn = skip_Proj_const(irn);

    if (is_cfop(irn))
        classification |= arch_irn_class_branch;

    if (! is_ia32_irn(irn))
        return classification & ~arch_irn_class_normal;

    if (is_ia32_Cnst(irn))
        classification |= arch_irn_class_const;

    if (is_ia32_Ld(irn))
        classification |= arch_irn_class_load;

    if (is_ia32_St(irn) || is_ia32_Store8Bit(irn))
        classification |= arch_irn_class_store;

    if (is_ia32_need_stackent(irn))
        classification |= arch_irn_class_reload;

    return classification;
}
static arch_irn_flags_t ia32_get_flags(const void *self, const ir_node *irn) {
    arch_irn_flags_t flags = arch_irn_flags_none;

    if (is_Unknown(irn))
        return arch_irn_flags_ignore;

    if (is_Proj(irn) && mode_is_datab(get_irn_mode(irn))) {
        ir_node *pred = get_Proj_pred(irn);

        if (is_ia32_irn(pred)) {
            flags = get_ia32_out_flags(pred, get_Proj_proj(irn));
        }

        irn = pred;
    }

    if (is_ia32_irn(irn)) {
        flags |= get_ia32_flags(irn);
    }

    return flags;
}
/**
 * The IA32 ABI callback object.
 */
typedef struct {
    be_abi_call_flags_bits_t flags;  /**< The call flags. */
    const arch_isa_t *isa;           /**< The ISA handle. */
    const arch_env_t *aenv;          /**< The architecture environment. */
    ir_graph *irg;                   /**< The associated graph. */
} ia32_abi_env_t;
static ir_entity *ia32_get_frame_entity(const void *self, const ir_node *irn) {
    return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
}

static void ia32_set_frame_entity(const void *self, ir_node *irn, ir_entity *ent) {
    set_ia32_frame_ent(irn, ent);
}
static void ia32_set_frame_offset(const void *self, ir_node *irn, int bias) {
    const ia32_irn_ops_t *ops = self;

    if (get_ia32_frame_ent(irn)) {
        ia32_am_flavour_t am_flav;

        if (is_ia32_Pop(irn)) {
            int omit_fp = be_abi_omit_fp(ops->cg->birg->abi);
            if (omit_fp) {
                /* Pop nodes modify the stack pointer before calculating the
                 * destination address, so fix this here */
                bias -= 4;
            }
        }

        am_flav  = get_ia32_am_flavour(irn);
        am_flav |= ia32_O;
        set_ia32_am_flavour(irn, am_flav);

        add_ia32_am_offs_int(irn, bias);
    }
}
static int ia32_get_sp_bias(const void *self, const ir_node *irn) {
    if (is_Proj(irn)) {
        long     proj = get_Proj_proj(irn);
        ir_node *pred = get_Proj_pred(irn);

        if (is_ia32_Push(pred) && proj == pn_ia32_Push_stack)
            return 4;
        if (is_ia32_Pop(pred) && proj == pn_ia32_Pop_stack)
            return -4;
    }

    return 0;
}
/**
 * Put all registers which are saved by the prologue/epilogue in a set.
 *
 * @param self  The callback object.
 * @param s     The result set.
 */
static void ia32_abi_dont_save_regs(void *self, pset *s)
{
    ia32_abi_env_t *env = self;
    if (env->flags.try_omit_fp)
        pset_insert_ptr(s, env->isa->bp);
}
/**
 * Generate the routine prologue.
 *
 * @param self     The callback object.
 * @param mem      A pointer to the mem node. Update this if you define new memory.
 * @param reg_map  A map mapping all callee_save/ignore/parameter registers to their defining nodes.
 *
 * @return The register which shall be used as a stack frame base.
 *
 * All nodes which define registers in @p reg_map must keep @p reg_map current.
 */
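/*
 * For illustration only (a sketch, not emitted verbatim): with a frame
 * pointer the prologue built below corresponds to the classic sequence
 *
 *     push %ebp          ; spill the old frame pointer
 *     movl %esp, %ebp    ; let ebp point into the current frame
 *
 * whereas in the try_omit_fp case the stack pointer itself is returned as
 * the frame base and no extra code is inserted.
 */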
static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map)
{
    ia32_abi_env_t   *env = self;
    const ia32_isa_t *isa = (ia32_isa_t *)env->isa;
    ia32_code_gen_t  *cg  = isa->cg;

    if (! env->flags.try_omit_fp) {
        ir_node *bl      = get_irg_start_block(env->irg);
        ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
        ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);
        ir_node *noreg   = ia32_new_NoReg_gp(cg);
        ir_node *push;

        /* ALL nodes representing bp must be set to ignore. */
        be_node_set_flags(get_Proj_pred(curr_bp), BE_OUT_POS(get_Proj_proj(curr_bp)), arch_irn_flags_ignore);

        /* push ebp */
        push    = new_rd_ia32_Push(NULL, env->irg, bl, noreg, noreg, curr_bp, curr_sp, *mem);
        curr_sp = new_r_Proj(env->irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
        *mem    = new_r_Proj(env->irg, bl, push, mode_M, pn_ia32_Push_M);

        /* the push must have SP out register */
        arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
        set_ia32_flags(push, arch_irn_flags_ignore);

        /* move esp to ebp */
        curr_bp = be_new_Copy(env->isa->bp->reg_class, env->irg, bl, curr_sp);
        be_set_constr_single_reg(curr_bp, BE_OUT_POS(0), env->isa->bp);
        arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
        be_node_set_flags(curr_bp, BE_OUT_POS(0), arch_irn_flags_ignore);

        /* beware: the copy must be done before any other sp use */
        curr_sp = be_new_CopyKeep_single(env->isa->sp->reg_class, env->irg, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
        be_set_constr_single_reg(curr_sp, BE_OUT_POS(0), env->isa->sp);
        arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
        be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore);

        be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
        be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);

        return env->isa->bp;
    }

    return env->isa->sp;
}
/**
 * Generate the routine epilogue.
 *
 * @param self     The callback object.
 * @param bl       The block for the epilogue.
 * @param mem      A pointer to the mem node. Update this if you define new memory.
 * @param reg_map  A map mapping all callee_save/ignore/parameter registers to their defining nodes.
 *
 * All nodes which define registers in @p reg_map must keep @p reg_map current.
 */
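/*
 * For illustration only (a sketch): the two epilogue variants built below
 * correspond to
 *
 *     leave              ; movl %ebp, %esp plus popl %ebp in one instruction
 *
 * and
 *
 *     movl %ebp, %esp
 *     popl %ebp
 *
 * while the omit-fp case merely shrinks the stack frame with an IncSP.
 */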
static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map)
{
    ia32_abi_env_t *env     = self;
    ir_node        *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
    ir_node        *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);

    if (env->flags.try_omit_fp) {
        /* simply remove the stack frame here */
        curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK);
        add_irn_dep(curr_sp, *mem);
    }
    else {
        const ia32_isa_t *isa     = (ia32_isa_t *)env->isa;
        ia32_code_gen_t  *cg      = isa->cg;
        ir_mode          *mode_bp = env->isa->bp->reg_class->mode;

        /* gcc always emits a leave at the end of a routine */
        if (1 || ARCH_AMD(isa->opt_arch)) {
            ir_node *leave;

            /* leave */
            leave   = new_rd_ia32_Leave(NULL, env->irg, bl, curr_sp, curr_bp);
            set_ia32_flags(leave, arch_irn_flags_ignore);
            curr_bp = new_r_Proj(current_ir_graph, bl, leave, mode_bp, pn_ia32_Leave_frame);
            curr_sp = new_r_Proj(current_ir_graph, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
        }
        else {
            ir_node *noreg = ia32_new_NoReg_gp(cg);
            ir_node *pop;

            /* copy ebp to esp */
            curr_sp = be_new_SetSP(env->isa->sp, env->irg, bl, curr_sp, curr_bp, *mem);

            /* pop ebp */
            pop     = new_rd_ia32_Pop(NULL, env->irg, bl, noreg, noreg, curr_sp, *mem);
            set_ia32_flags(pop, arch_irn_flags_ignore);
            curr_bp = new_r_Proj(current_ir_graph, bl, pop, mode_bp, pn_ia32_Pop_res);
            curr_sp = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);

            *mem = new_r_Proj(current_ir_graph, bl, pop, mode_M, pn_ia32_Pop_M);
        }
        arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
        arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
    }

    be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
    be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);
}
/**
 * Initialize the callback object.
 * @param call  The call object.
 * @param aenv  The architecture environment.
 * @param irg   The graph with the method.
 * @return Some pointer. This pointer is passed to all other callback functions as self object.
 */
static void *ia32_abi_init(const be_abi_call_t *call, const arch_env_t *aenv, ir_graph *irg)
{
    ia32_abi_env_t      *env = xmalloc(sizeof(env[0]));
    be_abi_call_flags_t  fl  = be_abi_call_get_flags(call);
    env->flags = fl.bits;
    env->irg   = irg;
    env->aenv  = aenv;
    env->isa   = aenv->isa;
    return env;
}
/**
 * Destroy the callback object.
 * @param self  The callback object.
 */
static void ia32_abi_done(void *self) {
    free(self);
}
/**
 * Produces the type which sits between the stack args and the locals on the stack.
 * It will contain the return address and space to store the old base pointer.
 * @return The Firm type modeling the ABI between type.
 */
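/*
 * Sketch of the resulting stack layout (the stack grows downwards, both
 * entities are 4 bytes):
 *
 *     ...                   higher addresses
 *     stack arguments
 *     return address   \    between type (only ret_addr when the frame
 *     old base pointer /    pointer is omitted)
 *     local variables       lower addresses
 */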
static ir_type *ia32_abi_get_between_type(void *self)
{
#define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
    static ir_type *omit_fp_between_type = NULL;
    static ir_type *between_type         = NULL;

    ia32_abi_env_t *env = self;

    if (! between_type) {
        ir_entity *old_bp_ent;
        ir_entity *ret_addr_ent;
        ir_entity *omit_fp_ret_addr_ent;

        ir_type *old_bp_type   = new_type_primitive(IDENT("bp"), mode_Iu);
        ir_type *ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_Iu);

        between_type = new_type_struct(IDENT("ia32_between_type"));
        old_bp_ent   = new_entity(between_type, IDENT("old_bp"), old_bp_type);
        ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type);

        set_entity_offset(old_bp_ent, 0);
        set_entity_offset(ret_addr_ent, get_type_size_bytes(old_bp_type));
        set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
        set_type_state(between_type, layout_fixed);

        omit_fp_between_type = new_type_struct(IDENT("ia32_between_type_omit_fp"));
        omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type);

        set_entity_offset(omit_fp_ret_addr_ent, 0);
        set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
        set_type_state(omit_fp_between_type, layout_fixed);
    }

    return env->flags.try_omit_fp ? omit_fp_between_type : between_type;
#undef IDENT
}
/**
 * Get the estimated cycle count for @p irn.
 *
 * @param self  The this pointer.
 * @param irn   The node.
 *
 * @return The estimated cycle count for this operation.
 */
static int ia32_get_op_estimated_cost(const void *self, const ir_node *irn)
{
    int cost;
    ia32_op_type_t op_tp;
    const ia32_irn_ops_t *ops = self;

    if (!is_ia32_irn(irn))
        return 0;

    assert(is_ia32_irn(irn));

    cost  = get_ia32_latency(irn);
    op_tp = get_ia32_op_type(irn);

    if (is_ia32_CopyB(irn)) {
        cost = 250;
        if (ARCH_INTEL(ops->cg->arch))
            cost += 150;
    }
    else if (is_ia32_CopyB_i(irn)) {
        int size = get_tarval_long(get_ia32_Immop_tarval(irn));
        /* note: 4.0 / 3.0, since the integer expression (4/3) would
         * truncate to 1 and silently drop the per-byte factor */
        cost = 20 + (int)ceil((4.0 / 3.0) * size);
        if (ARCH_INTEL(ops->cg->arch))
            cost += 150;
    }
    /* in case of address mode operations add additional cycles */
    else if (op_tp == ia32_AddrModeD || op_tp == ia32_AddrModeS) {
        /* In case of stack access add 5 cycles (we assume the stack is in
         * cache), other memory operations cost 20 cycles. */
        cost += is_ia32_use_frame(irn) ? 5 : 20;
    }

    return cost;
}
/**
 * Returns the inverse operation of @p irn, recalculating the argument at position @p i.
 *
 * @param irn      The original operation
 * @param i        Index of the argument we want the inverse operation to yield
 * @param inverse  struct to be filled with the resulting inverse op
 * @param obst     The obstack to use for allocation of the returned nodes array
 * @return The inverse operation, or NULL if the operation is not invertible
 */
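/*
 * Example (a sketch): for irn = Add(x, Const 3) with i selecting x, the
 * inverse recomputes x from the result as Add(irn, Const -3); for a plain
 * Add(x, y) it is Sub(irn, y). The switch below builds exactly such nodes.
 */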
static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) {
    ir_graph *irg;
    ir_mode  *mode;
    ir_mode  *irn_mode;
    ir_node  *block, *noreg, *nomem;
    dbg_info *dbg;

    /* we cannot invert non-ia32 irns */
    if (! is_ia32_irn(irn))
        return NULL;

    /* operand must always be a real operand (not base, index or mem) */
    if (i != 2 && i != 3)
        return NULL;

    /* we don't invert address mode operations */
    if (get_ia32_op_type(irn) != ia32_Normal)
        return NULL;

    irg      = get_irn_irg(irn);
    block    = get_nodes_block(irn);
    mode     = get_irn_mode(irn);
    irn_mode = get_irn_mode(irn);
    noreg    = get_irn_n(irn, 0);
    nomem    = new_r_NoMem(irg);
    dbg      = get_irn_dbg_info(irn);

    /* initialize structure */
    inverse->nodes = obstack_alloc(obst, 2 * sizeof(inverse->nodes[0]));
    inverse->costs = 0;
    inverse->n     = 1;

    switch (get_ia32_irn_opcode(irn)) {
        case iro_ia32_Add:
            if (get_ia32_immop_type(irn) == ia32_ImmConst) {
                /* we have an add with a const here */
                /* inverse == add with negated const */
                inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
                inverse->costs   += 1;
                copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
                set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
                set_ia32_commutative(inverse->nodes[0]);
            }
            else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
                /* we have an add with a symconst here */
                /* inverse == sub with const */
                inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
                inverse->costs   += 2;
                copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
            }
            else {
                /* normal add: inverse == sub */
                inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, (ir_node*) irn, get_irn_n(irn, i ^ 1), nomem);
                inverse->costs   += 2;
            }
            break;
        case iro_ia32_Sub:
            if (get_ia32_immop_type(irn) != ia32_ImmNone) {
                /* we have a sub with a const/symconst here */
                /* inverse == add with this const */
                inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
                inverse->costs   += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
                copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
            }
            else {
                /* normal sub */
                if (i == 2) {
                    inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, (ir_node*) irn, get_irn_n(irn, 3), nomem);
                }
                else {
                    inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, get_irn_n(irn, 2), (ir_node*) irn, nomem);
                }
                inverse->costs += 1;
            }
            break;
        case iro_ia32_Xor:
            if (get_ia32_immop_type(irn) != ia32_ImmNone) {
                /* xor with const: inverse = xor */
                inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
                inverse->costs   += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
                copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
            }
            else {
                /* normal xor */
                inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, (ir_node *) irn, get_irn_n(irn, i), nomem);
                inverse->costs   += 1;
            }
            break;
        case iro_ia32_Not:
            inverse->nodes[0] = new_rd_ia32_Not(dbg, irg, block, noreg, noreg, (ir_node*) irn, nomem);
            inverse->costs   += 1;
            break;
        case iro_ia32_Neg:
            inverse->nodes[0] = new_rd_ia32_Neg(dbg, irg, block, noreg, noreg, (ir_node*) irn, nomem);
            inverse->costs   += 1;
            break;
        default:
            /* inverse operation not supported */
            return NULL;
    }

    return inverse;
}
static ir_mode *get_spill_mode_mode(const ir_mode *mode)
{
    /* floats are spilled as doubles, everything else as 32 bit ints */
    if (mode_is_float(mode))
        return mode_D;

    return mode_Iu;
}

/**
 * Get the mode that should be used for spilling value node
 */
static ir_mode *get_spill_mode(const ir_node *node)
{
    ir_mode *mode = get_irn_mode(node);
    return get_spill_mode_mode(mode);
}
/**
 * Checks whether an addressmode reload for a node with mode mode is
 * compatible with a spillslot of mode spillmode
 */
static int ia32_is_spillmode_compatible(const ir_mode *mode, const ir_mode *spillmode)
{
    if (mode_is_float(mode)) {
        return mode == spillmode;
    } else {
        return 1;
    }
}

/**
 * Check if irn can load its operand at position i from memory (source addressmode).
 * @param self  Pointer to irn ops itself
 * @param irn   The irn to be checked
 * @param i     The operands position
 * @return Non-Zero if operand can be loaded
 */
static int ia32_possible_memory_operand(const void *self, const ir_node *irn, unsigned int i) {
    ir_node       *op        = get_irn_n(irn, i);
    const ir_mode *mode      = get_irn_mode(op);
    const ir_mode *spillmode = get_spill_mode(op);

    if (! is_ia32_irn(irn)                              ||  /* must be an ia32 irn */
        get_irn_arity(irn) != 5                         ||  /* must be a binary operation */
        get_ia32_op_type(irn) != ia32_Normal            ||  /* must not already be an addressmode irn */
        ! (get_ia32_am_support(irn) & ia32_am_Source)   ||  /* must be capable of source addressmode */
        ! ia32_is_spillmode_compatible(mode, spillmode) ||
        (i != 2 && i != 3)                              ||  /* a "real" operand position must be requested */
        (i == 2 && ! is_ia32_commutative(irn))          ||  /* if first operand requested irn must be commutative */
        is_ia32_use_frame(irn))                             /* must not already use frame */
        return 0;

    return 1;
}
static void ia32_perform_memory_operand(const void *self, ir_node *irn, ir_node *spill, unsigned int i) {
    const ia32_irn_ops_t *ops = self;
    ia32_code_gen_t      *cg  = ops->cg;

    assert(ia32_possible_memory_operand(self, irn, i) && "Cannot perform memory operand change");

    /* the memory operand is folded at position 3; if position 2 was
     * requested, swap the operands (the irn is commutative then) */
    if (i == 2) {
        ir_node *tmp = get_irn_n(irn, 3);
        set_irn_n(irn, 3, get_irn_n(irn, 2));
        set_irn_n(irn, 2, tmp);
    }

    set_ia32_am_support(irn, ia32_am_Source);
    set_ia32_op_type(irn, ia32_AddrModeS);
    set_ia32_am_flavour(irn, ia32_B);
    set_ia32_ls_mode(irn, get_irn_mode(get_irn_n(irn, i)));
    set_ia32_use_frame(irn);
    set_ia32_need_stackent(irn);

    set_irn_n(irn, 0, get_irg_frame(get_irn_irg(irn)));
    set_irn_n(irn, 3, ia32_get_admissible_noreg(cg, irn, 3));
    set_irn_n(irn, 4, spill);

    /* FIXME DBG_OPT_AM_S(reload, irn); */
}
static const be_abi_callbacks_t ia32_abi_callbacks = {
    ia32_abi_init,
    ia32_abi_done,
    ia32_abi_get_between_type,
    ia32_abi_dont_save_regs,
    ia32_abi_prologue,
    ia32_abi_epilogue,
};

/* fill register allocator interface */

static const arch_irn_ops_if_t ia32_irn_ops_if = {
    ia32_get_irn_reg_req,
    ia32_set_irn_reg,
    ia32_get_irn_reg,
    ia32_classify,
    ia32_get_flags,
    ia32_get_frame_entity,
    ia32_set_frame_entity,
    ia32_set_frame_offset,
    ia32_get_sp_bias,
    ia32_get_inverse,
    ia32_get_op_estimated_cost,
    ia32_possible_memory_operand,
    ia32_perform_memory_operand,
};

ia32_irn_ops_t ia32_irn_ops = {
    &ia32_irn_ops_if,
    NULL                /* current code generator, set in ia32_cg_init() */
};
/**************************************************
 *   ___ ___   __| | ___  __ _  ___ _ __    _| |_
 *  / __/ _ \ / _` |/ _ \/ _` |/ _ \ '_ \  | |  _|
 * | (_| (_) | (_| |  __/ (_| |  __/ | | | | | |
 *  \___\___/ \__,_|\___|\__, |\___|_| |_| |_|_|
 **************************************************/
/**
 * Transforms the standard firm graph into
 * an ia32 firm graph
 */
static void ia32_prepare_graph(void *self) {
    ia32_code_gen_t *cg = self;

    /* transform psi condition trees */
    ia32_pre_transform_phase(cg);

    /* transform all remaining nodes */
    ia32_transform_graph(cg);
    //add_fpu_edges(cg->birg);

    // Matze: disabled for now. Because after transformation start block has no
    // self-loop anymore so it might be merged with its successor block. This
    // will bring several nodes to the startblock which sometimes get scheduled
    // before the initial IncSP/Barrier
    local_optimize_graph(cg->irg);

    if (cg->dump)
        be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);

    /* optimize address mode */
    ia32_optimize_graph(cg);

    if (cg->dump)
        be_dump(cg->irg, "-am", dump_ir_block_graph_sched);

    /* do code placement, to optimize the position of constants */
    place_code(cg->irg);

    if (cg->dump)
        be_dump(cg->irg, "-place", dump_ir_block_graph_sched);
}
/**
 * Dummy functions for hooks we don't need but which must be filled.
 */
static void ia32_before_sched(void *self) {
}
static void remove_unused_nodes(ir_node *irn, bitset_t *already_visited) {
    int      i, arity;
    ir_mode *mode;
    ir_node *mem_proj = NULL;

    mode = get_irn_mode(irn);

    /* check if we already saw this node or the node has more than one user */
    if (bitset_contains_irn(already_visited, irn) || get_irn_n_edges(irn) > 1) {
        return;
    }

    /* mark irn visited */
    bitset_add_irn(already_visited, irn);

    /* non-Tuple nodes with one user: ok, return */
    if (get_irn_n_edges(irn) >= 1 && mode != mode_T) {
        return;
    }

    /* tuple node has one user which is not the mem proj -> ok */
    if (mode == mode_T && get_irn_n_edges(irn) == 1) {
        mem_proj = ia32_get_proj_for_mode(irn, mode_M);
        if (mem_proj == NULL) {
            return;
        }
    }

    arity = get_irn_arity(irn);
    for (i = 0; i < arity; ++i) {
        ir_node *pred = get_irn_n(irn, i);

        /* do not follow memory edges or we will accidentally remove stores */
        if (get_irn_mode(pred) == mode_M) {
            if (mem_proj != NULL) {
                edges_reroute(mem_proj, pred, get_irn_irg(mem_proj));
                mem_proj = NULL;
            }
            continue;
        }

        set_irn_n(irn, i, new_Bad());

        /* The current node is about to be removed: if the predecessor
         * has only this node as user, it needs to be removed as well. */
        if (get_irn_n_edges(pred) <= 1)
            remove_unused_nodes(pred, already_visited);
    }

    /* we need to set the preds to Bad again to also get the memory edges */
    arity = get_irn_arity(irn);
    for (i = 0; i < arity; ++i) {
        set_irn_n(irn, i, new_Bad());
    }

    if (sched_is_scheduled(irn)) {
        sched_remove(irn);
    }
}
static void remove_unused_loads_walker(ir_node *irn, void *env) {
    bitset_t *already_visited = env;
    if (is_ia32_Ld(irn) && ! bitset_contains_irn(already_visited, irn))
        remove_unused_nodes(irn, env);
}
/**
 * Called before the register allocator.
 * Calculate a block schedule here. We need it for the x87
 * simulator and the emitter.
 */
static void ia32_before_ra(void *self) {
    ia32_code_gen_t *cg              = self;
    bitset_t        *already_visited = bitset_irg_alloca(cg->irg);

    /* There are sometimes unused loads, only pinned by memory.
     * We need to remove those Loads and all other nodes which won't be used
     * after removing the Load from the schedule. */
    irg_walk_graph(cg->irg, NULL, remove_unused_loads_walker, already_visited);

    /* setup fpu rounding modes */
    ia32_setup_fpu_mode(cg);
}
/**
 * Transforms a be_Reload into an ia32 Load.
 */
static void transform_to_Load(ia32_code_gen_t *cg, ir_node *node) {
    ir_graph  *irg         = get_irn_irg(node);
    dbg_info  *dbg         = get_irn_dbg_info(node);
    ir_node   *block       = get_nodes_block(node);
    ir_entity *ent         = be_get_frame_entity(node);
    ir_mode   *mode        = get_irn_mode(node);
    ir_mode   *spillmode   = get_spill_mode(node);
    ir_node   *noreg       = ia32_new_NoReg_gp(cg);
    ir_node   *sched_point = NULL;
    ir_node   *ptr         = get_irg_frame(irg);
    ir_node   *mem         = get_irn_n(node, be_pos_Reload_mem);
    ir_node   *new_op, *proj;
    const arch_register_t *reg;

    if (sched_is_scheduled(node)) {
        sched_point = sched_prev(node);
    }

    if (mode_is_float(spillmode)) {
        if (USE_SSE2(cg))
            new_op = new_rd_ia32_xLoad(dbg, irg, block, ptr, noreg, mem);
        else
            new_op = new_rd_ia32_vfld(dbg, irg, block, ptr, noreg, mem);
    }
    else if (get_mode_size_bits(spillmode) == 128) {
        /* Reload 128 bit SSE registers */
        new_op = new_rd_ia32_xxLoad(dbg, irg, block, ptr, noreg, mem);
    }
    else
        new_op = new_rd_ia32_Load(dbg, irg, block, ptr, noreg, mem);

    set_ia32_am_support(new_op, ia32_am_Source);
    set_ia32_op_type(new_op, ia32_AddrModeS);
    set_ia32_am_flavour(new_op, ia32_B);
    set_ia32_ls_mode(new_op, spillmode);
    set_ia32_frame_ent(new_op, ent);
    set_ia32_use_frame(new_op);

    DBG_OPT_RELOAD2LD(node, new_op);

    proj = new_rd_Proj(dbg, irg, block, new_op, mode, pn_ia32_Load_res);

    if (sched_point) {
        sched_add_after(sched_point, new_op);
        sched_add_after(new_op, proj);

        sched_remove(node);
    }

    /* copy the register from the old node to the new Load */
    reg = arch_get_irn_register(cg->arch_env, node);
    arch_set_irn_register(cg->arch_env, new_op, reg);

    SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node));

    exchange(node, proj);
}
/**
 * Transforms a be_Spill node into an ia32 Store.
 */
static void transform_to_Store(ia32_code_gen_t *cg, ir_node *node) {
    ir_graph      *irg      = get_irn_irg(node);
    dbg_info      *dbg      = get_irn_dbg_info(node);
    ir_node       *block    = get_nodes_block(node);
    ir_entity     *ent      = be_get_frame_entity(node);
    const ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
    ir_mode       *mode     = get_spill_mode(spillval);
    ir_node       *noreg    = ia32_new_NoReg_gp(cg);
    ir_node       *nomem    = new_rd_NoMem(irg);
    ir_node       *ptr      = get_irg_frame(irg);
    ir_node       *val      = get_irn_n(node, be_pos_Spill_val);
    ir_node       *store;
    ir_node       *sched_point = NULL;

    if (sched_is_scheduled(node)) {
        sched_point = sched_prev(node);
    }

    /* No need to spill unknown values... */
    if (is_ia32_Unknown_GP(val) ||
        is_ia32_Unknown_VFP(val) ||
        is_ia32_Unknown_XMM(val)) {
        store = nomem;
        if (sched_point)
            sched_remove(node);

        exchange(node, store);
        return;
    }

    if (mode_is_float(mode)) {
        if (USE_SSE2(cg))
            store = new_rd_ia32_xStore(dbg, irg, block, ptr, noreg, val, nomem);
        else
            store = new_rd_ia32_vfst(dbg, irg, block, ptr, noreg, val, nomem);
    } else if (get_mode_size_bits(mode) == 128) {
        /* Spill 128 bit SSE registers */
        store = new_rd_ia32_xxStore(dbg, irg, block, ptr, noreg, val, nomem);
    } else if (get_mode_size_bits(mode) == 8) {
        store = new_rd_ia32_Store8Bit(dbg, irg, block, ptr, noreg, val, nomem);
    } else {
        store = new_rd_ia32_Store(dbg, irg, block, ptr, noreg, val, nomem);
    }

    set_ia32_am_support(store, ia32_am_Dest);
    set_ia32_op_type(store, ia32_AddrModeD);
    set_ia32_am_flavour(store, ia32_B);
    set_ia32_ls_mode(store, mode);
    set_ia32_frame_ent(store, ent);
    set_ia32_use_frame(store);
    SET_IA32_ORIG_NODE(store, ia32_get_old_node_name(cg, node));
    DBG_OPT_SPILL2ST(node, store);

    if (sched_point) {
        sched_add_after(sched_point, store);
        sched_remove(node);
    }

    exchange(node, store);
}
static ir_node *create_push(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent) {
    ir_graph *irg   = get_irn_irg(node);
    dbg_info *dbg   = get_irn_dbg_info(node);
    ir_node  *block = get_nodes_block(node);
    ir_node  *noreg = ia32_new_NoReg_gp(cg);
    ir_node  *frame = get_irg_frame(irg);

    ir_node *push = new_rd_ia32_Push(dbg, irg, block, frame, noreg, noreg, sp, mem);

    set_ia32_frame_ent(push, ent);
    set_ia32_use_frame(push);
    set_ia32_op_type(push, ia32_AddrModeS);
    set_ia32_am_flavour(push, ia32_B);
    set_ia32_ls_mode(push, mode_Is);

    sched_add_before(schedpoint, push);
    return push;
}
static ir_node *create_pop(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_entity *ent) {
    ir_graph *irg   = get_irn_irg(node);
    dbg_info *dbg   = get_irn_dbg_info(node);
    ir_node  *block = get_nodes_block(node);
    ir_node  *noreg = ia32_new_NoReg_gp(cg);
    ir_node  *frame = get_irg_frame(irg);

    ir_node *pop = new_rd_ia32_Pop(dbg, irg, block, frame, noreg, sp, new_NoMem());

    set_ia32_frame_ent(pop, ent);
    set_ia32_use_frame(pop);
    set_ia32_op_type(pop, ia32_AddrModeD);
    set_ia32_am_flavour(pop, ia32_am_OB);
    set_ia32_ls_mode(pop, mode_Is);

    sched_add_before(schedpoint, pop);
    return pop;
}
static ir_node* create_spproj(ia32_code_gen_t *cg, ir_node *node, ir_node *pred, int pos, ir_node *schedpoint) {
    ir_graph *irg    = get_irn_irg(node);
    dbg_info *dbg    = get_irn_dbg_info(node);
    ir_node  *block  = get_nodes_block(node);
    ir_mode  *spmode = mode_Iu;
    const arch_register_t *spreg = &ia32_gp_regs[REG_ESP];
    ir_node  *sp;

    sp = new_rd_Proj(dbg, irg, block, pred, spmode, pos);
    arch_set_irn_register(cg->arch_env, sp, spreg);
    sched_add_before(schedpoint, sp);

    return sp;
}
/**
 * Transform memperm, currently we do this the ugly way and produce
 * push/pop into/from memory cascades. This is possible without using
 * any registers.
 */
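/*
 * Example (a sketch): a MemPerm swapping two 32 bit spill slots A and B
 * becomes
 *
 *     push [A]
 *     push [B]
 *     pop  [A]    ; A receives the old value of B
 *     pop  [B]    ; B receives the old value of A
 *
 * i.e. the stack itself serves as the temporary storage.
 */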
static void transform_MemPerm(ia32_code_gen_t *cg, ir_node *node) {
    ir_graph *irg   = get_irn_irg(node);
    ir_node  *block = get_nodes_block(node);
    ir_node  *in[1];
    ir_node  *keep;
    ir_node **pops;
    int       i, arity;
    ir_node  *sp = be_abi_get_ignore_irn(cg->birg->abi, &ia32_gp_regs[REG_ESP]);
    const ir_edge_t *edge;
    const ir_edge_t *next;

    arity = be_get_MemPerm_entity_arity(node);
    pops  = alloca(arity * sizeof(pops[0]));

    /* create pushs */
    for (i = 0; i < arity; ++i) {
        ir_entity *ent     = be_get_MemPerm_in_entity(node, i);
        ir_type   *enttype = get_entity_type(ent);
        int        entbits = get_type_size_bits(enttype);
        ir_node   *mem     = get_irn_n(node, i + 1);
        ir_node   *push;

        assert(get_type_size_bits(get_entity_type(be_get_MemPerm_out_entity(node, i))) == get_type_size_bits(enttype));
        assert((entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");

        push = create_push(cg, node, node, sp, mem, ent);
        sp   = create_spproj(cg, node, push, pn_ia32_Push_stack, node);
        if (entbits == 64) {
            /* add another push after the first one */
            push = create_push(cg, node, node, sp, mem, ent);
            add_ia32_am_offs_int(push, 4);
            sp   = create_spproj(cg, node, push, pn_ia32_Push_stack, node);
        }

        set_irn_n(node, i, new_Bad());
    }

    /* create pops */
    for (i = arity - 1; i >= 0; --i) {
        ir_entity *ent     = be_get_MemPerm_out_entity(node, i);
        ir_type   *enttype = get_entity_type(ent);
        int        entbits = get_type_size_bits(enttype);
        ir_node   *pop;

        assert((entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");

        pop = create_pop(cg, node, node, sp, ent);
        sp  = create_spproj(cg, node, pop, pn_ia32_Pop_stack, node);
        if (entbits == 64) {
            add_ia32_am_offs_int(pop, 4);

            /* add another pop after the first one */
            pop = create_pop(cg, node, node, sp, ent);
            sp  = create_spproj(cg, node, pop, pn_ia32_Pop_stack, node);
        }

        pops[i] = pop;
    }

    in[0] = sp;
    keep  = be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in);
    sched_add_before(node, keep);

    /* exchange memprojs */
    foreach_out_edge_safe(node, edge, next) {
        ir_node *proj = get_edge_src_irn(edge);
        int p = get_Proj_proj(proj);

        assert(p < arity);

        set_Proj_pred(proj, pops[p]);
        /* the proj now delivers the memory result of the matching pop */
        set_Proj_proj(proj, 3);
    }

    /* remove the MemPerm */
    arity = get_irn_arity(node);
    for (i = 0; i < arity; ++i) {
        set_irn_n(node, i, new_Bad());
    }
    sched_remove(node);
}
/**
 * Block-Walker: Calls the transform functions Spill and Reload.
 */
static void ia32_after_ra_walker(ir_node *block, void *env) {
    ir_node *node, *prev;
    ia32_code_gen_t *cg = env;

    /* beware: the schedule is changed here */
    for (node = sched_last(block); !sched_is_begin(node); node = prev) {
        prev = sched_prev(node);

        if (be_is_Reload(node)) {
            transform_to_Load(cg, node);
        } else if (be_is_Spill(node)) {
            transform_to_Store(cg, node);
        } else if (be_is_MemPerm(node)) {
            transform_MemPerm(cg, node);
        }
    }
}
/**
 * Collects nodes that need frame entities assigned.
 */
static void ia32_collect_frame_entity_nodes(ir_node *node, void *data)
{
    be_fec_env_t *env = data;

    if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
        const ir_mode *mode = get_spill_mode_mode(get_irn_mode(node));
        int align = get_mode_size_bytes(mode);
        be_node_needs_frame_entity(env, node, mode, align);
    } else if (is_ia32_irn(node) && get_ia32_frame_ent(node) == NULL
               && is_ia32_use_frame(node)) {
        if (is_ia32_need_stackent(node) || is_ia32_Load(node)) {
            const ir_mode *mode = get_ia32_ls_mode(node);
            int align = get_mode_size_bytes(mode);
            be_node_needs_frame_entity(env, node, mode, align);
        } else if (is_ia32_vfild(node) || is_ia32_xLoad(node)) {
            const ir_mode *mode = get_ia32_ls_mode(node);
            int align = 4;
            be_node_needs_frame_entity(env, node, mode, align);
        } else if (is_ia32_FldCW(node)) {
            const ir_mode *mode = ia32_reg_classes[CLASS_ia32_fp_cw].mode;
            int align = 4;
            be_node_needs_frame_entity(env, node, mode, align);
        } else if (is_ia32_SetST0(node)) {
            const ir_mode *mode = get_ia32_ls_mode(node);
            int align = 4;
            be_node_needs_frame_entity(env, node, mode, align);
        } else {
            /* all remaining frame users should be stores, which get their
             * entities assigned elsewhere */
            if (!is_ia32_Store(node)
                    && !is_ia32_xStore(node)
                    && !is_ia32_xStoreSimple(node)
                    && !is_ia32_vfist(node)
                    && !is_ia32_GetST0(node)
                    && !is_ia32_FnstCW(node)) {
                assert(0 && "unexpected frame-using node");
            }
        }
    }
}
/**
 * We transform Spill and Reload here. This needs to be done before
 * stack biasing, otherwise we would miss the corrected offset for these nodes.
 */
static void ia32_after_ra(void *self) {
    ia32_code_gen_t *cg      = self;
    ir_graph        *irg     = cg->irg;
    be_fec_env_t    *fec_env = be_new_frame_entity_coalescer(cg->birg);

    /* create and coalesce frame entities */
    irg_walk_graph(irg, NULL, ia32_collect_frame_entity_nodes, fec_env);
    be_assign_entities(fec_env);
    be_free_frame_entity_coalescer(fec_env);

    irg_block_walk_graph(irg, NULL, ia32_after_ra_walker, cg);

    ia32_finish_irg(irg, cg);
}
/**
 * Last touchups for the graph before emit: x87 simulation to replace the
 * virtual with real x87 instructions, creating a block schedule and
 * peephole optimisations.
 */
static void ia32_finish(void *self) {
    ia32_code_gen_t *cg  = self;
    ir_graph        *irg = cg->irg;

    /* if we do x87 code generation, rewrite all the virtual instructions and registers */
    if (cg->used_fp == fp_x87 || cg->force_sim) {
        x87_simulate_graph(cg->arch_env, cg->birg);
    }

    /* create block schedule, this also removes empty blocks which might
     * produce critical edges */
    cg->blk_sched = be_create_block_schedule(irg, cg->birg->exec_freq);

    /* do peephole optimisations */
    ia32_peephole_optimization(irg, cg);
}
/**
 * Emits the code, closes the output file and frees
 * the code generator interface.
 */
static void ia32_codegen(void *self) {
    ia32_code_gen_t *cg  = self;
    ir_graph        *irg = cg->irg;

    ia32_gen_routine(cg, irg);

    cur_reg_set = NULL;

    /* remove it from the isa */
    cg->isa->cg = NULL;

    /* de-allocate code generator */
    del_set(cg->reg_set);
    free(cg);
}
static void *ia32_cg_init(be_irg_t *birg);

static const arch_code_generator_if_t ia32_code_gen_if = {
    ia32_cg_init,
    NULL,                /* before abi introduce hook */
    ia32_prepare_graph,
    NULL,                /* spill hook */
    ia32_before_sched,   /* before scheduling hook */
    ia32_before_ra,      /* before register allocation hook */
    ia32_after_ra,       /* after register allocation hook */
    ia32_finish,         /* called before codegen */
    ia32_codegen         /* emit && done */
};
/**
 * Initializes an IA32 code generator.
 */
static void *ia32_cg_init(be_irg_t *birg) {
    ia32_isa_t      *isa = (ia32_isa_t *)birg->main_env->arch_env->isa;
    ia32_code_gen_t *cg  = xcalloc(1, sizeof(*cg));

    cg->impl      = &ia32_code_gen_if;
    cg->irg       = birg->irg;
    cg->reg_set   = new_set(ia32_cmp_irn_reg_assoc, 1024);
    cg->arch_env  = birg->main_env->arch_env;
    cg->isa       = isa;
    cg->birg      = birg;
    cg->blk_sched = NULL;
    cg->fp_kind   = isa->fp_kind;
    cg->used_fp   = fp_none;
    cg->dump      = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;

    /* copy optimizations from isa for easier access */
    cg->opt      = isa->opt;
    cg->arch     = isa->arch;
    cg->opt_arch = isa->opt_arch;

    /* reset the name obstack before the next graph is processed */
    if (isa->name_obst) {
        obstack_free(isa->name_obst, NULL);
        obstack_init(isa->name_obst);
    }

    cur_reg_set = cg->reg_set;

    ia32_irn_ops.cg = cg;

    return (arch_code_generator_t *)cg;
}
/*****************************************************************
 *  ____             _                  _   _____  _____
 * | _ \            | |                | | |_   _|/ ____|  /\
 * | |_) | __ _  ___| | _____ _ __   __| |   | |  | (___   /  \
 * |  _ < / _` |/ __| |/ / _ \ '_ \ / _` |   | |   \___ \ / /\ \
 * | |_) | (_| | (__|   <  __/ | | | (_| |  _| |_ ____) / ____ \
 * |____/ \__,_|\___|_|\_\___|_| |_|\__,_| |_____|_____/_/    \_\
 *****************************************************************/
/**
 * Set output modes for GCC
 */
static const tarval_mode_info mo_integer = {
    TVO_DECIMAL,
    NULL,
    NULL,
};

/*
 * set the tarval output mode of all integer modes to decimal
 */
static void set_tarval_output_modes(void)
{
    int i;

    for (i = get_irp_n_modes() - 1; i >= 0; --i) {
        ir_mode *mode = get_irp_mode(i);

        if (mode_is_int(mode))
            set_tarval_mode_output_option(mode, &mo_integer);
    }
}
const arch_isa_if_t ia32_isa_if;

/**
 * The template that generates a new ISA object.
 * Note that this template can be changed by command line
 * arguments.
 */
static ia32_isa_t ia32_isa_template = {
    {
        &ia32_isa_if,            /* isa interface implementation */
        &ia32_gp_regs[REG_ESP],  /* stack pointer register */
        &ia32_gp_regs[REG_EBP],  /* base pointer register */
        -1,                      /* stack direction */
        NULL,                    /* main environment */
    },
    { NULL, },                   /* emitter environment */
    NULL,                        /* 16bit register names */
    NULL,                        /* 8bit register names */
    NULL,                        /* types */
    NULL,                        /* tv_ent */
    (0                 |
    IA32_OPT_INCDEC    |         /* optimize add 1, sub 1 into inc/dec               default: on */
    IA32_OPT_DOAM      |         /* optimize address mode                            default: on */
    IA32_OPT_LEA       |         /* optimize for LEAs                                default: on */
    IA32_OPT_PLACECNST |         /* place constants immediately before instructions, default: on */
    IA32_OPT_IMMOPS    |         /* operations can use immediates,                   default: on */
    IA32_OPT_PUSHARGS),          /* create pushs for function argument passing,      default: on */
    arch_pentium_4,              /* instruction architecture */
    arch_pentium_4,              /* optimize for architecture */
    fp_sse2,                     /* use sse2 unit */
    NULL,                        /* current code generator */
    NULL,                        /* abstract machine */
    NULL,                        /* name obstack */
    0                            /* name obst size */
};
/**
 * Initializes the backend ISA.
 */
static void *ia32_init(FILE *file_handle) {
    static int inited = 0;
    ia32_isa_t *isa;

    if (inited)
        return NULL;
    inited = 1;

    set_tarval_output_modes();

    isa = xmalloc(sizeof(*isa));
    memcpy(isa, &ia32_isa_template, sizeof(*isa));

    if (mode_fpcw == NULL) {
        mode_fpcw = new_ir_mode("Fpcw", irms_int_number, 16, 0, irma_none, 0);
    }

    ia32_register_init(isa);
    ia32_create_opcodes();
    ia32_register_copy_attr_func();

    if ((ARCH_INTEL(isa->arch) && isa->arch < arch_pentium_4) ||
        (ARCH_AMD(isa->arch) && isa->arch < arch_athlon))
        /* no SSE2 for these CPUs */
        isa->fp_kind = fp_x87;

    if (ARCH_INTEL(isa->opt_arch) && isa->opt_arch >= arch_pentium_4) {
        /* the Pentium 4 doesn't like inc and dec instructions */
        isa->opt &= ~IA32_OPT_INCDEC;
    }

    be_emit_init_env(&isa->emit, file_handle);
    isa->regs_16bit = pmap_create();
    isa->regs_8bit  = pmap_create();
    isa->types      = pmap_create();
    isa->tv_ent     = pmap_create();
    isa->cpu        = ia32_init_machine_description();

    ia32_build_16bit_reg_map(isa->regs_16bit);
    ia32_build_8bit_reg_map(isa->regs_8bit);

    isa->name_obst = xmalloc(sizeof(*isa->name_obst));
    obstack_init(isa->name_obst);

    ia32_handle_intrinsics();

    /* needed for the debug support */
    be_gas_emit_switch_section(&isa->emit, GAS_SECTION_TEXT);
    be_emit_cstring(&isa->emit, ".Ltext0:\n");
    be_emit_write_line(&isa->emit);

    /* we mark referenced global entities, so we can only emit those which
     * are actually referenced. (Note: you mustn't use the type visited flag
     * elsewhere in the backend) */
    inc_master_type_visited();

    return isa;
}
/**
 * Closes the output file and frees the ISA structure.
 */
static void ia32_done(void *self) {
    ia32_isa_t *isa = self;

    /* emit now all global declarations */
    be_gas_emit_decls(&isa->emit, isa->arch_isa.main_env, 1);

    pmap_destroy(isa->regs_16bit);
    pmap_destroy(isa->regs_8bit);
    pmap_destroy(isa->tv_ent);
    pmap_destroy(isa->types);

    /* free the name obstack */
    obstack_free(isa->name_obst, NULL);
    free(isa->name_obst);

    be_emit_destroy_env(&isa->emit);

    free(self);
}
/**
 * Return the number of register classes for this architecture.
 * We always report these:
 *  - the general purpose registers
 *  - the SSE floating point register set
 *  - the virtual floating point registers
 *  - the SSE vector register set
 */
static int ia32_get_n_reg_class(const void *self) {
    return N_CLASSES;
}

/**
 * Return the register class for index i.
 */
static const arch_register_class_t *ia32_get_reg_class(const void *self, int i)
{
    assert(i >= 0 && i < N_CLASSES);
    return &ia32_reg_classes[i];
}
/**
 * Get the register class which shall be used to store a value of a given mode.
 * @param self  The this pointer.
 * @param mode  The mode in question.
 * @return A register class which can hold values of the given mode.
 */
const arch_register_class_t *ia32_get_reg_class_for_mode(const void *self, const ir_mode *mode) {
    const ia32_isa_t *isa = self;
    if (mode_is_float(mode)) {
        return USE_SSE2(isa) ? &ia32_reg_classes[CLASS_ia32_xmm] : &ia32_reg_classes[CLASS_ia32_vfp];
    }
    else
        return &ia32_reg_classes[CLASS_ia32_gp];
}
/**
 * Get the ABI restrictions for procedure calls.
 * @param self         The this pointer.
 * @param method_type  The type of the method (procedure) in question.
 * @param abi          The abi object to be modified
 */
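/*
 * In short (a sketch of the cdecl-style convention configured below):
 * parameters live on the stack, last argument pushed first, each slot
 * 32 bit aligned; integer results return in EAX (EDX:EAX for a 64 bit
 * pair) and floating point results in the virtual fp register vf0 (st0).
 */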
static void ia32_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi) {
    const ia32_isa_t *isa = self;
    ir_type  *tp;
    ir_mode  *mode;
    unsigned  cc = get_method_calling_convention(method_type);
    int       n  = get_method_n_params(method_type);
    int       biggest_n;
    int       stack_idx = 0;
    int       i, ignore_1, ignore_2;
    ir_mode **modes;
    const arch_register_t *reg;
    be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);

    unsigned use_push = !IS_P6_ARCH(isa->opt_arch);

    /* set abi flags for calls */
    call_flags.bits.left_to_right         = 0;  /* always last arg first on stack */
    call_flags.bits.store_args_sequential = use_push;
    /* call_flags.bits.try_omit_fp  not changed: can handle both settings */
    call_flags.bits.fp_free               = 0;  /* the frame pointer is fixed in IA32 */
    call_flags.bits.call_has_imm          = 1;  /* IA32 calls can have immediate address */

    /* set stack parameter passing style */
    be_abi_call_set_flags(abi, call_flags, &ia32_abi_callbacks);

    /* collect the mode for each type */
    modes = alloca(n * sizeof(modes[0]));

    for (i = 0; i < n; i++) {
        tp       = get_method_param_type(method_type, i);
        modes[i] = get_type_mode(tp);
    }

    /* set register parameters */
    if (cc & cc_reg_param) {
        /* determine the number of parameters passed via registers */
        biggest_n = ia32_get_n_regparam_class(n, modes, &ignore_1, &ignore_2);

        /* loop over all parameters and set the register requirements */
        for (i = 0; i <= biggest_n; i++) {
            reg = ia32_get_RegParam_reg(n, modes, i, cc);
            assert(reg && "no register found for parameter");
            be_abi_call_param_reg(abi, i, reg);
        }

        stack_idx = i;
    }

    /* set stack parameters */
    for (i = stack_idx; i < n; i++) {
        /* parameters on the stack are 32 bit aligned */
        be_abi_call_param_stack(abi, i, 4, 0, 0);
    }

    /* set return registers */
    n = get_method_n_ress(method_type);

    assert(n <= 2 && "more than two results not supported");

    /* In case of 64bit returns, we will have two 32bit values */
    if (n == 2) {
        tp   = get_method_res_type(method_type, 0);
        mode = get_type_mode(tp);

        assert(!mode_is_float(mode) && "two FP results not supported");

        tp   = get_method_res_type(method_type, 1);
        mode = get_type_mode(tp);

        assert(!mode_is_float(mode) && "mixed INT, FP results not supported");

        be_abi_call_res_reg(abi, 0, &ia32_gp_regs[REG_EAX]);
        be_abi_call_res_reg(abi, 1, &ia32_gp_regs[REG_EDX]);
    }
    else if (n == 1) {
        const arch_register_t *reg;

        tp   = get_method_res_type(method_type, 0);
        assert(is_atomic_type(tp));
        mode = get_type_mode(tp);

        reg = mode_is_float(mode) ? &ia32_vfp_regs[REG_VF0] : &ia32_gp_regs[REG_EAX];

        be_abi_call_res_reg(abi, 0, reg);
    }
}
static const void *ia32_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn) {
    return &ia32_irn_ops;
}

const arch_irn_handler_t ia32_irn_handler = {
    ia32_get_irn_ops
};

const arch_irn_handler_t *ia32_get_irn_handler(const void *self) {
    return &ia32_irn_handler;
}
int ia32_to_appear_in_schedule(void *block_env, const ir_node *irn) {
    if (!is_ia32_irn(irn))
        return -1;

    if (is_ia32_NoReg_GP(irn) || is_ia32_NoReg_VFP(irn) || is_ia32_NoReg_XMM(irn)
            || is_ia32_Unknown_GP(irn) || is_ia32_Unknown_XMM(irn)
            || is_ia32_Unknown_VFP(irn) || is_ia32_ChangeCW(irn))
        return 0;

    return 1;
}
/**
 * Initializes the code generator interface.
 */
static const arch_code_generator_if_t *ia32_get_code_generator_if(void *self) {
    return &ia32_code_gen_if;
}
/**
 * Returns the estimated execution time of an ia32 irn.
 */
static sched_timestep_t ia32_sched_exectime(void *env, const ir_node *irn) {
    const arch_env_t *arch_env = env;
    return is_ia32_irn(irn) ? ia32_get_op_estimated_cost(arch_get_irn_ops(arch_env, irn), irn) : 1;
}
list_sched_selector_t ia32_sched_selector;

/**
 * Returns the reg_pressure scheduler with to_appear_in_schedule() overloaded
 */
static const list_sched_selector_t *ia32_get_list_sched_selector(const void *self, list_sched_selector_t *selector) {
    memcpy(&ia32_sched_selector, selector, sizeof(ia32_sched_selector));
    ia32_sched_selector.exectime              = ia32_sched_exectime;
    ia32_sched_selector.to_appear_in_schedule = ia32_to_appear_in_schedule;
    return &ia32_sched_selector;
}
static const ilp_sched_selector_t *ia32_get_ilp_sched_selector(const void *self) {
    return NULL;
}
/**
 * Returns the necessary byte alignment for storing a register of given class.
 */
static int ia32_get_reg_class_alignment(const void *self, const arch_register_class_t *cls) {
    ir_mode *mode = arch_register_class_mode(cls);
    int bytes     = get_mode_size_bytes(mode);

    /* SSE spills are 16 byte aligned */
    if (mode_is_float(mode) && bytes > 8)
        return 16;

    return bytes;
}
static const be_execution_unit_t ***ia32_get_allowed_execution_units(const void *self, const ir_node *irn) {
    static const be_execution_unit_t *_allowed_units_BRANCH[] = {
        &ia32_execution_units_BRANCH[IA32_EXECUNIT_TP_BRANCH_BRANCH1],
        &ia32_execution_units_BRANCH[IA32_EXECUNIT_TP_BRANCH_BRANCH2],
        NULL,
    };
    static const be_execution_unit_t *_allowed_units_GP[] = {
        &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EAX],
        &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EBX],
        &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_ECX],
        &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EDX],
        &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_ESI],
        &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EDI],
        &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EBP],
        NULL,
    };
    static const be_execution_unit_t *_allowed_units_DUMMY[] = {
        &be_machine_execution_units_DUMMY[0],
        NULL,
    };
    static const be_execution_unit_t **_units_callret[] = {
        _allowed_units_BRANCH,
        NULL,
    };
    static const be_execution_unit_t **_units_other[] = {
        _allowed_units_GP,
        NULL,
    };
    static const be_execution_unit_t **_units_dummy[] = {
        _allowed_units_DUMMY,
        NULL,
    };
    const be_execution_unit_t ***ret;

    if (is_ia32_irn(irn)) {
        ret = get_ia32_exec_units(irn);
    }
    else if (is_be_node(irn)) {
        if (be_is_Call(irn) || be_is_Return(irn)) {
            ret = _units_callret;
        }
        else if (be_is_Barrier(irn)) {
            ret = _units_dummy;
        }
        else {
            ret = _units_other;
        }
    }
    else {
        ret = _units_dummy;
    }

    return ret;
}
/**
 * Return the abstract ia32 machine.
 */
static const be_machine_t *ia32_get_machine(const void *self) {
    const ia32_isa_t *isa = self;
    return isa->cpu;
}
/**
 * Return irp irgs in the desired order.
 */
static ir_graph **ia32_get_irg_list(const void *self, ir_graph ***irg_list) {
    return NULL;
}
/**
 * Allows or disallows the creation of Psi nodes for the given Phi nodes.
 * @return 1 if allowed, 0 otherwise
 */
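/*
 * Example (a sketch): an if-diamond computing x = cond ? a : b over 32 bit
 * integers may be turned into a Psi (and later a cmov); the same diamond
 * over doubles or 64 bit values keeps its control flow, as checked below.
 */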
static int ia32_is_psi_allowed(ir_node *sel, ir_node *phi_list, int i, int j)
{
    ir_node *cmp, *cmp_a, *phi;
    ir_mode *mode;

/* we don't want long long and floating point Psi */
#define IS_BAD_PSI_MODE(mode) (mode_is_float(mode) || get_mode_size_bits(mode) > 32)

    if (get_irn_mode(sel) != mode_b)
        return 0;

    cmp   = get_Proj_pred(sel);
    cmp_a = get_Cmp_left(cmp);
    mode  = get_irn_mode(cmp_a);

    if (IS_BAD_PSI_MODE(mode))
        return 0;

    /* check the Phi nodes */
    for (phi = phi_list; phi; phi = get_irn_link(phi)) {
        ir_node *pred_i = get_irn_n(phi, i);
        ir_node *pred_j = get_irn_n(phi, j);
        ir_mode *mode_i = get_irn_mode(pred_i);
        ir_mode *mode_j = get_irn_mode(pred_j);

        if (IS_BAD_PSI_MODE(mode_i) || IS_BAD_PSI_MODE(mode_j))
            return 0;
    }

#undef IS_BAD_PSI_MODE

    return 1;
}
static ia32_intrinsic_env_t intrinsic_env = {
    NULL,  /**< the irg, these entities belong to */
    NULL,  /**< entity for first div operand (move into FPU) */
    NULL,  /**< entity for second div operand (move into FPU) */
    NULL,  /**< entity for converts ll -> d */
    NULL,  /**< entity for converts d -> ll */
};
/**
 * Returns the libFirm configuration parameter for this backend.
 */
static const backend_params *ia32_get_libfirm_params(void) {
    static const opt_if_conv_info_t ifconv = {
        4,                   /* maxdepth, doesn't matter for Psi-conversion */
        ia32_is_psi_allowed  /* allows or disallows Psi creation for given selector */
    };
    static const arch_dep_params_t ad = {
        1,  /* also use subs */
        4,  /* maximum shifts */
        31, /* maximum shift amount */

        1,  /* allow Mulhs */
        1,  /* allow Mulus */
        32  /* Mulh allowed up to 32 bit */
    };
    static backend_params p = {
        NULL,  /* no additional opcodes */
        NULL,  /* will be set later */
        1,     /* need dword lowering */
        ia32_create_intrinsic_fkt,
        &intrinsic_env,  /* context for ia32_create_intrinsic_fkt */
        NULL,  /* will be set later */
    };

    p.dep_param    = &ad;
    p.if_conv_info = &ifconv;
    return &p;
}
/* instruction set architectures. */
static const lc_opt_enum_int_items_t arch_items[] = {
    { "386",        arch_i386, },
    { "486",        arch_i486, },
    { "pentium",    arch_pentium, },
    { "586",        arch_pentium, },
    { "pentiumpro", arch_pentium_pro, },
    { "686",        arch_pentium_pro, },
    { "pentiummmx", arch_pentium_mmx, },
    { "pentium2",   arch_pentium_2, },
    { "p2",         arch_pentium_2, },
    { "pentium3",   arch_pentium_3, },
    { "p3",         arch_pentium_3, },
    { "pentium4",   arch_pentium_4, },
    { "p4",         arch_pentium_4, },
    { "pentiumm",   arch_pentium_m, },
    { "pm",         arch_pentium_m, },
    { "core",       arch_core, },
    { "athlon",     arch_athlon, },
    { "athlon64",   arch_athlon_64, },
    { "opteron",    arch_opteron, },
    { NULL,         0 }
};
static lc_opt_enum_int_var_t arch_var = {
    &ia32_isa_template.arch, arch_items
};

static lc_opt_enum_int_var_t opt_arch_var = {
    &ia32_isa_template.opt_arch, arch_items
};

static const lc_opt_enum_int_items_t fp_unit_items[] = {
    { "x87",  fp_x87 },
    { "sse2", fp_sse2 },
    { NULL,   0 }
};

static lc_opt_enum_int_var_t fp_unit_var = {
    &ia32_isa_template.fp_kind, fp_unit_items
};

static const lc_opt_enum_int_items_t gas_items[] = {
    { "normal", GAS_FLAVOUR_NORMAL },
    { "mingw",  GAS_FLAVOUR_MINGW },
    { NULL,     0 }
};

static lc_opt_enum_int_var_t gas_var = {
    (int*) &be_gas_flavour, gas_items
};

static const lc_opt_table_entry_t ia32_options[] = {
    LC_OPT_ENT_ENUM_INT("arch",      "select the instruction architecture", &arch_var),
    LC_OPT_ENT_ENUM_INT("opt",       "optimize for instruction architecture", &opt_arch_var),
    LC_OPT_ENT_ENUM_INT("fpunit",    "select the floating point unit", &fp_unit_var),
    LC_OPT_ENT_NEGBIT("noaddrmode",  "do not use address mode", &ia32_isa_template.opt, IA32_OPT_DOAM),
    LC_OPT_ENT_NEGBIT("nolea",       "do not optimize for LEAs", &ia32_isa_template.opt, IA32_OPT_LEA),
    LC_OPT_ENT_NEGBIT("noplacecnst", "do not place constants", &ia32_isa_template.opt, IA32_OPT_PLACECNST),
    LC_OPT_ENT_NEGBIT("noimmop",     "no operations with immediates", &ia32_isa_template.opt, IA32_OPT_IMMOPS),
    LC_OPT_ENT_NEGBIT("nopushargs",  "do not create pushs for function arguments", &ia32_isa_template.opt, IA32_OPT_PUSHARGS),
    LC_OPT_ENT_ENUM_INT("gasmode",   "set the GAS compatibility mode", &gas_var),
    LC_OPT_LAST
};
const arch_isa_if_t ia32_isa_if = {
    ia32_init,
    ia32_done,
    ia32_get_n_reg_class,
    ia32_get_reg_class,
    ia32_get_reg_class_for_mode,
    ia32_get_call_abi,
    ia32_get_irn_handler,
    ia32_get_code_generator_if,
    ia32_get_list_sched_selector,
    ia32_get_ilp_sched_selector,
    ia32_get_reg_class_alignment,
    ia32_get_libfirm_params,
    ia32_get_allowed_execution_units,
    ia32_get_machine,
    ia32_get_irg_list,
};
void ia32_init_emitter(void);
void ia32_init_finish(void);
void ia32_init_optimize(void);
void ia32_init_transform(void);
void ia32_init_x87(void);

void be_init_arch_ia32(void)
{
    lc_opt_entry_t *be_grp   = lc_opt_get_grp(firm_opt_get_root(), "be");
    lc_opt_entry_t *ia32_grp = lc_opt_get_grp(be_grp, "ia32");

    lc_opt_add_table(ia32_grp, ia32_options);
    be_register_isa_if("ia32", &ia32_isa_if);

    FIRM_DBG_REGISTER(dbg, "firm.be.ia32.cg");

    ia32_init_emitter();
    ia32_init_finish();
    ia32_init_optimize();
    ia32_init_transform();
    ia32_init_x87();
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_ia32);