2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This is the main ia32 firm backend driver.
23 * @author Christian Wuerdig
31 #include "lc_opts_enum.h"
35 #include "pseudo_irg.h"
40 #include "iredges_t.h"
53 #include "iroptimize.h"
54 #include "instrument.h"
57 #include "../beirg_t.h"
58 #include "../benode_t.h"
59 #include "../belower.h"
60 #include "../besched_t.h"
63 #include "../beirgmod.h"
64 #include "../be_dbgout.h"
65 #include "../beblocksched.h"
66 #include "../bemachine.h"
67 #include "../beilpsched.h"
68 #include "../bespillslots.h"
69 #include "../bemodule.h"
70 #include "../begnuas.h"
71 #include "../bestate.h"
72 #include "../beflags.h"
74 #include "bearch_ia32_t.h"
76 #include "ia32_new_nodes.h"
77 #include "gen_ia32_regalloc_if.h"
78 #include "gen_ia32_machine.h"
79 #include "ia32_common_transform.h"
80 #include "ia32_transform.h"
81 #include "ia32_emitter.h"
82 #include "ia32_map_regs.h"
83 #include "ia32_optimize.h"
85 #include "ia32_dbg_stat.h"
86 #include "ia32_finish.h"
87 #include "ia32_util.h"
89 #include "ia32_architecture.h"
92 #include "ia32_pbqp_transform.h"
95 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
98 static set *cur_reg_set = NULL;
100 ir_mode *mode_fpcw = NULL;
101 ia32_code_gen_t *ia32_current_cg = NULL;
104 * The environment for the intrinsic mapping.
106 static ia32_intrinsic_env_t intrinsic_env = {
108 NULL, /* the irg, these entities belong to */
109 NULL, /* entity for first div operand (move into FPU) */
110 NULL, /* entity for second div operand (move into FPU) */
111 NULL, /* entity for converts ll -> d */
112 NULL, /* entity for converts d -> ll */
113 NULL, /* entity for __divdi3 library call */
114 NULL, /* entity for __moddi3 library call */
115 NULL, /* entity for __udivdi3 library call */
116 NULL, /* entity for __umoddi3 library call */
117 NULL, /* bias value for conversion from float to unsigned 64 */
121 typedef ir_node *(*create_const_node_func) (dbg_info *dbg, ir_graph *irg, ir_node *block);
123 static INLINE ir_node *create_const(ia32_code_gen_t *cg, ir_node **place,
124 create_const_node_func func,
125 const arch_register_t* reg)
127 ir_node *block, *res;
132 block = get_irg_start_block(cg->irg);
133 res = func(NULL, cg->irg, block);
134 arch_set_irn_register(cg->arch_env, res, reg);
137 add_irn_dep(get_irg_end(cg->irg), res);
138 /* add_irn_dep(get_irg_start(cg->irg), res); */
143 /* Creates the unique per irg GP NoReg node. */
144 ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg) {
145 return create_const(cg, &cg->noreg_gp, new_rd_ia32_NoReg_GP,
146 &ia32_gp_regs[REG_GP_NOREG]);
149 ir_node *ia32_new_NoReg_vfp(ia32_code_gen_t *cg) {
150 return create_const(cg, &cg->noreg_vfp, new_rd_ia32_NoReg_VFP,
151 &ia32_vfp_regs[REG_VFP_NOREG]);
154 ir_node *ia32_new_NoReg_xmm(ia32_code_gen_t *cg) {
155 return create_const(cg, &cg->noreg_xmm, new_rd_ia32_NoReg_XMM,
156 &ia32_xmm_regs[REG_XMM_NOREG]);
159 ir_node *ia32_new_Unknown_gp(ia32_code_gen_t *cg) {
160 return create_const(cg, &cg->unknown_gp, new_rd_ia32_Unknown_GP,
161 &ia32_gp_regs[REG_GP_UKNWN]);
164 ir_node *ia32_new_Unknown_vfp(ia32_code_gen_t *cg) {
165 return create_const(cg, &cg->unknown_vfp, new_rd_ia32_Unknown_VFP,
166 &ia32_vfp_regs[REG_VFP_UKNWN]);
169 ir_node *ia32_new_Unknown_xmm(ia32_code_gen_t *cg) {
170 return create_const(cg, &cg->unknown_xmm, new_rd_ia32_Unknown_XMM,
171 &ia32_xmm_regs[REG_XMM_UKNWN]);
174 ir_node *ia32_new_Fpu_truncate(ia32_code_gen_t *cg) {
175 return create_const(cg, &cg->fpu_trunc_mode, new_rd_ia32_ChangeCW,
176 &ia32_fp_cw_regs[REG_FPCW]);
181 * Returns the admissible noreg register node for input register pos of node irn.
183 ir_node *ia32_get_admissible_noreg(ia32_code_gen_t *cg, ir_node *irn, int pos) {
184 const arch_register_req_t *req;
186 req = arch_get_register_req(cg->arch_env, irn, pos);
187 assert(req != NULL && "Missing register requirements");
188 if (req->cls == &ia32_reg_classes[CLASS_ia32_gp])
189 return ia32_new_NoReg_gp(cg);
191 if (ia32_cg_config.use_sse2) {
192 return ia32_new_NoReg_xmm(cg);
194 return ia32_new_NoReg_vfp(cg);
198 /**************************************************
201 * _ __ ___ __ _ __ _| | | ___ ___ _| |_
202 * | '__/ _ \/ _` | / _` | | |/ _ \ / __| | | _|
203 * | | | __/ (_| | | (_| | | | (_) | (__ | | |
204 * |_| \___|\__, | \__,_|_|_|\___/ \___| |_|_|
207 **************************************************/
210 * Return register requirements for an ia32 node.
211 * If the node returns a tuple (mode_T) then the proj's
212 * will be asked for this information.
214 static const arch_register_req_t *ia32_get_irn_reg_req(const ir_node *node,
217 ir_mode *mode = get_irn_mode(node);
220 if (mode == mode_X || is_Block(node)) {
221 return arch_no_register_req;
224 if (mode == mode_T && pos < 0) {
225 return arch_no_register_req;
228 node_pos = pos == -1 ? 0 : pos;
230 if (mode == mode_M || pos >= 0) {
231 return arch_no_register_req;
234 node_pos = (pos == -1) ? get_Proj_proj(node) : pos;
235 node = skip_Proj_const(node);
238 if (is_ia32_irn(node)) {
239 const arch_register_req_t *req;
241 req = get_ia32_in_req(node, pos);
243 req = get_ia32_out_req(node, node_pos);
250 /* unknowns should be transformed already */
251 assert(!is_Unknown(node));
252 return arch_no_register_req;
255 static void ia32_set_irn_reg(ir_node *irn, const arch_register_t *reg)
259 if (get_irn_mode(irn) == mode_X) {
264 pos = get_Proj_proj(irn);
265 irn = skip_Proj(irn);
268 if (is_ia32_irn(irn)) {
269 const arch_register_t **slots;
271 slots = get_ia32_slots(irn);
274 ia32_set_firm_reg(irn, reg, cur_reg_set);
278 static const arch_register_t *ia32_get_irn_reg(const ir_node *irn)
281 const arch_register_t *reg = NULL;
285 if (get_irn_mode(irn) == mode_X) {
289 pos = get_Proj_proj(irn);
290 irn = skip_Proj_const(irn);
293 if (is_ia32_irn(irn)) {
294 const arch_register_t **slots;
295 slots = get_ia32_slots(irn);
296 assert(pos < get_ia32_n_res(irn));
299 reg = ia32_get_firm_reg(irn, cur_reg_set);
305 static arch_irn_class_t ia32_classify(const ir_node *irn) {
306 arch_irn_class_t classification = arch_irn_class_normal;
308 irn = skip_Proj_const(irn);
311 classification |= arch_irn_class_branch;
313 if (! is_ia32_irn(irn))
314 return classification & ~arch_irn_class_normal;
317 classification |= arch_irn_class_load;
320 classification |= arch_irn_class_store;
322 if (is_ia32_need_stackent(irn))
323 classification |= arch_irn_class_reload;
325 return classification;
328 static arch_irn_flags_t ia32_get_flags(const ir_node *irn) {
329 arch_irn_flags_t flags = arch_irn_flags_none;
332 return arch_irn_flags_ignore;
334 if(is_Proj(irn) && mode_is_datab(get_irn_mode(irn))) {
335 ir_node *pred = get_Proj_pred(irn);
337 if(is_ia32_irn(pred)) {
338 flags = get_ia32_out_flags(pred, get_Proj_proj(irn));
344 if (is_ia32_irn(irn)) {
345 flags |= get_ia32_flags(irn);
352 * The IA32 ABI callback object.
355 be_abi_call_flags_bits_t flags; /**< The call flags. */
356 const arch_env_t *aenv; /**< The architecture environment. */
357 ir_graph *irg; /**< The associated graph. */
360 static ir_entity *ia32_get_frame_entity(const ir_node *irn) {
361 return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
364 static void ia32_set_frame_entity(ir_node *irn, ir_entity *ent) {
365 set_ia32_frame_ent(irn, ent);
368 static void ia32_set_frame_offset(ir_node *irn, int bias)
370 if (get_ia32_frame_ent(irn) == NULL)
373 if (is_ia32_Pop(irn) || is_ia32_PopMem(irn)) {
374 ia32_code_gen_t *cg = ia32_current_cg;
375 int omit_fp = be_abi_omit_fp(cg->birg->abi);
377 /* Pop nodes modify the stack pointer before calculating the
378 * destination address, so fix this here
383 add_ia32_am_offs_int(irn, bias);
386 static int ia32_get_sp_bias(const ir_node *node)
388 if (is_ia32_Push(node))
391 if (is_ia32_Pop(node) || is_ia32_PopMem(node))
398 * Put all registers which are saved by the prologue/epilogue in a set.
400 * @param self The callback object.
401 * @param s The result set.
403 static void ia32_abi_dont_save_regs(void *self, pset *s)
405 ia32_abi_env_t *env = self;
406 if(env->flags.try_omit_fp)
407 pset_insert_ptr(s, env->aenv->bp);
411 * Generate the routine prologue.
413 * @param self The callback object.
414 * @param mem A pointer to the mem node. Update this if you define new memory.
415 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
417 * @return The register which shall be used as a stack frame base.
419 * All nodes which define registers in @p reg_map must keep @p reg_map current.
421 static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map)
423 ia32_abi_env_t *env = self;
424 ia32_code_gen_t *cg = ia32_current_cg;
425 const arch_env_t *arch_env = env->aenv;
427 if (! env->flags.try_omit_fp) {
428 ir_graph *irg =env->irg;
429 ir_node *bl = get_irg_start_block(irg);
430 ir_node *curr_sp = be_abi_reg_map_get(reg_map, arch_env->sp);
431 ir_node *curr_bp = be_abi_reg_map_get(reg_map, arch_env->bp);
432 ir_node *noreg = ia32_new_NoReg_gp(cg);
435 /* ALL nodes representing bp must be set to ignore. */
436 be_node_set_flags(get_Proj_pred(curr_bp), BE_OUT_POS(get_Proj_proj(curr_bp)), arch_irn_flags_ignore);
439 push = new_rd_ia32_Push(NULL, irg, bl, noreg, noreg, *mem, curr_bp, curr_sp);
440 curr_sp = new_r_Proj(irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
441 *mem = new_r_Proj(irg, bl, push, mode_M, pn_ia32_Push_M);
443 /* the push must have SP out register */
444 arch_set_irn_register(arch_env, curr_sp, arch_env->sp);
445 set_ia32_flags(push, arch_irn_flags_ignore);
447 /* move esp to ebp */
448 curr_bp = be_new_Copy(arch_env->bp->reg_class, irg, bl, curr_sp);
449 be_set_constr_single_reg(curr_bp, BE_OUT_POS(0), arch_env->bp);
450 arch_set_irn_register(arch_env, curr_bp, arch_env->bp);
451 be_node_set_flags(curr_bp, BE_OUT_POS(0), arch_irn_flags_ignore);
453 /* beware: the copy must be done before any other sp use */
454 curr_sp = be_new_CopyKeep_single(arch_env->sp->reg_class, irg, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
455 be_set_constr_single_reg(curr_sp, BE_OUT_POS(0), arch_env->sp);
456 arch_set_irn_register(arch_env, curr_sp, arch_env->sp);
457 be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore);
459 be_abi_reg_map_set(reg_map, arch_env->sp, curr_sp);
460 be_abi_reg_map_set(reg_map, arch_env->bp, curr_bp);
469 * Generate the routine epilogue.
470 * @param self The callback object.
471 * @param bl The block for the epilog
472 * @param mem A pointer to the mem node. Update this if you define new memory.
473 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
474 * @return The register which shall be used as a stack frame base.
476 * All nodes which define registers in @p reg_map must keep @p reg_map current.
478 static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map)
480 ia32_abi_env_t *env = self;
481 const arch_env_t *arch_env = env->aenv;
482 ir_node *curr_sp = be_abi_reg_map_get(reg_map, arch_env->sp);
483 ir_node *curr_bp = be_abi_reg_map_get(reg_map, arch_env->bp);
484 ir_graph *irg = env->irg;
486 if (env->flags.try_omit_fp) {
487 /* simply remove the stack frame here */
488 curr_sp = be_new_IncSP(arch_env->sp, irg, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK, 0);
490 ir_mode *mode_bp = arch_env->bp->reg_class->mode;
492 if (ia32_cg_config.use_leave) {
496 leave = new_rd_ia32_Leave(NULL, irg, bl, curr_sp, curr_bp);
497 set_ia32_flags(leave, arch_irn_flags_ignore);
498 curr_bp = new_r_Proj(irg, bl, leave, mode_bp, pn_ia32_Leave_frame);
499 curr_sp = new_r_Proj(irg, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
503 /* the old SP is not needed anymore (kill the proj) */
504 assert(is_Proj(curr_sp));
507 /* copy ebp to esp */
508 curr_sp = be_new_Copy(&ia32_reg_classes[CLASS_ia32_gp], irg, bl, curr_bp);
509 arch_set_irn_register(arch_env, curr_sp, arch_env->sp);
510 be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore);
513 pop = new_rd_ia32_Pop(NULL, env->irg, bl, *mem, curr_sp);
514 set_ia32_flags(pop, arch_irn_flags_ignore);
515 curr_bp = new_r_Proj(irg, bl, pop, mode_bp, pn_ia32_Pop_res);
516 curr_sp = new_r_Proj(irg, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
518 *mem = new_r_Proj(irg, bl, pop, mode_M, pn_ia32_Pop_M);
520 arch_set_irn_register(arch_env, curr_sp, arch_env->sp);
521 arch_set_irn_register(arch_env, curr_bp, arch_env->bp);
524 be_abi_reg_map_set(reg_map, arch_env->sp, curr_sp);
525 be_abi_reg_map_set(reg_map, arch_env->bp, curr_bp);
529 * Initialize the callback object.
530 * @param call The call object.
531 * @param aenv The architecture environment.
532 * @param irg The graph with the method.
533 * @return Some pointer. This pointer is passed to all other callback functions as self object.
535 static void *ia32_abi_init(const be_abi_call_t *call, const arch_env_t *aenv, ir_graph *irg)
537 ia32_abi_env_t *env = xmalloc(sizeof(env[0]));
538 be_abi_call_flags_t fl = be_abi_call_get_flags(call);
539 env->flags = fl.bits;
546 * Destroy the callback object.
547 * @param self The callback object.
549 static void ia32_abi_done(void *self) {
554 * Produces the type which sits between the stack args and the locals on the stack.
555 * it will contain the return address and space to store the old base pointer.
556 * @return The Firm type modeling the ABI between type.
558 static ir_type *ia32_abi_get_between_type(void *self)
560 #define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
561 static ir_type *between_type = NULL;
564 if (! between_type) {
565 ir_entity *ret_addr_ent;
566 ir_type *ret_addr_type;
568 ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_Iu);
569 between_type = new_type_struct(IDENT("ia32_between_type"));
570 ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type);
572 set_entity_offset(ret_addr_ent, 0);
573 set_type_size_bytes(between_type, get_type_size_bytes(ret_addr_type));
574 set_type_state(between_type, layout_fixed);
582 * Get the estimated cycle count for @p irn.
584 * @param self The this pointer.
585 * @param irn The node.
587 * @return The estimated cycle count for this operation
589 static int ia32_get_op_estimated_cost(const ir_node *irn)
592 ia32_op_type_t op_tp;
596 if (!is_ia32_irn(irn))
599 assert(is_ia32_irn(irn));
601 cost = get_ia32_latency(irn);
602 op_tp = get_ia32_op_type(irn);
604 if (is_ia32_CopyB(irn)) {
607 else if (is_ia32_CopyB_i(irn)) {
608 int size = get_ia32_copyb_size(irn);
609 cost = 20 + (int)ceil((4/3) * size);
611 /* in case of address mode operations add additional cycles */
612 else if (op_tp == ia32_AddrModeD || op_tp == ia32_AddrModeS) {
614 In case of stack access and access to fixed addresses add 5 cycles
615 (we assume they are in cache), other memory operations cost 20
618 if (is_ia32_use_frame(irn) || (
619 is_ia32_NoReg_GP(get_irn_n(irn, n_ia32_base)) &&
620 is_ia32_NoReg_GP(get_irn_n(irn, n_ia32_index))
632 * Returns the inverse operation if @p irn, recalculating the argument at position @p i.
634 * @param irn The original operation
635 * @param i Index of the argument we want the inverse operation to yield
636 * @param inverse struct to be filled with the resulting inverse op
637 * @param obstack The obstack to use for allocation of the returned nodes array
638 * @return The inverse operation or NULL if operation invertible
640 static arch_inverse_t *ia32_get_inverse(const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) {
644 ir_node *block, *noreg, *nomem;
647 /* we cannot invert non-ia32 irns */
648 if (! is_ia32_irn(irn))
651 /* operand must always be a real operand (not base, index or mem) */
652 if (i != n_ia32_binary_left && i != n_ia32_binary_right)
655 /* we don't invert address mode operations */
656 if (get_ia32_op_type(irn) != ia32_Normal)
659 /* TODO: adjust for new immediates... */
660 ir_fprintf(stderr, "TODO: fix get_inverse for new immediates (%+F)\n",
664 irg = get_irn_irg(irn);
665 block = get_nodes_block(irn);
666 mode = get_irn_mode(irn);
667 irn_mode = get_irn_mode(irn);
668 noreg = get_irn_n(irn, 0);
669 nomem = new_r_NoMem(irg);
670 dbg = get_irn_dbg_info(irn);
672 /* initialize structure */
673 inverse->nodes = obstack_alloc(obst, 2 * sizeof(inverse->nodes[0]));
677 switch (get_ia32_irn_opcode(irn)) {
680 if (get_ia32_immop_type(irn) == ia32_ImmConst) {
681 /* we have an add with a const here */
682 /* invers == add with negated const */
683 inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
685 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
686 set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
687 set_ia32_commutative(inverse->nodes[0]);
689 else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
690 /* we have an add with a symconst here */
691 /* invers == sub with const */
692 inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
694 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
697 /* normal add: inverse == sub */
698 inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, i ^ 1));
705 if (get_ia32_immop_type(irn) != ia32_ImmNone) {
706 /* we have a sub with a const/symconst here */
707 /* invers == add with this const */
708 inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
709 inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
710 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
714 if (i == n_ia32_binary_left) {
715 inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, 3));
718 inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, nomem, get_irn_n(irn, n_ia32_binary_left), (ir_node*) irn);
726 if (get_ia32_immop_type(irn) != ia32_ImmNone) {
727 /* xor with const: inverse = xor */
728 inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
729 inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
730 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
734 inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, nomem, (ir_node *) irn, get_irn_n(irn, i));
740 inverse->nodes[0] = new_rd_ia32_Not(dbg, irg, block, (ir_node*) irn);
745 inverse->nodes[0] = new_rd_ia32_Neg(dbg, irg, block, (ir_node*) irn);
750 /* inverse operation not supported */
757 static ir_mode *get_spill_mode_mode(const ir_mode *mode)
759 if(mode_is_float(mode))
766 * Get the mode that should be used for spilling value node
768 static ir_mode *get_spill_mode(const ir_node *node)
770 ir_mode *mode = get_irn_mode(node);
771 return get_spill_mode_mode(mode);
775 * Checks whether an addressmode reload for a node with mode mode is compatible
776 * with a spillslot of mode spill_mode
778 static int ia32_is_spillmode_compatible(const ir_mode *mode, const ir_mode *spillmode)
780 if(mode_is_float(mode)) {
781 return mode == spillmode;
788 * Check if irn can load its operand at position i from memory (source addressmode).
789 * @param self Pointer to irn ops itself
790 * @param irn The irn to be checked
791 * @param i The operands position
792 * @return Non-Zero if operand can be loaded
794 static int ia32_possible_memory_operand(const ir_node *irn, unsigned int i) {
795 ir_node *op = get_irn_n(irn, i);
796 const ir_mode *mode = get_irn_mode(op);
797 const ir_mode *spillmode = get_spill_mode(op);
800 (i != n_ia32_binary_left && i != n_ia32_binary_right) || /* a "real" operand position must be requested */
801 ! is_ia32_irn(irn) || /* must be an ia32 irn */
802 get_ia32_am_arity(irn) != ia32_am_binary || /* must be a binary operation TODO is this necessary? */
803 get_ia32_op_type(irn) != ia32_Normal || /* must not already be a addressmode irn */
804 ! (get_ia32_am_support(irn) & ia32_am_Source) || /* must be capable of source addressmode */
805 ! ia32_is_spillmode_compatible(mode, spillmode) ||
806 is_ia32_use_frame(irn)) /* must not already use frame */
809 if (i == n_ia32_binary_left) {
810 const arch_register_req_t *req;
811 if(!is_ia32_commutative(irn))
813 /* we can't swap left/right for limited registers
814 * (As this (currently) breaks constraint handling copies)
816 req = get_ia32_in_req(irn, n_ia32_binary_left);
817 if (req->type & arch_register_req_type_limited) {
825 static void ia32_perform_memory_operand(ir_node *irn, ir_node *spill,
829 ir_mode *dest_op_mode;
831 ia32_code_gen_t *cg = ia32_current_cg;
833 assert(ia32_possible_memory_operand(irn, i) && "Cannot perform memory operand change");
835 if (i == n_ia32_binary_left) {
836 ia32_swap_left_right(irn);
839 set_ia32_op_type(irn, ia32_AddrModeS);
841 load_mode = get_irn_mode(get_irn_n(irn, i));
842 dest_op_mode = get_ia32_ls_mode(irn);
843 if (get_mode_size_bits(load_mode) <= get_mode_size_bits(dest_op_mode)) {
844 set_ia32_ls_mode(irn, load_mode);
846 set_ia32_use_frame(irn);
847 set_ia32_need_stackent(irn);
849 set_irn_n(irn, n_ia32_base, get_irg_frame(get_irn_irg(irn)));
850 set_irn_n(irn, n_ia32_binary_right, ia32_get_admissible_noreg(cg, irn, n_ia32_binary_right));
851 set_irn_n(irn, n_ia32_mem, spill);
852 set_ia32_is_reload(irn);
854 /* immediates are only allowed on the right side */
855 if (i == n_ia32_binary_left && is_ia32_Immediate(get_irn_n(irn, n_ia32_binary_left))) {
856 ia32_swap_left_right(irn);
860 static const be_abi_callbacks_t ia32_abi_callbacks = {
863 ia32_abi_get_between_type,
864 ia32_abi_dont_save_regs,
869 /* fill register allocator interface */
871 static const arch_irn_ops_t ia32_irn_ops = {
872 ia32_get_irn_reg_req,
877 ia32_get_frame_entity,
878 ia32_set_frame_entity,
879 ia32_set_frame_offset,
882 ia32_get_op_estimated_cost,
883 ia32_possible_memory_operand,
884 ia32_perform_memory_operand,
887 /**************************************************
890 * ___ ___ __| | ___ __ _ ___ _ __ _| |_
891 * / __/ _ \ / _` |/ _ \/ _` |/ _ \ '_ \ | | _|
892 * | (_| (_) | (_| | __/ (_| | __/ | | | | | |
893 * \___\___/ \__,_|\___|\__, |\___|_| |_| |_|_|
896 **************************************************/
898 static ir_entity *mcount = NULL;
900 #define ID(s) new_id_from_chars(s, sizeof(s) - 1)
902 static void ia32_before_abi(void *self) {
903 lower_mode_b_config_t lower_mode_b_config = {
904 mode_Iu, /* lowered mode */
905 mode_Bu, /* prefered mode for set */
906 0, /* don't lower direct compares */
908 ia32_code_gen_t *cg = self;
910 ir_lower_mode_b(cg->irg, &lower_mode_b_config);
912 be_dump(cg->irg, "-lower_modeb", dump_ir_block_graph_sched);
914 if (mcount == NULL) {
915 ir_type *tp = new_type_method(ID("FKT.mcount"), 0, 0);
916 mcount = new_entity(get_glob_type(), ID("mcount"), tp);
917 /* FIXME: enter the right ld_ident here */
918 set_entity_ld_ident(mcount, get_entity_ident(mcount));
919 set_entity_visibility(mcount, visibility_external_allocated);
921 instrument_initcall(cg->irg, mcount);
925 transformer_t be_transformer = TRANSFORMER_DEFAULT;
928 * Transforms the standard firm graph into
931 static void ia32_prepare_graph(void *self) {
932 ia32_code_gen_t *cg = self;
934 /* do local optimizations */
935 optimize_graph_df(cg->irg);
937 /* TODO: we often have dead code reachable through out-edges here. So for
938 * now we rebuild edges (as we need correct user count for code selection)
941 edges_deactivate(cg->irg);
942 edges_activate(cg->irg);
946 be_dump(cg->irg, "-pre_transform", dump_ir_block_graph_sched);
948 switch (be_transformer) {
949 case TRANSFORMER_DEFAULT:
950 /* transform remaining nodes into assembler instructions */
951 ia32_transform_graph(cg);
955 case TRANSFORMER_PBQP:
956 // disable CSE, because of two-step node-construction
959 /* transform nodes into assembler instructions by PBQP magic */
960 ia32_transform_graph_by_pbqp(cg);
966 default: panic("invalid transformer");
969 /* do local optimizations (mainly CSE) */
970 optimize_graph_df(cg->irg);
973 be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);
975 /* optimize address mode */
976 ia32_optimize_graph(cg);
979 be_dump(cg->irg, "-am", dump_ir_block_graph_sched);
981 /* do code placement, to optimize the position of constants */
985 be_dump(cg->irg, "-place", dump_ir_block_graph_sched);
989 * Dummy functions for hooks we don't need but which must be filled.
991 static void ia32_before_sched(void *self) {
995 static void turn_back_am(ir_node *node)
997 ir_graph *irg = current_ir_graph;
998 dbg_info *dbgi = get_irn_dbg_info(node);
999 ir_node *block = get_nodes_block(node);
1000 ir_node *base = get_irn_n(node, n_ia32_base);
1001 ir_node *index = get_irn_n(node, n_ia32_index);
1002 ir_node *mem = get_irn_n(node, n_ia32_mem);
1003 ir_node *noreg = ia32_new_NoReg_gp(ia32_current_cg);
1007 const ir_edge_t *edge;
1009 load = new_rd_ia32_Load(dbgi, irg, block, base, index, mem);
1010 load_res = new_rd_Proj(dbgi, irg, block, load, mode_Iu, pn_ia32_Load_res);
1012 ia32_copy_am_attrs(load, node);
1013 set_irn_n(node, n_ia32_mem, new_NoMem());
1015 switch (get_ia32_am_arity(node)) {
1017 set_irn_n(node, n_ia32_unary_op, load_res);
1020 case ia32_am_binary:
1021 if (is_ia32_Immediate(get_irn_n(node, n_ia32_Cmp_right))) {
1022 assert(is_ia32_Cmp(node) || is_ia32_Cmp8Bit(node) ||
1023 is_ia32_Test(node) || is_ia32_Test8Bit(node));
1024 set_irn_n(node, n_ia32_binary_left, load_res);
1026 set_irn_n(node, n_ia32_binary_right, load_res);
1030 case ia32_am_ternary:
1031 set_irn_n(node, n_ia32_binary_right, load_res);
1036 set_irn_n(node, n_ia32_base, noreg);
1037 set_irn_n(node, n_ia32_index, noreg);
1038 set_ia32_am_offs_int(node, 0);
1039 set_ia32_am_sc(node, NULL);
1040 set_ia32_am_scale(node, 0);
1041 clear_ia32_am_sc_sign(node);
1043 /* rewire mem-proj */
1044 if (get_irn_mode(node) == mode_T) {
1046 foreach_out_edge(node, edge) {
1047 ir_node *out = get_edge_src_irn(edge);
1048 if(get_irn_mode(out) == mode_M) {
1049 assert(mem_proj == NULL);
1054 if(mem_proj != NULL) {
1055 set_Proj_pred(mem_proj, load);
1056 set_Proj_proj(mem_proj, pn_ia32_Load_M);
1060 set_ia32_op_type(node, ia32_Normal);
1061 if (sched_is_scheduled(node))
1062 sched_add_before(node, load);
1065 static ir_node *flags_remat(ir_node *node, ir_node *after)
1067 /* we should turn back source address mode when rematerializing nodes */
1068 ia32_op_type_t type;
1072 if (is_Block(after)) {
1075 block = get_nodes_block(after);
1078 type = get_ia32_op_type(node);
1080 case ia32_AddrModeS:
1084 case ia32_AddrModeD:
1085 /* TODO implement this later... */
1086 panic("found DestAM with flag user %+F this should not happen", node);
1089 default: assert(type == ia32_Normal); break;
1092 copy = exact_copy(node);
1093 set_nodes_block(copy, block);
1094 sched_add_after(after, copy);
1100 * Called before the register allocator.
1101 * Calculate a block schedule here. We need it for the x87
1102 * simulator and the emitter.
1104 static void ia32_before_ra(void *self) {
1105 ia32_code_gen_t *cg = self;
1107 /* setup fpu rounding modes */
1108 ia32_setup_fpu_mode(cg);
1111 be_sched_fix_flags(cg->birg, &ia32_reg_classes[CLASS_ia32_flags],
1114 ia32_add_missing_keeps(cg);
1119 * Transforms a be_Reload into a ia32 Load.
1121 static void transform_to_Load(ia32_code_gen_t *cg, ir_node *node) {
1122 ir_graph *irg = get_irn_irg(node);
1123 dbg_info *dbg = get_irn_dbg_info(node);
1124 ir_node *block = get_nodes_block(node);
1125 ir_entity *ent = be_get_frame_entity(node);
1126 ir_mode *mode = get_irn_mode(node);
1127 ir_mode *spillmode = get_spill_mode(node);
1128 ir_node *noreg = ia32_new_NoReg_gp(cg);
1129 ir_node *sched_point = NULL;
1130 ir_node *ptr = get_irg_frame(irg);
1131 ir_node *mem = get_irn_n(node, be_pos_Reload_mem);
1132 ir_node *new_op, *proj;
1133 const arch_register_t *reg;
1135 if (sched_is_scheduled(node)) {
1136 sched_point = sched_prev(node);
1139 if (mode_is_float(spillmode)) {
1140 if (ia32_cg_config.use_sse2)
1141 new_op = new_rd_ia32_xLoad(dbg, irg, block, ptr, noreg, mem, spillmode);
1143 new_op = new_rd_ia32_vfld(dbg, irg, block, ptr, noreg, mem, spillmode);
1145 else if (get_mode_size_bits(spillmode) == 128) {
1146 /* Reload 128 bit SSE registers */
1147 new_op = new_rd_ia32_xxLoad(dbg, irg, block, ptr, noreg, mem);
1150 new_op = new_rd_ia32_Load(dbg, irg, block, ptr, noreg, mem);
1152 set_ia32_op_type(new_op, ia32_AddrModeS);
1153 set_ia32_ls_mode(new_op, spillmode);
1154 set_ia32_frame_ent(new_op, ent);
1155 set_ia32_use_frame(new_op);
1157 DBG_OPT_RELOAD2LD(node, new_op);
1159 proj = new_rd_Proj(dbg, irg, block, new_op, mode, pn_ia32_Load_res);
1162 sched_add_after(sched_point, new_op);
1166 /* copy the register from the old node to the new Load */
1167 reg = arch_get_irn_register(cg->arch_env, node);
1168 arch_set_irn_register(cg->arch_env, new_op, reg);
1170 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node));
1172 exchange(node, proj);
1176 * Transforms a be_Spill node into a ia32 Store.
1178 static void transform_to_Store(ia32_code_gen_t *cg, ir_node *node) {
1179 ir_graph *irg = get_irn_irg(node);
1180 dbg_info *dbg = get_irn_dbg_info(node);
1181 ir_node *block = get_nodes_block(node);
1182 ir_entity *ent = be_get_frame_entity(node);
1183 const ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
1184 ir_mode *mode = get_spill_mode(spillval);
1185 ir_node *noreg = ia32_new_NoReg_gp(cg);
1186 ir_node *nomem = new_rd_NoMem(irg);
1187 ir_node *ptr = get_irg_frame(irg);
1188 ir_node *val = get_irn_n(node, be_pos_Spill_val);
1190 ir_node *sched_point = NULL;
1192 if (sched_is_scheduled(node)) {
1193 sched_point = sched_prev(node);
1196 /* No need to spill unknown values... */
1197 if(is_ia32_Unknown_GP(val) ||
1198 is_ia32_Unknown_VFP(val) ||
1199 is_ia32_Unknown_XMM(val)) {
1204 exchange(node, store);
1208 if (mode_is_float(mode)) {
1209 if (ia32_cg_config.use_sse2)
1210 store = new_rd_ia32_xStore(dbg, irg, block, ptr, noreg, nomem, val);
1212 store = new_rd_ia32_vfst(dbg, irg, block, ptr, noreg, nomem, val, mode);
1213 } else if (get_mode_size_bits(mode) == 128) {
1214 /* Spill 128 bit SSE registers */
1215 store = new_rd_ia32_xxStore(dbg, irg, block, ptr, noreg, nomem, val);
1216 } else if (get_mode_size_bits(mode) == 8) {
1217 store = new_rd_ia32_Store8Bit(dbg, irg, block, ptr, noreg, nomem, val);
1219 store = new_rd_ia32_Store(dbg, irg, block, ptr, noreg, nomem, val);
1222 set_ia32_op_type(store, ia32_AddrModeD);
1223 set_ia32_ls_mode(store, mode);
1224 set_ia32_frame_ent(store, ent);
1225 set_ia32_use_frame(store);
1226 SET_IA32_ORIG_NODE(store, ia32_get_old_node_name(cg, node));
1227 DBG_OPT_SPILL2ST(node, store);
1230 sched_add_after(sched_point, store);
1234 exchange(node, store);
1237 static ir_node *create_push(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent) {
1238 ir_graph *irg = get_irn_irg(node);
1239 dbg_info *dbg = get_irn_dbg_info(node);
1240 ir_node *block = get_nodes_block(node);
1241 ir_node *noreg = ia32_new_NoReg_gp(cg);
1242 ir_node *frame = get_irg_frame(irg);
1244 ir_node *push = new_rd_ia32_Push(dbg, irg, block, frame, noreg, mem, noreg, sp);
1246 set_ia32_frame_ent(push, ent);
1247 set_ia32_use_frame(push);
1248 set_ia32_op_type(push, ia32_AddrModeS);
1249 set_ia32_ls_mode(push, mode_Is);
1251 sched_add_before(schedpoint, push);
1255 static ir_node *create_pop(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_entity *ent) {
1256 ir_graph *irg = get_irn_irg(node);
1257 dbg_info *dbg = get_irn_dbg_info(node);
1258 ir_node *block = get_nodes_block(node);
1259 ir_node *noreg = ia32_new_NoReg_gp(cg);
1260 ir_node *frame = get_irg_frame(irg);
1262 ir_node *pop = new_rd_ia32_PopMem(dbg, irg, block, frame, noreg, new_NoMem(), sp);
1264 set_ia32_frame_ent(pop, ent);
1265 set_ia32_use_frame(pop);
1266 set_ia32_op_type(pop, ia32_AddrModeD);
1267 set_ia32_ls_mode(pop, mode_Is);
1269 sched_add_before(schedpoint, pop);
1274 static ir_node* create_spproj(ia32_code_gen_t *cg, ir_node *node, ir_node *pred, int pos) {
1275 ir_graph *irg = get_irn_irg(node);
1276 dbg_info *dbg = get_irn_dbg_info(node);
1277 ir_node *block = get_nodes_block(node);
1278 ir_mode *spmode = mode_Iu;
1279 const arch_register_t *spreg = &ia32_gp_regs[REG_ESP];
1282 sp = new_rd_Proj(dbg, irg, block, pred, spmode, pos);
1283 arch_set_irn_register(cg->arch_env, sp, spreg);
1289 * Transform MemPerm, currently we do this the ugly way and produce
1290 * push/pop into/from memory cascades. This is possible without using
1293 static void transform_MemPerm(ia32_code_gen_t *cg, ir_node *node) {
1294 ir_graph *irg = get_irn_irg(node);
1295 ir_node *block = get_nodes_block(node);
1299 ir_node *sp = be_abi_get_ignore_irn(cg->birg->abi, &ia32_gp_regs[REG_ESP]);
1300 const ir_edge_t *edge;
1301 const ir_edge_t *next;
1304 arity = be_get_MemPerm_entity_arity(node);
1305 pops = alloca(arity * sizeof(pops[0]));
1308 for(i = 0; i < arity; ++i) {
1309 ir_entity *inent = be_get_MemPerm_in_entity(node, i);
1310 ir_entity *outent = be_get_MemPerm_out_entity(node, i);
1311 ir_type *enttype = get_entity_type(inent);
1312 unsigned entsize = get_type_size_bytes(enttype);
1313 unsigned entsize2 = get_type_size_bytes(get_entity_type(outent));
1314 ir_node *mem = get_irn_n(node, i + 1);
1317 /* work around cases where entities have different sizes */
1318 if(entsize2 < entsize)
1320 assert( (entsize == 4 || entsize == 8) && "spillslot on x86 should be 32 or 64 bit");
1322 push = create_push(cg, node, node, sp, mem, inent);
1323 sp = create_spproj(cg, node, push, pn_ia32_Push_stack);
1325 /* add another push after the first one */
1326 push = create_push(cg, node, node, sp, mem, inent);
1327 add_ia32_am_offs_int(push, 4);
1328 sp = create_spproj(cg, node, push, pn_ia32_Push_stack);
1331 set_irn_n(node, i, new_Bad());
1335 for(i = arity - 1; i >= 0; --i) {
1336 ir_entity *inent = be_get_MemPerm_in_entity(node, i);
1337 ir_entity *outent = be_get_MemPerm_out_entity(node, i);
1338 ir_type *enttype = get_entity_type(outent);
1339 unsigned entsize = get_type_size_bytes(enttype);
1340 unsigned entsize2 = get_type_size_bytes(get_entity_type(inent));
1343 /* work around cases where entities have different sizes */
1344 if(entsize2 < entsize)
1346 assert( (entsize == 4 || entsize == 8) && "spillslot on x86 should be 32 or 64 bit");
1348 pop = create_pop(cg, node, node, sp, outent);
1349 sp = create_spproj(cg, node, pop, pn_ia32_Pop_stack);
1351 add_ia32_am_offs_int(pop, 4);
1353 /* add another pop after the first one */
1354 pop = create_pop(cg, node, node, sp, outent);
1355 sp = create_spproj(cg, node, pop, pn_ia32_Pop_stack);
1362 keep = be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in);
1363 sched_add_before(node, keep);
1365 /* exchange memprojs */
1366 foreach_out_edge_safe(node, edge, next) {
1367 ir_node *proj = get_edge_src_irn(edge);
1368 int p = get_Proj_proj(proj);
1372 set_Proj_pred(proj, pops[p]);
1373 set_Proj_proj(proj, pn_ia32_Pop_M);
1376 /* remove memperm */
1377 arity = get_irn_arity(node);
1378 for(i = 0; i < arity; ++i) {
1379 set_irn_n(node, i, new_Bad());
1385 * Block-Walker: Calls the transform functions Spill and Reload.
1387 static void ia32_after_ra_walker(ir_node *block, void *env) {
1388 ir_node *node, *prev;
1389 ia32_code_gen_t *cg = env;
1391 /* beware: the schedule is changed here */
1392 for (node = sched_last(block); !sched_is_begin(node); node = prev) {
1393 prev = sched_prev(node);
1395 if (be_is_Reload(node)) {
1396 transform_to_Load(cg, node);
1397 } else if (be_is_Spill(node)) {
1398 transform_to_Store(cg, node);
1399 } else if (be_is_MemPerm(node)) {
1400 transform_MemPerm(cg, node);
1406 * Collects nodes that need frame entities assigned.
1408 static void ia32_collect_frame_entity_nodes(ir_node *node, void *data)
1410 be_fec_env_t *env = data;
1412 if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
1413 const ir_mode *mode = get_spill_mode_mode(get_irn_mode(node));
1414 int align = get_mode_size_bytes(mode);
1415 be_node_needs_frame_entity(env, node, mode, align);
1416 } else if(is_ia32_irn(node) && get_ia32_frame_ent(node) == NULL
1417 && is_ia32_use_frame(node)) {
1418 if (is_ia32_need_stackent(node) || is_ia32_Load(node)) {
1419 const ir_mode *mode = get_ia32_ls_mode(node);
1420 const ia32_attr_t *attr = get_ia32_attr_const(node);
1423 if (is_ia32_is_reload(node)) {
1424 mode = get_spill_mode_mode(mode);
1427 if(attr->data.need_64bit_stackent) {
1430 if(attr->data.need_32bit_stackent) {
1433 align = get_mode_size_bytes(mode);
1434 be_node_needs_frame_entity(env, node, mode, align);
1435 } else if (is_ia32_vfild(node) || is_ia32_xLoad(node)
1436 || is_ia32_vfld(node)) {
1437 const ir_mode *mode = get_ia32_ls_mode(node);
1439 be_node_needs_frame_entity(env, node, mode, align);
1440 } else if(is_ia32_FldCW(node)) {
1441 /* although 2 byte would be enough 4 byte performs best */
1442 const ir_mode *mode = mode_Iu;
1444 be_node_needs_frame_entity(env, node, mode, align);
1447 assert(is_ia32_St(node) ||
1448 is_ia32_xStoreSimple(node) ||
1449 is_ia32_vfst(node) ||
1450 is_ia32_vfist(node) ||
1451 is_ia32_vfisttp(node) ||
1452 is_ia32_FnstCW(node));
1459 * We transform Spill and Reload here. This needs to be done before
1460 * stack biasing otherwise we would miss the corrected offset for these nodes.
1462 static void ia32_after_ra(void *self) {
1463 ia32_code_gen_t *cg = self;
1464 ir_graph *irg = cg->irg;
1465 be_fec_env_t *fec_env = be_new_frame_entity_coalescer(cg->birg);
1467 /* create and coalesce frame entities */
1468 irg_walk_graph(irg, NULL, ia32_collect_frame_entity_nodes, fec_env);
1469 be_assign_entities(fec_env);
1470 be_free_frame_entity_coalescer(fec_env);
1472 irg_block_walk_graph(irg, NULL, ia32_after_ra_walker, cg);
1476 * Last touchups for the graph before emit: x87 simulation to replace the
1477 * virtual with real x87 instructions, creating a block schedule and peephole
1480 static void ia32_finish(void *self) {
1481 ia32_code_gen_t *cg = self;
1482 ir_graph *irg = cg->irg;
1484 ia32_finish_irg(irg, cg);
1486 /* we might have to rewrite x87 virtual registers */
1487 if (cg->do_x87_sim) {
1488 x87_simulate_graph(cg->arch_env, cg->birg);
1491 /* do peephole optimisations */
1492 ia32_peephole_optimization(cg);
1494 /* create block schedule, this also removes empty blocks which might
1495 * produce critical edges */
1496 cg->blk_sched = be_create_block_schedule(irg, cg->birg->exec_freq);
1500 * Emits the code, closes the output file and frees
1501 * the code generator interface.
1503 static void ia32_codegen(void *self) {
1504 ia32_code_gen_t *cg = self;
1505 ir_graph *irg = cg->irg;
1507 ia32_gen_routine(cg, irg);
1511 /* remove it from the isa */
1514 assert(ia32_current_cg == cg);
1515 ia32_current_cg = NULL;
1517 /* de-allocate code generator */
1518 del_set(cg->reg_set);
1523 * Returns the node representing the PIC base.
1525 static ir_node *ia32_get_pic_base(void *self) {
1527 ia32_code_gen_t *cg = self;
1528 ir_node *get_eip = cg->get_eip;
1529 if (get_eip != NULL)
1532 block = get_irg_start_block(cg->irg);
1533 get_eip = new_rd_ia32_GetEIP(NULL, cg->irg, block);
1534 cg->get_eip = get_eip;
1536 add_irn_dep(get_eip, get_irg_frame(cg->irg));
1541 static void *ia32_cg_init(be_irg_t *birg);
1543 static const arch_code_generator_if_t ia32_code_gen_if = {
1545 ia32_get_pic_base, /* return node used as base in pic code addresses */
1546 ia32_before_abi, /* before abi introduce hook */
1549 ia32_before_sched, /* before scheduling hook */
1550 ia32_before_ra, /* before register allocation hook */
1551 ia32_after_ra, /* after register allocation hook */
1552 ia32_finish, /* called before codegen */
1553 ia32_codegen /* emit && done */
1557 * Initializes a IA32 code generator.
1559 static void *ia32_cg_init(be_irg_t *birg) {
1560 ia32_isa_t *isa = (ia32_isa_t *)birg->main_env->arch_env;
1561 ia32_code_gen_t *cg = xcalloc(1, sizeof(*cg));
1563 cg->impl = &ia32_code_gen_if;
1564 cg->irg = birg->irg;
1565 cg->reg_set = new_set(ia32_cmp_irn_reg_assoc, 1024);
1567 cg->arch_env = birg->main_env->arch_env;
1569 cg->blk_sched = NULL;
1570 cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
1571 cg->gprof = (birg->main_env->options->gprof) ? 1 : 0;
1574 /* Linux gprof implementation needs base pointer */
1575 birg->main_env->options->omit_fp = 0;
1582 if (isa->name_obst) {
1583 obstack_free(isa->name_obst, NULL);
1584 obstack_init(isa->name_obst);
1588 cur_reg_set = cg->reg_set;
1590 assert(ia32_current_cg == NULL);
1591 ia32_current_cg = cg;
1593 return (arch_code_generator_t *)cg;
1598 /*****************************************************************
1599 * ____ _ _ _____ _____
1600 * | _ \ | | | | |_ _|/ ____| /\
1601 * | |_) | __ _ ___| | _____ _ __ __| | | | | (___ / \
1602 * | _ < / _` |/ __| |/ / _ \ '_ \ / _` | | | \___ \ / /\ \
1603 * | |_) | (_| | (__| < __/ | | | (_| | _| |_ ____) / ____ \
1604 * |____/ \__,_|\___|_|\_\___|_| |_|\__,_| |_____|_____/_/ \_\
1606 *****************************************************************/
1609 * Set output modes for GCC
1611 static const tarval_mode_info mo_integer = {
1618 * set the tarval output mode of all integer modes to decimal
1620 static void set_tarval_output_modes(void)
1624 for (i = get_irp_n_modes() - 1; i >= 0; --i) {
1625 ir_mode *mode = get_irp_mode(i);
1627 if (mode_is_int(mode))
1628 set_tarval_mode_output_option(mode, &mo_integer);
1632 const arch_isa_if_t ia32_isa_if;
1635 * The template that generates a new ISA object.
1636 * Note that this template can be changed by command line
1639 static ia32_isa_t ia32_isa_template = {
1641 &ia32_isa_if, /* isa interface implementation */
1642 &ia32_gp_regs[REG_ESP], /* stack pointer register */
1643 &ia32_gp_regs[REG_EBP], /* base pointer register */
1644 -1, /* stack direction */
1645 4, /* power of two stack alignment, 2^4 == 16 */
1646 NULL, /* main environment */
1647 7, /* costs for a spill instruction */
1648 5, /* costs for a reload instruction */
1650 NULL, /* 16bit register names */
1651 NULL, /* 8bit register names */
1652 NULL, /* 8bit register names high */
1655 NULL, /* current code generator */
1656 NULL, /* abstract machine */
1658 NULL, /* name obstack */
1662 static void init_asm_constraints(void)
1664 be_init_default_asm_constraint_flags();
1666 asm_constraint_flags['a'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1667 asm_constraint_flags['b'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1668 asm_constraint_flags['c'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1669 asm_constraint_flags['d'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1670 asm_constraint_flags['D'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1671 asm_constraint_flags['S'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1672 asm_constraint_flags['Q'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1673 asm_constraint_flags['q'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1674 asm_constraint_flags['A'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1675 asm_constraint_flags['l'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1676 asm_constraint_flags['R'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1677 asm_constraint_flags['r'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1678 asm_constraint_flags['p'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1679 asm_constraint_flags['f'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1680 asm_constraint_flags['t'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1681 asm_constraint_flags['u'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1682 asm_constraint_flags['Y'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1683 asm_constraint_flags['X'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1684 asm_constraint_flags['n'] = ASM_CONSTRAINT_FLAG_SUPPORTS_IMMEDIATE;
1685 asm_constraint_flags['g'] = ASM_CONSTRAINT_FLAG_SUPPORTS_IMMEDIATE;
1687 /* no support for autodecrement/autoincrement */
1688 asm_constraint_flags['<'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1689 asm_constraint_flags['>'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1690 /* no float consts */
1691 asm_constraint_flags['E'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1692 asm_constraint_flags['F'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1693 /* makes no sense on x86 */
1694 asm_constraint_flags['s'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1695 /* no support for sse consts yet */
1696 asm_constraint_flags['C'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1697 /* no support for x87 consts yet */
1698 asm_constraint_flags['G'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1699 /* no support for mmx registers yet */
1700 asm_constraint_flags['y'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1701 /* not available in 32bit mode */
1702 asm_constraint_flags['Z'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1703 asm_constraint_flags['e'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1705 /* no code yet to determine register class needed... */
1706 asm_constraint_flags['X'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1710 * Initializes the backend ISA.
1712 static arch_env_t *ia32_init(FILE *file_handle) {
1713 static int inited = 0;
1721 set_tarval_output_modes();
1723 isa = xmalloc(sizeof(*isa));
1724 memcpy(isa, &ia32_isa_template, sizeof(*isa));
1726 if(mode_fpcw == NULL) {
1727 mode_fpcw = new_ir_mode("Fpcw", irms_int_number, 16, 0, irma_none, 0);
1730 ia32_register_init();
1731 ia32_create_opcodes(&ia32_irn_ops);
1733 be_emit_init(file_handle);
1734 isa->regs_16bit = pmap_create();
1735 isa->regs_8bit = pmap_create();
1736 isa->regs_8bit_high = pmap_create();
1737 isa->types = pmap_create();
1738 isa->tv_ent = pmap_create();
1739 isa->cpu = ia32_init_machine_description();
1741 ia32_build_16bit_reg_map(isa->regs_16bit);
1742 ia32_build_8bit_reg_map(isa->regs_8bit);
1743 ia32_build_8bit_reg_map_high(isa->regs_8bit_high);
1746 isa->name_obst = xmalloc(sizeof(*isa->name_obst));
1747 obstack_init(isa->name_obst);
1750 /* enter the ISA object into the intrinsic environment */
1751 intrinsic_env.isa = isa;
1752 ia32_handle_intrinsics();
1754 /* emit asm includes */
1755 n = get_irp_n_asms();
1756 for (i = 0; i < n; ++i) {
1757 be_emit_cstring("#APP\n");
1758 be_emit_ident(get_irp_asm(i));
1759 be_emit_cstring("\n#NO_APP\n");
1762 /* needed for the debug support */
1763 be_gas_emit_switch_section(GAS_SECTION_TEXT);
1764 be_emit_cstring(".Ltext0:\n");
1765 be_emit_write_line();
1767 /* we mark referenced global entities, so we can only emit those which
1768 * are actually referenced. (Note: you mustn't use the type visited flag
1769 * elsewhere in the backend)
1771 inc_master_type_visited();
1773 return &isa->arch_env;
1779 * Closes the output file and frees the ISA structure.
1781 static void ia32_done(void *self) {
1782 ia32_isa_t *isa = self;
1784 /* emit now all global declarations */
1785 be_gas_emit_decls(isa->arch_env.main_env, 1);
1787 pmap_destroy(isa->regs_16bit);
1788 pmap_destroy(isa->regs_8bit);
1789 pmap_destroy(isa->regs_8bit_high);
1790 pmap_destroy(isa->tv_ent);
1791 pmap_destroy(isa->types);
1794 obstack_free(isa->name_obst, NULL);
1804 * Return the number of register classes for this architecture.
1805 * We report always these:
1806 * - the general purpose registers
1807 * - the SSE floating point register set
1808 * - the virtual floating point registers
1809 * - the SSE vector register set
1811 static unsigned ia32_get_n_reg_class(const void *self) {
1817 * Return the register class for index i.
1819 static const arch_register_class_t *ia32_get_reg_class(const void *self,
1823 assert(i < N_CLASSES);
1824 return &ia32_reg_classes[i];
1828 * Get the register class which shall be used to store a value of a given mode.
1829 * @param self The this pointer.
1830 * @param mode The mode in question.
1831 * @return A register class which can hold values of the given mode.
1833 const arch_register_class_t *ia32_get_reg_class_for_mode(const void *self,
1834 const ir_mode *mode)
1838 if (mode_is_float(mode)) {
1839 return ia32_cg_config.use_sse2 ? &ia32_reg_classes[CLASS_ia32_xmm] : &ia32_reg_classes[CLASS_ia32_vfp];
1842 return &ia32_reg_classes[CLASS_ia32_gp];
1846 * Get the ABI restrictions for procedure calls.
1847 * @param self The this pointer.
1848 * @param method_type The type of the method (procedure) in question.
1849 * @param abi The abi object to be modified
1851 static void ia32_get_call_abi(const void *self, ir_type *method_type,
1859 be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);
1863 /* set abi flags for calls */
1864 call_flags.bits.left_to_right = 0; /* always last arg first on stack */
1865 call_flags.bits.store_args_sequential = 0;
1866 /* call_flags.bits.try_omit_fp not changed: can handle both settings */
1867 call_flags.bits.fp_free = 0; /* the frame pointer is fixed in IA32 */
1868 call_flags.bits.call_has_imm = 1; /* No call immediates, we handle this by ourselves */
1870 /* set parameter passing style */
1871 be_abi_call_set_flags(abi, call_flags, &ia32_abi_callbacks);
1873 if (get_method_variadicity(method_type) == variadicity_variadic) {
1874 /* pass all parameters of a variadic function on the stack */
1877 cc = get_method_calling_convention(method_type);
1878 if (get_method_additional_properties(method_type) & mtp_property_private &&
1879 ia32_cg_config.optimize_cc) {
1880 /* set the calling conventions to register parameter */
1881 cc = (cc & ~cc_bits) | cc_reg_param;
1885 /* we have to pop the shadow parameter ourself for compound calls */
1886 if( (get_method_calling_convention(method_type) & cc_compound_ret)
1887 && !(cc & cc_reg_param)) {
1888 pop_amount += get_mode_size_bytes(mode_P_data);
1891 n = get_method_n_params(method_type);
1892 for (i = regnum = 0; i < n; i++) {
1894 const arch_register_t *reg = NULL;
1896 tp = get_method_param_type(method_type, i);
1897 mode = get_type_mode(tp);
1899 reg = ia32_get_RegParam_reg(cc, regnum, mode);
1902 be_abi_call_param_reg(abi, i, reg);
1905 /* Micro optimisation: if the mode is shorter than 4 bytes, load 4 bytes.
1906 * movl has a shorter opcode than mov[sz][bw]l */
1907 ir_mode *load_mode = mode;
1910 unsigned size = get_mode_size_bytes(mode);
1912 if (cc & cc_callee_clear_stk) {
1913 pop_amount += (size + 3U) & ~3U;
1916 if (size < 4) load_mode = mode_Iu;
1919 be_abi_call_param_stack(abi, i, load_mode, 4, 0, 0);
1923 be_abi_call_set_pop(abi, pop_amount);
1925 /* set return registers */
1926 n = get_method_n_ress(method_type);
1928 assert(n <= 2 && "more than two results not supported");
1930 /* In case of 64bit returns, we will have two 32bit values */
1932 tp = get_method_res_type(method_type, 0);
1933 mode = get_type_mode(tp);
1935 assert(!mode_is_float(mode) && "two FP results not supported");
1937 tp = get_method_res_type(method_type, 1);
1938 mode = get_type_mode(tp);
1940 assert(!mode_is_float(mode) && "mixed INT, FP results not supported");
1942 be_abi_call_res_reg(abi, 0, &ia32_gp_regs[REG_EAX]);
1943 be_abi_call_res_reg(abi, 1, &ia32_gp_regs[REG_EDX]);
1946 const arch_register_t *reg;
1948 tp = get_method_res_type(method_type, 0);
1949 assert(is_atomic_type(tp));
1950 mode = get_type_mode(tp);
1952 reg = mode_is_float(mode) ? &ia32_vfp_regs[REG_VF0] : &ia32_gp_regs[REG_EAX];
1954 be_abi_call_res_reg(abi, 0, reg);
1958 int ia32_to_appear_in_schedule(void *block_env, const ir_node *irn)
1962 if(!is_ia32_irn(irn)) {
1966 if(is_ia32_NoReg_GP(irn) || is_ia32_NoReg_VFP(irn) || is_ia32_NoReg_XMM(irn)
1967 || is_ia32_Unknown_GP(irn) || is_ia32_Unknown_XMM(irn)
1968 || is_ia32_Unknown_VFP(irn) || is_ia32_ChangeCW(irn)
1969 || is_ia32_Immediate(irn))
1976 * Initializes the code generator interface.
1978 static const arch_code_generator_if_t *ia32_get_code_generator_if(void *self)
1981 return &ia32_code_gen_if;
1985 * Returns the estimated execution time of an ia32 irn.
1987 static sched_timestep_t ia32_sched_exectime(void *env, const ir_node *irn) {
1989 return is_ia32_irn(irn) ? ia32_get_op_estimated_cost(irn) : 1;
1992 list_sched_selector_t ia32_sched_selector;
1995 * Returns the reg_pressure scheduler with to_appear_in_schedule() overloaded
1997 static const list_sched_selector_t *ia32_get_list_sched_selector(
1998 const void *self, list_sched_selector_t *selector)
2001 memcpy(&ia32_sched_selector, selector, sizeof(ia32_sched_selector));
2002 ia32_sched_selector.exectime = ia32_sched_exectime;
2003 ia32_sched_selector.to_appear_in_schedule = ia32_to_appear_in_schedule;
2004 return &ia32_sched_selector;
2007 static const ilp_sched_selector_t *ia32_get_ilp_sched_selector(const void *self)
2014 * Returns the necessary byte alignment for storing a register of given class.
2016 static int ia32_get_reg_class_alignment(const void *self,
2017 const arch_register_class_t *cls)
2019 ir_mode *mode = arch_register_class_mode(cls);
2020 int bytes = get_mode_size_bytes(mode);
2023 if (mode_is_float(mode) && bytes > 8)
2028 static const be_execution_unit_t ***ia32_get_allowed_execution_units(
2029 const void *self, const ir_node *irn)
2031 static const be_execution_unit_t *_allowed_units_BRANCH[] = {
2032 &ia32_execution_units_BRANCH[IA32_EXECUNIT_TP_BRANCH_BRANCH1],
2033 &ia32_execution_units_BRANCH[IA32_EXECUNIT_TP_BRANCH_BRANCH2],
2036 static const be_execution_unit_t *_allowed_units_GP[] = {
2037 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EAX],
2038 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EBX],
2039 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_ECX],
2040 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EDX],
2041 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_ESI],
2042 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EDI],
2043 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EBP],
2046 static const be_execution_unit_t *_allowed_units_DUMMY[] = {
2047 &be_machine_execution_units_DUMMY[0],
2050 static const be_execution_unit_t **_units_callret[] = {
2051 _allowed_units_BRANCH,
2054 static const be_execution_unit_t **_units_other[] = {
2058 static const be_execution_unit_t **_units_dummy[] = {
2059 _allowed_units_DUMMY,
2062 const be_execution_unit_t ***ret;
2065 if (is_ia32_irn(irn)) {
2066 ret = get_ia32_exec_units(irn);
2068 else if (is_be_node(irn)) {
2069 if (be_is_Call(irn) || be_is_Return(irn)) {
2070 ret = _units_callret;
2072 else if (be_is_Barrier(irn)) {
2087 * Return the abstract ia32 machine.
2089 static const be_machine_t *ia32_get_machine(const void *self) {
2090 const ia32_isa_t *isa = self;
2095 * Return irp irgs in the desired order.
2097 static ir_graph **ia32_get_irg_list(const void *self, ir_graph ***irg_list)
2105 * Allows or disallows the creation of Psi nodes for the given Phi nodes.
2106 * @return 1 if allowed, 0 otherwise
2108 static int ia32_is_psi_allowed(ir_node *sel, ir_node *phi_list, int i, int j)
2111 ir_node *cmp = NULL;
2113 /* we can't handle psis with 64bit compares yet */
2115 cmp = get_Proj_pred(sel);
2117 ir_node *left = get_Cmp_left(cmp);
2118 ir_mode *cmp_mode = get_irn_mode(left);
2119 if (!mode_is_float(cmp_mode) && get_mode_size_bits(cmp_mode) > 32)
2126 if (ia32_cg_config.use_cmov) {
2127 if (ia32_cg_config.use_sse2 && cmp != NULL) {
2128 pn_Cmp pn = get_Proj_proj(sel);
2129 ir_node *cl = get_Cmp_left(cmp);
2130 ir_node *cr = get_Cmp_right(cmp);
2132 /* check the Phi nodes: no 64bit and no floating point cmov */
2133 for (phi = phi_list; phi; phi = get_Phi_next(phi)) {
2134 ir_mode *mode = get_irn_mode(phi);
2136 if (mode_is_float(mode)) {
2137 /* check for Min, Max */
2138 ir_node *t = get_Phi_pred(phi, i);
2139 ir_node *f = get_Phi_pred(phi, j);
2142 /* SSE2 supports Min & Max */
2143 if (pn == pn_Cmp_Lt || pn == pn_Cmp_Le || pn == pn_Cmp_Ge || pn == pn_Cmp_Gt) {
2144 if (cl == t && cr == f) {
2145 /* Psi(a <=/>= b, a, b) => MIN, MAX */
2147 } else if (cl == f && cr == t) {
2148 /* Psi(a <=/>= b, b, a) => MAX, MIN */
2155 } else if (get_mode_size_bits(mode) > 32)
2159 /* check the Phi nodes: no 64bit and no floating point cmov */
2160 for (phi = phi_list; phi; phi = get_Phi_next(phi)) {
2161 ir_mode *mode = get_irn_mode(phi);
2163 if (mode_is_float(mode) || get_mode_size_bits(mode) > 32)
2173 /* No cmov, only some special cases */
2177 /* Now some supported cases here */
2178 pn = get_Proj_proj(sel);
2179 cl = get_Cmp_left(cmp);
2180 cr = get_Cmp_right(cmp);
2182 for (phi = phi_list; phi; phi = get_Phi_next(phi)) {
2183 ir_mode *mode = get_irn_mode(phi);
2187 t = get_Phi_pred(phi, i);
2188 f = get_Phi_pred(phi, j);
2190 /* no floating point and no 64bit yet */
2191 if (mode_is_float(mode) || get_mode_size_bits(mode) > 32)
2194 if (is_Const(t) && is_Const(f)) {
2195 if ((is_Const_null(t) && is_Const_one(f)) || (is_Const_one(t) && is_Const_null(f))) {
2196 /* always support Psi(x, C1, C2) */
2199 } else if (pn == pn_Cmp_Lt || pn == pn_Cmp_Le || pn == pn_Cmp_Ge || pn == pn_Cmp_Gt) {
2202 } else if (cl == t && cr == f) {
2203 /* Psi(a <=/>= b, a, b) => Min, Max */
2205 } else if (cl == f && cr == t) {
2206 /* Psi(a <=/>= b, b, a) => Max, Min */
2209 } else if ((pn & pn_Cmp_Gt) && !mode_is_signed(mode) &&
2210 is_Const(f) && is_Const_null(f) && is_Sub(t) &&
2211 get_Sub_left(t) == cl && get_Sub_right(t) == cr) {
2212 /* Psi(a >=u b, a - b, 0) unsigned Doz */
2214 } else if ((pn & pn_Cmp_Lt) && !mode_is_signed(mode) &&
2215 is_Const(t) && is_Const_null(t) && is_Sub(f) &&
2216 get_Sub_left(f) == cl && get_Sub_right(f) == cr) {
2217 /* Psi(a <=u b, 0, a - b) unsigned Doz */
2219 } else if (is_Const(cr) && is_Const_null(cr)) {
2220 if (cl == t && is_Minus(f) && get_Minus_op(f) == cl) {
2221 /* Psi(a <=/>= 0 ? a : -a) Nabs/Abs */
2223 } else if (cl == f && is_Minus(t) && get_Minus_op(t) == cl) {
2224 /* Psi(a <=/>= 0 ? -a : a) Abs/Nabs */
2232 /* all checks passed */
2238 static asm_constraint_flags_t ia32_parse_asm_constraint(const void *self, const char **c)
2243 /* we already added all our simple flags to the flags modifier list in
2244 * init, so this flag we don't know. */
2245 return ASM_CONSTRAINT_FLAG_INVALID;
2248 static int ia32_is_valid_clobber(const void *self, const char *clobber)
2252 return ia32_get_clobber_register(clobber) != NULL;
2256 * Returns the libFirm configuration parameter for this backend.
2258 static const backend_params *ia32_get_libfirm_params(void) {
2259 static const ir_settings_if_conv_t ifconv = {
2260 4, /* maxdepth, doesn't matter for Psi-conversion */
2261 ia32_is_psi_allowed /* allows or disallows Psi creation for given selector */
2263 static const ir_settings_arch_dep_t ad = {
2264 1, /* also use subs */
2265 4, /* maximum shifts */
2266 31, /* maximum shift amount */
2267 ia32_evaluate_insn, /* evaluate the instruction sequence */
2269 1, /* allow Mulhs */
2270 1, /* allow Mulus */
2271 32 /* Mulh allowed up to 32 bit */
2273 static backend_params p = {
2274 1, /* need dword lowering */
2275 1, /* support inline assembly */
2276 0, /* no immediate floating point mode. */
2277 NULL, /* no additional opcodes */
2278 NULL, /* will be set later */
2279 ia32_create_intrinsic_fkt,
2280 &intrinsic_env, /* context for ia32_create_intrinsic_fkt */
2281 NULL, /* will be set below */
2282 NULL /* will be set below */
2285 ia32_setup_cg_config();
2287 /* doesn't really belong here, but this is the earliest place the backend
2289 init_asm_constraints();
2292 p.if_conv_info = &ifconv;
2296 static const lc_opt_enum_int_items_t gas_items[] = {
2297 { "elf", GAS_FLAVOUR_ELF },
2298 { "mingw", GAS_FLAVOUR_MINGW },
2299 { "yasm", GAS_FLAVOUR_YASM },
2300 { "macho", GAS_FLAVOUR_MACH_O },
2304 static lc_opt_enum_int_var_t gas_var = {
2305 (int*) &be_gas_flavour, gas_items
2308 static const lc_opt_enum_int_items_t transformer_items[] = {
2309 { "default", TRANSFORMER_DEFAULT },
2310 #ifdef FIRM_GRGEN_BE
2311 { "pbqp", TRANSFORMER_PBQP },
2316 static lc_opt_enum_int_var_t transformer_var = {
2317 (int*)&be_transformer, transformer_items
2320 static const lc_opt_table_entry_t ia32_options[] = {
2321 LC_OPT_ENT_ENUM_INT("gasmode", "set the GAS compatibility mode", &gas_var),
2322 LC_OPT_ENT_ENUM_INT("transformer", "the transformer used for code selection", &transformer_var),
2323 LC_OPT_ENT_INT("stackalign", "set power of two stack alignment for calls",
2324 &ia32_isa_template.arch_env.stack_alignment),
2328 const arch_isa_if_t ia32_isa_if = {
2331 ia32_get_n_reg_class,
2333 ia32_get_reg_class_for_mode,
2335 ia32_get_code_generator_if,
2336 ia32_get_list_sched_selector,
2337 ia32_get_ilp_sched_selector,
2338 ia32_get_reg_class_alignment,
2339 ia32_get_libfirm_params,
2340 ia32_get_allowed_execution_units,
2343 ia32_parse_asm_constraint,
2344 ia32_is_valid_clobber
2347 void ia32_init_emitter(void);
2348 void ia32_init_finish(void);
2349 void ia32_init_optimize(void);
2350 void ia32_init_transform(void);
2351 void ia32_init_x87(void);
2353 void be_init_arch_ia32(void)
2355 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
2356 lc_opt_entry_t *ia32_grp = lc_opt_get_grp(be_grp, "ia32");
2358 lc_opt_add_table(ia32_grp, ia32_options);
2359 be_register_isa_if("ia32", &ia32_isa_if);
2361 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.cg");
2363 ia32_init_emitter();
2365 ia32_init_optimize();
2366 ia32_init_transform();
2368 ia32_init_architecture();
2371 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_ia32);