/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   This is the main ia32 firm backend driver.
 * @author  Christian Wuerdig
 */
#include <math.h>    /* ceil(), used by the cost estimation below */

#include "lc_opts_enum.h"

#include "pseudo_irg.h"
#include "iredges_t.h"
#include "iroptimize.h"
#include "instrument.h"

#include "../benode.h"
#include "../belower.h"
#include "../besched.h"
#include "../beirgmod.h"
#include "../be_dbgout.h"
#include "../beblocksched.h"
#include "../bemachine.h"
#include "../beilpsched.h"
#include "../bespillslots.h"
#include "../bemodule.h"
#include "../begnuas.h"
#include "../bestate.h"
#include "../beflags.h"
#include "../betranshlp.h"
#include "../belistsched.h"

#include "bearch_ia32_t.h"

#include "ia32_new_nodes.h"
#include "gen_ia32_regalloc_if.h"
#include "gen_ia32_machine.h"
#include "ia32_common_transform.h"
#include "ia32_transform.h"
#include "ia32_emitter.h"
#include "ia32_map_regs.h"
#include "ia32_optimize.h"
#include "ia32_dbg_stat.h"
#include "ia32_finish.h"
#include "ia32_util.h"
#include "ia32_architecture.h"
#include "ia32_pbqp_transform.h"
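/* be_transformer selects the instruction selector used in ia32_prepare_graph():
 * the default hand-written transformer, or (when built with FIRM_GRGEN_BE) the
 * PBQP/random based transformers. It can be switched with the "transformer"
 * backend option registered at the end of this file. */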
transformer_t be_transformer = TRANSFORMER_DEFAULT;

DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

static set *cur_reg_set = NULL;

ir_mode         *mode_fpcw       = NULL;
ia32_code_gen_t *ia32_current_cg = NULL;

/** The current omit-fp state */
static unsigned   ia32_curr_fp_ommitted  = 0;
static ir_type   *omit_fp_between_type   = NULL;
static ir_type   *between_type           = NULL;
static ir_entity *old_bp_ent             = NULL;
static ir_entity *ret_addr_ent           = NULL;
static ir_entity *omit_fp_ret_addr_ent   = NULL;

/**
 * The environment for the intrinsic mapping.
 */
static ia32_intrinsic_env_t intrinsic_env = {
	NULL,    /* the irg these entities belong to */
	NULL,    /* entity for __divdi3 library call */
	NULL,    /* entity for __moddi3 library call */
	NULL,    /* entity for __udivdi3 library call */
	NULL,    /* entity for __umoddi3 library call */
};
typedef ir_node *(*create_const_node_func) (dbg_info *dbg, ir_node *block);

/**
 * Used to create per-graph unique pseudo nodes.
 */
static inline ir_node *create_const(ia32_code_gen_t *cg, ir_node **place,
                                    create_const_node_func func,
                                    const arch_register_t* reg)
{
	ir_node *block, *res;

	if (*place != NULL)
		return *place;

	block = get_irg_start_block(cg->irg);
	res   = func(NULL, block);
	arch_set_irn_register(res, reg);
	*place = res;
	return res;
}
148 /* Creates the unique per irg GP NoReg node. */
149 ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg)
151 return create_const(cg, &cg->noreg_gp, new_bd_ia32_NoReg_GP,
152 &ia32_gp_regs[REG_GP_NOREG]);
155 ir_node *ia32_new_NoReg_vfp(ia32_code_gen_t *cg)
157 return create_const(cg, &cg->noreg_vfp, new_bd_ia32_NoReg_VFP,
158 &ia32_vfp_regs[REG_VFP_NOREG]);
161 ir_node *ia32_new_NoReg_xmm(ia32_code_gen_t *cg)
163 return create_const(cg, &cg->noreg_xmm, new_bd_ia32_NoReg_XMM,
164 &ia32_xmm_regs[REG_XMM_NOREG]);
167 ir_node *ia32_new_Unknown_gp(ia32_code_gen_t *cg)
169 return create_const(cg, &cg->unknown_gp, new_bd_ia32_Unknown_GP,
170 &ia32_gp_regs[REG_GP_UKNWN]);
173 ir_node *ia32_new_Unknown_vfp(ia32_code_gen_t *cg)
175 return create_const(cg, &cg->unknown_vfp, new_bd_ia32_Unknown_VFP,
176 &ia32_vfp_regs[REG_VFP_UKNWN]);
179 ir_node *ia32_new_Unknown_xmm(ia32_code_gen_t *cg)
181 return create_const(cg, &cg->unknown_xmm, new_bd_ia32_Unknown_XMM,
182 &ia32_xmm_regs[REG_XMM_UKNWN]);
185 ir_node *ia32_new_Fpu_truncate(ia32_code_gen_t *cg)
187 return create_const(cg, &cg->fpu_trunc_mode, new_bd_ia32_ChangeCW,
188 &ia32_fp_cw_regs[REG_FPCW]);
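/* The constructors above hand out one cached pseudo node per irg (see
 * create_const()): NoReg placeholders for unused address operands, Unknown
 * values for each register class, and the ChangeCW node providing the
 * truncating FPU control word. */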
/**
 * Returns the admissible noreg register node for input register pos of node irn.
 */
static ir_node *ia32_get_admissible_noreg(ia32_code_gen_t *cg, ir_node *irn, int pos)
{
	const arch_register_req_t *req = arch_get_register_req(irn, pos);

	assert(req != NULL && "Missing register requirements");
	if (req->cls == &ia32_reg_classes[CLASS_ia32_gp])
		return ia32_new_NoReg_gp(cg);

	if (ia32_cg_config.use_sse2) {
		return ia32_new_NoReg_xmm(cg);
	} else {
		return ia32_new_NoReg_vfp(cg);
	}
}
/**************************************************
 *                  reg alloc if                  *
 **************************************************/
221 static const arch_register_req_t *get_ia32_SwitchJmp_out_req(
222 const ir_node *node, int pos)
226 return arch_no_register_req;
229 static arch_irn_class_t ia32_classify(const ir_node *irn)
231 arch_irn_class_t classification = 0;
233 assert(is_ia32_irn(irn));
235 if (is_ia32_is_reload(irn))
236 classification |= arch_irn_class_reload;
238 if (is_ia32_is_spill(irn))
239 classification |= arch_irn_class_spill;
241 if (is_ia32_is_remat(irn))
242 classification |= arch_irn_class_remat;
244 return classification;
/**
 * The IA32 ABI callback object.
 */
typedef struct ia32_abi_env_t {
	be_abi_call_flags_bits_t  flags;  /**< The call flags. */
	const arch_env_t         *aenv;   /**< The architecture environment. */
	ir_graph                 *irg;    /**< The associated graph. */
} ia32_abi_env_t;
256 static ir_entity *ia32_get_frame_entity(const ir_node *irn)
258 return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
261 static void ia32_set_frame_entity(ir_node *irn, ir_entity *ent)
263 set_ia32_frame_ent(irn, ent);
static void ia32_set_frame_offset(ir_node *irn, int bias)
{
	if (get_ia32_frame_ent(irn) == NULL)
		return;

	if (is_ia32_Pop(irn) || is_ia32_PopMem(irn)) {
		ia32_code_gen_t *cg = ia32_current_cg;
		int omit_fp = be_abi_omit_fp(cg->birg->abi);
		if (omit_fp) {
			/* Pop nodes modify the stack pointer before calculating the
			 * destination address, so fix this here */
			bias -= 4;
		}
	}
	add_ia32_am_offs_int(irn, bias);
}
static int ia32_get_sp_bias(const ir_node *node)
{
	if (is_ia32_Call(node))
		return -(int)get_ia32_call_attr_const(node)->pop;
	if (is_ia32_Push(node))
		return 4;
	if (is_ia32_Pop(node) || is_ia32_PopMem(node))
		return -4;
	return 0;
}
299 * Generate the routine prologue.
301 * @param self The callback object.
302 * @param mem A pointer to the mem node. Update this if you define new memory.
303 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
304 * @param stack_bias Points to the current stack bias, can be modified if needed.
306 * @return The register which shall be used as a stack frame base.
308 * All nodes which define registers in @p reg_map must keep @p reg_map current.
310 static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map, int *stack_bias)
312 ia32_abi_env_t *env = self;
313 ia32_code_gen_t *cg = ia32_current_cg;
314 const arch_env_t *arch_env = env->aenv;
316 ia32_curr_fp_ommitted = env->flags.try_omit_fp;
317 if (! env->flags.try_omit_fp) {
318 ir_node *bl = get_irg_start_block(env->irg);
319 ir_node *curr_sp = be_abi_reg_map_get(reg_map, arch_env->sp);
320 ir_node *curr_bp = be_abi_reg_map_get(reg_map, arch_env->bp);
321 ir_node *noreg = ia32_new_NoReg_gp(cg);
324 /* mark bp register as ignore */
325 be_set_constr_single_reg_out(get_Proj_pred(curr_bp),
326 get_Proj_proj(curr_bp), arch_env->bp, arch_register_req_type_ignore);
329 push = new_bd_ia32_Push(NULL, bl, noreg, noreg, *mem, curr_bp, curr_sp);
330 curr_sp = new_r_Proj(bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
331 *mem = new_r_Proj(bl, push, mode_M, pn_ia32_Push_M);
333 /* the push must have SP out register */
334 arch_set_irn_register(curr_sp, arch_env->sp);
		/* this modifies the stack bias, because we pushed 32bit */
		*stack_bias -= 4;
339 /* move esp to ebp */
340 curr_bp = be_new_Copy(arch_env->bp->reg_class, bl, curr_sp);
341 be_set_constr_single_reg_out(curr_bp, 0, arch_env->bp,
342 arch_register_req_type_ignore);
344 /* beware: the copy must be done before any other sp use */
345 curr_sp = be_new_CopyKeep_single(arch_env->sp->reg_class, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
346 be_set_constr_single_reg_out(curr_sp, 0, arch_env->sp,
347 arch_register_req_type_produces_sp);
		be_abi_reg_map_set(reg_map, arch_env->sp, curr_sp);
		be_abi_reg_map_set(reg_map, arch_env->bp, curr_bp);

		return arch_env->bp;
	}

	return arch_env->sp;
}
359 * Generate the routine epilogue.
360 * @param self The callback object.
361 * @param bl The block for the epilog
362 * @param mem A pointer to the mem node. Update this if you define new memory.
363 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
364 * @return The register which shall be used as a stack frame base.
366 * All nodes which define registers in @p reg_map must keep @p reg_map current.
368 static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map)
370 ia32_abi_env_t *env = self;
371 const arch_env_t *arch_env = env->aenv;
372 ir_node *curr_sp = be_abi_reg_map_get(reg_map, arch_env->sp);
373 ir_node *curr_bp = be_abi_reg_map_get(reg_map, arch_env->bp);
375 if (env->flags.try_omit_fp) {
376 /* simply remove the stack frame here */
377 curr_sp = be_new_IncSP(arch_env->sp, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK, 0);
379 ir_mode *mode_bp = arch_env->bp->reg_class->mode;
381 if (ia32_cg_config.use_leave) {
385 leave = new_bd_ia32_Leave(NULL, bl, curr_bp);
386 curr_bp = new_r_Proj(bl, leave, mode_bp, pn_ia32_Leave_frame);
387 curr_sp = new_r_Proj(bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
391 /* the old SP is not needed anymore (kill the proj) */
392 assert(is_Proj(curr_sp));
395 /* copy ebp to esp */
396 curr_sp = be_new_Copy(&ia32_reg_classes[CLASS_ia32_gp], bl, curr_bp);
397 arch_set_irn_register(curr_sp, arch_env->sp);
398 be_set_constr_single_reg_out(curr_sp, 0, arch_env->sp,
399 arch_register_req_type_ignore);
402 pop = new_bd_ia32_PopEbp(NULL, bl, *mem, curr_sp);
403 curr_bp = new_r_Proj(bl, pop, mode_bp, pn_ia32_Pop_res);
404 curr_sp = new_r_Proj(bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
406 *mem = new_r_Proj(bl, pop, mode_M, pn_ia32_Pop_M);
408 arch_set_irn_register(curr_sp, arch_env->sp);
409 arch_set_irn_register(curr_bp, arch_env->bp);
412 be_abi_reg_map_set(reg_map, arch_env->sp, curr_sp);
413 be_abi_reg_map_set(reg_map, arch_env->bp, curr_bp);
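/* Summary of the two epilogue shapes built above (illustration only):
 *
 *   omit-fp:              with frame pointer:
 *     add esp, <frame>      leave              ; if ia32_cg_config.use_leave
 *                           -- or --
 *                           mov esp, ebp
 *                           pop ebp
 */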
417 * Initialize the callback object.
418 * @param call The call object.
419 * @param aenv The architecture environment.
420 * @param irg The graph with the method.
421 * @return Some pointer. This pointer is passed to all other callback functions as self object.
423 static void *ia32_abi_init(const be_abi_call_t *call, const arch_env_t *aenv, ir_graph *irg)
425 ia32_abi_env_t *env = XMALLOC(ia32_abi_env_t);
426 be_abi_call_flags_t fl = be_abi_call_get_flags(call);
427 env->flags = fl.bits;
434 * Destroy the callback object.
435 * @param self The callback object.
437 static void ia32_abi_done(void *self)
/**
 * Build the between type and entities if not already built.
 */
static void ia32_build_between_type(void)
{
447 #define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
448 if (! between_type) {
449 ir_type *old_bp_type = new_type_primitive(IDENT("bp"), mode_Iu);
450 ir_type *ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_Iu);
452 between_type = new_type_struct(IDENT("ia32_between_type"));
453 old_bp_ent = new_entity(between_type, IDENT("old_bp"), old_bp_type);
454 ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type);
456 set_entity_offset(old_bp_ent, 0);
457 set_entity_offset(ret_addr_ent, get_type_size_bytes(old_bp_type));
458 set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
459 set_type_state(between_type, layout_fixed);
461 omit_fp_between_type = new_type_struct(IDENT("ia32_between_type_omit_fp"));
462 omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type);
464 set_entity_offset(omit_fp_ret_addr_ent, 0);
465 set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
		set_type_state(omit_fp_between_type, layout_fixed);
	}
}
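/* Resulting layout of the between type (sketch, higher addresses on top):
 *
 *     ... stack arguments ...
 *     return address            <- always present
 *     old base pointer          <- only in the non-omit-fp variant
 *     ... local variables ...
 */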
/**
 * Produces the type which sits between the stack args and the locals on the stack.
 * It will contain the return address and space to store the old base pointer.
 * @return The Firm type modeling the ABI between type.
 */
476 static ir_type *ia32_abi_get_between_type(void *self)
478 ia32_abi_env_t *env = self;
480 ia32_build_between_type();
481 return env->flags.try_omit_fp ? omit_fp_between_type : between_type;
485 * Return the stack entity that contains the return address.
487 ir_entity *ia32_get_return_address_entity(void)
489 ia32_build_between_type();
490 return ia32_curr_fp_ommitted ? omit_fp_ret_addr_ent : ret_addr_ent;
494 * Return the stack entity that contains the frame address.
496 ir_entity *ia32_get_frame_address_entity(void)
498 ia32_build_between_type();
499 return ia32_curr_fp_ommitted ? NULL : old_bp_ent;
503 * Get the estimated cycle count for @p irn.
505 * @param self The this pointer.
506 * @param irn The node.
508 * @return The estimated cycle count for this operation
510 static int ia32_get_op_estimated_cost(const ir_node *irn)
513 ia32_op_type_t op_tp;
517 if (!is_ia32_irn(irn))
520 assert(is_ia32_irn(irn));
522 cost = get_ia32_latency(irn);
523 op_tp = get_ia32_op_type(irn);
525 if (is_ia32_CopyB(irn)) {
528 else if (is_ia32_CopyB_i(irn)) {
529 int size = get_ia32_copyb_size(irn);
		cost = 20 + (int)ceil((4.0 / 3.0) * size);
	}
532 /* in case of address mode operations add additional cycles */
533 else if (op_tp == ia32_AddrModeD || op_tp == ia32_AddrModeS) {
		/* In case of stack access and access to fixed addresses add 5 cycles
		 * (we assume they are in cache), other memory operations cost 20 cycles. */
		if (is_ia32_use_frame(irn) || (
		        is_ia32_NoReg_GP(get_irn_n(irn, n_ia32_base)) &&
		        is_ia32_NoReg_GP(get_irn_n(irn, n_ia32_index)))) {
			cost += 5;
		} else {
			cost += 20;
		}
	}
	return cost;
}
/**
 * Returns the inverse operation of @p irn, recalculating the argument at position @p i.
 *
 * @param irn      The original operation
 * @param i        Index of the argument we want the inverse operation to yield
 * @param inverse  struct to be filled with the resulting inverse op
 * @param obstack  The obstack to use for allocation of the returned nodes array
 * @return         The inverse operation or NULL if the operation is not invertible
 */
561 static arch_inverse_t *ia32_get_inverse(const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst)
565 ir_node *block, *noreg, *nomem;
568 /* we cannot invert non-ia32 irns */
569 if (! is_ia32_irn(irn))
572 /* operand must always be a real operand (not base, index or mem) */
573 if (i != n_ia32_binary_left && i != n_ia32_binary_right)
576 /* we don't invert address mode operations */
577 if (get_ia32_op_type(irn) != ia32_Normal)
580 /* TODO: adjust for new immediates... */
	ir_fprintf(stderr, "TODO: fix get_inverse for new immediates (%+F)\n",
	           irn);
585 block = get_nodes_block(irn);
586 mode = get_irn_mode(irn);
587 irn_mode = get_irn_mode(irn);
588 noreg = get_irn_n(irn, 0);
590 dbg = get_irn_dbg_info(irn);
592 /* initialize structure */
593 inverse->nodes = obstack_alloc(obst, 2 * sizeof(inverse->nodes[0]));
597 switch (get_ia32_irn_opcode(irn)) {
600 if (get_ia32_immop_type(irn) == ia32_ImmConst) {
601 /* we have an add with a const here */
			/* inverse == add with negated const */
603 inverse->nodes[0] = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
605 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
606 set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
607 set_ia32_commutative(inverse->nodes[0]);
609 else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
610 /* we have an add with a symconst here */
			/* inverse == sub with const */
612 inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
614 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
617 /* normal add: inverse == sub */
618 inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, i ^ 1));
625 if (get_ia32_immop_type(irn) != ia32_ImmNone) {
626 /* we have a sub with a const/symconst here */
			/* inverse == add with this const */
628 inverse->nodes[0] = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
629 inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
630 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
634 if (i == n_ia32_binary_left) {
635 inverse->nodes[0] = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, 3));
638 inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, get_irn_n(irn, n_ia32_binary_left), (ir_node*) irn);
646 if (get_ia32_immop_type(irn) != ia32_ImmNone) {
647 /* xor with const: inverse = xor */
648 inverse->nodes[0] = new_bd_ia32_Xor(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
649 inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
650 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
654 inverse->nodes[0] = new_bd_ia32_Xor(dbg, block, noreg, noreg, nomem, (ir_node *) irn, get_irn_n(irn, i));
660 inverse->nodes[0] = new_bd_ia32_Not(dbg, block, (ir_node*) irn);
665 inverse->nodes[0] = new_bd_ia32_Neg(dbg, block, (ir_node*) irn);
670 /* inverse operation not supported */
677 static ir_mode *get_spill_mode_mode(const ir_mode *mode)
679 if(mode_is_float(mode))
686 * Get the mode that should be used for spilling value node
688 static ir_mode *get_spill_mode(const ir_node *node)
690 ir_mode *mode = get_irn_mode(node);
691 return get_spill_mode_mode(mode);
695 * Checks whether an addressmode reload for a node with mode mode is compatible
696 * with a spillslot of mode spill_mode
698 static int ia32_is_spillmode_compatible(const ir_mode *mode, const ir_mode *spillmode)
700 return !mode_is_float(mode) || mode == spillmode;
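/* Example: a float value is spilled in the (wider) spill mode chosen by
 * get_spill_mode(), so folding a reload of a narrower float mode into an
 * address mode operand would read the wrong number of bytes; integer and
 * pointer modes can always be folded. */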
/**
 * Check if irn can load its operand at position i from memory (source address mode).
 * @param irn  The irn to be checked
 * @param i    The operand's position
 * @return     Non-zero if the operand can be loaded
 */
709 static int ia32_possible_memory_operand(const ir_node *irn, unsigned int i)
711 ir_node *op = get_irn_n(irn, i);
712 const ir_mode *mode = get_irn_mode(op);
713 const ir_mode *spillmode = get_spill_mode(op);
	if (!is_ia32_irn(irn)                              ||  /* must be an ia32 irn */
	    get_ia32_op_type(irn) != ia32_Normal           ||  /* must not already be an address mode irn */
	    !ia32_is_spillmode_compatible(mode, spillmode) ||
	    is_ia32_use_frame(irn))                            /* must not already use frame */
		return 0;
721 switch (get_ia32_am_support(irn)) {
726 if (i != n_ia32_unary_op)
732 case n_ia32_binary_left: {
733 const arch_register_req_t *req;
734 if (!is_ia32_commutative(irn))
737 /* we can't swap left/right for limited registers
738 * (As this (currently) breaks constraint handling copies)
740 req = get_ia32_in_req(irn, n_ia32_binary_left);
741 if (req->type & arch_register_req_type_limited)
746 case n_ia32_binary_right:
755 panic("Unknown AM type");
	/* HACK: must not already use "real" memory.
	 * This can happen for Call and Div */
	if (!is_NoMem(get_irn_n(irn, n_ia32_mem)))
		return 0;

	return 1;
}
766 static void ia32_perform_memory_operand(ir_node *irn, ir_node *spill,
770 ir_mode *dest_op_mode;
772 assert(ia32_possible_memory_operand(irn, i) && "Cannot perform memory operand change");
774 set_ia32_op_type(irn, ia32_AddrModeS);
776 load_mode = get_irn_mode(get_irn_n(irn, i));
777 dest_op_mode = get_ia32_ls_mode(irn);
778 if (get_mode_size_bits(load_mode) <= get_mode_size_bits(dest_op_mode)) {
779 set_ia32_ls_mode(irn, load_mode);
781 set_ia32_use_frame(irn);
782 set_ia32_need_stackent(irn);
784 if (i == n_ia32_binary_left &&
785 get_ia32_am_support(irn) == ia32_am_binary &&
786 /* immediates are only allowed on the right side */
787 !is_ia32_Immediate(get_irn_n(irn, n_ia32_binary_right))) {
788 ia32_swap_left_right(irn);
789 i = n_ia32_binary_right;
792 assert(is_NoMem(get_irn_n(irn, n_ia32_mem)));
794 set_irn_n(irn, n_ia32_base, get_irg_frame(get_irn_irg(irn)));
795 set_irn_n(irn, n_ia32_mem, spill);
796 set_irn_n(irn, i, ia32_get_admissible_noreg(ia32_current_cg, irn, i));
797 set_ia32_is_reload(irn);
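/* Sketch of the effect (operand names are made up for illustration):
 *
 *   before:  v = Load(frame, spill_mem); Add(x, v)
 *   after:   Add(x, [frame + entity])    ; ia32_AddrModeS, mem input = spill
 *
 * The separate reload disappears; the operation reads its operand directly
 * from the spill slot. */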
800 static const be_abi_callbacks_t ia32_abi_callbacks = {
803 ia32_abi_get_between_type,
808 /* register allocator interface */
809 static const arch_irn_ops_t ia32_irn_ops = {
813 ia32_get_frame_entity,
814 ia32_set_frame_entity,
815 ia32_set_frame_offset,
818 ia32_get_op_estimated_cost,
819 ia32_possible_memory_operand,
820 ia32_perform_memory_operand,
/* special register allocator interface for SwitchJmp
   as it possibly has a WIDE range of Proj numbers.
   We don't want to allocate output for register constraints for
   all these. */
static const arch_irn_ops_t ia32_SwitchJmp_irn_ops = {
	/* Note: we also use SwitchJmp_out_req for the inputs too:
	   This is because the bearch API has a conceptual problem at the moment.
	   Querying for negative proj numbers which can happen for switches
	   isn't possible and will result in inputs getting queried */
	get_ia32_SwitchJmp_out_req,
	get_ia32_SwitchJmp_out_req,
835 ia32_get_frame_entity,
836 ia32_set_frame_entity,
837 ia32_set_frame_offset,
840 ia32_get_op_estimated_cost,
841 ia32_possible_memory_operand,
842 ia32_perform_memory_operand,
/**************************************************
 *                   codegen if                   *
 **************************************************/
856 static ir_entity *mcount = NULL;
858 #define ID(s) new_id_from_chars(s, sizeof(s) - 1)
860 static void ia32_before_abi(void *self)
862 lower_mode_b_config_t lower_mode_b_config = {
863 mode_Iu, /* lowered mode */
864 mode_Bu, /* preferred mode for set */
865 0, /* don't lower direct compares */
867 ia32_code_gen_t *cg = self;
869 ir_lower_mode_b(cg->irg, &lower_mode_b_config);
871 be_dump(cg->irg, "-lower_modeb", dump_ir_block_graph_sched);
873 if (mcount == NULL) {
874 ir_type *tp = new_type_method(ID("FKT.mcount"), 0, 0);
875 mcount = new_entity(get_glob_type(), ID("mcount"), tp);
876 /* FIXME: enter the right ld_ident here */
877 set_entity_ld_ident(mcount, get_entity_ident(mcount));
878 set_entity_visibility(mcount, visibility_external_allocated);
880 instrument_initcall(cg->irg, mcount);
/**
 * Transforms the standard firm graph into an ia32 firm graph.
 */
888 static void ia32_prepare_graph(void *self)
890 ia32_code_gen_t *cg = self;
891 ir_graph *irg = cg->irg;
893 switch (be_transformer) {
894 case TRANSFORMER_DEFAULT:
895 /* transform remaining nodes into assembler instructions */
896 ia32_transform_graph(cg);
900 case TRANSFORMER_PBQP:
901 case TRANSFORMER_RAND:
902 /* transform nodes into assembler instructions by PBQP magic */
903 ia32_transform_graph_by_pbqp(cg);
908 panic("invalid transformer");
911 /* do local optimizations (mainly CSE) */
912 optimize_graph_df(cg->irg);
915 be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);
917 /* optimize address mode */
918 ia32_optimize_graph(cg);
920 /* do code placement, to optimize the position of constants */
924 be_dump(cg->irg, "-place", dump_ir_block_graph_sched);
927 ir_node *turn_back_am(ir_node *node)
929 dbg_info *dbgi = get_irn_dbg_info(node);
930 ir_node *block = get_nodes_block(node);
931 ir_node *base = get_irn_n(node, n_ia32_base);
932 ir_node *index = get_irn_n(node, n_ia32_index);
933 ir_node *mem = get_irn_n(node, n_ia32_mem);
936 ir_node *load = new_bd_ia32_Load(dbgi, block, base, index, mem);
937 ir_node *load_res = new_rd_Proj(dbgi, block, load, mode_Iu, pn_ia32_Load_res);
939 ia32_copy_am_attrs(load, node);
940 if (is_ia32_is_reload(node))
941 set_ia32_is_reload(load);
942 set_irn_n(node, n_ia32_mem, new_NoMem());
944 switch (get_ia32_am_support(node)) {
946 set_irn_n(node, n_ia32_unary_op, load_res);
950 if (is_ia32_Immediate(get_irn_n(node, n_ia32_binary_right))) {
951 set_irn_n(node, n_ia32_binary_left, load_res);
953 set_irn_n(node, n_ia32_binary_right, load_res);
958 panic("Unknown AM type");
960 noreg = ia32_new_NoReg_gp(ia32_current_cg);
961 set_irn_n(node, n_ia32_base, noreg);
962 set_irn_n(node, n_ia32_index, noreg);
963 set_ia32_am_offs_int(node, 0);
964 set_ia32_am_sc(node, NULL);
965 set_ia32_am_scale(node, 0);
966 clear_ia32_am_sc_sign(node);
968 /* rewire mem-proj */
969 if (get_irn_mode(node) == mode_T) {
970 const ir_edge_t *edge;
971 foreach_out_edge(node, edge) {
972 ir_node *out = get_edge_src_irn(edge);
973 if (get_irn_mode(out) == mode_M) {
974 set_Proj_pred(out, load);
975 set_Proj_proj(out, pn_ia32_Load_M);
981 set_ia32_op_type(node, ia32_Normal);
	if (sched_is_scheduled(node))
		sched_add_before(node, load);

	return load;
}
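/* turn_back_am() undoes address-mode folding: the memory operand is loaded by
 * an explicit ia32_Load again and the original node continues with the loaded
 * value, e.g. "add eax, [ebp-8]" conceptually becomes "mov ecx, [ebp-8]; add
 * eax, ecx" (registers here are purely illustrative, allocation happens later). */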
988 static ir_node *flags_remat(ir_node *node, ir_node *after)
990 /* we should turn back source address mode when rematerializing nodes */
	if (is_Block(after)) {
		block = after;
	} else {
		block = get_nodes_block(after);
	}
1001 type = get_ia32_op_type(node);
1003 case ia32_AddrModeS:
1007 case ia32_AddrModeD:
1008 /* TODO implement this later... */
1009 panic("found DestAM with flag user %+F this should not happen", node);
1012 default: assert(type == ia32_Normal); break;
	copy = exact_copy(node);
	set_nodes_block(copy, block);
	sched_add_after(after, copy);

	return copy;
}
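/* Rationale (sketch): the eflags register cannot be spilled and reloaded like
 * an ordinary value, so when the flags are still needed after an intervening
 * definition the flag-producing node is simply re-executed (rematerialised)
 * right before its user; see the be_sched_fix_flags() call in ia32_before_ra()
 * below. */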
1023 * Called before the register allocator.
1025 static void ia32_before_ra(void *self)
1027 ia32_code_gen_t *cg = self;
1029 /* setup fpu rounding modes */
1030 ia32_setup_fpu_mode(cg);
1033 be_sched_fix_flags(cg->birg, &ia32_reg_classes[CLASS_ia32_flags],
1036 ia32_add_missing_keeps(cg);
/**
 * Transforms a be_Reload into an ia32 Load.
 */
1043 static void transform_to_Load(ia32_code_gen_t *cg, ir_node *node)
1045 ir_graph *irg = get_irn_irg(node);
1046 dbg_info *dbg = get_irn_dbg_info(node);
1047 ir_node *block = get_nodes_block(node);
1048 ir_entity *ent = be_get_frame_entity(node);
1049 ir_mode *mode = get_irn_mode(node);
1050 ir_mode *spillmode = get_spill_mode(node);
1051 ir_node *noreg = ia32_new_NoReg_gp(cg);
1052 ir_node *sched_point = NULL;
1053 ir_node *ptr = get_irg_frame(irg);
1054 ir_node *mem = get_irn_n(node, be_pos_Reload_mem);
1055 ir_node *new_op, *proj;
1056 const arch_register_t *reg;
1058 if (sched_is_scheduled(node)) {
1059 sched_point = sched_prev(node);
1062 if (mode_is_float(spillmode)) {
1063 if (ia32_cg_config.use_sse2)
1064 new_op = new_bd_ia32_xLoad(dbg, block, ptr, noreg, mem, spillmode);
1066 new_op = new_bd_ia32_vfld(dbg, block, ptr, noreg, mem, spillmode);
1068 else if (get_mode_size_bits(spillmode) == 128) {
1069 /* Reload 128 bit SSE registers */
1070 new_op = new_bd_ia32_xxLoad(dbg, block, ptr, noreg, mem);
1073 new_op = new_bd_ia32_Load(dbg, block, ptr, noreg, mem);
1075 set_ia32_op_type(new_op, ia32_AddrModeS);
1076 set_ia32_ls_mode(new_op, spillmode);
1077 set_ia32_frame_ent(new_op, ent);
1078 set_ia32_use_frame(new_op);
1079 set_ia32_is_reload(new_op);
1081 DBG_OPT_RELOAD2LD(node, new_op);
1083 proj = new_rd_Proj(dbg, block, new_op, mode, pn_ia32_Load_res);
1086 sched_add_after(sched_point, new_op);
1090 /* copy the register from the old node to the new Load */
1091 reg = arch_get_irn_register(node);
1092 arch_set_irn_register(proj, reg);
1094 SET_IA32_ORIG_NODE(new_op, node);
1096 exchange(node, proj);
/**
 * Transforms a be_Spill node into an ia32 Store.
 */
1102 static void transform_to_Store(ia32_code_gen_t *cg, ir_node *node)
1104 ir_graph *irg = get_irn_irg(node);
1105 dbg_info *dbg = get_irn_dbg_info(node);
1106 ir_node *block = get_nodes_block(node);
1107 ir_entity *ent = be_get_frame_entity(node);
1108 const ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
1109 ir_mode *mode = get_spill_mode(spillval);
1110 ir_node *noreg = ia32_new_NoReg_gp(cg);
1111 ir_node *nomem = new_NoMem();
1112 ir_node *ptr = get_irg_frame(irg);
1113 ir_node *val = get_irn_n(node, be_pos_Spill_val);
1115 ir_node *sched_point = NULL;
1117 if (sched_is_scheduled(node)) {
1118 sched_point = sched_prev(node);
1121 /* No need to spill unknown values... */
1122 if(is_ia32_Unknown_GP(val) ||
1123 is_ia32_Unknown_VFP(val) ||
1124 is_ia32_Unknown_XMM(val)) {
1129 exchange(node, store);
1133 if (mode_is_float(mode)) {
1134 if (ia32_cg_config.use_sse2)
1135 store = new_bd_ia32_xStore(dbg, block, ptr, noreg, nomem, val);
1137 store = new_bd_ia32_vfst(dbg, block, ptr, noreg, nomem, val, mode);
1138 } else if (get_mode_size_bits(mode) == 128) {
1139 /* Spill 128 bit SSE registers */
1140 store = new_bd_ia32_xxStore(dbg, block, ptr, noreg, nomem, val);
1141 } else if (get_mode_size_bits(mode) == 8) {
1142 store = new_bd_ia32_Store8Bit(dbg, block, ptr, noreg, nomem, val);
1144 store = new_bd_ia32_Store(dbg, block, ptr, noreg, nomem, val);
1147 set_ia32_op_type(store, ia32_AddrModeD);
1148 set_ia32_ls_mode(store, mode);
1149 set_ia32_frame_ent(store, ent);
1150 set_ia32_use_frame(store);
1151 set_ia32_is_spill(store);
1152 SET_IA32_ORIG_NODE(store, node);
1153 DBG_OPT_SPILL2ST(node, store);
1156 sched_add_after(sched_point, store);
1160 exchange(node, store);
1163 static ir_node *create_push(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent)
1165 dbg_info *dbg = get_irn_dbg_info(node);
1166 ir_node *block = get_nodes_block(node);
1167 ir_node *noreg = ia32_new_NoReg_gp(cg);
1168 ir_graph *irg = get_irn_irg(node);
1169 ir_node *frame = get_irg_frame(irg);
1171 ir_node *push = new_bd_ia32_Push(dbg, block, frame, noreg, mem, noreg, sp);
1173 set_ia32_frame_ent(push, ent);
1174 set_ia32_use_frame(push);
1175 set_ia32_op_type(push, ia32_AddrModeS);
1176 set_ia32_ls_mode(push, mode_Is);
1177 set_ia32_is_spill(push);
1179 sched_add_before(schedpoint, push);
1183 static ir_node *create_pop(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_entity *ent)
1185 dbg_info *dbg = get_irn_dbg_info(node);
1186 ir_node *block = get_nodes_block(node);
1187 ir_node *noreg = ia32_new_NoReg_gp(cg);
1188 ir_graph *irg = get_irn_irg(node);
1189 ir_node *frame = get_irg_frame(irg);
1191 ir_node *pop = new_bd_ia32_PopMem(dbg, block, frame, noreg, new_NoMem(), sp);
1193 set_ia32_frame_ent(pop, ent);
1194 set_ia32_use_frame(pop);
1195 set_ia32_op_type(pop, ia32_AddrModeD);
1196 set_ia32_ls_mode(pop, mode_Is);
1197 set_ia32_is_reload(pop);
1199 sched_add_before(schedpoint, pop);
1204 static ir_node* create_spproj(ir_node *node, ir_node *pred, int pos)
1206 dbg_info *dbg = get_irn_dbg_info(node);
1207 ir_node *block = get_nodes_block(node);
1208 ir_mode *spmode = mode_Iu;
1209 const arch_register_t *spreg = &ia32_gp_regs[REG_ESP];
1212 sp = new_rd_Proj(dbg, block, pred, spmode, pos);
1213 arch_set_irn_register(sp, spreg);
/**
 * Transform MemPerm, currently we do this the ugly way and produce
 * push/pop into/from memory cascades. This is possible without using
 * any registers.
 */
1223 static void transform_MemPerm(ia32_code_gen_t *cg, ir_node *node)
1225 ir_node *block = get_nodes_block(node);
1226 ir_node *sp = be_abi_get_ignore_irn(cg->birg->abi, &ia32_gp_regs[REG_ESP]);
1227 int arity = be_get_MemPerm_entity_arity(node);
1228 ir_node **pops = ALLOCAN(ir_node*, arity);
1232 const ir_edge_t *edge;
1233 const ir_edge_t *next;
1236 for(i = 0; i < arity; ++i) {
1237 ir_entity *inent = be_get_MemPerm_in_entity(node, i);
1238 ir_entity *outent = be_get_MemPerm_out_entity(node, i);
1239 ir_type *enttype = get_entity_type(inent);
1240 unsigned entsize = get_type_size_bytes(enttype);
1241 unsigned entsize2 = get_type_size_bytes(get_entity_type(outent));
1242 ir_node *mem = get_irn_n(node, i + 1);
1245 /* work around cases where entities have different sizes */
1246 if(entsize2 < entsize)
1248 assert( (entsize == 4 || entsize == 8) && "spillslot on x86 should be 32 or 64 bit");
1250 push = create_push(cg, node, node, sp, mem, inent);
1251 sp = create_spproj(node, push, pn_ia32_Push_stack);
1253 /* add another push after the first one */
1254 push = create_push(cg, node, node, sp, mem, inent);
1255 add_ia32_am_offs_int(push, 4);
1256 sp = create_spproj(node, push, pn_ia32_Push_stack);
1259 set_irn_n(node, i, new_Bad());
1263 for(i = arity - 1; i >= 0; --i) {
1264 ir_entity *inent = be_get_MemPerm_in_entity(node, i);
1265 ir_entity *outent = be_get_MemPerm_out_entity(node, i);
1266 ir_type *enttype = get_entity_type(outent);
1267 unsigned entsize = get_type_size_bytes(enttype);
1268 unsigned entsize2 = get_type_size_bytes(get_entity_type(inent));
1271 /* work around cases where entities have different sizes */
1272 if(entsize2 < entsize)
1274 assert( (entsize == 4 || entsize == 8) && "spillslot on x86 should be 32 or 64 bit");
1276 pop = create_pop(cg, node, node, sp, outent);
1277 sp = create_spproj(node, pop, pn_ia32_Pop_stack);
1279 add_ia32_am_offs_int(pop, 4);
1281 /* add another pop after the first one */
1282 pop = create_pop(cg, node, node, sp, outent);
1283 sp = create_spproj(node, pop, pn_ia32_Pop_stack);
1290 keep = be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], block, 1, in);
1291 sched_add_before(node, keep);
1293 /* exchange memprojs */
1294 foreach_out_edge_safe(node, edge, next) {
1295 ir_node *proj = get_edge_src_irn(edge);
1296 int p = get_Proj_proj(proj);
1300 set_Proj_pred(proj, pops[p]);
1301 set_Proj_proj(proj, pn_ia32_Pop_M);
1304 /* remove memperm */
1305 arity = get_irn_arity(node);
1306 for(i = 0; i < arity; ++i) {
1307 set_irn_n(node, i, new_Bad());
1313 * Block-Walker: Calls the transform functions Spill and Reload.
1315 static void ia32_after_ra_walker(ir_node *block, void *env)
1317 ir_node *node, *prev;
1318 ia32_code_gen_t *cg = env;
1320 /* beware: the schedule is changed here */
1321 for (node = sched_last(block); !sched_is_begin(node); node = prev) {
1322 prev = sched_prev(node);
1324 if (be_is_Reload(node)) {
1325 transform_to_Load(cg, node);
1326 } else if (be_is_Spill(node)) {
1327 transform_to_Store(cg, node);
1328 } else if (be_is_MemPerm(node)) {
1329 transform_MemPerm(cg, node);
1335 * Collects nodes that need frame entities assigned.
1337 static void ia32_collect_frame_entity_nodes(ir_node *node, void *data)
1339 be_fec_env_t *env = data;
1340 const ir_mode *mode;
1343 if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
1344 mode = get_spill_mode_mode(get_irn_mode(node));
1345 align = get_mode_size_bytes(mode);
1346 } else if (is_ia32_irn(node) &&
1347 get_ia32_frame_ent(node) == NULL &&
1348 is_ia32_use_frame(node)) {
1349 if (is_ia32_need_stackent(node))
1352 switch (get_ia32_irn_opcode(node)) {
1354 case iro_ia32_Load: {
1355 const ia32_attr_t *attr = get_ia32_attr_const(node);
1357 if (attr->data.need_32bit_stackent) {
1359 } else if (attr->data.need_64bit_stackent) {
1362 mode = get_ia32_ls_mode(node);
1363 if (is_ia32_is_reload(node))
1364 mode = get_spill_mode_mode(mode);
1366 align = get_mode_size_bytes(mode);
1370 case iro_ia32_vfild:
1372 case iro_ia32_xLoad: {
1373 mode = get_ia32_ls_mode(node);
1378 case iro_ia32_FldCW: {
1379 /* although 2 byte would be enough 4 byte performs best */
		panic("unexpected frame user while collecting frame entity nodes");
1389 case iro_ia32_FnstCW:
1390 case iro_ia32_Store8Bit:
1391 case iro_ia32_Store:
1394 case iro_ia32_vfist:
1395 case iro_ia32_vfisttp:
1397 case iro_ia32_xStore:
1398 case iro_ia32_xStoreSimple:
1405 be_node_needs_frame_entity(env, node, mode, align);
1409 * We transform Spill and Reload here. This needs to be done before
1410 * stack biasing otherwise we would miss the corrected offset for these nodes.
1412 static void ia32_after_ra(void *self)
1414 ia32_code_gen_t *cg = self;
1415 ir_graph *irg = cg->irg;
1416 be_fec_env_t *fec_env = be_new_frame_entity_coalescer(cg->birg);
1418 /* create and coalesce frame entities */
1419 irg_walk_graph(irg, NULL, ia32_collect_frame_entity_nodes, fec_env);
1420 be_assign_entities(fec_env);
1421 be_free_frame_entity_coalescer(fec_env);
1423 irg_block_walk_graph(irg, NULL, ia32_after_ra_walker, cg);
/**
 * Last touchups for the graph before emit: x87 simulation to replace the
 * virtual with real x87 instructions, creating a block schedule and peephole
 * optimisations.
 */
1431 static void ia32_finish(void *self)
1433 ia32_code_gen_t *cg = self;
1434 ir_graph *irg = cg->irg;
1436 ia32_finish_irg(irg, cg);
1438 /* we might have to rewrite x87 virtual registers */
1439 if (cg->do_x87_sim) {
1440 x87_simulate_graph(cg->birg);
1443 /* do peephole optimisations */
1444 ia32_peephole_optimization(cg);
1446 /* create block schedule, this also removes empty blocks which might
1447 * produce critical edges */
1448 cg->blk_sched = be_create_block_schedule(irg, cg->birg->exec_freq);
1452 * Emits the code, closes the output file and frees
1453 * the code generator interface.
1455 static void ia32_codegen(void *self)
1457 ia32_code_gen_t *cg = self;
1458 ir_graph *irg = cg->irg;
1460 ia32_gen_routine(cg, irg);
1464 /* remove it from the isa */
1467 assert(ia32_current_cg == cg);
1468 ia32_current_cg = NULL;
1470 /* de-allocate code generator */
1471 del_set(cg->reg_set);
1476 * Returns the node representing the PIC base.
static ir_node *ia32_get_pic_base(void *self)
{
	ir_node         *block;
	ia32_code_gen_t *cg      = self;
	ir_node         *get_eip = cg->get_eip;
	if (get_eip != NULL)
		return get_eip;

	block       = get_irg_start_block(cg->irg);
	get_eip     = new_bd_ia32_GetEIP(NULL, block);
	cg->get_eip = get_eip;
	be_dep_on_frame(get_eip);

	return get_eip;
}
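/* The GetEIP pseudo node materialises the current instruction pointer once per
 * graph; position independent addresses are then formed relative to this base
 * (see the "return node used as base in pic code addresses" entry in
 * ia32_code_gen_if below). */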
1494 static void *ia32_cg_init(be_irg_t *birg);
1496 static const arch_code_generator_if_t ia32_code_gen_if = {
1498 ia32_get_pic_base, /* return node used as base in pic code addresses */
1499 ia32_before_abi, /* before abi introduce hook */
1502 ia32_before_ra, /* before register allocation hook */
1503 ia32_after_ra, /* after register allocation hook */
1504 ia32_finish, /* called before codegen */
1505 ia32_codegen /* emit && done */
/**
 * Initializes an IA32 code generator.
 */
1511 static void *ia32_cg_init(be_irg_t *birg)
1513 ia32_isa_t *isa = (ia32_isa_t *)birg->main_env->arch_env;
1514 ia32_code_gen_t *cg = XMALLOCZ(ia32_code_gen_t);
1516 cg->impl = &ia32_code_gen_if;
1517 cg->irg = birg->irg;
1518 cg->reg_set = new_set(ia32_cmp_irn_reg_assoc, 1024);
1521 cg->blk_sched = NULL;
1522 cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
1523 cg->gprof = (birg->main_env->options->gprof) ? 1 : 0;
1526 /* Linux gprof implementation needs base pointer */
1527 birg->main_env->options->omit_fp = 0;
1534 if (isa->name_obst) {
1535 obstack_free(isa->name_obst, NULL);
1536 obstack_init(isa->name_obst);
1540 cur_reg_set = cg->reg_set;
1542 assert(ia32_current_cg == NULL);
1543 ia32_current_cg = cg;
1545 return (arch_code_generator_t *)cg;
1550 /*****************************************************************
1551 * ____ _ _ _____ _____
1552 * | _ \ | | | | |_ _|/ ____| /\
1553 * | |_) | __ _ ___| | _____ _ __ __| | | | | (___ / \
1554 * | _ < / _` |/ __| |/ / _ \ '_ \ / _` | | | \___ \ / /\ \
1555 * | |_) | (_| | (__| < __/ | | | (_| | _| |_ ____) / ____ \
1556 * |____/ \__,_|\___|_|\_\___|_| |_|\__,_| |_____|_____/_/ \_\
1558 *****************************************************************/
1561 * Set output modes for GCC
1563 static const tarval_mode_info mo_integer = {
1570 * set the tarval output mode of all integer modes to decimal
1572 static void set_tarval_output_modes(void)
1576 for (i = get_irp_n_modes() - 1; i >= 0; --i) {
1577 ir_mode *mode = get_irp_mode(i);
1579 if (mode_is_int(mode))
1580 set_tarval_mode_output_option(mode, &mo_integer);
1584 const arch_isa_if_t ia32_isa_if;
/**
 * The template that generates a new ISA object.
 * Note that this template can be changed by command line
 * arguments.
 */
1591 static ia32_isa_t ia32_isa_template = {
1593 &ia32_isa_if, /* isa interface implementation */
1594 &ia32_gp_regs[REG_ESP], /* stack pointer register */
1595 &ia32_gp_regs[REG_EBP], /* base pointer register */
1596 &ia32_reg_classes[CLASS_ia32_gp], /* static link pointer register class */
1597 -1, /* stack direction */
1598 2, /* power of two stack alignment, 2^2 == 4 */
1599 NULL, /* main environment */
1600 7, /* costs for a spill instruction */
1601 5, /* costs for a reload instruction */
1603 NULL, /* 16bit register names */
1604 NULL, /* 8bit register names */
1605 NULL, /* 8bit register names high */
1608 NULL, /* current code generator */
1609 NULL, /* abstract machine */
1611 NULL, /* name obstack */
1615 static void init_asm_constraints(void)
1617 be_init_default_asm_constraint_flags();
1619 asm_constraint_flags['a'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1620 asm_constraint_flags['b'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1621 asm_constraint_flags['c'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1622 asm_constraint_flags['d'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1623 asm_constraint_flags['D'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1624 asm_constraint_flags['S'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1625 asm_constraint_flags['Q'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1626 asm_constraint_flags['q'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1627 asm_constraint_flags['A'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1628 asm_constraint_flags['l'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1629 asm_constraint_flags['R'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1630 asm_constraint_flags['r'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1631 asm_constraint_flags['p'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1632 asm_constraint_flags['f'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1633 asm_constraint_flags['t'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1634 asm_constraint_flags['u'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1635 asm_constraint_flags['Y'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1636 asm_constraint_flags['X'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1637 asm_constraint_flags['n'] = ASM_CONSTRAINT_FLAG_SUPPORTS_IMMEDIATE;
1638 asm_constraint_flags['g'] = ASM_CONSTRAINT_FLAG_SUPPORTS_IMMEDIATE;
1640 /* no support for autodecrement/autoincrement */
1641 asm_constraint_flags['<'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1642 asm_constraint_flags['>'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1643 /* no float consts */
1644 asm_constraint_flags['E'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1645 asm_constraint_flags['F'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1646 /* makes no sense on x86 */
1647 asm_constraint_flags['s'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1648 /* no support for sse consts yet */
1649 asm_constraint_flags['C'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1650 /* no support for x87 consts yet */
1651 asm_constraint_flags['G'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1652 /* no support for mmx registers yet */
1653 asm_constraint_flags['y'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1654 /* not available in 32bit mode */
1655 asm_constraint_flags['Z'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1656 asm_constraint_flags['e'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1658 /* no code yet to determine register class needed... */
1659 asm_constraint_flags['X'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1663 * Initializes the backend ISA.
1665 static arch_env_t *ia32_init(FILE *file_handle)
1667 static int inited = 0;
1675 set_tarval_output_modes();
1677 isa = XMALLOC(ia32_isa_t);
1678 memcpy(isa, &ia32_isa_template, sizeof(*isa));
1680 if(mode_fpcw == NULL) {
1681 mode_fpcw = new_ir_mode("Fpcw", irms_int_number, 16, 0, irma_none, 0);
1684 ia32_register_init();
1685 ia32_create_opcodes(&ia32_irn_ops);
1686 /* special handling for SwitchJmp */
1687 op_ia32_SwitchJmp->ops.be_ops = &ia32_SwitchJmp_irn_ops;
1689 be_emit_init(file_handle);
1690 isa->regs_16bit = pmap_create();
1691 isa->regs_8bit = pmap_create();
1692 isa->regs_8bit_high = pmap_create();
1693 isa->types = pmap_create();
1694 isa->tv_ent = pmap_create();
1695 isa->cpu = ia32_init_machine_description();
1697 ia32_build_16bit_reg_map(isa->regs_16bit);
1698 ia32_build_8bit_reg_map(isa->regs_8bit);
1699 ia32_build_8bit_reg_map_high(isa->regs_8bit_high);
1702 isa->name_obst = XMALLOC(struct obstack);
1703 obstack_init(isa->name_obst);
1706 /* enter the ISA object into the intrinsic environment */
1707 intrinsic_env.isa = isa;
1709 /* emit asm includes */
1710 n = get_irp_n_asms();
1711 for (i = 0; i < n; ++i) {
1712 be_emit_cstring("#APP\n");
1713 be_emit_ident(get_irp_asm(i));
1714 be_emit_cstring("\n#NO_APP\n");
1717 /* needed for the debug support */
1718 be_gas_emit_switch_section(GAS_SECTION_TEXT);
1719 be_emit_cstring(".Ltext0:\n");
1720 be_emit_write_line();
1722 /* we mark referenced global entities, so we can only emit those which
1723 * are actually referenced. (Note: you mustn't use the type visited flag
1724 * elsewhere in the backend)
1726 inc_master_type_visited();
1728 return &isa->arch_env;
1734 * Closes the output file and frees the ISA structure.
1736 static void ia32_done(void *self)
1738 ia32_isa_t *isa = self;
1740 /* emit now all global declarations */
1741 be_gas_emit_decls(isa->arch_env.main_env, 1);
1743 pmap_destroy(isa->regs_16bit);
1744 pmap_destroy(isa->regs_8bit);
1745 pmap_destroy(isa->regs_8bit_high);
1746 pmap_destroy(isa->tv_ent);
1747 pmap_destroy(isa->types);
1750 obstack_free(isa->name_obst, NULL);
/**
 * Return the number of register classes for this architecture.
 * We always report these:
 *  - the general purpose registers
 *  - the SSE floating point register set
 *  - the virtual floating point registers
 *  - the SSE vector register set
 */
static unsigned ia32_get_n_reg_class(const void *self)
{
	(void) self;
	return N_CLASSES;
}
1774 * Return the register class for index i.
1776 static const arch_register_class_t *ia32_get_reg_class(const void *self,
1780 assert(i < N_CLASSES);
1781 return &ia32_reg_classes[i];
1785 * Get the register class which shall be used to store a value of a given mode.
1786 * @param self The this pointer.
1787 * @param mode The mode in question.
1788 * @return A register class which can hold values of the given mode.
1790 const arch_register_class_t *ia32_get_reg_class_for_mode(const void *self,
1791 const ir_mode *mode)
1795 if (mode_is_float(mode)) {
1796 return ia32_cg_config.use_sse2 ? &ia32_reg_classes[CLASS_ia32_xmm] : &ia32_reg_classes[CLASS_ia32_vfp];
1799 return &ia32_reg_classes[CLASS_ia32_gp];
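/* Example: mode_F/mode_D values live in the xmm class when SSE2 is enabled and
 * in the virtual x87 (vfp) class otherwise; integer and pointer modes always
 * map to the gp class. */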
1803 * Get the ABI restrictions for procedure calls.
1804 * @param self The this pointer.
1805 * @param method_type The type of the method (procedure) in question.
1806 * @param abi The abi object to be modified
1808 static void ia32_get_call_abi(const void *self, ir_type *method_type,
1816 be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);
1820 /* set abi flags for calls */
1821 call_flags.bits.left_to_right = 0; /* always last arg first on stack */
1822 call_flags.bits.store_args_sequential = 0;
1823 /* call_flags.bits.try_omit_fp not changed: can handle both settings */
1824 call_flags.bits.fp_free = 0; /* the frame pointer is fixed in IA32 */
1825 call_flags.bits.call_has_imm = 0; /* No call immediate, we handle this by ourselves */
1827 /* set parameter passing style */
1828 be_abi_call_set_flags(abi, call_flags, &ia32_abi_callbacks);
1830 cc = get_method_calling_convention(method_type);
1831 if (get_method_variadicity(method_type) == variadicity_variadic) {
1832 /* pass all parameters of a variadic function on the stack */
1833 cc = cc_cdecl_set | (cc & cc_this_call);
1835 if (get_method_additional_properties(method_type) & mtp_property_private &&
1836 ia32_cg_config.optimize_cc) {
1837 /* set the fast calling conventions (allowing up to 3) */
1838 cc = SET_FASTCALL(cc) | 3;
1842 /* we have to pop the shadow parameter ourself for compound calls */
1843 if ( (get_method_calling_convention(method_type) & cc_compound_ret)
1844 && !(cc & cc_reg_param)) {
1845 pop_amount += get_mode_size_bytes(mode_P_data);
1848 n = get_method_n_params(method_type);
1849 for (i = regnum = 0; i < n; i++) {
1851 const arch_register_t *reg = NULL;
1853 tp = get_method_param_type(method_type, i);
1854 mode = get_type_mode(tp);
1856 reg = ia32_get_RegParam_reg(cc, regnum, mode);
1859 be_abi_call_param_reg(abi, i, reg);
1862 /* Micro optimisation: if the mode is shorter than 4 bytes, load 4 bytes.
1863 * movl has a shorter opcode than mov[sz][bw]l */
1864 ir_mode *load_mode = mode;
1867 unsigned size = get_mode_size_bytes(mode);
1869 if (cc & cc_callee_clear_stk) {
1870 pop_amount += (size + 3U) & ~3U;
1873 if (size < 4) load_mode = mode_Iu;
1876 be_abi_call_param_stack(abi, i, load_mode, 4, 0, 0);
1880 be_abi_call_set_pop(abi, pop_amount);
1882 /* set return registers */
1883 n = get_method_n_ress(method_type);
1885 assert(n <= 2 && "more than two results not supported");
1887 /* In case of 64bit returns, we will have two 32bit values */
1889 tp = get_method_res_type(method_type, 0);
1890 mode = get_type_mode(tp);
1892 assert(!mode_is_float(mode) && "two FP results not supported");
1894 tp = get_method_res_type(method_type, 1);
1895 mode = get_type_mode(tp);
1897 assert(!mode_is_float(mode) && "mixed INT, FP results not supported");
1899 be_abi_call_res_reg(abi, 0, &ia32_gp_regs[REG_EAX]);
1900 be_abi_call_res_reg(abi, 1, &ia32_gp_regs[REG_EDX]);
1903 const arch_register_t *reg;
1905 tp = get_method_res_type(method_type, 0);
1906 assert(is_atomic_type(tp));
1907 mode = get_type_mode(tp);
1909 reg = mode_is_float(mode) ? &ia32_vfp_regs[REG_VF0] : &ia32_gp_regs[REG_EAX];
1911 be_abi_call_res_reg(abi, 0, reg);
1915 int ia32_to_appear_in_schedule(void *block_env, const ir_node *irn)
1919 if(!is_ia32_irn(irn)) {
1923 if(is_ia32_NoReg_GP(irn) || is_ia32_NoReg_VFP(irn) || is_ia32_NoReg_XMM(irn)
1924 || is_ia32_Unknown_GP(irn) || is_ia32_Unknown_XMM(irn)
1925 || is_ia32_Unknown_VFP(irn) || is_ia32_ChangeCW(irn)
1926 || is_ia32_Immediate(irn))
1933 * Initializes the code generator interface.
1935 static const arch_code_generator_if_t *ia32_get_code_generator_if(void *self)
1938 return &ia32_code_gen_if;
1942 * Returns the estimated execution time of an ia32 irn.
1944 static sched_timestep_t ia32_sched_exectime(void *env, const ir_node *irn)
1947 return is_ia32_irn(irn) ? ia32_get_op_estimated_cost(irn) : 1;
1950 list_sched_selector_t ia32_sched_selector;
1953 * Returns the reg_pressure scheduler with to_appear_in_schedule() overloaded
1955 static const list_sched_selector_t *ia32_get_list_sched_selector(
1956 const void *self, list_sched_selector_t *selector)
1959 memcpy(&ia32_sched_selector, selector, sizeof(ia32_sched_selector));
1960 ia32_sched_selector.exectime = ia32_sched_exectime;
1961 ia32_sched_selector.to_appear_in_schedule = ia32_to_appear_in_schedule;
1962 return &ia32_sched_selector;
1965 static const ilp_sched_selector_t *ia32_get_ilp_sched_selector(const void *self)
1972 * Returns the necessary byte alignment for storing a register of given class.
1974 static int ia32_get_reg_class_alignment(const void *self,
1975 const arch_register_class_t *cls)
1977 ir_mode *mode = arch_register_class_mode(cls);
1978 int bytes = get_mode_size_bytes(mode);
	if (mode_is_float(mode) && bytes > 8)
		return 16;

	return bytes;
}
1986 static const be_execution_unit_t ***ia32_get_allowed_execution_units(
1987 const void *self, const ir_node *irn)
1989 static const be_execution_unit_t *_allowed_units_BRANCH[] = {
1990 &ia32_execution_units_BRANCH[IA32_EXECUNIT_TP_BRANCH_BRANCH1],
1991 &ia32_execution_units_BRANCH[IA32_EXECUNIT_TP_BRANCH_BRANCH2],
1994 static const be_execution_unit_t *_allowed_units_GP[] = {
1995 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EAX],
1996 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EBX],
1997 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_ECX],
1998 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EDX],
1999 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_ESI],
2000 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EDI],
2001 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EBP],
2004 static const be_execution_unit_t *_allowed_units_DUMMY[] = {
2005 &be_machine_execution_units_DUMMY[0],
2008 static const be_execution_unit_t **_units_callret[] = {
2009 _allowed_units_BRANCH,
2012 static const be_execution_unit_t **_units_other[] = {
2016 static const be_execution_unit_t **_units_dummy[] = {
2017 _allowed_units_DUMMY,
2020 const be_execution_unit_t ***ret;
2023 if (is_ia32_irn(irn)) {
2024 ret = get_ia32_exec_units(irn);
2025 } else if (is_be_node(irn)) {
2026 if (be_is_Return(irn)) {
2027 ret = _units_callret;
2028 } else if (be_is_Barrier(irn)) {
2042 * Return the abstract ia32 machine.
2044 static const be_machine_t *ia32_get_machine(const void *self)
2046 const ia32_isa_t *isa = self;
2051 * Return irp irgs in the desired order.
2053 static ir_graph **ia32_get_irg_list(const void *self, ir_graph ***irg_list)
2060 static void ia32_mark_remat(const void *self, ir_node *node)
2063 if (is_ia32_irn(node)) {
2064 set_ia32_is_remat(node);
2069 * Check for Abs or -Abs.
2071 static int psi_is_Abs_or_Nabs(ir_node *cmp, ir_node *sel, ir_node *t, ir_node *f)
2079 /* must be <, <=, >=, > */
2080 pnc = get_Proj_proj(sel);
2081 if (pnc != pn_Cmp_Ge && pnc != pn_Cmp_Gt &&
2082 pnc != pn_Cmp_Le && pnc != pn_Cmp_Lt)
2085 l = get_Cmp_left(cmp);
2086 r = get_Cmp_right(cmp);
2088 /* must be x cmp 0 */
2089 if ((l != t && l != f) || !is_Const(r) || !is_Const_null(r))
2092 if ((!is_Minus(t) || get_Minus_op(t) != f) &&
2093 (!is_Minus(f) || get_Minus_op(f) != t))
2099 * Check for Abs only
2101 static int psi_is_Abs(ir_node *cmp, ir_node *sel, ir_node *t, ir_node *f)
2109 /* must be <, <=, >=, > */
2110 pnc = get_Proj_proj(sel);
2111 if (pnc != pn_Cmp_Ge && pnc != pn_Cmp_Gt &&
2112 pnc != pn_Cmp_Le && pnc != pn_Cmp_Lt)
2115 l = get_Cmp_left(cmp);
2116 r = get_Cmp_right(cmp);
2118 /* must be x cmp 0 */
2119 if ((l != t && l != f) || !is_Const(r) || !is_Const_null(r))
2122 if ((!is_Minus(t) || get_Minus_op(t) != f) &&
2123 (!is_Minus(f) || get_Minus_op(f) != t))
2126 if (pnc & pn_Cmp_Gt) {
2127 /* x >= 0 ? -x : x is NABS */
2131 /* x < 0 ? x : -x is NABS */
2140 * Allows or disallows the creation of Mux nodes for the given Phi nodes.
2142 * @param sel A selector of a Cond.
2143 * @param phi_list List of Phi nodes about to be converted (linked via get_Phi_next() field)
2144 * @param i First data predecessor involved in if conversion
2145 * @param j Second data predecessor involved in if conversion
2147 * @return 1 if allowed, 0 otherwise
2149 static int ia32_is_mux_allowed(ir_node *sel, ir_node *phi_list, int i, int j)
	/* we cannot handle Muxes with 64bit compares yet */
2158 cmp = get_Proj_pred(sel);
2160 ir_node *left = get_Cmp_left(cmp);
2161 ir_mode *cmp_mode = get_irn_mode(left);
2162 if (!mode_is_float(cmp_mode) && get_mode_size_bits(cmp_mode) > 32) {
2163 /* 64bit Abs IS supported */
2164 for (phi = phi_list; phi; phi = get_Phi_next(phi)) {
2165 ir_node *t = get_Phi_pred(phi, i);
2166 ir_node *f = get_Phi_pred(phi, j);
2168 if (! psi_is_Abs(cmp, sel, t, f))
2174 /* we do not support nodes without Cmp yet */
2178 /* we do not support nodes without Cmp yet */
2182 pn = get_Proj_proj(sel);
2183 cl = get_Cmp_left(cmp);
2184 cr = get_Cmp_right(cmp);
2186 if (ia32_cg_config.use_cmov) {
2187 if (ia32_cg_config.use_sse2) {
2188 /* check the Phi nodes: no 64bit and no floating point cmov */
2189 for (phi = phi_list; phi; phi = get_Phi_next(phi)) {
2190 ir_mode *mode = get_irn_mode(phi);
2192 if (mode_is_float(mode)) {
2193 /* check for Min, Max */
2194 ir_node *t = get_Phi_pred(phi, i);
2195 ir_node *f = get_Phi_pred(phi, j);
2197 /* SSE2 supports Min & Max */
2198 if (pn == pn_Cmp_Lt || pn == pn_Cmp_Le || pn == pn_Cmp_Ge || pn == pn_Cmp_Gt) {
2199 if (cl == t && cr == f) {
2200 /* Mux(a <=/>= b, a, b) => MIN, MAX */
2202 } else if (cl == f && cr == t) {
2203 /* Mux(a <=/>= b, b, a) => MAX, MIN */
2208 } else if (get_mode_size_bits(mode) > 32) {
2214 /* check the Phi nodes: no 64bit and no floating point cmov */
2215 for (phi = phi_list; phi; phi = get_Phi_next(phi)) {
2216 ir_mode *mode = get_irn_mode(phi);
2218 if (mode_is_float(mode)) {
2219 ir_node *t = get_Phi_pred(phi, i);
2220 ir_node *f = get_Phi_pred(phi, j);
2222 /* always support Mux(!float, C1, C2) */
2223 if (is_Const(t) && is_Const(f) && !mode_is_float(get_irn_mode(cl))) {
2224 switch (be_transformer) {
2225 case TRANSFORMER_DEFAULT:
2226 /* always support Mux(!float, C1, C2) */
2228 #ifdef FIRM_GRGEN_BE
2229 case TRANSFORMER_PBQP:
2230 case TRANSFORMER_RAND:
2231 /* no support for Mux(*, C1, C2) */
2235 panic("invalid transformer");
2238 /* only abs or nabs supported */
2239 if (! psi_is_Abs_or_Nabs(cmp, sel, t, f))
2241 } else if (get_mode_size_bits(mode) > 32)
2247 } else { /* No Cmov, only some special cases */
2249 /* Now some supported cases here */
2250 for (phi = phi_list; phi; phi = get_Phi_next(phi)) {
2251 ir_mode *mode = get_irn_mode(phi);
2254 t = get_Phi_pred(phi, i);
2255 f = get_Phi_pred(phi, j);
2257 if (mode_is_float(mode)) {
2258 /* always support Mux(!float, C1, C2) */
2259 if (is_Const(t) && is_Const(f) &&
2260 !mode_is_float(get_irn_mode(cl))) {
2261 switch (be_transformer) {
2262 case TRANSFORMER_DEFAULT:
2263 /* always support Mux(!float, C1, C2) */
2265 #ifdef FIRM_GRGEN_BE
2266 case TRANSFORMER_PBQP:
2267 case TRANSFORMER_RAND:
2268 /* no support for Mux(*, C1, C2) */
2272 panic("invalid transformer");
2275 /* only abs or nabs supported */
2276 if (! psi_is_Abs_or_Nabs(cmp, sel, t, f))
2278 } else if (get_mode_size_bits(mode) > 32) {
2283 if (is_Const(t) && is_Const(f)) {
2284 if ((is_Const_null(t) && is_Const_one(f)) || (is_Const_one(t) && is_Const_null(f))) {
2285 /* always support Mux(x, C1, C2) */
2288 } else if (pn == pn_Cmp_Lt || pn == pn_Cmp_Le || pn == pn_Cmp_Ge || pn == pn_Cmp_Gt) {
2290 if (cl == t && cr == f) {
2291 /* Mux(a <=/>= b, a, b) => Min, Max */
2294 if (cl == f && cr == t) {
2295 /* Mux(a <=/>= b, b, a) => Max, Min */
2299 if ((pn & pn_Cmp_Gt) && !mode_is_signed(mode) &&
2300 is_Const(f) && is_Const_null(f) && is_Sub(t) &&
2301 get_Sub_left(t) == cl && get_Sub_right(t) == cr) {
2302 /* Mux(a >=u b, a - b, 0) unsigned Doz */
2305 if ((pn & pn_Cmp_Lt) && !mode_is_signed(mode) &&
2306 is_Const(t) && is_Const_null(t) && is_Sub(f) &&
2307 get_Sub_left(f) == cl && get_Sub_right(f) == cr) {
2308 /* Mux(a <=u b, 0, a - b) unsigned Doz */
2311 if (is_Const(cr) && is_Const_null(cr)) {
2312 if (cl == t && is_Minus(f) && get_Minus_op(f) == cl) {
2313 /* Mux(a <=/>= 0 ? a : -a) Nabs/Abs */
2315 } else if (cl == f && is_Minus(t) && get_Minus_op(t) == cl) {
2316 /* Mux(a <=/>= 0 ? -a : a) Abs/Nabs */
	/* all checks passed */
	return 1;
}
2329 static asm_constraint_flags_t ia32_parse_asm_constraint(const void *self, const char **c)
2334 /* we already added all our simple flags to the flags modifier list in
2335 * init, so this flag we don't know. */
2336 return ASM_CONSTRAINT_FLAG_INVALID;
2339 static int ia32_is_valid_clobber(const void *self, const char *clobber)
2343 return ia32_get_clobber_register(clobber) != NULL;
2347 * Create the trampoline code.
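 *
 * The stores below emit, byte by byte (sketch of the intended encoding):
 *   0xb9 <env>      ; mov ecx, imm32  -- pass the environment/static chain in ecx
 *   0xe9 <callee>   ; jmp             -- continue at the real callee
 * which fits into the 12 byte, 4 byte aligned trampoline area reported in
 * ia32_get_libfirm_params().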
2349 static ir_node *ia32_create_trampoline_fkt(ir_node *block, ir_node *mem, ir_node *trampoline, ir_node *env, ir_node *callee)
2351 ir_node *st, *p = trampoline;
2352 ir_mode *mode = get_irn_mode(p);
2355 st = new_r_Store(block, mem, p, new_Const_long(mode_Bu, 0xb9), 0);
2356 mem = new_r_Proj(block, st, mode_M, pn_Store_M);
2357 p = new_r_Add(block, p, new_Const_long(mode_Iu, 1), mode);
2358 st = new_r_Store(block, mem, p, env, 0);
2359 mem = new_r_Proj(block, st, mode_M, pn_Store_M);
2360 p = new_r_Add(block, p, new_Const_long(mode_Iu, 4), mode);
2362 st = new_r_Store(block, mem, p, new_Const_long(mode_Bu, 0xe9), 0);
2363 mem = new_r_Proj(block, st, mode_M, pn_Store_M);
2364 p = new_r_Add(block, p, new_Const_long(mode_Iu, 1), mode);
2365 st = new_r_Store(block, mem, p, callee, 0);
2366 mem = new_r_Proj(block, st, mode_M, pn_Store_M);
	p   = new_r_Add(block, p, new_Const_long(mode_Iu, 4), mode);

	return mem;
}
2373 * Returns the libFirm configuration parameter for this backend.
2375 static const backend_params *ia32_get_libfirm_params(void)
2377 static const ir_settings_if_conv_t ifconv = {
2378 4, /* maxdepth, doesn't matter for Mux-conversion */
2379 ia32_is_mux_allowed /* allows or disallows Mux creation for given selector */
2381 static const ir_settings_arch_dep_t ad = {
2382 1, /* also use subs */
2383 4, /* maximum shifts */
2384 31, /* maximum shift amount */
2385 ia32_evaluate_insn, /* evaluate the instruction sequence */
2387 1, /* allow Mulhs */
2388 1, /* allow Mulus */
2389 32, /* Mulh allowed up to 32 bit */
2391 static backend_params p = {
2392 1, /* need dword lowering */
2393 1, /* support inline assembly */
2394 NULL, /* will be set later */
2395 ia32_create_intrinsic_fkt,
2396 &intrinsic_env, /* context for ia32_create_intrinsic_fkt */
2397 NULL, /* ifconv info will be set below */
2398 NULL, /* float arithmetic mode, will be set below */
2399 12, /* size of trampoline code */
2400 4, /* alignment of trampoline code */
2401 ia32_create_trampoline_fkt,
2402 4 /* alignment of stack parameter */
2405 ia32_setup_cg_config();
	/* doesn't really belong here, but this is the earliest place the backend
	 * is called... */
	init_asm_constraints();
2412 p.if_conv_info = &ifconv;
	if (! ia32_cg_config.use_sse2)
		p.mode_float_arithmetic = mode_E;

	return &p;
}
2418 static const lc_opt_enum_int_items_t gas_items[] = {
2419 { "elf", GAS_FLAVOUR_ELF },
2420 { "mingw", GAS_FLAVOUR_MINGW },
2421 { "yasm", GAS_FLAVOUR_YASM },
2422 { "macho", GAS_FLAVOUR_MACH_O },
2426 static lc_opt_enum_int_var_t gas_var = {
2427 (int*) &be_gas_flavour, gas_items
2430 #ifdef FIRM_GRGEN_BE
2431 static const lc_opt_enum_int_items_t transformer_items[] = {
2432 { "default", TRANSFORMER_DEFAULT },
2433 { "pbqp", TRANSFORMER_PBQP },
2434 { "random", TRANSFORMER_RAND },
2438 static lc_opt_enum_int_var_t transformer_var = {
2439 (int*)&be_transformer, transformer_items
2443 static const lc_opt_table_entry_t ia32_options[] = {
2444 LC_OPT_ENT_ENUM_INT("gasmode", "set the GAS compatibility mode", &gas_var),
2445 #ifdef FIRM_GRGEN_BE
2446 LC_OPT_ENT_ENUM_INT("transformer", "the transformer used for code selection", &transformer_var),
	LC_OPT_ENT_INT("stackalign", "set power of two stack alignment for calls",
	               &ia32_isa_template.arch_env.stack_alignment),
	LC_OPT_LAST
};
2453 const arch_isa_if_t ia32_isa_if = {
2456 ia32_handle_intrinsics,
2457 ia32_get_n_reg_class,
2459 ia32_get_reg_class_for_mode,
2461 ia32_get_code_generator_if,
2462 ia32_get_list_sched_selector,
2463 ia32_get_ilp_sched_selector,
2464 ia32_get_reg_class_alignment,
2465 ia32_get_libfirm_params,
2466 ia32_get_allowed_execution_units,
2470 ia32_parse_asm_constraint,
2471 ia32_is_valid_clobber
void be_init_arch_ia32(void)
{
	lc_opt_entry_t *be_grp   = lc_opt_get_grp(firm_opt_get_root(), "be");
2477 lc_opt_entry_t *ia32_grp = lc_opt_get_grp(be_grp, "ia32");
2479 lc_opt_add_table(ia32_grp, ia32_options);
2480 be_register_isa_if("ia32", &ia32_isa_if);
2482 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.cg");
2484 ia32_init_emitter();
2486 ia32_init_optimize();
2487 ia32_init_transform();
	ia32_init_architecture();
}
2492 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_ia32);