/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */
/**
 * @brief   This is the main ia32 firm backend driver.
 * @author  Christian Wuerdig
 */
#include <math.h>

#include "lc_opts.h"
#include "lc_opts_enum.h"

#include "pseudo_irg.h"
#include "iredges_t.h"
#include "iroptimize.h"
#include "instrument.h"

#include "../benode.h"
#include "../belower.h"
#include "../besched.h"
#include "../beirgmod.h"
#include "../be_dbgout.h"
#include "../beblocksched.h"
#include "../bemachine.h"
#include "../beilpsched.h"
#include "../bespillslots.h"
#include "../bemodule.h"
#include "../begnuas.h"
#include "../bestate.h"
#include "../beflags.h"
#include "../betranshlp.h"
#include "../belistsched.h"

#include "bearch_ia32_t.h"
#include "ia32_new_nodes.h"
#include "gen_ia32_regalloc_if.h"
#include "gen_ia32_machine.h"
#include "ia32_common_transform.h"
#include "ia32_transform.h"
#include "ia32_emitter.h"
#include "ia32_map_regs.h"
#include "ia32_optimize.h"
#include "ia32_dbg_stat.h"
#include "ia32_finish.h"
#include "ia32_util.h"
#include "ia32_architecture.h"

#ifdef FIRM_GRGEN_BE
#include "ia32_pbqp_transform.h"
#endif
transformer_t be_transformer = TRANSFORMER_DEFAULT;

DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

ir_mode         *mode_fpcw       = NULL;
ia32_code_gen_t *ia32_current_cg = NULL;

/** The current omit-fp state */
static unsigned ia32_curr_fp_ommitted  = 0;
static ir_type *omit_fp_between_type   = NULL;
static ir_type *between_type           = NULL;
static ir_entity *old_bp_ent           = NULL;
static ir_entity *ret_addr_ent         = NULL;
static ir_entity *omit_fp_ret_addr_ent = NULL;
/**
 * The environment for the intrinsic mapping.
 */
static ia32_intrinsic_env_t intrinsic_env = {
    NULL,    /* the isa */
    NULL,    /* the irg, these entities belong to */
    NULL,    /* entity for __divdi3 library call */
    NULL,    /* entity for __moddi3 library call */
    NULL,    /* entity for __udivdi3 library call */
    NULL,    /* entity for __umoddi3 library call */
};
typedef ir_node *(*create_const_node_func) (dbg_info *dbg, ir_node *block);

/**
 * Used to create per-graph unique pseudo nodes.
 */
static inline ir_node *create_const(ia32_code_gen_t *cg, ir_node **place,
                                    create_const_node_func func,
                                    const arch_register_t* reg)
{
    ir_node *block, *res;

    if (*place != NULL)
        return *place;

    block = get_irg_start_block(cg->irg);
    res   = func(NULL, block);
    arch_set_irn_register(res, reg);
    *place = res;

    return res;
}
/* Creates the unique per irg GP NoReg node. */
ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg)
{
    return create_const(cg, &cg->noreg_gp, new_bd_ia32_NoReg_GP,
                        &ia32_gp_regs[REG_GP_NOREG]);
}

ir_node *ia32_new_NoReg_vfp(ia32_code_gen_t *cg)
{
    return create_const(cg, &cg->noreg_vfp, new_bd_ia32_NoReg_VFP,
                        &ia32_vfp_regs[REG_VFP_NOREG]);
}

ir_node *ia32_new_NoReg_xmm(ia32_code_gen_t *cg)
{
    return create_const(cg, &cg->noreg_xmm, new_bd_ia32_NoReg_XMM,
                        &ia32_xmm_regs[REG_XMM_NOREG]);
}

ir_node *ia32_new_Unknown_gp(ia32_code_gen_t *cg)
{
    return create_const(cg, &cg->unknown_gp, new_bd_ia32_Unknown_GP,
                        &ia32_gp_regs[REG_GP_UKNWN]);
}

ir_node *ia32_new_Unknown_vfp(ia32_code_gen_t *cg)
{
    return create_const(cg, &cg->unknown_vfp, new_bd_ia32_Unknown_VFP,
                        &ia32_vfp_regs[REG_VFP_UKNWN]);
}

ir_node *ia32_new_Unknown_xmm(ia32_code_gen_t *cg)
{
    return create_const(cg, &cg->unknown_xmm, new_bd_ia32_Unknown_XMM,
                        &ia32_xmm_regs[REG_XMM_UKNWN]);
}

ir_node *ia32_new_Fpu_truncate(ia32_code_gen_t *cg)
{
    return create_const(cg, &cg->fpu_trunc_mode, new_bd_ia32_ChangeCW,
                        &ia32_fp_cw_regs[REG_FPCW]);
}
/**
 * Returns the admissible noreg register node for input register pos of node irn.
 */
static ir_node *ia32_get_admissible_noreg(ia32_code_gen_t *cg, ir_node *irn, int pos)
{
    const arch_register_req_t *req = arch_get_register_req(irn, pos);

    assert(req != NULL && "Missing register requirements");
    if (req->cls == &ia32_reg_classes[CLASS_ia32_gp])
        return ia32_new_NoReg_gp(cg);

    if (ia32_cg_config.use_sse2) {
        return ia32_new_NoReg_xmm(cg);
    } else {
        return ia32_new_NoReg_vfp(cg);
    }
}
/**************************************************
 *
 *           register allocator interface
 *
 **************************************************/
static const arch_register_req_t *get_ia32_SwitchJmp_out_req(
        const ir_node *node, int pos)
{
    (void) node;
    (void) pos;
    return arch_no_register_req;
}
static arch_irn_class_t ia32_classify(const ir_node *irn)
{
    arch_irn_class_t classification = 0;

    assert(is_ia32_irn(irn));

    if (is_ia32_is_reload(irn))
        classification |= arch_irn_class_reload;

    if (is_ia32_is_spill(irn))
        classification |= arch_irn_class_spill;

    if (is_ia32_is_remat(irn))
        classification |= arch_irn_class_remat;

    return classification;
}
/**
 * The IA32 ABI callback object.
 */
typedef struct {
    be_abi_call_flags_bits_t flags;  /**< The call flags. */
    const arch_env_t        *aenv;   /**< The architecture environment. */
    ir_graph                *irg;    /**< The associated graph. */
} ia32_abi_env_t;
static ir_entity *ia32_get_frame_entity(const ir_node *irn)
{
    return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
}

static void ia32_set_frame_entity(ir_node *irn, ir_entity *ent)
{
    set_ia32_frame_ent(irn, ent);
}
static void ia32_set_frame_offset(ir_node *irn, int bias)
{
    if (get_ia32_frame_ent(irn) == NULL)
        return;

    if (is_ia32_Pop(irn) || is_ia32_PopMem(irn)) {
        ia32_code_gen_t *cg = ia32_current_cg;
        int omit_fp = be_abi_omit_fp(cg->birg->abi);
        if (omit_fp) {
            /* Pop nodes modify the stack pointer before calculating the
             * destination address, so fix this here */
            bias -= 4;
        }
    }
    add_ia32_am_offs_int(irn, bias);
}
static int ia32_get_sp_bias(const ir_node *node)
{
    if (is_ia32_Call(node))
        return -(int)get_ia32_call_attr_const(node)->pop;

    if (is_ia32_Push(node))
        return 4;

    if (is_ia32_Pop(node) || is_ia32_PopMem(node))
        return -4;

    return 0;
}
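/* Reading the values above (an illustration, not part of the original file):
 * a Push enlarges the stack frame by 4 bytes, a Pop/PopMem shrinks it by 4,
 * and a Call whose callee pops its own arguments (e.g. stdcall with 8 bytes
 * of arguments, attr->pop == 8) shrinks the frame by that amount, hence -8. */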
/**
 * Generate the routine prologue.
 *
 * @param self       The callback object.
 * @param mem        A pointer to the mem node. Update this if you define new memory.
 * @param reg_map    A map mapping all callee_save/ignore/parameter registers to their defining nodes.
 * @param stack_bias Points to the current stack bias, can be modified if needed.
 *
 * @return The register which shall be used as a stack frame base.
 *
 * All nodes which define registers in @p reg_map must keep @p reg_map current.
 */
static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map, int *stack_bias)
{
    ia32_abi_env_t   *env      = self;
    ia32_code_gen_t  *cg       = ia32_current_cg;
    const arch_env_t *arch_env = env->aenv;

    ia32_curr_fp_ommitted = env->flags.try_omit_fp;
    if (! env->flags.try_omit_fp) {
        ir_node *bl      = get_irg_start_block(env->irg);
        ir_node *curr_sp = be_abi_reg_map_get(reg_map, arch_env->sp);
        ir_node *curr_bp = be_abi_reg_map_get(reg_map, arch_env->bp);
        ir_node *noreg   = ia32_new_NoReg_gp(cg);
        ir_node *push;

        /* mark bp register as ignore */
        be_set_constr_single_reg_out(get_Proj_pred(curr_bp),
                get_Proj_proj(curr_bp), arch_env->bp, arch_register_req_type_ignore);

        /* push ebp */
        push    = new_bd_ia32_Push(NULL, bl, noreg, noreg, *mem, curr_bp, curr_sp);
        curr_sp = new_r_Proj(bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
        *mem    = new_r_Proj(bl, push, mode_M, pn_ia32_Push_M);

        /* the push must have SP out register */
        arch_set_irn_register(curr_sp, arch_env->sp);

        /* this modifies the stack bias, because we pushed 32bit */
        *stack_bias -= 4;

        /* move esp to ebp */
        curr_bp = be_new_Copy(arch_env->bp->reg_class, bl, curr_sp);
        be_set_constr_single_reg_out(curr_bp, 0, arch_env->bp,
                                     arch_register_req_type_ignore);

        /* beware: the copy must be done before any other sp use */
        curr_sp = be_new_CopyKeep_single(arch_env->sp->reg_class, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
        be_set_constr_single_reg_out(curr_sp, 0, arch_env->sp,
                                     arch_register_req_type_produces_sp);

        be_abi_reg_map_set(reg_map, arch_env->sp, curr_sp);
        be_abi_reg_map_set(reg_map, arch_env->bp, curr_bp);

        return arch_env->bp;
    }

    return arch_env->sp;
}
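/* For illustration: with the frame pointer kept, the node sequence built in
 * ia32_abi_prologue models the classic ia32 prologue (a sketch of the code
 * eventually emitted, not emitted literally here):
 *
 *     push ebp          ; the ia32_Push of curr_bp, biases the stack by -4
 *     mov  ebp, esp     ; the be_Copy that makes ebp the frame base
 *
 * With try_omit_fp set, nothing is built and esp itself serves as the base. */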
/**
 * Generate the routine epilogue.
 *
 * @param self    The callback object.
 * @param bl      The block for the epilogue.
 * @param mem     A pointer to the mem node. Update this if you define new memory.
 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
 *
 * All nodes which define registers in @p reg_map must keep @p reg_map current.
 */
static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map)
{
    ia32_abi_env_t   *env      = self;
    const arch_env_t *arch_env = env->aenv;
    ir_node          *curr_sp  = be_abi_reg_map_get(reg_map, arch_env->sp);
    ir_node          *curr_bp  = be_abi_reg_map_get(reg_map, arch_env->bp);

    if (env->flags.try_omit_fp) {
        /* simply remove the stack frame here */
        curr_sp = be_new_IncSP(arch_env->sp, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK, 0);
    } else {
        ir_mode *mode_bp = arch_env->bp->reg_class->mode;

        if (ia32_cg_config.use_leave) {
            ir_node *leave;

            /* leave */
            leave   = new_bd_ia32_Leave(NULL, bl, curr_bp);
            curr_bp = new_r_Proj(bl, leave, mode_bp, pn_ia32_Leave_frame);
            curr_sp = new_r_Proj(bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
        } else {
            ir_node *pop;

            /* the old SP is not needed anymore (kill the proj) */
            assert(is_Proj(curr_sp));
            kill_node(curr_sp);

            /* copy ebp to esp */
            curr_sp = be_new_Copy(&ia32_reg_classes[CLASS_ia32_gp], bl, curr_bp);
            arch_set_irn_register(curr_sp, arch_env->sp);
            be_set_constr_single_reg_out(curr_sp, 0, arch_env->sp,
                                         arch_register_req_type_ignore);

            /* pop ebp */
            pop     = new_bd_ia32_PopEbp(NULL, bl, *mem, curr_sp);
            curr_bp = new_r_Proj(bl, pop, mode_bp, pn_ia32_Pop_res);
            curr_sp = new_r_Proj(bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);

            *mem = new_r_Proj(bl, pop, mode_M, pn_ia32_Pop_M);
        }
        arch_set_irn_register(curr_sp, arch_env->sp);
        arch_set_irn_register(curr_bp, arch_env->bp);
    }

    be_abi_reg_map_set(reg_map, arch_env->sp, curr_sp);
    be_abi_reg_map_set(reg_map, arch_env->bp, curr_bp);
}
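/* For illustration: the two epilogue shapes built above correspond to
 * (sketch only):
 *
 *     leave             ; if ia32_cg_config.use_leave is set
 * or
 *     mov esp, ebp      ; the be_Copy restoring esp from the frame base
 *     pop ebp           ; the ia32_PopEbp restoring the caller's ebp
 *
 * and to a plain IncSP shrinking the frame when the frame pointer is omitted. */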
/**
 * Initialize the callback object.
 * @param call The call object.
 * @param aenv The architecture environment.
 * @param irg  The graph with the method.
 * @return Some pointer. This pointer is passed to all other callback functions as self object.
 */
static void *ia32_abi_init(const be_abi_call_t *call, const arch_env_t *aenv, ir_graph *irg)
{
    ia32_abi_env_t      *env = XMALLOC(ia32_abi_env_t);
    be_abi_call_flags_t  fl  = be_abi_call_get_flags(call);
    env->flags = fl.bits;
    env->irg   = irg;
    env->aenv  = aenv;
    return env;
}
/**
 * Destroy the callback object.
 * @param self The callback object.
 */
static void ia32_abi_done(void *self)
{
    free(self);
}
/**
 * Build the between type and entities if not already built.
 */
static void ia32_build_between_type(void)
{
#define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
    if (! between_type) {
        ir_type *old_bp_type   = new_type_primitive(mode_Iu);
        ir_type *ret_addr_type = new_type_primitive(mode_Iu);

        between_type = new_type_struct(IDENT("ia32_between_type"));
        old_bp_ent   = new_entity(between_type, IDENT("old_bp"), old_bp_type);
        ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type);

        set_entity_offset(old_bp_ent, 0);
        set_entity_offset(ret_addr_ent, get_type_size_bytes(old_bp_type));
        set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
        set_type_state(between_type, layout_fixed);

        omit_fp_between_type = new_type_struct(IDENT("ia32_between_type_omit_fp"));
        omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type);

        set_entity_offset(omit_fp_ret_addr_ent, 0);
        set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
        set_type_state(omit_fp_between_type, layout_fixed);
    }
#undef IDENT
}
/**
 * Produces the type which sits between the stack args and the locals on the stack.
 * It will contain the return address and space to store the old base pointer.
 * @return The Firm type modeling the ABI between type.
 */
static ir_type *ia32_abi_get_between_type(void *self)
{
    ia32_abi_env_t *env = self;

    ia32_build_between_type();
    return env->flags.try_omit_fp ? omit_fp_between_type : between_type;
}
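/* Stack layout around the between type, stack growing downwards (an
 * illustrative sketch of what the entities built above describe):
 *
 *     ...               <- caller's frame
 *     stack arguments
 *     return address    <- ret_addr_ent (always present)
 *     old base pointer  <- old_bp_ent (only when the frame pointer is kept)
 *     local variables   <- callee's frame
 */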
/**
 * Return the stack entity that contains the return address.
 */
ir_entity *ia32_get_return_address_entity(void)
{
    ia32_build_between_type();
    return ia32_curr_fp_ommitted ? omit_fp_ret_addr_ent : ret_addr_ent;
}
/**
 * Return the stack entity that contains the frame address.
 */
ir_entity *ia32_get_frame_address_entity(void)
{
    ia32_build_between_type();
    return ia32_curr_fp_ommitted ? NULL : old_bp_ent;
}
/**
 * Get the estimated cycle count for @p irn.
 *
 * @param irn The node.
 *
 * @return The estimated cycle count for this operation.
 */
static int ia32_get_op_estimated_cost(const ir_node *irn)
{
    int            cost;
    ia32_op_type_t op_tp;

    if (!is_ia32_irn(irn))
        return 0;

    assert(is_ia32_irn(irn));

    cost  = get_ia32_latency(irn);
    op_tp = get_ia32_op_type(irn);

    if (is_ia32_CopyB(irn)) {
        cost = 250;
    }
    else if (is_ia32_CopyB_i(irn)) {
        int size = get_ia32_copyb_size(irn);
        cost     = 20 + (int)ceil((4.0 / 3.0) * size);
    }
    /* in case of address mode operations add additional cycles */
    else if (op_tp == ia32_AddrModeD || op_tp == ia32_AddrModeS) {
        /*
            In case of stack access and access to fixed addresses add 5 cycles
            (we assume they are in cache), other memory operations cost 20
            cycles.
        */
        if (is_ia32_use_frame(irn) || (
            is_ia32_NoReg_GP(get_irn_n(irn, n_ia32_base)) &&
            is_ia32_NoReg_GP(get_irn_n(irn, n_ia32_index))
            )) {
            cost += 5;
        } else {
            cost += 20;
        }
    }

    return cost;
}
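/* A worked example of the estimate above (illustration only): an immediate
 * CopyB of 12 bytes costs 20 + ceil(4/3 * 12) = 36 cycles, while an
 * AddrModeS instruction going through the frame costs its base latency
 * plus 5 cycles, since stack slots are assumed to be in the cache. */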
/**
 * Returns the inverse operation of @p irn, recalculating the argument at position @p i.
 *
 * @param irn     The original operation
 * @param i       Index of the argument we want the inverse operation to yield
 * @param inverse struct to be filled with the resulting inverse op
 * @param obstack The obstack to use for allocation of the returned nodes array
 * @return The inverse operation or NULL if the operation is not invertible
 */
static arch_inverse_t *ia32_get_inverse(const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst)
{
    ir_mode  *mode;
    ir_mode  *irn_mode;
    ir_node  *block, *noreg, *nomem;
    dbg_info *dbg;

    /* we cannot invert non-ia32 irns */
    if (! is_ia32_irn(irn))
        return NULL;

    /* operand must always be a real operand (not base, index or mem) */
    if (i != n_ia32_binary_left && i != n_ia32_binary_right)
        return NULL;

    /* we don't invert address mode operations */
    if (get_ia32_op_type(irn) != ia32_Normal)
        return NULL;

    /* TODO: adjust for new immediates... */
    ir_fprintf(stderr, "TODO: fix get_inverse for new immediates (%+F)\n",
               irn);
    return NULL;

    block    = get_nodes_block(irn);
    mode     = get_irn_mode(irn);
    irn_mode = get_irn_mode(irn);
    noreg    = get_irn_n(irn, 0);
    nomem    = new_NoMem();
    dbg      = get_irn_dbg_info(irn);

    /* initialize structure */
    inverse->nodes = obstack_alloc(obst, 2 * sizeof(inverse->nodes[0]));
    inverse->costs = 0;
    inverse->n     = 1;

    switch (get_ia32_irn_opcode(irn)) {
        case iro_ia32_Add:
            if (get_ia32_immop_type(irn) == ia32_ImmConst) {
                /* we have an add with a const here */
                /* inverse == add with negated const */
                inverse->nodes[0] = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
                inverse->costs   += 1;
                copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
                set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
                set_ia32_commutative(inverse->nodes[0]);
            }
            else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
                /* we have an add with a symconst here */
                /* inverse == sub with const */
                inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
                inverse->costs   += 2;
                copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
            }
            else {
                /* normal add: inverse == sub */
                inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, i ^ 1));
                inverse->costs   += 2;
            }
            break;
        case iro_ia32_Sub:
            if (get_ia32_immop_type(irn) != ia32_ImmNone) {
                /* we have a sub with a const/symconst here */
                /* inverse == add with this const */
                inverse->nodes[0] = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
                inverse->costs   += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
                copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
            }
            else {
                /* normal sub */
                if (i == n_ia32_binary_left) {
                    inverse->nodes[0] = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, 3));
                }
                else {
                    inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, get_irn_n(irn, n_ia32_binary_left), (ir_node*) irn);
                }
                inverse->costs += 1;
            }
            break;
        case iro_ia32_Xor:
            if (get_ia32_immop_type(irn) != ia32_ImmNone) {
                /* xor with const: inverse = xor */
                inverse->nodes[0] = new_bd_ia32_Xor(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
                inverse->costs   += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
                copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
            }
            else {
                /* normal xor */
                inverse->nodes[0] = new_bd_ia32_Xor(dbg, block, noreg, noreg, nomem, (ir_node *) irn, get_irn_n(irn, i));
                inverse->costs   += 1;
            }
            break;
        case iro_ia32_Not:
            inverse->nodes[0] = new_bd_ia32_Not(dbg, block, (ir_node*) irn);
            inverse->costs   += 1;
            break;
        case iro_ia32_Neg:
            inverse->nodes[0] = new_bd_ia32_Neg(dbg, block, (ir_node*) irn);
            inverse->costs   += 1;
            break;
        default:
            /* inverse operation not supported */
            return NULL;
    }

    return inverse;
}
static ir_mode *get_spill_mode_mode(const ir_mode *mode)
{
    if (mode_is_float(mode))
        return mode_E;

    return mode_Iu;
}

/**
 * Get the mode that should be used for spilling value node
 */
static ir_mode *get_spill_mode(const ir_node *node)
{
    ir_mode *mode = get_irn_mode(node);
    return get_spill_mode_mode(mode);
}
/**
 * Checks whether an addressmode reload for a node with mode mode is compatible
 * with a spillslot of mode spill_mode
 */
static int ia32_is_spillmode_compatible(const ir_mode *mode, const ir_mode *spillmode)
{
    return !mode_is_float(mode) || mode == spillmode;
}
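/* Example (illustration only): integer modes are always compatible since they
 * spill as mode_Iu. A mode_F value, however, is spilled as mode_E (see
 * get_spill_mode_mode above), so mode != spillmode and folding a reload of it
 * into an instruction as a memory operand is rejected. */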
/**
 * Check if irn can load its operand at position i from memory (source addressmode).
 * @param irn The irn to be checked
 * @param i   The operands position
 * @return Non-Zero if operand can be loaded
 */
static int ia32_possible_memory_operand(const ir_node *irn, unsigned int i)
{
    ir_node       *op        = get_irn_n(irn, i);
    const ir_mode *mode      = get_irn_mode(op);
    const ir_mode *spillmode = get_spill_mode(op);

    if (!is_ia32_irn(irn)                              ||  /* must be an ia32 irn */
        get_ia32_op_type(irn) != ia32_Normal           ||  /* must not already be an addressmode irn */
        !ia32_is_spillmode_compatible(mode, spillmode) ||
        is_ia32_use_frame(irn))                            /* must not already use frame */
        return 0;

    switch (get_ia32_am_support(irn)) {
        case ia32_am_none:
            return 0;

        case ia32_am_unary:
            if (i != n_ia32_unary_op)
                return 0;
            break;

        case ia32_am_binary:
            switch (i) {
                case n_ia32_binary_left: {
                    const arch_register_req_t *req;
                    if (!is_ia32_commutative(irn))
                        return 0;

                    /* we can't swap left/right for limited registers
                     * (As this (currently) breaks constraint handling copies)
                     */
                    req = get_ia32_in_req(irn, n_ia32_binary_left);
                    if (req->type & arch_register_req_type_limited)
                        return 0;
                    break;
                }

                case n_ia32_binary_right:
                    break;

                default:
                    return 0;
            }
            break;

        default:
            panic("Unknown AM type");
    }

    /* HACK: must not already use "real" memory.
     * This can happen for Call and Div */
    if (!is_NoMem(get_irn_n(irn, n_ia32_mem)))
        return 0;

    return 1;
}
static void ia32_perform_memory_operand(ir_node *irn, ir_node *spill,
                                        unsigned int i)
{
    ir_mode *load_mode;
    ir_mode *dest_op_mode;

    assert(ia32_possible_memory_operand(irn, i) && "Cannot perform memory operand change");

    set_ia32_op_type(irn, ia32_AddrModeS);

    load_mode    = get_irn_mode(get_irn_n(irn, i));
    dest_op_mode = get_ia32_ls_mode(irn);
    if (get_mode_size_bits(load_mode) <= get_mode_size_bits(dest_op_mode)) {
        set_ia32_ls_mode(irn, load_mode);
    }
    set_ia32_use_frame(irn);
    set_ia32_need_stackent(irn);

    if (i == n_ia32_binary_left                    &&
        get_ia32_am_support(irn) == ia32_am_binary &&
        /* immediates are only allowed on the right side */
        !is_ia32_Immediate(get_irn_n(irn, n_ia32_binary_right))) {
        ia32_swap_left_right(irn);
        i = n_ia32_binary_right;
    }

    assert(is_NoMem(get_irn_n(irn, n_ia32_mem)));

    set_irn_n(irn, n_ia32_base, get_irg_frame(get_irn_irg(irn)));
    set_irn_n(irn, n_ia32_mem,  spill);
    set_irn_n(irn, i,           ia32_get_admissible_noreg(ia32_current_cg, irn, i));
    set_ia32_is_reload(irn);
}
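/* The net effect, roughly (illustration only, register names are
 * placeholders): an  add eax, ebx  whose operand ebx was spilled becomes
 *
 *     add eax, DWORD PTR [<frame> + <spillslot entity>]
 *
 * i.e. the explicit reload disappears and the operand is fetched through
 * source address mode instead. */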
static const be_abi_callbacks_t ia32_abi_callbacks = {
    ia32_abi_init,
    ia32_abi_done,
    ia32_abi_get_between_type,
    ia32_abi_prologue,
    ia32_abi_epilogue
};
/* register allocator interface */
static const arch_irn_ops_t ia32_irn_ops = {
    get_ia32_in_req,
    ia32_classify,
    ia32_get_frame_entity,
    ia32_set_frame_entity,
    ia32_set_frame_offset,
    ia32_get_sp_bias,
    ia32_get_inverse,
    ia32_get_op_estimated_cost,
    ia32_possible_memory_operand,
    ia32_perform_memory_operand,
};
/* special register allocator interface for SwitchJmp
   as it possibly has a WIDE range of Proj numbers.
   We don't want to allocate output for register constraints for
   all of them. */
static const arch_irn_ops_t ia32_SwitchJmp_irn_ops = {
    /* Note: we also use SwitchJmp_out_req for the inputs too:
       This is because the bearch API has a conceptual problem at the moment.
       Querying for negative proj numbers, which can happen for switches,
       isn't possible and will result in inputs getting queried */
    get_ia32_SwitchJmp_out_req,
    ia32_classify,
    ia32_get_frame_entity,
    ia32_set_frame_entity,
    ia32_set_frame_offset,
    ia32_get_sp_bias,
    ia32_get_inverse,
    ia32_get_op_estimated_cost,
    ia32_possible_memory_operand,
    ia32_perform_memory_operand,
};
/**************************************************
 *
 *           code generator interface
 *
 **************************************************/
static ir_entity *mcount = NULL;

#define ID(s) new_id_from_chars(s, sizeof(s) - 1)
static void ia32_before_abi(void *self)
{
    lower_mode_b_config_t lower_mode_b_config = {
        mode_Iu,  /* lowered mode */
        mode_Bu,  /* preferred mode for set */
        0,        /* don't lower direct compares */
    };
    ia32_code_gen_t *cg = self;

    ir_lower_mode_b(cg->irg, &lower_mode_b_config);
    if (cg->dump)
        be_dump(cg->irg, "-lower_modeb", dump_ir_block_graph_sched);

    if (cg->gprof) {
        if (mcount == NULL) {
            ir_type *tp = new_type_method(0, 0);
            mcount = new_entity(get_glob_type(), ID("mcount"), tp);
            /* FIXME: enter the right ld_ident here */
            set_entity_ld_ident(mcount, get_entity_ident(mcount));
            set_entity_visibility(mcount, visibility_external_allocated);
        }
        instrument_initcall(cg->irg, mcount);
    }
}
/**
 * Transforms the standard firm graph into an ia32 firm graph.
 */
static void ia32_prepare_graph(void *self)
{
    ia32_code_gen_t *cg = self;

    switch (be_transformer) {
    case TRANSFORMER_DEFAULT:
        /* transform remaining nodes into assembler instructions */
        ia32_transform_graph(cg);
        break;

#ifdef FIRM_GRGEN_BE
    case TRANSFORMER_PBQP:
    case TRANSFORMER_RAND:
        /* transform nodes into assembler instructions by PBQP magic */
        ia32_transform_graph_by_pbqp(cg);
        break;
#endif

    default:
        panic("invalid transformer");
    }

    /* do local optimizations (mainly CSE) */
    optimize_graph_df(cg->irg);

    if (cg->dump)
        be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);

    /* optimize address mode */
    ia32_optimize_graph(cg);

    /* do code placement, to optimize the position of constants */
    place_code(cg->irg);

    if (cg->dump)
        be_dump(cg->irg, "-place", dump_ir_block_graph_sched);
}
ir_node *turn_back_am(ir_node *node)
{
    dbg_info *dbgi  = get_irn_dbg_info(node);
    ir_node  *block = get_nodes_block(node);
    ir_node  *base  = get_irn_n(node, n_ia32_base);
    ir_node  *index = get_irn_n(node, n_ia32_index);
    ir_node  *mem   = get_irn_n(node, n_ia32_mem);
    ir_node  *noreg;

    ir_node  *load     = new_bd_ia32_Load(dbgi, block, base, index, mem);
    ir_node  *load_res = new_rd_Proj(dbgi, block, load, mode_Iu, pn_ia32_Load_res);

    ia32_copy_am_attrs(load, node);
    if (is_ia32_is_reload(node))
        set_ia32_is_reload(load);
    set_irn_n(node, n_ia32_mem, new_NoMem());

    switch (get_ia32_am_support(node)) {
        case ia32_am_unary:
            set_irn_n(node, n_ia32_unary_op, load_res);
            break;

        case ia32_am_binary:
            if (is_ia32_Immediate(get_irn_n(node, n_ia32_binary_right))) {
                set_irn_n(node, n_ia32_binary_left, load_res);
            } else {
                set_irn_n(node, n_ia32_binary_right, load_res);
            }
            break;

        default:
            panic("Unknown AM type");
    }
    noreg = ia32_new_NoReg_gp(ia32_current_cg);
    set_irn_n(node, n_ia32_base,  noreg);
    set_irn_n(node, n_ia32_index, noreg);
    set_ia32_am_offs_int(node, 0);
    set_ia32_am_sc(node, NULL);
    set_ia32_am_scale(node, 0);
    clear_ia32_am_sc_sign(node);

    /* rewire mem-proj */
    if (get_irn_mode(node) == mode_T) {
        const ir_edge_t *edge;
        foreach_out_edge(node, edge) {
            ir_node *out = get_edge_src_irn(edge);
            if (get_irn_mode(out) == mode_M) {
                set_Proj_pred(out, load);
                set_Proj_proj(out, pn_ia32_Load_M);
                break;
            }
        }
    }

    set_ia32_op_type(node, ia32_Normal);
    if (sched_is_scheduled(node))
        sched_add_before(node, load);

    return load_res;
}
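/* For illustration: turning back  add eax, [ebx+8]  re-materializes the
 * explicit load, roughly (sketch only, register names are placeholders):
 *
 *     mov tmp, [ebx+8]   ; the new ia32_Load built above
 *     add eax, tmp       ; the node now consumes the load result
 */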
static ir_node *flags_remat(ir_node *node, ir_node *after)
{
    /* we should turn back source address mode when rematerializing nodes */
    ia32_op_type_t type;
    ir_node       *block;
    ir_node       *copy;

    if (is_Block(after)) {
        block = after;
    } else {
        block = get_nodes_block(after);
    }

    type = get_ia32_op_type(node);
    switch (type) {
        case ia32_AddrModeS:
            turn_back_am(node);
            break;

        case ia32_AddrModeD:
            /* TODO implement this later... */
            panic("found DestAM with flag user %+F this should not happen", node);
            break;

        default: assert(type == ia32_Normal); break;
    }

    copy = exact_copy(node);
    set_nodes_block(copy, block);
    sched_add_after(after, copy);

    return copy;
}
/**
 * Called before the register allocator.
 */
static void ia32_before_ra(void *self)
{
    ia32_code_gen_t *cg = self;

    /* setup fpu rounding modes */
    ia32_setup_fpu_mode(cg);

    /* fixup flags */
    be_sched_fix_flags(cg->birg, &ia32_reg_classes[CLASS_ia32_flags],
                       &flags_remat);

    ia32_add_missing_keeps(cg);
}
/**
 * Transforms a be_Reload into a ia32 Load.
 */
static void transform_to_Load(ia32_code_gen_t *cg, ir_node *node)
{
    ir_graph  *irg         = get_irn_irg(node);
    dbg_info  *dbg         = get_irn_dbg_info(node);
    ir_node   *block       = get_nodes_block(node);
    ir_entity *ent         = be_get_frame_entity(node);
    ir_mode   *mode        = get_irn_mode(node);
    ir_mode   *spillmode   = get_spill_mode(node);
    ir_node   *noreg       = ia32_new_NoReg_gp(cg);
    ir_node   *sched_point = NULL;
    ir_node   *ptr         = get_irg_frame(irg);
    ir_node   *mem         = get_irn_n(node, be_pos_Reload_mem);
    ir_node   *new_op, *proj;
    const arch_register_t *reg;

    if (sched_is_scheduled(node)) {
        sched_point = sched_prev(node);
    }

    if (mode_is_float(spillmode)) {
        if (ia32_cg_config.use_sse2)
            new_op = new_bd_ia32_xLoad(dbg, block, ptr, noreg, mem, spillmode);
        else
            new_op = new_bd_ia32_vfld(dbg, block, ptr, noreg, mem, spillmode);
    }
    else if (get_mode_size_bits(spillmode) == 128) {
        /* Reload 128 bit SSE registers */
        new_op = new_bd_ia32_xxLoad(dbg, block, ptr, noreg, mem);
    }
    else
        new_op = new_bd_ia32_Load(dbg, block, ptr, noreg, mem);

    set_ia32_op_type(new_op, ia32_AddrModeS);
    set_ia32_ls_mode(new_op, spillmode);
    set_ia32_frame_ent(new_op, ent);
    set_ia32_use_frame(new_op);
    set_ia32_is_reload(new_op);

    DBG_OPT_RELOAD2LD(node, new_op);

    proj = new_rd_Proj(dbg, block, new_op, mode, pn_ia32_Load_res);

    if (sched_point) {
        sched_add_after(sched_point, new_op);
        sched_remove(node);
    }

    /* copy the register from the old node to the new Load */
    reg = arch_get_irn_register(node);
    arch_set_irn_register(proj, reg);

    SET_IA32_ORIG_NODE(new_op, node);

    exchange(node, proj);
}
/**
 * Transforms a be_Spill node into a ia32 Store.
 */
static void transform_to_Store(ia32_code_gen_t *cg, ir_node *node)
{
    ir_graph  *irg   = get_irn_irg(node);
    dbg_info  *dbg   = get_irn_dbg_info(node);
    ir_node   *block = get_nodes_block(node);
    ir_entity *ent   = be_get_frame_entity(node);
    const ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
    ir_mode   *mode  = get_spill_mode(spillval);
    ir_node   *noreg = ia32_new_NoReg_gp(cg);
    ir_node   *nomem = new_NoMem();
    ir_node   *ptr   = get_irg_frame(irg);
    ir_node   *val   = get_irn_n(node, be_pos_Spill_val);
    ir_node   *store;
    ir_node   *sched_point = NULL;

    if (sched_is_scheduled(node)) {
        sched_point = sched_prev(node);
    }

    /* No need to spill unknown values... */
    if (is_ia32_Unknown_GP(val) ||
        is_ia32_Unknown_VFP(val) ||
        is_ia32_Unknown_XMM(val)) {
        store = nomem;
        if (sched_point)
            sched_remove(node);

        exchange(node, store);
        return;
    }

    if (mode_is_float(mode)) {
        if (ia32_cg_config.use_sse2)
            store = new_bd_ia32_xStore(dbg, block, ptr, noreg, nomem, val);
        else
            store = new_bd_ia32_vfst(dbg, block, ptr, noreg, nomem, val, mode);
    } else if (get_mode_size_bits(mode) == 128) {
        /* Spill 128 bit SSE registers */
        store = new_bd_ia32_xxStore(dbg, block, ptr, noreg, nomem, val);
    } else if (get_mode_size_bits(mode) == 8) {
        store = new_bd_ia32_Store8Bit(dbg, block, ptr, noreg, nomem, val);
    } else {
        store = new_bd_ia32_Store(dbg, block, ptr, noreg, nomem, val);
    }

    set_ia32_op_type(store, ia32_AddrModeD);
    set_ia32_ls_mode(store, mode);
    set_ia32_frame_ent(store, ent);
    set_ia32_use_frame(store);
    set_ia32_is_spill(store);
    SET_IA32_ORIG_NODE(store, node);
    DBG_OPT_SPILL2ST(node, store);

    if (sched_point) {
        sched_add_after(sched_point, store);
        sched_remove(node);
    }

    exchange(node, store);
}
static ir_node *create_push(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent)
{
    dbg_info *dbg   = get_irn_dbg_info(node);
    ir_node  *block = get_nodes_block(node);
    ir_node  *noreg = ia32_new_NoReg_gp(cg);
    ir_graph *irg   = get_irn_irg(node);
    ir_node  *frame = get_irg_frame(irg);

    ir_node *push = new_bd_ia32_Push(dbg, block, frame, noreg, mem, noreg, sp);

    set_ia32_frame_ent(push, ent);
    set_ia32_use_frame(push);
    set_ia32_op_type(push, ia32_AddrModeS);
    set_ia32_ls_mode(push, mode_Is);
    set_ia32_is_spill(push);

    sched_add_before(schedpoint, push);
    return push;
}
static ir_node *create_pop(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_entity *ent)
{
    dbg_info *dbg   = get_irn_dbg_info(node);
    ir_node  *block = get_nodes_block(node);
    ir_node  *noreg = ia32_new_NoReg_gp(cg);
    ir_graph *irg   = get_irn_irg(node);
    ir_node  *frame = get_irg_frame(irg);

    ir_node *pop = new_bd_ia32_PopMem(dbg, block, frame, noreg, new_NoMem(), sp);

    set_ia32_frame_ent(pop, ent);
    set_ia32_use_frame(pop);
    set_ia32_op_type(pop, ia32_AddrModeD);
    set_ia32_ls_mode(pop, mode_Is);
    set_ia32_is_reload(pop);

    sched_add_before(schedpoint, pop);
    return pop;
}
static ir_node* create_spproj(ir_node *node, ir_node *pred, int pos)
{
    dbg_info *dbg    = get_irn_dbg_info(node);
    ir_node  *block  = get_nodes_block(node);
    ir_mode  *spmode = mode_Iu;
    const arch_register_t *spreg = &ia32_gp_regs[REG_ESP];
    ir_node  *sp;

    sp = new_rd_Proj(dbg, block, pred, spmode, pos);
    arch_set_irn_register(sp, spreg);

    return sp;
}
/**
 * Transform MemPerm, currently we do this the ugly way and produce
 * push/pop into/from memory cascades. This is possible without using
 * any registers.
 */
static void transform_MemPerm(ia32_code_gen_t *cg, ir_node *node)
{
    ir_node         *block = get_nodes_block(node);
    ir_node         *sp    = be_abi_get_ignore_irn(cg->birg->abi, &ia32_gp_regs[REG_ESP]);
    int              arity = be_get_MemPerm_entity_arity(node);
    ir_node        **pops  = ALLOCAN(ir_node*, arity);
    ir_node         *in[1];
    ir_node         *keep;
    int              i;
    const ir_edge_t *edge;
    const ir_edge_t *next;

    /* create Pushs */
    for (i = 0; i < arity; ++i) {
        ir_entity *inent    = be_get_MemPerm_in_entity(node, i);
        ir_entity *outent   = be_get_MemPerm_out_entity(node, i);
        ir_type   *enttype  = get_entity_type(inent);
        unsigned   entsize  = get_type_size_bytes(enttype);
        unsigned   entsize2 = get_type_size_bytes(get_entity_type(outent));
        ir_node   *mem      = get_irn_n(node, i + 1);
        ir_node   *push;

        /* work around cases where entities have different sizes */
        if (entsize2 < entsize)
            entsize = entsize2;
        assert( (entsize == 4 || entsize == 8) && "spillslot on x86 should be 32 or 64 bit");

        push = create_push(cg, node, node, sp, mem, inent);
        sp = create_spproj(node, push, pn_ia32_Push_stack);
        if (entsize == 8) {
            /* add another push after the first one */
            push = create_push(cg, node, node, sp, mem, inent);
            add_ia32_am_offs_int(push, 4);
            sp = create_spproj(node, push, pn_ia32_Push_stack);
        }

        set_irn_n(node, i, new_Bad());
    }

    /* create pops */
    for (i = arity - 1; i >= 0; --i) {
        ir_entity *inent    = be_get_MemPerm_in_entity(node, i);
        ir_entity *outent   = be_get_MemPerm_out_entity(node, i);
        ir_type   *enttype  = get_entity_type(outent);
        unsigned   entsize  = get_type_size_bytes(enttype);
        unsigned   entsize2 = get_type_size_bytes(get_entity_type(inent));
        ir_node   *pop;

        /* work around cases where entities have different sizes */
        if (entsize2 < entsize)
            entsize = entsize2;
        assert( (entsize == 4 || entsize == 8) && "spillslot on x86 should be 32 or 64 bit");

        pop = create_pop(cg, node, node, sp, outent);
        sp = create_spproj(node, pop, pn_ia32_Pop_stack);
        if (entsize == 8) {
            add_ia32_am_offs_int(pop, 4);

            /* add another pop after the first one */
            pop = create_pop(cg, node, node, sp, outent);
            sp = create_spproj(node, pop, pn_ia32_Pop_stack);
        }

        pops[i] = pop;
    }

    in[0] = sp;
    keep  = be_new_Keep(block, 1, in);
    sched_add_before(node, keep);

    /* exchange memprojs */
    foreach_out_edge_safe(node, edge, next) {
        ir_node *proj = get_edge_src_irn(edge);
        int p = get_Proj_proj(proj);

        assert(p < arity);

        set_Proj_pred(proj, pops[p]);
        set_Proj_proj(proj, pn_ia32_Pop_M);
    }

    /* remove memperm */
    arity = get_irn_arity(node);
    for (i = 0; i < arity; ++i) {
        set_irn_n(node, i, new_Bad());
    }
    sched_remove(node);
}
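/* For illustration: a MemPerm over two 4 byte spillslots becomes, roughly
 * (sketch only, following the loop order above):
 *
 *     push [in0]         ; one create_push per in-entity
 *     push [in1]
 *     pop  [out1]        ; create_pop in reverse order fills the out-entities
 *     pop  [out0]
 *
 * so the permutation runs entirely over the stack without a free register. */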
/**
 * Block-Walker: Calls the transform functions Spill and Reload.
 */
static void ia32_after_ra_walker(ir_node *block, void *env)
{
    ir_node *node, *prev;
    ia32_code_gen_t *cg = env;

    /* beware: the schedule is changed here */
    for (node = sched_last(block); !sched_is_begin(node); node = prev) {
        prev = sched_prev(node);

        if (be_is_Reload(node)) {
            transform_to_Load(cg, node);
        } else if (be_is_Spill(node)) {
            transform_to_Store(cg, node);
        } else if (be_is_MemPerm(node)) {
            transform_MemPerm(cg, node);
        }
    }
}
/**
 * Collects nodes that need frame entities assigned.
 */
static void ia32_collect_frame_entity_nodes(ir_node *node, void *data)
{
    be_fec_env_t  *env = data;
    const ir_mode *mode;
    int            align;

    if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
        mode  = get_spill_mode_mode(get_irn_mode(node));
        align = get_mode_size_bytes(mode);
    } else if (is_ia32_irn(node)             &&
            get_ia32_frame_ent(node) == NULL &&
            is_ia32_use_frame(node)) {
        if (is_ia32_need_stackent(node))
            goto need_stackent;

        switch (get_ia32_irn_opcode(node)) {
need_stackent:
            case iro_ia32_Load: {
                const ia32_attr_t *attr = get_ia32_attr_const(node);

                if (attr->data.need_32bit_stackent) {
                    mode = mode_Is;
                } else if (attr->data.need_64bit_stackent) {
                    mode = mode_Ls;
                } else {
                    mode = get_ia32_ls_mode(node);
                    if (is_ia32_is_reload(node))
                        mode = get_spill_mode_mode(mode);
                }
                align = get_mode_size_bytes(mode);
                break;
            }

            case iro_ia32_vfild:
            case iro_ia32_vfld:
            case iro_ia32_xLoad: {
                mode  = get_ia32_ls_mode(node);
                align = 4;
                break;
            }

            case iro_ia32_FldCW: {
                /* although 2 byte would be enough 4 byte performs best */
                mode  = mode_Iu;
                align = 4;
                break;
            }

            default:
#ifndef NDEBUG
                panic("unexpected frame user while collecting frame entity nodes");

            case iro_ia32_FnstCW:
            case iro_ia32_Store8Bit:
            case iro_ia32_Store:
            case iro_ia32_fst:
            case iro_ia32_fstp:
            case iro_ia32_vfist:
            case iro_ia32_vfisttp:
            case iro_ia32_vfst:
            case iro_ia32_xStore:
            case iro_ia32_xStoreSimple:
#endif
                return;
        }
    } else {
        return;
    }
    be_node_needs_frame_entity(env, node, mode, align);
}
/**
 * We transform Spill and Reload here. This needs to be done before
 * stack biasing otherwise we would miss the corrected offset for these nodes.
 */
static void ia32_after_ra(void *self)
{
    ia32_code_gen_t *cg  = self;
    ir_graph        *irg = cg->irg;
    be_fec_env_t    *fec_env = be_new_frame_entity_coalescer(cg->birg);

    /* create and coalesce frame entities */
    irg_walk_graph(irg, NULL, ia32_collect_frame_entity_nodes, fec_env);
    be_assign_entities(fec_env);
    be_free_frame_entity_coalescer(fec_env);

    irg_block_walk_graph(irg, NULL, ia32_after_ra_walker, cg);
}
/**
 * Last touchups for the graph before emit: x87 simulation to replace the
 * virtual with real x87 instructions, creating a block schedule and peephole
 * optimisations.
 */
static void ia32_finish(void *self)
{
    ia32_code_gen_t *cg  = self;
    ir_graph        *irg = cg->irg;

    ia32_finish_irg(irg, cg);

    /* we might have to rewrite x87 virtual registers */
    if (cg->do_x87_sim) {
        x87_simulate_graph(cg->birg);
    }

    /* do peephole optimisations */
    ia32_peephole_optimization(cg);

    /* create block schedule, this also removes empty blocks which might
     * produce critical edges */
    cg->blk_sched = be_create_block_schedule(irg, cg->birg->exec_freq);
}
/**
 * Emits the code, closes the output file and frees
 * the code generator interface.
 */
static void ia32_codegen(void *self)
{
    ia32_code_gen_t *cg  = self;
    ir_graph        *irg = cg->irg;

    if (ia32_cg_config.emit_machcode) {
        ia32_gen_binary_routine(cg, irg);
    } else {
        ia32_gen_routine(cg, irg);
    }

    /* remove it from the isa */
    cg->isa->cg = NULL;

    assert(ia32_current_cg == cg);
    ia32_current_cg = NULL;

    /* de-allocate code generator */
    free(cg);
}
/**
 * Returns the node representing the PIC base.
 */
static ir_node *ia32_get_pic_base(void *self)
{
    ir_node         *block;
    ia32_code_gen_t *cg      = self;
    ir_node         *get_eip = cg->get_eip;

    if (get_eip != NULL)
        return get_eip;

    block       = get_irg_start_block(cg->irg);
    get_eip     = new_bd_ia32_GetEIP(NULL, block);
    cg->get_eip = get_eip;
    be_dep_on_frame(get_eip);

    return get_eip;
}
static void *ia32_cg_init(be_irg_t *birg);

static const arch_code_generator_if_t ia32_code_gen_if = {
    ia32_cg_init,
    ia32_get_pic_base,   /* return node used as base in pic code addresses */
    ia32_before_abi,     /* before abi introduce hook */
    ia32_prepare_graph,
    NULL,                /* spill */
    ia32_before_ra,      /* before register allocation hook */
    ia32_after_ra,       /* after register allocation hook */
    ia32_finish,         /* called before codegen */
    ia32_codegen         /* emit && done */
};
/**
 * Initializes a IA32 code generator.
 */
static void *ia32_cg_init(be_irg_t *birg)
{
    ia32_isa_t      *isa = (ia32_isa_t *)birg->main_env->arch_env;
    ia32_code_gen_t *cg  = XMALLOCZ(ia32_code_gen_t);

    cg->impl      = &ia32_code_gen_if;
    cg->irg       = birg->irg;
    cg->isa       = isa;
    cg->birg      = birg;
    cg->blk_sched = NULL;
    cg->dump      = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
    cg->gprof     = (birg->main_env->options->gprof) ? 1 : 0;

    if (cg->gprof) {
        /* Linux gprof implementation needs base pointer */
        birg->main_env->options->omit_fp = 0;
    }

    /* enter it */
    isa->cg = cg;

    if (isa->name_obst) {
        obstack_free(isa->name_obst, NULL);
        obstack_init(isa->name_obst);
    }

    assert(ia32_current_cg == NULL);
    ia32_current_cg = cg;

    return (arch_code_generator_t *)cg;
}
/*****************************************************************
 *
 *           Backend ISA
 *
 *****************************************************************/
/*
 * Set output modes for GCC
 */
static const tarval_mode_info mo_integer = {
    TVO_HEX,
    "0x",
    NULL,
};

/*
 * set the tarval output mode of all integer modes to hexadecimal
 */
static void set_tarval_output_modes(void)
{
    int i;

    for (i = get_irp_n_modes() - 1; i >= 0; --i) {
        ir_mode *mode = get_irp_mode(i);

        if (mode_is_int(mode))
            set_tarval_mode_output_option(mode, &mo_integer);
    }
}
const arch_isa_if_t ia32_isa_if;

/**
 * The template that generates a new ISA object.
 * Note that this template can be changed by command line
 * arguments.
 */
static ia32_isa_t ia32_isa_template = {
    {
        &ia32_isa_if,                      /* isa interface implementation */
        &ia32_gp_regs[REG_ESP],            /* stack pointer register */
        &ia32_gp_regs[REG_EBP],            /* base pointer register */
        &ia32_reg_classes[CLASS_ia32_gp],  /* static link pointer register class */
        -1,                                /* stack direction */
        2,                                 /* power of two stack alignment, 2^2 == 4 */
        NULL,                              /* main environment */
        7,                                 /* costs for a spill instruction */
        5,                                 /* costs for a reload instruction */
    },
    NULL,  /* 16bit register names */
    NULL,  /* 8bit register names */
    NULL,  /* 8bit register names high */
    NULL,  /* types */
    NULL,  /* tv_ents */
    NULL,  /* current code generator */
    NULL,  /* abstract machine */
    NULL,  /* name obstack */
};
static void init_asm_constraints(void)
{
    be_init_default_asm_constraint_flags();

    asm_constraint_flags['a'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['b'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['c'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['d'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['D'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['S'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['Q'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['q'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['A'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['l'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['R'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['r'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['p'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['f'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['t'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['u'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['Y'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['X'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['n'] = ASM_CONSTRAINT_FLAG_SUPPORTS_IMMEDIATE;
    asm_constraint_flags['g'] = ASM_CONSTRAINT_FLAG_SUPPORTS_IMMEDIATE;

    /* no support for autodecrement/autoincrement */
    asm_constraint_flags['<'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
    asm_constraint_flags['>'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
    /* no float consts */
    asm_constraint_flags['E'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
    asm_constraint_flags['F'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
    /* makes no sense on x86 */
    asm_constraint_flags['s'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
    /* no support for sse consts yet */
    asm_constraint_flags['C'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
    /* no support for x87 consts yet */
    asm_constraint_flags['G'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
    /* no support for mmx registers yet */
    asm_constraint_flags['y'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
    /* not available in 32bit mode */
    asm_constraint_flags['Z'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
    asm_constraint_flags['e'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;

    /* no code yet to determine register class needed... */
    asm_constraint_flags['X'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
}
/**
 * Initializes the backend ISA.
 */
static arch_env_t *ia32_init(FILE *file_handle)
{
    static int  inited = 0;
    ia32_isa_t *isa;
    int         i, n;

    if (inited)
        return NULL;
    inited = 1;

    set_tarval_output_modes();

    isa = XMALLOC(ia32_isa_t);
    memcpy(isa, &ia32_isa_template, sizeof(*isa));

    if (mode_fpcw == NULL) {
        mode_fpcw = new_ir_mode("Fpcw", irms_int_number, 16, 0, irma_none, 0);
    }

    ia32_register_init();
    ia32_create_opcodes(&ia32_irn_ops);
    /* special handling for SwitchJmp */
    op_ia32_SwitchJmp->ops.be_ops = &ia32_SwitchJmp_irn_ops;

    be_emit_init(file_handle);
    isa->regs_16bit     = pmap_create();
    isa->regs_8bit      = pmap_create();
    isa->regs_8bit_high = pmap_create();
    isa->types          = pmap_create();
    isa->tv_ent         = pmap_create();
    isa->cpu            = ia32_init_machine_description();

    ia32_build_16bit_reg_map(isa->regs_16bit);
    ia32_build_8bit_reg_map(isa->regs_8bit);
    ia32_build_8bit_reg_map_high(isa->regs_8bit_high);

    isa->name_obst = XMALLOC(struct obstack);
    obstack_init(isa->name_obst);

    /* enter the ISA object into the intrinsic environment */
    intrinsic_env.isa = isa;

    /* emit asm includes */
    n = get_irp_n_asms();
    for (i = 0; i < n; ++i) {
        be_emit_cstring("#APP\n");
        be_emit_ident(get_irp_asm(i));
        be_emit_cstring("\n#NO_APP\n");
    }

    /* needed for the debug support */
    be_gas_emit_switch_section(GAS_SECTION_TEXT);
    be_emit_cstring(".Ltext0:\n");
    be_emit_write_line();

    /* we mark referenced global entities, so we can only emit those which
     * are actually referenced. (Note: you mustn't use the type visited flag
     * elsewhere in the backend)
     */
    inc_master_type_visited();

    return &isa->arch_env;
}
/**
 * Closes the output file and frees the ISA structure.
 */
static void ia32_done(void *self)
{
    ia32_isa_t *isa = self;

    /* emit now all global declarations */
    be_gas_emit_decls(isa->arch_env.main_env, 1);

    pmap_destroy(isa->regs_16bit);
    pmap_destroy(isa->regs_8bit);
    pmap_destroy(isa->regs_8bit_high);
    pmap_destroy(isa->tv_ent);
    pmap_destroy(isa->types);

    obstack_free(isa->name_obst, NULL);
    free(isa->name_obst);

    be_emit_exit();

    free(self);
}
/**
 * Return the number of register classes for this architecture.
 * We report always these:
 *  - the general purpose registers
 *  - the SSE floating point register set
 *  - the virtual floating point registers
 *  - the SSE vector register set
 */
static unsigned ia32_get_n_reg_class(void)
{
    return N_CLASSES;
}

/**
 * Return the register class for index i.
 */
static const arch_register_class_t *ia32_get_reg_class(unsigned i)
{
    assert(i < N_CLASSES);
    return &ia32_reg_classes[i];
}
/**
 * Get the register class which shall be used to store a value of a given mode.
 * @param self The this pointer.
 * @param mode The mode in question.
 * @return A register class which can hold values of the given mode.
 */
const arch_register_class_t *ia32_get_reg_class_for_mode(const ir_mode *mode)
{
    if (mode_is_float(mode)) {
        return ia32_cg_config.use_sse2 ? &ia32_reg_classes[CLASS_ia32_xmm] : &ia32_reg_classes[CLASS_ia32_vfp];
    }
    return &ia32_reg_classes[CLASS_ia32_gp];
}
/**
 * Returns the register for parameter nr.
 */
static const arch_register_t *ia32_get_RegParam_reg(unsigned cc, unsigned nr,
                                                    const ir_mode *mode)
{
    static const arch_register_t *gpreg_param_reg_fastcall[] = {
        &ia32_gp_regs[REG_ECX],
        &ia32_gp_regs[REG_EDX],
        NULL
    };
    static const unsigned MAXNUM_GPREG_ARGS = 3;

    static const arch_register_t *gpreg_param_reg_regparam[] = {
        &ia32_gp_regs[REG_EAX],
        &ia32_gp_regs[REG_EDX],
        &ia32_gp_regs[REG_ECX]
    };

    static const arch_register_t *gpreg_param_reg_this[] = {
        &ia32_gp_regs[REG_ECX],
        NULL,
        NULL
    };

    static const arch_register_t *fpreg_sse_param_reg_std[] = {
        &ia32_xmm_regs[REG_XMM0],
        &ia32_xmm_regs[REG_XMM1],
        &ia32_xmm_regs[REG_XMM2],
        &ia32_xmm_regs[REG_XMM3],
        &ia32_xmm_regs[REG_XMM4],
        &ia32_xmm_regs[REG_XMM5],
        &ia32_xmm_regs[REG_XMM6],
        &ia32_xmm_regs[REG_XMM7]
    };

    static const arch_register_t *fpreg_sse_param_reg_this[] = {
        NULL,  /* in case of a "this" pointer, the first parameter must not be a float */
    };
    static const unsigned MAXNUM_SSE_ARGS = 8;

    if ((cc & cc_this_call) && nr == 0)
        return gpreg_param_reg_this[0];

    if (! (cc & cc_reg_param))
        return NULL;

    if (mode_is_float(mode)) {
        if (!ia32_cg_config.use_sse2 || (cc & cc_fpreg_param) == 0)
            return NULL;
        if (nr >= MAXNUM_SSE_ARGS)
            return NULL;

        if (cc & cc_this_call) {
            return fpreg_sse_param_reg_this[nr];
        }
        return fpreg_sse_param_reg_std[nr];
    } else if (mode_is_int(mode) || mode_is_reference(mode)) {
        unsigned num_regparam;

        if (get_mode_size_bits(mode) > 32)
            return NULL;

        if (nr >= MAXNUM_GPREG_ARGS)
            return NULL;

        if (cc & cc_this_call) {
            return gpreg_param_reg_this[nr];
        }
        num_regparam = cc & ~cc_bits;
        if (num_regparam == 0) {
            /* default fastcall */
            return gpreg_param_reg_fastcall[nr];
        }
        if (nr < num_regparam)
            return gpreg_param_reg_regparam[nr];
        return NULL;
    }

    panic("unknown argument mode");
}
/**
 * Get the ABI restrictions for procedure calls.
 * @param self        The this pointer.
 * @param method_type The type of the method (procedure) in question.
 * @param abi         The abi object to be modified
 */
static void ia32_get_call_abi(const void *self, ir_type *method_type,
                              be_abi_call_t *abi)
{
    ir_type  *tp;
    ir_mode  *mode;
    unsigned  cc;
    int       n, i, regnum;
    int       pop_amount = 0;
    be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);

    (void) self;

    /* set abi flags for calls */
    call_flags.bits.left_to_right         = 0;  /* always last arg first on stack */
    call_flags.bits.store_args_sequential = 0;
    /* call_flags.bits.try_omit_fp not changed: can handle both settings */
    call_flags.bits.fp_free               = 0;  /* the frame pointer is fixed in IA32 */
    call_flags.bits.call_has_imm          = 0;  /* No call immediate, we handle this by ourselves */

    /* set parameter passing style */
    be_abi_call_set_flags(abi, call_flags, &ia32_abi_callbacks);

    cc = get_method_calling_convention(method_type);
    if (get_method_variadicity(method_type) == variadicity_variadic) {
        /* pass all parameters of a variadic function on the stack */
        cc = cc_cdecl_set | (cc & cc_this_call);
    } else {
        if (get_method_additional_properties(method_type) & mtp_property_private &&
            ia32_cg_config.optimize_cc) {
            /* set the fast calling conventions (allowing up to 3) */
            cc = SET_FASTCALL(cc) | 3;
        }
    }

    /* we have to pop the shadow parameter ourself for compound calls */
    if ( (get_method_calling_convention(method_type) & cc_compound_ret)
         && !(cc & cc_reg_param)) {
        pop_amount += get_mode_size_bytes(mode_P_data);
    }

    n = get_method_n_params(method_type);
    for (i = regnum = 0; i < n; i++) {
        const arch_register_t *reg = NULL;

        tp   = get_method_param_type(method_type, i);
        mode = get_type_mode(tp);
        if (mode != NULL) {
            reg = ia32_get_RegParam_reg(cc, regnum, mode);
        }
        if (reg != NULL) {
            be_abi_call_param_reg(abi, i, reg);
            ++regnum;
        } else {
            /* Micro optimisation: if the mode is shorter than 4 bytes, load 4 bytes.
             * movl has a shorter opcode than mov[sz][bw]l */
            ir_mode *load_mode = mode;

            if (mode != NULL) {
                unsigned size = get_mode_size_bytes(mode);

                if (cc & cc_callee_clear_stk) {
                    pop_amount += (size + 3U) & ~3U;
                }

                if (size < 4)
                    load_mode = mode_Iu;
            }

            be_abi_call_param_stack(abi, i, load_mode, 4, 0, 0);
        }
    }

    be_abi_call_set_pop(abi, pop_amount);

    /* set return registers */
    n = get_method_n_ress(method_type);

    assert(n <= 2 && "more than two results not supported");

    /* In case of 64bit returns, we will have two 32bit values */
    if (n == 2) {
        tp   = get_method_res_type(method_type, 0);
        mode = get_type_mode(tp);

        assert(!mode_is_float(mode) && "two FP results not supported");

        tp   = get_method_res_type(method_type, 1);
        mode = get_type_mode(tp);

        assert(!mode_is_float(mode) && "mixed INT, FP results not supported");

        be_abi_call_res_reg(abi, 0, &ia32_gp_regs[REG_EAX]);
        be_abi_call_res_reg(abi, 1, &ia32_gp_regs[REG_EDX]);
    } else if (n == 1) {
        const arch_register_t *reg;

        tp   = get_method_res_type(method_type, 0);
        assert(is_atomic_type(tp));
        mode = get_type_mode(tp);

        reg = mode_is_float(mode) ? &ia32_vfp_regs[REG_VF0] : &ia32_gp_regs[REG_EAX];

        be_abi_call_res_reg(abi, 0, reg);
    }
}
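/* Two examples of the rules above (illustration only): a variadic function
 * like printf is forced to cc_cdecl_set, so every argument travels on the
 * stack and the callee pops nothing. A file-private function under
 * optimize_cc instead gets SET_FASTCALL(cc) | 3, allowing up to three
 * integer arguments in registers before falling back to the stack. */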
int ia32_to_appear_in_schedule(void *block_env, const ir_node *irn)
{
    (void) block_env;

    if (!is_ia32_irn(irn)) {
        return -1;
    }

    if (is_ia32_NoReg_GP(irn) || is_ia32_NoReg_VFP(irn) || is_ia32_NoReg_XMM(irn)
            || is_ia32_Unknown_GP(irn) || is_ia32_Unknown_XMM(irn)
            || is_ia32_Unknown_VFP(irn) || is_ia32_ChangeCW(irn)
            || is_ia32_Immediate(irn))
        return 0;

    return 1;
}
/**
 * Initializes the code generator interface.
 */
static const arch_code_generator_if_t *ia32_get_code_generator_if(void *self)
{
    (void) self;
    return &ia32_code_gen_if;
}
/**
 * Returns the estimated execution time of an ia32 irn.
 */
static sched_timestep_t ia32_sched_exectime(void *env, const ir_node *irn)
{
    (void) env;
    return is_ia32_irn(irn) ? ia32_get_op_estimated_cost(irn) : 1;
}
list_sched_selector_t ia32_sched_selector;

/**
 * Returns the reg_pressure scheduler with to_appear_in_schedule() overloaded
 */
static const list_sched_selector_t *ia32_get_list_sched_selector(
        const void *self, list_sched_selector_t *selector)
{
    (void) self;
    memcpy(&ia32_sched_selector, selector, sizeof(ia32_sched_selector));
    ia32_sched_selector.exectime              = ia32_sched_exectime;
    ia32_sched_selector.to_appear_in_schedule = ia32_to_appear_in_schedule;
    return &ia32_sched_selector;
}
static const ilp_sched_selector_t *ia32_get_ilp_sched_selector(const void *self)
{
    (void) self;
    return NULL;
}
/**
 * Returns the necessary byte alignment for storing a register of given class.
 */
static int ia32_get_reg_class_alignment(const arch_register_class_t *cls)
{
    ir_mode *mode = arch_register_class_mode(cls);
    int bytes     = get_mode_size_bytes(mode);

    if (mode_is_float(mode) && bytes > 8)
        return 16;
    return bytes;
}
static const be_execution_unit_t ***ia32_get_allowed_execution_units(
        const ir_node *irn)
{
    static const be_execution_unit_t *_allowed_units_BRANCH[] = {
        &ia32_execution_units_BRANCH[IA32_EXECUNIT_TP_BRANCH_BRANCH1],
        &ia32_execution_units_BRANCH[IA32_EXECUNIT_TP_BRANCH_BRANCH2],
        NULL,
    };

    static const be_execution_unit_t *_allowed_units_GP[] = {
        &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EAX],
        &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EBX],
        &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_ECX],
        &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EDX],
        &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_ESI],
        &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EDI],
        &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EBP],
        NULL,
    };

    static const be_execution_unit_t *_allowed_units_DUMMY[] = {
        &be_machine_execution_units_DUMMY[0],
        NULL,
    };

    static const be_execution_unit_t **_units_callret[] = {
        _allowed_units_BRANCH,
        NULL
    };

    static const be_execution_unit_t **_units_other[] = {
        _allowed_units_GP,
        NULL
    };

    static const be_execution_unit_t **_units_dummy[] = {
        _allowed_units_DUMMY,
        NULL
    };

    const be_execution_unit_t ***ret;

    if (is_ia32_irn(irn)) {
        ret = get_ia32_exec_units(irn);
    } else if (is_be_node(irn)) {
        if (be_is_Return(irn)) {
            ret = _units_callret;
        } else if (be_is_Barrier(irn)) {
            ret = _units_dummy;
        } else {
            ret = _units_other;
        }
    } else {
        ret = _units_dummy;
    }

    return ret;
}
/**
 * Return the abstract ia32 machine.
 */
static const be_machine_t *ia32_get_machine(const void *self)
{
    const ia32_isa_t *isa = self;
    return isa->cpu;
}
/**
 * Return irp irgs in the desired order.
 */
static ir_graph **ia32_get_irg_list(const void *self, ir_graph ***irg_list)
{
    (void) self;
    (void) irg_list;
    return NULL;
}
static void ia32_mark_remat(ir_node *node)
{
    if (is_ia32_irn(node)) {
        set_ia32_is_remat(node);
    }
}
/**
 * Check if Mux(sel, t, f) would represent an Abs (or -Abs).
 */
static bool mux_is_abs(ir_node *sel, ir_node *mux_true, ir_node *mux_false)
{
    ir_node *cmp_left, *cmp_right, *cmp;
    pn_Cmp   pnc;

    if (!is_Proj(sel))
        return false;
    cmp = get_Proj_pred(sel);
    if (!is_Cmp(cmp))
        return false;

    /* must be <, <=, >=, > */
    pnc = get_Proj_proj(sel);
    switch (pnc) {
    case pn_Cmp_Ge: case pn_Cmp_Gt: case pn_Cmp_Le: case pn_Cmp_Lt:
        break;
    default:
        return false;
    }

    if (!is_negated_value(mux_true, mux_false))
        return false;

    /* must be x cmp 0 */
    cmp_right = get_Cmp_right(cmp);
    if (!is_Const(cmp_right) || !is_Const_null(cmp_right))
        return false;

    cmp_left = get_Cmp_left(cmp);
    if (cmp_left != mux_true && cmp_left != mux_false)
        return false;

    return true;
}
/**
 * Check if Mux(sel, mux_true, mux_false) would represent a Max or Min operation
 */
static bool mux_is_float_min_max(ir_node *sel, ir_node *mux_true, ir_node *mux_false)
{
    ir_node *cmp_l, *cmp_r, *cmp;
    pn_Cmp   pnc;

    if (!is_Proj(sel))
        return false;
    cmp = get_Proj_pred(sel);
    if (!is_Cmp(cmp))
        return false;

    cmp_l = get_Cmp_left(cmp);
    cmp_r = get_Cmp_right(cmp);
    if (!mode_is_float(get_irn_mode(cmp_l)))
        return false;

    /* check for min/max. They're defined as (C semantics):
     *  min(a, b) = a < b ? a : b
     *  or min(a, b) = a <= b ? a : b
     *  max(a, b) = a > b ? a : b
     *  or max(a, b) = a >= b ? a : b
     * (Note we only handle float min/max here)
     */
    pnc = get_Proj_proj(sel);
    switch (pnc) {
    case pn_Cmp_Ge:
    case pn_Cmp_Gt:
        /* this constructs a max */
        if (cmp_l == mux_true && cmp_r == mux_false)
            return true;
        break;
    case pn_Cmp_Le:
    case pn_Cmp_Lt:
        /* this constructs a min */
        if (cmp_l == mux_true && cmp_r == mux_false)
            return true;
        break;
    case pn_Cmp_Uge:
    case pn_Cmp_Ug:
        /* this constructs a min */
        if (cmp_l == mux_false && cmp_r == mux_true)
            return true;
        break;
    case pn_Cmp_Ule:
    case pn_Cmp_Ul:
        /* this constructs a max */
        if (cmp_l == mux_false && cmp_r == mux_true)
            return true;
        break;
    default:
        break;
    }

    return false;
}
static bool mux_is_set(ir_node *sel, ir_node *mux_true, ir_node *mux_false)
{
    ir_mode *mode = get_irn_mode(mux_true);
    (void) sel;

    if (!mode_is_int(mode) && !mode_is_reference(mode))
        return false;

    if (is_Const(mux_true) && is_Const_one(mux_true)
            && is_Const(mux_false) && is_Const_null(mux_false)) {
        return true;
    }
    if (is_Const(mux_true) && is_Const_null(mux_true)
            && is_Const(mux_false) && is_Const_one(mux_false)) {
        return true;
    }

    return false;
}
static bool mux_is_float_const_const(ir_node *sel, ir_node *mux_true, ir_node *mux_false)
{
    (void) sel;

    if (!mode_is_float(get_irn_mode(mux_true)))
        return false;

    return is_Const(mux_true) && is_Const(mux_false);
}
static bool mux_is_doz(ir_node *sel, ir_node *mux_true, ir_node *mux_false)
{
    ir_node *cmp, *cmp_left, *cmp_right;
    ir_mode *mode;
    long     pn;

    if (!is_Proj(sel))
        return false;
    cmp = get_Proj_pred(sel);
    if (!is_Cmp(cmp))
        return false;

    cmp_left  = get_Cmp_left(cmp);
    cmp_right = get_Cmp_right(cmp);
    mode      = get_irn_mode(mux_true);
    pn        = get_Proj_proj(sel);
    if ((pn & pn_Cmp_Gt) && !mode_is_signed(mode) &&
        is_Const(mux_false) && is_Const_null(mux_false) && is_Sub(mux_true) &&
        get_Sub_left(mux_true) == cmp_left &&
        get_Sub_right(mux_true) == cmp_right) {
        /* Mux(a >=u b, a - b, 0) unsigned Doz */
        return true;
    }
    if ((pn & pn_Cmp_Lt) && !mode_is_signed(mode) &&
        is_Const(mux_true) && is_Const_null(mux_true) && is_Sub(mux_false) &&
        get_Sub_left(mux_false) == cmp_left &&
        get_Sub_right(mux_false) == cmp_right) {
        /* Mux(a <=u b, 0, a - b) unsigned Doz */
        return true;
    }

    return false;
}
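/* Doz is the "difference or zero" idiom (illustration): for unsigned a, b
 *
 *     doz(a, b) = a > b ? a - b : 0
 *
 * which is exactly the  Mux(a >u b, a - b, 0)  shape matched above, plus its
 * mirrored  Mux(a <u b, 0, a - b)  form. */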
static int ia32_is_mux_allowed(ir_node *sel, ir_node *mux_false,
                               ir_node *mux_true)
{
    ir_mode *mode;

    /* we can handle Abs for all modes and compares */
    if (mux_is_abs(sel, mux_true, mux_false))
        return true;
    /* we can handle Set for all modes and compares */
    if (mux_is_set(sel, mux_true, mux_false))
        return true;
    /* SSE has own min/max operations */
    if (ia32_cg_config.use_sse2
            && mux_is_float_min_max(sel, mux_true, mux_false))
        return true;
    /* we can handle Mux(?, Const[f], Const[f]) */
    if (mux_is_float_const_const(sel, mux_true, mux_false)) {
#ifdef FIRM_GRGEN_BE
        /* well, some code selectors can't handle it */
        if (be_transformer != TRANSFORMER_PBQP
                && be_transformer != TRANSFORMER_RAND)
            return true;
#else
        return true;
#endif
    }

    /* no support for 64bit inputs to cmov */
    mode = get_irn_mode(mux_true);
    if (get_mode_size_bits(mode) > 32)
        return false;

    if (mux_is_doz(sel, mux_true, mux_false))
        return true;

    /* Check Cmp before the node */
    if (is_Proj(sel)) {
        ir_node *cmp = get_Proj_pred(sel);
        if (is_Cmp(cmp)) {
            ir_mode *cmp_mode = get_irn_mode(get_Cmp_left(cmp));

            /* we can't handle 64bit compares */
            if (get_mode_size_bits(cmp_mode) > 32)
                return false;

            /* we can't handle float compares */
            if (mode_is_float(cmp_mode))
                return false;
        }
    }

    /* did we disable cmov generation? */
    if (!ia32_cg_config.use_cmov)
        return false;

    /* we can use a cmov */
    return true;
}
static asm_constraint_flags_t ia32_parse_asm_constraint(const char **c)
{
    (void) c;

    /* we already added all our simple flags to the flags modifier list in
     * init, so this flag we don't know. */
    return ASM_CONSTRAINT_FLAG_INVALID;
}
static int ia32_is_valid_clobber(const char *clobber)
{
    return ia32_get_clobber_register(clobber) != NULL;
}
/**
 * Create the trampoline code.
 */
static ir_node *ia32_create_trampoline_fkt(ir_node *block, ir_node *mem, ir_node *trampoline, ir_node *env, ir_node *callee)
{
    ir_node *st, *p = trampoline;
    ir_mode *mode   = get_irn_mode(p);

    /* mov  ecx, <env> */
    st  = new_r_Store(block, mem, p, new_Const_long(mode_Bu, 0xb9), 0);
    mem = new_r_Proj(block, st, mode_M, pn_Store_M);
    p   = new_r_Add(block, p, new_Const_long(mode_Iu, 1), mode);
    st  = new_r_Store(block, mem, p, env, 0);
    mem = new_r_Proj(block, st, mode_M, pn_Store_M);
    p   = new_r_Add(block, p, new_Const_long(mode_Iu, 4), mode);
    /* jmp  <callee> */
    st  = new_r_Store(block, mem, p, new_Const_long(mode_Bu, 0xe9), 0);
    mem = new_r_Proj(block, st, mode_M, pn_Store_M);
    p   = new_r_Add(block, p, new_Const_long(mode_Iu, 1), mode);
    st  = new_r_Store(block, mem, p, callee, 0);
    mem = new_r_Proj(block, st, mode_M, pn_Store_M);
    p   = new_r_Add(block, p, new_Const_long(mode_Iu, 4), mode);

    return mem;
}
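/* Byte layout written above (illustration; 0xb9 and 0xe9 are the standard
 * x86 opcode encodings for mov-ecx-imm32 and jmp-rel32):
 *
 *     b9 <env: 4 bytes>       mov ecx, <env>
 *     e9 <callee: 4 bytes>    jmp <callee>
 *
 * i.e. 10 bytes of code inside the 12 byte trampoline area reported to the
 * backend params in ia32_get_libfirm_params() below. */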
/**
 * Returns the libFirm configuration parameter for this backend.
 */
static const backend_params *ia32_get_libfirm_params(void)
{
    static const ir_settings_if_conv_t ifconv = {
        4,                   /* maxdepth, doesn't matter for Mux-conversion */
        ia32_is_mux_allowed  /* allows or disallows Mux creation for given selector */
    };
    static const ir_settings_arch_dep_t ad = {
        1,                   /* also use subs */
        4,                   /* maximum shifts */
        31,                  /* maximum shift amount */
        ia32_evaluate_insn,  /* evaluate the instruction sequence */

        1,   /* allow Mulhs */
        1,   /* allow Mulus */
        32,  /* Mulh allowed up to 32 bit */
    };
    static backend_params p = {
        1,     /* need dword lowering */
        1,     /* support inline assembly */
        NULL,  /* will be set later */
        ia32_create_intrinsic_fkt,
        &intrinsic_env,  /* context for ia32_create_intrinsic_fkt */
        NULL,  /* ifconv info will be set below */
        NULL,  /* float arithmetic mode, will be set below */
        12,    /* size of trampoline code */
        4,     /* alignment of trampoline code */
        ia32_create_trampoline_fkt,
        4      /* alignment of stack parameter */
    };

    ia32_setup_cg_config();

    /* doesn't really belong here, but this is the earliest place the backend
     * params are used */
    init_asm_constraints();

    p.dep_param    = &ad;
    p.if_conv_info = &ifconv;
    if (! ia32_cg_config.use_sse2)
        p.mode_float_arithmetic = mode_E;
    return &p;
}
static const lc_opt_enum_int_items_t gas_items[] = {
    { "elf",   GAS_FLAVOUR_ELF },
    { "mingw", GAS_FLAVOUR_MINGW },
    { "yasm",  GAS_FLAVOUR_YASM },
    { "macho", GAS_FLAVOUR_MACH_O },
    { NULL,    0 }
};

static lc_opt_enum_int_var_t gas_var = {
    (int*) &be_gas_flavour, gas_items
};

#ifdef FIRM_GRGEN_BE
static const lc_opt_enum_int_items_t transformer_items[] = {
    { "default", TRANSFORMER_DEFAULT },
    { "pbqp",    TRANSFORMER_PBQP },
    { "random",  TRANSFORMER_RAND },
    { NULL,      0 }
};

static lc_opt_enum_int_var_t transformer_var = {
    (int*)&be_transformer, transformer_items
};
#endif

static const lc_opt_table_entry_t ia32_options[] = {
    LC_OPT_ENT_ENUM_INT("gasmode", "set the GAS compatibility mode", &gas_var),
#ifdef FIRM_GRGEN_BE
    LC_OPT_ENT_ENUM_INT("transformer", "the transformer used for code selection", &transformer_var),
#endif
    LC_OPT_ENT_INT("stackalign", "set power of two stack alignment for calls",
                   &ia32_isa_template.arch_env.stack_alignment),
    LC_OPT_LAST
};
const arch_isa_if_t ia32_isa_if = {
    ia32_init,
    ia32_done,
    ia32_handle_intrinsics,
    ia32_get_n_reg_class,
    ia32_get_reg_class,
    ia32_get_reg_class_for_mode,
    ia32_get_call_abi,
    ia32_get_code_generator_if,
    ia32_get_list_sched_selector,
    ia32_get_ilp_sched_selector,
    ia32_get_reg_class_alignment,
    ia32_get_libfirm_params,
    ia32_get_allowed_execution_units,
    ia32_get_machine,
    ia32_get_irg_list,
    ia32_mark_remat,
    ia32_parse_asm_constraint,
    ia32_is_valid_clobber
};
void be_init_arch_ia32(void)
{
    lc_opt_entry_t *be_grp   = lc_opt_get_grp(firm_opt_get_root(), "be");
    lc_opt_entry_t *ia32_grp = lc_opt_get_grp(be_grp, "ia32");

    lc_opt_add_table(ia32_grp, ia32_options);
    be_register_isa_if("ia32", &ia32_isa_if);

    FIRM_DBG_REGISTER(dbg, "firm.be.ia32.cg");

    ia32_init_emitter();
    ia32_init_finish();
    ia32_init_optimize();
    ia32_init_transform();
    ia32_init_x87();
    ia32_init_architecture();
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_ia32);