2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This is the main ia32 firm backend driver.
23 * @author Christian Wuerdig
29 #include "lc_opts_enum.h"
33 #include "pseudo_irg.h"
38 #include "iredges_t.h"
51 #include "iroptimize.h"
52 #include "instrument.h"
55 #include "../beirg_t.h"
56 #include "../benode_t.h"
57 #include "../belower.h"
58 #include "../besched_t.h"
61 #include "../beirgmod.h"
62 #include "../be_dbgout.h"
63 #include "../beblocksched.h"
64 #include "../bemachine.h"
65 #include "../beilpsched.h"
66 #include "../bespillslots.h"
67 #include "../bemodule.h"
68 #include "../begnuas.h"
69 #include "../bestate.h"
70 #include "../beflags.h"
71 #include "../betranshlp.h"
73 #include "bearch_ia32_t.h"
75 #include "ia32_new_nodes.h"
76 #include "gen_ia32_regalloc_if.h"
77 #include "gen_ia32_machine.h"
78 #include "ia32_common_transform.h"
79 #include "ia32_transform.h"
80 #include "ia32_emitter.h"
81 #include "ia32_map_regs.h"
82 #include "ia32_optimize.h"
84 #include "ia32_dbg_stat.h"
85 #include "ia32_finish.h"
86 #include "ia32_util.h"
88 #include "ia32_architecture.h"
91 #include "ia32_pbqp_transform.h"
93 transformer_t be_transformer = TRANSFORMER_DEFAULT;
96 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
99 static set *cur_reg_set = NULL;
101 ir_mode *mode_fpcw = NULL;
102 ia32_code_gen_t *ia32_current_cg = NULL;
105 * The environment for the intrinsic mapping.
107 static ia32_intrinsic_env_t intrinsic_env = {
109 NULL, /* the irg, these entities belong to */
110 NULL, /* entity for first div operand (move into FPU) */
111 NULL, /* entity for second div operand (move into FPU) */
112 NULL, /* entity for converts ll -> d */
113 NULL, /* entity for converts d -> ll */
114 NULL, /* entity for __divdi3 library call */
115 NULL, /* entity for __moddi3 library call */
116 NULL, /* entity for __udivdi3 library call */
117 NULL, /* entity for __umoddi3 library call */
118 NULL, /* bias value for conversion from float to unsigned 64 */
122 typedef ir_node *(*create_const_node_func) (dbg_info *dbg, ir_node *block);
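/**
 * Creates the unique, per-irg constant-like node (NoReg, Unknown, fpu ChangeCW)
 * cached at *place: it lives in the start block, gets the given register
 * assigned and is kept alive by a dependency on the End node.
 */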
124 static inline ir_node *create_const(ia32_code_gen_t *cg, ir_node **place,
125 create_const_node_func func,
126 const arch_register_t* reg)
128 ir_node *block, *res;
133 block = get_irg_start_block(cg->irg);
134 res = func(NULL, block);
135 arch_set_irn_register(res, reg);
138 add_irn_dep(get_irg_end(cg->irg), res);
139 /* add_irn_dep(get_irg_start(cg->irg), res); */
144 /* Creates the unique per irg GP NoReg node. */
145 ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg) {
146 return create_const(cg, &cg->noreg_gp, new_bd_ia32_NoReg_GP,
147 &ia32_gp_regs[REG_GP_NOREG]);
150 ir_node *ia32_new_NoReg_vfp(ia32_code_gen_t *cg) {
151 return create_const(cg, &cg->noreg_vfp, new_bd_ia32_NoReg_VFP,
152 &ia32_vfp_regs[REG_VFP_NOREG]);
155 ir_node *ia32_new_NoReg_xmm(ia32_code_gen_t *cg) {
156 return create_const(cg, &cg->noreg_xmm, new_bd_ia32_NoReg_XMM,
157 &ia32_xmm_regs[REG_XMM_NOREG]);
160 ir_node *ia32_new_Unknown_gp(ia32_code_gen_t *cg) {
161 return create_const(cg, &cg->unknown_gp, new_bd_ia32_Unknown_GP,
162 &ia32_gp_regs[REG_GP_UKNWN]);
165 ir_node *ia32_new_Unknown_vfp(ia32_code_gen_t *cg) {
166 return create_const(cg, &cg->unknown_vfp, new_bd_ia32_Unknown_VFP,
167 &ia32_vfp_regs[REG_VFP_UKNWN]);
170 ir_node *ia32_new_Unknown_xmm(ia32_code_gen_t *cg) {
171 return create_const(cg, &cg->unknown_xmm, new_bd_ia32_Unknown_XMM,
172 &ia32_xmm_regs[REG_XMM_UKNWN]);
175 ir_node *ia32_new_Fpu_truncate(ia32_code_gen_t *cg) {
176 return create_const(cg, &cg->fpu_trunc_mode, new_bd_ia32_ChangeCW,
177 &ia32_fp_cw_regs[REG_FPCW]);
182 * Returns the admissible noreg register node for input register pos of node irn.
184 static ir_node *ia32_get_admissible_noreg(ia32_code_gen_t *cg, ir_node *irn, int pos)
186 const arch_register_req_t *req = arch_get_register_req(irn, pos);
188 assert(req != NULL && "Missing register requirements");
189 if (req->cls == &ia32_reg_classes[CLASS_ia32_gp])
190 return ia32_new_NoReg_gp(cg);
192 if (ia32_cg_config.use_sse2) {
193 return ia32_new_NoReg_xmm(cg);
195 return ia32_new_NoReg_vfp(cg);
199 /**************************************************
202 * _ __ ___ __ _ __ _| | | ___ ___ _| |_
203 * | '__/ _ \/ _` | / _` | | |/ _ \ / __| | | _|
204 * | | | __/ (_| | | (_| | | | (_) | (__ | | |
205 * |_| \___|\__, | \__,_|_|_|\___/ \___| |_|_|
208 **************************************************/
211 * Return register requirements for an ia32 node.
212 * If the node returns a tuple (mode_T) then the Projs
213 * will be asked for this information.
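* Note: judging from the code below, a non-negative pos selects an input
* requirement, while a negative pos selects an output requirement (for mode_T
* nodes the Proj number picks the output).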
215 static const arch_register_req_t *ia32_get_irn_reg_req(const ir_node *node,
218 ir_mode *mode = get_irn_mode(node);
221 if (mode == mode_X || is_Block(node)) {
222 return arch_no_register_req;
225 if (mode == mode_T && pos < 0) {
226 return arch_no_register_req;
229 node_pos = pos == -1 ? 0 : pos;
231 if (mode == mode_M || pos >= 0) {
232 return arch_no_register_req;
235 node_pos = (pos == -1) ? get_Proj_proj(node) : pos;
236 node = skip_Proj_const(node);
239 if (is_ia32_irn(node)) {
240 const arch_register_req_t *req;
242 req = get_ia32_in_req(node, pos);
244 req = get_ia32_out_req(node, node_pos);
251 /* unknowns should be transformed already */
252 return arch_no_register_req;
255 static arch_irn_class_t ia32_classify(const ir_node *irn) {
256 arch_irn_class_t classification = 0;
258 irn = skip_Proj_const(irn);
261 classification |= arch_irn_class_branch;
263 if (! is_ia32_irn(irn))
264 return classification;
266 if (is_ia32_is_reload(irn))
267 classification |= arch_irn_class_reload;
269 if (is_ia32_is_spill(irn))
270 classification |= arch_irn_class_spill;
272 if (is_ia32_is_remat(irn))
273 classification |= arch_irn_class_remat;
275 return classification;
279 * The IA32 ABI callback object.
282 be_abi_call_flags_bits_t flags; /**< The call flags. */
283 const arch_env_t *aenv; /**< The architecture environment. */
284 ir_graph *irg; /**< The associated graph. */
287 static ir_entity *ia32_get_frame_entity(const ir_node *irn) {
288 return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
291 static void ia32_set_frame_entity(ir_node *irn, ir_entity *ent) {
292 set_ia32_frame_ent(irn, ent);
295 static void ia32_set_frame_offset(ir_node *irn, int bias)
297 if (get_ia32_frame_ent(irn) == NULL)
300 if (is_ia32_Pop(irn) || is_ia32_PopMem(irn)) {
301 ia32_code_gen_t *cg = ia32_current_cg;
302 int omit_fp = be_abi_omit_fp(cg->birg->abi);
304 /* Pop nodes modify the stack pointer before calculating the
305 * destination address, so fix this here
310 add_ia32_am_offs_int(irn, bias);
313 static int ia32_get_sp_bias(const ir_node *node)
315 if (is_ia32_Call(node))
316 return -(int)get_ia32_call_attr_const(node)->pop;
318 if (is_ia32_Push(node))
321 if (is_ia32_Pop(node) || is_ia32_PopMem(node))
328 * Generate the routine prologue.
330 * @param self The callback object.
331 * @param mem A pointer to the mem node. Update this if you define new memory.
332 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
333 * @param stack_bias Points to the current stack bias, can be modified if needed.
335 * @return The register which shall be used as a stack frame base.
337 * All nodes which define registers in @p reg_map must keep @p reg_map current.
339 static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map, int *stack_bias)
341 ia32_abi_env_t *env = self;
342 ia32_code_gen_t *cg = ia32_current_cg;
343 const arch_env_t *arch_env = env->aenv;
345 if (! env->flags.try_omit_fp) {
346 ir_graph *irg = env->irg;
347 ir_node *bl = get_irg_start_block(irg);
348 ir_node *curr_sp = be_abi_reg_map_get(reg_map, arch_env->sp);
349 ir_node *curr_bp = be_abi_reg_map_get(reg_map, arch_env->bp);
350 ir_node *noreg = ia32_new_NoReg_gp(cg);
353 /* mark bp register as ignore */
354 be_set_constr_single_reg_out(get_Proj_pred(curr_bp),
355 get_Proj_proj(curr_bp), arch_env->bp, arch_register_req_type_ignore);
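/* emit the classic frame setup: pushl %ebp; movl %esp, %ebp */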
358 push = new_bd_ia32_Push(NULL, bl, noreg, noreg, *mem, curr_bp, curr_sp);
359 curr_sp = new_r_Proj(irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
360 *mem = new_r_Proj(irg, bl, push, mode_M, pn_ia32_Push_M);
362 /* the push must have SP out register */
363 arch_set_irn_register(curr_sp, arch_env->sp);
365 /* this modifies the stack bias, because we pushed 32bit */
368 /* move esp to ebp */
369 curr_bp = be_new_Copy(arch_env->bp->reg_class, irg, bl, curr_sp);
370 be_set_constr_single_reg_out(curr_bp, 0, arch_env->bp,
371 arch_register_req_type_ignore);
373 /* beware: the copy must be done before any other sp use */
374 curr_sp = be_new_CopyKeep_single(arch_env->sp->reg_class, irg, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
375 be_set_constr_single_reg_out(curr_sp, 0, arch_env->sp,
376 arch_register_req_type_produces_sp);
378 be_abi_reg_map_set(reg_map, arch_env->sp, curr_sp);
379 be_abi_reg_map_set(reg_map, arch_env->bp, curr_bp);
388 * Generate the routine epilogue.
389 * @param self The callback object.
390 * @param bl The block for the epilogue
391 * @param mem A pointer to the mem node. Update this if you define new memory.
392 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
395 * All nodes which define registers in @p reg_map must keep @p reg_map current.
397 static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map)
399 ia32_abi_env_t *env = self;
400 const arch_env_t *arch_env = env->aenv;
401 ir_node *curr_sp = be_abi_reg_map_get(reg_map, arch_env->sp);
402 ir_node *curr_bp = be_abi_reg_map_get(reg_map, arch_env->bp);
403 ir_graph *irg = env->irg;
405 if (env->flags.try_omit_fp) {
406 /* simply remove the stack frame here */
407 curr_sp = be_new_IncSP(arch_env->sp, irg, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK, 0);
409 ir_mode *mode_bp = arch_env->bp->reg_class->mode;
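/* tear the frame down either with a single 'leave' or with the equivalent
 * 'movl %ebp, %esp; popl %ebp' sequence */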
411 if (ia32_cg_config.use_leave) {
415 leave = new_bd_ia32_Leave(NULL, bl, curr_bp);
416 curr_bp = new_r_Proj(irg, bl, leave, mode_bp, pn_ia32_Leave_frame);
417 curr_sp = new_r_Proj(irg, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
421 /* the old SP is not needed anymore (kill the proj) */
422 assert(is_Proj(curr_sp));
425 /* copy ebp to esp */
426 curr_sp = be_new_Copy(&ia32_reg_classes[CLASS_ia32_gp], irg, bl, curr_bp);
427 arch_set_irn_register(curr_sp, arch_env->sp);
428 be_set_constr_single_reg_out(curr_sp, 0, arch_env->sp,
429 arch_register_req_type_ignore);
432 pop = new_bd_ia32_PopEbp(NULL, bl, *mem, curr_sp);
433 curr_bp = new_r_Proj(irg, bl, pop, mode_bp, pn_ia32_Pop_res);
434 curr_sp = new_r_Proj(irg, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
436 *mem = new_r_Proj(irg, bl, pop, mode_M, pn_ia32_Pop_M);
438 arch_set_irn_register(curr_sp, arch_env->sp);
439 arch_set_irn_register(curr_bp, arch_env->bp);
442 be_abi_reg_map_set(reg_map, arch_env->sp, curr_sp);
443 be_abi_reg_map_set(reg_map, arch_env->bp, curr_bp);
447 * Initialize the callback object.
448 * @param call The call object.
449 * @param aenv The architecture environment.
450 * @param irg The graph with the method.
451 * @return Some pointer. This pointer is passed to all other callback functions as self object.
453 static void *ia32_abi_init(const be_abi_call_t *call, const arch_env_t *aenv, ir_graph *irg)
455 ia32_abi_env_t *env = XMALLOC(ia32_abi_env_t);
456 be_abi_call_flags_t fl = be_abi_call_get_flags(call);
457 env->flags = fl.bits;
464 * Destroy the callback object.
465 * @param self The callback object.
467 static void ia32_abi_done(void *self) {
472 * Produces the type which sits between the stack args and the locals on the stack.
473 * It will contain the return address and space to store the old base pointer.
474 * @return The Firm type modeling the ABI between type.
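* With a frame pointer the between type holds { old_bp, ret_addr } (8 bytes),
* without one it holds only { ret_addr } (4 bytes), as laid out below.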
476 static ir_type *ia32_abi_get_between_type(void *self)
478 #define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
479 static ir_type *omit_fp_between_type = NULL;
480 static ir_type *between_type = NULL;
482 ia32_abi_env_t *env = self;
484 if (! between_type) {
485 ir_entity *old_bp_ent;
486 ir_entity *ret_addr_ent;
487 ir_entity *omit_fp_ret_addr_ent;
489 ir_type *old_bp_type = new_type_primitive(IDENT("bp"), mode_Iu);
490 ir_type *ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_Iu);
492 between_type = new_type_struct(IDENT("ia32_between_type"));
493 old_bp_ent = new_entity(between_type, IDENT("old_bp"), old_bp_type);
494 ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type);
496 set_entity_offset(old_bp_ent, 0);
497 set_entity_offset(ret_addr_ent, get_type_size_bytes(old_bp_type));
498 set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
499 set_type_state(between_type, layout_fixed);
501 omit_fp_between_type = new_type_struct(IDENT("ia32_between_type_omit_fp"));
502 omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type);
504 set_entity_offset(omit_fp_ret_addr_ent, 0);
505 set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
506 set_type_state(omit_fp_between_type, layout_fixed);
509 return env->flags.try_omit_fp ? omit_fp_between_type : between_type;
514 * Get the estimated cycle count for @p irn.
517 * @param irn The node.
519 * @return The estimated cycle count for this operation
521 static int ia32_get_op_estimated_cost(const ir_node *irn)
524 ia32_op_type_t op_tp;
528 if (!is_ia32_irn(irn))
531 assert(is_ia32_irn(irn));
533 cost = get_ia32_latency(irn);
534 op_tp = get_ia32_op_type(irn);
536 if (is_ia32_CopyB(irn)) {
539 else if (is_ia32_CopyB_i(irn)) {
540 int size = get_ia32_copyb_size(irn);
541 cost = 20 + (int)ceil((4.0 / 3.0) * size);
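/* e.g. an inline CopyB of 12 bytes is estimated at 20 + ceil(4/3 * 12) = 36 cycles */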
543 /* in case of address mode operations add additional cycles */
544 else if (op_tp == ia32_AddrModeD || op_tp == ia32_AddrModeS) {
546 In case of stack access and access to fixed addresses add 5 cycles
547 (we assume they are in cache), other memory operations cost 20 cycles
550 if (is_ia32_use_frame(irn) || (
551 is_ia32_NoReg_GP(get_irn_n(irn, n_ia32_base)) &&
552 is_ia32_NoReg_GP(get_irn_n(irn, n_ia32_index))
564 * Returns the inverse operation of @p irn, recalculating the argument at position @p i.
566 * @param irn The original operation
567 * @param i Index of the argument we want the inverse operation to yield
568 * @param inverse struct to be filled with the resulting inverse op
569 * @param obst The obstack to use for allocation of the returned nodes array
570 * @return The inverse operation or NULL if the operation is not invertible
572 static arch_inverse_t *ia32_get_inverse(const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) {
575 ir_node *block, *noreg, *nomem;
578 /* we cannot invert non-ia32 irns */
579 if (! is_ia32_irn(irn))
582 /* operand must always be a real operand (not base, index or mem) */
583 if (i != n_ia32_binary_left && i != n_ia32_binary_right)
586 /* we don't invert address mode operations */
587 if (get_ia32_op_type(irn) != ia32_Normal)
590 /* TODO: adjust for new immediates... */
591 ir_fprintf(stderr, "TODO: fix get_inverse for new immediates (%+F)\n",
595 block = get_nodes_block(irn);
596 mode = get_irn_mode(irn);
597 irn_mode = get_irn_mode(irn);
598 noreg = get_irn_n(irn, 0);
600 dbg = get_irn_dbg_info(irn);
602 /* initialize structure */
603 inverse->nodes = obstack_alloc(obst, 2 * sizeof(inverse->nodes[0]));
607 switch (get_ia32_irn_opcode(irn)) {
610 if (get_ia32_immop_type(irn) == ia32_ImmConst) {
611 /* we have an add with a const here */
612 /* inverse == add with negated const */
613 inverse->nodes[0] = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
615 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
616 set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
617 set_ia32_commutative(inverse->nodes[0]);
619 else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
620 /* we have an add with a symconst here */
621 /* inverse == sub with const */
622 inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
624 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
627 /* normal add: inverse == sub */
628 inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, i ^ 1));
635 if (get_ia32_immop_type(irn) != ia32_ImmNone) {
636 /* we have a sub with a const/symconst here */
637 /* inverse == add with this const */
638 inverse->nodes[0] = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
639 inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
640 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
644 if (i == n_ia32_binary_left) {
645 inverse->nodes[0] = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, 3));
648 inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, get_irn_n(irn, n_ia32_binary_left), (ir_node*) irn);
656 if (get_ia32_immop_type(irn) != ia32_ImmNone) {
657 /* xor with const: inverse = xor */
658 inverse->nodes[0] = new_bd_ia32_Xor(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
659 inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
660 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
664 inverse->nodes[0] = new_bd_ia32_Xor(dbg, block, noreg, noreg, nomem, (ir_node *) irn, get_irn_n(irn, i));
670 inverse->nodes[0] = new_bd_ia32_Not(dbg, block, (ir_node*) irn);
675 inverse->nodes[0] = new_bd_ia32_Neg(dbg, block, (ir_node*) irn);
680 /* inverse operation not supported */
687 static ir_mode *get_spill_mode_mode(const ir_mode *mode)
689 if(mode_is_float(mode))
696 * Get the mode that should be used for spilling value node
698 static ir_mode *get_spill_mode(const ir_node *node)
700 ir_mode *mode = get_irn_mode(node);
701 return get_spill_mode_mode(mode);
705 * Checks whether an addressmode reload for a node with mode mode is compatible
706 * with a spillslot of mode spill_mode
708 static int ia32_is_spillmode_compatible(const ir_mode *mode, const ir_mode *spillmode)
710 return !mode_is_float(mode) || mode == spillmode;
714 * Check if irn can load its operand at position i from memory (source addressmode).
715 * @param irn The irn to be checked
716 * @param i The operands position
717 * @return Non-Zero if operand can be loaded
719 static int ia32_possible_memory_operand(const ir_node *irn, unsigned int i)
721 ir_node *op = get_irn_n(irn, i);
722 const ir_mode *mode = get_irn_mode(op);
723 const ir_mode *spillmode = get_spill_mode(op);
725 if (!is_ia32_irn(irn) || /* must be an ia32 irn */
726 get_ia32_op_type(irn) != ia32_Normal || /* must not already be a addressmode irn */
727 !ia32_is_spillmode_compatible(mode, spillmode) ||
728 is_ia32_use_frame(irn)) /* must not already use frame */
731 switch (get_ia32_am_support(irn)) {
736 if (i != n_ia32_unary_op)
742 case n_ia32_binary_left: {
743 const arch_register_req_t *req;
744 if (!is_ia32_commutative(irn))
747 /* we can't swap left/right for limited registers
748 * (As this (currently) breaks constraint handling copies)
750 req = get_ia32_in_req(irn, n_ia32_binary_left);
751 if (req->type & arch_register_req_type_limited)
756 case n_ia32_binary_right:
765 panic("Unknown AM type");
768 /* HACK: must not already use "real" memory.
769 * This can happen for Call and Div */
770 if (!is_NoMem(get_irn_n(irn, n_ia32_mem)))
776 static void ia32_perform_memory_operand(ir_node *irn, ir_node *spill,
780 ir_mode *dest_op_mode;
782 assert(ia32_possible_memory_operand(irn, i) && "Cannot perform memory operand change");
784 set_ia32_op_type(irn, ia32_AddrModeS);
786 load_mode = get_irn_mode(get_irn_n(irn, i));
787 dest_op_mode = get_ia32_ls_mode(irn);
788 if (get_mode_size_bits(load_mode) <= get_mode_size_bits(dest_op_mode)) {
789 set_ia32_ls_mode(irn, load_mode);
791 set_ia32_use_frame(irn);
792 set_ia32_need_stackent(irn);
794 if (i == n_ia32_binary_left &&
795 get_ia32_am_support(irn) == ia32_am_binary &&
796 /* immediates are only allowed on the right side */
797 !is_ia32_Immediate(get_irn_n(irn, n_ia32_binary_right))) {
798 ia32_swap_left_right(irn);
799 i = n_ia32_binary_right;
802 assert(is_NoMem(get_irn_n(irn, n_ia32_mem)));
804 set_irn_n(irn, n_ia32_base, get_irg_frame(get_irn_irg(irn)));
805 set_irn_n(irn, n_ia32_mem, spill);
806 set_irn_n(irn, i, ia32_get_admissible_noreg(ia32_current_cg, irn, i));
807 set_ia32_is_reload(irn);
810 static const be_abi_callbacks_t ia32_abi_callbacks = {
813 ia32_abi_get_between_type,
818 /* fill register allocator interface */
820 static const arch_irn_ops_t ia32_irn_ops = {
821 ia32_get_irn_reg_req,
823 ia32_get_frame_entity,
824 ia32_set_frame_entity,
825 ia32_set_frame_offset,
828 ia32_get_op_estimated_cost,
829 ia32_possible_memory_operand,
830 ia32_perform_memory_operand,
833 /**************************************************
836 * ___ ___ __| | ___ __ _ ___ _ __ _| |_
837 * / __/ _ \ / _` |/ _ \/ _` |/ _ \ '_ \ | | _|
838 * | (_| (_) | (_| | __/ (_| | __/ | | | | | |
839 * \___\___/ \__,_|\___|\__, |\___|_| |_| |_|_|
842 **************************************************/
844 static ir_entity *mcount = NULL;
846 #define ID(s) new_id_from_chars(s, sizeof(s) - 1)
848 static void ia32_before_abi(void *self) {
849 lower_mode_b_config_t lower_mode_b_config = {
850 mode_Iu, /* lowered mode */
851 mode_Bu, /* preferred mode for set */
852 0, /* don't lower direct compares */
854 ia32_code_gen_t *cg = self;
856 ir_lower_mode_b(cg->irg, &lower_mode_b_config);
858 be_dump(cg->irg, "-lower_modeb", dump_ir_block_graph_sched);
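/* profiling support: lazily create the mcount entity (the gprof hook) and
 * instrument the graph with a call to it on function entry */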
860 if (mcount == NULL) {
861 ir_type *tp = new_type_method(ID("FKT.mcount"), 0, 0);
862 mcount = new_entity(get_glob_type(), ID("mcount"), tp);
863 /* FIXME: enter the right ld_ident here */
864 set_entity_ld_ident(mcount, get_entity_ident(mcount));
865 set_entity_visibility(mcount, visibility_external_allocated);
867 instrument_initcall(cg->irg, mcount);
872 * Transforms the standard firm graph into an ia32 firm graph.
875 static void ia32_prepare_graph(void *self) {
876 ia32_code_gen_t *cg = self;
878 /* do local optimizations */
879 optimize_graph_df(cg->irg);
881 /* TODO: we often have dead code reachable through out-edges here. So for
882 * now we rebuild edges (as we need correct user count for code selection)
885 edges_deactivate(cg->irg);
886 edges_activate(cg->irg);
890 be_dump(cg->irg, "-pre_transform", dump_ir_block_graph_sched);
892 switch (be_transformer) {
893 case TRANSFORMER_DEFAULT:
894 /* transform remaining nodes into assembler instructions */
895 ia32_transform_graph(cg);
899 case TRANSFORMER_PBQP:
900 case TRANSFORMER_RAND:
901 /* transform nodes into assembler instructions by PBQP magic */
902 ia32_transform_graph_by_pbqp(cg);
907 panic("invalid transformer");
910 /* do local optimizations (mainly CSE) */
911 optimize_graph_df(cg->irg);
914 be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);
916 /* optimize address mode */
917 ia32_optimize_graph(cg);
919 /* do code placement, to optimize the position of constants */
923 be_dump(cg->irg, "-place", dump_ir_block_graph_sched);
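/**
 * Splits an address mode node apart again: an explicit Load is created for the
 * memory operand and the node is rewired to use the loaded value instead.
 */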
926 ir_node *turn_back_am(ir_node *node)
928 ir_graph *irg = current_ir_graph;
929 dbg_info *dbgi = get_irn_dbg_info(node);
930 ir_node *block = get_nodes_block(node);
931 ir_node *base = get_irn_n(node, n_ia32_base);
932 ir_node *index = get_irn_n(node, n_ia32_index);
933 ir_node *mem = get_irn_n(node, n_ia32_mem);
936 ir_node *load = new_bd_ia32_Load(dbgi, block, base, index, mem);
937 ir_node *load_res = new_rd_Proj(dbgi, irg, block, load, mode_Iu, pn_ia32_Load_res);
939 ia32_copy_am_attrs(load, node);
940 if (is_ia32_is_reload(node))
941 set_ia32_is_reload(load);
942 set_irn_n(node, n_ia32_mem, new_NoMem());
944 switch (get_ia32_am_support(node)) {
946 set_irn_n(node, n_ia32_unary_op, load_res);
950 if (is_ia32_Immediate(get_irn_n(node, n_ia32_binary_right))) {
951 set_irn_n(node, n_ia32_binary_left, load_res);
953 set_irn_n(node, n_ia32_binary_right, load_res);
958 panic("Unknown AM type");
960 noreg = ia32_new_NoReg_gp(ia32_current_cg);
961 set_irn_n(node, n_ia32_base, noreg);
962 set_irn_n(node, n_ia32_index, noreg);
963 set_ia32_am_offs_int(node, 0);
964 set_ia32_am_sc(node, NULL);
965 set_ia32_am_scale(node, 0);
966 clear_ia32_am_sc_sign(node);
968 /* rewire mem-proj */
969 if (get_irn_mode(node) == mode_T) {
970 const ir_edge_t *edge;
971 foreach_out_edge(node, edge) {
972 ir_node *out = get_edge_src_irn(edge);
973 if (get_irn_mode(out) == mode_M) {
974 set_Proj_pred(out, load);
975 set_Proj_proj(out, pn_ia32_Load_M);
981 set_ia32_op_type(node, ia32_Normal);
982 if (sched_is_scheduled(node))
983 sched_add_before(node, load);
988 static ir_node *flags_remat(ir_node *node, ir_node *after)
990 /* we should turn back source address mode when rematerializing nodes */
995 if (is_Block(after)) {
998 block = get_nodes_block(after);
1001 type = get_ia32_op_type(node);
1003 case ia32_AddrModeS:
1007 case ia32_AddrModeD:
1008 /* TODO implement this later... */
1009 panic("found DestAM with flag user %+F this should not happen", node);
1012 default: assert(type == ia32_Normal); break;
1015 copy = exact_copy(node);
1016 set_nodes_block(copy, block);
1017 sched_add_after(after, copy);
1023 * Called before the register allocator.
1025 static void ia32_before_ra(void *self) {
1026 ia32_code_gen_t *cg = self;
1028 /* setup fpu rounding modes */
1029 ia32_setup_fpu_mode(cg);
1032 be_sched_fix_flags(cg->birg, &ia32_reg_classes[CLASS_ia32_flags],
1035 ia32_add_missing_keeps(cg);
1040 * Transforms a be_Reload into an ia32 Load.
1042 static void transform_to_Load(ia32_code_gen_t *cg, ir_node *node) {
1043 ir_graph *irg = get_irn_irg(node);
1044 dbg_info *dbg = get_irn_dbg_info(node);
1045 ir_node *block = get_nodes_block(node);
1046 ir_entity *ent = be_get_frame_entity(node);
1047 ir_mode *mode = get_irn_mode(node);
1048 ir_mode *spillmode = get_spill_mode(node);
1049 ir_node *noreg = ia32_new_NoReg_gp(cg);
1050 ir_node *sched_point = NULL;
1051 ir_node *ptr = get_irg_frame(irg);
1052 ir_node *mem = get_irn_n(node, be_pos_Reload_mem);
1053 ir_node *new_op, *proj;
1054 const arch_register_t *reg;
1056 if (sched_is_scheduled(node)) {
1057 sched_point = sched_prev(node);
1060 if (mode_is_float(spillmode)) {
1061 if (ia32_cg_config.use_sse2)
1062 new_op = new_bd_ia32_xLoad(dbg, block, ptr, noreg, mem, spillmode);
1064 new_op = new_bd_ia32_vfld(dbg, block, ptr, noreg, mem, spillmode);
1066 else if (get_mode_size_bits(spillmode) == 128) {
1067 /* Reload 128 bit SSE registers */
1068 new_op = new_bd_ia32_xxLoad(dbg, block, ptr, noreg, mem);
1071 new_op = new_bd_ia32_Load(dbg, block, ptr, noreg, mem);
1073 set_ia32_op_type(new_op, ia32_AddrModeS);
1074 set_ia32_ls_mode(new_op, spillmode);
1075 set_ia32_frame_ent(new_op, ent);
1076 set_ia32_use_frame(new_op);
1077 set_ia32_is_reload(new_op);
1079 DBG_OPT_RELOAD2LD(node, new_op);
1081 proj = new_rd_Proj(dbg, irg, block, new_op, mode, pn_ia32_Load_res);
1084 sched_add_after(sched_point, new_op);
1088 /* copy the register from the old node to the new Load */
1089 reg = arch_get_irn_register(node);
1090 arch_set_irn_register(proj, reg);
1092 SET_IA32_ORIG_NODE(new_op, node);
1094 exchange(node, proj);
1098 * Transforms a be_Spill node into an ia32 Store.
1100 static void transform_to_Store(ia32_code_gen_t *cg, ir_node *node) {
1101 ir_graph *irg = get_irn_irg(node);
1102 dbg_info *dbg = get_irn_dbg_info(node);
1103 ir_node *block = get_nodes_block(node);
1104 ir_entity *ent = be_get_frame_entity(node);
1105 const ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
1106 ir_mode *mode = get_spill_mode(spillval);
1107 ir_node *noreg = ia32_new_NoReg_gp(cg);
1108 ir_node *nomem = new_NoMem();
1109 ir_node *ptr = get_irg_frame(irg);
1110 ir_node *val = get_irn_n(node, be_pos_Spill_val);
1112 ir_node *sched_point = NULL;
1114 if (sched_is_scheduled(node)) {
1115 sched_point = sched_prev(node);
1118 /* No need to spill unknown values... */
1119 if(is_ia32_Unknown_GP(val) ||
1120 is_ia32_Unknown_VFP(val) ||
1121 is_ia32_Unknown_XMM(val)) {
1126 exchange(node, store);
1130 if (mode_is_float(mode)) {
1131 if (ia32_cg_config.use_sse2)
1132 store = new_bd_ia32_xStore(dbg, block, ptr, noreg, nomem, val);
1134 store = new_bd_ia32_vfst(dbg, block, ptr, noreg, nomem, val, mode);
1135 } else if (get_mode_size_bits(mode) == 128) {
1136 /* Spill 128 bit SSE registers */
1137 store = new_bd_ia32_xxStore(dbg, block, ptr, noreg, nomem, val);
1138 } else if (get_mode_size_bits(mode) == 8) {
1139 store = new_bd_ia32_Store8Bit(dbg, block, ptr, noreg, nomem, val);
1141 store = new_bd_ia32_Store(dbg, block, ptr, noreg, nomem, val);
1144 set_ia32_op_type(store, ia32_AddrModeD);
1145 set_ia32_ls_mode(store, mode);
1146 set_ia32_frame_ent(store, ent);
1147 set_ia32_use_frame(store);
1148 set_ia32_is_spill(store);
1149 SET_IA32_ORIG_NODE(store, node);
1150 DBG_OPT_SPILL2ST(node, store);
1153 sched_add_after(sched_point, store);
1157 exchange(node, store);
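/**
 * Creates a Push that reads a 32 bit value from the given frame entity
 * (source address mode) and pushes it onto the stack. Used by the MemPerm
 * lowering below.
 */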
1160 static ir_node *create_push(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent) {
1161 dbg_info *dbg = get_irn_dbg_info(node);
1162 ir_node *block = get_nodes_block(node);
1163 ir_node *noreg = ia32_new_NoReg_gp(cg);
1164 ir_graph *irg = get_irn_irg(node);
1165 ir_node *frame = get_irg_frame(irg);
1167 ir_node *push = new_bd_ia32_Push(dbg, block, frame, noreg, mem, noreg, sp);
1169 set_ia32_frame_ent(push, ent);
1170 set_ia32_use_frame(push);
1171 set_ia32_op_type(push, ia32_AddrModeS);
1172 set_ia32_ls_mode(push, mode_Is);
1173 set_ia32_is_spill(push);
1175 sched_add_before(schedpoint, push);
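/**
 * Counterpart of create_push: creates a PopMem that pops a 32 bit value from
 * the stack into the given frame entity (destination address mode).
 */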
1179 static ir_node *create_pop(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_entity *ent) {
1180 dbg_info *dbg = get_irn_dbg_info(node);
1181 ir_node *block = get_nodes_block(node);
1182 ir_node *noreg = ia32_new_NoReg_gp(cg);
1183 ir_graph *irg = get_irn_irg(node);
1184 ir_node *frame = get_irg_frame(irg);
1186 ir_node *pop = new_bd_ia32_PopMem(dbg, block, frame, noreg, new_NoMem(), sp);
1188 set_ia32_frame_ent(pop, ent);
1189 set_ia32_use_frame(pop);
1190 set_ia32_op_type(pop, ia32_AddrModeD);
1191 set_ia32_ls_mode(pop, mode_Is);
1192 set_ia32_is_reload(pop);
1194 sched_add_before(schedpoint, pop);
1199 static ir_node* create_spproj(ir_node *node, ir_node *pred, int pos)
1201 ir_graph *irg = get_irn_irg(node);
1202 dbg_info *dbg = get_irn_dbg_info(node);
1203 ir_node *block = get_nodes_block(node);
1204 ir_mode *spmode = mode_Iu;
1205 const arch_register_t *spreg = &ia32_gp_regs[REG_ESP];
1208 sp = new_rd_Proj(dbg, irg, block, pred, spmode, pos);
1209 arch_set_irn_register(sp, spreg);
1215 * Transforms a MemPerm. Currently we do this the ugly way and produce
1216 * push/pop into/from memory cascades. This is possible without using any registers.
1219 static void transform_MemPerm(ia32_code_gen_t *cg, ir_node *node)
1221 ir_graph *irg = get_irn_irg(node);
1222 ir_node *block = get_nodes_block(node);
1223 ir_node *sp = be_abi_get_ignore_irn(cg->birg->abi, &ia32_gp_regs[REG_ESP]);
1224 int arity = be_get_MemPerm_entity_arity(node);
1225 ir_node **pops = ALLOCAN(ir_node*, arity);
1229 const ir_edge_t *edge;
1230 const ir_edge_t *next;
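/* first push the contents of all input entities onto the stack, then pop them
 * back into the output entities in reverse order */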
1233 for(i = 0; i < arity; ++i) {
1234 ir_entity *inent = be_get_MemPerm_in_entity(node, i);
1235 ir_entity *outent = be_get_MemPerm_out_entity(node, i);
1236 ir_type *enttype = get_entity_type(inent);
1237 unsigned entsize = get_type_size_bytes(enttype);
1238 unsigned entsize2 = get_type_size_bytes(get_entity_type(outent));
1239 ir_node *mem = get_irn_n(node, i + 1);
1242 /* work around cases where entities have different sizes */
1243 if(entsize2 < entsize)
1245 assert( (entsize == 4 || entsize == 8) && "spillslot on x86 should be 32 or 64 bit");
1247 push = create_push(cg, node, node, sp, mem, inent);
1248 sp = create_spproj(node, push, pn_ia32_Push_stack);
1250 /* add another push after the first one */
1251 push = create_push(cg, node, node, sp, mem, inent);
1252 add_ia32_am_offs_int(push, 4);
1253 sp = create_spproj(node, push, pn_ia32_Push_stack);
1256 set_irn_n(node, i, new_Bad());
1260 for(i = arity - 1; i >= 0; --i) {
1261 ir_entity *inent = be_get_MemPerm_in_entity(node, i);
1262 ir_entity *outent = be_get_MemPerm_out_entity(node, i);
1263 ir_type *enttype = get_entity_type(outent);
1264 unsigned entsize = get_type_size_bytes(enttype);
1265 unsigned entsize2 = get_type_size_bytes(get_entity_type(inent));
1268 /* work around cases where entities have different sizes */
1269 if(entsize2 < entsize)
1271 assert( (entsize == 4 || entsize == 8) && "spillslot on x86 should be 32 or 64 bit");
1273 pop = create_pop(cg, node, node, sp, outent);
1274 sp = create_spproj(node, pop, pn_ia32_Pop_stack);
1276 add_ia32_am_offs_int(pop, 4);
1278 /* add another pop after the first one */
1279 pop = create_pop(cg, node, node, sp, outent);
1280 sp = create_spproj(node, pop, pn_ia32_Pop_stack);
1287 keep = be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in);
1288 sched_add_before(node, keep);
1290 /* exchange memprojs */
1291 foreach_out_edge_safe(node, edge, next) {
1292 ir_node *proj = get_edge_src_irn(edge);
1293 int p = get_Proj_proj(proj);
1297 set_Proj_pred(proj, pops[p]);
1298 set_Proj_proj(proj, pn_ia32_Pop_M);
1301 /* remove memperm */
1302 arity = get_irn_arity(node);
1303 for(i = 0; i < arity; ++i) {
1304 set_irn_n(node, i, new_Bad());
1310 * Block-Walker: Calls the transform functions for Spill, Reload and MemPerm nodes.
1312 static void ia32_after_ra_walker(ir_node *block, void *env) {
1313 ir_node *node, *prev;
1314 ia32_code_gen_t *cg = env;
1316 /* beware: the schedule is changed here */
1317 for (node = sched_last(block); !sched_is_begin(node); node = prev) {
1318 prev = sched_prev(node);
1320 if (be_is_Reload(node)) {
1321 transform_to_Load(cg, node);
1322 } else if (be_is_Spill(node)) {
1323 transform_to_Store(cg, node);
1324 } else if (be_is_MemPerm(node)) {
1325 transform_MemPerm(cg, node);
1331 * Collects nodes that need frame entities assigned.
1333 static void ia32_collect_frame_entity_nodes(ir_node *node, void *data)
1335 be_fec_env_t *env = data;
1336 const ir_mode *mode;
1339 if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
1340 mode = get_spill_mode_mode(get_irn_mode(node));
1341 align = get_mode_size_bytes(mode);
1342 } else if (is_ia32_irn(node) &&
1343 get_ia32_frame_ent(node) == NULL &&
1344 is_ia32_use_frame(node)) {
1345 if (is_ia32_need_stackent(node))
1348 switch (get_ia32_irn_opcode(node)) {
1350 case iro_ia32_Load: {
1351 const ia32_attr_t *attr = get_ia32_attr_const(node);
1353 if (attr->data.need_32bit_stackent) {
1355 } else if (attr->data.need_64bit_stackent) {
1358 mode = get_ia32_ls_mode(node);
1359 if (is_ia32_is_reload(node))
1360 mode = get_spill_mode_mode(mode);
1362 align = get_mode_size_bytes(mode);
1366 case iro_ia32_vfild:
1368 case iro_ia32_xLoad: {
1369 mode = get_ia32_ls_mode(node);
1374 case iro_ia32_FldCW: {
1375 /* although 2 bytes would be enough, 4 bytes performs best */
1383 panic("unexpected frame user while collecting frame entity nodes");
1385 case iro_ia32_FnstCW:
1386 case iro_ia32_Store8Bit:
1387 case iro_ia32_Store:
1390 case iro_ia32_vfist:
1391 case iro_ia32_vfisttp:
1393 case iro_ia32_xStore:
1394 case iro_ia32_xStoreSimple:
1401 be_node_needs_frame_entity(env, node, mode, align);
1405 * We transform Spill and Reload here. This needs to be done before
1406 * stack biasing; otherwise we would miss the corrected offset for these nodes.
1408 static void ia32_after_ra(void *self) {
1409 ia32_code_gen_t *cg = self;
1410 ir_graph *irg = cg->irg;
1411 be_fec_env_t *fec_env = be_new_frame_entity_coalescer(cg->birg);
1413 /* create and coalesce frame entities */
1414 irg_walk_graph(irg, NULL, ia32_collect_frame_entity_nodes, fec_env);
1415 be_assign_entities(fec_env);
1416 be_free_frame_entity_coalescer(fec_env);
1418 irg_block_walk_graph(irg, NULL, ia32_after_ra_walker, cg);
1422 * Last touchups for the graph before emit: x87 simulation to replace the
1423 * virtual x87 instructions with real ones, creating a block schedule and peephole optimisations.
1426 static void ia32_finish(void *self) {
1427 ia32_code_gen_t *cg = self;
1428 ir_graph *irg = cg->irg;
1430 ia32_finish_irg(irg, cg);
1432 /* we might have to rewrite x87 virtual registers */
1433 if (cg->do_x87_sim) {
1434 x87_simulate_graph(cg->birg);
1437 /* do peephole optimisations */
1438 ia32_peephole_optimization(cg);
1440 /* create block schedule, this also removes empty blocks which might
1441 * produce critical edges */
1442 cg->blk_sched = be_create_block_schedule(irg, cg->birg->exec_freq);
1446 * Emits the code, closes the output file and frees
1447 * the code generator interface.
1449 static void ia32_codegen(void *self) {
1450 ia32_code_gen_t *cg = self;
1451 ir_graph *irg = cg->irg;
1453 ia32_gen_routine(cg, irg);
1457 /* remove it from the isa */
1460 assert(ia32_current_cg == cg);
1461 ia32_current_cg = NULL;
1463 /* de-allocate code generator */
1464 del_set(cg->reg_set);
1469 * Returns the node representing the PIC base.
1471 static ir_node *ia32_get_pic_base(void *self) {
1473 ia32_code_gen_t *cg = self;
1474 ir_node *get_eip = cg->get_eip;
1475 if (get_eip != NULL)
1478 block = get_irg_start_block(cg->irg);
1479 get_eip = new_bd_ia32_GetEIP(NULL, block);
1480 cg->get_eip = get_eip;
1482 be_dep_on_frame(get_eip);
1486 static void *ia32_cg_init(be_irg_t *birg);
1488 static const arch_code_generator_if_t ia32_code_gen_if = {
1490 ia32_get_pic_base, /* return node used as base in pic code addresses */
1491 ia32_before_abi, /* before abi introduce hook */
1494 ia32_before_ra, /* before register allocation hook */
1495 ia32_after_ra, /* after register allocation hook */
1496 ia32_finish, /* called before codegen */
1497 ia32_codegen /* emit && done */
1501 * Initializes an IA32 code generator.
1503 static void *ia32_cg_init(be_irg_t *birg) {
1504 ia32_isa_t *isa = (ia32_isa_t *)birg->main_env->arch_env;
1505 ia32_code_gen_t *cg = XMALLOCZ(ia32_code_gen_t);
1507 cg->impl = &ia32_code_gen_if;
1508 cg->irg = birg->irg;
1509 cg->reg_set = new_set(ia32_cmp_irn_reg_assoc, 1024);
1512 cg->blk_sched = NULL;
1513 cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
1514 cg->gprof = (birg->main_env->options->gprof) ? 1 : 0;
1517 /* Linux gprof implementation needs base pointer */
1518 birg->main_env->options->omit_fp = 0;
1525 if (isa->name_obst) {
1526 obstack_free(isa->name_obst, NULL);
1527 obstack_init(isa->name_obst);
1531 cur_reg_set = cg->reg_set;
1533 assert(ia32_current_cg == NULL);
1534 ia32_current_cg = cg;
1536 return (arch_code_generator_t *)cg;
1541 /*****************************************************************
1542 * ____ _ _ _____ _____
1543 * | _ \ | | | | |_ _|/ ____| /\
1544 * | |_) | __ _ ___| | _____ _ __ __| | | | | (___ / \
1545 * | _ < / _` |/ __| |/ / _ \ '_ \ / _` | | | \___ \ / /\ \
1546 * | |_) | (_| | (__| < __/ | | | (_| | _| |_ ____) / ____ \
1547 * |____/ \__,_|\___|_|\_\___|_| |_|\__,_| |_____|_____/_/ \_\
1549 *****************************************************************/
1552 * Set output modes for GCC
1554 static const tarval_mode_info mo_integer = {
1561 * set the tarval output mode of all integer modes to decimal
1563 static void set_tarval_output_modes(void)
1567 for (i = get_irp_n_modes() - 1; i >= 0; --i) {
1568 ir_mode *mode = get_irp_mode(i);
1570 if (mode_is_int(mode))
1571 set_tarval_mode_output_option(mode, &mo_integer);
1575 const arch_isa_if_t ia32_isa_if;
1578 * The template that generates a new ISA object.
1579 * Note that this template can be changed by command line arguments.
1582 static ia32_isa_t ia32_isa_template = {
1584 &ia32_isa_if, /* isa interface implementation */
1585 &ia32_gp_regs[REG_ESP], /* stack pointer register */
1586 &ia32_gp_regs[REG_EBP], /* base pointer register */
1587 -1, /* stack direction */
1588 2, /* power of two stack alignment, 2^2 == 4 */
1589 NULL, /* main environment */
1590 7, /* costs for a spill instruction */
1591 5, /* costs for a reload instruction */
1593 NULL, /* 16bit register names */
1594 NULL, /* 8bit register names */
1595 NULL, /* 8bit register names high */
1598 NULL, /* current code generator */
1599 NULL, /* abstract machine */
1601 NULL, /* name obstack */
1605 static void init_asm_constraints(void)
1607 be_init_default_asm_constraint_flags();
1609 asm_constraint_flags['a'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1610 asm_constraint_flags['b'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1611 asm_constraint_flags['c'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1612 asm_constraint_flags['d'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1613 asm_constraint_flags['D'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1614 asm_constraint_flags['S'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1615 asm_constraint_flags['Q'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1616 asm_constraint_flags['q'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1617 asm_constraint_flags['A'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1618 asm_constraint_flags['l'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1619 asm_constraint_flags['R'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1620 asm_constraint_flags['r'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1621 asm_constraint_flags['p'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1622 asm_constraint_flags['f'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1623 asm_constraint_flags['t'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1624 asm_constraint_flags['u'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1625 asm_constraint_flags['Y'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1626 asm_constraint_flags['X'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1627 asm_constraint_flags['n'] = ASM_CONSTRAINT_FLAG_SUPPORTS_IMMEDIATE;
1628 asm_constraint_flags['g'] = ASM_CONSTRAINT_FLAG_SUPPORTS_IMMEDIATE;
1630 /* no support for autodecrement/autoincrement */
1631 asm_constraint_flags['<'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1632 asm_constraint_flags['>'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1633 /* no float consts */
1634 asm_constraint_flags['E'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1635 asm_constraint_flags['F'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1636 /* makes no sense on x86 */
1637 asm_constraint_flags['s'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1638 /* no support for sse consts yet */
1639 asm_constraint_flags['C'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1640 /* no support for x87 consts yet */
1641 asm_constraint_flags['G'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1642 /* no support for mmx registers yet */
1643 asm_constraint_flags['y'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1644 /* not available in 32bit mode */
1645 asm_constraint_flags['Z'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1646 asm_constraint_flags['e'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1648 /* no code yet to determine register class needed... */
1649 asm_constraint_flags['X'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1653 * Initializes the backend ISA.
1655 static arch_env_t *ia32_init(FILE *file_handle) {
1656 static int inited = 0;
1664 set_tarval_output_modes();
1666 isa = XMALLOC(ia32_isa_t);
1667 memcpy(isa, &ia32_isa_template, sizeof(*isa));
1669 if(mode_fpcw == NULL) {
1670 mode_fpcw = new_ir_mode("Fpcw", irms_int_number, 16, 0, irma_none, 0);
1673 ia32_register_init();
1674 ia32_create_opcodes(&ia32_irn_ops);
1676 be_emit_init(file_handle);
1677 isa->regs_16bit = pmap_create();
1678 isa->regs_8bit = pmap_create();
1679 isa->regs_8bit_high = pmap_create();
1680 isa->types = pmap_create();
1681 isa->tv_ent = pmap_create();
1682 isa->cpu = ia32_init_machine_description();
1684 ia32_build_16bit_reg_map(isa->regs_16bit);
1685 ia32_build_8bit_reg_map(isa->regs_8bit);
1686 ia32_build_8bit_reg_map_high(isa->regs_8bit_high);
1689 isa->name_obst = XMALLOC(struct obstack);
1690 obstack_init(isa->name_obst);
1693 /* enter the ISA object into the intrinsic environment */
1694 intrinsic_env.isa = isa;
1696 /* emit asm includes */
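/* (the #APP / #NO_APP markers conventionally flag verbatim user assembly for
 * the GNU assembler) */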
1697 n = get_irp_n_asms();
1698 for (i = 0; i < n; ++i) {
1699 be_emit_cstring("#APP\n");
1700 be_emit_ident(get_irp_asm(i));
1701 be_emit_cstring("\n#NO_APP\n");
1704 /* needed for the debug support */
1705 be_gas_emit_switch_section(GAS_SECTION_TEXT);
1706 be_emit_cstring(".Ltext0:\n");
1707 be_emit_write_line();
1709 /* we mark referenced global entities, so we can only emit those which
1710 * are actually referenced. (Note: you mustn't use the type visited flag
1711 * elsewhere in the backend)
1713 inc_master_type_visited();
1715 return &isa->arch_env;
1721 * Closes the output file and frees the ISA structure.
1723 static void ia32_done(void *self) {
1724 ia32_isa_t *isa = self;
1726 /* now emit all global declarations */
1727 be_gas_emit_decls(isa->arch_env.main_env, 1);
1729 pmap_destroy(isa->regs_16bit);
1730 pmap_destroy(isa->regs_8bit);
1731 pmap_destroy(isa->regs_8bit_high);
1732 pmap_destroy(isa->tv_ent);
1733 pmap_destroy(isa->types);
1736 obstack_free(isa->name_obst, NULL);
1746 * Return the number of register classes for this architecture.
1747 * We always report these:
1748 * - the general purpose registers
1749 * - the SSE floating point register set
1750 * - the virtual floating point registers
1751 * - the SSE vector register set
1753 static unsigned ia32_get_n_reg_class(const void *self) {
1759 * Return the register class for index i.
1761 static const arch_register_class_t *ia32_get_reg_class(const void *self,
1765 assert(i < N_CLASSES);
1766 return &ia32_reg_classes[i];
1770 * Get the register class which shall be used to store a value of a given mode.
1771 * @param self The this pointer.
1772 * @param mode The mode in question.
1773 * @return A register class which can hold values of the given mode.
1775 const arch_register_class_t *ia32_get_reg_class_for_mode(const void *self,
1776 const ir_mode *mode)
1780 if (mode_is_float(mode)) {
1781 return ia32_cg_config.use_sse2 ? &ia32_reg_classes[CLASS_ia32_xmm] : &ia32_reg_classes[CLASS_ia32_vfp];
1784 return &ia32_reg_classes[CLASS_ia32_gp];
1788 * Get the ABI restrictions for procedure calls.
1789 * @param self The this pointer.
1790 * @param method_type The type of the method (procedure) in question.
1791 * @param abi The abi object to be modified
1793 static void ia32_get_call_abi(const void *self, ir_type *method_type,
1801 be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);
1805 /* set abi flags for calls */
1806 call_flags.bits.left_to_right = 0; /* always last arg first on stack */
1807 call_flags.bits.store_args_sequential = 0;
1808 /* call_flags.bits.try_omit_fp not changed: can handle both settings */
1809 call_flags.bits.fp_free = 0; /* the frame pointer is fixed in IA32 */
1810 call_flags.bits.call_has_imm = 0; /* No call immediates, we handle this by ourselves */
1812 /* set parameter passing style */
1813 be_abi_call_set_flags(abi, call_flags, &ia32_abi_callbacks);
1815 if (get_method_variadicity(method_type) == variadicity_variadic) {
1816 /* pass all parameters of a variadic function on the stack */
1819 cc = get_method_calling_convention(method_type);
1820 if (get_method_additional_properties(method_type) & mtp_property_private &&
1821 ia32_cg_config.optimize_cc) {
1822 /* set the calling conventions to register parameter */
1823 cc = (cc & ~cc_bits) | cc_reg_param;
1827 /* we have to pop the shadow parameter ourselves for compound calls */
1828 if( (get_method_calling_convention(method_type) & cc_compound_ret)
1829 && !(cc & cc_reg_param)) {
1830 pop_amount += get_mode_size_bytes(mode_P_data);
1833 n = get_method_n_params(method_type);
1834 for (i = regnum = 0; i < n; i++) {
1836 const arch_register_t *reg = NULL;
1838 tp = get_method_param_type(method_type, i);
1839 mode = get_type_mode(tp);
1841 reg = ia32_get_RegParam_reg(cc, regnum, mode);
1844 be_abi_call_param_reg(abi, i, reg);
1847 /* Micro optimisation: if the mode is shorter than 4 bytes, load 4 bytes.
1848 * movl has a shorter opcode than mov[sz][bw]l */
1849 ir_mode *load_mode = mode;
1852 unsigned size = get_mode_size_bytes(mode);
1854 if (cc & cc_callee_clear_stk) {
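/* the callee pops its own arguments (stdcall-like conventions); each argument
 * occupies a stack slot rounded up to 4 bytes */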
1855 pop_amount += (size + 3U) & ~3U;
1858 if (size < 4) load_mode = mode_Iu;
1861 be_abi_call_param_stack(abi, i, load_mode, 4, 0, 0);
1865 be_abi_call_set_pop(abi, pop_amount);
1867 /* set return registers */
1868 n = get_method_n_ress(method_type);
1870 assert(n <= 2 && "more than two results not supported");
1872 /* In case of 64bit returns, we will have two 32bit values */
1874 tp = get_method_res_type(method_type, 0);
1875 mode = get_type_mode(tp);
1877 assert(!mode_is_float(mode) && "two FP results not supported");
1879 tp = get_method_res_type(method_type, 1);
1880 mode = get_type_mode(tp);
1882 assert(!mode_is_float(mode) && "mixed INT, FP results not supported");
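/* the two 32 bit halves are returned in the EAX/EDX register pair */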
1884 be_abi_call_res_reg(abi, 0, &ia32_gp_regs[REG_EAX]);
1885 be_abi_call_res_reg(abi, 1, &ia32_gp_regs[REG_EDX]);
1888 const arch_register_t *reg;
1890 tp = get_method_res_type(method_type, 0);
1891 assert(is_atomic_type(tp));
1892 mode = get_type_mode(tp);
1894 reg = mode_is_float(mode) ? &ia32_vfp_regs[REG_VF0] : &ia32_gp_regs[REG_EAX];
1896 be_abi_call_res_reg(abi, 0, reg);
1900 int ia32_to_appear_in_schedule(void *block_env, const ir_node *irn)
1904 if(!is_ia32_irn(irn)) {
1908 if(is_ia32_NoReg_GP(irn) || is_ia32_NoReg_VFP(irn) || is_ia32_NoReg_XMM(irn)
1909 || is_ia32_Unknown_GP(irn) || is_ia32_Unknown_XMM(irn)
1910 || is_ia32_Unknown_VFP(irn) || is_ia32_ChangeCW(irn)
1911 || is_ia32_Immediate(irn))
1918 * Initializes the code generator interface.
1920 static const arch_code_generator_if_t *ia32_get_code_generator_if(void *self)
1923 return &ia32_code_gen_if;
1927 * Returns the estimated execution time of an ia32 irn.
1929 static sched_timestep_t ia32_sched_exectime(void *env, const ir_node *irn) {
1931 return is_ia32_irn(irn) ? ia32_get_op_estimated_cost(irn) : 1;
1934 list_sched_selector_t ia32_sched_selector;
1937 * Returns the reg_pressure scheduler with to_appear_in_schedule() overloaded
1939 static const list_sched_selector_t *ia32_get_list_sched_selector(
1940 const void *self, list_sched_selector_t *selector)
1943 memcpy(&ia32_sched_selector, selector, sizeof(ia32_sched_selector));
1944 ia32_sched_selector.exectime = ia32_sched_exectime;
1945 ia32_sched_selector.to_appear_in_schedule = ia32_to_appear_in_schedule;
1946 return &ia32_sched_selector;
1949 static const ilp_sched_selector_t *ia32_get_ilp_sched_selector(const void *self)
1956 * Returns the necessary byte alignment for storing a register of a given class.
1958 static int ia32_get_reg_class_alignment(const void *self,
1959 const arch_register_class_t *cls)
1961 ir_mode *mode = arch_register_class_mode(cls);
1962 int bytes = get_mode_size_bytes(mode);
1965 if (mode_is_float(mode) && bytes > 8)
1970 static const be_execution_unit_t ***ia32_get_allowed_execution_units(
1971 const void *self, const ir_node *irn)
1973 static const be_execution_unit_t *_allowed_units_BRANCH[] = {
1974 &ia32_execution_units_BRANCH[IA32_EXECUNIT_TP_BRANCH_BRANCH1],
1975 &ia32_execution_units_BRANCH[IA32_EXECUNIT_TP_BRANCH_BRANCH2],
1978 static const be_execution_unit_t *_allowed_units_GP[] = {
1979 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EAX],
1980 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EBX],
1981 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_ECX],
1982 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EDX],
1983 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_ESI],
1984 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EDI],
1985 &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EBP],
1988 static const be_execution_unit_t *_allowed_units_DUMMY[] = {
1989 &be_machine_execution_units_DUMMY[0],
1992 static const be_execution_unit_t **_units_callret[] = {
1993 _allowed_units_BRANCH,
1996 static const be_execution_unit_t **_units_other[] = {
2000 static const be_execution_unit_t **_units_dummy[] = {
2001 _allowed_units_DUMMY,
2004 const be_execution_unit_t ***ret;
2007 if (is_ia32_irn(irn)) {
2008 ret = get_ia32_exec_units(irn);
2009 } else if (is_be_node(irn)) {
2010 if (be_is_Return(irn)) {
2011 ret = _units_callret;
2012 } else if (be_is_Barrier(irn)) {
2026 * Return the abstract ia32 machine.
2028 static const be_machine_t *ia32_get_machine(const void *self) {
2029 const ia32_isa_t *isa = self;
2034 * Return irp irgs in the desired order.
2036 static ir_graph **ia32_get_irg_list(const void *self, ir_graph ***irg_list)
2043 static void ia32_mark_remat(const void *self, ir_node *node) {
2045 if (is_ia32_irn(node)) {
2046 set_ia32_is_remat(node);
2051 * Check for Abs or Nabs.
2053 static int is_Abs_or_Nabs(ir_node *cmp, ir_node *sel, ir_node *t, ir_node *f) {
2060 /* must be <, <=, >=, > */
2061 pnc = get_Proj_proj(sel);
2062 if (pnc != pn_Cmp_Ge && pnc != pn_Cmp_Gt &&
2063 pnc != pn_Cmp_Le && pnc != pn_Cmp_Lt)
2066 l = get_Cmp_left(cmp);
2067 r = get_Cmp_right(cmp);
2069 /* must be x cmp 0 */
2070 if ((l != t && l != f) || !is_Const(r) || !is_Const_null(r))
2073 if ((!is_Minus(t) || get_Minus_op(t) != f) &&
2074 (!is_Minus(f) || get_Minus_op(f) != t))
2080 * Allows or disallows the creation of Psi nodes for the given Phi nodes.
2082 * @param sel A selector of a Cond.
2083 * @param phi_list List of Phi nodes about to be converted (linked via get_Phi_next() field)
2084 * @param i First data predecessor involved in if conversion
2085 * @param j Second data predecessor involved in if conversion
2087 * @return 1 if allowed, 0 otherwise
2089 static int ia32_is_psi_allowed(ir_node *sel, ir_node *phi_list, int i, int j)
2092 ir_node *cmp = NULL;
2094 /* we can't handle Psis with 64bit compares yet */
2096 cmp = get_Proj_pred(sel);
2098 ir_node *left = get_Cmp_left(cmp);
2099 ir_mode *cmp_mode = get_irn_mode(left);
2100 if (!mode_is_float(cmp_mode) && get_mode_size_bits(cmp_mode) > 32)
2107 if (ia32_cg_config.use_cmov) {
2108 if (ia32_cg_config.use_sse2 && cmp != NULL) {
2109 pn_Cmp pn = get_Proj_proj(sel);
2110 ir_node *cl = get_Cmp_left(cmp);
2111 ir_node *cr = get_Cmp_right(cmp);
2113 /* check the Phi nodes: no 64bit and no floating point cmov */
2114 for (phi = phi_list; phi; phi = get_Phi_next(phi)) {
2115 ir_mode *mode = get_irn_mode(phi);
2117 if (mode_is_float(mode)) {
2118 /* check for Min, Max */
2119 ir_node *t = get_Phi_pred(phi, i);
2120 ir_node *f = get_Phi_pred(phi, j);
2123 /* SSE2 supports Min & Max */
2124 if (pn == pn_Cmp_Lt || pn == pn_Cmp_Le || pn == pn_Cmp_Ge || pn == pn_Cmp_Gt) {
2125 if (cl == t && cr == f) {
2126 /* Psi(a <=/>= b, a, b) => MIN, MAX */
2128 } else if (cl == f && cr == t) {
2129 /* Psi(a <=/>= b, b, a) => MAX, MIN */
2136 } else if (get_mode_size_bits(mode) > 32)
2140 /* check the Phi nodes: no 64bit and no floating point cmov */
2141 for (phi = phi_list; phi; phi = get_Phi_next(phi)) {
2142 ir_mode *mode = get_irn_mode(phi);
2144 if (mode_is_float(mode)) {
2145 ir_node *t = get_Phi_pred(phi, i);
2146 ir_node *f = get_Phi_pred(phi, j);
2148 if (! is_Abs_or_Nabs(cmp, sel, t, f))
2150 } else if (get_mode_size_bits(mode) > 32)
2160 /* No Cmov, only some special cases */
2164 /* Now some supported cases here */
2165 pn = get_Proj_proj(sel);
2166 cl = get_Cmp_left(cmp);
2167 cr = get_Cmp_right(cmp);
2169 for (phi = phi_list; phi; phi = get_Phi_next(phi)) {
2170 ir_mode *mode = get_irn_mode(phi);
2174 t = get_Phi_pred(phi, i);
2175 f = get_Phi_pred(phi, j);
2177 if (mode_is_float(mode)) {
2178 /* only abs or nabs supported */
2179 if (! is_Abs_or_Nabs(cmp, sel, t, f))
2181 } else if (get_mode_size_bits(mode) > 32) {
2186 if (is_Const(t) && is_Const(f)) {
2187 if ((is_Const_null(t) && is_Const_one(f)) || (is_Const_one(t) && is_Const_null(f))) {
2188 /* always support Psi(x, C1, C2) */
2191 } else if (pn == pn_Cmp_Lt || pn == pn_Cmp_Le || pn == pn_Cmp_Ge || pn == pn_Cmp_Gt) {
2194 } else if (cl == t && cr == f) {
2195 /* Psi(a <=/>= b, a, b) => Min, Max */
2197 } else if (cl == f && cr == t) {
2198 /* Psi(a <=/>= b, b, a) => Max, Min */
2201 } else if ((pn & pn_Cmp_Gt) && !mode_is_signed(mode) &&
2202 is_Const(f) && is_Const_null(f) && is_Sub(t) &&
2203 get_Sub_left(t) == cl && get_Sub_right(t) == cr) {
2204 /* Psi(a >=u b, a - b, 0) unsigned Doz */
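/* Doz = 'difference or zero': yields a - b if a >= b, otherwise 0 */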
2206 } else if ((pn & pn_Cmp_Lt) && !mode_is_signed(mode) &&
2207 is_Const(t) && is_Const_null(t) && is_Sub(f) &&
2208 get_Sub_left(f) == cl && get_Sub_right(f) == cr) {
2209 /* Psi(a <=u b, 0, a - b) unsigned Doz */
2211 } else if (is_Const(cr) && is_Const_null(cr)) {
2212 if (cl == t && is_Minus(f) && get_Minus_op(f) == cl) {
2213 /* Psi(a <=/>= 0 ? a : -a) Nabs/Abs */
2215 } else if (cl == f && is_Minus(t) && get_Minus_op(t) == cl) {
2216 /* Psi(a <=/>= 0 ? -a : a) Abs/Nabs */
2224 /* all checks passed */
2230 static asm_constraint_flags_t ia32_parse_asm_constraint(const void *self, const char **c)
2235 /* we already added all our simple flags to the flags modifier list in
2236 * init, so we do not know this flag. */
2237 return ASM_CONSTRAINT_FLAG_INVALID;
2240 static int ia32_is_valid_clobber(const void *self, const char *clobber)
2244 return ia32_get_clobber_register(clobber) != NULL;
2248 * Returns the libFirm configuration parameters for this backend.
2250 static const backend_params *ia32_get_libfirm_params(void) {
2251 static const ir_settings_if_conv_t ifconv = {
2252 4, /* maxdepth, doesn't matter for Psi-conversion */
2253 ia32_is_psi_allowed /* allows or disallows Psi creation for given selector */
2255 static const ir_settings_arch_dep_t ad = {
2256 1, /* also use subs */
2257 4, /* maximum shifts */
2258 31, /* maximum shift amount */
2259 ia32_evaluate_insn, /* evaluate the instruction sequence */
2261 1, /* allow Mulhs */
2262 1, /* allow Mulus */
2263 32 /* Mulh allowed up to 32 bit */
2265 static backend_params p = {
2266 1, /* need dword lowering */
2267 1, /* support inline assembly */
2268 0, /* no immediate floating point mode. */
2269 NULL, /* no additional opcodes */
2270 NULL, /* will be set later */
2271 ia32_create_intrinsic_fkt,
2272 &intrinsic_env, /* context for ia32_create_intrinsic_fkt */
2273 NULL, /* will be set below */
2274 NULL /* will be set below */
2277 ia32_setup_cg_config();
2279 /* doesn't really belong here, but this is the earliest place the backend
2281 init_asm_constraints();
2284 p.if_conv_info = &ifconv;
2288 static const lc_opt_enum_int_items_t gas_items[] = {
2289 { "elf", GAS_FLAVOUR_ELF },
2290 { "mingw", GAS_FLAVOUR_MINGW },
2291 { "yasm", GAS_FLAVOUR_YASM },
2292 { "macho", GAS_FLAVOUR_MACH_O },
2296 static lc_opt_enum_int_var_t gas_var = {
2297 (int*) &be_gas_flavour, gas_items
2300 #ifdef FIRM_GRGEN_BE
2301 static const lc_opt_enum_int_items_t transformer_items[] = {
2302 { "default", TRANSFORMER_DEFAULT },
2303 { "pbqp", TRANSFORMER_PBQP },
2304 { "random", TRANSFORMER_RAND },
2308 static lc_opt_enum_int_var_t transformer_var = {
2309 (int*)&be_transformer, transformer_items
2313 static const lc_opt_table_entry_t ia32_options[] = {
2314 LC_OPT_ENT_ENUM_INT("gasmode", "set the GAS compatibility mode", &gas_var),
2315 #ifdef FIRM_GRGEN_BE
2316 LC_OPT_ENT_ENUM_INT("transformer", "the transformer used for code selection", &transformer_var),
2318 LC_OPT_ENT_INT("stackalign", "set power of two stack alignment for calls",
2319 &ia32_isa_template.arch_env.stack_alignment),
2323 const arch_isa_if_t ia32_isa_if = {
2326 ia32_handle_intrinsics,
2327 ia32_get_n_reg_class,
2329 ia32_get_reg_class_for_mode,
2331 ia32_get_code_generator_if,
2332 ia32_get_list_sched_selector,
2333 ia32_get_ilp_sched_selector,
2334 ia32_get_reg_class_alignment,
2335 ia32_get_libfirm_params,
2336 ia32_get_allowed_execution_units,
2340 ia32_parse_asm_constraint,
2341 ia32_is_valid_clobber
2344 void be_init_arch_ia32(void)
2346 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
2347 lc_opt_entry_t *ia32_grp = lc_opt_get_grp(be_grp, "ia32");
2349 lc_opt_add_table(ia32_grp, ia32_options);
2350 be_register_isa_if("ia32", &ia32_isa_if);
2352 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.cg");
2354 ia32_init_emitter();
2356 ia32_init_optimize();
2357 ia32_init_transform();
2359 ia32_init_architecture();
2362 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_ia32);