 * This is the main ia32 firm backend driver.
 * @author Christian Wuerdig
#ifdef WITH_LIBCORE
#include <libcore/lc_opts.h>
#include <libcore/lc_opts_enum.h>
#endif /* WITH_LIBCORE */

#include "pseudo_irg.h"

#include "iredges_t.h"

#include "../beabi.h"             /* the general register allocator interface */
#include "../benode_t.h"
#include "../belower.h"
#include "../besched_t.h"

#include "bearch_ia32_t.h"

#include "ia32_new_nodes.h"       /* ia32 nodes interface */
#include "gen_ia32_regalloc_if.h" /* the generated interface (register type and class definitions) */
#include "ia32_gen_decls.h"       /* interface declaration emitter */
#include "ia32_transform.h"
#include "ia32_emitter.h"
#include "ia32_map_regs.h"
#include "ia32_optimize.h"

#include "ia32_dbg_stat.h"

#define DEBUG_MODULE "firm.be.ia32.isa"
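/* set used to remember the register assigned to non-ia32 (generic be) nodes;
 * it is handed to ia32_set_firm_reg()/ia32_get_firm_reg() below */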
static set *cur_reg_set = NULL;

#define is_Start(irn) (get_irn_opcode(irn) == iro_Start)
/* Creates the unique per irg GP NoReg node. */
ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg) {
	return be_abi_get_callee_save_irn(cg->birg->abi, &ia32_gp_regs[REG_GP_NOREG]);
}

/* Creates the unique per irg FP NoReg node. */
ir_node *ia32_new_NoReg_fp(ia32_code_gen_t *cg) {
	return be_abi_get_callee_save_irn(cg->birg->abi,
		USE_SSE2(cg) ? &ia32_xmm_regs[REG_XMM_NOREG] : &ia32_vfp_regs[REG_VFP_NOREG]);
}
/**************************************************
 *
 *       register allocator interface
 *
 **************************************************/
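/**
 * Returns the first non-Proj predecessor of @p n, skipping chains of Projs.
 */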
static ir_node *my_skip_proj(const ir_node *n) {
	while (is_Proj(n))
		n = get_Proj_pred(n);
	return (ir_node *)n;
}
/**
 * Return register requirements for an ia32 node.
 * If the node returns a tuple (mode_T) then the Projs
 * will be asked for this information.
 */
static const arch_register_req_t *ia32_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos) {
	const ia32_irn_ops_t      *ops = self;
	const ia32_register_req_t *irn_req;
	long                       node_pos = pos == -1 ? 0 : pos;
	ir_mode                   *mode     = is_Block(irn) ? NULL : get_irn_mode(irn);
	FIRM_DBG_REGISTER(firm_dbg_module_t *mod, DEBUG_MODULE);
	if (is_Block(irn) || mode == mode_M || mode == mode_X) {
		DBG((mod, LEVEL_1, "ignoring Block, mode_M, mode_X node %+F\n", irn));
		return NULL;
	}

	if (mode == mode_T && pos < 0) {
		DBG((mod, LEVEL_1, "ignoring request for OUT requirements for node %+F\n", irn));
		return NULL;
	}
	DBG((mod, LEVEL_1, "get requirements at pos %d for %+F ... ", pos, irn));

	if (is_Proj(irn)) {
		if (pos == -1)
			node_pos = ia32_translate_proj_pos(irn);

		irn = my_skip_proj(irn);

		DB((mod, LEVEL_1, "skipping Proj, going to %+F at pos %d ... ", irn, node_pos));
	}
	if (is_ia32_irn(irn)) {
		if (pos >= 0) {
			irn_req = get_ia32_in_req(irn, pos);
		}
		else {
			irn_req = get_ia32_out_req(irn, node_pos);
		}

		DB((mod, LEVEL_1, "returning reqs for %+F at pos %d\n", irn, pos));

		memcpy(req, &(irn_req->req), sizeof(*req));

		if (arch_register_req_is(&(irn_req->req), should_be_same)) {
			assert(irn_req->same_pos >= 0 && "should be same constraint for in -> out NYI");
			req->other_same = get_irn_n(irn, irn_req->same_pos);
		}

		if (arch_register_req_is(&(irn_req->req), should_be_different)) {
			assert(irn_req->different_pos >= 0 && "should be different constraint for in -> out NYI");
			req->other_different = get_irn_n(irn, irn_req->different_pos);
		}
	}
	else {
		/* treat Unknowns like Const with default requirements */
		if (is_Unknown(irn)) {
			DB((mod, LEVEL_1, "returning UKNWN reqs for %+F\n", irn));
			if (mode_is_float(mode)) {
				if (USE_SSE2(ops->cg))
					memcpy(req, &(ia32_default_req_ia32_xmm_xmm_UKNWN), sizeof(*req));
				else
					memcpy(req, &(ia32_default_req_ia32_vfp_vfp_UKNWN), sizeof(*req));
			}
			else if (mode_is_int(mode) || mode_is_reference(mode))
				memcpy(req, &(ia32_default_req_ia32_gp_gp_UKNWN), sizeof(*req));
			else if (mode == mode_T || mode == mode_M) {
				DBG((mod, LEVEL_1, "ignoring Unknown node %+F\n", irn));
				return NULL;
			}
			else
				assert(0 && "unsupported Unknown-Mode");
		}
		else {
			DB((mod, LEVEL_1, "returning NULL for %+F (not ia32)\n", irn));
			req = NULL;
		}
	}

	return req;
}
static void ia32_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg) {
	int                   pos = 0;
	const ia32_irn_ops_t *ops = self;

	if (get_irn_mode(irn) == mode_X) {
		return;
	}

	DBG((ops->cg->mod, LEVEL_1, "ia32 assigned register %s to node %+F\n", reg->name, irn));

	if (is_Proj(irn)) {
		pos = ia32_translate_proj_pos(irn);
		irn = my_skip_proj(irn);
	}

	if (is_ia32_irn(irn)) {
		const arch_register_t **slots;

		slots      = get_ia32_slots(irn);
		slots[pos] = reg;
	}
	else {
		ia32_set_firm_reg(irn, reg, cur_reg_set);
	}
}
static const arch_register_t *ia32_get_irn_reg(const void *self, const ir_node *irn) {
	int pos = 0;
	const arch_register_t *reg = NULL;

	if (is_Proj(irn)) {
		if (get_irn_mode(irn) == mode_X) {
			return NULL;
		}

		pos = ia32_translate_proj_pos(irn);
		irn = my_skip_proj(irn);
	}

	if (is_ia32_irn(irn)) {
		const arch_register_t **slots;
		slots = get_ia32_slots(irn);
		reg   = slots[pos];
	}
	else {
		reg = ia32_get_firm_reg(irn, cur_reg_set);
	}

	return reg;
}
static arch_irn_class_t ia32_classify(const void *self, const ir_node *irn) {
	irn = my_skip_proj(irn);

	if (is_cfop(irn))
		return arch_irn_class_branch;
	else if (is_ia32_Cnst(irn))
		return arch_irn_class_const;
	else if (is_ia32_Ld(irn))
		return arch_irn_class_load;
	else if (is_ia32_St(irn) || is_ia32_Store8Bit(irn))
		return arch_irn_class_store;
	else if (is_ia32_irn(irn))
		return arch_irn_class_normal;
	else
		return 0;
}
static arch_irn_flags_t ia32_get_flags(const void *self, const ir_node *irn) {
	irn = my_skip_proj(irn);

	if (is_ia32_irn(irn))
		return get_ia32_flags(irn);
	else if (is_Unknown(irn))
		return arch_irn_flags_ignore;
	else
		return 0;
}
static entity *ia32_get_frame_entity(const void *self, const ir_node *irn) {
	return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
}
static void ia32_set_stack_bias(const void *self, ir_node *irn, int bias) {
	char buf[64];
	const ia32_irn_ops_t *ops = self;

	if (get_ia32_frame_ent(irn)) {
		ia32_am_flavour_t am_flav = get_ia32_am_flavour(irn);

		DBG((ops->cg->mod, LEVEL_1, "stack biased %+F with %d\n", irn, bias));
		snprintf(buf, sizeof(buf), "%d", bias);

		if (get_ia32_op_type(irn) == ia32_Normal) {
			set_ia32_cnst(irn, buf);
		}
		else {
			add_ia32_am_offs(irn, buf);
			am_flav |= ia32_O;
			set_ia32_am_flavour(irn, am_flav);
		}
	}
}
typedef struct ia32_abi_env_t {
	be_abi_call_flags_bits_t flags;
	const arch_isa_t        *isa;
	const arch_env_t        *aenv;
	ir_graph                *irg;
} ia32_abi_env_t;

static void *ia32_abi_init(const be_abi_call_t *call, const arch_env_t *aenv, ir_graph *irg)
{
	ia32_abi_env_t *env    = xmalloc(sizeof(env[0]));
	be_abi_call_flags_t fl = be_abi_call_get_flags(call);

	env->flags = fl.bits;
	env->irg   = irg;
	env->aenv  = aenv;
	env->isa   = aenv->isa;

	return env;
}
/**
 * Put all registers which are saved by the prologue/epilogue in a set.
 *
 * @param self The callback object.
 * @param s    The result set.
 */
static void ia32_abi_dont_save_regs(void *self, pset *s)
{
	ia32_abi_env_t *env = self;

	if (env->flags.try_omit_fp)
		pset_insert_ptr(s, env->isa->bp);
}
/**
 * Generate the routine prologue.
 *
 * @param self    The callback object.
 * @param mem     A pointer to the mem node. Update this if you define new memory.
 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
 *
 * @return The register which shall be used as a stack frame base.
 *
 * All nodes which define registers in @p reg_map must keep @p reg_map current.
 */
static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map)
{
	ia32_abi_env_t *env = self;

	if (! env->flags.try_omit_fp) {
		int reg_size     = get_mode_size_bytes(env->isa->bp->reg_class->mode);
		ir_node *bl      = get_irg_start_block(env->irg);
		ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
		ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);
		ir_node *push;

		/* push ebp */
		push    = new_rd_ia32_Push(NULL, env->irg, bl, curr_sp, curr_bp, *mem);
		curr_sp = new_r_Proj(env->irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
		*mem    = new_r_Proj(env->irg, bl, push, mode_M, pn_ia32_Push_M);

		/* the push must have SP out register */
		arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
		set_ia32_flags(push, arch_irn_flags_ignore);

		/* move esp to ebp */
		curr_bp = be_new_Copy(env->isa->bp->reg_class, env->irg, bl, curr_sp);
		be_set_constr_single_reg(curr_bp, BE_OUT_POS(0), env->isa->bp);
		arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
		be_node_set_flags(curr_bp, BE_OUT_POS(0), arch_irn_flags_ignore);

		/* beware: the copy must be done before any other sp use */
		curr_sp = be_new_CopyKeep_single(env->isa->sp->reg_class, env->irg, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
		be_set_constr_single_reg(curr_sp, BE_OUT_POS(0), env->isa->sp);
		arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
		be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore);

		be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
		be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);

		return env->isa->bp;
	}

	return env->isa->sp;
}
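/* The prologue above is the node-level equivalent of the classic
 * "push ebp; mov ebp, esp" sequence; pinning the new SP/BP values with
 * arch_irn_flags_ignore keeps the register allocator from touching them. */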
/**
 * Generate the routine epilogue.
 *
 * @param self    The callback object.
 * @param bl      The block for the epilogue.
 * @param mem     A pointer to the mem node. Update this if you define new memory.
 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
 *
 * All nodes which define registers in @p reg_map must keep @p reg_map current.
 */
static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map)
{
	ia32_abi_env_t *env = self;
	ir_node *curr_sp    = be_abi_reg_map_get(reg_map, env->isa->sp);
	ir_node *curr_bp    = be_abi_reg_map_get(reg_map, env->isa->bp);

	if (env->flags.try_omit_fp) {
		/* simply remove the stack frame here */
		curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, *mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
	}
	else {
		const ia32_isa_t *isa = (ia32_isa_t *)env->isa;
		ir_mode *mode_bp      = env->isa->bp->reg_class->mode;
		int reg_size          = get_mode_size_bytes(env->isa->bp->reg_class->mode);

		/* gcc always emits a leave at the end of a routine */
		if (1 || ARCH_AMD(isa->opt_arch)) {
			ir_node *leave;

			/* leave */
			leave   = new_rd_ia32_Leave(NULL, env->irg, bl, curr_sp, *mem);
			set_ia32_flags(leave, arch_irn_flags_ignore);
			curr_bp = new_r_Proj(current_ir_graph, bl, leave, mode_bp, pn_ia32_Leave_frame);
			curr_sp = new_r_Proj(current_ir_graph, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
			*mem    = new_r_Proj(current_ir_graph, bl, leave, mode_M, pn_ia32_Leave_M);
		}
		else {
			ir_node *pop;

			/* copy ebp to esp */
			curr_sp = be_new_SetSP(env->isa->sp, env->irg, bl, curr_sp, curr_bp, *mem);

			/* pop ebp */
			pop     = new_rd_ia32_Pop(NULL, env->irg, bl, curr_sp, *mem);
			set_ia32_flags(pop, arch_irn_flags_ignore);
			curr_bp = new_r_Proj(current_ir_graph, bl, pop, mode_bp, pn_ia32_Pop_res);
			curr_sp = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
			*mem    = new_r_Proj(current_ir_graph, bl, pop, mode_M, pn_ia32_Pop_M);
		}

		arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
		arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
	}

	be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
	be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);
}
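/* Note: "leave" is equivalent to "mov esp, ebp; pop ebp"; the "1 ||" above
 * currently forces the Leave variant for all architectures, while the
 * explicit SetSP/Pop path is kept as the alternative. */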
/**
 * Produces the type which sits between the stack args and the locals on the stack.
 * It will contain the return address and the space to store the old base pointer.
 * @return The Firm type modeling the ABI between type.
 */
static ir_type *ia32_abi_get_between_type(void *self)
{
#define IDENT(s) new_id_from_chars(s, sizeof(s) - 1)
	static ir_type *omit_fp_between_type = NULL;
	static ir_type *between_type         = NULL;

	ia32_abi_env_t *env = self;

	if (! between_type) {
		entity *old_bp_ent;
		entity *ret_addr_ent;
		entity *omit_fp_ret_addr_ent;

		ir_type *old_bp_type   = new_type_primitive(IDENT("bp"), mode_P);
		ir_type *ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_P);

		between_type = new_type_struct(IDENT("ia32_between_type"));
		old_bp_ent   = new_entity(between_type, IDENT("old_bp"), old_bp_type);
		ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type);

		set_entity_offset_bytes(old_bp_ent, 0);
		set_entity_offset_bytes(ret_addr_ent, get_type_size_bytes(old_bp_type));
		set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
		set_type_state(between_type, layout_fixed);

		omit_fp_between_type = new_type_struct(IDENT("ia32_between_type_omit_fp"));
		omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type);

		set_entity_offset_bytes(omit_fp_ret_addr_ent, 0);
		set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
		set_type_state(omit_fp_between_type, layout_fixed);
	}

	return env->flags.try_omit_fp ? omit_fp_between_type : between_type;
#undef IDENT
}
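/* Resulting between-type layout (byte offsets):
 *   with frame pointer:  0: old_bp, 4: ret_addr
 *   with omitted fp:     0: ret_addr
 */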
/**
 * Get the estimated cycle count for @p irn.
 *
 * @param self The this pointer.
 * @param irn  The node.
 *
 * @return The estimated cycle count for this operation.
 */
static int ia32_get_op_estimated_cost(const void *self, const ir_node *irn)
{
	int cost;

	switch (get_ia32_irn_opcode(irn)) {
	case iro_ia32_DivMod:
		cost = 8;
		break;

	case iro_ia32_l_Load:
		cost = 25;
		break;

	case iro_ia32_xStore:
	case iro_ia32_l_Store:
	case iro_ia32_Store8Bit:
		cost = 50;
		break;

	case iro_ia32_l_MulS:
		cost = 2;
		break;

	default:
		cost = 1;
	}

	return cost;
}
/**
 * Returns the inverse operation of @p irn, recalculating the argument at position @p i.
 *
 * @param irn     The original operation
 * @param i       Index of the argument we want the inverse operation to yield
 * @param inverse struct to be filled with the resulting inverse op
 * @param obst    The obstack to use for allocation of the returned nodes array
 * @return        The inverse operation or NULL if the operation is not invertible
 */
static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) {
	ir_graph *irg;
	ir_mode  *mode;
	ir_node  *block, *noreg, *nomem;
	int       pnc;

	/* we cannot invert non-ia32 irns */
	if (! is_ia32_irn(irn))
		return NULL;

	/* operand must always be a real operand (not base, index or mem) */
	if (i != 2 && i != 3)
		return NULL;

	/* we don't invert address mode operations */
	if (get_ia32_op_type(irn) != ia32_Normal)
		return NULL;

	irg   = get_irn_irg(irn);
	block = get_nodes_block(irn);
	mode  = get_ia32_res_mode(irn);
	noreg = get_irn_n(irn, 0);
	nomem = new_r_NoMem(irg);

	/* initialize structure */
	inverse->nodes = obstack_alloc(obst, 2 * sizeof(inverse->nodes[0]));
	inverse->costs = 0;
	inverse->n     = 2;

	switch (get_ia32_irn_opcode(irn)) {
		case iro_ia32_Add:
			if (get_ia32_immop_type(irn) == ia32_ImmConst) {
				/* we have an add with a const here */
				/* inverse == add with negated const */
				inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
				pnc               = pn_ia32_Add_res;

				copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
				set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
				set_ia32_commutative(inverse->nodes[0]);
			}
			else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
				/* we have an add with a symconst here */
				/* inverse == sub with const */
				inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
				pnc               = pn_ia32_Sub_res;

				copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
			}
			else {
				/* normal add: inverse == sub */
				ir_node *proj = get_irn_out_edge_first(irn)->src;
				assert(proj && is_Proj(proj));

				inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, i ^ 1), nomem);
				pnc               = pn_ia32_Sub_res;
			}
			break;
		case iro_ia32_Sub:
			if (get_ia32_immop_type(irn) != ia32_ImmNone) {
				/* we have a sub with a const/symconst here */
				/* inverse == add with this const */
				inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
				pnc               = pn_ia32_Add_res;
				inverse->costs   += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
				copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
			}
			else {
				/* normal sub */
				ir_node *proj = get_irn_out_edge_first(irn)->src;
				assert(proj && is_Proj(proj));

				if (i == 2)
					inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, 3), nomem);
				else
					inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, 2), proj, nomem);

				pnc = pn_ia32_Sub_res;
			}
			break;
		case iro_ia32_Eor:
			if (get_ia32_immop_type(irn) != ia32_ImmNone) {
				/* xor with const: inverse = xor */
				inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
				pnc               = pn_ia32_Eor_res;
				inverse->costs   += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
				copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
			}
			else {
				/* normal xor */
				inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, (ir_node *)irn, get_irn_n(irn, i), nomem);
				pnc               = pn_ia32_Eor_res;
			}
			break;
		case iro_ia32_Not:
			inverse->nodes[0] = new_rd_ia32_Not(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), nomem);
			pnc               = pn_ia32_Not_res;
			break;

		case iro_ia32_Minus:
			inverse->nodes[0] = new_rd_ia32_Minus(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), nomem);
			pnc               = pn_ia32_Minus_res;
			break;

		default:
			/* inverse operation not supported */
			return NULL;
	}

	set_ia32_res_mode(inverse->nodes[0], mode);
	inverse->nodes[1] = new_r_Proj(irg, block, inverse->nodes[0], mode, pnc);

	return inverse;
}
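/* Example: for v = Add(a, Const 3), operand a can be recomputed from the
 * result as Add(v, Const -3); inverse->nodes then holds the new arithmetic
 * node and the Proj delivering its result. */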
static const be_abi_callbacks_t ia32_abi_callbacks = {
	ia32_abi_init,
	free,
	ia32_abi_get_between_type,
	ia32_abi_dont_save_regs,
	ia32_abi_prologue,
	ia32_abi_epilogue,
};
/* fill register allocator interface */

static const arch_irn_ops_if_t ia32_irn_ops_if = {
	ia32_get_irn_reg_req,
	ia32_set_irn_reg,
	ia32_get_irn_reg,
	ia32_classify,
	ia32_get_flags,
	ia32_get_frame_entity,
	ia32_set_stack_bias,
	ia32_get_inverse,
	ia32_get_op_estimated_cost
};

ia32_irn_ops_t ia32_irn_ops = {
	&ia32_irn_ops_if,
	NULL
};
/**************************************************
 *
 *        code generator interface
 *
 **************************************************/
/**
 * Transforms the standard Firm graph into an ia32 Firm graph.
 */
static void ia32_prepare_graph(void *self) {
	ia32_code_gen_t  *cg = self;
	dom_front_info_t *dom;
	DEBUG_ONLY(firm_dbg_module_t *old_mod = cg->mod;)

	FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.transform");

	/* 1st: transform constants and psi condition trees */
	irg_walk_blkwise_graph(cg->irg, ia32_place_consts_set_modes, ia32_transform_psi_cond_tree, cg);

	/* 2nd: transform all remaining nodes */
	ia32_register_transformers();
	dom = be_compute_dominance_frontiers(cg->irg);
	irg_walk_blkwise_graph(cg->irg, NULL, ia32_transform_node, cg);
	be_free_dominance_frontiers(dom);

	if (cg->dump)
		be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);

	/* 3rd: optimize address mode */
	FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.am");
	ia32_optimize_addressmode(cg);

	if (cg->dump)
		be_dump(cg->irg, "-am", dump_ir_block_graph_sched);

	DEBUG_ONLY(cg->mod = old_mod;)
}
static INLINE int need_constraint_copy(ir_node *irn) {
	return ! is_ia32_Lea(irn)          &&
	       ! is_ia32_Conv_I2I(irn)     &&
	       ! is_ia32_Conv_I2I8Bit(irn) &&
	       ! is_ia32_CmpCMov(irn)      &&
	       ! is_ia32_CmpSet(irn);
}
/**
 * Inserts copies for all ia32 nodes where the should_be_same requirement
 * is not fulfilled.
 * Transforms Sub into Neg -- Add if IN2 == OUT.
 */
static void ia32_finish_node(ir_node *irn, void *env) {
	ia32_code_gen_t            *cg = env;
	const ia32_register_req_t **reqs;
	const arch_register_t      *out_reg, *in_reg, *in2_reg;
	int                         n_res, i;
	ir_node                    *copy, *in_node, *block, *in2_node;
	ia32_op_type_t              op_tp;

	if (is_ia32_irn(irn)) {
		/* AM Dest nodes don't produce any values */
		op_tp = get_ia32_op_type(irn);
		if (op_tp == ia32_AddrModeD)
			goto end;
		reqs  = get_ia32_out_req_all(irn);
		n_res = get_ia32_n_res(irn);
		block = get_nodes_block(irn);

		/* check all OUT requirements, if there is a should_be_same */
		if ((op_tp == ia32_Normal || op_tp == ia32_AddrModeS) && need_constraint_copy(irn)) {
			for (i = 0; i < n_res; i++) {
				if (arch_register_req_is(&(reqs[i]->req), should_be_same)) {
					/* get in and out register */
					out_reg = get_ia32_out_reg(irn, i);
					in_node = get_irn_n(irn, reqs[i]->same_pos);
					in_reg  = arch_get_irn_register(cg->arch_env, in_node);

					/* don't copy ignore nodes */
					if (arch_irn_is(cg->arch_env, in_node, ignore) && is_Proj(in_node))
						continue;

					/* check if in and out register are equal */
					if (! REGS_ARE_EQUAL(out_reg, in_reg)) {
						/* in case of a commutative op: just exchange the ins */
						/* beware: the current op could be everything, so test for ia32 */
						/* commutativity first before getting the second in            */
						if (is_ia32_commutative(irn)) {
							in2_node = get_irn_n(irn, reqs[i]->same_pos ^ 1);
							in2_reg  = arch_get_irn_register(cg->arch_env, in2_node);

							if (REGS_ARE_EQUAL(out_reg, in2_reg)) {
								set_irn_n(irn, reqs[i]->same_pos, in2_node);
								set_irn_n(irn, reqs[i]->same_pos ^ 1, in_node);
								continue;
							}
						}

						DBG((cg->mod, LEVEL_1, "inserting copy for %+F in_pos %d\n", irn, reqs[i]->same_pos));

						/* create copy from in register */
						copy = be_new_Copy(arch_register_get_class(in_reg), cg->irg, block, in_node);

						DBG_OPT_2ADDRCPY(copy);

						/* destination is the out register */
						arch_set_irn_register(cg->arch_env, copy, out_reg);

						/* insert copy before the node into the schedule */
						sched_add_before(irn, copy);

						/* set copy as in */
						set_irn_n(irn, reqs[i]->same_pos, copy);
					}
				}
			}
		}
		/* If we have a CondJmp/CmpSet/xCmpSet with an immediate, we have to
		 * check whether the immediate is the right operand; if not, the
		 * comparison must be inverted, as CMP doesn't support an immediate
		 * as its left operand. */
		if ((is_ia32_CondJmp(irn) || is_ia32_CmpSet(irn) || is_ia32_xCmpSet(irn)) &&
		    (is_ia32_ImmConst(irn) || is_ia32_ImmSymConst(irn))                   &&
		    op_tp == ia32_AddrModeS)
		{
			set_ia32_op_type(irn, ia32_AddrModeD);
			set_ia32_pncode(irn, get_inversed_pnc(get_ia32_pncode(irn)));
		}
		/* check if there is a sub which needs to be transformed */
		ia32_transform_sub_to_neg_add(irn, cg);

		/* transform a LEA into an Add if possible */
		ia32_transform_lea_to_add(irn, cg);
	}
end:

	/* check for peephole optimization */
	ia32_peephole_optimization(irn, cg);
}
static void ia32_finish_irg_walker(ir_node *block, void *env) {
	ir_node *irn, *next;

	for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
		next = sched_next(irn);
		ia32_finish_node(irn, env);
	}
}
static void ia32_push_on_queue_walker(ir_node *block, void *env) {
	waitq *wq = env;
	waitq_put(wq, block);
}
/**
 * Adds Copy nodes for not fulfilled should_be_same constraints.
 */
static void ia32_finish_irg(ir_graph *irg, ia32_code_gen_t *cg) {
	waitq *wq = new_waitq();

	/* Push the blocks on the waitq because ia32_finish_irg_walker starts more walks ... */
	irg_block_walk_graph(irg, NULL, ia32_push_on_queue_walker, wq);

	while (! waitq_empty(wq)) {
		ir_node *block = waitq_get(wq);
		ia32_finish_irg_walker(block, cg);
	}

	del_waitq(wq);
}
/**
 * Dummy functions for hooks we don't need but which must be filled.
 */
static void ia32_before_sched(void *self) {
}
/**
 * Called before the register allocator.
 * Calculates a block schedule here. We need it for the x87
 * simulator and the emitter.
 */
static void ia32_before_ra(void *self) {
	ia32_code_gen_t *cg = self;

	cg->blk_sched = sched_create_block_schedule(cg->irg);
}
/**
 * Transforms a be node into a Load.
 */
static void transform_to_Load(ia32_transform_env_t *env) {
	ir_node *irn         = env->irn;
	entity  *ent         = be_get_frame_entity(irn);
	ir_mode *mode        = env->mode;
	ir_node *noreg       = ia32_new_NoReg_gp(env->cg);
	ir_node *nomem       = new_rd_NoMem(env->irg);
	ir_node *sched_point = NULL;
	ir_node *ptr         = get_irn_n(irn, 0);
	ir_node *mem         = be_is_Reload(irn) ? get_irn_n(irn, 1) : nomem;
	ir_node *new_op, *proj;
	const arch_register_t *reg;

	if (sched_is_scheduled(irn)) {
		sched_point = sched_prev(irn);
	}

	if (mode_is_float(mode)) {
		if (USE_SSE2(env->cg))
			new_op = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, ptr, noreg, mem);
		else
			new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem);
	}
	else {
		new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem);
	}

	set_ia32_am_support(new_op, ia32_am_Source);
	set_ia32_op_type(new_op, ia32_AddrModeS);
	set_ia32_am_flavour(new_op, ia32_B);
	set_ia32_ls_mode(new_op, mode);
	set_ia32_frame_ent(new_op, ent);
	set_ia32_use_frame(new_op);

	DBG_OPT_RELOAD2LD(irn, new_op);

	proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, pn_Load_res);

	if (sched_point) {
		sched_add_after(sched_point, new_op);
		sched_add_after(new_op, proj);

		sched_remove(irn);
	}

	/* copy the register from the old node to the new Load */
	reg = arch_get_irn_register(env->cg->arch_env, irn);
	arch_set_irn_register(env->cg->arch_env, new_op, reg);

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, irn));

	exchange(irn, proj);
}
/**
 * Transforms a be node into a Store.
 */
static void transform_to_Store(ia32_transform_env_t *env) {
	ir_node *irn         = env->irn;
	entity  *ent         = be_get_frame_entity(irn);
	ir_mode *mode        = env->mode;
	ir_node *noreg       = ia32_new_NoReg_gp(env->cg);
	ir_node *nomem       = new_rd_NoMem(env->irg);
	ir_node *ptr         = get_irn_n(irn, 0);
	ir_node *val         = get_irn_n(irn, 1);
	ir_node *new_op, *proj;
	ir_node *sched_point = NULL;

	if (sched_is_scheduled(irn)) {
		sched_point = sched_prev(irn);
	}

	if (mode_is_float(mode)) {
		if (USE_SSE2(env->cg))
			new_op = new_rd_ia32_xStore(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
		else
			new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
	}
	else if (get_mode_size_bits(mode) == 8) {
		new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
	}
	else {
		new_op = new_rd_ia32_Store(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
	}

	set_ia32_am_support(new_op, ia32_am_Dest);
	set_ia32_op_type(new_op, ia32_AddrModeD);
	set_ia32_am_flavour(new_op, ia32_B);
	set_ia32_ls_mode(new_op, mode);
	set_ia32_frame_ent(new_op, ent);
	set_ia32_use_frame(new_op);

	DBG_OPT_SPILL2ST(irn, new_op);

	proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode_M, pn_ia32_Store_M);

	if (sched_point) {
		sched_add_after(sched_point, new_op);
		sched_remove(irn);
	}

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, irn));

	exchange(irn, proj);
}
/**
 * Fixes the mode of Spill/Reload: we always spill/reload whole registers.
 */
static ir_mode *fix_spill_mode(ia32_code_gen_t *cg, ir_mode *mode)
{
	if (mode_is_float(mode)) {
		if (USE_SSE2(cg))
			mode = mode_D;
		else
			mode = mode_E;
	}
	else
		mode = mode_Is;

	return mode;
}
/**
 * Block-Walker: Calls the transform functions Spill and Reload.
 */
static void ia32_after_ra_walker(ir_node *block, void *env) {
	ir_node *node, *prev;
	ia32_code_gen_t *cg = env;
	ia32_transform_env_t tenv;

	tenv.block = block;
	tenv.irg   = current_ir_graph;
	tenv.cg    = cg;
	DEBUG_ONLY(tenv.mod = cg->mod;)

	/* beware: the schedule is changed here */
	for (node = sched_last(block); ! sched_is_begin(node); node = prev) {
		prev = sched_prev(node);
		if (be_is_Reload(node)) {
			/* we always reload the whole register */
			tenv.dbg  = get_irn_dbg_info(node);
			tenv.irn  = node;
			tenv.mode = fix_spill_mode(cg, get_irn_mode(node));
			transform_to_Load(&tenv);
		}
		else if (be_is_Spill(node)) {
			/* we always spill the whole register */
			tenv.dbg  = get_irn_dbg_info(node);
			tenv.irn  = node;
			tenv.mode = fix_spill_mode(cg, get_irn_mode(be_get_Spill_context(node)));
			transform_to_Store(&tenv);
		}
	}
}
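/* The walk runs backwards over the schedule because transform_to_Load/
 * transform_to_Store replace the visited node in the schedule in place. */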
/**
 * We transform Spill and Reload here. This needs to be done before
 * stack biasing, otherwise we would miss the corrected offset for these nodes.
 *
 * If x87 instructions should be emitted, run the x87 simulator and patch
 * the virtual instructions. This must obviously be done after register allocation.
 */
static void ia32_after_ra(void *self) {
	ia32_code_gen_t *cg = self;

	irg_block_walk_graph(cg->irg, NULL, ia32_after_ra_walker, self);

	/* if we do x87 code generation, rewrite all the virtual instructions and registers */
	if (cg->used_fp == fp_x87 || cg->force_sim) {
		x87_simulate_graph(cg->arch_env, cg->irg, cg->blk_sched);
	}
}
/**
 * Emits the code, closes the output file and frees
 * the code generator interface.
 */
static void ia32_codegen(void *self) {
	ia32_code_gen_t *cg  = self;
	ir_graph        *irg = cg->irg;

	ia32_finish_irg(irg, cg);
	if (cg->dump)
		be_dump(irg, "-finished", dump_ir_block_graph_sched);
	ia32_gen_routine(cg->isa->out, irg, cg);

	cur_reg_set = NULL;

	/* remove it from the isa */
	cg->isa->cg = NULL;

	/* de-allocate code generator */
	del_set(cg->reg_set);
	free(cg);
}
static void *ia32_cg_init(const be_irg_t *birg);

static const arch_code_generator_if_t ia32_code_gen_if = {
	ia32_cg_init,
	NULL,                /* before abi introduce hook */
	ia32_prepare_graph,
	ia32_before_sched,   /* before scheduling hook */
	ia32_before_ra,      /* before register allocation hook */
	ia32_after_ra,       /* after register allocation hook */
	ia32_codegen         /* emit && done */
};
/**
 * Initializes an IA32 code generator.
 */
static void *ia32_cg_init(const be_irg_t *birg) {
	ia32_isa_t      *isa = (ia32_isa_t *)birg->main_env->arch_env->isa;
	ia32_code_gen_t *cg  = xcalloc(1, sizeof(*cg));

	cg->impl      = &ia32_code_gen_if;
	cg->irg       = birg->irg;
	cg->reg_set   = new_set(ia32_cmp_irn_reg_assoc, 1024);
	cg->arch_env  = birg->main_env->arch_env;
	cg->isa       = isa;
	cg->birg      = birg;
	cg->blk_sched = NULL;
	cg->fp_to_gp  = NULL;
	cg->gp_to_fp  = NULL;
	cg->fp_kind   = isa->fp_kind;
	cg->used_fp   = fp_none;
	cg->dump      = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;

	FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.cg");

	/* copy optimizations from isa for easier access */
	cg->opt = isa->opt;

	/* enter it */
	isa->cg = cg;

#ifndef NDEBUG
	if (isa->name_obst_size) {
		//printf("freed %d bytes from name obst\n", isa->name_obst_size);
		isa->name_obst_size = 0;
		obstack_free(isa->name_obst, NULL);
		obstack_init(isa->name_obst);
	}
#endif /* NDEBUG */

	cur_reg_set = cg->reg_set;

	ia32_irn_ops.cg = cg;

	return (arch_code_generator_t *)cg;
}
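/* Note: ia32_irn_ops and cur_reg_set are globals; rebinding them here means
 * only one ia32 code generator can be active at a time. */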
/*****************************************************************
 *
 *                       Backend ISA
 *
 *****************************************************************/
/**
 * The template that generates a new ISA object.
 * Note that this template can be changed by command line
 * arguments.
 */
static ia32_isa_t ia32_isa_template = {
	{
		&ia32_isa_if,           /* isa interface implementation */
		&ia32_gp_regs[REG_ESP], /* stack pointer register */
		&ia32_gp_regs[REG_EBP], /* base pointer register */
		-1,                     /* stack direction */
	},
	NULL,                   /* 16bit register names */
	NULL,                   /* 8bit register names */
	NULL,                   /* types */
	NULL,                   /* tv_ent */
	(IA32_OPT_INCDEC |      /* optimize add 1, sub 1 into inc/dec               default: on */
	IA32_OPT_DOAM    |      /* optimize address mode                            default: on */
	IA32_OPT_LEA     |      /* optimize for LEAs                                default: on */
	IA32_OPT_PLACECNST |    /* place constants immediately before instructions, default: on */
	IA32_OPT_IMMOPS  |      /* operations can use immediates,                   default: on */
	IA32_OPT_EXTBB),        /* use extended basic block scheduling,             default: on */
	arch_pentium_4,         /* instruction architecture */
	arch_pentium_4,         /* optimize for architecture */
	fp_sse2,                /* use sse2 unit */
	NULL,                   /* current code generator */
#ifndef NDEBUG
	NULL,                   /* name obstack */
	0                       /* name obst size */
#endif
};
/**
 * Initializes the backend ISA.
 */
static void *ia32_init(FILE *file_handle) {
	static int inited = 0;
	ia32_isa_t *isa;

	if (inited)
		return NULL;

	isa = xmalloc(sizeof(*isa));
	memcpy(isa, &ia32_isa_template, sizeof(*isa));

	ia32_register_init(isa);
	ia32_create_opcodes();

	if ((ARCH_INTEL(isa->arch) && isa->arch < arch_pentium_4) ||
	    (ARCH_AMD(isa->arch) && isa->arch < arch_athlon))
		/* no SSE2 for these cpus */
		isa->fp_kind = fp_x87;

	if (ARCH_INTEL(isa->opt_arch) && isa->opt_arch >= arch_pentium_4) {
		/* the Pentium 4 doesn't like inc and dec instructions */
		isa->opt &= ~IA32_OPT_INCDEC;
	}

	isa->regs_16bit = pmap_create();
	isa->regs_8bit  = pmap_create();
	isa->types      = pmap_create();
	isa->tv_ent     = pmap_create();
	isa->out        = file_handle;

	ia32_build_16bit_reg_map(isa->regs_16bit);
	ia32_build_8bit_reg_map(isa->regs_8bit);

	/* patch register names of x87 registers */
	ia32_st_regs[0].name = "st";
	ia32_st_regs[1].name = "st(1)";
	ia32_st_regs[2].name = "st(2)";
	ia32_st_regs[3].name = "st(3)";
	ia32_st_regs[4].name = "st(4)";
	ia32_st_regs[5].name = "st(5)";
	ia32_st_regs[6].name = "st(6)";
	ia32_st_regs[7].name = "st(7)";

#ifndef NDEBUG
	isa->name_obst = xmalloc(sizeof(*isa->name_obst));
	obstack_init(isa->name_obst);
	isa->name_obst_size = 0;
#endif /* NDEBUG */

	ia32_handle_intrinsics();
	ia32_switch_section(NULL, NO_SECTION);
	fprintf(isa->out, "\t.intel_syntax\n");

	inited = 1;

	return isa;
}
/**
 * Closes the output file and frees the ISA structure.
 */
static void ia32_done(void *self) {
	ia32_isa_t *isa = self;

	/* emit now all global declarations */
	ia32_gen_decls(isa->out);

	pmap_destroy(isa->regs_16bit);
	pmap_destroy(isa->regs_8bit);
	pmap_destroy(isa->tv_ent);
	pmap_destroy(isa->types);

#ifndef NDEBUG
	//printf("name obst size = %d bytes\n", isa->name_obst_size);
	obstack_free(isa->name_obst, NULL);
#endif /* NDEBUG */

	free(self);
}
/**
 * Return the number of register classes for this architecture.
 * We always report these:
 *  - the general purpose registers
 *  - the SSE floating point register set
 *  - the virtual floating point registers
 */
static int ia32_get_n_reg_class(const void *self) {
	return 3;
}
/**
 * Return the register class for index i.
 */
static const arch_register_class_t *ia32_get_reg_class(const void *self, int i) {
	const ia32_isa_t *isa = self;

	assert(i >= 0 && i < 3 && "Invalid ia32 register class requested.");
	if (i == 0)
		return &ia32_reg_classes[CLASS_ia32_gp];
	else if (i == 1)
		return &ia32_reg_classes[CLASS_ia32_xmm];
	else
		return &ia32_reg_classes[CLASS_ia32_vfp];
}
/**
 * Get the register class which shall be used to store a value of a given mode.
 * @param self The this pointer.
 * @param mode The mode in question.
 * @return A register class which can hold values of the given mode.
 */
const arch_register_class_t *ia32_get_reg_class_for_mode(const void *self, const ir_mode *mode) {
	const ia32_isa_t *isa = self;

	if (mode_is_float(mode)) {
		return USE_SSE2(isa) ? &ia32_reg_classes[CLASS_ia32_xmm] : &ia32_reg_classes[CLASS_ia32_vfp];
	}
	else
		return &ia32_reg_classes[CLASS_ia32_gp];
}
/**
 * Get the ABI restrictions for procedure calls.
 * @param self        The this pointer.
 * @param method_type The type of the method (procedure) in question.
 * @param abi         The abi object to be modified.
 */
static void ia32_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi) {
	const ia32_isa_t *isa = self;
	ir_type  *tp;
	ir_mode  *mode;
	unsigned  cc        = get_method_calling_convention(method_type);
	int       n         = get_method_n_params(method_type);
	int       biggest_n = -1;
	int       stack_idx = 0;
	int       i, ignore_1, ignore_2;
	ir_mode **modes;
	const arch_register_t *reg;
	be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);

	unsigned use_push = !IS_P6_ARCH(isa->opt_arch);

	/* set abi flags for calls */
	call_flags.bits.left_to_right         = 0;        /* always last arg first on stack */
	call_flags.bits.store_args_sequential = use_push;
	/* call_flags.bits.try_omit_fp not changed: can handle both settings */
	call_flags.bits.fp_free               = 0;        /* the frame pointer is fixed in IA32 */
	call_flags.bits.call_has_imm          = 1;        /* IA32 calls can have immediate address */

	/* set stack parameter passing style */
	be_abi_call_set_flags(abi, call_flags, &ia32_abi_callbacks);

	/* collect the mode for each type */
	modes = alloca(n * sizeof(modes[0]));

	for (i = 0; i < n; i++) {
		tp       = get_method_param_type(method_type, i);
		modes[i] = get_type_mode(tp);
	}

	/* set register parameters */
	if (cc & cc_reg_param) {
		/* determine the number of parameters passed via registers */
		biggest_n = ia32_get_n_regparam_class(n, modes, &ignore_1, &ignore_2);

		/* loop over all parameters and set the register requirements */
		for (i = 0; i <= biggest_n; i++) {
			reg = ia32_get_RegParam_reg(n, modes, i, cc);
			assert(reg && "kaputt");
			be_abi_call_param_reg(abi, i, reg);
		}

		stack_idx = i;
	}

	/* set stack parameters */
	for (i = stack_idx; i < n; i++) {
		be_abi_call_param_stack(abi, i, 1, 0, 0);
	}

	/* set return registers */
	n = get_method_n_ress(method_type);

	assert(n <= 2 && "more than two results not supported");

	/* In case of 64bit returns, we will have two 32bit values */
	if (n == 2) {
		tp   = get_method_res_type(method_type, 0);
		mode = get_type_mode(tp);

		assert(!mode_is_float(mode) && "two FP results not supported");

		tp   = get_method_res_type(method_type, 1);
		mode = get_type_mode(tp);

		assert(!mode_is_float(mode) && "two FP results not supported");

		be_abi_call_res_reg(abi, 0, &ia32_gp_regs[REG_EAX]);
		be_abi_call_res_reg(abi, 1, &ia32_gp_regs[REG_EDX]);
	}
	else if (n == 1) {
		const arch_register_t *reg;

		tp   = get_method_res_type(method_type, 0);
		assert(is_atomic_type(tp));
		mode = get_type_mode(tp);

		reg = mode_is_float(mode) ?
			(USE_SSE2(isa) ? &ia32_xmm_regs[REG_XMM0] : &ia32_vfp_regs[REG_VF0]) :
			&ia32_gp_regs[REG_EAX];

		be_abi_call_res_reg(abi, 0, reg);
	}
}
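/* Example: for "int f(int, int)" with the default calling convention both
 * parameters are passed on the stack (last argument pushed first) and the
 * result ends up in EAX; a 64bit integer result uses the EAX/EDX pair. */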
static const void *ia32_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn) {
	return &ia32_irn_ops;
}

const arch_irn_handler_t ia32_irn_handler = {
	ia32_get_irn_ops
};

const arch_irn_handler_t *ia32_get_irn_handler(const void *self) {
	return &ia32_irn_handler;
}
/* returns the first Proj with the given mode from a mode_T node */
static ir_node *get_proj_for_mode(ir_node *node, ir_mode *mode) {
	const ir_edge_t *edge;

	assert(get_irn_mode(node) == mode_T && "Need mode_T node.");

	foreach_out_edge(node, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		if (get_irn_mode(proj) == mode)
			return proj;
	}

	return NULL;
}
int ia32_to_appear_in_schedule(void *block_env, const ir_node *irn) {
	/* Loads with no user do not need to appear in the schedule */
	if (is_ia32_Ld(irn) && get_irn_n_edges(irn) == 1) {
		/* only one user && user is not memory -> schedule */
		return get_proj_for_mode(irn, mode_M) == NULL;
	}

	return is_ia32_irn(irn) ? 1 : -1;
}
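/* For non-ia32 nodes -1 presumably signals "no opinion", leaving the
 * decision to the generic list scheduler. */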
/**
 * Initializes the code generator interface.
 */
static const arch_code_generator_if_t *ia32_get_code_generator_if(void *self) {
	return &ia32_code_gen_if;
}
list_sched_selector_t ia32_sched_selector;

/**
 * Returns the trivial scheduler selector with to_appear_in_schedule()
 * overloaded (the reg_pressure selector is currently disabled).
 */
static const list_sched_selector_t *ia32_get_list_sched_selector(const void *self) {
//	memcpy(&ia32_sched_selector, reg_pressure_selector, sizeof(list_sched_selector_t));
	memcpy(&ia32_sched_selector, trivial_selector, sizeof(list_sched_selector_t));
	ia32_sched_selector.to_appear_in_schedule = ia32_to_appear_in_schedule;
	return &ia32_sched_selector;
}
/**
 * Returns the necessary byte alignment for storing a register of the given class.
 */
static int ia32_get_reg_class_alignment(const void *self, const arch_register_class_t *cls) {
	ir_mode *mode = arch_register_class_mode(cls);
	int bytes     = get_mode_size_bytes(mode);

	if (mode_is_float(mode) && bytes > 8)
		return 16;
	return bytes;
}
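/* XMM spill slots get 16 byte alignment, everything else its natural size. */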
static ia32_intrinsic_env_t intrinsic_env = { NULL, NULL };
/**
 * Returns the libFirm configuration parameter for this backend.
 */
static const backend_params *ia32_get_libfirm_params(void) {
	static const arch_dep_params_t ad = {
		1,  /* also use subs */
		4,  /* maximum shifts */
		31, /* maximum shift amount */

		1,  /* allow Mulhs */
		1,  /* allow Mulus */
		32  /* Mulh allowed up to 32 bit */
	};
	static backend_params p = {
		NULL,  /* no additional opcodes */
		NULL,  /* will be set later */
		1,     /* need dword lowering */
		ia32_create_intrinsic_fkt,
		&intrinsic_env,  /* context for ia32_create_intrinsic_fkt */
	};

	p.dep_param = &ad;
	return &p;
}
#ifdef WITH_LIBCORE

/* instruction set architectures */
static const lc_opt_enum_int_items_t arch_items[] = {
	{ "386",        arch_i386, },
	{ "486",        arch_i486, },
	{ "pentium",    arch_pentium, },
	{ "586",        arch_pentium, },
	{ "pentiumpro", arch_pentium_pro, },
	{ "686",        arch_pentium_pro, },
	{ "pentiummmx", arch_pentium_mmx, },
	{ "pentium2",   arch_pentium_2, },
	{ "p2",         arch_pentium_2, },
	{ "pentium3",   arch_pentium_3, },
	{ "p3",         arch_pentium_3, },
	{ "pentium4",   arch_pentium_4, },
	{ "p4",         arch_pentium_4, },
	{ "pentiumm",   arch_pentium_m, },
	{ "pm",         arch_pentium_m, },
	{ "core",       arch_core, },
	{ "athlon",     arch_athlon, },
	{ "athlon64",   arch_athlon_64, },
	{ "opteron",    arch_opteron, },
	{ NULL,         0 }
};
static lc_opt_enum_int_var_t arch_var = {
	&ia32_isa_template.arch, arch_items
};

static lc_opt_enum_int_var_t opt_arch_var = {
	&ia32_isa_template.opt_arch, arch_items
};

static const lc_opt_enum_int_items_t fp_unit_items[] = {
	{ "x87",  fp_x87 },
	{ "sse2", fp_sse2 },
	{ NULL,   0 }
};

static lc_opt_enum_int_var_t fp_unit_var = {
	&ia32_isa_template.fp_kind, fp_unit_items
};

static const lc_opt_enum_int_items_t gas_items[] = {
	{ "linux", ASM_LINUX_GAS },
	{ "mingw", ASM_MINGW_GAS },
	{ NULL,    0 }
};

static lc_opt_enum_int_var_t gas_var = {
	(int *)&asm_flavour, gas_items
};

static const lc_opt_table_entry_t ia32_options[] = {
	LC_OPT_ENT_ENUM_INT("arch",      "select the instruction architecture", &arch_var),
	LC_OPT_ENT_ENUM_INT("opt",       "optimize for instruction architecture", &opt_arch_var),
	LC_OPT_ENT_ENUM_INT("fpunit",    "select the floating point unit", &fp_unit_var),
	LC_OPT_ENT_NEGBIT("noaddrmode",  "do not use address mode", &ia32_isa_template.opt, IA32_OPT_DOAM),
	LC_OPT_ENT_NEGBIT("nolea",       "do not optimize for LEAs", &ia32_isa_template.opt, IA32_OPT_LEA),
	LC_OPT_ENT_NEGBIT("noplacecnst", "do not place constants", &ia32_isa_template.opt, IA32_OPT_PLACECNST),
	LC_OPT_ENT_NEGBIT("noimmop",     "no operations with immediates", &ia32_isa_template.opt, IA32_OPT_IMMOPS),
	LC_OPT_ENT_NEGBIT("noextbb",     "do not use extended basic block scheduling", &ia32_isa_template.opt, IA32_OPT_EXTBB),
	LC_OPT_ENT_ENUM_INT("gasmode",   "set the GAS compatibility mode", &gas_var),
	{ NULL }
};
/**
 * Register command line options for the ia32 backend.
 *
 * Options so far:
 *
 * ia32-arch=arch    create instructions for arch
 * ia32-opt=arch     optimize for run on arch
 * ia32-fpunit=unit  select floating point unit (x87 or SSE2)
 * ia32-incdec       optimize for inc/dec
 * ia32-noaddrmode   do not use address mode
 * ia32-nolea        do not optimize for LEAs
 * ia32-noplacecnst  do not place constants
 * ia32-noimmop      no operations with immediates
 * ia32-noextbb      do not use extended basic block scheduling
 * ia32-gasmode      set the GAS compatibility mode
 */
static void ia32_register_options(lc_opt_entry_t *ent)
{
	lc_opt_entry_t *be_grp_ia32 = lc_opt_get_grp(ent, "ia32");
	lc_opt_add_table(be_grp_ia32, ia32_options);
}
#endif /* WITH_LIBCORE */
const arch_isa_if_t ia32_isa_if = {
	ia32_init,
	ia32_done,
	ia32_get_n_reg_class,
	ia32_get_reg_class,
	ia32_get_reg_class_for_mode,
	ia32_get_call_abi,
	ia32_get_irn_handler,
	ia32_get_code_generator_if,
	ia32_get_list_sched_selector,
	ia32_get_reg_class_alignment,
	ia32_get_libfirm_params,
#ifdef WITH_LIBCORE
	ia32_register_options
#endif
};