/**
 * This is the main ia32 firm backend driver.
 * @author Christian Wuerdig
 */

#ifdef WITH_LIBCORE
#include <libcore/lc_opts.h>
#include <libcore/lc_opts_enum.h>
#endif /* WITH_LIBCORE */

#include "pseudo_irg.h"
#include "iredges_t.h"

#include "../beabi.h"             /* the general register allocator interface */
#include "../benode_t.h"
#include "../belower.h"
#include "../besched_t.h"

#include "bearch_ia32_t.h"

#include "ia32_new_nodes.h"       /* ia32 nodes interface */
#include "gen_ia32_regalloc_if.h" /* the generated interface (register type and class definitions) */
#include "ia32_gen_decls.h"       /* interface declaration emitter */
#include "ia32_transform.h"
#include "ia32_emitter.h"
#include "ia32_map_regs.h"
#include "ia32_optimize.h"
#include "ia32_dbg_stat.h"

#define DEBUG_MODULE "firm.be.ia32.isa"
static set *cur_reg_set = NULL;

#define is_Start(irn) (get_irn_opcode(irn) == iro_Start)

/* Creates the unique per irg GP NoReg node. */
ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg) {
	return be_abi_get_callee_save_irn(cg->birg->abi, &ia32_gp_regs[REG_GP_NOREG]);
}

/* Creates the unique per irg FP NoReg node. */
ir_node *ia32_new_NoReg_fp(ia32_code_gen_t *cg) {
	return be_abi_get_callee_save_irn(cg->birg->abi,
		USE_SSE2(cg) ? &ia32_xmm_regs[REG_XMM_NOREG] : &ia32_vfp_regs[REG_VFP_NOREG]);
}
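/*
 * Illustrative usage sketch (an assumption of this edit, not part of the
 * original file): the NoReg nodes act as placeholders for the unused
 * base/index inputs of address-mode capable nodes, so the in-array of an
 * ia32 node is always fully populated:
 *
 *     ir_node *noreg = ia32_new_NoReg_gp(cg);
 *     ir_node *nomem = new_r_NoMem(irg);
 *     ir_node *add   = new_rd_ia32_Add(NULL, irg, block,
 *                                      noreg, noreg,   <- no base/index used
 *                                      op1, op2, nomem);
 *
 * ia32_get_inverse() below builds its result nodes exactly this way.
 */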
/**************************************************
 *                         _ _              _  __
 *                        | | |            (_)/ _|
 *  _ __ ___  __ _    __ _| | | ___   ___   _| |_
 * | '__/ _ \/ _` |  / _` | | |/ _ \ / __| | |  _|
 * | | |  __/ (_| | | (_| | | | (_) | (__  | | |
 * |_|  \___|\__, |  \__,_|_|_|\___/ \___| |_|_|
 *            __/ |
 *           |___/
 **************************************************/
static ir_node *my_skip_proj(const ir_node *n) {
	while (is_Proj(n))
		n = get_irn_n(n, 0);
	return (ir_node *)n;
}
/**
 * Return register requirements for an ia32 node.
 * If the node returns a tuple (mode_T) then the Projs
 * will be asked for this information.
 */
static const arch_register_req_t *ia32_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos) {
	const ia32_irn_ops_t *ops = self;
	const ia32_register_req_t *irn_req;
	long node_pos = pos == -1 ? 0 : pos;
	ir_mode *mode = is_Block(irn) ? NULL : get_irn_mode(irn);
	FIRM_DBG_REGISTER(firm_dbg_module_t *mod, DEBUG_MODULE);

	if (is_Block(irn) || mode == mode_M || mode == mode_X) {
		DBG((mod, LEVEL_1, "ignoring Block, mode_M, mode_X node %+F\n", irn));
		return NULL;
	}

	if (mode == mode_T && pos < 0) {
		DBG((mod, LEVEL_1, "ignoring request for OUT requirements for node %+F\n", irn));
		return NULL;
	}

	DBG((mod, LEVEL_1, "get requirements at pos %d for %+F ... ", pos, irn));

	if (is_Proj(irn)) {
		if (pos == -1) {
			node_pos = ia32_translate_proj_pos(irn);
		}
		else {
			node_pos = pos;
		}

		irn = my_skip_proj(irn);

		DB((mod, LEVEL_1, "skipping Proj, going to %+F at pos %d ... ", irn, node_pos));
	}

	if (is_ia32_irn(irn)) {
		if (pos >= 0) {
			irn_req = get_ia32_in_req(irn, pos);
		}
		else {
			irn_req = get_ia32_out_req(irn, node_pos);
		}

		DB((mod, LEVEL_1, "returning reqs for %+F at pos %d\n", irn, pos));

		memcpy(req, &(irn_req->req), sizeof(*req));

		if (arch_register_req_is(&(irn_req->req), should_be_same)) {
			assert(irn_req->same_pos >= 0 && "should be same constraint for in -> out NYI");
			req->other_same = get_irn_n(irn, irn_req->same_pos);
		}

		if (arch_register_req_is(&(irn_req->req), should_be_different)) {
			assert(irn_req->different_pos >= 0 && "should be different constraint for in -> out NYI");
			req->other_different = get_irn_n(irn, irn_req->different_pos);
		}
	}
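	/*
	 * Illustrative background (an assumption of this edit, not from the
	 * original file): ia32 instructions are two-address code, e.g.
	 *
	 *     add eax, ebx        ; computes eax := eax + ebx
	 *
	 * so a should_be_same requirement asks the allocator to assign the
	 * result the register of one particular input, while
	 * should_be_different excludes an input's register for the result.
	 */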
	else {
		/* treat Unknowns like Const with default requirements */
		if (is_Unknown(irn)) {
			DB((mod, LEVEL_1, "returning UKNWN reqs for %+F\n", irn));
			if (mode_is_float(mode)) {
				if (USE_SSE2(ops->cg))
					memcpy(req, &(ia32_default_req_ia32_xmm_xmm_UKNWN), sizeof(*req));
				else
					memcpy(req, &(ia32_default_req_ia32_vfp_vfp_UKNWN), sizeof(*req));
			}
			else if (mode_is_int(mode) || mode_is_reference(mode))
				memcpy(req, &(ia32_default_req_ia32_gp_gp_UKNWN), sizeof(*req));
			else if (mode == mode_T || mode == mode_M) {
				DBG((mod, LEVEL_1, "ignoring Unknown node %+F\n", irn));
				return NULL;
			}
			else
				assert(0 && "unsupported Unknown-Mode");
		}
		else {
			DB((mod, LEVEL_1, "returning NULL for %+F (not ia32)\n", irn));
			req = NULL;
		}
	}

	return req;
}
static void ia32_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg) {
	int pos = 0;
	const ia32_irn_ops_t *ops = self;

	if (get_irn_mode(irn) == mode_X) {
		return;
	}

	DBG((ops->cg->mod, LEVEL_1, "ia32 assigned register %s to node %+F\n", reg->name, irn));

	if (is_Proj(irn)) {
		pos = ia32_translate_proj_pos(irn);
		irn = my_skip_proj(irn);
	}

	if (is_ia32_irn(irn)) {
		const arch_register_t **slots;

		slots      = get_ia32_slots(irn);
		slots[pos] = reg;
	}
	else {
		ia32_set_firm_reg(irn, reg, cur_reg_set);
	}
}

static const arch_register_t *ia32_get_irn_reg(const void *self, const ir_node *irn) {
	int pos = 0;
	const arch_register_t *reg = NULL;

	if (is_Proj(irn)) {
		if (get_irn_mode(irn) == mode_X) {
			return NULL;
		}

		pos = ia32_translate_proj_pos(irn);
		irn = my_skip_proj(irn);
	}

	if (is_ia32_irn(irn)) {
		const arch_register_t **slots;
		slots = get_ia32_slots(irn);
		reg   = slots[pos];
	}
	else {
		reg = ia32_get_firm_reg(irn, cur_reg_set);
	}

	return reg;
}
static arch_irn_class_t ia32_classify(const void *self, const ir_node *irn) {
	irn = my_skip_proj(irn);
	if (is_cfop(irn))
		return arch_irn_class_branch;
	else if (is_ia32_Cnst(irn))
		return arch_irn_class_const;
	else if (is_ia32_irn(irn))
		return arch_irn_class_normal;
	else
		return 0;
}

static arch_irn_flags_t ia32_get_flags(const void *self, const ir_node *irn) {
	irn = my_skip_proj(irn);
	if (is_ia32_irn(irn))
		return get_ia32_flags(irn);
	else if (is_Unknown(irn))
		return arch_irn_flags_ignore;
	else
		return 0;
}

static entity *ia32_get_frame_entity(const void *self, const ir_node *irn) {
	return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
}
static void ia32_set_stack_bias(const void *self, ir_node *irn, int bias) {
	char buf[64];
	const ia32_irn_ops_t *ops = self;

	if (get_ia32_frame_ent(irn)) {
		ia32_am_flavour_t am_flav = get_ia32_am_flavour(irn);

		DBG((ops->cg->mod, LEVEL_1, "stack biased %+F with %d\n", irn, bias));
		snprintf(buf, sizeof(buf), "%d", bias);

		if (get_ia32_op_type(irn) == ia32_Normal) {
			set_ia32_cnst(irn, buf);
		}
		else {
			add_ia32_am_offs(irn, buf);
			am_flav |= ia32_O;
			set_ia32_am_flavour(irn, am_flav);
		}
	}
}
typedef struct {
	be_abi_call_flags_bits_t flags;
	const arch_isa_t *isa;
	const arch_env_t *aenv;
	ir_graph *irg;
} ia32_abi_env_t;

static void *ia32_abi_init(const be_abi_call_t *call, const arch_env_t *aenv, ir_graph *irg)
{
	ia32_abi_env_t *env = xmalloc(sizeof(env[0]));
	be_abi_call_flags_t fl = be_abi_call_get_flags(call);
	env->flags = fl.bits;
	env->irg   = irg;
	env->aenv  = aenv;
	env->isa   = aenv->isa;

	return env;
}
/**
 * Put all registers which are saved by the prologue/epilogue in a set,
 * i.e. the registers which must not be saved again by the generic framework.
 *
 * @param self  The callback object.
 * @param s     The result set.
 */
static void ia32_abi_dont_save_regs(void *self, pset *s)
{
	ia32_abi_env_t *env = self;
	if (env->flags.try_omit_fp)
		pset_insert_ptr(s, env->isa->bp);
}
/**
 * Generate the routine prologue.
 *
 * @param self     The callback object.
 * @param mem      A pointer to the mem node. Update this if you define new memory.
 * @param reg_map  A map mapping all callee_save/ignore/parameter registers to their defining nodes.
 *
 * @return         The register which shall be used as a stack frame base.
 *
 * All nodes which define registers in @p reg_map must keep @p reg_map current.
 */
static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map)
{
	ia32_abi_env_t *env = self;

	if (! env->flags.try_omit_fp) {
		int reg_size     = get_mode_size_bytes(env->isa->bp->reg_class->mode);
		ir_node *bl      = get_irg_start_block(env->irg);
		ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
		ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);
		ir_node *push;

		/* push ebp */
		push    = new_rd_ia32_Push(NULL, env->irg, bl, curr_sp, curr_bp, *mem);
		curr_sp = new_r_Proj(env->irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
		*mem    = new_r_Proj(env->irg, bl, push, mode_M, pn_ia32_Push_M);

		/* the push must have SP out register */
		arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
		set_ia32_flags(push, arch_irn_flags_ignore);

		/* move esp to ebp */
		curr_bp = be_new_Copy(env->isa->bp->reg_class, env->irg, bl, curr_sp);
		be_set_constr_single_reg(curr_bp, BE_OUT_POS(0), env->isa->bp);
		arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
		be_node_set_flags(curr_bp, BE_OUT_POS(0), arch_irn_flags_ignore);

		/* beware: the copy must be done before any other sp use */
		curr_sp = be_new_CopyKeep_single(env->isa->sp->reg_class, env->irg, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
		be_set_constr_single_reg(curr_sp, BE_OUT_POS(0), env->isa->sp);
		arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
		be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore);

		be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
		be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);

		return env->isa->bp;
	}

	return env->isa->sp;
}
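/*
 * Illustrative sketch (an assumption of this edit, not from the original
 * file): the frame-pointer case above corresponds to the classic ia32
 * prologue sequence
 *
 *     push ebp            ; Push node: save old frame pointer, esp -= 4
 *     mov  ebp, esp       ; Copy node: ebp now points to the new frame
 *
 * The CopyKeep ties the new esp value to the copy so that no other use of
 * esp can be scheduled before ebp has been set up.
 */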
/**
 * Generate the routine epilogue.
 *
 * @param self     The callback object.
 * @param bl       The block for the epilogue.
 * @param mem      A pointer to the mem node. Update this if you define new memory.
 * @param reg_map  A map mapping all callee_save/ignore/parameter registers to their defining nodes.
 *
 * All nodes which define registers in @p reg_map must keep @p reg_map current.
 */
static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map)
{
	ia32_abi_env_t *env = self;
	ir_node *curr_sp    = be_abi_reg_map_get(reg_map, env->isa->sp);
	ir_node *curr_bp    = be_abi_reg_map_get(reg_map, env->isa->bp);

	if (env->flags.try_omit_fp) {
		/* simply remove the stack frame here */
		curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, *mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
	}
	else {
		const ia32_isa_t *isa = (ia32_isa_t *)env->isa;
		ir_mode *mode_bp      = env->isa->bp->reg_class->mode;
		int reg_size          = get_mode_size_bytes(env->isa->bp->reg_class->mode);

		/* gcc always emits a leave at the end of a routine */
		if (1 || ARCH_AMD(isa->opt_arch)) {
			ir_node *leave;

			/* leave */
			leave   = new_rd_ia32_Leave(NULL, env->irg, bl, curr_sp, *mem);
			set_ia32_flags(leave, arch_irn_flags_ignore);
			curr_bp = new_r_Proj(current_ir_graph, bl, leave, mode_bp, pn_ia32_Leave_frame);
			curr_sp = new_r_Proj(current_ir_graph, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
			*mem    = new_r_Proj(current_ir_graph, bl, leave, mode_M, pn_ia32_Leave_M);
		}
		else {
			ir_node *pop;

			/* copy ebp to esp */
			curr_sp = be_new_SetSP(env->isa->sp, env->irg, bl, curr_sp, curr_bp, *mem);

			/* pop ebp */
			pop     = new_rd_ia32_Pop(NULL, env->irg, bl, curr_sp, *mem);
			set_ia32_flags(pop, arch_irn_flags_ignore);
			curr_bp = new_r_Proj(current_ir_graph, bl, pop, mode_bp, pn_ia32_Pop_res);
			curr_sp = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
			*mem    = new_r_Proj(current_ir_graph, bl, pop, mode_M, pn_ia32_Pop_M);
		}
		arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
		arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
	}

	be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
	be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);
}
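/*
 * Illustrative sketch (an assumption of this edit, not from the original
 * file): the two epilogue variants above correspond to
 *
 *     leave               ; mov esp, ebp + pop ebp in one instruction
 *
 * versus the explicit sequence
 *
 *     mov esp, ebp        ; SetSP: discard the frame
 *     pop ebp             ; Pop: restore the caller's frame pointer
 *
 * With try_omit_fp the frame is instead released by a single IncSP,
 * i.e. an  add esp, <framesize>.
 */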
/**
 * Produces the type which sits between the stack args and the locals on the stack.
 * It will contain the return address and space to store the old base pointer.
 * @return The Firm type modeling the ABI between type.
 */
static ir_type *ia32_abi_get_between_type(void *self)
{
	static ir_type *omit_fp_between_type = NULL;
	static ir_type *between_type         = NULL;

	ia32_abi_env_t *env = self;

	if (! between_type) {
		entity *old_bp_ent;
		entity *ret_addr_ent;
		entity *omit_fp_ret_addr_ent;

		ir_type *old_bp_type   = new_type_primitive(new_id_from_str("bp"), mode_P);
		ir_type *ret_addr_type = new_type_primitive(new_id_from_str("return_addr"), mode_P);

		between_type = new_type_class(new_id_from_str("ia32_between_type"));
		old_bp_ent   = new_entity(between_type, new_id_from_str("old_bp"), old_bp_type);
		ret_addr_ent = new_entity(between_type, new_id_from_str("ret_addr"), ret_addr_type);

		set_entity_offset_bytes(old_bp_ent, 0);
		set_entity_offset_bytes(ret_addr_ent, get_type_size_bytes(old_bp_type));
		set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));

		omit_fp_between_type = new_type_class(new_id_from_str("ia32_between_type_omit_fp"));
		omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, new_id_from_str("ret_addr"), ret_addr_type);

		set_entity_offset_bytes(omit_fp_ret_addr_ent, 0);
		set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
	}

	return env->flags.try_omit_fp ? omit_fp_between_type : between_type;
}
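/*
 * Illustrative frame layout (an assumption of this edit, not from the
 * original file); the stack grows towards lower addresses:
 *
 *     stack arguments      (caller)
 *     return address      --+
 *     old base pointer    --+-- the between type built above
 *     locals               (callee frame)
 *
 * With try_omit_fp the between type shrinks to the return address alone.
 */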
/**
 * Returns the inverse operation of @p irn, recalculating the argument at position @p i.
 *
 * @param irn      The original operation
 * @param i        Index of the argument we want the inverse operation to yield
 * @param inverse  struct to be filled with the resulting inverse op
 * @param obstack  The obstack to use for allocation of the returned nodes array
 * @return         The inverse operation or NULL if the operation is not invertible
 */
static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) {
	ir_graph *irg;
	ir_mode  *mode;
	ir_node  *block, *noreg, *nomem;
	int       pnc;

	/* we cannot invert non-ia32 irns */
	if (! is_ia32_irn(irn))
		return NULL;

	/* operand must always be a real operand (not base, index or mem) */
	if (i != 2 && i != 3)
		return NULL;

	/* we don't invert address mode operations */
	if (get_ia32_op_type(irn) != ia32_Normal)
		return NULL;

	irg   = get_irn_irg(irn);
	block = get_nodes_block(irn);
	mode  = get_ia32_res_mode(irn);
	noreg = get_irn_n(irn, 0);
	nomem = new_r_NoMem(irg);

	/* initialize structure */
	inverse->nodes = obstack_alloc(obst, 2 * sizeof(inverse->nodes[0]));
	inverse->costs = 0;
	inverse->n     = 2;

	switch (get_ia32_irn_opcode(irn)) {
		case iro_ia32_Add:
			if (get_ia32_immop_type(irn) == ia32_ImmConst) {
				/* we have an add with a const here */
				/* inverse == add with negated const */
				inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
				pnc               = pn_ia32_Add_res;
				inverse->costs   += 1;
				copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
				set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
				set_ia32_commutative(inverse->nodes[0]);
			}
			else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
				/* we have an add with a symconst here */
				/* inverse == sub with const */
				inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
				pnc               = pn_ia32_Sub_res;
				inverse->costs   += 2;
				copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
			}
			else {
				/* normal add: inverse == sub */
				inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, (ir_node *)irn, get_irn_n(irn, i ^ 1), nomem);
				pnc               = pn_ia32_Sub_res;
			}
			break;
		case iro_ia32_Sub:
			if (get_ia32_immop_type(irn) != ia32_ImmNone) {
				/* we have a sub with a const/symconst here */
				/* inverse == add with this const */
				inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
				pnc               = pn_ia32_Add_res;
				inverse->costs   += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
				copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
			}
			else {
				/* normal sub */
				if (i == 2) {
					inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, (ir_node *)irn, get_irn_n(irn, 3), nomem);
				}
				else {
					inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, 2), (ir_node *)irn, nomem);
				}
				pnc = pn_ia32_Sub_res;
			}
			break;
		case iro_ia32_Eor:
			if (get_ia32_immop_type(irn) != ia32_ImmNone) {
				/* xor with const: inverse = xor */
				inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
				pnc               = pn_ia32_Eor_res;
				inverse->costs   += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
				copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
			}
			else {
				/* normal xor */
				inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, (ir_node *)irn, get_irn_n(irn, i), nomem);
				pnc               = pn_ia32_Eor_res;
			}
			break;
		case iro_ia32_Not:
			inverse->nodes[0] = new_rd_ia32_Not(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), nomem);
			pnc               = pn_ia32_Not_res;
			break;
		case iro_ia32_Minus:
			inverse->nodes[0] = new_rd_ia32_Minus(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), nomem);
			pnc               = pn_ia32_Minus_res;
			break;
		default:
			/* inverse operation not supported */
			return NULL;
	}

	inverse->nodes[1] = new_r_Proj(irg, block, inverse->nodes[0], mode, pnc);

	return inverse;
}
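/*
 * Illustrative example (an assumption of this edit, not from the original
 * file): for an immediate add  x = Add(a, 3)  the inverse at a's operand
 * position is  a = Add(x, -3) ; for  x = Eor(a, b)  it is  a = Eor(x, b) ,
 * since xor is self-inverse. nodes[0] holds the new operation, nodes[1]
 * the Proj selecting its result.
 */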
static const be_abi_callbacks_t ia32_abi_callbacks = {
	ia32_abi_init,
	free,
	ia32_abi_get_between_type,
	ia32_abi_dont_save_regs,
	ia32_abi_prologue,
	ia32_abi_epilogue,
};

/* fill register allocator interface */

static const arch_irn_ops_if_t ia32_irn_ops_if = {
	ia32_get_irn_reg_req,
	ia32_set_irn_reg,
	ia32_get_irn_reg,
	ia32_classify,
	ia32_get_flags,
	ia32_get_frame_entity,
	ia32_set_stack_bias,
	ia32_get_inverse,
};

ia32_irn_ops_t ia32_irn_ops = {
	&ia32_irn_ops_if,
	NULL
};
/**************************************************
 *                _                        _  __
 *               | |                      (_)/ _|
 *   ___ ___   __| | ___  __ _  ___ _ __   _| |_
 *  / __/ _ \ / _` |/ _ \/ _` |/ _ \ '_ \ | |  _|
 * | (_| (_) | (_| |  __/ (_| |  __/ | | || | |
 *  \___\___/ \__,_|\___|\__, |\___|_| |_||_|_|
 *                        __/ |
 *                       |___/
 **************************************************/
/**
 * Transforms the standard firm graph into
 * an ia32 firm graph.
 */
static void ia32_prepare_graph(void *self) {
	ia32_code_gen_t *cg = self;
	dom_front_info_t *dom;
	DEBUG_ONLY(firm_dbg_module_t *old_mod = cg->mod;)

	FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.transform");

	/* 1st: transform constants and psi condition trees */
	irg_walk_blkwise_graph(cg->irg, ia32_place_consts_set_modes, ia32_transform_psi_cond_tree, cg);

	/* 2nd: transform all remaining nodes */
	ia32_register_transformers();
	dom = be_compute_dominance_frontiers(cg->irg);
	irg_walk_blkwise_graph(cg->irg, NULL, ia32_transform_node, cg);
	be_free_dominance_frontiers(dom);
	be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);

	/* 3rd: optimize address mode */
	FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.am");
	ia32_optimize_addressmode(cg);
	be_dump(cg->irg, "-am", dump_ir_block_graph_sched);

	DEBUG_ONLY(cg->mod = old_mod;)
}
static INLINE int need_constraint_copy(ir_node *irn) {
	return \
		! is_ia32_Lea(irn)          && \
		! is_ia32_Conv_I2I(irn)     && \
		! is_ia32_Conv_I2I8Bit(irn) && \
		! is_ia32_CmpCMov(irn)      && \
		! is_ia32_CmpSet(irn);
}
/**
 * Insert copies for all ia32 nodes where the should_be_same requirement
 * is not fulfilled.
 * Transform Sub into Neg -- Add if IN2 == OUT
 */
static void ia32_finish_node(ir_node *irn, void *env) {
	ia32_code_gen_t *cg = env;
	const ia32_register_req_t **reqs;
	const arch_register_t *out_reg, *in_reg, *in2_reg;
	int n_res, i;
	ir_node *copy, *in_node, *block, *in2_node;
	ia32_op_type_t op_tp;

	if (is_ia32_irn(irn)) {
		/* AM Dest nodes don't produce any values */
		op_tp = get_ia32_op_type(irn);
		if (op_tp == ia32_AddrModeD)
			goto end;

		reqs  = get_ia32_out_req_all(irn);
		n_res = get_ia32_n_res(irn);
		block = get_nodes_block(irn);

		/* check all OUT requirements, if there is a should_be_same */
		if ((op_tp == ia32_Normal || op_tp == ia32_AddrModeS) && need_constraint_copy(irn))
		{
			for (i = 0; i < n_res; i++) {
				if (arch_register_req_is(&(reqs[i]->req), should_be_same)) {
					/* get in and out register */
					out_reg = get_ia32_out_reg(irn, i);
					in_node = get_irn_n(irn, reqs[i]->same_pos);
					in_reg  = arch_get_irn_register(cg->arch_env, in_node);

					/* don't copy ignore nodes */
					if (arch_irn_is(cg->arch_env, in_node, ignore) && is_Proj(in_node))
						continue;

					/* check if in and out register are equal */
					if (! REGS_ARE_EQUAL(out_reg, in_reg)) {
						/* in case of a commutative op: just exchange the in's */
						/* beware: the current op could be everything, so test for ia32 */
						/* commutativity first before getting the second in           */
						if (is_ia32_commutative(irn)) {
							in2_node = get_irn_n(irn, reqs[i]->same_pos ^ 1);
							in2_reg  = arch_get_irn_register(cg->arch_env, in2_node);

							if (REGS_ARE_EQUAL(out_reg, in2_reg)) {
								set_irn_n(irn, reqs[i]->same_pos, in2_node);
								set_irn_n(irn, reqs[i]->same_pos ^ 1, in_node);
								continue; /* inputs swapped, no copy needed */
							}
						}

						DBG((cg->mod, LEVEL_1, "inserting copy for %+F in_pos %d\n", irn, reqs[i]->same_pos));

						/* create copy from in register */
						copy = be_new_Copy(arch_register_get_class(in_reg), cg->irg, block, in_node);

						DBG_OPT_2ADDRCPY(copy);

						/* destination is the out register */
						arch_set_irn_register(cg->arch_env, copy, out_reg);

						/* insert copy before the node into the schedule */
						sched_add_before(irn, copy);

						/* set copy as in */
						set_irn_n(irn, reqs[i]->same_pos, copy);
					}
				}
			}
		}
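		/*
		 * Illustrative example (an assumption of this edit, not from the
		 * original file): suppose the allocator produced
		 * eax := Add(ebx, ecx) although the Add result is constrained to
		 * the register of its first input. The loop above rewrites this to
		 *
		 *     mov eax, ebx        ; inserted be_Copy
		 *     add eax, ecx        ; Add now reads and writes eax
		 *
		 * unless the node is commutative and swapping the inputs already
		 * satisfies the constraint.
		 */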
		/* If we have a CondJmp/CmpSet/xCmpSet with immediate, we need to */
		/* check if it's the right operand, otherwise we have             */
		/* to change it, as CMP doesn't support immediate as              */
		/* left operand.                                                  */
		if ((is_ia32_CondJmp(irn) || is_ia32_CmpSet(irn) || is_ia32_xCmpSet(irn)) &&
			(is_ia32_ImmConst(irn) || is_ia32_ImmSymConst(irn)) &&
			op_tp == ia32_AddrModeS)
		{
			set_ia32_op_type(irn, ia32_AddrModeD);
			set_ia32_pncode(irn, get_inversed_pnc(get_ia32_pncode(irn)));
		}

		/* check if there is a sub which needs to be transformed */
		ia32_transform_sub_to_neg_add(irn, cg);
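		/*
		 * Illustrative example (an assumption of this edit, not from the
		 * original file): a Sub whose result must live in the register of
		 * its second operand, e.g. eax := Sub(ebx, eax), cannot be fixed
		 * by a copy alone; it is rewritten into Neg followed by Add:
		 *
		 *     neg eax             ; eax := -eax
		 *     add eax, ebx        ; eax := ebx - (old) eax
		 */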
		/* transform a LEA into an Add if possible */
		ia32_transform_lea_to_add(irn, cg);
	}

end:
	/* check for peephole optimization */
	ia32_peephole_optimization(irn, cg);
}

/**
 * Block walker: finish all nodes of a block.
 */
static void ia32_finish_irg_walker(ir_node *block, void *env) {
	ir_node *irn, *next;

	for (irn = sched_first(block); !sched_is_end(irn); irn = next) {
		next = sched_next(irn);
		ia32_finish_node(irn, env);
	}
}

/**
 * Add Copy nodes for unfulfilled should_be_same constraints.
 */
static void ia32_finish_irg(ir_graph *irg, ia32_code_gen_t *cg) {
	irg_block_walk_graph(irg, NULL, ia32_finish_irg_walker, cg);
}
/**
 * Dummy functions for hooks we don't need but which must be filled.
 */
static void ia32_before_sched(void *self) {
}

/**
 * Called before the register allocator.
 * Calculate a block schedule here. We need it for the x87
 * simulator and the emitter.
 */
static void ia32_before_ra(void *self) {
	ia32_code_gen_t *cg = self;

	cg->blk_sched = sched_create_block_schedule(cg->irg);
}
/**
 * Transforms a be node into a Load.
 */
static void transform_to_Load(ia32_transform_env_t *env) {
	ir_node *irn   = env->irn;
	entity  *ent   = be_get_frame_entity(irn);
	ir_mode *mode  = env->mode;
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *nomem = new_rd_NoMem(env->irg);
	ir_node *sched_point = NULL;
	ir_node *ptr   = get_irn_n(irn, 0);
	ir_node *mem   = be_is_Reload(irn) ? get_irn_n(irn, 1) : nomem;
	ir_node *new_op, *proj;
	const arch_register_t *reg;

	if (sched_is_scheduled(irn)) {
		sched_point = sched_prev(irn);
	}

	if (mode_is_float(mode)) {
		if (USE_SSE2(env->cg))
			new_op = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, ptr, noreg, mem);
		else
			new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem);
	}
	else {
		new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem);
	}

	set_ia32_am_support(new_op, ia32_am_Source);
	set_ia32_op_type(new_op, ia32_AddrModeS);
	set_ia32_am_flavour(new_op, ia32_B);
	set_ia32_ls_mode(new_op, mode);
	set_ia32_frame_ent(new_op, ent);
	set_ia32_use_frame(new_op);

	DBG_OPT_RELOAD2LD(irn, new_op);

	proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, pn_Load_res);

	if (sched_point) {
		sched_add_after(sched_point, new_op);
		sched_add_after(new_op, proj);

		sched_remove(irn);
	}

	/* copy the register from the old node to the new Load */
	reg = arch_get_irn_register(env->cg->arch_env, irn);
	arch_set_irn_register(env->cg->arch_env, new_op, reg);

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, new_op));

	exchange(irn, proj);
}
/**
 * Transforms a be node into a Store.
 */
static void transform_to_Store(ia32_transform_env_t *env) {
	ir_node *irn   = env->irn;
	entity  *ent   = be_get_frame_entity(irn);
	ir_mode *mode  = env->mode;
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *nomem = new_rd_NoMem(env->irg);
	ir_node *ptr   = get_irn_n(irn, 0);
	ir_node *val   = get_irn_n(irn, 1);
	ir_node *new_op, *proj;
	ir_node *sched_point = NULL;

	if (sched_is_scheduled(irn)) {
		sched_point = sched_prev(irn);
	}

	if (mode_is_float(mode)) {
		if (USE_SSE2(env->cg))
			new_op = new_rd_ia32_xStore(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
		else
			new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
	}
	else if (get_mode_size_bits(mode) == 8) {
		new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
	}
	else {
		new_op = new_rd_ia32_Store(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
	}

	set_ia32_am_support(new_op, ia32_am_Dest);
	set_ia32_op_type(new_op, ia32_AddrModeD);
	set_ia32_am_flavour(new_op, ia32_B);
	set_ia32_ls_mode(new_op, mode);
	set_ia32_frame_ent(new_op, ent);
	set_ia32_use_frame(new_op);

	DBG_OPT_SPILL2ST(irn, new_op);

	proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode_M, pn_ia32_Store_M);

	if (sched_point) {
		sched_add_after(sched_point, new_op);
		sched_add_after(new_op, proj);

		sched_remove(irn);
	}

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, new_op));

	exchange(irn, proj);
}
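/*
 * Illustrative sketch (an assumption of this edit, not from the original
 * file): after this pass a Spill/Reload pair for a GP register ends up as
 * plain frame accesses such as
 *
 *     mov DWORD PTR [ebp-8], eax   ; Spill  -> ia32 Store
 *     mov eax, DWORD PTR [ebp-8]   ; Reload -> ia32 Load
 *
 * (the -8 offset is hypothetical; the real one comes from the frame entity
 * after stack biasing).
 */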
/**
 * Fix the mode of Spill/Reload
 */
static ir_mode *fix_spill_mode(ia32_code_gen_t *cg, ir_mode *mode)
{
	if (mode_is_float(mode)) {
/**
 * Block-Walker: Calls the transform functions Spill and Reload.
 */
static void ia32_after_ra_walker(ir_node *block, void *env) {
	ir_node *node, *prev;
	ia32_code_gen_t *cg = env;
	ia32_transform_env_t tenv;

	tenv.block = block;
	tenv.irg   = current_ir_graph;
	tenv.cg    = cg;
	DEBUG_ONLY(tenv.mod = cg->mod;)

	/* beware: the schedule is changed here */
	for (node = sched_last(block); !sched_is_begin(node); node = prev) {
		prev = sched_prev(node);
		if (be_is_Reload(node)) {
			/* we always reload the whole register */
			tenv.dbg  = get_irn_dbg_info(node);
			tenv.irn  = node;
			tenv.mode = fix_spill_mode(cg, get_irn_mode(node));
			transform_to_Load(&tenv);
		}
		else if (be_is_Spill(node)) {
			/* we always spill the whole register */
			tenv.dbg  = get_irn_dbg_info(node);
			tenv.irn  = node;
			tenv.mode = fix_spill_mode(cg, get_irn_mode(be_get_Spill_context(node)));
			transform_to_Store(&tenv);
		}
	}
}
/**
 * We transform Spill and Reload here. This needs to be done before
 * stack biasing, otherwise we would miss the corrected offset for these nodes.
 *
 * If x87 instructions should be emitted, run the x87 simulator and patch
 * the virtual instructions. This must obviously be done after register allocation.
 */
static void ia32_after_ra(void *self) {
	ia32_code_gen_t *cg = self;
	irg_block_walk_graph(cg->irg, NULL, ia32_after_ra_walker, self);

	/* if we do x87 code generation, rewrite all the virtual instructions and registers */
	if (cg->used_fp == fp_x87) {
		x87_simulate_graph(cg->arch_env, cg->irg, cg->blk_sched);
	}
}
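/*
 * Background note (an assumption of this edit, summarizing standard x87
 * behaviour): the x87 FPU addresses its eight registers st(0)..st(7) as a
 * stack relative to a top-of-stack pointer, so flat virtual registers
 * cannot be assigned directly; the simulator rewrites the virtual vfp
 * operations into stack-relative ones (see also the st register name
 * patching in ia32_init() below).
 */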
/**
 * Emits the code, closes the output file and frees
 * the code generator interface.
 */
static void ia32_codegen(void *self) {
	ia32_code_gen_t *cg = self;
	ir_graph *irg = cg->irg;

	ia32_finish_irg(irg, cg);
	be_dump(irg, "-finished", dump_ir_block_graph_sched);
	ia32_gen_routine(cg->isa->out, irg, cg);

	cur_reg_set = NULL;

	/* remove it from the isa */
	cg->isa->cg = NULL;

	/* de-allocate code generator */
	del_set(cg->reg_set);
	free(cg);
}
static void *ia32_cg_init(const be_irg_t *birg);

static const arch_code_generator_if_t ia32_code_gen_if = {
	ia32_cg_init,
	NULL,                /* before abi introduce hook */
	ia32_prepare_graph,
	ia32_before_sched,   /* before scheduling hook */
	ia32_before_ra,      /* before register allocation hook */
	ia32_after_ra,       /* after register allocation hook */
	ia32_codegen         /* emit && done */
};
/**
 * Initializes an IA32 code generator.
 */
static void *ia32_cg_init(const be_irg_t *birg) {
	ia32_isa_t      *isa = (ia32_isa_t *)birg->main_env->arch_env->isa;
	ia32_code_gen_t *cg  = xcalloc(1, sizeof(*cg));

	cg->impl      = &ia32_code_gen_if;
	cg->irg       = birg->irg;
	cg->reg_set   = new_set(ia32_cmp_irn_reg_assoc, 1024);
	cg->arch_env  = birg->main_env->arch_env;
	cg->isa       = isa;
	cg->birg      = birg;
	cg->blk_sched = NULL;
	cg->fp_to_gp  = NULL;
	cg->gp_to_fp  = NULL;
	cg->fp_kind   = isa->fp_kind;
	cg->used_fp   = fp_none;

	FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.cg");

	/* copy optimizations from isa for easier access */
	cg->opt = isa->opt;

	/* enter the current code generator */
	isa->cg = cg;

	if (isa->name_obst_size) {
		//printf("freed %d bytes from name obst\n", isa->name_obst_size);
		isa->name_obst_size = 0;
		obstack_free(isa->name_obst, NULL);
		obstack_init(isa->name_obst);
	}

	cur_reg_set = cg->reg_set;

	ia32_irn_ops.cg = cg;

	return (arch_code_generator_t *)cg;
}
/*****************************************************************
 *  ____             _                  _   _____  _____
 * |  _ \           | |                | | |_   _|/ ____|  /\
 * | |_) | __ _  ___| | _____ _ __   __| |   | |  | (___   /  \
 * |  _ < / _` |/ __| |/ / _ \ '_ \ / _` |   | |   \___ \ / /\ \
 * | |_) | (_| | (__|   <  __/ | | | (_| |  _| |_ ____) / ____ \
 * |____/ \__,_|\___|_|\_\___|_| |_|\__,_| |_____|_____/_/    \_\
 *
 *****************************************************************/
/**
 * The template that generates a new ISA object.
 * Note that this template can be changed by command line
 * arguments.
 */
static ia32_isa_t ia32_isa_template = {
	{
		&ia32_isa_if,           /* isa interface implementation */
		&ia32_gp_regs[REG_ESP], /* stack pointer register */
		&ia32_gp_regs[REG_EBP], /* base pointer register */
		-1,                     /* stack direction */
	},
	NULL,                   /* 16bit register names */
	NULL,                   /* 8bit register names */
	NULL,                   /* types */
	NULL,                   /* tv_ent */
	(0                 |
	IA32_OPT_INCDEC    |    /* optimize add 1, sub 1 into inc/dec               default: on */
	IA32_OPT_DOAM      |    /* optimize address mode                            default: on */
	IA32_OPT_LEA       |    /* optimize for LEAs                                default: on */
	IA32_OPT_PLACECNST |    /* place constants immediately before instructions, default: on */
	IA32_OPT_IMMOPS    |    /* operations can use immediates,                   default: on */
	IA32_OPT_EXTBB),        /* use extended basic block scheduling,             default: on */
	arch_pentium_4,         /* instruction architecture */
	arch_pentium_4,         /* optimize for architecture */
	fp_sse2,                /* use sse2 unit */
	NULL,                   /* current code generator */
	NULL,                   /* output file */
	NULL,                   /* name obstack */
	0                       /* name obst size */
};
/**
 * Initializes the backend ISA.
 */
static void *ia32_init(FILE *file_handle) {
	static int inited = 0;
	ia32_isa_t *isa;

	if (inited)
		return NULL;

	isa = xmalloc(sizeof(*isa));
	memcpy(isa, &ia32_isa_template, sizeof(*isa));

	ia32_register_init(isa);
	ia32_create_opcodes();

	if ((ARCH_INTEL(isa->arch) && isa->arch < arch_pentium_4) ||
	    (ARCH_AMD(isa->arch) && isa->arch < arch_athlon))
		/* no SSE2 for these CPUs */
		isa->fp_kind = fp_x87;

	if (ARCH_INTEL(isa->opt_arch) && isa->opt_arch >= arch_pentium_4) {
		/* the Pentium 4 doesn't like inc and dec instructions */
		isa->opt &= ~IA32_OPT_INCDEC;
	}

	isa->regs_16bit = pmap_create();
	isa->regs_8bit  = pmap_create();
	isa->types      = pmap_create();
	isa->tv_ent     = pmap_create();
	isa->out        = file_handle;

	ia32_build_16bit_reg_map(isa->regs_16bit);
	ia32_build_8bit_reg_map(isa->regs_8bit);

	/* patch register names of x87 registers */
	ia32_st_regs[0].name = "st";
	ia32_st_regs[1].name = "st(1)";
	ia32_st_regs[2].name = "st(2)";
	ia32_st_regs[3].name = "st(3)";
	ia32_st_regs[4].name = "st(4)";
	ia32_st_regs[5].name = "st(5)";
	ia32_st_regs[6].name = "st(6)";
	ia32_st_regs[7].name = "st(7)";

	isa->name_obst = xmalloc(sizeof(*isa->name_obst));
	obstack_init(isa->name_obst);
	isa->name_obst_size = 0;

	ia32_handle_intrinsics();
	ia32_switch_section(NULL, NO_SECTION);
	fprintf(isa->out, "\t.intel_syntax\n");

	inited = 1;

	return isa;
}
/**
 * Closes the output file and frees the ISA structure.
 */
static void ia32_done(void *self) {
	ia32_isa_t *isa = self;

	/* emit now all global declarations */
	ia32_gen_decls(isa->out);

	pmap_destroy(isa->regs_16bit);
	pmap_destroy(isa->regs_8bit);
	pmap_destroy(isa->tv_ent);
	pmap_destroy(isa->types);

	//printf("name obst size = %d bytes\n", isa->name_obst_size);
	obstack_free(isa->name_obst, NULL);
	free(self);
}
/**
 * Return the number of register classes for this architecture.
 * We always report these:
 *  - the general purpose registers
 *  - the floating point register set (depending on the unit used for FP)
 *  - MMX/SSE registers (currently not supported)
 */
static int ia32_get_n_reg_class(const void *self) {
	return 2;
}

/**
 * Return the register class for index i.
 */
static const arch_register_class_t *ia32_get_reg_class(const void *self, int i) {
	const ia32_isa_t *isa = self;
	assert(i >= 0 && i < 2 && "Invalid ia32 register class requested.");
	if (i == 0)
		return &ia32_reg_classes[CLASS_ia32_gp];
	return USE_SSE2(isa) ? &ia32_reg_classes[CLASS_ia32_xmm] : &ia32_reg_classes[CLASS_ia32_vfp];
}
/**
 * Get the register class which shall be used to store a value of a given mode.
 * @param self The this pointer.
 * @param mode The mode in question.
 * @return A register class which can hold values of the given mode.
 */
const arch_register_class_t *ia32_get_reg_class_for_mode(const void *self, const ir_mode *mode) {
	const ia32_isa_t *isa = self;
	if (mode_is_float(mode)) {
		return USE_SSE2(isa) ? &ia32_reg_classes[CLASS_ia32_xmm] : &ia32_reg_classes[CLASS_ia32_vfp];
	}
	else
		return &ia32_reg_classes[CLASS_ia32_gp];
}
/**
 * Get the ABI restrictions for procedure calls.
 * @param self        The this pointer.
 * @param method_type The type of the method (procedure) in question.
 * @param abi         The abi object to be modified
 */
static void ia32_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi) {
	const ia32_isa_t *isa = self;
	ir_type  *tp;
	ir_mode  *mode;
	unsigned  cc        = get_method_calling_convention(method_type);
	int       n         = get_method_n_params(method_type);
	int       biggest_n = -1;
	int       stack_idx = 0;
	int       i, ignore_1, ignore_2;
	ir_mode **modes;
	const arch_register_t *reg;
	be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);

	unsigned use_push = !IS_P6_ARCH(isa->opt_arch);

	/* set abi flags for calls */
	call_flags.bits.left_to_right         = 0;  /* always last arg first on stack */
	call_flags.bits.store_args_sequential = use_push;
	/* call_flags.bits.try_omit_fp           not changed: can handle both settings */
	call_flags.bits.fp_free               = 0;  /* the frame pointer is fixed in IA32 */
	call_flags.bits.call_has_imm          = 1;  /* IA32 calls can have immediate address */

	/* set stack parameter passing style */
	be_abi_call_set_flags(abi, call_flags, &ia32_abi_callbacks);

	/* collect the mode for each type */
	modes = alloca(n * sizeof(modes[0]));

	for (i = 0; i < n; i++) {
		tp       = get_method_param_type(method_type, i);
		modes[i] = get_type_mode(tp);
	}

	/* set register parameters */
	if (cc & cc_reg_param) {
		/* determine the number of parameters passed via registers */
		biggest_n = ia32_get_n_regparam_class(n, modes, &ignore_1, &ignore_2);

		/* loop over all parameters and set the register requirements */
		for (i = 0; i <= biggest_n; i++) {
			reg = ia32_get_RegParam_reg(n, modes, i, cc);
			assert(reg && "no register found for a register parameter");
			be_abi_call_param_reg(abi, i, reg);
		}

		stack_idx = i;
	}

	/* set stack parameters */
	for (i = stack_idx; i < n; i++) {
		be_abi_call_param_stack(abi, i, 1, 0, 0);
	}

	/* set return registers */
	n = get_method_n_ress(method_type);

	assert(n <= 2 && "more than two results not supported");

	/* In case of 64bit returns, we will have two 32bit values */
	if (n == 2) {
		tp   = get_method_res_type(method_type, 0);
		mode = get_type_mode(tp);

		assert(!mode_is_float(mode) && "two FP results not supported");

		tp   = get_method_res_type(method_type, 1);
		mode = get_type_mode(tp);

		assert(!mode_is_float(mode) && "two FP results not supported");

		be_abi_call_res_reg(abi, 0, &ia32_gp_regs[REG_EAX]);
		be_abi_call_res_reg(abi, 1, &ia32_gp_regs[REG_EDX]);
	}
	else if (n == 1) {
		const arch_register_t *reg;

		tp   = get_method_res_type(method_type, 0);
		assert(is_atomic_type(tp));
		mode = get_type_mode(tp);

		reg = mode_is_float(mode) ?
			(USE_SSE2(isa) ? &ia32_xmm_regs[REG_XMM0] : &ia32_vfp_regs[REG_VF0]) :
			&ia32_gp_regs[REG_EAX];

		be_abi_call_res_reg(abi, 0, reg);
	}
}
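/*
 * Illustrative examples (assumptions of this edit, not from the original
 * file) of the return register choices made above:
 *
 *     long long f(void);   -> 64bit result split into EDX:EAX
 *     int       g(int x);  -> result in EAX, x passed on the stack
 *     float     h(void);   -> result in XMM0 with SSE2, else on the vfp unit
 */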
static const void *ia32_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn) {
	return &ia32_irn_ops;
}

const arch_irn_handler_t ia32_irn_handler = {
	ia32_get_irn_ops
};

const arch_irn_handler_t *ia32_get_irn_handler(const void *self) {
	return &ia32_irn_handler;
}

int ia32_to_appear_in_schedule(void *block_env, const ir_node *irn) {
	return is_ia32_irn(irn);
}

/**
 * Initializes the code generator interface.
 */
static const arch_code_generator_if_t *ia32_get_code_generator_if(void *self) {
	return &ia32_code_gen_if;
}
list_sched_selector_t ia32_sched_selector;

/**
 * Returns the scheduler with to_appear_in_schedule() overloaded
 * (currently based on the trivial selector; the reg_pressure selector is disabled).
 */
static const list_sched_selector_t *ia32_get_list_sched_selector(const void *self) {
//	memcpy(&ia32_sched_selector, reg_pressure_selector, sizeof(list_sched_selector_t));
	memcpy(&ia32_sched_selector, trivial_selector, sizeof(list_sched_selector_t));
	ia32_sched_selector.to_appear_in_schedule = ia32_to_appear_in_schedule;
	return &ia32_sched_selector;
}
/**
 * Returns the necessary byte alignment for storing a register of given class.
 */
static int ia32_get_reg_class_alignment(const void *self, const arch_register_class_t *cls) {
	ir_mode *mode = arch_register_class_mode(cls);
	int bytes     = get_mode_size_bytes(mode);

	if (mode_is_float(mode) && bytes > 8)
		return 16;
	return bytes;
}
#ifdef WITH_LIBCORE

/* instruction set architectures. */
static const lc_opt_enum_int_items_t arch_items[] = {
	{ "386",        arch_i386, },
	{ "486",        arch_i486, },
	{ "pentium",    arch_pentium, },
	{ "586",        arch_pentium, },
	{ "pentiumpro", arch_pentium_pro, },
	{ "686",        arch_pentium_pro, },
	{ "pentiummmx", arch_pentium_mmx, },
	{ "pentium2",   arch_pentium_2, },
	{ "p2",         arch_pentium_2, },
	{ "pentium3",   arch_pentium_3, },
	{ "p3",         arch_pentium_3, },
	{ "pentium4",   arch_pentium_4, },
	{ "p4",         arch_pentium_4, },
	{ "pentiumm",   arch_pentium_m, },
	{ "pm",         arch_pentium_m, },
	{ "core",       arch_core, },
	{ "k6",         arch_k6, },
	{ "athlon",     arch_athlon, },
	{ "athlon64",   arch_athlon_64, },
	{ "opteron",    arch_opteron, },
	{ NULL,         0 }
};

static lc_opt_enum_int_var_t arch_var = {
	&ia32_isa_template.arch, arch_items
};

static lc_opt_enum_int_var_t opt_arch_var = {
	&ia32_isa_template.opt_arch, arch_items
};

static const lc_opt_enum_int_items_t fp_unit_items[] = {
	{ "x87",  fp_x87 },
	{ "sse2", fp_sse2 },
	{ NULL,   0 }
};

static lc_opt_enum_int_var_t fp_unit_var = {
	&ia32_isa_template.fp_kind, fp_unit_items
};

static const lc_opt_enum_int_items_t gas_items[] = {
	{ "linux", ASM_LINUX_GAS },
	{ "mingw", ASM_MINGW_GAS },
	{ NULL,    0 }
};

static lc_opt_enum_int_var_t gas_var = {
	(int *)&asm_flavour, gas_items
};

static const lc_opt_table_entry_t ia32_options[] = {
	LC_OPT_ENT_ENUM_INT("arch",      "select the instruction architecture",        &arch_var),
	LC_OPT_ENT_ENUM_INT("opt",       "optimize for instruction architecture",      &opt_arch_var),
	LC_OPT_ENT_ENUM_INT("fpunit",    "select the floating point unit",             &fp_unit_var),
	LC_OPT_ENT_NEGBIT("noaddrmode",  "do not use address mode",                    &ia32_isa_template.opt, IA32_OPT_DOAM),
	LC_OPT_ENT_NEGBIT("nolea",       "do not optimize for LEAs",                   &ia32_isa_template.opt, IA32_OPT_LEA),
	LC_OPT_ENT_NEGBIT("noplacecnst", "do not place constants",                     &ia32_isa_template.opt, IA32_OPT_PLACECNST),
	LC_OPT_ENT_NEGBIT("noimmop",     "no operations with immediates",              &ia32_isa_template.opt, IA32_OPT_IMMOPS),
	LC_OPT_ENT_NEGBIT("noextbb",     "do not use extended basic block scheduling", &ia32_isa_template.opt, IA32_OPT_EXTBB),
	LC_OPT_ENT_ENUM_INT("gasmode",   "set the GAS compatibility mode",             &gas_var),
	{ NULL }
};
/**
 * Register command line options for the ia32 backend.
 *
 * Options so far:
 *
 * ia32-arch=arch    create instructions for arch
 * ia32-opt=arch     optimize for run on arch
 * ia32-fpunit=unit  select floating point unit (x87 or SSE2)
 * ia32-incdec       optimize for inc/dec
 * ia32-noaddrmode   do not use address mode
 * ia32-nolea        do not optimize for LEAs
 * ia32-noplacecnst  do not place constants
 * ia32-noimmop      no operations with immediates
 * ia32-noextbb      do not use extended basic block scheduling
 * ia32-gasmode      set the GAS compatibility mode
 */
static void ia32_register_options(lc_opt_entry_t *ent)
{
	lc_opt_entry_t *be_grp_ia32 = lc_opt_get_grp(ent, "ia32");
	lc_opt_add_table(be_grp_ia32, ia32_options);
}
#endif /* WITH_LIBCORE */
const arch_isa_if_t ia32_isa_if = {
	ia32_init,
	ia32_done,
	ia32_get_n_reg_class,
	ia32_get_reg_class,
	ia32_get_reg_class_for_mode,
	ia32_get_call_abi,
	ia32_get_irn_handler,
	ia32_get_code_generator_if,
	ia32_get_list_sched_selector,
	ia32_get_reg_class_alignment,
#ifdef WITH_LIBCORE
	ia32_register_options
#endif
};