/*
 * This file is part of libFirm.
 * Copyright (C) 2012 University of Karlsruhe.
 */

/**
 * @file
 * @brief   This is the main ia32 firm backend driver.
 * @author  Christian Wuerdig
 */
#include <math.h>
#include <stdbool.h>

#include "lc_opts_enum.h"

#include "iredges_t.h"
#include "iroptimize.h"
#include "instrument.h"
#include "lower_calls.h"
#include "lower_mode_b.h"
#include "lower_softfloat.h"
#include "firmstat_t.h"

#include "beblocksched.h"
#include "bespillutil.h"
#include "bespillslots.h"
#include "betranshlp.h"
#include "belistsched.h"
#include "beabihelper.h"

#include "bearch_ia32_t.h"
#include "ia32_new_nodes.h"
#include "gen_ia32_regalloc_if.h"
#include "ia32_common_transform.h"
#include "ia32_transform.h"
#include "ia32_emitter.h"
#include "ia32_optimize.h"
#include "ia32_dbg_stat.h"
#include "ia32_finish.h"
#include "ia32_architecture.h"
#include "ia32_pbqp_transform.h"
transformer_t be_transformer = TRANSFORMER_DEFAULT;

ir_mode *ia32_mode_fpcw;
ir_mode *ia32_mode_E;
ir_type *ia32_type_E;

/** The current omit-fp state */
static ir_type   *omit_fp_between_type = NULL;
static ir_type   *between_type         = NULL;
static ir_entity *old_bp_ent           = NULL;
static ir_entity *ret_addr_ent         = NULL;
static ir_entity *omit_fp_ret_addr_ent = NULL;
/**
 * The environment for the intrinsic mapping.
 */
static ia32_intrinsic_env_t intrinsic_env = {
    NULL, /* entity for __divdi3 library call */
    NULL, /* entity for __moddi3 library call */
    NULL, /* entity for __udivdi3 library call */
    NULL, /* entity for __umoddi3 library call */
};
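/* Illustration (not part of the driver): during doubleword lowering a 64-bit
 * division is mapped onto one of the library calls whose entities are cached
 * above; roughly
 *
 *     long long q = a / b;   =>   q = __divdi3(a, b);
 *
 * The entities start out NULL and are created lazily the first time the
 * intrinsic lowering needs them. */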
typedef ir_node *(*create_const_node_func)(dbg_info *dbgi, ir_node *block);

/**
 * Used to create per-graph unique pseudo nodes.
 */
static inline ir_node *create_const(ir_graph *irg, ir_node **place,
                                    create_const_node_func func,
                                    const arch_register_t *reg)
{
    ir_node *block, *res;

    if (*place != NULL)
        return *place;

    block = get_irg_start_block(irg);
    res   = func(NULL, block);
    arch_set_irn_register(res, reg);
    *place = res;

    return res;
}
/* Creates the unique per irg GP NoReg node. */
ir_node *ia32_new_NoReg_gp(ir_graph *irg)
{
    ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
    return create_const(irg, &irg_data->noreg_gp, new_bd_ia32_NoReg_GP,
                        &ia32_registers[REG_GP_NOREG]);
}

ir_node *ia32_new_NoReg_fp(ir_graph *irg)
{
    ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
    return create_const(irg, &irg_data->noreg_fp, new_bd_ia32_NoReg_FP,
                        &ia32_registers[REG_FP_NOREG]);
}

ir_node *ia32_new_NoReg_xmm(ir_graph *irg)
{
    ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
    return create_const(irg, &irg_data->noreg_xmm, new_bd_ia32_NoReg_XMM,
                        &ia32_registers[REG_XMM_NOREG]);
}

ir_node *ia32_new_Fpu_truncate(ir_graph *irg)
{
    ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
    return create_const(irg, &irg_data->fpu_trunc_mode, new_bd_ia32_ChangeCW,
                        &ia32_registers[REG_FPCW]);
}
/**
 * Returns the admissible noreg register node for input register pos of node irn.
 */
static ir_node *ia32_get_admissible_noreg(ir_node *irn, int pos)
{
    ir_graph *irg = get_irn_irg(irn);
    const arch_register_req_t *req = arch_get_irn_register_req_in(irn, pos);

    assert(req != NULL && "Missing register requirements");
    if (req->cls == &ia32_reg_classes[CLASS_ia32_gp])
        return ia32_new_NoReg_gp(irg);

    if (ia32_cg_config.use_sse2) {
        return ia32_new_NoReg_xmm(irg);
    } else {
        return ia32_new_NoReg_fp(irg);
    }
}
static ir_entity *ia32_get_frame_entity(const ir_node *irn)
{
    return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
}

static void ia32_set_frame_entity(ir_node *node, ir_entity *entity)
{
    if (is_be_node(node))
        be_node_set_frame_entity(node, entity);
    else
        set_ia32_frame_ent(node, entity);
}

static void ia32_set_frame_offset(ir_node *irn, int bias)
{
    if (get_ia32_frame_ent(irn) == NULL)
        return;

    if (is_ia32_Pop(irn) || is_ia32_PopMem(irn)) {
        ir_graph          *irg    = get_irn_irg(irn);
        be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
        if (layout->sp_relative) {
            /* Pop nodes modify the stack pointer before calculating the
             * destination address, so fix this here */
            bias -= 4;
        }
    }
    add_ia32_am_offs_int(irn, bias);
}
static int ia32_get_sp_bias(const ir_node *node)
{
    if (is_ia32_Call(node))
        return -(int)get_ia32_call_attr_const(node)->pop;

    if (is_ia32_Push(node))
        return 4;

    if (is_ia32_Pop(node) || is_ia32_PopMem(node))
        return -4;

    if (is_ia32_Leave(node) || is_ia32_CopyEbpEsp(node)) {
        return SP_BIAS_RESET;
    }

    return 0;
}
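/* Illustration (not part of the driver): the bias is how a node changes the
 * stack depth as seen by the stack-bias walker. For a push/pop pair the
 * contributions cancel out:
 *
 *     push ...   ; bias +4 (stack grows by one 32-bit slot)
 *     pop  ...   ; bias -4
 *
 * Leave/CopyEbpEsp recompute esp from ebp instead of adjusting it
 * incrementally, which is what SP_BIAS_RESET signals. */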
/**
 * Build the between type and entities if not already built.
 */
static void ia32_build_between_type(void)
{
#define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
    if (between_type == NULL) {
        ir_type *old_bp_type   = new_type_primitive(mode_Iu);
        ir_type *ret_addr_type = new_type_primitive(mode_Iu);

        between_type = new_type_struct(IDENT("ia32_between_type"));
        old_bp_ent   = new_entity(between_type, IDENT("old_bp"), old_bp_type);
        ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type);

        set_entity_offset(old_bp_ent, 0);
        set_entity_offset(ret_addr_ent, get_type_size_bytes(old_bp_type));
        set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
        set_type_state(between_type, layout_fixed);

        omit_fp_between_type = new_type_struct(IDENT("ia32_between_type_omit_fp"));
        omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type);

        set_entity_offset(omit_fp_ret_addr_ent, 0);
        set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
        set_type_state(omit_fp_between_type, layout_fixed);
    }
#undef IDENT
}
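/* Illustration (not part of the driver): the between type models the bytes
 * that the call sequence itself places between the caller's stack arguments
 * and the callee's locals. With a frame pointer:
 *
 *     higher addresses
 *         ... stack arguments ...
 *         ret_addr  (offset 4)   <- pushed by the call
 *         old_bp    (offset 0)   <- pushed by the prolog
 *         ... locals ...
 *     lower addresses
 *
 * When the frame pointer is omitted only ret_addr remains, which is what
 * omit_fp_between_type describes. */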
/**
 * Produces the type which sits between the stack args and the locals on the
 * stack. It will contain the return address and space to store the old base
 * pointer.
 * @return The Firm type modeling the ABI between type.
 */
static ir_type *ia32_abi_get_between_type(ir_graph *irg)
{
    const be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
    ia32_build_between_type();
    return layout->sp_relative ? omit_fp_between_type : between_type;
}

/**
 * Return the stack entity that contains the return address.
 */
ir_entity *ia32_get_return_address_entity(ir_graph *irg)
{
    const be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
    ia32_build_between_type();
    return layout->sp_relative ? omit_fp_ret_addr_ent : ret_addr_ent;
}

/**
 * Return the stack entity that contains the frame address.
 */
ir_entity *ia32_get_frame_address_entity(ir_graph *irg)
{
    const be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
    ia32_build_between_type();
    return layout->sp_relative ? NULL : old_bp_ent;
}
/**
 * Get the estimated cycle count for @p irn.
 *
 * @param irn  The node.
 * @return     The estimated cycle count for this operation.
 */
static int ia32_get_op_estimated_cost(const ir_node *irn)
{
    int            cost;
    ia32_op_type_t op_tp;

    if (!is_ia32_irn(irn))
        return 1;

    assert(is_ia32_irn(irn));

    cost  = get_ia32_latency(irn);
    op_tp = get_ia32_op_type(irn);

    if (is_ia32_CopyB(irn)) {
        cost = 250;
    }
    else if (is_ia32_CopyB_i(irn)) {
        int size = get_ia32_copyb_size(irn);
        cost     = 20 + (int)ceil((4.0 / 3.0) * size);
    }
    /* in case of address mode operations add additional cycles */
    else if (op_tp == ia32_AddrModeD || op_tp == ia32_AddrModeS) {
        /* In case of stack access and access to fixed addresses add 5 cycles
         * (we assume they are in cache), other memory operations cost 20
         * cycles. */
        if (is_ia32_use_frame(irn) || (
                is_ia32_NoReg_GP(get_irn_n(irn, n_ia32_base)) &&
                is_ia32_NoReg_GP(get_irn_n(irn, n_ia32_index))
            )) {
            cost += 5;
        } else {
            cost += 20;
        }
    }

    return cost;
}
static ir_mode *get_spill_mode_mode(const ir_mode *mode)
{
    if (mode_is_float(mode))
        return ia32_mode_E;

    return mode_Iu;
}

/**
 * Get the mode that should be used for spilling value node
 */
static ir_mode *get_spill_mode(const ir_node *node)
{
    ir_mode *mode = get_irn_mode(node);
    return get_spill_mode_mode(mode);
}

/**
 * Checks whether an address-mode reload for a node with mode mode is
 * compatible with a spillslot of mode spill_mode.
 */
static int ia32_is_spillmode_compatible(const ir_mode *mode, const ir_mode *spillmode)
{
    return !mode_is_float(mode) || mode == spillmode;
}
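/* Illustration (not part of the driver): integer values can be reloaded in
 * any integer mode, but a float spilled in the 80-bit x87 format must not be
 * read back directly as a 32/64-bit operand by an address-mode instruction;
 * that is why floats additionally require mode == spillmode above. */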
/**
 * Check if irn can load its operand at position i from memory (source address mode).
 * @param irn  The irn to be checked
 * @param i    The operand's position
 * @return     non-zero if the operand can be loaded
 */
static int ia32_possible_memory_operand(const ir_node *irn, unsigned int i)
{
    ir_node       *op        = get_irn_n(irn, i);
    const ir_mode *mode      = get_irn_mode(op);
    const ir_mode *spillmode = get_spill_mode(op);

    if (!is_ia32_irn(irn)                              || /* must be an ia32 irn */
        get_ia32_op_type(irn) != ia32_Normal           || /* must not already be an address-mode irn */
        !ia32_is_spillmode_compatible(mode, spillmode) ||
        is_ia32_use_frame(irn))                           /* must not already use frame */
        return 0;

    switch (get_ia32_am_support(irn)) {
        case ia32_am_none:
            return 0;

        case ia32_am_unary:
            if (i != n_ia32_unary_op)
                return 0;
            break;

        case ia32_am_binary:
            switch (i) {
                case n_ia32_binary_left: {
                    if (!is_ia32_commutative(irn))
                        return 0;

                    /* we can't swap left/right for limited registers
                     * (As this (currently) breaks constraint handling copies) */
                    arch_register_req_t const *const req = arch_get_irn_register_req_in(irn, n_ia32_binary_left);
                    if (arch_register_req_is(req, limited))
                        return 0;
                    break;
                }

                case n_ia32_binary_right:
                    break;

                default:
                    return 0;
            }
            break;

        default:
            panic("Unknown AM type");
    }

    /* HACK: must not already use "real" memory.
     * This can happen for Call and Div */
    if (!is_NoMem(get_irn_n(irn, n_ia32_mem)))
        return 0;

    return 1;
}
static void ia32_perform_memory_operand(ir_node *irn, ir_node *spill,
                                        unsigned int i)
{
    ir_mode *load_mode;
    ir_mode *dest_op_mode;

    assert(ia32_possible_memory_operand(irn, i) && "Cannot perform memory operand change");

    set_ia32_op_type(irn, ia32_AddrModeS);

    load_mode    = get_irn_mode(get_irn_n(irn, i));
    dest_op_mode = get_ia32_ls_mode(irn);
    if (get_mode_size_bits(load_mode) <= get_mode_size_bits(dest_op_mode)) {
        set_ia32_ls_mode(irn, load_mode);
    }
    set_ia32_use_frame(irn);
    set_ia32_need_stackent(irn);

    if (i == n_ia32_binary_left                    &&
        get_ia32_am_support(irn) == ia32_am_binary &&
        /* immediates are only allowed on the right side */
        !is_ia32_Immediate(get_irn_n(irn, n_ia32_binary_right))) {
        ia32_swap_left_right(irn);
        i = n_ia32_binary_right;
    }

    assert(is_NoMem(get_irn_n(irn, n_ia32_mem)));

    set_irn_n(irn, n_ia32_base, get_irg_frame(get_irn_irg(irn)));
    set_irn_n(irn, n_ia32_mem,  spill);
    set_irn_n(irn, i,           ia32_get_admissible_noreg(irn, i));
    set_ia32_is_reload(irn);
}
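/* Illustration (not part of the driver): folding a reload into a source
 * address mode turns the two-instruction sequence
 *
 *     mov eax, [ebp-8]   ; reload of the spilled operand
 *     add ecx, eax
 *
 * into a single instruction that reads the spill slot directly:
 *
 *     add ecx, [ebp-8]
 */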
static const be_abi_callbacks_t ia32_abi_callbacks = {
    ia32_abi_get_between_type,
};

/* register allocator interface */
static const arch_irn_ops_t ia32_irn_ops = {
    ia32_get_frame_entity,
    ia32_set_frame_offset,
    ia32_get_sp_bias,
    ia32_get_op_estimated_cost,
    ia32_possible_memory_operand,
    ia32_perform_memory_operand,
};
static int gprof = 0;

static void ia32_before_abi(ir_graph *irg)
{
    if (gprof) {
        static ir_entity *mcount = NULL;
        if (mcount == NULL) {
            ir_type *tp = new_type_method(0, 0);
            ident   *id = new_id_from_str("mcount");
            mcount = new_entity(get_glob_type(), id, tp);
            /* FIXME: enter the right ld_ident here */
            set_entity_ld_ident(mcount, get_entity_ident(mcount));
            set_entity_visibility(mcount, ir_visibility_external);
        }
        instrument_initcall(irg, mcount);
    }
}
/**
 * Transforms the standard firm graph into an ia32 firm graph.
 */
static void ia32_prepare_graph(ir_graph *irg)
{
    ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);

#ifdef FIRM_GRGEN_BE
    switch (be_transformer) {
    case TRANSFORMER_DEFAULT:
        /* transform remaining nodes into assembler instructions */
        ia32_transform_graph(irg);
        break;

    case TRANSFORMER_PBQP:
    case TRANSFORMER_RAND:
        /* transform nodes into assembler instructions by PBQP magic */
        ia32_transform_graph_by_pbqp(irg);
        break;

    default:
        panic("invalid transformer");
    }
#else
    ia32_transform_graph(irg);
#endif

    /* do local optimizations (mainly CSE) */
    optimize_graph_df(irg);
    /* backend code expects that outedges are always enabled */
    assure_edges(irg);

    if (irg_data->dump)
        dump_ir_graph(irg, "transformed");

    /* optimize address mode */
    ia32_optimize_graph(irg);

    /* do code placement, to optimize the position of constants */
    place_code(irg);
    /* backend code expects that outedges are always enabled */
    assure_edges(irg);

    if (irg_data->dump)
        dump_ir_graph(irg, "place");
}
ir_node *ia32_turn_back_am(ir_node *node)
{
    dbg_info *dbgi  = get_irn_dbg_info(node);
    ir_graph *irg   = get_irn_irg(node);
    ir_node  *block = get_nodes_block(node);
    ir_node  *base  = get_irn_n(node, n_ia32_base);
    ir_node  *idx   = get_irn_n(node, n_ia32_index);
    ir_node  *mem   = get_irn_n(node, n_ia32_mem);
    ir_node  *noreg;

    ir_node *load     = new_bd_ia32_Load(dbgi, block, base, idx, mem);
    ir_node *load_res = new_rd_Proj(dbgi, load, mode_Iu, pn_ia32_Load_res);

    ia32_copy_am_attrs(load, node);
    if (is_ia32_is_reload(node))
        set_ia32_is_reload(load);
    set_irn_n(node, n_ia32_mem, get_irg_no_mem(irg));

    switch (get_ia32_am_support(node)) {
        case ia32_am_unary:
            set_irn_n(node, n_ia32_unary_op, load_res);
            break;

        case ia32_am_binary:
            if (is_ia32_Immediate(get_irn_n(node, n_ia32_binary_right))) {
                set_irn_n(node, n_ia32_binary_left, load_res);
            } else {
                set_irn_n(node, n_ia32_binary_right, load_res);
            }
            break;

        default:
            panic("Unknown AM type");
    }
    noreg = ia32_new_NoReg_gp(irg);
    set_irn_n(node, n_ia32_base,  noreg);
    set_irn_n(node, n_ia32_index, noreg);
    set_ia32_am_offs_int(node, 0);
    set_ia32_am_sc(node, NULL);
    set_ia32_am_scale(node, 0);
    clear_ia32_am_sc_sign(node);

    /* rewire mem-proj */
    if (get_irn_mode(node) == mode_T) {
        foreach_out_edge(node, edge) {
            ir_node *out = get_edge_src_irn(edge);
            if (get_irn_mode(out) == mode_M) {
                set_Proj_pred(out, load);
                set_Proj_proj(out, pn_ia32_Load_M);
                break;
            }
        }
    }

    set_ia32_op_type(node, ia32_Normal);
    if (sched_is_scheduled(node))
        sched_add_before(node, load);

    return load_res;
}
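/* Illustration (not part of the driver): turning source address mode back
 * splits the folded load out again, e.g.
 *
 *     add ecx, [eax+4]
 *
 * becomes
 *
 *     mov tmp, [eax+4]
 *     add ecx, tmp
 *
 * which is needed when the memory-operand form gets in the way, for example
 * when rematerializing a flags producer (see flags_remat below). */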
static ir_node *flags_remat(ir_node *node, ir_node *after)
{
    /* we should turn back source address mode when rematerializing nodes */
    ia32_op_type_t type;
    ir_node       *block;
    ir_node       *copy;

    if (is_Block(after)) {
        block = after;
    } else {
        block = get_nodes_block(after);
    }

    type = get_ia32_op_type(node);
    switch (type) {
        case ia32_AddrModeS:
            ia32_turn_back_am(node);
            break;

        case ia32_AddrModeD:
            /* TODO implement this later... */
            panic("found DestAM with flag user %+F this should not happen", node);

        default: assert(type == ia32_Normal); break;
    }

    copy = exact_copy(node);
    set_nodes_block(copy, block);
    sched_add_after(after, copy);
    return copy;
}
/**
 * Called before the register allocator.
 */
static void ia32_before_ra(ir_graph *irg)
{
    /* setup fpu rounding modes */
    ia32_setup_fpu_mode(irg);

    /* fixup flags */
    be_sched_fix_flags(irg, &ia32_reg_classes[CLASS_ia32_flags],
                       &flags_remat, NULL);

    be_add_missing_keeps(irg);
}
/**
 * Transforms a be_Reload into an ia32 Load.
 */
static void transform_to_Load(ir_node *node)
{
    ir_graph  *irg       = get_irn_irg(node);
    dbg_info  *dbgi      = get_irn_dbg_info(node);
    ir_node   *block     = get_nodes_block(node);
    ir_entity *ent       = be_get_frame_entity(node);
    ir_mode   *mode      = get_irn_mode(node);
    ir_mode   *spillmode = get_spill_mode(node);
    ir_node   *noreg     = ia32_new_NoReg_gp(irg);
    ir_node   *ptr       = get_irg_frame(irg);
    ir_node   *mem       = get_irn_n(node, n_be_Reload_mem);
    ir_node   *new_op, *proj;
    const arch_register_t *reg;

    if (mode_is_float(spillmode)) {
        if (ia32_cg_config.use_sse2)
            new_op = new_bd_ia32_xLoad(dbgi, block, ptr, noreg, mem, spillmode);
        else
            new_op = new_bd_ia32_fld(dbgi, block, ptr, noreg, mem, spillmode);
    } else if (get_mode_size_bits(spillmode) == 128) {
        /* Reload 128 bit SSE registers */
        new_op = new_bd_ia32_xxLoad(dbgi, block, ptr, noreg, mem);
    } else {
        new_op = new_bd_ia32_Load(dbgi, block, ptr, noreg, mem);
    }

    set_ia32_op_type(new_op, ia32_AddrModeS);
    set_ia32_ls_mode(new_op, spillmode);
    set_ia32_frame_ent(new_op, ent);
    set_ia32_use_frame(new_op);
    set_ia32_is_reload(new_op);

    DBG_OPT_RELOAD2LD(node, new_op);

    proj = new_rd_Proj(dbgi, new_op, mode, pn_ia32_Load_res);

    sched_replace(node, new_op);

    /* copy the register from the old node to the new Load */
    reg = arch_get_irn_register(node);
    arch_set_irn_register(proj, reg);

    SET_IA32_ORIG_NODE(new_op, node);

    exchange(node, proj);
}
/**
 * Transforms a be_Spill node into an ia32 Store.
 */
static void transform_to_Store(ir_node *node)
{
    ir_graph      *irg      = get_irn_irg(node);
    dbg_info      *dbgi     = get_irn_dbg_info(node);
    ir_node       *block    = get_nodes_block(node);
    ir_entity     *ent      = be_get_frame_entity(node);
    const ir_node *spillval = get_irn_n(node, n_be_Spill_val);
    ir_mode       *mode     = get_spill_mode(spillval);
    ir_node       *noreg    = ia32_new_NoReg_gp(irg);
    ir_node       *nomem    = get_irg_no_mem(irg);
    ir_node       *ptr      = get_irg_frame(irg);
    ir_node       *val      = get_irn_n(node, n_be_Spill_val);
    ir_node       *store;
    ir_node       *res;

    if (mode_is_float(mode)) {
        if (ia32_cg_config.use_sse2) {
            store = new_bd_ia32_xStore(dbgi, block, ptr, noreg, nomem, val);
            res   = new_r_Proj(store, mode_M, pn_ia32_xStore_M);
        } else {
            store = new_bd_ia32_fst(dbgi, block, ptr, noreg, nomem, val, mode);
            res   = new_r_Proj(store, mode_M, pn_ia32_fst_M);
        }
    } else if (get_mode_size_bits(mode) == 128) {
        /* Spill 128 bit SSE registers */
        store = new_bd_ia32_xxStore(dbgi, block, ptr, noreg, nomem, val);
        res   = new_r_Proj(store, mode_M, pn_ia32_xxStore_M);
    } else {
        store = get_mode_size_bits(mode) == 8
            ? new_bd_ia32_Store_8bit(dbgi, block, ptr, noreg, nomem, val)
            : new_bd_ia32_Store     (dbgi, block, ptr, noreg, nomem, val);
        res   = new_r_Proj(store, mode_M, pn_ia32_Store_M);
    }

    set_ia32_op_type(store, ia32_AddrModeD);
    set_ia32_ls_mode(store, mode);
    set_ia32_frame_ent(store, ent);
    set_ia32_use_frame(store);
    set_ia32_is_spill(store);
    SET_IA32_ORIG_NODE(store, node);
    DBG_OPT_SPILL2ST(node, store);

    sched_replace(node, store);
    exchange(node, res);
}
static ir_node *create_push(ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent)
{
    dbg_info *dbgi  = get_irn_dbg_info(node);
    ir_node  *block = get_nodes_block(node);
    ir_graph *irg   = get_irn_irg(node);
    ir_node  *noreg = ia32_new_NoReg_gp(irg);
    ir_node  *frame = get_irg_frame(irg);

    ir_node *push = new_bd_ia32_Push(dbgi, block, frame, noreg, mem, noreg, sp);

    set_ia32_frame_ent(push, ent);
    set_ia32_use_frame(push);
    set_ia32_op_type(push, ia32_AddrModeS);
    set_ia32_ls_mode(push, mode_Is);
    set_ia32_is_spill(push);

    sched_add_before(schedpoint, push);
    return push;
}

static ir_node *create_pop(ir_node *node, ir_node *schedpoint, ir_node *sp, ir_entity *ent)
{
    dbg_info *dbgi  = get_irn_dbg_info(node);
    ir_node  *block = get_nodes_block(node);
    ir_graph *irg   = get_irn_irg(node);
    ir_node  *noreg = ia32_new_NoReg_gp(irg);
    ir_node  *frame = get_irg_frame(irg);

    ir_node *pop = new_bd_ia32_PopMem(dbgi, block, frame, noreg,
                                      get_irg_no_mem(irg), sp);

    set_ia32_frame_ent(pop, ent);
    set_ia32_use_frame(pop);
    set_ia32_op_type(pop, ia32_AddrModeD);
    set_ia32_ls_mode(pop, mode_Is);
    set_ia32_is_reload(pop);

    sched_add_before(schedpoint, pop);
    return pop;
}
static ir_node *create_spproj(ir_node *node, ir_node *pred, int pos)
{
    dbg_info *dbgi   = get_irn_dbg_info(node);
    ir_mode  *spmode = mode_Iu;
    const arch_register_t *spreg = &ia32_registers[REG_ESP];
    ir_node *sp;

    sp = new_rd_Proj(dbgi, pred, spmode, pos);
    arch_set_irn_register(sp, spreg);

    return sp;
}
/**
 * Transform MemPerm, currently we do this the ugly way and produce
 * push/pop into/from memory cascades. This is possible without using
 * any registers.
 */
static void transform_MemPerm(ir_node *node)
{
    ir_node  *block = get_nodes_block(node);
    ir_graph *irg   = get_irn_irg(node);
    ir_node  *sp    = be_get_initial_reg_value(irg, &ia32_registers[REG_ESP]);
    int       arity = be_get_MemPerm_entity_arity(node);
    ir_node **pops  = ALLOCAN(ir_node*, arity);
    ir_node  *in[1];
    ir_node  *keep;
    int       i;

    /* create Pushs */
    for (i = 0; i < arity; ++i) {
        ir_entity *inent   = be_get_MemPerm_in_entity(node, i);
        ir_entity *outent  = be_get_MemPerm_out_entity(node, i);
        ir_type   *enttype = get_entity_type(inent);
        unsigned   entsize = get_type_size_bytes(enttype);
        unsigned   entsize2 = get_type_size_bytes(get_entity_type(outent));
        ir_node   *mem     = get_irn_n(node, i + 1);
        ir_node   *push;

        /* work around cases where entities have different sizes */
        if (entsize2 < entsize)
            entsize = entsize2;
        assert((entsize == 4 || entsize == 8) && "spillslot on x86 should be 32 or 64 bit");

        push = create_push(node, node, sp, mem, inent);
        sp = create_spproj(node, push, pn_ia32_Push_stack);
        if (entsize == 8) {
            /* add another push after the first one */
            push = create_push(node, node, sp, mem, inent);
            add_ia32_am_offs_int(push, 4);
            sp = create_spproj(node, push, pn_ia32_Push_stack);
        }

        set_irn_n(node, i, new_r_Bad(irg, mode_X));
    }

    /* create Pops */
    for (i = arity - 1; i >= 0; --i) {
        ir_entity *inent   = be_get_MemPerm_in_entity(node, i);
        ir_entity *outent  = be_get_MemPerm_out_entity(node, i);
        ir_type   *enttype = get_entity_type(outent);
        unsigned   entsize = get_type_size_bytes(enttype);
        unsigned   entsize2 = get_type_size_bytes(get_entity_type(inent));
        ir_node   *pop;

        /* work around cases where entities have different sizes */
        if (entsize2 < entsize)
            entsize = entsize2;
        assert((entsize == 4 || entsize == 8) && "spillslot on x86 should be 32 or 64 bit");

        pop = create_pop(node, node, sp, outent);
        sp = create_spproj(node, pop, pn_ia32_Pop_stack);
        if (entsize == 8) {
            add_ia32_am_offs_int(pop, 4);

            /* add another pop after the first one */
            pop = create_pop(node, node, sp, outent);
            sp = create_spproj(node, pop, pn_ia32_Pop_stack);
        }

        pops[i] = pop;
    }

    in[0] = sp;
    keep  = be_new_Keep(block, 1, in);
    sched_replace(node, keep);

    /* exchange memprojs */
    foreach_out_edge_safe(node, edge) {
        ir_node *proj = get_edge_src_irn(edge);
        int p = get_Proj_proj(proj);

        assert(p < arity);

        set_Proj_pred(proj, pops[p]);
        set_Proj_proj(proj, pn_ia32_Pop_M);
    }

    /* remove memperm */
    kill_node(node);
}
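/* Illustration (not part of the driver): a MemPerm swapping two 32-bit spill
 * slots A and B becomes a push/pop cascade that needs no spare register:
 *
 *     push [A]
 *     push [B]
 *     pop  [A]    ; A receives B's old value
 *     pop  [B]    ; B receives A's old value
 */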
/**
 * Block-Walker: Calls the transform functions Spill and Reload.
 */
static void ia32_after_ra_walker(ir_node *block, void *env)
{
    ir_node *node, *prev;
    (void)env;

    /* beware: the schedule is changed here */
    for (node = sched_last(block); !sched_is_begin(node); node = prev) {
        prev = sched_prev(node);

        if (be_is_Reload(node)) {
            transform_to_Load(node);
        } else if (be_is_Spill(node)) {
            transform_to_Store(node);
        } else if (be_is_MemPerm(node)) {
            transform_MemPerm(node);
        }
    }
}
/**
 * Collects nodes that need frame entities assigned.
 */
static void ia32_collect_frame_entity_nodes(ir_node *node, void *data)
{
    be_fec_env_t  *env = (be_fec_env_t*)data;
    const ir_mode *mode;
    int            align;

    if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
        mode  = get_spill_mode_mode(get_irn_mode(node));
        align = get_mode_size_bytes(mode);
    } else if (is_ia32_irn(node)                 &&
               get_ia32_frame_ent(node) == NULL  &&
               is_ia32_use_frame(node)) {
        if (is_ia32_need_stackent(node))
            goto need_stackent;

        switch (get_ia32_irn_opcode(node)) {
need_stackent:
            case iro_ia32_Load: {
                const ia32_attr_t *attr = get_ia32_attr_const(node);

                if (attr->data.need_32bit_stackent) {
                    mode = mode_Is;
                } else if (attr->data.need_64bit_stackent) {
                    mode = mode_Ls;
                } else {
                    mode = get_ia32_ls_mode(node);
                    if (is_ia32_is_reload(node))
                        mode = get_spill_mode_mode(mode);
                }
                align = get_mode_size_bytes(mode);
                break;
            }

            case iro_ia32_xLoad: {
                mode  = get_ia32_ls_mode(node);
                align = 4;
                break;
            }

            case iro_ia32_FldCW: {
                /* although 2 byte would be enough 4 byte performs best */
                mode  = mode_Iu;
                align = 4;
                break;
            }

            default:
#ifndef NDEBUG
                panic("unexpected frame user while collecting frame entity nodes");

            case iro_ia32_FnstCW:
            case iro_ia32_fisttp:
            case iro_ia32_xStore:
            case iro_ia32_xStoreSimple:
#endif
                return;
        }
    } else {
        return;
    }
    be_node_needs_frame_entity(env, node, mode, align);
}
static int determine_ebp_input(ir_node *ret)
{
    const arch_register_t *bp = &ia32_registers[REG_EBP];
    int arity = get_irn_arity(ret);
    int i;

    for (i = 0; i < arity; ++i) {
        ir_node *input = get_irn_n(ret, i);
        if (arch_get_irn_register(input) == bp)
            return i;
    }
    panic("no ebp input found at %+F", ret);
}
static void introduce_epilog(ir_node *ret)
{
    const arch_register_t *sp         = &ia32_registers[REG_ESP];
    const arch_register_t *bp         = &ia32_registers[REG_EBP];
    ir_graph              *irg        = get_irn_irg(ret);
    ir_type               *frame_type = get_irg_frame_type(irg);
    unsigned               frame_size = get_type_size_bytes(frame_type);
    be_stack_layout_t     *layout     = be_get_irg_stack_layout(irg);
    ir_node               *block      = get_nodes_block(ret);
    ir_node               *first_sp   = get_irn_n(ret, n_be_Return_sp);
    ir_node               *curr_sp    = first_sp;
    ir_mode               *mode_gp    = ia32_reg_classes[CLASS_ia32_gp].mode;

    if (!layout->sp_relative) {
        int      n_ebp   = determine_ebp_input(ret);
        ir_node *curr_bp = get_irn_n(ret, n_ebp);
        if (ia32_cg_config.use_leave) {
            ir_node *leave = new_bd_ia32_Leave(NULL, block, curr_bp);
            curr_bp        = new_r_Proj(leave, mode_gp, pn_ia32_Leave_frame);
            curr_sp        = new_r_Proj(leave, mode_gp, pn_ia32_Leave_stack);
            arch_set_irn_register(curr_bp, bp);
            arch_set_irn_register(curr_sp, sp);
            sched_add_before(ret, leave);
        } else {
            ir_node *pop;
            ir_node *curr_mem = get_irn_n(ret, n_be_Return_mem);
            /* copy ebp to esp */
            curr_sp = new_bd_ia32_CopyEbpEsp(NULL, block, curr_bp);
            arch_set_irn_register(curr_sp, sp);
            sched_add_before(ret, curr_sp);

            /* pop ebp */
            pop      = new_bd_ia32_PopEbp(NULL, block, curr_mem, curr_sp);
            curr_bp  = new_r_Proj(pop, mode_gp, pn_ia32_PopEbp_res);
            curr_sp  = new_r_Proj(pop, mode_gp, pn_ia32_PopEbp_stack);
            curr_mem = new_r_Proj(pop, mode_M, pn_ia32_Pop_M);
            arch_set_irn_register(curr_bp, bp);
            arch_set_irn_register(curr_sp, sp);
            sched_add_before(ret, pop);

            set_irn_n(ret, n_be_Return_mem, curr_mem);
        }
        set_irn_n(ret, n_ebp, curr_bp);
    } else {
        ir_node *incsp = be_new_IncSP(sp, block, curr_sp, -(int)frame_size, 0);
        sched_add_before(ret, incsp);
        curr_sp = incsp;
    }
    set_irn_n(ret, n_be_Return_sp, curr_sp);

    /* keep verifier happy... */
    if (get_irn_n_edges(first_sp) == 0 && is_Proj(first_sp)) {
        kill_node(first_sp);
    }
}
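/* Illustration (not part of the driver): depending on the configuration the
 * generated epilog is one of
 *
 *     leave                   ; use_leave: restore esp and ebp at once
 *
 *     mov esp, ebp            ; otherwise: CopyEbpEsp ...
 *     pop ebp                 ; ... followed by PopEbp
 *
 *     add esp, <frame_size>   ; sp_relative frames: a plain IncSP
 *
 * each followed by the return. */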
/**
 * Put the prolog code at the beginning and epilog code before each return.
 */
static void introduce_prolog_epilog(ir_graph *irg)
{
    const arch_register_t *sp         = &ia32_registers[REG_ESP];
    const arch_register_t *bp         = &ia32_registers[REG_EBP];
    ir_node               *start      = get_irg_start(irg);
    ir_node               *block      = get_nodes_block(start);
    ir_type               *frame_type = get_irg_frame_type(irg);
    unsigned               frame_size = get_type_size_bytes(frame_type);
    be_stack_layout_t     *layout     = be_get_irg_stack_layout(irg);
    ir_node               *initial_sp = be_get_initial_reg_value(irg, sp);
    ir_node               *curr_sp    = initial_sp;
    ir_mode               *mode_gp    = mode_Iu;

    if (!layout->sp_relative) {
        /* push ebp */
        ir_node *mem        = get_irg_initial_mem(irg);
        ir_node *noreg      = ia32_new_NoReg_gp(irg);
        ir_node *initial_bp = be_get_initial_reg_value(irg, bp);
        ir_node *push       = new_bd_ia32_Push(NULL, block, noreg, noreg, mem, initial_bp, initial_sp);
        ir_node *incsp;

        curr_sp = new_r_Proj(push, mode_gp, pn_ia32_Push_stack);
        arch_set_irn_register(curr_sp, sp);
        sched_add_after(start, push);

        /* move esp to ebp */
        ir_node *const curr_bp = be_new_Copy(block, curr_sp);
        sched_add_after(push, curr_bp);
        be_set_constr_single_reg_out(curr_bp, 0, bp, arch_register_req_type_ignore);
        curr_sp = be_new_CopyKeep_single(block, curr_sp, curr_bp);
        sched_add_after(curr_bp, curr_sp);
        be_set_constr_single_reg_out(curr_sp, 0, sp, arch_register_req_type_produces_sp);
        edges_reroute_except(initial_bp, curr_bp, push);

        incsp = be_new_IncSP(sp, block, curr_sp, frame_size, 0);
        edges_reroute_except(initial_sp, incsp, push);
        sched_add_after(curr_sp, incsp);

        /* make sure the initial IncSP is really used by someone */
        if (get_irn_n_edges(incsp) <= 1) {
            ir_node *in[] = { incsp };
            ir_node *keep = be_new_Keep(block, 1, in);
            sched_add_after(incsp, keep);
        }

        layout->initial_bias = -4;
    } else {
        ir_node *const incsp = be_new_IncSP(sp, block, initial_sp, frame_size, 0);
        edges_reroute_except(initial_sp, incsp, incsp);
        sched_add_after(start, incsp);
    }
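    /* Illustration (not part of the driver): with a frame pointer the code
     * built above corresponds to the classic prolog
     *
     *     push ebp
     *     mov  ebp, esp
     *     sub  esp, <frame_size>
     *
     * while sp_relative frames only need the "sub esp, <frame_size>"
     * (the IncSP node). */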
    /* introduce epilog for every return node */
    {
        ir_node *end_block = get_irg_end_block(irg);
        int      arity     = get_irn_arity(end_block);
        int      i;

        for (i = 0; i < arity; ++i) {
            ir_node *ret = get_irn_n(end_block, i);
            assert(be_is_Return(ret));
            introduce_epilog(ret);
        }
    }
}
/**
 * Last touchups for the graph before emit: x87 simulation to replace the
 * virtual with real x87 instructions, creating a block schedule and peephole
 * optimisations.
 */
static void ia32_finish_graph(ir_graph *irg)
{
    ia32_irg_data_t   *irg_data     = ia32_get_irg_data(irg);
    be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
    bool               at_begin     = stack_layout->sp_relative;
    be_fec_env_t      *fec_env      = be_new_frame_entity_coalescer(irg);

    /* create and coalesce frame entities */
    irg_walk_graph(irg, NULL, ia32_collect_frame_entity_nodes, fec_env);
    be_assign_entities(fec_env, ia32_set_frame_entity, at_begin);
    be_free_frame_entity_coalescer(fec_env);

    irg_block_walk_graph(irg, NULL, ia32_after_ra_walker, NULL);

    introduce_prolog_epilog(irg);

    /* fix stack entity offsets */
    be_abi_fix_stack_nodes(irg);
    be_abi_fix_stack_bias(irg);

    /* fix 2-address code constraints */
    ia32_finish_irg(irg);

    /* we might have to rewrite x87 virtual registers */
    if (irg_data->do_x87_sim) {
        ia32_x87_simulate_graph(irg);
    }

    /* do peephole optimisations */
    ia32_peephole_optimization(irg);

    be_remove_dead_nodes_from_schedule(irg);

    /* create block schedule, this also removes empty blocks which might
     * produce critical edges */
    irg_data->blk_sched = be_create_block_schedule(irg);
}
/**
 * Emits the code, closes the output file and frees
 * the code generator interface.
 */
static void ia32_emit(ir_graph *irg)
{
    if (ia32_cg_config.emit_machcode) {
        ia32_gen_binary_routine(irg);
    } else {
        ia32_gen_routine(irg);
    }
}

/**
 * Returns the node representing the PIC base.
 */
static ir_node *ia32_get_pic_base(ir_graph *irg)
{
    ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
    ir_node         *block;
    ir_node         *get_eip = irg_data->get_eip;
    if (get_eip != NULL)
        return get_eip;

    block             = get_irg_start_block(irg);
    get_eip           = new_bd_ia32_GetEIP(NULL, block);
    irg_data->get_eip = get_eip;

    return get_eip;
}
/**
 * Initializes an IA32 code generator.
 */
static void ia32_init_graph(ir_graph *irg)
{
    struct obstack  *obst     = be_get_be_obst(irg);
    ia32_irg_data_t *irg_data = OALLOCZ(obst, ia32_irg_data_t);

    irg_data->dump = (be_options.dump_flags & DUMP_BE) ? 1 : 0;

    if (gprof) {
        /* Linux gprof implementation needs base pointer */
        be_options.omit_fp = 0;
    }

    be_birg_from_irg(irg)->isa_link = irg_data;
}
static const tarval_mode_info mo_integer = {
    TVO_DECIMAL,
    NULL,
    NULL,
};

/*
 * Set the tarval output mode of all integer modes to decimal.
 */
static void set_tarval_output_modes(void)
{
    size_t i;

    for (i = ir_get_n_modes(); i > 0;) {
        ir_mode *mode = ir_get_mode(--i);

        if (mode_is_int(mode))
            set_tarval_mode_output_option(mode, &mo_integer);
    }
}
extern const arch_isa_if_t ia32_isa_if;

static void init_asm_constraints(void)
{
    be_init_default_asm_constraint_flags();

    asm_constraint_flags['a'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['b'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['c'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['d'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['D'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['S'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['Q'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['q'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['A'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['l'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['R'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['r'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['p'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['f'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['t'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['u'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['Y'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['X'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
    asm_constraint_flags['n'] = ASM_CONSTRAINT_FLAG_SUPPORTS_IMMEDIATE;
    asm_constraint_flags['g'] = ASM_CONSTRAINT_FLAG_SUPPORTS_IMMEDIATE;

    /* no support for autodecrement/autoincrement */
    asm_constraint_flags['<'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
    asm_constraint_flags['>'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
    /* no float consts */
    asm_constraint_flags['E'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
    asm_constraint_flags['F'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
    /* makes no sense on x86 */
    asm_constraint_flags['s'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
    /* no support for sse consts yet */
    asm_constraint_flags['C'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
    /* no support for x87 consts yet */
    asm_constraint_flags['G'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
    /* no support for mmx registers yet */
    asm_constraint_flags['y'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
    /* not available in 32bit mode */
    asm_constraint_flags['Z'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
    asm_constraint_flags['e'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;

    /* no code yet to determine register class needed... */
    asm_constraint_flags['X'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
}
/**
 * Check if Mux(sel, mux_true, mux_false) would represent a Max or Min operation
 */
static bool mux_is_float_min_max(ir_node *sel, ir_node *mux_true,
                                 ir_node *mux_false)
{
    ir_node    *cmp_l;
    ir_node    *cmp_r;
    ir_relation relation;

    if (!is_Cmp(sel))
        return false;

    cmp_l = get_Cmp_left(sel);
    cmp_r = get_Cmp_right(sel);
    if (!mode_is_float(get_irn_mode(cmp_l)))
        return false;

    /* check for min/max. They're defined as (C semantics):
     *  min(a, b) = a < b ? a : b
     *  or min(a, b) = a <= b ? a : b
     *  max(a, b) = a > b ? a : b
     *  or max(a, b) = a >= b ? a : b
     * (Note we only handle float min/max here)
     */
    relation = get_Cmp_relation(sel);
    switch (relation) {
    case ir_relation_greater_equal:
    case ir_relation_greater:
        /* this is a max */
        if (cmp_l == mux_true && cmp_r == mux_false)
            return true;
        break;
    case ir_relation_less_equal:
    case ir_relation_less:
        /* this is a min */
        if (cmp_l == mux_true && cmp_r == mux_false)
            return true;
        break;
    case ir_relation_unordered_greater_equal:
    case ir_relation_unordered_greater:
        /* this is a min */
        if (cmp_l == mux_false && cmp_r == mux_true)
            return true;
        break;
    case ir_relation_unordered_less_equal:
    case ir_relation_unordered_less:
        /* this is a max */
        if (cmp_l == mux_false && cmp_r == mux_true)
            return true;
        break;

    default:
        break;
    }

    return false;
}
static bool mux_is_set(ir_node *sel, ir_node *mux_true, ir_node *mux_false)
{
    ir_mode *mode = get_irn_mode(mux_true);
    (void)sel;

    if (!mode_is_int(mode) && !mode_is_reference(mode)
            && mode != mode_b)
        return false;

    if (is_Const(mux_true) && is_Const(mux_false)) {
        /* we can create a set plus up to 3 instructions for any combination
         * of constants */
        return true;
    }

    return false;
}

static bool mux_is_float_const_const(ir_node *sel, ir_node *mux_true,
                                     ir_node *mux_false)
{
    (void)sel;

    if (!mode_is_float(get_irn_mode(mux_true)))
        return false;

    return is_Const(mux_true) && is_Const(mux_false);
}
static bool mux_is_doz(ir_node *sel, ir_node *mux_true, ir_node *mux_false)
{
    ir_node    *cmp_left;
    ir_node    *cmp_right;
    ir_node    *sub_left;
    ir_node    *sub_right;
    ir_mode    *mode;
    ir_relation relation;

    if (!is_Cmp(sel))
        return false;

    mode = get_irn_mode(mux_true);
    if (mode_is_signed(mode) || mode_is_float(mode))
        return false;

    relation  = get_Cmp_relation(sel);
    cmp_left  = get_Cmp_left(sel);
    cmp_right = get_Cmp_right(sel);

    /* "move" zero constant to false input */
    if (is_Const(mux_true) && is_Const_null(mux_true)) {
        ir_node *tmp = mux_false;
        mux_false = mux_true;
        mux_true  = tmp;
        relation  = get_negated_relation(relation);
    }
    if (!is_Const(mux_false) || !is_Const_null(mux_false))
        return false;
    if (!is_Sub(mux_true))
        return false;
    sub_left  = get_Sub_left(mux_true);
    sub_right = get_Sub_right(mux_true);

    /* Mux(a >=u b, 0, a-b) */
    if ((relation & ir_relation_greater)
            && sub_left == cmp_left && sub_right == cmp_right)
        return true;
    /* Mux(a <=u b, 0, b-a) */
    if ((relation & ir_relation_less)
            && sub_left == cmp_right && sub_right == cmp_left)
        return true;

    return false;
}
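/* Illustration (not part of the driver): "doz" is the difference-or-zero
 * pattern, doz(a, b) = a > b ? a - b : 0 for unsigned a, b. Recognizing it
 * here lets the backend emit a short branchless sequence instead of a
 * compare-and-jump. */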
static int ia32_is_mux_allowed(ir_node *sel, ir_node *mux_false,
                               ir_node *mux_true)
{
    ir_mode *mode;

    /* middleend can handle some things */
    if (ir_is_optimizable_mux(sel, mux_false, mux_true))
        return true;
    /* we can handle Set for all modes and compares */
    if (mux_is_set(sel, mux_true, mux_false))
        return true;
    /* SSE has own min/max operations */
    if (ia32_cg_config.use_sse2
            && mux_is_float_min_max(sel, mux_true, mux_false))
        return true;
    /* we can handle Mux(?, Const[f], Const[f]) */
    if (mux_is_float_const_const(sel, mux_true, mux_false)) {
#ifdef FIRM_GRGEN_BE
        /* well, some code selectors can't handle it */
        if (be_transformer != TRANSFORMER_PBQP
                && be_transformer != TRANSFORMER_RAND)
            return true;
#else
        return true;
#endif
    }

    /* no support for 64bit inputs to cmov */
    mode = get_irn_mode(mux_true);
    if (get_mode_size_bits(mode) > 32)
        return false;
    /* we can handle Abs for all modes and compares (except 64bit) */
    if (ir_mux_is_abs(sel, mux_false, mux_true) != 0)
        return true;
    /* we can't handle MuxF yet */
    if (mode_is_float(mode))
        return false;

    if (mux_is_doz(sel, mux_true, mux_false))
        return true;

    /* Check Cmp before the node */
    if (is_Cmp(sel)) {
        ir_mode *cmp_mode = get_irn_mode(get_Cmp_left(sel));

        /* we can't handle 64bit compares */
        if (get_mode_size_bits(cmp_mode) > 32)
            return false;

        /* we can't handle float compares */
        if (mode_is_float(cmp_mode))
            return false;
    }

    /* did we disable cmov generation? */
    if (!ia32_cg_config.use_cmov)
        return false;

    /* we can use a cmov */
    return true;
}
/**
 * Create the trampoline code.
 */
static ir_node *ia32_create_trampoline_fkt(ir_node *block, ir_node *mem, ir_node *trampoline, ir_node *env, ir_node *callee)
{
    ir_graph *const irg  = get_irn_irg(block);
    ir_node  *      p    = trampoline;
    ir_mode  *const mode = get_irn_mode(p);
    ir_node  *const one  = new_r_Const(irg, get_mode_one(mode_Iu));
    ir_node  *const four = new_r_Const_long(irg, mode_Iu, 4);
    ir_node  *      st;

    /* mov ecx, <env> */
    st  = new_r_Store(block, mem, p, new_r_Const_long(irg, mode_Bu, 0xb9), cons_none);
    mem = new_r_Proj(st, mode_M, pn_Store_M);
    p   = new_r_Add(block, p, one, mode);
    st  = new_r_Store(block, mem, p, env, cons_none);
    mem = new_r_Proj(st, mode_M, pn_Store_M);
    p   = new_r_Add(block, p, four, mode);
    /* jmp <callee> */
    st  = new_r_Store(block, mem, p, new_r_Const_long(irg, mode_Bu, 0xe9), cons_none);
    mem = new_r_Proj(st, mode_M, pn_Store_M);
    p   = new_r_Add(block, p, one, mode);
    st  = new_r_Store(block, mem, p, callee, cons_none);
    mem = new_r_Proj(st, mode_M, pn_Store_M);

    return mem;
}
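/* Illustration (not part of the driver): the bytes written above decode as
 *
 *     b9 xx xx xx xx    mov ecx, <env>
 *     e9 xx xx xx xx    jmp <callee>
 *
 * ten code bytes, fitting within the 12-byte trampoline size (with 4-byte
 * alignment) advertised in ia32_backend_params below. */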
static const ir_settings_arch_dep_t ia32_arch_dep = {
    1,                  /* also use subs */
    4,                  /* maximum shifts */
    63,                 /* maximum shift amount */
    ia32_evaluate_insn, /* evaluate the instruction sequence */

    1,  /* allow Mulhs */
    1,  /* allow Mulus */
    32, /* Mulh allowed up to 32 bit */
};
static backend_params ia32_backend_params = {
    1,     /* support inline assembly */
    1,     /* support Rotl nodes */
    0,     /* little endian */
    1,     /* modulo shift efficient */
    0,     /* non-modulo shift not efficient */
    &ia32_arch_dep, /* arch dependent settings */
    ia32_is_mux_allowed,
    32,    /* machine_size */
    NULL,  /* float arithmetic mode, will be set below */
    NULL,  /* long long type */
    NULL,  /* unsigned long long type */
    NULL,  /* long double type */
    12,    /* size of trampoline code */
    4,     /* alignment of trampoline code */
    ia32_create_trampoline_fkt,
    4      /* alignment of stack parameter */
};
static struct obstack opcodes_obst;

/**
 * Initializes the backend ISA.
 */
static void ia32_init(void)
{
    ir_mode *mode_long_long;
    ir_mode *mode_unsigned_long_long;
    ir_type *type_long_long;
    ir_type *type_unsigned_long_long;

    ia32_setup_cg_config();

    init_asm_constraints();

    ia32_mode_fpcw = new_int_mode("Fpcw", irma_twos_complement, 16, 0, 0);

    /* note mantissa is 64bit but with explicitly encoded 1 so the really
     * usable part as counted by firm is only 63 bits */
    ia32_mode_E = new_float_mode("E", irma_x86_extended_float, 15, 63);
    ia32_type_E = new_type_primitive(ia32_mode_E);
    set_type_size_bytes(ia32_type_E, 12);
    set_type_alignment_bytes(ia32_type_E, 4);

    mode_long_long = new_int_mode("long long", irma_twos_complement, 64, 1, 64);
    type_long_long = new_type_primitive(mode_long_long);
    mode_unsigned_long_long
        = new_int_mode("unsigned long long", irma_twos_complement, 64, 0, 64);
    type_unsigned_long_long = new_type_primitive(mode_unsigned_long_long);

    ia32_backend_params.type_long_long          = type_long_long;
    ia32_backend_params.type_unsigned_long_long = type_unsigned_long_long;

    if (ia32_cg_config.use_sse2 || ia32_cg_config.use_softfloat) {
        ia32_backend_params.mode_float_arithmetic = NULL;
        ia32_backend_params.type_long_double      = NULL;
    } else {
        ia32_backend_params.mode_float_arithmetic = ia32_mode_E;
        ia32_backend_params.type_long_double      = ia32_type_E;
    }

    ia32_register_init();
    obstack_init(&opcodes_obst);
    ia32_create_opcodes(&ia32_irn_ops);
}
static void ia32_finish(void)
{
    if (between_type != NULL) {
        free_type(between_type);
        between_type = NULL;
    }
    ia32_free_opcodes();
    obstack_free(&opcodes_obst, NULL);
}
/**
 * The template that generates a new ISA object.
 * Note that this template can be changed by command line
 * arguments.
 */
static ia32_isa_t ia32_isa_template = {
    {
        &ia32_isa_if,             /* isa interface implementation */
        &ia32_registers[REG_ESP], /* stack pointer register */
        &ia32_registers[REG_EBP], /* base pointer register */
        2,                        /* power of two stack alignment, 2^2 == 4 */
        7,                        /* costs for a spill instruction */
        5,                        /* costs for a reload instruction */
        false,                    /* no custom abi handling */
    },
    IA32_FPU_ARCH_X87, /* FPU architecture */
};
static arch_env_t *ia32_begin_codegeneration(void)
{
    ia32_isa_t *isa = XMALLOC(ia32_isa_t);

    set_tarval_output_modes();

    *isa        = ia32_isa_template;
    isa->tv_ent = pmap_create();

    return &isa->base;
}

/**
 * Closes the output file and frees the ISA structure.
 */
static void ia32_end_codegeneration(void *self)
{
    ia32_isa_t *isa = (ia32_isa_t*)self;
    pmap_destroy(isa->tv_ent);
    free(self);
}
/**
 * Returns the register for parameter nr.
 */
static const arch_register_t *ia32_get_RegParam_reg(unsigned cc, unsigned nr,
                                                    const ir_mode *mode)
{
    static const arch_register_t *gpreg_param_reg_fastcall[] = {
        &ia32_registers[REG_ECX],
        &ia32_registers[REG_EDX],
        NULL
    };
    static const unsigned MAXNUM_GPREG_ARGS = 3;

    static const arch_register_t *gpreg_param_reg_regparam[] = {
        &ia32_registers[REG_EAX],
        &ia32_registers[REG_EDX],
        &ia32_registers[REG_ECX]
    };

    static const arch_register_t *gpreg_param_reg_this[] = {
        &ia32_registers[REG_ECX],
        NULL,
        NULL
    };

    static const arch_register_t *fpreg_sse_param_reg_std[] = {
        &ia32_registers[REG_XMM0],
        &ia32_registers[REG_XMM1],
        &ia32_registers[REG_XMM2],
        &ia32_registers[REG_XMM3],
        &ia32_registers[REG_XMM4],
        &ia32_registers[REG_XMM5],
        &ia32_registers[REG_XMM6],
        &ia32_registers[REG_XMM7]
    };

    static const arch_register_t *fpreg_sse_param_reg_this[] = {
        NULL, /* in case of a "this" pointer, the first parameter must not be a float */
    };
    static const unsigned MAXNUM_SSE_ARGS = 8;

    if ((cc & cc_this_call) && nr == 0)
        return gpreg_param_reg_this[0];

    if (!(cc & cc_reg_param))
        return NULL;

    if (mode_is_float(mode)) {
        if (!ia32_cg_config.use_sse2 || (cc & cc_fpreg_param) == 0)
            return NULL;
        if (nr >= MAXNUM_SSE_ARGS)
            return NULL;

        if (cc & cc_this_call) {
            return fpreg_sse_param_reg_this[nr];
        }
        return fpreg_sse_param_reg_std[nr];
    } else if (mode_is_int(mode) || mode_is_reference(mode)) {
        unsigned num_regparam;

        if (get_mode_size_bits(mode) > 32)
            return NULL;

        if (nr >= MAXNUM_GPREG_ARGS)
            return NULL;

        if (cc & cc_this_call) {
            return gpreg_param_reg_this[nr];
        }
        num_regparam = cc & ~cc_bits;
        if (num_regparam == 0) {
            /* default fastcall */
            return gpreg_param_reg_fastcall[nr];
        }
        if (nr < num_regparam)
            return gpreg_param_reg_regparam[nr];
        return NULL;
    }

    panic("unknown argument mode");
}
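/* Illustration (not part of the driver): with fastcall the first two integer
 * arguments travel in ECX and EDX and the rest on the stack, so for
 * f(a, b, c): a -> ECX, b -> EDX, c -> stack. With regparam(N) the first N
 * arguments use EAX, EDX, ECX instead (see gpreg_param_reg_regparam above). */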
/**
 * Get the ABI restrictions for procedure calls.
 */
static void ia32_get_call_abi(ir_type *method_type, be_abi_call_t *abi)
{
    unsigned            cc;
    int                 n, i, regnum;
    int                 pop_amount = 0;
    be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);

    /* set abi flags for calls */
    /* call_flags.try_omit_fp not changed: can handle both settings */
    call_flags.call_has_imm = false; /* No call immediate, we handle this by ourselves */

    /* set parameter passing style */
    be_abi_call_set_flags(abi, call_flags, &ia32_abi_callbacks);

    cc = get_method_calling_convention(method_type);
    if (get_method_variadicity(method_type) == variadicity_variadic) {
        /* pass all parameters of a variadic function on the stack */
        cc = cc_cdecl_set | (cc & cc_this_call);
    } else {
        if (get_method_additional_properties(method_type) & mtp_property_private &&
                ia32_cg_config.optimize_cc) {
            /* set the fast calling conventions (allowing up to 3) */
            cc = SET_FASTCALL(cc) | 3;
        }
    }

    /* we have to pop the shadow parameter ourself for compound calls */
    if ((get_method_calling_convention(method_type) & cc_compound_ret)
            && !(cc & cc_reg_param)) {
        pop_amount += get_mode_size_bytes(mode_P_data);
    }

    n = get_method_n_params(method_type);
    for (i = regnum = 0; i < n; i++) {
        const arch_register_t *reg  = NULL;
        ir_type               *tp   = get_method_param_type(method_type, i);
        ir_mode               *mode = get_type_mode(tp);

        if (mode != NULL) {
            reg = ia32_get_RegParam_reg(cc, regnum, mode);
        }
        if (reg != NULL) {
            be_abi_call_param_reg(abi, i, reg, ABI_CONTEXT_BOTH);
            ++regnum;
        } else {
            /* Micro optimisation: if the mode is shorter than 4 bytes, load 4 bytes.
             * movl has a shorter opcode than mov[sz][bw]l */
            ir_mode *load_mode = mode;

            if (mode != NULL) {
                unsigned size = get_mode_size_bytes(mode);

                if (cc & cc_callee_clear_stk) {
                    pop_amount += (size + 3U) & ~3U;
                }

                if (size < 4) load_mode = mode_Iu;
            }

            be_abi_call_param_stack(abi, i, load_mode, 4, 0, 0, ABI_CONTEXT_BOTH);
        }
    }

    be_abi_call_set_pop(abi, pop_amount);
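    /* Illustration (not part of the driver): for a callee-cleared convention
     * (stdcall-style) each stack argument is rounded up to 4 bytes and added
     * to pop_amount, so f(int, short, char) pops 12 bytes, emitted as
     * "ret 12". */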
    /* set return registers */
    n = get_method_n_ress(method_type);

    assert(n <= 2 && "more than two results not supported");

    /* In case of 64bit returns, we will have two 32bit values */
    if (n == 2) {
        ir_type *tp   = get_method_res_type(method_type, 0);
        ir_mode *mode = get_type_mode(tp);

        assert(!mode_is_float(mode) && "two FP results not supported");

        tp   = get_method_res_type(method_type, 1);
        mode = get_type_mode(tp);

        assert(!mode_is_float(mode) && "mixed INT, FP results not supported");

        be_abi_call_res_reg(abi, 0, &ia32_registers[REG_EAX], ABI_CONTEXT_BOTH);
        be_abi_call_res_reg(abi, 1, &ia32_registers[REG_EDX], ABI_CONTEXT_BOTH);
    } else if (n == 1) {
        ir_type *tp   = get_method_res_type(method_type, 0);
        ir_mode *mode = get_type_mode(tp);
        const arch_register_t *reg;
        assert(is_atomic_type(tp));

        reg = mode_is_float(mode) ? &ia32_registers[REG_ST0] : &ia32_registers[REG_EAX];

        be_abi_call_res_reg(abi, 0, reg, ABI_CONTEXT_BOTH);
    }
}
static void ia32_mark_remat(ir_node *node)
{
    if (is_ia32_irn(node)) {
        set_ia32_is_remat(node);
    }
}

static asm_constraint_flags_t ia32_parse_asm_constraint(const char **c)
{
    (void)c;

    /* we already added all our simple flags to the flags modifier list in
     * init, so this flag we don't know. */
    return ASM_CONSTRAINT_FLAG_INVALID;
}

static int ia32_is_valid_clobber(const char *clobber)
{
    return ia32_get_clobber_register(clobber) != NULL;
}
static void ia32_lower_for_target(void)
{
    ir_mode *mode_gp = ia32_reg_classes[CLASS_ia32_gp].mode;
    size_t i, n_irgs = get_irp_n_irgs();

    /* perform doubleword lowering */
    lwrdw_param_t lower_dw_params = {
        1,  /* little endian */
        64, /* doubleword size */
        ia32_create_intrinsic_fkt,
        &intrinsic_env,
    };

    /* lower compound param handling
     * Note: we lower compound arguments ourself, since on ia32 we don't
     * have hidden parameters but know where to find the structs on the stack.
     * (This also forces us to always allocate space for the compound arguments
     * on the callframe and we can't just use an arbitrary position on the
     * stackframe.) */
    lower_calls_with_compounds(LF_RETURN_HIDDEN | LF_DONT_LOWER_ARGUMENTS);

    /* replace floating point operations by function calls */
    if (ia32_cg_config.use_softfloat) {
        lower_floating_point();
    }

    for (i = 0; i < n_irgs; ++i) {
        ir_graph *irg = get_irp_irg(i);
        /* break up switches with wide ranges */
        lower_switch(irg, 4, 256, mode_gp);
    }

    ir_prepare_dw_lowering(&lower_dw_params);
    ir_lower_dw_ops();

    for (i = 0; i < n_irgs; ++i) {
        ir_graph *irg = get_irp_irg(i);
        /* lower for mode_b stuff */
        ir_lower_mode_b(irg, mode_Iu);
    }

    for (i = 0; i < n_irgs; ++i) {
        ir_graph *irg = get_irp_irg(i);
        /* Turn all small CopyBs into loads/stores, keep medium-sized CopyBs,
         * so we can generate rep movs later, and turn all big CopyBs into
         * memcpy calls. */
        lower_CopyB(irg, 64, 8193, true);
    }
}
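/* Illustration (not part of the driver): with lower_CopyB(irg, 64, 8193, true)
 * a block copy of fewer than 64 bytes becomes individual load/store pairs,
 * medium sizes up to 8192 bytes stay as CopyB nodes (later emitted as
 * rep movs), and anything larger is turned into a call to memcpy. */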
/**
 * Returns the libFirm configuration parameter for this backend.
 */
static const backend_params *ia32_get_libfirm_params(void)
{
    return &ia32_backend_params;
}

/**
 * Check if the given register is callee or caller save.
 */
static int ia32_register_saved_by(const arch_register_t *reg, int callee)
{
    switch (reg->global_index) {
    case REG_EBX:
    case REG_EBP:
    case REG_ESI:
    case REG_EDI:
        /* callee-saved GP registers */
        return callee;

    case REG_EAX:
    case REG_ECX:
    case REG_EDX:
    case REG_ST0:
    case REG_ST1:
    case REG_ST2:
    case REG_ST3:
    case REG_ST4:
    case REG_ST5:
    case REG_ST6:
    case REG_ST7:
    case REG_XMM0:
    case REG_XMM1:
    case REG_XMM2:
    case REG_XMM3:
    case REG_XMM4:
    case REG_XMM5:
    case REG_XMM6:
    case REG_XMM7:
        /* caller-saved registers */
        return !callee;

    default:
        return 0;
    }
}
static const lc_opt_enum_int_items_t gas_items[] = {
    { "elf",   OBJECT_FILE_FORMAT_ELF },
    { "mingw", OBJECT_FILE_FORMAT_COFF },
    { "macho", OBJECT_FILE_FORMAT_MACH_O },
    { NULL,    0 }
};

static lc_opt_enum_int_var_t gas_var = {
    (int*)&be_gas_object_file_format, gas_items
};

#ifdef FIRM_GRGEN_BE
static const lc_opt_enum_int_items_t transformer_items[] = {
    { "default", TRANSFORMER_DEFAULT },
    { "pbqp",    TRANSFORMER_PBQP },
    { "random",  TRANSFORMER_RAND },
    { NULL,      0 }
};

static lc_opt_enum_int_var_t transformer_var = {
    (int*)&be_transformer, transformer_items
};
#endif
static const lc_opt_table_entry_t ia32_options[] = {
    LC_OPT_ENT_ENUM_INT("gasmode", "set the GAS compatibility mode", &gas_var),
#ifdef FIRM_GRGEN_BE
    LC_OPT_ENT_ENUM_INT("transformer", "the transformer used for code selection", &transformer_var),
#endif
    LC_OPT_ENT_INT ("stackalign", "set power of two stack alignment for calls",
                    &ia32_isa_template.base.stack_alignment),
    LC_OPT_ENT_BOOL("gprof", "create gprof profiling code", &gprof),
    LC_OPT_LAST
};
const arch_isa_if_t ia32_isa_if = {
    ia32_init,
    ia32_finish,
    ia32_get_libfirm_params,
    ia32_lower_for_target,
    ia32_parse_asm_constraint,
    ia32_is_valid_clobber,

    ia32_begin_codegeneration,
    ia32_end_codegeneration,
    ia32_init_graph,
    ia32_get_call_abi,
    ia32_mark_remat,
    ia32_get_pic_base, /* return node used as base in pic code addresses */
    be_new_spill,
    be_new_reload,
    ia32_register_saved_by,

    ia32_handle_intrinsics,
    ia32_before_abi,   /* before abi introduce hook */
    ia32_prepare_graph,
    ia32_before_ra,    /* before register allocation hook */
    ia32_finish_graph, /* called before codegen */
    ia32_emit,         /* emit && done */
};
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_ia32)
void be_init_arch_ia32(void)
{
    lc_opt_entry_t *be_grp   = lc_opt_get_grp(firm_opt_get_root(), "be");
    lc_opt_entry_t *ia32_grp = lc_opt_get_grp(be_grp, "ia32");

    lc_opt_add_table(ia32_grp, ia32_options);
    be_register_isa_if("ia32", &ia32_isa_if);

    ia32_init_emitter();
    ia32_init_finish();
    ia32_init_optimize();
    ia32_init_transform();
    ia32_init_x87();
    ia32_init_architecture();
}