2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This is the main ia32 firm backend driver.
23 * @author Christian Wuerdig
28 #include "lc_opts_enum.h"
36 #include "iredges_t.h"
50 #include "iroptimize.h"
51 #include "instrument.h"
54 #include "lower_calls.h"
55 #include "lower_mode_b.h"
56 #include "lower_softfloat.h"
66 #include "be_dbgout.h"
67 #include "beblocksched.h"
68 #include "bespillutil.h"
69 #include "bespillslots.h"
74 #include "betranshlp.h"
75 #include "belistsched.h"
76 #include "beabihelper.h"
79 #include "bearch_ia32_t.h"
81 #include "ia32_new_nodes.h"
82 #include "gen_ia32_regalloc_if.h"
83 #include "ia32_common_transform.h"
84 #include "ia32_transform.h"
85 #include "ia32_emitter.h"
86 #include "ia32_optimize.h"
88 #include "ia32_dbg_stat.h"
89 #include "ia32_finish.h"
91 #include "ia32_architecture.h"
94 #include "ia32_pbqp_transform.h"
96 transformer_t be_transformer = TRANSFORMER_DEFAULT;
99 ir_mode *ia32_mode_fpcw;
100 ir_mode *ia32_mode_E;
101 ir_type *ia32_type_E;
103 /** The current omit-fp state */
104 static ir_type *omit_fp_between_type = NULL;
105 static ir_type *between_type = NULL;
106 static ir_entity *old_bp_ent = NULL;
107 static ir_entity *ret_addr_ent = NULL;
108 static ir_entity *omit_fp_ret_addr_ent = NULL;
111 * The environment for the intrinsic mapping.
113 static ia32_intrinsic_env_t intrinsic_env = {
115 NULL, /* the irg, these entities belong to */
116 NULL, /* entity for __divdi3 library call */
117 NULL, /* entity for __moddi3 library call */
118 NULL, /* entity for __udivdi3 library call */
119 NULL, /* entity for __umoddi3 library call */
123 typedef ir_node *(*create_const_node_func) (dbg_info *dbgi, ir_node *block);
126 * Used to create per-graph unique pseudo nodes.
128 static inline ir_node *create_const(ir_graph *irg, ir_node **place,
129 create_const_node_func func,
130 const arch_register_t* reg)
132 ir_node *block, *res;
137 block = get_irg_start_block(irg);
138 res = func(NULL, block);
139 arch_set_irn_register(res, reg);
145 /* Creates the unique per irg GP NoReg node. */
146 ir_node *ia32_new_NoReg_gp(ir_graph *irg)
148 ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
149 return create_const(irg, &irg_data->noreg_gp, new_bd_ia32_NoReg_GP,
150 &ia32_registers[REG_GP_NOREG]);
153 ir_node *ia32_new_NoReg_vfp(ir_graph *irg)
155 ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
156 return create_const(irg, &irg_data->noreg_vfp, new_bd_ia32_NoReg_VFP,
157 &ia32_registers[REG_VFP_NOREG]);
160 ir_node *ia32_new_NoReg_xmm(ir_graph *irg)
162 ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
163 return create_const(irg, &irg_data->noreg_xmm, new_bd_ia32_NoReg_XMM,
164 &ia32_registers[REG_XMM_NOREG]);
167 ir_node *ia32_new_Fpu_truncate(ir_graph *irg)
169 ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
170 return create_const(irg, &irg_data->fpu_trunc_mode, new_bd_ia32_ChangeCW,
171 &ia32_registers[REG_FPCW]);
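/* Illustrative usage sketch (not verbatim from this file): the per-irg pseudo
 * node creators above are what the transformation code uses whenever an unused
 * address mode input has to be tied to a "no register" placeholder, e.g. for a
 * gp-class input:
 *
 *     ir_node *noreg = ia32_new_NoReg_gp(irg);
 *     set_irn_n(some_ia32_node, n_ia32_index, noreg);
 *
 * Since create_const() stores the created node through the 'place' pointer,
 * repeated calls return the same per-graph node.
 */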
176 * Returns the admissible NoReg register node for the input at position pos of node irn.
178 static ir_node *ia32_get_admissible_noreg(ir_node *irn, int pos)
180 ir_graph *irg = get_irn_irg(irn);
181 const arch_register_req_t *req = arch_get_irn_register_req_in(irn, pos);
183 assert(req != NULL && "Missing register requirements");
184 if (req->cls == &ia32_reg_classes[CLASS_ia32_gp])
185 return ia32_new_NoReg_gp(irg);
187 if (ia32_cg_config.use_sse2) {
188 return ia32_new_NoReg_xmm(irg);
190 return ia32_new_NoReg_vfp(irg);
194 static arch_irn_class_t ia32_classify(const ir_node *irn)
196 arch_irn_class_t classification = arch_irn_class_none;
198 assert(is_ia32_irn(irn));
200 if (is_ia32_is_reload(irn))
201 classification |= arch_irn_class_reload;
203 if (is_ia32_is_spill(irn))
204 classification |= arch_irn_class_spill;
206 if (is_ia32_is_remat(irn))
207 classification |= arch_irn_class_remat;
209 return classification;
213 * The IA32 ABI callback object.
216 be_abi_call_flags_bits_t flags; /**< The call flags. */
217 ir_graph *irg; /**< The associated graph. */
220 static ir_entity *ia32_get_frame_entity(const ir_node *irn)
222 return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
225 static void ia32_set_frame_entity(ir_node *node, ir_entity *entity)
227 if (is_be_node(node))
228 be_node_set_frame_entity(node, entity);
230 set_ia32_frame_ent(node, entity);
233 static void ia32_set_frame_offset(ir_node *irn, int bias)
235 if (get_ia32_frame_ent(irn) == NULL)
238 if (is_ia32_Pop(irn) || is_ia32_PopMem(irn)) {
239 ir_graph *irg = get_irn_irg(irn);
240 be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
241 if (layout->sp_relative) {
242 /* Pop nodes modify the stack pointer before calculating the
243 * destination address, so fix this here
248 add_ia32_am_offs_int(irn, bias);
251 static int ia32_get_sp_bias(const ir_node *node)
253 if (is_ia32_Call(node))
254 return -(int)get_ia32_call_attr_const(node)->pop;
256 if (is_ia32_Push(node))
259 if (is_ia32_Pop(node) || is_ia32_PopMem(node))
262 if (is_ia32_Leave(node) || is_ia32_CopyEbpEsp(node)) {
263 return SP_BIAS_RESET;
270 * Build the between type and entities if not already built.
272 static void ia32_build_between_type(void)
274 #define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
275 if (! between_type) {
276 ir_type *old_bp_type = new_type_primitive(mode_Iu);
277 ir_type *ret_addr_type = new_type_primitive(mode_Iu);
279 between_type = new_type_struct(IDENT("ia32_between_type"));
280 old_bp_ent = new_entity(between_type, IDENT("old_bp"), old_bp_type);
281 ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type);
283 set_entity_offset(old_bp_ent, 0);
284 set_entity_offset(ret_addr_ent, get_type_size_bytes(old_bp_type));
285 set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
286 set_type_state(between_type, layout_fixed);
288 omit_fp_between_type = new_type_struct(IDENT("ia32_between_type_omit_fp"));
289 omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type);
291 set_entity_offset(omit_fp_ret_addr_ent, 0);
292 set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
293 set_type_state(omit_fp_between_type, layout_fixed);
299 * Produces the type which sits between the stack args and the locals on the stack.
300 * It will contain the return address and space to store the old base pointer.
301 * @return The Firm type modeling the ABI between type.
303 static ir_type *ia32_abi_get_between_type(ir_graph *irg)
305 const be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
306 ia32_build_between_type();
307 return layout->sp_relative ? omit_fp_between_type : between_type;
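/* Rough stack picture of what the between type models (sketch, higher
 * addresses on top; the omit-fp variant contains only the return address):
 *
 *          ... caller frame / stack arguments ...
 *          +-----------------------------+
 *          | return address              |  \  between type
 *          | old base pointer (ebp)      |  /  (ret_addr only if sp_relative)
 *          +-----------------------------+
 *          | local variables, spill slots|
 *          ...
 */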
311 * Return the stack entity that contains the return address.
313 ir_entity *ia32_get_return_address_entity(ir_graph *irg)
315 const be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
316 ia32_build_between_type();
317 return layout->sp_relative ? omit_fp_ret_addr_ent : ret_addr_ent;
321 * Return the stack entity that contains the frame address.
323 ir_entity *ia32_get_frame_address_entity(ir_graph *irg)
325 const be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
326 ia32_build_between_type();
327 return layout->sp_relative ? NULL : old_bp_ent;
331 * Get the estimated cycle count for @p irn.
334 * @param irn The node.
336 * @return The estimated cycle count for this operation
338 static int ia32_get_op_estimated_cost(const ir_node *irn)
341 ia32_op_type_t op_tp;
345 if (!is_ia32_irn(irn))
348 assert(is_ia32_irn(irn));
350 cost = get_ia32_latency(irn);
351 op_tp = get_ia32_op_type(irn);
353 if (is_ia32_CopyB(irn)) {
356 else if (is_ia32_CopyB_i(irn)) {
357 int size = get_ia32_copyb_size(irn);
358 cost = 20 + (int)ceil((4.0 / 3.0) * size); /* integer 4/3 would truncate to 1 */
360 /* in case of address mode operations add additional cycles */
361 else if (op_tp == ia32_AddrModeD || op_tp == ia32_AddrModeS) {
363 In case of stack access or access to fixed addresses add 5 cycles
364 (we assume they are in the cache); other memory operations cost 20
367 if (is_ia32_use_frame(irn) || (
368 is_ia32_NoReg_GP(get_irn_n(irn, n_ia32_base)) &&
369 is_ia32_NoReg_GP(get_irn_n(irn, n_ia32_index))
381 * Returns the inverse operation of @p irn, recalculating the argument at position @p i.
383 * @param irn The original operation
384 * @param i Index of the argument we want the inverse operation to yield
385 * @param inverse struct to be filled with the resulting inverse op
386 * @param obstack The obstack to use for allocation of the returned nodes array
387 * @return The inverse operation or NULL if the operation is not invertible
389 static arch_inverse_t *ia32_get_inverse(const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst)
400 ir_node *block, *noreg, *nomem;
403 /* we cannot invert non-ia32 irns */
404 if (! is_ia32_irn(irn))
407 /* operand must always be a real operand (not base, index or mem) */
408 if (i != n_ia32_binary_left && i != n_ia32_binary_right)
411 /* we don't invert address mode operations */
412 if (get_ia32_op_type(irn) != ia32_Normal)
415 /* TODO: adjust for new immediates... */
416 ir_fprintf(stderr, "TODO: fix get_inverse for new immediates (%+F)\n",
420 block = get_nodes_block(irn);
421 mode = get_irn_mode(irn);
422 irn_mode = get_irn_mode(irn);
423 noreg = get_irn_n(irn, 0);
424 nomem = get_irg_no_mem(irg);
425 dbgi = get_irn_dbg_info(irn);
427 /* initialize structure */
428 inverse->nodes = obstack_alloc(obst, 2 * sizeof(inverse->nodes[0]));
432 switch (get_ia32_irn_opcode(irn)) {
434 if (get_ia32_immop_type(irn) == ia32_ImmConst) {
435 /* we have an add with a const here */
436 /* inverse == add with negated const */
437 inverse->nodes[0] = new_bd_ia32_Add(dbgi, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
439 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
440 set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
441 set_ia32_commutative(inverse->nodes[0]);
443 else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
444 /* we have an add with a symconst here */
445 /* inverse == sub with const */
446 inverse->nodes[0] = new_bd_ia32_Sub(dbgi, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
448 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
451 /* normal add: inverse == sub */
452 inverse->nodes[0] = new_bd_ia32_Sub(dbgi, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, i ^ 1));
457 if (get_ia32_immop_type(irn) != ia32_ImmNone) {
458 /* we have a sub with a const/symconst here */
459 /* inverse == add with this const */
460 inverse->nodes[0] = new_bd_ia32_Add(dbgi, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
461 inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
462 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
466 if (i == n_ia32_binary_left) {
467 inverse->nodes[0] = new_bd_ia32_Add(dbgi, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, 3));
470 inverse->nodes[0] = new_bd_ia32_Sub(dbgi, block, noreg, noreg, nomem, get_irn_n(irn, n_ia32_binary_left), (ir_node*) irn);
476 if (get_ia32_immop_type(irn) != ia32_ImmNone) {
477 /* xor with const: inverse = xor */
478 inverse->nodes[0] = new_bd_ia32_Xor(dbgi, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
479 inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
480 copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
484 inverse->nodes[0] = new_bd_ia32_Xor(dbgi, block, noreg, noreg, nomem, (ir_node *) irn, get_irn_n(irn, i));
489 inverse->nodes[0] = new_bd_ia32_Not(dbgi, block, (ir_node*) irn);
494 inverse->nodes[0] = new_bd_ia32_Neg(dbgi, block, (ir_node*) irn);
499 /* inverse operation not supported */
507 static ir_mode *get_spill_mode_mode(const ir_mode *mode)
509 if (mode_is_float(mode))
516 * Get the mode that should be used for spilling the value node.
518 static ir_mode *get_spill_mode(const ir_node *node)
520 ir_mode *mode = get_irn_mode(node);
521 return get_spill_mode_mode(mode);
525 * Checks whether an address mode reload for a node with mode mode is compatible
526 * with a spill slot of mode spillmode
528 static int ia32_is_spillmode_compatible(const ir_mode *mode, const ir_mode *spillmode)
530 return !mode_is_float(mode) || mode == spillmode;
534 * Check if irn can load its operand at position i from memory (source address mode).
535 * @param irn The irn to be checked
536 * @param i The operand's position
537 * @return Non-zero if the operand can be loaded
539 static int ia32_possible_memory_operand(const ir_node *irn, unsigned int i)
541 ir_node *op = get_irn_n(irn, i);
542 const ir_mode *mode = get_irn_mode(op);
543 const ir_mode *spillmode = get_spill_mode(op);
545 if (!is_ia32_irn(irn) || /* must be an ia32 irn */
546 get_ia32_op_type(irn) != ia32_Normal || /* must not already be an address mode irn */
547 !ia32_is_spillmode_compatible(mode, spillmode) ||
548 is_ia32_use_frame(irn)) /* must not already use frame */
551 switch (get_ia32_am_support(irn)) {
556 if (i != n_ia32_unary_op)
562 case n_ia32_binary_left: {
563 const arch_register_req_t *req;
564 if (!is_ia32_commutative(irn))
567 /* we can't swap left/right for limited registers
568 * (As this (currently) breaks constraint handling copies)
570 req = arch_get_irn_register_req_in(irn, n_ia32_binary_left);
571 if (req->type & arch_register_req_type_limited)
576 case n_ia32_binary_right:
585 panic("Unknown AM type");
588 /* HACK: must not already use "real" memory.
589 * This can happen for Call and Div */
590 if (!is_NoMem(get_irn_n(irn, n_ia32_mem)))
596 static void ia32_perform_memory_operand(ir_node *irn, ir_node *spill,
600 ir_mode *dest_op_mode;
602 assert(ia32_possible_memory_operand(irn, i) && "Cannot perform memory operand change");
604 set_ia32_op_type(irn, ia32_AddrModeS);
606 load_mode = get_irn_mode(get_irn_n(irn, i));
607 dest_op_mode = get_ia32_ls_mode(irn);
608 if (get_mode_size_bits(load_mode) <= get_mode_size_bits(dest_op_mode)) {
609 set_ia32_ls_mode(irn, load_mode);
611 set_ia32_use_frame(irn);
612 set_ia32_need_stackent(irn);
614 if (i == n_ia32_binary_left &&
615 get_ia32_am_support(irn) == ia32_am_binary &&
616 /* immediates are only allowed on the right side */
617 !is_ia32_Immediate(get_irn_n(irn, n_ia32_binary_right))) {
618 ia32_swap_left_right(irn);
619 i = n_ia32_binary_right;
622 assert(is_NoMem(get_irn_n(irn, n_ia32_mem)));
624 set_irn_n(irn, n_ia32_base, get_irg_frame(get_irn_irg(irn)));
625 set_irn_n(irn, n_ia32_mem, spill);
626 set_irn_n(irn, i, ia32_get_admissible_noreg(irn, i));
627 set_ia32_is_reload(irn);
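/* Sketch of the effect on the instruction level (illustrative, not emitted
 * verbatim): folding a reloaded operand into the user as a source address
 * mode operand turns
 *
 *     mov eax, DWORD PTR [ebp-8]   ; reload
 *     add ebx, eax
 *
 * into
 *
 *     add ebx, DWORD PTR [ebp-8]   ; ia32_AddrModeS, base = frame
 *
 * which is what ia32_perform_memory_operand() prepares on the ir level.
 */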
630 static const be_abi_callbacks_t ia32_abi_callbacks = {
631 ia32_abi_get_between_type,
634 /* register allocator interface */
635 static const arch_irn_ops_t ia32_irn_ops = {
637 ia32_get_frame_entity,
638 ia32_set_frame_offset,
641 ia32_get_op_estimated_cost,
642 ia32_possible_memory_operand,
643 ia32_perform_memory_operand,
646 static ir_entity *mcount = NULL;
647 static int gprof = 0;
649 static void ia32_before_abi(ir_graph *irg)
652 if (mcount == NULL) {
653 ir_type *tp = new_type_method(0, 0);
654 ident *id = new_id_from_str("mcount");
655 mcount = new_entity(get_glob_type(), id, tp);
656 /* FIXME: enter the right ld_ident here */
657 set_entity_ld_ident(mcount, get_entity_ident(mcount));
658 set_entity_visibility(mcount, ir_visibility_external);
660 instrument_initcall(irg, mcount);
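/* With gprof profiling enabled this effectively makes every function start
 * with a call to mcount, roughly (illustrative):
 *
 *     push ebp
 *     mov  ebp, esp
 *     call mcount
 *
 * which is also why gprof forces the base pointer to be kept, see
 * ia32_init_graph() below.
 */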
665 * Transforms the standard firm graph into
668 static void ia32_prepare_graph(ir_graph *irg)
670 ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
673 switch (be_transformer) {
674 case TRANSFORMER_DEFAULT:
675 /* transform remaining nodes into assembler instructions */
676 ia32_transform_graph(irg);
679 case TRANSFORMER_PBQP:
680 case TRANSFORMER_RAND:
681 /* transform nodes into assembler instructions by PBQP magic */
682 ia32_transform_graph_by_pbqp(irg);
686 panic("invalid transformer");
689 ia32_transform_graph(irg);
692 /* do local optimizations (mainly CSE) */
693 optimize_graph_df(irg);
694 /* backend code expects that outedges are always enabled */
698 dump_ir_graph(irg, "transformed");
700 /* optimize address mode */
701 ia32_optimize_graph(irg);
703 /* do code placement, to optimize the position of constants */
705 /* backend code expects that outedges are always enabled */
709 dump_ir_graph(irg, "place");
712 ir_node *ia32_turn_back_am(ir_node *node)
714 dbg_info *dbgi = get_irn_dbg_info(node);
715 ir_graph *irg = get_irn_irg(node);
716 ir_node *block = get_nodes_block(node);
717 ir_node *base = get_irn_n(node, n_ia32_base);
718 ir_node *idx = get_irn_n(node, n_ia32_index);
719 ir_node *mem = get_irn_n(node, n_ia32_mem);
722 ir_node *load = new_bd_ia32_Load(dbgi, block, base, idx, mem);
723 ir_node *load_res = new_rd_Proj(dbgi, load, mode_Iu, pn_ia32_Load_res);
725 ia32_copy_am_attrs(load, node);
726 if (is_ia32_is_reload(node))
727 set_ia32_is_reload(load);
728 set_irn_n(node, n_ia32_mem, get_irg_no_mem(irg));
730 switch (get_ia32_am_support(node)) {
732 set_irn_n(node, n_ia32_unary_op, load_res);
736 if (is_ia32_Immediate(get_irn_n(node, n_ia32_binary_right))) {
737 set_irn_n(node, n_ia32_binary_left, load_res);
739 set_irn_n(node, n_ia32_binary_right, load_res);
744 panic("Unknown AM type");
746 noreg = ia32_new_NoReg_gp(current_ir_graph);
747 set_irn_n(node, n_ia32_base, noreg);
748 set_irn_n(node, n_ia32_index, noreg);
749 set_ia32_am_offs_int(node, 0);
750 set_ia32_am_sc(node, NULL);
751 set_ia32_am_scale(node, 0);
752 clear_ia32_am_sc_sign(node);
754 /* rewire mem-proj */
755 if (get_irn_mode(node) == mode_T) {
756 const ir_edge_t *edge;
757 foreach_out_edge(node, edge) {
758 ir_node *out = get_edge_src_irn(edge);
759 if (get_irn_mode(out) == mode_M) {
760 set_Proj_pred(out, load);
761 set_Proj_proj(out, pn_ia32_Load_M);
767 set_ia32_op_type(node, ia32_Normal);
768 if (sched_is_scheduled(node))
769 sched_add_before(node, load);
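/* Example of what ia32_turn_back_am() does on the instruction level (sketch
 * only): a source address mode operation such as
 *
 *     add eax, DWORD PTR [ebx+4*ecx+8]
 *
 * is split back into an explicit load plus a register-register operation:
 *
 *     mov edx, DWORD PTR [ebx+4*ecx+8]
 *     add eax, edx
 *
 * The new Load keeps the address mode attributes, while the original node
 * loses them and is switched back to ia32_Normal.
 */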
774 static ir_node *flags_remat(ir_node *node, ir_node *after)
776 /* we should turn back source address mode when rematerializing nodes */
781 if (is_Block(after)) {
784 block = get_nodes_block(after);
787 type = get_ia32_op_type(node);
790 ia32_turn_back_am(node);
794 /* TODO implement this later... */
795 panic("found DestAM with flag user %+F this should not happen", node);
797 default: assert(type == ia32_Normal); break;
800 copy = exact_copy(node);
801 set_nodes_block(copy, block);
802 sched_add_after(after, copy);
808 * Called before the register allocator.
810 static void ia32_before_ra(ir_graph *irg)
812 /* setup fpu rounding modes */
813 ia32_setup_fpu_mode(irg);
816 be_sched_fix_flags(irg, &ia32_reg_classes[CLASS_ia32_flags],
819 be_add_missing_keeps(irg);
824 * Transforms a be_Reload into an ia32 Load.
826 static void transform_to_Load(ir_node *node)
828 ir_graph *irg = get_irn_irg(node);
829 dbg_info *dbgi = get_irn_dbg_info(node);
830 ir_node *block = get_nodes_block(node);
831 ir_entity *ent = be_get_frame_entity(node);
832 ir_mode *mode = get_irn_mode(node);
833 ir_mode *spillmode = get_spill_mode(node);
834 ir_node *noreg = ia32_new_NoReg_gp(irg);
835 ir_node *sched_point = NULL;
836 ir_node *ptr = get_irg_frame(irg);
837 ir_node *mem = get_irn_n(node, n_be_Reload_mem);
838 ir_node *new_op, *proj;
839 const arch_register_t *reg;
841 if (sched_is_scheduled(node)) {
842 sched_point = sched_prev(node);
845 if (mode_is_float(spillmode)) {
846 if (ia32_cg_config.use_sse2)
847 new_op = new_bd_ia32_xLoad(dbgi, block, ptr, noreg, mem, spillmode);
849 new_op = new_bd_ia32_vfld(dbgi, block, ptr, noreg, mem, spillmode);
851 else if (get_mode_size_bits(spillmode) == 128) {
852 /* Reload 128 bit SSE registers */
853 new_op = new_bd_ia32_xxLoad(dbgi, block, ptr, noreg, mem);
856 new_op = new_bd_ia32_Load(dbgi, block, ptr, noreg, mem);
858 set_ia32_op_type(new_op, ia32_AddrModeS);
859 set_ia32_ls_mode(new_op, spillmode);
860 set_ia32_frame_ent(new_op, ent);
861 set_ia32_use_frame(new_op);
862 set_ia32_is_reload(new_op);
864 DBG_OPT_RELOAD2LD(node, new_op);
866 proj = new_rd_Proj(dbgi, new_op, mode, pn_ia32_Load_res);
869 sched_add_after(sched_point, new_op);
873 /* copy the register from the old node to the new Load */
874 reg = arch_get_irn_register(node);
875 arch_set_irn_register(proj, reg);
877 SET_IA32_ORIG_NODE(new_op, node);
879 exchange(node, proj);
883 * Transforms a be_Spill node into an ia32 Store.
885 static void transform_to_Store(ir_node *node)
887 ir_graph *irg = get_irn_irg(node);
888 dbg_info *dbgi = get_irn_dbg_info(node);
889 ir_node *block = get_nodes_block(node);
890 ir_entity *ent = be_get_frame_entity(node);
891 const ir_node *spillval = get_irn_n(node, n_be_Spill_val);
892 ir_mode *mode = get_spill_mode(spillval);
893 ir_node *noreg = ia32_new_NoReg_gp(irg);
894 ir_node *nomem = get_irg_no_mem(irg);
895 ir_node *ptr = get_irg_frame(irg);
896 ir_node *val = get_irn_n(node, n_be_Spill_val);
899 ir_node *sched_point = NULL;
901 if (sched_is_scheduled(node)) {
902 sched_point = sched_prev(node);
905 if (mode_is_float(mode)) {
906 if (ia32_cg_config.use_sse2) {
907 store = new_bd_ia32_xStore(dbgi, block, ptr, noreg, nomem, val);
908 res = new_r_Proj(store, mode_M, pn_ia32_xStore_M);
910 store = new_bd_ia32_vfst(dbgi, block, ptr, noreg, nomem, val, mode);
911 res = new_r_Proj(store, mode_M, pn_ia32_vfst_M);
913 } else if (get_mode_size_bits(mode) == 128) {
914 /* Spill 128 bit SSE registers */
915 store = new_bd_ia32_xxStore(dbgi, block, ptr, noreg, nomem, val);
916 res = new_r_Proj(store, mode_M, pn_ia32_xxStore_M);
917 } else if (get_mode_size_bits(mode) == 8) {
918 store = new_bd_ia32_Store8Bit(dbgi, block, ptr, noreg, nomem, val);
919 res = new_r_Proj(store, mode_M, pn_ia32_Store8Bit_M);
921 store = new_bd_ia32_Store(dbgi, block, ptr, noreg, nomem, val);
922 res = new_r_Proj(store, mode_M, pn_ia32_Store_M);
925 set_ia32_op_type(store, ia32_AddrModeD);
926 set_ia32_ls_mode(store, mode);
927 set_ia32_frame_ent(store, ent);
928 set_ia32_use_frame(store);
929 set_ia32_is_spill(store);
930 SET_IA32_ORIG_NODE(store, node);
931 DBG_OPT_SPILL2ST(node, store);
934 sched_add_after(sched_point, store);
941 static ir_node *create_push(ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent)
943 dbg_info *dbgi = get_irn_dbg_info(node);
944 ir_node *block = get_nodes_block(node);
945 ir_graph *irg = get_irn_irg(node);
946 ir_node *noreg = ia32_new_NoReg_gp(irg);
947 ir_node *frame = get_irg_frame(irg);
949 ir_node *push = new_bd_ia32_Push(dbgi, block, frame, noreg, mem, noreg, sp);
951 set_ia32_frame_ent(push, ent);
952 set_ia32_use_frame(push);
953 set_ia32_op_type(push, ia32_AddrModeS);
954 set_ia32_ls_mode(push, mode_Is);
955 set_ia32_is_spill(push);
957 sched_add_before(schedpoint, push);
961 static ir_node *create_pop(ir_node *node, ir_node *schedpoint, ir_node *sp, ir_entity *ent)
963 dbg_info *dbgi = get_irn_dbg_info(node);
964 ir_node *block = get_nodes_block(node);
965 ir_graph *irg = get_irn_irg(node);
966 ir_node *noreg = ia32_new_NoReg_gp(irg);
967 ir_node *frame = get_irg_frame(irg);
969 ir_node *pop = new_bd_ia32_PopMem(dbgi, block, frame, noreg,
970 get_irg_no_mem(irg), sp);
972 set_ia32_frame_ent(pop, ent);
973 set_ia32_use_frame(pop);
974 set_ia32_op_type(pop, ia32_AddrModeD);
975 set_ia32_ls_mode(pop, mode_Is);
976 set_ia32_is_reload(pop);
978 sched_add_before(schedpoint, pop);
983 static ir_node* create_spproj(ir_node *node, ir_node *pred, int pos)
985 dbg_info *dbgi = get_irn_dbg_info(node);
986 ir_mode *spmode = mode_Iu;
987 const arch_register_t *spreg = &ia32_registers[REG_ESP];
990 sp = new_rd_Proj(dbgi, pred, spmode, pos);
991 arch_set_irn_register(sp, spreg);
997 * Transform MemPerm; currently we do this the ugly way and produce
998 * push/pop into/from memory cascades. This is possible without using
1001 static void transform_MemPerm(ir_node *node)
1003 ir_node *block = get_nodes_block(node);
1004 ir_graph *irg = get_irn_irg(node);
1005 ir_node *sp = be_get_initial_reg_value(irg, &ia32_registers[REG_ESP]);
1006 int arity = be_get_MemPerm_entity_arity(node);
1007 ir_node **pops = ALLOCAN(ir_node*, arity);
1011 const ir_edge_t *edge;
1012 const ir_edge_t *next;
1015 for (i = 0; i < arity; ++i) {
1016 ir_entity *inent = be_get_MemPerm_in_entity(node, i);
1017 ir_entity *outent = be_get_MemPerm_out_entity(node, i);
1018 ir_type *enttype = get_entity_type(inent);
1019 unsigned entsize = get_type_size_bytes(enttype);
1020 unsigned entsize2 = get_type_size_bytes(get_entity_type(outent));
1021 ir_node *mem = get_irn_n(node, i + 1);
1024 /* work around cases where entities have different sizes */
1025 if (entsize2 < entsize)
1027 assert( (entsize == 4 || entsize == 8) && "spillslot on x86 should be 32 or 64 bit");
1029 push = create_push(node, node, sp, mem, inent);
1030 sp = create_spproj(node, push, pn_ia32_Push_stack);
1032 /* add another push after the first one */
1033 push = create_push(node, node, sp, mem, inent);
1034 add_ia32_am_offs_int(push, 4);
1035 sp = create_spproj(node, push, pn_ia32_Push_stack);
1038 set_irn_n(node, i, new_r_Bad(irg, mode_X));
1042 for (i = arity - 1; i >= 0; --i) {
1043 ir_entity *inent = be_get_MemPerm_in_entity(node, i);
1044 ir_entity *outent = be_get_MemPerm_out_entity(node, i);
1045 ir_type *enttype = get_entity_type(outent);
1046 unsigned entsize = get_type_size_bytes(enttype);
1047 unsigned entsize2 = get_type_size_bytes(get_entity_type(inent));
1050 /* work around cases where entities have different sizes */
1051 if (entsize2 < entsize)
1053 assert( (entsize == 4 || entsize == 8) && "spillslot on x86 should be 32 or 64 bit");
1055 pop = create_pop(node, node, sp, outent);
1056 sp = create_spproj(node, pop, pn_ia32_Pop_stack);
1058 add_ia32_am_offs_int(pop, 4);
1060 /* add another pop after the first one */
1061 pop = create_pop(node, node, sp, outent);
1062 sp = create_spproj(node, pop, pn_ia32_Pop_stack);
1069 keep = be_new_Keep(block, 1, in);
1070 sched_add_before(node, keep);
1072 /* exchange memprojs */
1073 foreach_out_edge_safe(node, edge, next) {
1074 ir_node *proj = get_edge_src_irn(edge);
1075 int p = get_Proj_proj(proj);
1079 set_Proj_pred(proj, pops[p]);
1080 set_Proj_proj(proj, pn_ia32_Pop_M);
1083 /* remove memperm */
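/* Sketch of the resulting code for a MemPerm swapping two 32bit spill slots
 * A and B (illustrative, the actual offsets depend on the frame entities):
 *
 *     push DWORD PTR [ebp+A]
 *     push DWORD PTR [ebp+B]
 *     pop  DWORD PTR [ebp+A]
 *     pop  DWORD PTR [ebp+B]
 *
 * i.e. the pushes save all source slots on the stack and the pops, emitted in
 * reverse order, write the values back to their destination slots.
 */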
1089 * Block walker: calls the transform functions for Spill, Reload and MemPerm nodes.
1091 static void ia32_after_ra_walker(ir_node *block, void *env)
1093 ir_node *node, *prev;
1096 /* beware: the schedule is changed here */
1097 for (node = sched_last(block); !sched_is_begin(node); node = prev) {
1098 prev = sched_prev(node);
1100 if (be_is_Reload(node)) {
1101 transform_to_Load(node);
1102 } else if (be_is_Spill(node)) {
1103 transform_to_Store(node);
1104 } else if (be_is_MemPerm(node)) {
1105 transform_MemPerm(node);
1111 * Collects nodes that need frame entities assigned.
1113 static void ia32_collect_frame_entity_nodes(ir_node *node, void *data)
1115 be_fec_env_t *env = (be_fec_env_t*)data;
1116 const ir_mode *mode;
1119 if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
1120 mode = get_spill_mode_mode(get_irn_mode(node));
1121 align = get_mode_size_bytes(mode);
1122 } else if (is_ia32_irn(node) &&
1123 get_ia32_frame_ent(node) == NULL &&
1124 is_ia32_use_frame(node)) {
1125 if (is_ia32_need_stackent(node))
1128 switch (get_ia32_irn_opcode(node)) {
1130 case iro_ia32_Load: {
1131 const ia32_attr_t *attr = get_ia32_attr_const(node);
1133 if (attr->data.need_32bit_stackent) {
1135 } else if (attr->data.need_64bit_stackent) {
1138 mode = get_ia32_ls_mode(node);
1139 if (is_ia32_is_reload(node))
1140 mode = get_spill_mode_mode(mode);
1142 align = get_mode_size_bytes(mode);
1146 case iro_ia32_vfild:
1148 case iro_ia32_xLoad: {
1149 mode = get_ia32_ls_mode(node);
1154 case iro_ia32_FldCW: {
1155 /* although 2 bytes would be enough, 4 bytes perform best */
1163 panic("unexpected frame user while collecting frame entity nodes");
1165 case iro_ia32_FnstCW:
1166 case iro_ia32_Store8Bit:
1167 case iro_ia32_Store:
1170 case iro_ia32_vfist:
1171 case iro_ia32_vfisttp:
1173 case iro_ia32_xStore:
1174 case iro_ia32_xStoreSimple:
1181 be_node_needs_frame_entity(env, node, mode, align);
1184 static int determine_ebp_input(ir_node *ret)
1186 const arch_register_t *bp = &ia32_registers[REG_EBP];
1187 int arity = get_irn_arity(ret);
1190 for (i = 0; i < arity; ++i) {
1191 ir_node *input = get_irn_n(ret, i);
1192 if (arch_get_irn_register(input) == bp)
1195 panic("no ebp input found at %+F", ret);
1198 static void introduce_epilog(ir_node *ret)
1200 const arch_register_t *sp = &ia32_registers[REG_ESP];
1201 const arch_register_t *bp = &ia32_registers[REG_EBP];
1202 ir_graph *irg = get_irn_irg(ret);
1203 ir_type *frame_type = get_irg_frame_type(irg);
1204 unsigned frame_size = get_type_size_bytes(frame_type);
1205 be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
1206 ir_node *block = get_nodes_block(ret);
1207 ir_node *first_sp = get_irn_n(ret, n_be_Return_sp);
1208 ir_node *curr_sp = first_sp;
1209 ir_mode *mode_gp = mode_Iu;
1211 if (!layout->sp_relative) {
1212 int n_ebp = determine_ebp_input(ret);
1213 ir_node *curr_bp = get_irn_n(ret, n_ebp);
1214 if (ia32_cg_config.use_leave) {
1215 ir_node *leave = new_bd_ia32_Leave(NULL, block, curr_bp);
1216 curr_bp = new_r_Proj(leave, mode_gp, pn_ia32_Leave_frame);
1217 curr_sp = new_r_Proj(leave, mode_gp, pn_ia32_Leave_stack);
1218 arch_set_irn_register(curr_bp, bp);
1219 arch_set_irn_register(curr_sp, sp);
1220 sched_add_before(ret, leave);
1223 ir_node *curr_mem = get_irn_n(ret, n_be_Return_mem);
1224 /* copy ebp to esp */
1225 curr_sp = new_bd_ia32_CopyEbpEsp(NULL, block, curr_bp);
1226 arch_set_irn_register(curr_sp, sp);
1227 sched_add_before(ret, curr_sp);
1230 pop = new_bd_ia32_PopEbp(NULL, block, curr_mem, curr_sp);
1231 curr_bp = new_r_Proj(pop, mode_gp, pn_ia32_PopEbp_res);
1232 curr_sp = new_r_Proj(pop, mode_gp, pn_ia32_PopEbp_stack);
1233 curr_mem = new_r_Proj(pop, mode_M, pn_ia32_Pop_M);
1234 arch_set_irn_register(curr_bp, bp);
1235 arch_set_irn_register(curr_sp, sp);
1236 sched_add_before(ret, pop);
1238 set_irn_n(ret, n_be_Return_mem, curr_mem);
1240 set_irn_n(ret, n_ebp, curr_bp);
1242 ir_node *incsp = be_new_IncSP(sp, block, curr_sp, -(int)frame_size, 0);
1243 sched_add_before(ret, incsp);
1246 set_irn_n(ret, n_be_Return_sp, curr_sp);
1248 /* keep verifier happy... */
1249 if (get_irn_n_edges(first_sp) == 0 && is_Proj(first_sp)) {
1250 kill_node(first_sp);
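/* The epilog constructed above corresponds to one of the following sequences
 * (sketch):
 *
 *     leave                     ; if ia32_cg_config.use_leave
 * or
 *     mov esp, ebp
 *     pop ebp
 * or, for sp_relative (frame pointer omitted) graphs:
 *     add esp, <frame_size>
 *
 * each followed by the return.
 */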
1255 * Put the prolog code at the beginning and the epilog code before each return.
1257 static void introduce_prolog_epilog(ir_graph *irg)
1259 const arch_register_t *sp = &ia32_registers[REG_ESP];
1260 const arch_register_t *bp = &ia32_registers[REG_EBP];
1261 ir_node *start = get_irg_start(irg);
1262 ir_node *block = get_nodes_block(start);
1263 ir_type *frame_type = get_irg_frame_type(irg);
1264 unsigned frame_size = get_type_size_bytes(frame_type);
1265 be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
1266 ir_node *initial_sp = be_get_initial_reg_value(irg, sp);
1267 ir_node *curr_sp = initial_sp;
1268 ir_mode *mode_gp = mode_Iu;
1270 if (!layout->sp_relative) {
1272 ir_node *mem = get_irg_initial_mem(irg);
1273 ir_node *noreg = ia32_new_NoReg_gp(irg);
1274 ir_node *initial_bp = be_get_initial_reg_value(irg, bp);
1275 ir_node *curr_bp = initial_bp;
1276 ir_node *push = new_bd_ia32_Push(NULL, block, noreg, noreg, mem, curr_bp, curr_sp);
1279 curr_sp = new_r_Proj(push, mode_gp, pn_ia32_Push_stack);
1280 mem = new_r_Proj(push, mode_M, pn_ia32_Push_M);
1281 arch_set_irn_register(curr_sp, sp);
1282 sched_add_after(start, push);
1284 /* move esp to ebp */
1285 curr_bp = be_new_Copy(block, curr_sp);
1286 sched_add_after(push, curr_bp);
1287 be_set_constr_single_reg_out(curr_bp, 0, bp, arch_register_req_type_ignore);
1288 curr_sp = be_new_CopyKeep_single(block, curr_sp, curr_bp);
1289 sched_add_after(curr_bp, curr_sp);
1290 be_set_constr_single_reg_out(curr_sp, 0, sp, arch_register_req_type_produces_sp);
1291 edges_reroute(initial_bp, curr_bp);
1292 set_irn_n(push, n_ia32_Push_val, initial_bp);
1294 incsp = be_new_IncSP(sp, block, curr_sp, frame_size, 0);
1295 edges_reroute(initial_sp, incsp);
1296 set_irn_n(push, n_ia32_Push_stack, initial_sp);
1297 sched_add_after(curr_sp, incsp);
1299 /* make sure the initial IncSP is really used by someone */
1300 if (get_irn_n_edges(incsp) <= 1) {
1301 ir_node *in[] = { incsp };
1302 ir_node *keep = be_new_Keep(block, 1, in);
1303 sched_add_after(incsp, keep);
1306 layout->initial_bias = -4;
1308 ir_node *incsp = be_new_IncSP(sp, block, curr_sp, frame_size, 0);
1309 edges_reroute(initial_sp, incsp);
1310 be_set_IncSP_pred(incsp, curr_sp);
1311 sched_add_after(start, incsp);
1314 /* introduce epilog for every return node */
1316 ir_node *end_block = get_irg_end_block(irg);
1317 int arity = get_irn_arity(end_block);
1320 for (i = 0; i < arity; ++i) {
1321 ir_node *ret = get_irn_n(end_block, i);
1322 assert(be_is_Return(ret));
1323 introduce_epilog(ret);
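/* The prolog built above corresponds to (sketch, frame pointer case):
 *
 *     push ebp
 *     mov  ebp, esp
 *     sub  esp, <frame_size>
 *
 * and for sp_relative (frame pointer omitted) graphs simply:
 *
 *     sub  esp, <frame_size>
 */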
1329 * Last touchups for the graph before emit: x87 simulation to replace the
1330 * virtual with real x87 instructions, creating a block schedule and peephole
1333 static void ia32_finish(ir_graph *irg)
1335 ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
1336 be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
1337 bool at_begin = stack_layout->sp_relative ? true : false;
1338 be_fec_env_t *fec_env = be_new_frame_entity_coalescer(irg);
1340 /* create and coalesce frame entities */
1341 irg_walk_graph(irg, NULL, ia32_collect_frame_entity_nodes, fec_env);
1342 be_assign_entities(fec_env, ia32_set_frame_entity, at_begin);
1343 be_free_frame_entity_coalescer(fec_env);
1345 irg_block_walk_graph(irg, NULL, ia32_after_ra_walker, NULL);
1347 introduce_prolog_epilog(irg);
1349 /* fix stack entity offsets */
1350 be_abi_fix_stack_nodes(irg);
1351 be_abi_fix_stack_bias(irg);
1353 /* fix 2-address code constraints */
1354 ia32_finish_irg(irg);
1356 /* we might have to rewrite x87 virtual registers */
1357 if (irg_data->do_x87_sim) {
1358 ia32_x87_simulate_graph(irg);
1361 /* do peephole optimisations */
1362 ia32_peephole_optimization(irg);
1364 be_remove_dead_nodes_from_schedule(irg);
1366 /* create block schedule, this also removes empty blocks which might
1367 * produce critical edges */
1368 irg_data->blk_sched = be_create_block_schedule(irg);
1372 * Emits the code, closes the output file and frees
1373 * the code generator interface.
1375 static void ia32_emit(ir_graph *irg)
1377 if (ia32_cg_config.emit_machcode) {
1378 ia32_gen_binary_routine(irg);
1380 ia32_gen_routine(irg);
1385 * Returns the node representing the PIC base.
1387 static ir_node *ia32_get_pic_base(ir_graph *irg)
1389 ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
1391 ir_node *get_eip = irg_data->get_eip;
1392 if (get_eip != NULL)
1395 block = get_irg_start_block(irg);
1396 get_eip = new_bd_ia32_GetEIP(NULL, block);
1397 irg_data->get_eip = get_eip;
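/* On ia32 there is no direct way to read eip, so position independent code
 * typically materializes the PIC base with a pattern like (illustrative):
 *
 *     call 1f
 * 1:  pop  ebx          ; ebx now holds the address of label 1
 *
 * The GetEIP pseudo node created here stands for this kind of eip-reading
 * sequence.
 */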
1403 * Initializes an IA32 code generator.
1405 static void ia32_init_graph(ir_graph *irg)
1407 struct obstack *obst = be_get_be_obst(irg);
1408 ia32_irg_data_t *irg_data = OALLOCZ(obst, ia32_irg_data_t);
1410 irg_data->dump = (be_get_irg_options(irg)->dump_flags & DUMP_BE) ? 1 : 0;
1413 /* Linux gprof implementation needs base pointer */
1414 be_get_irg_options(irg)->omit_fp = 0;
1417 be_birg_from_irg(irg)->isa_link = irg_data;
1422 * Set output modes for GCC
1424 static const tarval_mode_info mo_integer = {
1431 * set the tarval output mode of all integer modes to decimal
1433 static void set_tarval_output_modes(void)
1437 for (i = get_irp_n_modes(); i > 0;) {
1438 ir_mode *mode = get_irp_mode(--i);
1440 if (mode_is_int(mode))
1441 set_tarval_mode_output_option(mode, &mo_integer);
1445 extern const arch_isa_if_t ia32_isa_if;
1448 * The template that generates a new ISA object.
1449 * Note that this template can be changed by command line
1452 static ia32_isa_t ia32_isa_template = {
1454 &ia32_isa_if, /* isa interface implementation */
1459 &ia32_registers[REG_ESP], /* stack pointer register */
1460 &ia32_registers[REG_EBP], /* base pointer register */
1461 &ia32_reg_classes[CLASS_ia32_gp], /* static link pointer register class */
1462 2, /* power of two stack alignment, 2^2 == 4 */
1463 NULL, /* main environment */
1464 7, /* costs for a spill instruction */
1465 5, /* costs for a reload instruction */
1466 false, /* no custom abi handling */
1469 IA32_FPU_ARCH_X87, /* FPU architecture */
1472 static void init_asm_constraints(void)
1474 be_init_default_asm_constraint_flags();
1476 asm_constraint_flags['a'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1477 asm_constraint_flags['b'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1478 asm_constraint_flags['c'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1479 asm_constraint_flags['d'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1480 asm_constraint_flags['D'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1481 asm_constraint_flags['S'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1482 asm_constraint_flags['Q'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1483 asm_constraint_flags['q'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1484 asm_constraint_flags['A'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1485 asm_constraint_flags['l'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1486 asm_constraint_flags['R'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1487 asm_constraint_flags['r'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1488 asm_constraint_flags['p'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1489 asm_constraint_flags['f'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1490 asm_constraint_flags['t'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1491 asm_constraint_flags['u'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1492 asm_constraint_flags['Y'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1493 asm_constraint_flags['X'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
1494 asm_constraint_flags['n'] = ASM_CONSTRAINT_FLAG_SUPPORTS_IMMEDIATE;
1495 asm_constraint_flags['g'] = ASM_CONSTRAINT_FLAG_SUPPORTS_IMMEDIATE;
1497 /* no support for autodecrement/autoincrement */
1498 asm_constraint_flags['<'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1499 asm_constraint_flags['>'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1500 /* no float consts */
1501 asm_constraint_flags['E'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1502 asm_constraint_flags['F'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1503 /* makes no sense on x86 */
1504 asm_constraint_flags['s'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1505 /* no support for sse consts yet */
1506 asm_constraint_flags['C'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1507 /* no support for x87 consts yet */
1508 asm_constraint_flags['G'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1509 /* no support for mmx registers yet */
1510 asm_constraint_flags['y'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1511 /* not available in 32bit mode */
1512 asm_constraint_flags['Z'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1513 asm_constraint_flags['e'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1515 /* no code yet to determine register class needed... */
1516 asm_constraint_flags['X'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
1520 * Initializes the backend ISA.
1522 static arch_env_t *ia32_init(const be_main_env_t *env)
1524 ia32_isa_t *isa = XMALLOC(ia32_isa_t);
1526 set_tarval_output_modes();
1528 *isa = ia32_isa_template;
1530 if (ia32_mode_fpcw == NULL) {
1531 ia32_mode_fpcw = new_int_mode("Fpcw", irma_twos_complement, 16, 0, 0);
1534 ia32_register_init();
1535 ia32_create_opcodes(&ia32_irn_ops);
1537 isa->tv_ent = pmap_create();
1539 /* enter the ISA object into the intrinsic environment */
1540 intrinsic_env.isa = isa;
1542 be_emit_init(env->file_handle);
1543 be_gas_begin_compilation_unit(env);
1549 * Closes the output file and frees the ISA structure.
1551 static void ia32_done(void *self)
1553 ia32_isa_t *isa = (ia32_isa_t*)self;
1555 /* emit now all global declarations */
1556 be_gas_end_compilation_unit(isa->base.main_env);
1560 pmap_destroy(isa->tv_ent);
1565 * Returns the register for parameter nr.
1567 static const arch_register_t *ia32_get_RegParam_reg(unsigned cc, unsigned nr,
1568 const ir_mode *mode)
1570 static const arch_register_t *gpreg_param_reg_fastcall[] = {
1571 &ia32_registers[REG_ECX],
1572 &ia32_registers[REG_EDX],
1575 static const unsigned MAXNUM_GPREG_ARGS = 3;
1577 static const arch_register_t *gpreg_param_reg_regparam[] = {
1578 &ia32_registers[REG_EAX],
1579 &ia32_registers[REG_EDX],
1580 &ia32_registers[REG_ECX]
1583 static const arch_register_t *gpreg_param_reg_this[] = {
1584 &ia32_registers[REG_ECX],
1589 static const arch_register_t *fpreg_sse_param_reg_std[] = {
1590 &ia32_registers[REG_XMM0],
1591 &ia32_registers[REG_XMM1],
1592 &ia32_registers[REG_XMM2],
1593 &ia32_registers[REG_XMM3],
1594 &ia32_registers[REG_XMM4],
1595 &ia32_registers[REG_XMM5],
1596 &ia32_registers[REG_XMM6],
1597 &ia32_registers[REG_XMM7]
1600 static const arch_register_t *fpreg_sse_param_reg_this[] = {
1601 NULL, /* in case of a "this" pointer, the first parameter must not be a float */
1603 static const unsigned MAXNUM_SSE_ARGS = 8;
1605 if ((cc & cc_this_call) && nr == 0)
1606 return gpreg_param_reg_this[0];
1608 if (! (cc & cc_reg_param))
1611 if (mode_is_float(mode)) {
1612 if (!ia32_cg_config.use_sse2 || (cc & cc_fpreg_param) == 0)
1614 if (nr >= MAXNUM_SSE_ARGS)
1617 if (cc & cc_this_call) {
1618 return fpreg_sse_param_reg_this[nr];
1620 return fpreg_sse_param_reg_std[nr];
1621 } else if (mode_is_int(mode) || mode_is_reference(mode)) {
1622 unsigned num_regparam;
1624 if (get_mode_size_bits(mode) > 32)
1627 if (nr >= MAXNUM_GPREG_ARGS)
1630 if (cc & cc_this_call) {
1631 return gpreg_param_reg_this[nr];
1633 num_regparam = cc & ~cc_bits;
1634 if (num_regparam == 0) {
1635 /* default fastcall */
1636 return gpreg_param_reg_fastcall[nr];
1638 if (nr < num_regparam)
1639 return gpreg_param_reg_regparam[nr];
1643 panic("unknown argument mode");
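/* Summary of the register parameter assignment implemented above (sketch):
 *
 *   fastcall (default when no regparam count is given):
 *       first two integer arguments in ecx and edx
 *   regparam(N):
 *       first N integer arguments in eax, edx, ecx
 *   thiscall:
 *       the "this" pointer in ecx, further arguments typically on the stack
 *   SSE float passing (if enabled via cc_fpreg_param and SSE2):
 *       first 8 float arguments in xmm0 ... xmm7
 */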
1647 * Get the ABI restrictions for procedure calls.
1649 static void ia32_get_call_abi(ir_type *method_type, be_abi_call_t *abi)
1654 be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);
1656 /* set abi flags for calls */
1657 call_flags.bits.store_args_sequential = 0;
1658 /* call_flags.bits.try_omit_fp not changed: can handle both settings */
1659 call_flags.bits.fp_free = 0; /* the frame pointer is fixed in IA32 */
1660 call_flags.bits.call_has_imm = 0; /* No call immediate, we handle this by ourselves */
1662 /* set parameter passing style */
1663 be_abi_call_set_flags(abi, call_flags, &ia32_abi_callbacks);
1665 cc = get_method_calling_convention(method_type);
1666 if (get_method_variadicity(method_type) == variadicity_variadic) {
1667 /* pass all parameters of a variadic function on the stack */
1668 cc = cc_cdecl_set | (cc & cc_this_call);
1670 if (get_method_additional_properties(method_type) & mtp_property_private &&
1671 ia32_cg_config.optimize_cc) {
1672 /* set the fast calling conventions (allowing up to 3) */
1673 cc = SET_FASTCALL(cc) | 3;
1677 /* we have to pop the shadow parameter ourselves for compound calls */
1678 if ( (get_method_calling_convention(method_type) & cc_compound_ret)
1679 && !(cc & cc_reg_param)) {
1680 pop_amount += get_mode_size_bytes(mode_P_data);
1683 n = get_method_n_params(method_type);
1684 for (i = regnum = 0; i < n; i++) {
1685 const arch_register_t *reg = NULL;
1686 ir_type *tp = get_method_param_type(method_type, i);
1687 ir_mode *mode = get_type_mode(tp);
1690 reg = ia32_get_RegParam_reg(cc, regnum, mode);
1693 be_abi_call_param_reg(abi, i, reg, ABI_CONTEXT_BOTH);
1696 /* Micro optimisation: if the mode is shorter than 4 bytes, load 4 bytes.
1697 * movl has a shorter opcode than mov[sz][bw]l */
1698 ir_mode *load_mode = mode;
1701 unsigned size = get_mode_size_bytes(mode);
1703 if (cc & cc_callee_clear_stk) {
1704 pop_amount += (size + 3U) & ~3U;
1707 if (size < 4) load_mode = mode_Iu;
1710 be_abi_call_param_stack(abi, i, load_mode, 4, 0, 0, ABI_CONTEXT_BOTH);
1714 be_abi_call_set_pop(abi, pop_amount);
1716 /* set return registers */
1717 n = get_method_n_ress(method_type);
1719 assert(n <= 2 && "more than two results not supported");
1721 /* In case of 64bit returns, we will have two 32bit values */
1723 ir_type *tp = get_method_res_type(method_type, 0);
1724 ir_mode *mode = get_type_mode(tp);
1726 assert(!mode_is_float(mode) && "two FP results not supported");
1728 tp = get_method_res_type(method_type, 1);
1729 mode = get_type_mode(tp);
1731 assert(!mode_is_float(mode) && "mixed INT, FP results not supported");
1733 be_abi_call_res_reg(abi, 0, &ia32_registers[REG_EAX], ABI_CONTEXT_BOTH);
1734 be_abi_call_res_reg(abi, 1, &ia32_registers[REG_EDX], ABI_CONTEXT_BOTH);
1737 ir_type *tp = get_method_res_type(method_type, 0);
1738 ir_mode *mode = get_type_mode(tp);
1739 const arch_register_t *reg;
1740 assert(is_atomic_type(tp));
1742 reg = mode_is_float(mode) ? &ia32_registers[REG_VF0] : &ia32_registers[REG_EAX];
1744 be_abi_call_res_reg(abi, 0, reg, ABI_CONTEXT_BOTH);
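/* Resulting return value convention (sketch): 32bit integers and pointers are
 * returned in eax, 64bit integer results in the edx:eax pair, and floating
 * point results in the virtual fp register vf0, which the x87 simulation later
 * maps onto the x87 register stack (st(0)).
 */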
1748 static void ia32_mark_remat(ir_node *node)
1750 if (is_ia32_irn(node)) {
1751 set_ia32_is_remat(node);
1756 * Check if Mux(sel, mux_true, mux_false) would represent a Max or Min operation
1758 static bool mux_is_float_min_max(ir_node *sel, ir_node *mux_true,
1763 ir_relation relation;
1768 cmp_l = get_Cmp_left(sel);
1769 cmp_r = get_Cmp_right(sel);
1770 if (!mode_is_float(get_irn_mode(cmp_l)))
1773 * check for min/max. They're defined as (C semantics):
1774 * min(a, b) = a < b ? a : b
1775 * or min(a, b) = a <= b ? a : b
1776 * max(a, b) = a > b ? a : b
1777 * or max(a, b) = a >= b ? a : b
1778 * (Note we only handle float min/max here)
1780 relation = get_Cmp_relation(sel);
1782 case ir_relation_greater_equal:
1783 case ir_relation_greater:
1785 if (cmp_l == mux_true && cmp_r == mux_false)
1788 case ir_relation_less_equal:
1789 case ir_relation_less:
1791 if (cmp_l == mux_true && cmp_r == mux_false)
1794 case ir_relation_unordered_greater_equal:
1795 case ir_relation_unordered_greater:
1797 if (cmp_l == mux_false && cmp_r == mux_true)
1800 case ir_relation_unordered_less_equal:
1801 case ir_relation_unordered_less:
1803 if (cmp_l == mux_false && cmp_r == mux_true)
1814 static bool mux_is_set(ir_node *sel, ir_node *mux_true, ir_node *mux_false)
1816 ir_mode *mode = get_irn_mode(mux_true);
1819 if (!mode_is_int(mode) && !mode_is_reference(mode)
1823 if (is_Const(mux_true) && is_Const(mux_false)) {
1824 /* we can create a Set plus up to 3 instructions for any combination
1832 static bool mux_is_float_const_const(ir_node *sel, ir_node *mux_true,
1837 if (!mode_is_float(get_irn_mode(mux_true)))
1840 return is_Const(mux_true) && is_Const(mux_false);
1843 static bool mux_is_doz(ir_node *sel, ir_node *mux_true, ir_node *mux_false)
1850 ir_relation relation;
1855 mode = get_irn_mode(mux_true);
1856 if (mode_is_signed(mode) || mode_is_float(mode))
1859 relation = get_Cmp_relation(sel);
1860 cmp_left = get_Cmp_left(sel);
1861 cmp_right = get_Cmp_right(sel);
1863 /* "move" zero constant to false input */
1864 if (is_Const(mux_true) && is_Const_null(mux_true)) {
1865 ir_node *tmp = mux_false;
1866 mux_false = mux_true;
1868 relation = get_negated_relation(relation);
1870 if (!is_Const(mux_false) || !is_Const_null(mux_false))
1872 if (!is_Sub(mux_true))
1874 sub_left = get_Sub_left(mux_true);
1875 sub_right = get_Sub_right(mux_true);
1877 /* Mux(a >=u b, 0, a-b) */
1878 if ((relation & ir_relation_greater)
1879 && sub_left == cmp_left && sub_right == cmp_right)
1881 /* Mux(a <=u b, 0, b-a) */
1882 if ((relation & ir_relation_less)
1883 && sub_left == cmp_right && sub_right == cmp_left)
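/* "doz" (difference or zero): the patterns matched above compute
 * max(a - b, 0) on unsigned values, i.e. (illustrative)
 *
 *     unsigned doz(unsigned a, unsigned b) { return a > b ? a - b : 0; }
 *
 * which can be implemented branch-free (e.g. using the carry flag via sbb,
 * or a cmov) instead of a conditional jump.
 */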
1889 static int ia32_is_mux_allowed(ir_node *sel, ir_node *mux_false,
1894 /* middleend can handle some things */
1895 if (ir_is_optimizable_mux(sel, mux_false, mux_true))
1897 /* we can handle Set for all modes and compares */
1898 if (mux_is_set(sel, mux_true, mux_false))
1900 /* SSE has own min/max operations */
1901 if (ia32_cg_config.use_sse2
1902 && mux_is_float_min_max(sel, mux_true, mux_false))
1904 /* we can handle Mux(?, Const[f], Const[f]) */
1905 if (mux_is_float_const_const(sel, mux_true, mux_false)) {
1906 #ifdef FIRM_GRGEN_BE
1907 /* well, some code selectors can't handle it */
1908 if (be_transformer != TRANSFORMER_PBQP
1909 && be_transformer != TRANSFORMER_RAND)
1916 /* no support for 64bit inputs to cmov */
1917 mode = get_irn_mode(mux_true);
1918 if (get_mode_size_bits(mode) > 32)
1920 /* we can handle Abs for all modes and compares (except 64bit) */
1921 if (ir_mux_is_abs(sel, mux_false, mux_true) != 0)
1923 /* we can't handle MuxF yet */
1924 if (mode_is_float(mode))
1927 if (mux_is_doz(sel, mux_true, mux_false))
1930 /* Check Cmp before the node */
1932 ir_mode *cmp_mode = get_irn_mode(get_Cmp_left(sel));
1934 /* we can't handle 64bit compares */
1935 if (get_mode_size_bits(cmp_mode) > 32)
1938 /* we can't handle float compares */
1939 if (mode_is_float(cmp_mode))
1943 /* did we disable cmov generation? */
1944 if (!ia32_cg_config.use_cmov)
1947 /* we can use a cmov */
1951 static asm_constraint_flags_t ia32_parse_asm_constraint(const char **c)
1955 /* we already added all our simple flags to the flags modifier list in
1956 * init, so this flag is unknown to us. */
1957 return ASM_CONSTRAINT_FLAG_INVALID;
1960 static int ia32_is_valid_clobber(const char *clobber)
1962 return ia32_get_clobber_register(clobber) != NULL;
1965 static void ia32_lower_for_target(void)
1967 size_t i, n_irgs = get_irp_n_irgs();
1969 /* perform doubleword lowering */
1970 lwrdw_param_t lower_dw_params = {
1971 1, /* little endian */
1972 64, /* doubleword size */
1973 ia32_create_intrinsic_fkt,
1977 ia32_create_opcodes(&ia32_irn_ops);
1979 /* lower compound param handling
1980 * Note: we lower compound arguments ourselves, since on ia32 we don't
1981 * have hidden parameters but know where to find the structs on the stack.
1982 * (This also forces us to always allocate space for the compound arguments
1983 * on the callframe and we can't just use an arbitrary position on the
1986 lower_calls_with_compounds(LF_RETURN_HIDDEN | LF_DONT_LOWER_ARGUMENTS);
1988 /* replace floating point operations by function calls */
1989 if (ia32_cg_config.use_softfloat) {
1990 lower_floating_point();
1993 ir_prepare_dw_lowering(&lower_dw_params);
1996 for (i = 0; i < n_irgs; ++i) {
1997 ir_graph *irg = get_irp_irg(i);
1998 /* lower for mode_b stuff */
1999 ir_lower_mode_b(irg, mode_Iu);
2000 /* break up switches with wide ranges */
2001 lower_switch(irg, 4, 256, false);
2004 for (i = 0; i < n_irgs; ++i) {
2005 ir_graph *irg = get_irp_irg(i);
2006 /* Turn all small CopyBs into loads/stores, keep medium-sized CopyBs,
2007 * so we can generate rep movs later, and turn all big CopyBs into
2009 lower_CopyB(irg, 64, 8193, true);
2014 * Create the trampoline code.
2016 static ir_node *ia32_create_trampoline_fkt(ir_node *block, ir_node *mem, ir_node *trampoline, ir_node *env, ir_node *callee)
2018 ir_graph *const irg = get_irn_irg(block);
2019 ir_node * p = trampoline;
2020 ir_mode *const mode = get_irn_mode(p);
2021 ir_node *const one = new_r_Const(irg, get_mode_one(mode_Iu));
2022 ir_node *const four = new_r_Const_long(irg, mode_Iu, 4);
2026 st = new_r_Store(block, mem, p, new_r_Const_long(irg, mode_Bu, 0xb9), cons_none);
2027 mem = new_r_Proj(st, mode_M, pn_Store_M);
2028 p = new_r_Add(block, p, one, mode);
2029 st = new_r_Store(block, mem, p, env, cons_none);
2030 mem = new_r_Proj(st, mode_M, pn_Store_M);
2031 p = new_r_Add(block, p, four, mode);
2033 st = new_r_Store(block, mem, p, new_r_Const_long(irg, mode_Bu, 0xe9), cons_none);
2034 mem = new_r_Proj(st, mode_M, pn_Store_M);
2035 p = new_r_Add(block, p, one, mode);
2036 st = new_r_Store(block, mem, p, callee, cons_none);
2037 mem = new_r_Proj(st, mode_M, pn_Store_M);
2038 p = new_r_Add(block, p, four, mode);
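/* Rough byte layout of the trampoline written above (sketch):
 *
 *     offset 0: 0xB9 <env, 4 bytes>     ; mov ecx, <static link>
 *     offset 5: 0xE9 <callee, 4 bytes>  ; jmp <callee>
 *
 * 0xB9 is the x86 opcode of "mov ecx, imm32" and 0xE9 a jmp with 32bit
 * displacement; the two instructions use 10 of the 12 bytes reserved by the
 * trampoline size/alignment advertised in the backend parameters below.
 */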
2044 * Returns the libFirm configuration parameter for this backend.
2046 static const backend_params *ia32_get_libfirm_params(void)
2048 static const ir_settings_arch_dep_t ad = {
2049 1, /* also use subs */
2050 4, /* maximum shifts */
2051 63, /* maximum shift amount */
2052 ia32_evaluate_insn, /* evaluate the instruction sequence */
2054 1, /* allow Mulhs */
2055 1, /* allow Mulus */
2056 32, /* Mulh allowed up to 32 bit */
2058 static backend_params p = {
2059 1, /* support inline assembly */
2060 1, /* support Rotl nodes */
2061 0, /* little endian */
2062 1, /* modulo shift efficient */
2063 0, /* non-modulo shift not efficient */
2064 &ad, /* will be set later */
2065 ia32_is_mux_allowed,
2066 32, /* machine_size */
2067 NULL, /* float arithmetic mode, will be set below */
2068 NULL, /* long long type */
2069 NULL, /* unsigned long long type */
2070 NULL, /* long double type */
2071 12, /* size of trampoline code */
2072 4, /* alignment of trampoline code */
2073 ia32_create_trampoline_fkt,
2074 4 /* alignment of stack parameter */
2077 if (ia32_mode_E == NULL) {
2078 /* note mantissa is 64bit but with explicitly encoded 1 so the really
2079 * usable part as counted by firm is only 63 bits */
2080 ia32_mode_E = new_float_mode("E", irma_x86_extended_float, 15, 63);
2081 ia32_type_E = new_type_primitive(ia32_mode_E);
2082 set_type_size_bytes(ia32_type_E, 12);
2083 set_type_alignment_bytes(ia32_type_E, 16);
2086 ir_mode *mode_long_long
2087 = new_int_mode("long long", irma_twos_complement, 64, 1, 64);
2088 ir_type *type_long_long = new_type_primitive(mode_long_long);
2089 ir_mode *mode_unsigned_long_long
2090 = new_int_mode("unsigned long long", irma_twos_complement, 64, 0, 64);
2091 ir_type *type_unsigned_long_long
2092 = new_type_primitive(mode_unsigned_long_long);
2094 ia32_setup_cg_config();
2096 /* doesn't really belong here, but this is the earliest place the backend
2098 init_asm_constraints();
2100 p.type_long_long = type_long_long;
2101 p.type_unsigned_long_long = type_unsigned_long_long;
2103 if (ia32_cg_config.use_sse2 || ia32_cg_config.use_softfloat) {
2104 p.mode_float_arithmetic = NULL;
2105 p.type_long_double = NULL;
2107 p.mode_float_arithmetic = ia32_mode_E;
2108 p.type_long_double = ia32_type_E;
2114 * Check if the given register is callee or caller save.
2116 static int ia32_register_saved_by(const arch_register_t *reg, int callee)
2119 /* check for callee saved */
2120 if (reg->reg_class == &ia32_reg_classes[CLASS_ia32_gp]) {
2121 switch (reg->index) {
2132 /* check for caller saved */
2133 if (reg->reg_class == &ia32_reg_classes[CLASS_ia32_gp]) {
2134 switch (reg->index) {
2142 } else if (reg->reg_class == &ia32_reg_classes[CLASS_ia32_xmm]) {
2143 /* all XMM registers are caller save */
2144 return reg->index != REG_XMM_NOREG;
2145 } else if (reg->reg_class == &ia32_reg_classes[CLASS_ia32_vfp]) {
2146 /* all VFP registers are caller save */
2147 return reg->index != REG_VFP_NOREG;
2153 static const lc_opt_enum_int_items_t gas_items[] = {
2154 { "elf", OBJECT_FILE_FORMAT_ELF },
2155 { "mingw", OBJECT_FILE_FORMAT_COFF },
2156 { "macho", OBJECT_FILE_FORMAT_MACH_O },
2160 static lc_opt_enum_int_var_t gas_var = {
2161 (int*) &be_gas_object_file_format, gas_items
2164 #ifdef FIRM_GRGEN_BE
2165 static const lc_opt_enum_int_items_t transformer_items[] = {
2166 { "default", TRANSFORMER_DEFAULT },
2167 { "pbqp", TRANSFORMER_PBQP },
2168 { "random", TRANSFORMER_RAND },
2172 static lc_opt_enum_int_var_t transformer_var = {
2173 (int*)&be_transformer, transformer_items
2177 static const lc_opt_table_entry_t ia32_options[] = {
2178 LC_OPT_ENT_ENUM_INT("gasmode", "set the GAS compatibility mode", &gas_var),
2179 #ifdef FIRM_GRGEN_BE
2180 LC_OPT_ENT_ENUM_INT("transformer", "the transformer used for code selection", &transformer_var),
2182 LC_OPT_ENT_INT ("stackalign", "set power of two stack alignment for calls",
2183 &ia32_isa_template.base.stack_alignment),
2184 LC_OPT_ENT_BOOL("gprof", "create gprof profiling code", &gprof),
2188 const arch_isa_if_t ia32_isa_if = {
2190 ia32_lower_for_target,
2192 ia32_handle_intrinsics,
2194 ia32_get_libfirm_params,
2196 ia32_parse_asm_constraint,
2197 ia32_is_valid_clobber,
2200 ia32_get_pic_base, /* return node used as base in pic code addresses */
2201 ia32_before_abi, /* before abi introduce hook */
2203 ia32_before_ra, /* before register allocation hook */
2204 ia32_finish, /* called before codegen */
2205 ia32_emit, /* emit && done */
2206 ia32_register_saved_by,
2211 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_ia32)
2212 void be_init_arch_ia32(void)
2214 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
2215 lc_opt_entry_t *ia32_grp = lc_opt_get_grp(be_grp, "ia32");
2217 lc_opt_add_table(ia32_grp, ia32_options);
2218 be_register_isa_if("ia32", &ia32_isa_if);
2220 ia32_init_emitter();
2222 ia32_init_optimize();
2223 ia32_init_transform();
2225 ia32_init_architecture();