/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */
/**
 * @brief   This is the main ia32 firm backend driver.
 * @author  Christian Wuerdig
 */
#include "lc_opts_enum.h"

#include "iredges_t.h"
#include "iroptimize.h"
#include "instrument.h"

#include "../benode.h"
#include "../belower.h"
#include "../besched.h"
#include "../beirgmod.h"
#include "../be_dbgout.h"
#include "../beblocksched.h"
#include "../bemachine.h"
#include "../bespillslots.h"
#include "../bemodule.h"
#include "../begnuas.h"
#include "../bestate.h"
#include "../beflags.h"
#include "../betranshlp.h"
#include "../belistsched.h"
#include "../beabihelper.h"

#include "bearch_ia32_t.h"
#include "ia32_new_nodes.h"
#include "gen_ia32_regalloc_if.h"
#include "gen_ia32_machine.h"
#include "ia32_common_transform.h"
#include "ia32_transform.h"
#include "ia32_emitter.h"
#include "ia32_optimize.h"
#include "ia32_dbg_stat.h"
#include "ia32_finish.h"
#include "ia32_util.h"
#include "ia32_architecture.h"
#ifdef FIRM_GRGEN_BE
#include "ia32_pbqp_transform.h"

transformer_t be_transformer = TRANSFORMER_DEFAULT;
#endif
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

ir_mode *ia32_mode_fpcw = NULL;

/** The current omit-fp state */
static unsigned   ia32_curr_fp_ommitted  = 0;
static ir_type   *omit_fp_between_type   = NULL;
static ir_type   *between_type           = NULL;
static ir_entity *old_bp_ent             = NULL;
static ir_entity *ret_addr_ent           = NULL;
static ir_entity *omit_fp_ret_addr_ent   = NULL;
/**
 * The environment for the intrinsic mapping.
 */
static ia32_intrinsic_env_t intrinsic_env = {
	NULL,    /* the isa */
	NULL,    /* the irg, these entities belong to */
	NULL,    /* entity for __divdi3 library call */
	NULL,    /* entity for __moddi3 library call */
	NULL,    /* entity for __udivdi3 library call */
	NULL,    /* entity for __umoddi3 library call */
};
typedef ir_node *(*create_const_node_func) (dbg_info *dbg, ir_node *block);
/**
 * Used to create per-graph unique pseudo nodes.
 */
static inline ir_node *create_const(ir_graph *irg, ir_node **place,
                                    create_const_node_func func,
                                    const arch_register_t* reg)
{
	ir_node *block, *res;

	if (*place != NULL)
		return *place;

	block = get_irg_start_block(irg);
	res   = func(NULL, block);
	arch_set_irn_register(res, reg);
	*place = res;

	return res;
}
/* Creates the unique per irg GP NoReg node. */
ir_node *ia32_new_NoReg_gp(ir_graph *irg)
{
	ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
	return create_const(irg, &irg_data->noreg_gp, new_bd_ia32_NoReg_GP,
	                    &ia32_registers[REG_GP_NOREG]);
}

ir_node *ia32_new_NoReg_vfp(ir_graph *irg)
{
	ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
	return create_const(irg, &irg_data->noreg_vfp, new_bd_ia32_NoReg_VFP,
	                    &ia32_registers[REG_VFP_NOREG]);
}

ir_node *ia32_new_NoReg_xmm(ir_graph *irg)
{
	ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
	return create_const(irg, &irg_data->noreg_xmm, new_bd_ia32_NoReg_XMM,
	                    &ia32_registers[REG_XMM_NOREG]);
}

ir_node *ia32_new_Fpu_truncate(ir_graph *irg)
{
	ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
	return create_const(irg, &irg_data->fpu_trunc_mode, new_bd_ia32_ChangeCW,
	                    &ia32_registers[REG_FPCW]);
}
/**
 * Returns the admissible noreg register node for input register pos of node irn.
 */
static ir_node *ia32_get_admissible_noreg(ir_node *irn, int pos)
{
	ir_graph                  *irg = get_irn_irg(irn);
	const arch_register_req_t *req = arch_get_register_req(irn, pos);

	assert(req != NULL && "Missing register requirements");
	if (req->cls == &ia32_reg_classes[CLASS_ia32_gp])
		return ia32_new_NoReg_gp(irg);

	if (ia32_cg_config.use_sse2) {
		return ia32_new_NoReg_xmm(irg);
	} else {
		return ia32_new_NoReg_vfp(irg);
	}
}
static arch_irn_class_t ia32_classify(const ir_node *irn)
{
	arch_irn_class_t classification = arch_irn_class_none;

	assert(is_ia32_irn(irn));

	if (is_ia32_is_reload(irn))
		classification |= arch_irn_class_reload;

	if (is_ia32_is_spill(irn))
		classification |= arch_irn_class_spill;

	if (is_ia32_is_remat(irn))
		classification |= arch_irn_class_remat;

	return classification;
}
/**
 * The IA32 ABI callback object.
 */
typedef struct {
	be_abi_call_flags_bits_t flags; /**< The call flags. */
	ir_graph                *irg;   /**< The associated graph. */
} ia32_abi_env_t;
static ir_entity *ia32_get_frame_entity(const ir_node *irn)
{
	return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
}

static void ia32_set_frame_entity(ir_node *node, ir_entity *entity)
{
	if (is_be_node(node))
		be_node_set_frame_entity(node, entity);
	else
		set_ia32_frame_ent(node, entity);
}
static void ia32_set_frame_offset(ir_node *irn, int bias)
{
	if (get_ia32_frame_ent(irn) == NULL)
		return;

	if (is_ia32_Pop(irn) || is_ia32_PopMem(irn)) {
		ir_graph          *irg    = get_irn_irg(irn);
		be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
		if (layout->sp_relative) {
			/* Pop nodes modify the stack pointer before calculating the
			 * destination address, so fix this here */
			bias -= 4;
		}
	}
	add_ia32_am_offs_int(irn, bias);
}
static int ia32_get_sp_bias(const ir_node *node)
{
	if (is_ia32_Call(node))
		return -(int)get_ia32_call_attr_const(node)->pop;

	if (is_ia32_Push(node))
		return 4;

	if (is_ia32_Pop(node) || is_ia32_PopMem(node))
		return -4;

	if (is_ia32_Leave(node) || (be_is_Copy(node)
	    && arch_get_irn_register(node) == &ia32_registers[REG_ESP])) {
		return SP_BIAS_RESET;
	}

	return 0;
}
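/* Sign convention implied by the cases above (sketch): a positive bias
 * records that the node grows the stack (a Push reserves 4 bytes), a
 * negative one that it shrinks it (a Pop frees 4 bytes; a callee-cleanup
 * Call pops its own argument bytes). SP_BIAS_RESET marks nodes such as
 * Leave that restore esp from ebp instead of adjusting it incrementally. */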
/**
 * Generate the routine prologue.
 *
 * @param self       The callback object.
 * @param mem        A pointer to the mem node. Update this if you define new memory.
 * @param reg_map    A map mapping all callee_save/ignore/parameter registers to their defining nodes.
 * @param stack_bias Points to the current stack bias, can be modified if needed.
 *
 * @return The register which shall be used as a stack frame base.
 *
 * All nodes which define registers in @p reg_map must keep @p reg_map current.
 */
static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map, int *stack_bias)
{
	ia32_abi_env_t   *env      = (ia32_abi_env_t*)self;
	ir_graph         *irg      = env->irg;
	const arch_env_t *arch_env = be_get_irg_arch_env(irg);

	ia32_curr_fp_ommitted = env->flags.try_omit_fp;
	if (! env->flags.try_omit_fp) {
		ir_node *bl      = get_irg_start_block(env->irg);
		ir_node *curr_sp = be_abi_reg_map_get(reg_map, arch_env->sp);
		ir_node *curr_bp = be_abi_reg_map_get(reg_map, arch_env->bp);
		ir_node *noreg   = ia32_new_NoReg_gp(irg);
		ir_node *push;

		/* mark bp register as ignore */
		be_set_constr_single_reg_out(get_Proj_pred(curr_bp),
				get_Proj_proj(curr_bp), arch_env->bp, arch_register_req_type_ignore);

		/* push ebp */
		push    = new_bd_ia32_Push(NULL, bl, noreg, noreg, *mem, curr_bp, curr_sp);
		arch_irn_add_flags(push, arch_irn_flags_prolog);
		curr_sp = new_r_Proj(push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
		*mem    = new_r_Proj(push, mode_M, pn_ia32_Push_M);
		set_irn_pinned(push, op_pin_state_pinned);

		/* the push must have SP out register */
		arch_set_irn_register(curr_sp, arch_env->sp);

		/* this modifies the stack bias, because we pushed 32bit */
		*stack_bias -= 4;

		/* move esp to ebp */
		curr_bp = be_new_Copy(arch_env->bp->reg_class, bl, curr_sp);
		arch_irn_add_flags(curr_bp, arch_irn_flags_prolog);
		be_set_constr_single_reg_out(curr_bp, 0, arch_env->bp,
		                             arch_register_req_type_ignore);
		set_irn_pinned(curr_bp, op_pin_state_pinned);

		/* beware: the copy must be done before any other sp use */
		curr_sp = be_new_CopyKeep_single(arch_env->sp->reg_class, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
		arch_irn_add_flags(curr_sp, arch_irn_flags_prolog);
		be_set_constr_single_reg_out(curr_sp, 0, arch_env->sp,
		                             arch_register_req_type_produces_sp);
		set_irn_pinned(curr_sp, op_pin_state_pinned);

		be_abi_reg_map_set(reg_map, arch_env->sp, curr_sp);
		be_abi_reg_map_set(reg_map, arch_env->bp, curr_bp);

		return arch_env->bp;
	}

	return arch_env->sp;
}
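/* The node sequence built above corresponds to the classic IA32 prologue
 * (sketch):
 *     push ebp        ; save the caller's frame pointer, stack bias -4
 *     mov  ebp, esp   ; establish the new frame base
 * The CopyKeep serializes later esp uses behind the copy, so no stack
 * pointer user can be scheduled between the two instructions. */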
/**
 * Generate the routine epilogue.
 *
 * @param self    The callback object.
 * @param bl      The block for the epilogue.
 * @param mem     A pointer to the mem node. Update this if you define new memory.
 * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
 *
 * All nodes which define registers in @p reg_map must keep @p reg_map current.
 */
static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map)
{
	ia32_abi_env_t   *env      = (ia32_abi_env_t*)self;
	const arch_env_t *arch_env = be_get_irg_arch_env(env->irg);
	ir_node          *curr_sp  = be_abi_reg_map_get(reg_map, arch_env->sp);
	ir_node          *curr_bp  = be_abi_reg_map_get(reg_map, arch_env->bp);

	if (env->flags.try_omit_fp) {
		/* simply remove the stack frame here */
		curr_sp = be_new_IncSP(arch_env->sp, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK, 0);
		arch_irn_add_flags(curr_sp, arch_irn_flags_epilog);
		set_irn_pinned(curr_sp, op_pin_state_pinned);
	} else {
		ir_mode *mode_bp = arch_env->bp->reg_class->mode;

		if (ia32_cg_config.use_leave) {
			ir_node *leave;

			/* leave */
			leave   = new_bd_ia32_Leave(NULL, bl, curr_bp);
			curr_bp = new_r_Proj(leave, mode_bp, pn_ia32_Leave_frame);
			curr_sp = new_r_Proj(leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
			arch_irn_add_flags(leave, arch_irn_flags_epilog);
			set_irn_pinned(leave, op_pin_state_pinned);
		} else {
			ir_node *pop;

			/* copy ebp to esp */
			curr_sp = be_new_Copy(&ia32_reg_classes[CLASS_ia32_gp], bl, curr_bp);
			arch_set_irn_register(curr_sp, arch_env->sp);
			be_set_constr_single_reg_out(curr_sp, 0, arch_env->sp,
			                             arch_register_req_type_ignore);
			arch_irn_add_flags(curr_sp, arch_irn_flags_epilog);
			set_irn_pinned(curr_sp, op_pin_state_pinned);

			/* pop ebp */
			pop     = new_bd_ia32_PopEbp(NULL, bl, *mem, curr_sp);
			curr_bp = new_r_Proj(pop, mode_bp, pn_ia32_Pop_res);
			curr_sp = new_r_Proj(pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
			arch_irn_add_flags(pop, arch_irn_flags_epilog);
			set_irn_pinned(pop, op_pin_state_pinned);

			*mem = new_r_Proj(pop, mode_M, pn_ia32_Pop_M);
		}
		arch_set_irn_register(curr_sp, arch_env->sp);
		arch_set_irn_register(curr_bp, arch_env->bp);
	}

	be_abi_reg_map_set(reg_map, arch_env->sp, curr_sp);
	be_abi_reg_map_set(reg_map, arch_env->bp, curr_bp);
}
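/* Note: `leave` is the one-instruction form of `mov esp, ebp` followed by
 * `pop ebp`, so both branches above tear the frame down identically; the
 * choice is purely a question of which form ia32_cg_config considers
 * faster on the target CPU. */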
/**
 * Initialize the callback object.
 * @param call The call object.
 * @param irg  The graph with the method.
 * @return Some pointer. This pointer is passed to all other callback functions as self object.
 */
static void *ia32_abi_init(const be_abi_call_t *call, ir_graph *irg)
{
	ia32_abi_env_t      *env = XMALLOC(ia32_abi_env_t);
	be_abi_call_flags_t  fl  = be_abi_call_get_flags(call);
	env->flags = fl.bits;
	env->irg   = irg;
	return env;
}
/**
 * Destroy the callback object.
 * @param self The callback object.
 */
static void ia32_abi_done(void *self)
{
	free(self);
}
/**
 * Build the between type and entities if not already built.
 */
static void ia32_build_between_type(void)
{
#define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
	if (! between_type) {
		ir_type *old_bp_type   = new_type_primitive(mode_Iu);
		ir_type *ret_addr_type = new_type_primitive(mode_Iu);

		between_type = new_type_struct(IDENT("ia32_between_type"));
		old_bp_ent   = new_entity(between_type, IDENT("old_bp"), old_bp_type);
		ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type);

		set_entity_offset(old_bp_ent, 0);
		set_entity_offset(ret_addr_ent, get_type_size_bytes(old_bp_type));
		set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
		set_type_state(between_type, layout_fixed);

		omit_fp_between_type = new_type_struct(IDENT("ia32_between_type_omit_fp"));
		omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type);

		set_entity_offset(omit_fp_ret_addr_ent, 0);
		set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
		set_type_state(omit_fp_between_type, layout_fixed);
	}
#undef IDENT
}
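/* Resulting layout of the two between types (byte offsets):
 *   between_type:          0: old_bp   4: ret_addr   (size 8)
 *   omit_fp_between_type:  0: ret_addr               (size 4) */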
/**
 * Produces the type which sits between the stack args and the locals on the stack.
 * It will contain the return address and space to store the old base pointer.
 * @return The Firm type modeling the ABI between type.
 */
static ir_type *ia32_abi_get_between_type(void *self)
{
	ia32_abi_env_t *env = (ia32_abi_env_t*)self;

	ia32_build_between_type();
	return env->flags.try_omit_fp ? omit_fp_between_type : between_type;
}
/**
 * Return the stack entity that contains the return address.
 */
ir_entity *ia32_get_return_address_entity(void)
{
	ia32_build_between_type();
	return ia32_curr_fp_ommitted ? omit_fp_ret_addr_ent : ret_addr_ent;
}

/**
 * Return the stack entity that contains the frame address.
 */
ir_entity *ia32_get_frame_address_entity(void)
{
	ia32_build_between_type();
	return ia32_curr_fp_ommitted ? NULL : old_bp_ent;
}
/**
 * Get the estimated cycle count for @p irn.
 *
 * @param self The this pointer.
 * @param irn  The node.
 *
 * @return The estimated cycle count for this operation.
 */
static int ia32_get_op_estimated_cost(const ir_node *irn)
{
	int            cost;
	ia32_op_type_t op_tp;

	if (!is_ia32_irn(irn))
		return 0;

	cost  = get_ia32_latency(irn);
	op_tp = get_ia32_op_type(irn);

	if (is_ia32_CopyB(irn)) {
		cost = 250;
	}
	else if (is_ia32_CopyB_i(irn)) {
		int size = get_ia32_copyb_size(irn);
		/* use floating point arithmetic here, 4/3 in integer division is 1 */
		cost = 20 + (int)ceil((4.0 / 3.0) * size);
	}
	/* in case of address mode operations add additional cycles */
	else if (op_tp == ia32_AddrModeD || op_tp == ia32_AddrModeS) {
		/* In case of stack access and access to fixed addresses add 5 cycles
		 * (we assume they are in cache), other memory operations cost 20
		 * cycles. */
		if (is_ia32_use_frame(irn) || (
		        is_ia32_NoReg_GP(get_irn_n(irn, n_ia32_base)) &&
		        is_ia32_NoReg_GP(get_irn_n(irn, n_ia32_index))
		    )) {
			cost += 5;
		} else {
			cost += 20;
		}
	}

	return cost;
}
/**
 * Returns the inverse operation of @p irn, recalculating the argument at position @p i.
 *
 * @param irn     The original operation
 * @param i       Index of the argument we want the inverse operation to yield
 * @param inverse struct to be filled with the resulting inverse op
 * @param obstack The obstack to use for allocation of the returned nodes array
 * @return        The inverse operation or NULL if the operation is not invertible
 */
static arch_inverse_t *ia32_get_inverse(const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst)
{
	ir_graph *irg;
	ir_mode  *mode;
	ir_mode  *irn_mode;
	ir_node  *block, *noreg, *nomem;
	dbg_info *dbg;

	/* we cannot invert non-ia32 irns */
	if (! is_ia32_irn(irn))
		return NULL;

	/* operand must always be a real operand (not base, index or mem) */
	if (i != n_ia32_binary_left && i != n_ia32_binary_right)
		return NULL;

	/* we don't invert address mode operations */
	if (get_ia32_op_type(irn) != ia32_Normal)
		return NULL;

	/* TODO: adjust for new immediates... */
	ir_fprintf(stderr, "TODO: fix get_inverse for new immediates (%+F)\n",
	           irn);
	return NULL;

	/* note: the code below is unreachable until the immediate handling
	 * above has been fixed */
	irg      = get_irn_irg(irn);
	block    = get_nodes_block(irn);
	mode     = get_irn_mode(irn);
	irn_mode = get_irn_mode(irn);
	noreg    = get_irn_n(irn, 0);
	nomem    = new_r_NoMem(irg);
	dbg      = get_irn_dbg_info(irn);

	/* initialize structure */
	inverse->nodes = obstack_alloc(obst, 2 * sizeof(inverse->nodes[0]));
	inverse->costs = 0;
	inverse->n     = 1;

	switch (get_ia32_irn_opcode(irn)) {
		case iro_ia32_Add:
			if (get_ia32_immop_type(irn) == ia32_ImmConst) {
				/* we have an add with a const here */
				/* inverse == add with negated const */
				inverse->nodes[0] = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
				inverse->costs   += 1;
				copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
				set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
				set_ia32_commutative(inverse->nodes[0]);
			}
			else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
				/* we have an add with a symconst here */
				/* inverse == sub with const */
				inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
				inverse->costs   += 2;
				copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
			}
			else {
				/* normal add: inverse == sub */
				inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, i ^ 1));
				inverse->costs   += 2;
			}
			break;

		case iro_ia32_Sub:
			if (get_ia32_immop_type(irn) != ia32_ImmNone) {
				/* we have a sub with a const/symconst here */
				/* inverse == add with this const */
				inverse->nodes[0] = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
				inverse->costs   += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
				copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
			}
			else {
				/* normal sub */
				if (i == n_ia32_binary_left) {
					inverse->nodes[0] = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, 3));
				}
				else {
					inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, get_irn_n(irn, n_ia32_binary_left), (ir_node*) irn);
				}
				inverse->costs += 1;
			}
			break;

		case iro_ia32_Xor:
			if (get_ia32_immop_type(irn) != ia32_ImmNone) {
				/* xor with const: inverse = xor */
				inverse->nodes[0] = new_bd_ia32_Xor(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
				inverse->costs   += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
				copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
			}
			else {
				/* normal xor: xor is its own inverse */
				inverse->nodes[0] = new_bd_ia32_Xor(dbg, block, noreg, noreg, nomem, (ir_node *) irn, get_irn_n(irn, i));
				inverse->costs   += 1;
			}
			break;

		case iro_ia32_Not:
			inverse->nodes[0] = new_bd_ia32_Not(dbg, block, (ir_node*) irn);
			inverse->costs   += 1;
			break;

		case iro_ia32_Neg:
			inverse->nodes[0] = new_bd_ia32_Neg(dbg, block, (ir_node*) irn);
			inverse->costs   += 1;
			break;

		default:
			/* inverse operation not supported */
			return NULL;
	}

	return inverse;
}
static ir_mode *get_spill_mode_mode(const ir_mode *mode)
{
	if (mode_is_float(mode))
		return mode_E;

	return mode_Iu;
}

/**
 * Get the mode that should be used for spilling value node
 */
static ir_mode *get_spill_mode(const ir_node *node)
{
	ir_mode *mode = get_irn_mode(node);
	return get_spill_mode_mode(mode);
}
/**
 * Checks whether an addressmode reload for a node with mode mode is compatible
 * with a spillslot of mode spill_mode.
 */
static int ia32_is_spillmode_compatible(const ir_mode *mode, const ir_mode *spillmode)
{
	return !mode_is_float(mode) || mode == spillmode;
}
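/* Example: integer values can always be folded from their spill slot, but a
 * mode_D value spilled through the x87 unit lives in its slot as mode_E (see
 * get_spill_mode_mode above), so reloading it directly into an SSE operation
 * would read the wrong representation. */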
/**
 * Check if irn can load its operand at position i from memory (source addressmode).
 * @param irn The irn to be checked
 * @param i   The operand's position
 * @return    Non-zero if the operand can be loaded
 */
static int ia32_possible_memory_operand(const ir_node *irn, unsigned int i)
{
	ir_node       *op        = get_irn_n(irn, i);
	const ir_mode *mode      = get_irn_mode(op);
	const ir_mode *spillmode = get_spill_mode(op);

	if (!is_ia32_irn(irn)                              || /* must be an ia32 irn */
	    get_ia32_op_type(irn) != ia32_Normal           || /* must not already be an addressmode irn */
	    !ia32_is_spillmode_compatible(mode, spillmode) ||
	    is_ia32_use_frame(irn))                           /* must not already use frame */
		return 0;

	switch (get_ia32_am_support(irn)) {
		case ia32_am_none:
			return 0;

		case ia32_am_unary:
			if (i != n_ia32_unary_op)
				return 0;
			break;

		case ia32_am_binary:
			switch (i) {
				case n_ia32_binary_left: {
					const arch_register_req_t *req;
					if (!is_ia32_commutative(irn))
						return 0;

					/* we can't swap left/right for limited registers
					 * (As this (currently) breaks constraint handling copies) */
					req = arch_get_in_register_req(irn, n_ia32_binary_left);
					if (req->type & arch_register_req_type_limited)
						return 0;
					break;
				}

				case n_ia32_binary_right:
					break;

				default:
					return 0;
			}
			break;

		default:
			panic("Unknown AM type");
	}

	/* HACK: must not already use "real" memory.
	 * This can happen for Call and Div */
	if (!is_NoMem(get_irn_n(irn, n_ia32_mem)))
		return 0;

	return 1;
}
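/* Example of what this enables: for a commutative node such as Add, a
 * spilled right operand (or a swappable left one) can later be folded into
 * the instruction as a memory operand, e.g. `add eax, DWORD [ebp-8]` instead
 * of a separate reload followed by `add eax, ebx` (sketch). */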
static void ia32_perform_memory_operand(ir_node *irn, ir_node *spill,
                                        unsigned int i)
{
	ir_mode *load_mode;
	ir_mode *dest_op_mode;

	assert(ia32_possible_memory_operand(irn, i) && "Cannot perform memory operand change");

	set_ia32_op_type(irn, ia32_AddrModeS);

	load_mode    = get_irn_mode(get_irn_n(irn, i));
	dest_op_mode = get_ia32_ls_mode(irn);
	if (get_mode_size_bits(load_mode) <= get_mode_size_bits(dest_op_mode)) {
		set_ia32_ls_mode(irn, load_mode);
	}
	set_ia32_use_frame(irn);
	set_ia32_need_stackent(irn);

	if (i == n_ia32_binary_left                    &&
	    get_ia32_am_support(irn) == ia32_am_binary &&
	    /* immediates are only allowed on the right side */
	    !is_ia32_Immediate(get_irn_n(irn, n_ia32_binary_right))) {
		ia32_swap_left_right(irn);
		i = n_ia32_binary_right;
	}

	assert(is_NoMem(get_irn_n(irn, n_ia32_mem)));

	set_irn_n(irn, n_ia32_base, get_irg_frame(get_irn_irg(irn)));
	set_irn_n(irn, n_ia32_mem,  spill);
	set_irn_n(irn, i,           ia32_get_admissible_noreg(irn, i));
	set_ia32_is_reload(irn);
}
static const be_abi_callbacks_t ia32_abi_callbacks = {
	ia32_abi_init,
	ia32_abi_done,
	ia32_abi_get_between_type,
	ia32_abi_prologue,
	ia32_abi_epilogue
};
/* register allocator interface */
static const arch_irn_ops_t ia32_irn_ops = {
	ia32_classify,
	ia32_get_frame_entity,
	ia32_set_frame_offset,
	ia32_get_sp_bias,
	ia32_get_inverse,
	ia32_get_op_estimated_cost,
	ia32_possible_memory_operand,
	ia32_perform_memory_operand,
};
static ir_entity *mcount = NULL;
static int        gprof  = 0;
static void ia32_before_abi(ir_graph *irg)
{
	if (gprof) {
		if (mcount == NULL) {
			ir_type *tp = new_type_method(0, 0);
			ident   *id = new_id_from_str("mcount");
			mcount = new_entity(get_glob_type(), id, tp);
			/* FIXME: enter the right ld_ident here */
			set_entity_ld_ident(mcount, get_entity_ident(mcount));
			set_entity_visibility(mcount, ir_visibility_external);
		}
		instrument_initcall(irg, mcount);
	}
}
/**
 * Transforms the standard firm graph into an ia32 firm graph.
 */
static void ia32_prepare_graph(ir_graph *irg)
{
	ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);

#ifdef FIRM_GRGEN_BE
	switch (be_transformer) {
	case TRANSFORMER_DEFAULT:
		/* transform remaining nodes into assembler instructions */
		ia32_transform_graph(irg);
		break;

	case TRANSFORMER_PBQP:
	case TRANSFORMER_RAND:
		/* transform nodes into assembler instructions by PBQP magic */
		ia32_transform_graph_by_pbqp(irg);
		break;

	default:
		panic("invalid transformer");
	}
#else
	ia32_transform_graph(irg);
#endif

	/* do local optimizations (mainly CSE) */
	optimize_graph_df(irg);

	if (irg_data->dump)
		dump_ir_graph(irg, "transformed");

	/* optimize address mode */
	ia32_optimize_graph(irg);

	/* do code placement, to optimize the position of constants */
	place_code(irg);

	if (irg_data->dump)
		dump_ir_graph(irg, "place");
}
ir_node *ia32_turn_back_am(ir_node *node)
{
	dbg_info *dbgi  = get_irn_dbg_info(node);
	ir_graph *irg   = get_irn_irg(node);
	ir_node  *block = get_nodes_block(node);
	ir_node  *base  = get_irn_n(node, n_ia32_base);
	ir_node  *index = get_irn_n(node, n_ia32_index);
	ir_node  *mem   = get_irn_n(node, n_ia32_mem);
	ir_node  *noreg;

	ir_node  *load     = new_bd_ia32_Load(dbgi, block, base, index, mem);
	ir_node  *load_res = new_rd_Proj(dbgi, load, mode_Iu, pn_ia32_Load_res);

	ia32_copy_am_attrs(load, node);
	if (is_ia32_is_reload(node))
		set_ia32_is_reload(load);
	set_irn_n(node, n_ia32_mem, new_r_NoMem(irg));

	switch (get_ia32_am_support(node)) {
		case ia32_am_unary:
			set_irn_n(node, n_ia32_unary_op, load_res);
			break;

		case ia32_am_binary:
			if (is_ia32_Immediate(get_irn_n(node, n_ia32_binary_right))) {
				set_irn_n(node, n_ia32_binary_left, load_res);
			} else {
				set_irn_n(node, n_ia32_binary_right, load_res);
			}
			break;

		default:
			panic("Unknown AM type");
	}
	noreg = ia32_new_NoReg_gp(current_ir_graph);
	set_irn_n(node, n_ia32_base,  noreg);
	set_irn_n(node, n_ia32_index, noreg);
	set_ia32_am_offs_int(node, 0);
	set_ia32_am_sc(node, NULL);
	set_ia32_am_scale(node, 0);
	clear_ia32_am_sc_sign(node);

	/* rewire mem-proj */
	if (get_irn_mode(node) == mode_T) {
		const ir_edge_t *edge;
		foreach_out_edge(node, edge) {
			ir_node *out = get_edge_src_irn(edge);
			if (get_irn_mode(out) == mode_M) {
				set_Proj_pred(out, load);
				set_Proj_proj(out, pn_ia32_Load_M);
				break;
			}
		}
	}

	set_ia32_op_type(node, ia32_Normal);
	if (sched_is_scheduled(node))
		sched_add_before(node, load);

	return load_res;
}
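/* After this transformation the node operates on the Load's result like a
 * plain register instruction; roughly, `add eax, [ecx+4]` has been split
 * back into `mov ebx, [ecx+4]; add eax, ebx` (sketch, register names
 * arbitrary). */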
static ir_node *flags_remat(ir_node *node, ir_node *after)
{
	/* we should turn back source address mode when rematerializing nodes */
	ia32_op_type_t  type;
	ir_node        *block;
	ir_node        *copy;

	if (is_Block(after)) {
		block = after;
	} else {
		block = get_nodes_block(after);
	}

	type = get_ia32_op_type(node);
	switch (type) {
		case ia32_AddrModeS:
			ia32_turn_back_am(node);
			break;

		case ia32_AddrModeD:
			/* TODO implement this later... */
			panic("found DestAM with flag user %+F this should not happen", node);

		default: assert(type == ia32_Normal); break;
	}

	copy = exact_copy(node);
	set_nodes_block(copy, block);
	sched_add_after(after, copy);
	return copy;
}
/**
 * Called before the register allocator.
 */
static void ia32_before_ra(ir_graph *irg)
{
	/* setup fpu rounding modes */
	ia32_setup_fpu_mode(irg);

	/* fixup flags */
	be_sched_fix_flags(irg, &ia32_reg_classes[CLASS_ia32_flags],
	                   &flags_remat, NULL);

	be_add_missing_keeps(irg);
}
/**
 * Transforms a be_Reload into an ia32 Load.
 */
static void transform_to_Load(ir_node *node)
{
	ir_graph  *irg         = get_irn_irg(node);
	dbg_info  *dbg         = get_irn_dbg_info(node);
	ir_node   *block       = get_nodes_block(node);
	ir_entity *ent         = be_get_frame_entity(node);
	ir_mode   *mode        = get_irn_mode(node);
	ir_mode   *spillmode   = get_spill_mode(node);
	ir_node   *noreg       = ia32_new_NoReg_gp(irg);
	ir_node   *sched_point = NULL;
	ir_node   *ptr         = get_irg_frame(irg);
	ir_node   *mem         = get_irn_n(node, n_be_Reload_mem);
	ir_node   *new_op, *proj;
	const arch_register_t *reg;

	if (sched_is_scheduled(node)) {
		sched_point = sched_prev(node);
	}

	if (mode_is_float(spillmode)) {
		if (ia32_cg_config.use_sse2)
			new_op = new_bd_ia32_xLoad(dbg, block, ptr, noreg, mem, spillmode);
		else
			new_op = new_bd_ia32_vfld(dbg, block, ptr, noreg, mem, spillmode);
	}
	else if (get_mode_size_bits(spillmode) == 128) {
		/* Reload 128 bit SSE registers */
		new_op = new_bd_ia32_xxLoad(dbg, block, ptr, noreg, mem);
	}
	else {
		new_op = new_bd_ia32_Load(dbg, block, ptr, noreg, mem);
	}
	set_ia32_op_type(new_op, ia32_AddrModeS);
	set_ia32_ls_mode(new_op, spillmode);
	set_ia32_frame_ent(new_op, ent);
	set_ia32_use_frame(new_op);
	set_ia32_is_reload(new_op);

	DBG_OPT_RELOAD2LD(node, new_op);

	proj = new_rd_Proj(dbg, new_op, mode, pn_ia32_Load_res);

	if (sched_point) {
		sched_add_after(sched_point, new_op);
		sched_remove(node);
	}

	/* copy the register from the old node to the new Load */
	reg = arch_get_irn_register(node);
	arch_set_irn_register(proj, reg);

	SET_IA32_ORIG_NODE(new_op, node);

	exchange(node, proj);
}
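/* Depending on the spill mode the reload above selects (sketch):
 *   gp value:   mov reg, [ebp+ent]          (Load)
 *   x87 float:  fld [ebp+ent]               (vfld)
 *   SSE float:  movss/movsd xmm, [ebp+ent]  (xLoad)
 *   128 bit:    full-register load from the 128-bit slot (xxLoad) */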
/**
 * Transforms a be_Spill node into an ia32 Store.
 */
static void transform_to_Store(ir_node *node)
{
	ir_graph      *irg      = get_irn_irg(node);
	dbg_info      *dbg      = get_irn_dbg_info(node);
	ir_node       *block    = get_nodes_block(node);
	ir_entity     *ent      = be_get_frame_entity(node);
	const ir_node *spillval = get_irn_n(node, n_be_Spill_val);
	ir_mode       *mode     = get_spill_mode(spillval);
	ir_node       *noreg    = ia32_new_NoReg_gp(irg);
	ir_node       *nomem    = new_r_NoMem(irg);
	ir_node       *ptr      = get_irg_frame(irg);
	ir_node       *val      = get_irn_n(node, n_be_Spill_val);
	ir_node       *store;
	ir_node       *sched_point = NULL;

	if (sched_is_scheduled(node)) {
		sched_point = sched_prev(node);
	}

	if (mode_is_float(mode)) {
		if (ia32_cg_config.use_sse2)
			store = new_bd_ia32_xStore(dbg, block, ptr, noreg, nomem, val);
		else
			store = new_bd_ia32_vfst(dbg, block, ptr, noreg, nomem, val, mode);
	} else if (get_mode_size_bits(mode) == 128) {
		/* Spill 128 bit SSE registers */
		store = new_bd_ia32_xxStore(dbg, block, ptr, noreg, nomem, val);
	} else if (get_mode_size_bits(mode) == 8) {
		store = new_bd_ia32_Store8Bit(dbg, block, ptr, noreg, nomem, val);
	} else {
		store = new_bd_ia32_Store(dbg, block, ptr, noreg, nomem, val);
	}

	set_ia32_op_type(store, ia32_AddrModeD);
	set_ia32_ls_mode(store, mode);
	set_ia32_frame_ent(store, ent);
	set_ia32_use_frame(store);
	set_ia32_is_spill(store);
	SET_IA32_ORIG_NODE(store, node);
	DBG_OPT_SPILL2ST(node, store);

	if (sched_point) {
		sched_add_after(sched_point, store);
		sched_remove(node);
	}

	exchange(node, store);
}
static ir_node *create_push(ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent)
{
	dbg_info *dbg   = get_irn_dbg_info(node);
	ir_node  *block = get_nodes_block(node);
	ir_graph *irg   = get_irn_irg(node);
	ir_node  *noreg = ia32_new_NoReg_gp(irg);
	ir_node  *frame = get_irg_frame(irg);

	ir_node *push = new_bd_ia32_Push(dbg, block, frame, noreg, mem, noreg, sp);

	set_ia32_frame_ent(push, ent);
	set_ia32_use_frame(push);
	set_ia32_op_type(push, ia32_AddrModeS);
	set_ia32_ls_mode(push, mode_Is);
	set_ia32_is_spill(push);

	sched_add_before(schedpoint, push);
	return push;
}
static ir_node *create_pop(ir_node *node, ir_node *schedpoint, ir_node *sp, ir_entity *ent)
{
	dbg_info *dbg   = get_irn_dbg_info(node);
	ir_node  *block = get_nodes_block(node);
	ir_graph *irg   = get_irn_irg(node);
	ir_node  *noreg = ia32_new_NoReg_gp(irg);
	ir_node  *frame = get_irg_frame(irg);

	ir_node *pop = new_bd_ia32_PopMem(dbg, block, frame, noreg, new_r_NoMem(irg), sp);

	set_ia32_frame_ent(pop, ent);
	set_ia32_use_frame(pop);
	set_ia32_op_type(pop, ia32_AddrModeD);
	set_ia32_ls_mode(pop, mode_Is);
	set_ia32_is_reload(pop);

	sched_add_before(schedpoint, pop);
	return pop;
}
static ir_node* create_spproj(ir_node *node, ir_node *pred, int pos)
{
	dbg_info              *dbg    = get_irn_dbg_info(node);
	ir_mode               *spmode = mode_Iu;
	const arch_register_t *spreg  = &ia32_registers[REG_ESP];
	ir_node               *sp;

	sp = new_rd_Proj(dbg, pred, spmode, pos);
	arch_set_irn_register(sp, spreg);
	return sp;
}
/**
 * Transform MemPerm, currently we do this the ugly way and produce
 * push/pop into/from memory cascades. This is possible without using
 * any registers.
 */
static void transform_MemPerm(ir_node *node)
{
	ir_node         *block = get_nodes_block(node);
	ir_graph        *irg   = get_irn_irg(node);
	ir_node         *sp    = be_abi_get_ignore_irn(irg, &ia32_registers[REG_ESP]);
	int              arity = be_get_MemPerm_entity_arity(node);
	ir_node        **pops  = ALLOCAN(ir_node*, arity);
	ir_node         *in[1];
	ir_node         *keep;
	int              i;
	const ir_edge_t *edge;
	const ir_edge_t *next;

	/* create Pushs */
	for (i = 0; i < arity; ++i) {
		ir_entity *inent    = be_get_MemPerm_in_entity(node, i);
		ir_entity *outent   = be_get_MemPerm_out_entity(node, i);
		ir_type   *enttype  = get_entity_type(inent);
		unsigned   entsize  = get_type_size_bytes(enttype);
		unsigned   entsize2 = get_type_size_bytes(get_entity_type(outent));
		ir_node   *mem      = get_irn_n(node, i + 1);
		ir_node   *push;

		/* work around cases where entities have different sizes */
		if (entsize2 < entsize)
			entsize = entsize2;
		assert( (entsize == 4 || entsize == 8) && "spillslot on x86 should be 32 or 64 bit");

		push = create_push(node, node, sp, mem, inent);
		sp = create_spproj(node, push, pn_ia32_Push_stack);
		if (entsize == 8) {
			/* add another push after the first one */
			push = create_push(node, node, sp, mem, inent);
			add_ia32_am_offs_int(push, 4);
			sp = create_spproj(node, push, pn_ia32_Push_stack);
		}

		set_irn_n(node, i, new_r_Bad(irg));
	}

	/* create pops */
	for (i = arity - 1; i >= 0; --i) {
		ir_entity *inent    = be_get_MemPerm_in_entity(node, i);
		ir_entity *outent   = be_get_MemPerm_out_entity(node, i);
		ir_type   *enttype  = get_entity_type(outent);
		unsigned   entsize  = get_type_size_bytes(enttype);
		unsigned   entsize2 = get_type_size_bytes(get_entity_type(inent));
		ir_node   *pop;

		/* work around cases where entities have different sizes */
		if (entsize2 < entsize)
			entsize = entsize2;
		assert( (entsize == 4 || entsize == 8) && "spillslot on x86 should be 32 or 64 bit");

		pop = create_pop(node, node, sp, outent);
		sp = create_spproj(node, pop, pn_ia32_Pop_stack);
		if (entsize == 8) {
			add_ia32_am_offs_int(pop, 4);

			/* add another pop after the first one */
			pop = create_pop(node, node, sp, outent);
			sp = create_spproj(node, pop, pn_ia32_Pop_stack);
		}

		pops[i] = pop;
	}

	in[0] = sp;
	keep  = be_new_Keep(block, 1, in);
	sched_add_before(node, keep);

	/* exchange memprojs */
	foreach_out_edge_safe(node, edge, next) {
		ir_node *proj = get_edge_src_irn(edge);
		int p = get_Proj_proj(proj);

		assert(p < arity);

		set_Proj_pred(proj, pops[p]);
		set_Proj_proj(proj, pn_ia32_Pop_M);
	}

	/* remove memperm */
	arity = get_irn_arity(node);
	for (i = 0; i < arity; ++i) {
		set_irn_n(node, i, new_r_Bad(irg));
	}
	kill_node(node);
}
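/* Sketch of the resulting cascade for swapping two 32-bit slots A and B
 * (in-entities (A,B), out-entities (B,A)):
 *     push [A]      ; pushes run in order 0..arity-1
 *     push [B]
 *     pop  [A]      ; pops run in reverse order into the out-entities
 *     pop  [B]
 * which permutes the slots without needing a free register. */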
/**
 * Block-Walker: Calls the transform functions Spill and Reload.
 */
static void ia32_after_ra_walker(ir_node *block, void *env)
{
	ir_node *node, *prev;
	(void) env;

	/* beware: the schedule is changed here */
	for (node = sched_last(block); !sched_is_begin(node); node = prev) {
		prev = sched_prev(node);

		if (be_is_Reload(node)) {
			transform_to_Load(node);
		} else if (be_is_Spill(node)) {
			transform_to_Store(node);
		} else if (be_is_MemPerm(node)) {
			transform_MemPerm(node);
		}
	}
}
/**
 * Collects nodes that need frame entities assigned.
 */
static void ia32_collect_frame_entity_nodes(ir_node *node, void *data)
{
	be_fec_env_t  *env = (be_fec_env_t*)data;
	const ir_mode *mode;
	int            align;

	if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
		mode  = get_spill_mode_mode(get_irn_mode(node));
		align = get_mode_size_bytes(mode);
	} else if (is_ia32_irn(node)                &&
	           get_ia32_frame_ent(node) == NULL &&
	           is_ia32_use_frame(node)) {
		if (is_ia32_need_stackent(node))
			goto need_stackent;

		switch (get_ia32_irn_opcode(node)) {
need_stackent:
			case iro_ia32_Load: {
				const ia32_attr_t *attr = get_ia32_attr_const(node);

				if (attr->data.need_32bit_stackent) {
					mode = mode_Is;
				} else if (attr->data.need_64bit_stackent) {
					mode = mode_Ls;
				} else {
					mode = get_ia32_ls_mode(node);
					if (is_ia32_is_reload(node))
						mode = get_spill_mode_mode(mode);
				}
				align = get_mode_size_bytes(mode);
				break;
			}

			case iro_ia32_vfild:
			case iro_ia32_vfld:
			case iro_ia32_xLoad: {
				mode  = get_ia32_ls_mode(node);
				align = 4;
				break;
			}

			case iro_ia32_FldCW: {
				/* although 2 byte would be enough 4 byte performs best */
				mode  = mode_Iu;
				align = 4;
				break;
			}

			default:
#ifndef NDEBUG
				panic("unexpected frame user while collecting frame entity nodes");

			case iro_ia32_FnstCW:
			case iro_ia32_Store8Bit:
			case iro_ia32_Store:
			case iro_ia32_fst:
			case iro_ia32_fstp:
			case iro_ia32_vfist:
			case iro_ia32_vfisttp:
			case iro_ia32_vfst:
			case iro_ia32_xStore:
			case iro_ia32_xStoreSimple:
#endif
				return;
		}
	} else {
		return;
	}
	be_node_needs_frame_entity(env, node, mode, align);
}
/**
 * We transform Spill and Reload here. This needs to be done before
 * stack biasing otherwise we would miss the corrected offset for these nodes.
 */
static void ia32_after_ra(ir_graph *irg)
{
	be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
	bool               at_begin     = stack_layout->sp_relative ? true : false;
	be_fec_env_t      *fec_env      = be_new_frame_entity_coalescer(irg);

	/* create and coalesce frame entities */
	irg_walk_graph(irg, NULL, ia32_collect_frame_entity_nodes, fec_env);
	be_assign_entities(fec_env, ia32_set_frame_entity, at_begin);
	be_free_frame_entity_coalescer(fec_env);

	irg_block_walk_graph(irg, NULL, ia32_after_ra_walker, NULL);
}
/**
 * Last touchups for the graph before emit: x87 simulation to replace the
 * virtual with real x87 instructions, creating a block schedule and
 * peephole optimisations.
 */
static void ia32_finish(ir_graph *irg)
{
	ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);

	ia32_finish_irg(irg);

	/* we might have to rewrite x87 virtual registers */
	if (irg_data->do_x87_sim) {
		ia32_x87_simulate_graph(irg);
	}

	/* do peephole optimisations */
	ia32_peephole_optimization(irg);

	/* create block schedule, this also removes empty blocks which might
	 * produce critical edges */
	irg_data->blk_sched = be_create_block_schedule(irg);
}
/**
 * Emits the code, closes the output file and frees
 * the code generator interface.
 */
static void ia32_emit(ir_graph *irg)
{
	if (ia32_cg_config.emit_machcode) {
		ia32_gen_binary_routine(irg);
	} else {
		ia32_gen_routine(irg);
	}
}
/**
 * Returns the node representing the PIC base.
 */
static ir_node *ia32_get_pic_base(ir_graph *irg)
{
	ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
	ir_node         *block;
	ir_node         *get_eip = irg_data->get_eip;
	if (get_eip != NULL)
		return get_eip;

	block             = get_irg_start_block(irg);
	get_eip           = new_bd_ia32_GetEIP(NULL, block);
	irg_data->get_eip = get_eip;

	return get_eip;
}
/**
 * Initializes an IA32 code generator.
 */
static void ia32_init_graph(ir_graph *irg)
{
	struct obstack  *obst     = be_get_be_obst(irg);
	ia32_irg_data_t *irg_data = OALLOCZ(obst, ia32_irg_data_t);

	irg_data->dump = (be_get_irg_options(irg)->dump_flags & DUMP_BE) ? 1 : 0;

	if (gprof) {
		/* Linux gprof implementation needs base pointer */
		be_get_irg_options(irg)->omit_fp = 0;
	}

	be_birg_from_irg(irg)->isa_link = irg_data;
}
/**
 * Set output modes for GCC
 */
static const tarval_mode_info mo_integer = {
	TVO_HEX,
	"0x",
	NULL,
};

/*
 * set the tarval output mode of all integer modes to hexadecimal
 */
static void set_tarval_output_modes(void)
{
	size_t i;

	for (i = get_irp_n_modes(); i > 0;) {
		ir_mode *mode = get_irp_mode(--i);

		if (mode_is_int(mode))
			set_tarval_mode_output_option(mode, &mo_integer);
	}
}
extern const arch_isa_if_t ia32_isa_if;

/**
 * The template that generates a new ISA object.
 * Note that this template can be changed by command line
 * arguments.
 */
static ia32_isa_t ia32_isa_template = {
	{
		&ia32_isa_if,                     /* isa interface implementation */
		N_IA32_REGISTERS,
		ia32_registers,
		N_IA32_CLASSES,
		ia32_reg_classes,
		&ia32_registers[REG_ESP],         /* stack pointer register */
		&ia32_registers[REG_EBP],         /* base pointer register */
		&ia32_reg_classes[CLASS_ia32_gp], /* static link pointer register class */
		2,                                /* power of two stack alignment, 2^2 == 4 */
		NULL,                             /* main environment */
		7,                                /* costs for a spill instruction */
		5,                                /* costs for a reload instruction */
		false,                            /* no custom abi handling */
	},
	NULL,                                 /* types */
	NULL,                                 /* tv_ent */
	NULL,                                 /* abstract machine */
};
static void init_asm_constraints(void)
{
	be_init_default_asm_constraint_flags();

	asm_constraint_flags['a'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
	asm_constraint_flags['b'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
	asm_constraint_flags['c'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
	asm_constraint_flags['d'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
	asm_constraint_flags['D'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
	asm_constraint_flags['S'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
	asm_constraint_flags['Q'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
	asm_constraint_flags['q'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
	asm_constraint_flags['A'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
	asm_constraint_flags['l'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
	asm_constraint_flags['R'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
	asm_constraint_flags['r'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
	asm_constraint_flags['p'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
	asm_constraint_flags['f'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
	asm_constraint_flags['t'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
	asm_constraint_flags['u'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
	asm_constraint_flags['Y'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
	asm_constraint_flags['X'] = ASM_CONSTRAINT_FLAG_SUPPORTS_REGISTER;
	asm_constraint_flags['n'] = ASM_CONSTRAINT_FLAG_SUPPORTS_IMMEDIATE;
	asm_constraint_flags['g'] = ASM_CONSTRAINT_FLAG_SUPPORTS_IMMEDIATE;

	/* no support for autodecrement/autoincrement */
	asm_constraint_flags['<'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
	asm_constraint_flags['>'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
	/* no float consts */
	asm_constraint_flags['E'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
	asm_constraint_flags['F'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
	/* makes no sense on x86 */
	asm_constraint_flags['s'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
	/* no support for sse consts yet */
	asm_constraint_flags['C'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
	/* no support for x87 consts yet */
	asm_constraint_flags['G'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
	/* no support for mmx registers yet */
	asm_constraint_flags['y'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
	/* not available in 32bit mode */
	asm_constraint_flags['Z'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
	asm_constraint_flags['e'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;

	/* no code yet to determine register class needed... */
	asm_constraint_flags['X'] = ASM_CONSTRAINT_FLAG_NO_SUPPORT;
}
/**
 * Initializes the backend ISA.
 */
static arch_env_t *ia32_init(FILE *file_handle)
{
	ia32_isa_t *isa = XMALLOC(ia32_isa_t);

	set_tarval_output_modes();

	memcpy(isa, &ia32_isa_template, sizeof(*isa));

	if (ia32_mode_fpcw == NULL) {
		ia32_mode_fpcw = new_ir_mode("Fpcw", irms_int_number, 16, 0, irma_none, 0);
	}

	ia32_register_init();
	ia32_create_opcodes(&ia32_irn_ops);

	be_emit_init(file_handle);
	isa->types  = pmap_create();
	isa->tv_ent = pmap_create();
	isa->cpu    = ia32_init_machine_description();

	/* enter the ISA object into the intrinsic environment */
	intrinsic_env.isa = isa;

	return &isa->base;
}
/**
 * Closes the output file and frees the ISA structure.
 */
static void ia32_done(void *self)
{
	ia32_isa_t *isa = (ia32_isa_t*)self;

	/* emit now all global declarations */
	be_gas_emit_decls(isa->base.main_env);

	pmap_destroy(isa->tv_ent);
	pmap_destroy(isa->types);

	be_emit_exit();

	free(self);
}
/**
 * Get the register class which shall be used to store a value of a given mode.
 * @param self The this pointer.
 * @param mode The mode in question.
 * @return A register class which can hold values of the given mode.
 */
static const arch_register_class_t *ia32_get_reg_class_for_mode(const ir_mode *mode)
{
	if (mode_is_float(mode)) {
		return ia32_cg_config.use_sse2 ? &ia32_reg_classes[CLASS_ia32_xmm] : &ia32_reg_classes[CLASS_ia32_vfp];
	} else {
		return &ia32_reg_classes[CLASS_ia32_gp];
	}
}
/**
 * Returns the register for parameter nr.
 */
static const arch_register_t *ia32_get_RegParam_reg(unsigned cc, unsigned nr,
                                                    const ir_mode *mode)
{
	static const arch_register_t *gpreg_param_reg_fastcall[] = {
		&ia32_registers[REG_ECX],
		&ia32_registers[REG_EDX],
		NULL
	};
	static const unsigned MAXNUM_GPREG_ARGS = 3;

	static const arch_register_t *gpreg_param_reg_regparam[] = {
		&ia32_registers[REG_EAX],
		&ia32_registers[REG_EDX],
		&ia32_registers[REG_ECX]
	};

	static const arch_register_t *gpreg_param_reg_this[] = {
		&ia32_registers[REG_ECX],
		NULL,
		NULL
	};

	static const arch_register_t *fpreg_sse_param_reg_std[] = {
		&ia32_registers[REG_XMM0],
		&ia32_registers[REG_XMM1],
		&ia32_registers[REG_XMM2],
		&ia32_registers[REG_XMM3],
		&ia32_registers[REG_XMM4],
		&ia32_registers[REG_XMM5],
		&ia32_registers[REG_XMM6],
		&ia32_registers[REG_XMM7]
	};

	static const arch_register_t *fpreg_sse_param_reg_this[] = {
		NULL, /* in case of a "this" pointer, the first parameter must not be a float */
	};
	static const unsigned MAXNUM_SSE_ARGS = 8;

	if ((cc & cc_this_call) && nr == 0)
		return gpreg_param_reg_this[0];

	if (! (cc & cc_reg_param))
		return NULL;

	if (mode_is_float(mode)) {
		if (!ia32_cg_config.use_sse2 || (cc & cc_fpreg_param) == 0)
			return NULL;
		if (nr >= MAXNUM_SSE_ARGS)
			return NULL;

		if (cc & cc_this_call) {
			return fpreg_sse_param_reg_this[nr];
		}
		return fpreg_sse_param_reg_std[nr];
	} else if (mode_is_int(mode) || mode_is_reference(mode)) {
		unsigned num_regparam;

		if (get_mode_size_bits(mode) > 32)
			return NULL;

		if (nr >= MAXNUM_GPREG_ARGS)
			return NULL;

		if (cc & cc_this_call) {
			return gpreg_param_reg_this[nr];
		}
		num_regparam = cc & ~cc_bits;
		if (num_regparam == 0) {
			/* default fastcall */
			return gpreg_param_reg_fastcall[nr];
		}
		if (nr < num_regparam)
			return gpreg_param_reg_regparam[nr];
		return NULL;
	}

	panic("unknown argument mode");
}
/**
 * Get the ABI restrictions for procedure calls.
 * @param self        The this pointer.
 * @param method_type The type of the method (procedure) in question.
 * @param abi         The abi object to be modified.
 */
static void ia32_get_call_abi(const void *self, ir_type *method_type,
                              be_abi_call_t *abi)
{
	ir_type  *tp;
	ir_mode  *mode;
	unsigned  cc;
	size_t    n, i;
	unsigned  regnum;
	unsigned  pop_amount = 0;
	be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);

	(void) self;

	/* set abi flags for calls */
	call_flags.bits.left_to_right         = 0; /* always last arg first on stack */
	call_flags.bits.store_args_sequential = 0;
	/* call_flags.bits.try_omit_fp not changed: can handle both settings */
	call_flags.bits.fp_free               = 0; /* the frame pointer is fixed in IA32 */
	call_flags.bits.call_has_imm          = 0; /* No call immediate, we handle this by ourselves */

	/* set parameter passing style */
	be_abi_call_set_flags(abi, call_flags, &ia32_abi_callbacks);

	cc = get_method_calling_convention(method_type);
	if (get_method_variadicity(method_type) == variadicity_variadic) {
		/* pass all parameters of a variadic function on the stack */
		cc = cc_cdecl_set | (cc & cc_this_call);
	} else {
		if (get_method_additional_properties(method_type) & mtp_property_private &&
		    ia32_cg_config.optimize_cc) {
			/* set the fast calling conventions (allowing up to 3) */
			cc = SET_FASTCALL(cc) | 3;
		}
	}

	/* we have to pop the shadow parameter ourself for compound calls */
	if ( (get_method_calling_convention(method_type) & cc_compound_ret)
	     && !(cc & cc_reg_param)) {
		pop_amount += get_mode_size_bytes(mode_P_data);
	}

	n = get_method_n_params(method_type);
	for (i = regnum = 0; i < n; i++) {
		const arch_register_t *reg = NULL;

		tp   = get_method_param_type(method_type, i);
		mode = get_type_mode(tp);
		if (mode != NULL) {
			reg = ia32_get_RegParam_reg(cc, regnum, mode);
		}
		if (reg != NULL) {
			be_abi_call_param_reg(abi, i, reg, ABI_CONTEXT_BOTH);
			++regnum;
		} else {
			/* Micro optimisation: if the mode is shorter than 4 bytes, load 4 bytes.
			 * movl has a shorter opcode than mov[sz][bw]l */
			ir_mode *load_mode = mode;

			if (mode != NULL) {
				unsigned size = get_mode_size_bytes(mode);

				if (cc & cc_callee_clear_stk) {
					pop_amount += (size + 3U) & ~3U;
				}

				if (size < 4) load_mode = mode_Iu;
			}

			be_abi_call_param_stack(abi, i, load_mode, 4, 0, 0, ABI_CONTEXT_BOTH);
		}
	}

	be_abi_call_set_pop(abi, pop_amount);

	/* set return registers */
	n = get_method_n_ress(method_type);

	assert(n <= 2 && "more than two results not supported");

	/* In case of 64bit returns, we will have two 32bit values */
	if (n == 2) {
		tp   = get_method_res_type(method_type, 0);
		mode = get_type_mode(tp);

		assert(!mode_is_float(mode) && "two FP results not supported");

		tp   = get_method_res_type(method_type, 1);
		mode = get_type_mode(tp);

		assert(!mode_is_float(mode) && "mixed INT, FP results not supported");

		be_abi_call_res_reg(abi, 0, &ia32_registers[REG_EAX], ABI_CONTEXT_BOTH);
		be_abi_call_res_reg(abi, 1, &ia32_registers[REG_EDX], ABI_CONTEXT_BOTH);
	}
	else if (n == 1) {
		const arch_register_t *reg;

		tp   = get_method_res_type(method_type, 0);
		assert(is_atomic_type(tp));
		mode = get_type_mode(tp);

		reg = mode_is_float(mode) ? &ia32_registers[REG_VF0] : &ia32_registers[REG_EAX];

		be_abi_call_res_reg(abi, 0, reg, ABI_CONTEXT_BOTH);
	}
}
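/* Summary of the return convention encoded above (sketch): 64-bit integer
 * results are returned in the EAX/EDX pair, a single integer or pointer in
 * EAX, and a single float result on the (virtual) x87 stack top vf0. */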
/**
 * Returns the necessary byte alignment for storing a register of given class.
 */
static int ia32_get_reg_class_alignment(const arch_register_class_t *cls)
{
	ir_mode *mode  = arch_register_class_mode(cls);
	int      bytes = get_mode_size_bytes(mode);

	if (mode_is_float(mode) && bytes > 8)
		return 16;
	return bytes;
}
/**
 * Return irp irgs in the desired order.
 */
static ir_graph **ia32_get_irg_list(const void *self, ir_graph ***irg_list)
{
	(void) self;
	(void) irg_list;
	return NULL;
}
static void ia32_mark_remat(ir_node *node)
{
	if (is_ia32_irn(node)) {
		set_ia32_is_remat(node);
	}
}
/**
 * Check if Mux(sel, mux_true, mux_false) would represent a Max or Min operation
 */
static bool mux_is_float_min_max(ir_node *sel, ir_node *mux_true,
                                 ir_node *mux_false)
{
	ir_node    *cmp_l;
	ir_node    *cmp_r;
	ir_relation relation;

	if (!is_Cmp(sel))
		return false;

	cmp_l = get_Cmp_left(sel);
	cmp_r = get_Cmp_right(sel);
	if (!mode_is_float(get_irn_mode(cmp_l)))
		return false;

	/* check for min/max. They're defined as (C semantics):
	 *  min(a, b) = a < b ? a : b
	 *  or min(a, b) = a <= b ? a : b
	 *  max(a, b) = a > b ? a : b
	 *  or max(a, b) = a >= b ? a : b
	 * (Note we only handle float min/max here)
	 */
	relation = get_Cmp_relation(sel);
	switch (relation) {
	case ir_relation_greater_equal:
	case ir_relation_greater:
		/* this is a max */
		if (cmp_l == mux_true && cmp_r == mux_false)
			return true;
		break;
	case ir_relation_less_equal:
	case ir_relation_less:
		/* this is a min */
		if (cmp_l == mux_true && cmp_r == mux_false)
			return true;
		break;
	case ir_relation_unordered_greater_equal:
	case ir_relation_unordered_greater:
		/* this is a min */
		if (cmp_l == mux_false && cmp_r == mux_true)
			return true;
		break;
	case ir_relation_unordered_less_equal:
	case ir_relation_unordered_less:
		/* this is a max */
		if (cmp_l == mux_false && cmp_r == mux_true)
			return true;
		break;

	default:
		break;
	}

	return false;
}
static bool mux_is_set(ir_node *sel, ir_node *mux_true, ir_node *mux_false)
{
	ir_mode *mode = get_irn_mode(mux_true);
	(void) sel;

	if (!mode_is_int(mode) && !mode_is_reference(mode)
	    && mode != mode_b)
		return false;

	if (is_Const(mux_true) && is_Const(mux_false)) {
		/* we can create a set plus up to 3 instructions for any combination
		 * of constants */
		return true;
	}

	return false;
}
static bool mux_is_float_const_const(ir_node *sel, ir_node *mux_true,
                                     ir_node *mux_false)
{
	(void) sel;

	if (!mode_is_float(get_irn_mode(mux_true)))
		return false;

	return is_Const(mux_true) && is_Const(mux_false);
}
static bool mux_is_doz(ir_node *sel, ir_node *mux_true, ir_node *mux_false)
{
	ir_node    *cmp_left;
	ir_node    *cmp_right;
	ir_node    *sub_left;
	ir_node    *sub_right;
	ir_mode    *mode;
	ir_relation relation;

	if (!is_Cmp(sel))
		return false;

	mode = get_irn_mode(mux_true);
	if (mode_is_signed(mode) || mode_is_float(mode))
		return false;

	relation  = get_Cmp_relation(sel);
	cmp_left  = get_Cmp_left(sel);
	cmp_right = get_Cmp_right(sel);

	/* "move" zero constant to false input */
	if (is_Const(mux_true) && is_Const_null(mux_true)) {
		ir_node *tmp = mux_false;
		mux_false = mux_true;
		mux_true  = tmp;
		relation  = get_negated_relation(relation);
	}
	if (!is_Const(mux_false) || !is_Const_null(mux_false))
		return false;
	if (!is_Sub(mux_true))
		return false;
	sub_left  = get_Sub_left(mux_true);
	sub_right = get_Sub_right(mux_true);

	/* Mux(a >=u b, 0, a-b) */
	if ((relation & ir_relation_greater)
	    && sub_left == cmp_left && sub_right == cmp_right)
		return true;
	/* Mux(a <=u b, 0, b-a) */
	if ((relation & ir_relation_less)
	    && sub_left == cmp_right && sub_right == cmp_left)
		return true;

	return false;
}
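/* "doz" is the "difference or zero" idiom, e.g. doz(a, b) = a > b ? a - b : 0
 * for unsigned a and b. On IA32 it can be lowered branchlessly (sketch):
 * subtract, then mask the result with the borrow flag via sbb/and. */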
static int ia32_is_mux_allowed(ir_node *sel, ir_node *mux_false,
                               ir_node *mux_true)
{
	ir_mode *mode;

	/* we can handle Set for all modes and compares */
	if (mux_is_set(sel, mux_true, mux_false))
		return true;
	/* SSE has own min/max operations */
	if (ia32_cg_config.use_sse2
	    && mux_is_float_min_max(sel, mux_true, mux_false))
		return true;
	/* we can handle Mux(?, Const[f], Const[f]) */
	if (mux_is_float_const_const(sel, mux_true, mux_false)) {
#ifdef FIRM_GRGEN_BE
		/* well, some code selectors can't handle it */
		if (be_transformer != TRANSFORMER_PBQP
		    && be_transformer != TRANSFORMER_RAND)
			return true;
#else
		return true;
#endif
	}

	/* no support for 64bit inputs to cmov */
	mode = get_irn_mode(mux_true);
	if (get_mode_size_bits(mode) > 32)
		return false;
	/* we can handle Abs for all modes and compares (except 64bit) */
	if (be_mux_is_abs(sel, mux_true, mux_false) != 0)
		return true;
	/* we can't handle MuxF yet */
	if (mode_is_float(mode))
		return false;

	if (mux_is_doz(sel, mux_true, mux_false))
		return true;

	/* Check Cmp before the node */
	if (is_Cmp(sel)) {
		ir_mode *cmp_mode = get_irn_mode(get_Cmp_left(sel));

		/* we can't handle 64bit compares */
		if (get_mode_size_bits(cmp_mode) > 32)
			return false;

		/* we can't handle float compares */
		if (mode_is_float(cmp_mode))
			return false;
	}

	/* did we disable cmov generation? */
	if (!ia32_cg_config.use_cmov)
		return false;

	/* we can use a cmov */
	return true;
}
static asm_constraint_flags_t ia32_parse_asm_constraint(const char **c)
{
	(void) c;

	/* we already added all our simple flags to the flags modifier list in
	 * init, so this flag we don't know. */
	return ASM_CONSTRAINT_FLAG_INVALID;
}
static int ia32_is_valid_clobber(const char *clobber)
{
	return ia32_get_clobber_register(clobber) != NULL;
}
static ir_node *ia32_create_set(ir_node *cond)
{
	/* ia32-set function produces 8-bit results which have to be converted */
	ir_node *set   = ir_create_mux_set(cond, mode_Bu);
	ir_node *block = get_nodes_block(set);
	return new_r_Conv(block, set, mode_Iu);
}
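/* This mirrors the usual x86 pattern (sketch): `setcc %al` only writes the
 * low byte, so a zero-extension such as `movzbl %al, %eax` is needed to get
 * a full 32-bit 0/1 value; the Conv to mode_Iu models that extension. */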
static void ia32_lower_for_target(void)
{
	size_t i, n_irgs = get_irp_n_irgs();
	lower_mode_b_config_t lower_mode_b_config = {
		mode_Iu,  /* lowered mode */
		ia32_create_set,
		0,        /* don't lower direct compares */
	};
	lower_params_t params = {
		4,                                     /* def_ptr_alignment */
		LF_COMPOUND_RETURN | LF_RETURN_HIDDEN, /* flags */
		ADD_HIDDEN_ALWAYS_IN_FRONT,            /* hidden_params */
		NULL,                                  /* find pointer type */
		NULL,                                  /* ret_compound_in_regs */
	};

	/* perform doubleword lowering */
	lwrdw_param_t lower_dw_params = {
		1,  /* little endian */
		64, /* doubleword size */
		ia32_create_intrinsic_fkt,
		&intrinsic_env,
	};

	/* lower compound param handling */
	lower_calls_with_compounds(&params);

	lower_dw_ops(&lower_dw_params);

	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		/* lower for mode_b stuff */
		ir_lower_mode_b(irg, &lower_mode_b_config);
		/* break up switches with wide ranges */
		lower_switch(irg, 256, true);
	}
}
/**
 * Create the trampoline code.
 */
static ir_node *ia32_create_trampoline_fkt(ir_node *block, ir_node *mem, ir_node *trampoline, ir_node *env, ir_node *callee)
{
	ir_graph *irg  = get_irn_irg(block);
	ir_node  *p    = trampoline;
	ir_mode  *mode = get_irn_mode(p);
	ir_node  *st;

	/* mov  ecx, <env> */
	st  = new_r_Store(block, mem, p, new_r_Const_long(irg, mode_Bu, 0xb9), cons_none);
	mem = new_r_Proj(st, mode_M, pn_Store_M);
	p   = new_r_Add(block, p, new_r_Const_long(irg, mode_Iu, 1), mode);
	st  = new_r_Store(block, mem, p, env, cons_none);
	mem = new_r_Proj(st, mode_M, pn_Store_M);
	p   = new_r_Add(block, p, new_r_Const_long(irg, mode_Iu, 4), mode);
	/* jmp  <callee> */
	st  = new_r_Store(block, mem, p, new_r_Const_long(irg, mode_Bu, 0xe9), cons_none);
	mem = new_r_Proj(st, mode_M, pn_Store_M);
	p   = new_r_Add(block, p, new_r_Const_long(irg, mode_Iu, 1), mode);
	st  = new_r_Store(block, mem, p, callee, cons_none);
	mem = new_r_Proj(st, mode_M, pn_Store_M);
	p   = new_r_Add(block, p, new_r_Const_long(irg, mode_Iu, 4), mode);

	return mem;
}
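/* Byte layout of the 12-byte trampoline written above (matching the
 * trampoline size announced in ia32_get_libfirm_params):
 *   offset 0:  b9 <env:4>    mov ecx, <env>    ; static chain in ecx
 *   offset 5:  e9 <dest:4>   jmp <callee>
 * Note that 0xe9 is the rel32 jump opcode, so <dest> is interpreted
 * relative to the end of the instruction (sketch of the intended
 * encoding). */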
/**
 * Returns the libFirm configuration parameter for this backend.
 */
static const backend_params *ia32_get_libfirm_params(void)
{
	static const ir_settings_arch_dep_t ad = {
		1,                   /* also use subs */
		4,                   /* maximum shifts */
		63,                  /* maximum shift amount */
		ia32_evaluate_insn,  /* evaluate the instruction sequence */

		1,   /* allow Mulhs */
		1,   /* allow Mulus */
		32,  /* Mulh allowed up to 32 bit */
	};
	static backend_params p = {
		1,     /* support inline assembly */
		1,     /* support Rotl nodes */
		0,     /* little endian */
		NULL,  /* will be set later */
		ia32_is_mux_allowed,
		NULL,  /* float arithmetic mode, will be set below */
		12,    /* size of trampoline code */
		4,     /* alignment of trampoline code */
		ia32_create_trampoline_fkt,
		4      /* alignment of stack parameter */
	};

	ia32_setup_cg_config();

	/* doesn't really belong here, but this is the earliest place the backend
	 * is called... */
	init_asm_constraints();

	p.dep_param = &ad;
	if (! ia32_cg_config.use_sse2)
		p.mode_float_arithmetic = mode_E;
	return &p;
}
static const lc_opt_enum_int_items_t gas_items[] = {
	{ "elf",   OBJECT_FILE_FORMAT_ELF },
	{ "mingw", OBJECT_FILE_FORMAT_COFF },
	{ "macho", OBJECT_FILE_FORMAT_MACH_O },
	{ NULL,    0 }
};

static lc_opt_enum_int_var_t gas_var = {
	(int*) &be_gas_object_file_format, gas_items
};

#ifdef FIRM_GRGEN_BE
static const lc_opt_enum_int_items_t transformer_items[] = {
	{ "default", TRANSFORMER_DEFAULT },
	{ "pbqp",    TRANSFORMER_PBQP },
	{ "random",  TRANSFORMER_RAND },
	{ NULL,      0 }
};

static lc_opt_enum_int_var_t transformer_var = {
	(int*)&be_transformer, transformer_items
};
#endif

static const lc_opt_table_entry_t ia32_options[] = {
	LC_OPT_ENT_ENUM_INT("gasmode", "set the GAS compatibility mode", &gas_var),
#ifdef FIRM_GRGEN_BE
	LC_OPT_ENT_ENUM_INT("transformer", "the transformer used for code selection", &transformer_var),
#endif
	LC_OPT_ENT_INT ("stackalign", "set power of two stack alignment for calls",
	                &ia32_isa_template.base.stack_alignment),
	LC_OPT_ENT_BOOL("gprof", "create gprof profiling code", &gprof),
	LC_OPT_LAST
};
const arch_isa_if_t ia32_isa_if = {
	ia32_init,
	ia32_lower_for_target,
	ia32_done,
	ia32_handle_intrinsics,
	ia32_get_reg_class_for_mode,
	ia32_get_call_abi,
	ia32_get_reg_class_alignment,
	ia32_get_libfirm_params,
	ia32_get_irg_list,
	ia32_mark_remat,
	ia32_parse_asm_constraint,
	ia32_is_valid_clobber,

	ia32_init_graph,
	ia32_get_pic_base,   /* return node used as base in pic code addresses */
	ia32_before_abi,     /* before abi introduce hook */
	ia32_prepare_graph,
	ia32_before_ra,      /* before register allocation hook */
	ia32_after_ra,       /* after register allocation hook */
	ia32_finish,         /* called before codegen */
	ia32_emit,           /* emit && done */
};
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_ia32);
void be_init_arch_ia32(void)
{
	lc_opt_entry_t *be_grp   = lc_opt_get_grp(firm_opt_get_root(), "be");
	lc_opt_entry_t *ia32_grp = lc_opt_get_grp(be_grp, "ia32");

	lc_opt_add_table(ia32_grp, ia32_options);
	be_register_isa_if("ia32", &ia32_isa_if);

	FIRM_DBG_REGISTER(dbg, "firm.be.ia32.cg");

	ia32_init_emitter();
	ia32_init_finish();
	ia32_init_optimize();
	ia32_init_transform();
	ia32_init_x87();
	ia32_init_architecture();
}