/*
 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @brief   The main amd64 backend driver file.
 * @version $Id: bearch_amd64.c 26909 2010-01-05 15:56:54Z matze $
 */
#include "../bearch.h"
#include "../benode.h"
#include "../belower.h"
#include "../besched.h"
#include "../bemodule.h"
#include "../begnuas.h"
#include "../belistsched.h"
#include "../beflags.h"
#include "../bespillslots.h"

#include "bearch_amd64_t.h"

#include "amd64_new_nodes.h"
#include "gen_amd64_regalloc_if.h"
#include "amd64_transform.h"
#include "amd64_emitter.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
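/*
 * arch_irn_ops callbacks: the generic backend uses these to classify nodes,
 * to find out which nodes reference the stack frame, and to patch the final
 * frame offsets into those nodes once the stack layout is known.
 */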
static arch_irn_class_t amd64_classify(const ir_node *irn)
{
	/* nothing special to report for any amd64 node yet */
	(void) irn;
	return 0;
}
static ir_entity *amd64_get_frame_entity(const ir_node *node)
{
	if (is_amd64_FrameAddr(node)) {
		const amd64_SymConst_attr_t *attr = get_irn_generic_attr_const(node);
		return attr->entity;
	} else if (is_amd64_Store(node)) {
		const amd64_SymConst_attr_t *attr = get_irn_generic_attr_const(node);
		return attr->entity;
	} else if (is_amd64_Load(node)) {
		const amd64_SymConst_attr_t *attr = get_irn_generic_attr_const(node);
		return attr->entity;
	}

	/* TODO: return the ir_entity assigned to the frame */
	return NULL;
}
/**
 * This function is called by the generic backend to correct offsets for
 * nodes accessing the stack.
 */
static void amd64_set_frame_offset(ir_node *irn, int offset)
{
	if (is_amd64_FrameAddr(irn)) {
		amd64_SymConst_attr_t *attr = get_irn_generic_attr(irn);
		attr->fp_offset += offset;
	} else if (is_amd64_Store(irn)) {
		amd64_SymConst_attr_t *attr = get_irn_generic_attr(irn);
		attr->fp_offset += offset;
	} else if (is_amd64_Load(irn)) {
		amd64_SymConst_attr_t *attr = get_irn_generic_attr(irn);
		attr->fp_offset += offset;
	}
}
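/*
 * amd64_get_frame_entity() and amd64_set_frame_offset() work as a pair: the
 * generic stack frame handling first asks which entity a node refers to,
 * computes the final frame layout, and then calls back here so that the
 * entity's offset ends up in the node's fp_offset attribute.
 */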
static int amd64_get_sp_bias(const ir_node *irn)
{
	/* no nodes modify the stack pointer by a fixed bias yet */
	(void) irn;
	return 0;
}
/* fill register allocator interface */

static const arch_irn_ops_t amd64_irn_ops = {
	amd64_get_frame_entity,
	amd64_set_frame_offset,
	NULL, /* get_inverse */
	NULL, /* get_op_estimated_cost */
	NULL, /* possible_memory_operand */
	NULL, /* perform_memory_operand */
/**
 * Transforms the standard firm graph into
 * an amd64 firm graph.
 */
static void amd64_prepare_graph(void *self)
{
	amd64_code_gen_t *cg = self;

	amd64_transform_graph(cg);

	dump_ir_graph(cg->irg, "transformed");
}
/**
 * Called immediately before the emit phase.
 */
static void amd64_finish_irg(void *self)
{
	amd64_code_gen_t *cg = self;
	ir_graph *irg = cg->irg;

	dump_ir_graph(irg, "amd64-finished");
}
static void amd64_before_ra(void *self)
{
	amd64_code_gen_t *cg = self;

	be_sched_fix_flags(cg->irg, &amd64_reg_classes[CLASS_amd64_flags], 0);
}
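/*
 * After register allocation the generic spill phase has inserted be_Spill and
 * be_Reload nodes.  The two helpers below rewrite them into real amd64 memory
 * operations:
 *
 *     be_Reload -> amd64_Load(frame, mem) + Proj(res)
 *     be_Spill  -> amd64_Store(frame, val, mem)
 *
 * keeping the scheduling position, the assigned register and the frame entity
 * of the original node.
 */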
static void transform_Reload(ir_node *node)
{
	ir_graph *irg = get_irn_irg(node);
	ir_node *block = get_nodes_block(node);
	dbg_info *dbgi = get_irn_dbg_info(node);
	ir_node *ptr = get_irg_frame(irg);
	ir_node *mem = get_irn_n(node, be_pos_Reload_mem);
	ir_mode *mode = get_irn_mode(node);
	ir_entity *entity = be_get_frame_entity(node);
	const arch_register_t *reg;
	ir_node *load;
	ir_node *proj;

	ir_node *sched_point = sched_prev(node);

	load = new_bd_amd64_Load(dbgi, block, ptr, mem, entity);
	sched_add_after(sched_point, load);

	proj = new_rd_Proj(dbgi, load, mode, pn_amd64_Load_res);

	reg = arch_get_irn_register(node);
	arch_set_irn_register(proj, reg);

	exchange(node, proj);
}
static void transform_Spill(ir_node *node)
{
	ir_graph *irg = get_irn_irg(node);
	ir_node *block = get_nodes_block(node);
	dbg_info *dbgi = get_irn_dbg_info(node);
	ir_node *ptr = get_irg_frame(irg);
	ir_node *mem = new_NoMem();
	ir_node *val = get_irn_n(node, be_pos_Spill_val);
	//ir_mode *mode = get_irn_mode(val);
	ir_entity *entity = be_get_frame_entity(node);
	ir_node *sched_point;
	ir_node *store;

	sched_point = sched_prev(node);
	store = new_bd_amd64_Store(dbgi, block, ptr, val, mem, entity);

	sched_add_after(sched_point, store);

	exchange(node, store);
}
static void amd64_after_ra_walker(ir_node *block, void *data)
{
	ir_node *node, *prev;

	for (node = sched_last(block); !sched_is_begin(node); node = prev) {
		prev = sched_prev(node);

		if (be_is_Reload(node)) {
			transform_Reload(node);
		} else if (be_is_Spill(node)) {
			transform_Spill(node);
		}
	}
}
static void amd64_after_ra(void *self)
{
	amd64_code_gen_t *cg = self;
	be_coalesce_spillslots(cg->irg);

	irg_block_walk_graph(cg->irg, NULL, amd64_after_ra_walker, NULL);
}
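/*
 * Note the ordering above: spill slots are coalesced while the generic
 * be_Spill/be_Reload nodes still exist; only afterwards does the walker
 * replace them with amd64 Store/Load nodes.
 */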
/**
 * Emits the code, closes the output file and frees
 * the code generator interface.
 */
static void amd64_emit_and_done(void *self)
{
	amd64_code_gen_t *cg = self;
	ir_graph *irg = cg->irg;

	amd64_gen_routine(cg, irg);

	/* de-allocate code generator */
	free(cg);
}
static void *amd64_cg_init(ir_graph *irg);
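/*
 * The hooks below are invoked by the generic backend driver roughly in this
 * order: amd64_cg_init (once per graph), amd64_prepare_graph (transformation
 * into amd64 nodes), amd64_before_ra, register allocation, amd64_after_ra,
 * amd64_finish_irg and finally amd64_emit_and_done.
 */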
static const arch_code_generator_if_t amd64_code_gen_if = {
	amd64_cg_init,
	NULL, /* get_pic_base hook */
	NULL, /* before abi introduce hook */
	amd64_prepare_graph,
	NULL, /* spill hook */
	amd64_before_ra, /* before register allocation hook */
	amd64_after_ra, /* after register allocation hook */
	amd64_finish_irg,
	amd64_emit_and_done
};
/**
 * Initializes the code generator.
 */
static void *amd64_cg_init(ir_graph *irg)
{
	const arch_env_t *arch_env = be_get_irg_arch_env(irg);
	amd64_isa_t *isa = (amd64_isa_t *) arch_env;
	amd64_code_gen_t *cg = XMALLOC(amd64_code_gen_t);

	cg->impl = &amd64_code_gen_if;
	cg->irg = irg;
	cg->isa = isa;
	cg->dump = (be_get_irg_options(irg)->dump_flags & DUMP_BE) ? 1 : 0;

	return (arch_code_generator_t *)cg;
}
typedef ir_node *(*create_const_node_func) (dbg_info *dbg, ir_node *block);

/**
 * Used to create per-graph unique pseudo nodes.
 */
static inline ir_node *create_const(amd64_code_gen_t *cg, ir_node **place,
                                    create_const_node_func func,
                                    const arch_register_t* reg)
{
	ir_node *block, *res;

	if (*place != NULL)
		return *place;

	block = get_irg_start_block(cg->irg);
	res = func(NULL, block);
	arch_set_irn_register(res, reg);
	*place = res;

	return res;
}
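/*
 * Typical use (sketch only, the field and constructor names are hypothetical):
 * a backend caches such pseudo nodes in its code generator struct, e.g.
 *
 *     ir_node *noreg = create_const(cg, &cg->noreg_gp,
 *                                   new_bd_amd64_NoReg_GP, &amd64_gp_regs[REG_...]);
 *
 * so that each graph materialises at most one instance of the node.
 */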
const arch_isa_if_t amd64_isa_if;
static amd64_isa_t amd64_isa_template = {
	{
		&amd64_isa_if, /* isa interface implementation */
		&amd64_gp_regs[REG_RSP], /* stack pointer register */
		&amd64_gp_regs[REG_RBP], /* base pointer register */
		&amd64_reg_classes[CLASS_amd64_gp], /* link pointer register class */
		-1, /* stack direction */
		3, /* power of two stack alignment for calls, 2^3 == 8 */
		NULL, /* main environment */
		7, /* costs for a spill instruction */
		5, /* costs for a reload instruction */
		false, /* no custom abi handling */
	},
};
/**
 * Initializes the backend ISA
 */
static arch_env_t *amd64_init(FILE *outfile)
{
	static int run_once = 0;
	amd64_isa_t *isa;

	if (run_once)
		return NULL;
	run_once = 1;

	isa = XMALLOC(amd64_isa_t);
	memcpy(isa, &amd64_isa_template, sizeof(*isa));

	be_emit_init(outfile);

	amd64_register_init();
	amd64_create_opcodes(&amd64_irn_ops);

	return &isa->arch_env;
}
/**
 * Closes the output file and frees the ISA structure.
 */
static void amd64_done(void *self)
{
	amd64_isa_t *isa = self;

	/* now emit all global declarations */
	be_gas_emit_decls(isa->arch_env.main_env);

	be_emit_exit();
	free(self);
}
static unsigned amd64_get_n_reg_class(void)
{
	return N_CLASSES;
}
static const arch_register_class_t *amd64_get_reg_class(unsigned i)
{
	assert(i < N_CLASSES);
	return &amd64_reg_classes[i];
}
/**
 * Get the register class which shall be used to store a value of a given mode.
 * @param mode The mode in question.
 * @return A register class which can hold values of the given mode.
 */
static const arch_register_class_t *amd64_get_reg_class_for_mode(const ir_mode *mode)
{
	assert(!mode_is_float(mode));
	return &amd64_reg_classes[CLASS_amd64_gp];
}
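/*
 * Floating point modes are rejected here because the backend does not model
 * the SSE register file yet; every other value ends up in the single general
 * purpose register class.
 */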
typedef struct {
	be_abi_call_flags_bits_t flags;
	const arch_env_t *arch_env;
} amd64_abi_env_t;
static void *amd64_abi_init(const be_abi_call_t *call, const arch_env_t *arch_env, ir_graph *irg)
{
	amd64_abi_env_t *env = XMALLOC(amd64_abi_env_t);
	be_abi_call_flags_t fl = be_abi_call_get_flags(call);
	env->flags = fl.bits;

	env->arch_env = arch_env;

	return env;
}
/**
 * Get the between type for that call.
 * @param self The callback object.
 * @return The between type for that call.
 */
static ir_type *amd64_get_between_type(void *self)
{
	static ir_type *between_type = NULL;
	static ir_entity *old_bp_ent = NULL;

	if (between_type == NULL) {
		ir_entity *ret_addr_ent;
		ir_type *ret_addr_type = new_type_primitive(mode_P);
		ir_type *old_bp_type = new_type_primitive(mode_P);

		between_type = new_type_class(new_id_from_str("amd64_between_type"));
		old_bp_ent = new_entity(between_type, new_id_from_str("old_bp"), old_bp_type);
		ret_addr_ent = new_entity(between_type, new_id_from_str("ret_addr"), ret_addr_type);

		set_entity_offset(old_bp_ent, 0);
		set_entity_offset(ret_addr_ent, get_type_size_bytes(old_bp_type));
		set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
	}

	return between_type;
}
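/*
 * Layout of the between type, i.e. the piece of the stack frame that sits
 * between the caller's and the callee's part (both entities are pointer
 * sized, 8 bytes on amd64):
 *
 *     offset 0: old_bp   (saved base pointer)
 *     offset 8: ret_addr (return address pushed by the call)
 */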
/**
 * Build the prologue, return the BASE POINTER register
 */
static const arch_register_t *amd64_abi_prologue(void *self, ir_node **mem,
                                                 pmap *reg_map, int *stack_bias)
{
	amd64_abi_env_t *env = self;
	const arch_env_t *aenv = env->arch_env;

	if (!env->flags.try_omit_fp) {
		/* FIXME: the usual ABI prologue should eventually be
		 * generated here */
		return env->arch_env->bp;
	}

	return env->arch_env->sp;
}
/* Build the epilogue */
static void amd64_abi_epilogue(void *self, ir_node *bl, ir_node **mem,
                               pmap *reg_map)
{
	amd64_abi_env_t *env = self;
	const arch_env_t *aenv = env->arch_env;
	ir_node *curr_sp = be_abi_reg_map_get(reg_map, aenv->sp);
	ir_node *curr_bp = be_abi_reg_map_get(reg_map, aenv->bp);

	if (env->flags.try_omit_fp) {
		curr_sp = be_new_IncSP(aenv->sp, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK, 0);
	}

	be_abi_reg_map_set(reg_map, aenv->sp, curr_sp);
	be_abi_reg_map_set(reg_map, aenv->bp, curr_bp);
}
static const be_abi_callbacks_t amd64_abi_callbacks = {
	amd64_abi_init,
	free,
	amd64_get_between_type,
	amd64_abi_prologue,
	amd64_abi_epilogue,
};
static const arch_register_t *gpreg_param_reg_std[] = {
	&amd64_gp_regs[REG_RDI],
	&amd64_gp_regs[REG_RSI],
	&amd64_gp_regs[REG_RDX],
	&amd64_gp_regs[REG_RCX],
	&amd64_gp_regs[REG_R8],
	&amd64_gp_regs[REG_R9],
};
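/* These are the integer/pointer argument registers of the x86-64 System V
 * ABI, in parameter order; amd64_get_RegParam_reg() below simply indexes
 * into this table. */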
static const arch_register_t *amd64_get_RegParam_reg(int n)
{
	assert(n >= 0 && n < 6 && "register param out of range");
	return gpreg_param_reg_std[n];
}
/**
 * Get the ABI restrictions for procedure calls.
 * @param self The this pointer.
 * @param method_type The type of the method (procedure) in question.
 * @param abi The abi object to be modified
 */
static void amd64_get_call_abi(const void *self, ir_type *method_type,
                               be_abi_call_t *abi)
{
	ir_type *tp;
	ir_mode *mode;
	int i, n = get_method_n_params(method_type);
	be_abi_call_flags_t call_flags;
	int no_reg = 0;

	/* set abi flags for calls */
	call_flags.bits.left_to_right = 0;
	call_flags.bits.store_args_sequential = 0;
	call_flags.bits.try_omit_fp = 1;
	call_flags.bits.fp_free = 0;
	call_flags.bits.call_has_imm = 1;

	/* set stack parameter passing style */
	be_abi_call_set_flags(abi, call_flags, &amd64_abi_callbacks);
	for (i = 0; i < n; i++) {
		tp = get_method_param_type(method_type, i);
		mode = get_type_mode(tp);
		//d// printf ("MODE %p %p XX %d\n", mode, mode_Iu, i);

		if (!no_reg && i < 6 && mode_is_data(mode)) {
			//d// printf("TEST%d\n", i);
			be_abi_call_param_reg(abi, i, amd64_get_RegParam_reg(i),
			                      ABI_CONTEXT_BOTH);
		} else {
			/* default: all remaining parameters on the stack */
			no_reg = 1;
			be_abi_call_param_stack(abi, i, mode, 8, 0, 0, ABI_CONTEXT_BOTH);
		}
	}
	/* TODO: set correct return register */
	/* default: the (integer) return value is passed in RAX */
	if (get_method_n_ress(method_type) > 0) {
		tp = get_method_res_type(method_type, 0);
		mode = get_type_mode(tp);

		/* FIXME: No floating point yet */
		/* be_abi_call_res_reg(abi, 0,
		       mode_is_float(mode) ? &amd64_fp_regs[REG_F0] : &amd64_gp_regs[REG_R0], ABI_CONTEXT_BOTH) */;

		be_abi_call_res_reg(abi, 0,
		                    &amd64_gp_regs[REG_RAX], ABI_CONTEXT_BOTH);
	}
}
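/*
 * The convention set up above follows the x86-64 System V ABI as far as the
 * backend can handle it: the first six integer/pointer arguments are passed
 * in RDI, RSI, RDX, RCX, R8 and R9, everything else goes onto the stack in
 * 8-byte slots, and the integer return value is expected in RAX.  Floating
 * point arguments and results (XMM registers) are not supported yet.
 */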
static int amd64_to_appear_in_schedule(void *block_env, const ir_node *irn)
{
	(void) block_env;

	if (!is_amd64_irn(irn))
		return -1;

	return 1;
}
/**
 * Returns the code generator interface.
 */
static const arch_code_generator_if_t *amd64_get_code_generator_if(
		void *self)
{
	(void) self;
	return &amd64_code_gen_if;
}
list_sched_selector_t amd64_sched_selector;

/**
 * Returns the trivial scheduler selector with to_appear_in_schedule() overridden.
 */
static const list_sched_selector_t *amd64_get_list_sched_selector(
		const void *self, list_sched_selector_t *selector)
{
	(void) self;
	(void) selector;

	amd64_sched_selector = trivial_selector;
	amd64_sched_selector.to_appear_in_schedule = amd64_to_appear_in_schedule;
	return &amd64_sched_selector;
}
static const ilp_sched_selector_t *amd64_get_ilp_sched_selector(
		const void *self)
{
	/* no ILP scheduler available */
	(void) self;
	return NULL;
}
/**
 * Returns the necessary byte alignment for storing a register of a given class.
 */
static int amd64_get_reg_class_alignment(const arch_register_class_t *cls)
{
	ir_mode *mode = arch_register_class_mode(cls);
	return get_mode_size_bytes(mode);
}
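/* With only the general purpose class present this currently amounts to an
 * 8 byte alignment, since the class mode is a 64-bit mode. */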
/**
 * Returns the libFirm configuration parameters for this backend.
 */
static const backend_params *amd64_get_backend_params(void) {
	static backend_params p = {
		0, /* no dword lowering */
		0, /* no inline assembly */
		NULL, /* will be set later */
		NULL, /* no creator function */
		NULL, /* context for create_intrinsic_fkt */
		NULL, /* parameter for if conversion */
		NULL, /* float arithmetic mode */
		0, /* no trampoline support: size 0 */
		0, /* no trampoline support: align 0 */
		NULL, /* no trampoline support: no trampoline builder */
		8 /* alignment of stack parameter: typically 4 (32bit) or 8 (64bit) */
	};
	return &p;
}
static const be_execution_unit_t ***amd64_get_allowed_execution_units(
		const ir_node *irn)
{
	/* no machine model available */
	(void) irn;
	return NULL;
}

static const be_machine_t *amd64_get_machine(const void *self)
{
	/* no machine description available */
	(void) self;
	return NULL;
}

static ir_graph **amd64_get_backend_irg_list(const void *self,
                                             ir_graph ***irgs)
{
	(void) self;
	(void) irgs;
	return NULL;
}
static asm_constraint_flags_t amd64_parse_asm_constraint(const char **c)
{
	(void) c;
	return ASM_CONSTRAINT_FLAG_INVALID;
}
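/* Inline assembly is not supported by this backend (see the backend_params
 * above), so every constraint is rejected as invalid. */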
static int amd64_is_valid_clobber(const char *clobber)
{
	(void) clobber;
	return 0;
}
const arch_isa_if_t amd64_isa_if = {
	amd64_init,
	amd64_done,
	NULL, /* handle intrinsics */
	amd64_get_n_reg_class,
	amd64_get_reg_class,
	amd64_get_reg_class_for_mode,
	amd64_get_call_abi,
	amd64_get_code_generator_if,
	amd64_get_list_sched_selector,
	amd64_get_ilp_sched_selector,
	amd64_get_reg_class_alignment,
	amd64_get_backend_params,
	amd64_get_allowed_execution_units,
	amd64_get_machine,
	amd64_get_backend_irg_list,
	NULL, /* mark remat */
	amd64_parse_asm_constraint,
	amd64_is_valid_clobber
};
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_amd64);
void be_init_arch_amd64(void)
{
	be_register_isa_if("amd64", &amd64_isa_if);
	FIRM_DBG_REGISTER(dbg, "firm.be.amd64.cg");
	amd64_init_transform();
}