/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   The main amd64 backend driver file.
 */
#include "lower_calls.h"

#include "belistsched.h"
#include "bespillslots.h"
#include "bespillutil.h"

#include "bearch_amd64_t.h"

#include "amd64_new_nodes.h"
#include "gen_amd64_regalloc_if.h"
#include "amd64_transform.h"
#include "amd64_emitter.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
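/**
 * Returns the frame entity attached to an amd64 FrameAddr/Store/Load node,
 * or NULL if the node does not reference an entity on the stack frame.
 */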
static ir_entity *amd64_get_frame_entity(const ir_node *node)
{
    if (is_amd64_FrameAddr(node)) {
        const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(node);
        return attr->entity;
    } else if (is_amd64_Store(node)) {
        const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(node);
        return attr->entity;
    } else if (is_amd64_Load(node)) {
        const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(node);
        return attr->entity;
    }

    /* TODO: return the ir_entity assigned to the frame */
    return NULL;
}
/**
 * This function is called by the generic backend to correct offsets for
 * nodes accessing the stack.
 */
static void amd64_set_frame_offset(ir_node *irn, int offset)
{
    if (is_amd64_FrameAddr(irn)) {
        amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr(irn);
        attr->fp_offset += offset;
    } else if (is_amd64_Store(irn)) {
        amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr(irn);
        attr->fp_offset += offset;
    } else if (is_amd64_Load(irn)) {
        amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr(irn);
        attr->fp_offset += offset;
    }
}
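/**
 * Returns the bias to add to stack-pointer-relative accesses at @p irn;
 * the amd64 backend does not move the stack pointer inside a function,
 * so the bias is always 0.
 */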
static int amd64_get_sp_bias(const ir_node *irn)
{
    (void) irn;
    return 0;
}

/* fill register allocator interface */
static const arch_irn_ops_t amd64_irn_ops = {
    amd64_get_frame_entity,
    amd64_set_frame_offset,
    amd64_get_sp_bias,
    NULL,    /* get_inverse             */
    NULL,    /* get_op_estimated_cost   */
    NULL,    /* possible_memory_operand */
    NULL,    /* perform_memory_operand  */
};
/**
 * Transforms the standard Firm graph into an amd64 Firm graph.
 */
static void amd64_prepare_graph(ir_graph *irg)
{
    amd64_transform_graph(irg);

    if (be_options.dump_flags & DUMP_BE)
        dump_ir_graph(irg, "transformed");
}
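/**
 * Called before register allocation; fixes the schedule of nodes
 * producing values in the flags register class.
 */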
static void amd64_before_ra(ir_graph *irg)
{
    be_sched_fix_flags(irg, &amd64_reg_classes[CLASS_amd64_flags], NULL, NULL);
}
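/**
 * Replaces a generic be_Reload node by an amd64 Load from the assigned
 * frame entity and rewires the result Proj and register assignment.
 */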
static void transform_Reload(ir_node *node)
{
    ir_graph  *irg    = get_irn_irg(node);
    ir_node   *block  = get_nodes_block(node);
    dbg_info  *dbgi   = get_irn_dbg_info(node);
    ir_node   *ptr    = get_irg_frame(irg);
    ir_node   *mem    = get_irn_n(node, n_be_Reload_mem);
    ir_mode   *mode   = get_irn_mode(node);
    ir_entity *entity = be_get_frame_entity(node);
    const arch_register_t *reg;
    ir_node   *proj;
    ir_node   *load;

    ir_node   *sched_point = sched_prev(node);

    load = new_bd_amd64_Load(dbgi, block, ptr, mem, entity);
    sched_add_after(sched_point, load);
    sched_remove(node);

    proj = new_rd_Proj(dbgi, load, mode, pn_amd64_Load_res);

    reg = arch_get_irn_register(node);
    arch_set_irn_register(proj, reg);

    exchange(node, proj);
}
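/**
 * Replaces a generic be_Spill node by an amd64 Store of the spilled
 * value to its assigned frame entity.
 */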
static void transform_Spill(ir_node *node)
{
    ir_graph  *irg    = get_irn_irg(node);
    ir_node   *block  = get_nodes_block(node);
    dbg_info  *dbgi   = get_irn_dbg_info(node);
    ir_node   *ptr    = get_irg_frame(irg);
    ir_node   *mem    = get_irg_no_mem(irg);
    ir_node   *val    = get_irn_n(node, n_be_Spill_val);
    ir_entity *entity = be_get_frame_entity(node);
    ir_node   *sched_point;
    ir_node   *store;

    sched_point = sched_prev(node);
    store = new_bd_amd64_Store(dbgi, block, ptr, val, mem, entity);
    sched_remove(node);
    sched_add_after(sched_point, store);

    exchange(node, store);
}
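/**
 * Block walker, run after register allocation: rewrites all remaining
 * be_Reload/be_Spill nodes into real amd64 Load/Store nodes.
 */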
static void amd64_after_ra_walker(ir_node *block, void *data)
{
    ir_node *node, *prev;
    (void) data;

    for (node = sched_last(block); !sched_is_begin(node); node = prev) {
        prev = sched_prev(node);

        if (be_is_Reload(node)) {
            transform_Reload(node);
        } else if (be_is_Spill(node)) {
            transform_Spill(node);
        }
    }
}
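/**
 * Assigns a frame entity to a be_Reload/be_Spill node on behalf of the
 * frame entity coalescer.
 */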
static void amd64_set_frame_entity(ir_node *node, ir_entity *entity)
{
    assert(be_is_Reload(node) || be_is_Spill(node));
    be_node_set_frame_entity(node, entity);
}
/**
 * Collects nodes that need frame entities assigned.
 */
static void amd64_collect_frame_entity_nodes(ir_node *node, void *data)
{
    if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
        be_fec_env_t  *env   = (be_fec_env_t*)data;
        const ir_mode *mode  = get_irn_mode(node);
        int            align = get_mode_size_bytes(mode);
        be_node_needs_frame_entity(env, node, mode, align);
    }
}
/**
 * Called immediately before the emit phase.
 */
static void amd64_finish_irg(ir_graph *irg)
{
    be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
    bool               at_begin     = stack_layout->sp_relative ? true : false;
    be_fec_env_t      *fec_env      = be_new_frame_entity_coalescer(irg);

    /* create and coalesce frame entities */
    irg_walk_graph(irg, NULL, amd64_collect_frame_entity_nodes, fec_env);
    be_assign_entities(fec_env, amd64_set_frame_entity, at_begin);
    be_free_frame_entity_coalescer(fec_env);

    irg_block_walk_graph(irg, NULL, amd64_after_ra_walker, NULL);

    /* fix stack entity offsets */
    be_abi_fix_stack_nodes(irg);
    be_abi_fix_stack_bias(irg);
}
extern const arch_isa_if_t amd64_isa_if;
static amd64_isa_t amd64_isa_template = {
    {
        &amd64_isa_if,             /* isa interface implementation */
        N_AMD64_REGISTERS,
        amd64_registers,
        N_AMD64_CLASSES,
        amd64_reg_classes,
        &amd64_registers[REG_RSP], /* stack pointer register */
        &amd64_registers[REG_RBP], /* base pointer register */
        3,                         /* power of two stack alignment for calls, 2^3 == 8 */
        NULL,                      /* main environment */
        7,                         /* costs for a spill instruction */
        5,                         /* costs for a reload instruction */
        false,                     /* no custom abi handling */
    },
};
static void amd64_init(void)
{
    amd64_register_init();
    amd64_create_opcodes(&amd64_irn_ops);
}

static void amd64_finish(void)
{
    amd64_free_opcodes();
}
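/**
 * Allocates the ISA instance from the template and initializes the
 * assembly emitter for the output file.
 */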
static arch_env_t *amd64_begin_codegeneration(const be_main_env_t *env)
{
    amd64_isa_t *isa = XMALLOC(amd64_isa_t);
    *isa = amd64_isa_template;

    be_emit_init(env->file_handle);
    be_gas_begin_compilation_unit(env);

    return &isa->base;
}
/**
 * Closes the output file and frees the ISA structure.
 */
static void amd64_end_codegeneration(void *self)
{
    amd64_isa_t *isa = (amd64_isa_t*)self;

    /* now emit all global declarations */
    be_gas_end_compilation_unit(isa->base.main_env);

    be_emit_exit();
    free(self);
}
/**
 * Builds (and lazily caches) the between type: the part of the stack
 * frame between the arguments and the local variables, holding the old
 * base pointer and the return address.
 * @param irg The graph (unused; the type is the same for all graphs).
 * @return The between type for that call.
 */
static ir_type *amd64_get_between_type(ir_graph *irg)
{
    static ir_type   *between_type = NULL;
    static ir_entity *old_bp_ent   = NULL;
    (void) irg;

    if (between_type == NULL) {
        ir_entity *ret_addr_ent;
        ir_type   *ret_addr_type = new_type_primitive(mode_P);
        ir_type   *old_bp_type   = new_type_primitive(mode_P);

        between_type = new_type_class(new_id_from_str("amd64_between_type"));
        old_bp_ent   = new_entity(between_type, new_id_from_str("old_bp"), old_bp_type);
        ret_addr_ent = new_entity(between_type, new_id_from_str("ret_addr"), ret_addr_type);

        set_entity_offset(old_bp_ent, 0);
        set_entity_offset(ret_addr_ent, get_type_size_bytes(old_bp_type));
        set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type)
                            + get_type_size_bytes(ret_addr_type));
    }

    return between_type;
}
static const be_abi_callbacks_t amd64_abi_callbacks = {
    amd64_get_between_type,
};
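/* integer argument registers in the order mandated by the x86-64 System V ABI */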
static const arch_register_t *gpreg_param_reg_std[] = {
    &amd64_registers[REG_RDI],
    &amd64_registers[REG_RSI],
    &amd64_registers[REG_RDX],
    &amd64_registers[REG_RCX],
    &amd64_registers[REG_R8],
    &amd64_registers[REG_R9],
};
static const arch_register_t *amd64_get_RegParam_reg(int n)
{
    assert(n >= 0 && n < 6 && "register param >= 6 requested");
    return gpreg_param_reg_std[n];
}
/**
 * Get the ABI restrictions for procedure calls.
 * @param method_type The type of the method (procedure) in question.
 * @param abi         The abi object to be modified.
 */
static void amd64_get_call_abi(ir_type *method_type, be_abi_call_t *abi)
{
    ir_mode *mode;
    ir_type *tp;
    int      i, n = get_method_n_params(method_type);
    int      no_reg = 0;

    /* set abi flags for calls */
    be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);
    call_flags.call_has_imm = true;
    be_abi_call_set_flags(abi, call_flags, &amd64_abi_callbacks);

    for (i = 0; i < n; i++) {
        tp   = get_method_param_type(method_type, i);
        mode = get_type_mode(tp);

        if (!no_reg && i < 6 && mode_is_data(mode)) {
            be_abi_call_param_reg(abi, i, amd64_get_RegParam_reg(i),
                                  ABI_CONTEXT_BOTH);
        } else {
            /* default: all remaining parameters on the stack */
            no_reg = 1;
            be_abi_call_param_stack(abi, i, mode, 8, 0, 0, ABI_CONTEXT_BOTH);
        }
    }

    /* TODO: set correct return register */
    /* default: the return value is passed in RAX */
    if (get_method_n_ress(method_type) > 0) {
        tp   = get_method_res_type(method_type, 0);
        mode = get_type_mode(tp);

        if (mode_is_float(mode))
            panic("float not supported yet");

        be_abi_call_res_reg(abi, 0,
                            &amd64_registers[REG_RAX], ABI_CONTEXT_BOTH);
    }
}
static void amd64_lower_for_target(void)
{
    size_t i, n_irgs = get_irp_n_irgs();

    /* lower compound param handling */
    lower_calls_with_compounds(LF_RETURN_HIDDEN);

    for (i = 0; i < n_irgs; ++i) {
        ir_graph *irg = get_irp_irg(i);
        /* Turn all small CopyBs into loads/stores, and turn all bigger
         * CopyBs into memcpy calls, because we cannot handle CopyB nodes
         * during code generation yet.
         * TODO: Adapt this once custom CopyB handling is implemented. */
        lower_CopyB(irg, 64, 65, true);
    }
}
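/**
 * Mux selection callback for if-conversion; the backend does not
 * implement a conditional move yet, so no Mux nodes are allowed.
 */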
static int amd64_is_mux_allowed(ir_node *sel, ir_node *mux_false,
                                ir_node *mux_true)
{
    (void) sel;
    (void) mux_false;
    (void) mux_true;
    return false;
}
/**
 * Returns the libFirm configuration parameters for this backend.
 */
static const backend_params *amd64_get_backend_params(void) {
    static backend_params p = {
        0,     /* no inline assembly */
        1,     /* support Rotl nodes */
        0,     /* little endian */
        1,     /* modulo shift is efficient */
        0,     /* non-modulo shift is not efficient */
        NULL,  /* will be set later */
        amd64_is_mux_allowed,  /* parameter for if conversion */
        64,    /* machine size */
        NULL,  /* float arithmetic mode */
        NULL,  /* long long type */
        NULL,  /* unsigned long long type */
        NULL,  /* long double type (not supported yet) */
        0,     /* no trampoline support: size 0 */
        0,     /* no trampoline support: align 0 */
        NULL,  /* no trampoline support: no trampoline builder */
        8      /* alignment of stack parameters: typically 4 (32bit) or 8 (64bit) */
    };
    return &p;
}
static asm_constraint_flags_t amd64_parse_asm_constraint(const char **c)
{
    (void) c;
    return ASM_CONSTRAINT_FLAG_INVALID;
}
static int amd64_is_valid_clobber(const char *clobber)
{
    (void) clobber;
    return 0;
}
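/**
 * Tells the generic backend whether @p reg is saved by the callee or by
 * the caller. On x86-64 System V, RBX, RBP and R12-R15 are callee-saved;
 * the remaining general purpose registers are caller-saved.
 */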
static int amd64_register_saved_by(const arch_register_t *reg, int callee)
{
    if (callee) {
        /* check for callee saved */
        if (reg->reg_class == &amd64_reg_classes[CLASS_amd64_gp]) {
            switch (reg->index) {
            case REG_GP_RBX:
            case REG_GP_RBP:
            case REG_GP_R12:
            case REG_GP_R13:
            case REG_GP_R14:
            case REG_GP_R15:
                return 1;
            default:
                return 0;
            }
        }
    } else {
        /* check for caller saved */
        if (reg->reg_class == &amd64_reg_classes[CLASS_amd64_gp]) {
            switch (reg->index) {
            case REG_GP_RAX:
            case REG_GP_RCX:
            case REG_GP_RDX:
            case REG_GP_RSI:
            case REG_GP_RDI:
            case REG_GP_R8:
            case REG_GP_R9:
            case REG_GP_R10:
            case REG_GP_R11:
                return 1;
            default:
                return 0;
            }
        }
    }
    return 0;
}
const arch_isa_if_t amd64_isa_if = {
    amd64_init,
    amd64_finish,
    amd64_get_backend_params,
    amd64_lower_for_target,
    amd64_parse_asm_constraint,
    amd64_is_valid_clobber,

    amd64_begin_codegeneration,
    amd64_end_codegeneration,
    NULL,              /* init graph */
    amd64_get_call_abi,
    NULL,              /* mark remat */
    NULL,              /* get_pic_base */
    be_new_spill,
    be_new_reload,
    amd64_register_saved_by,

    NULL,              /* handle intrinsics */
    NULL,              /* before_abi */
    amd64_prepare_graph,
    amd64_before_ra,
    amd64_finish_irg,
    amd64_gen_routine,
};
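/**
 * Module constructor: registers the amd64 backend with the libFirm
 * backend driver.
 */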
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_amd64)
void be_init_arch_amd64(void)
{
    be_register_isa_if("amd64", &amd64_isa_if);
    FIRM_DBG_REGISTER(dbg, "firm.be.amd64.cg");
    amd64_init_transform();
}