/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief The main amd64 backend driver file.
 */
#include "lower_calls.h"

#include "belistsched.h"
#include "bespillslots.h"
#include "bespillutil.h"

#include "bearch_amd64_t.h"

#include "amd64_new_nodes.h"
#include "gen_amd64_regalloc_if.h"
#include "amd64_transform.h"
#include "amd64_emitter.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
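
/**
 * Returns the frame entity accessed by @p node, or NULL if the node does
 * not reference the stack frame.
 */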
static ir_entity *amd64_get_frame_entity(const ir_node *node)
{
	if (is_amd64_FrameAddr(node)) {
		const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(node);
		return attr->entity;
	} else if (is_amd64_Store(node)) {
		const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(node);
		return attr->entity;
	} else if (is_amd64_Load(node)) {
		const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(node);
		return attr->entity;
	}

	/* TODO: return the ir_entity assigned to the frame */
	return NULL;
}
/**
 * This function is called by the generic backend to correct offsets for
 * nodes accessing the stack.
 */
static void amd64_set_frame_offset(ir_node *irn, int offset)
{
	if (is_amd64_FrameAddr(irn)) {
		amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr(irn);
		attr->fp_offset += offset;
	} else if (is_amd64_Store(irn)) {
		amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr(irn);
		attr->fp_offset += offset;
	} else if (is_amd64_Load(irn)) {
		amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr(irn);
		attr->fp_offset += offset;
	}
}
static int amd64_get_sp_bias(const ir_node *irn)
{
	/* this backend does not use a stack pointer bias */
	(void) irn;
	return 0;
}
/* fill register allocator interface */

static const arch_irn_ops_t amd64_irn_ops = {
	amd64_get_frame_entity,
	amd64_set_frame_offset,
	amd64_get_sp_bias,
	NULL, /* get_op_estimated_cost */
	NULL, /* possible_memory_operand */
	NULL, /* perform_memory_operand */
};
/**
 * Transforms the standard Firm graph into an amd64 Firm graph.
 */
static void amd64_prepare_graph(ir_graph *irg)
{
	amd64_transform_graph(irg);

	if (be_options.dump_flags & DUMP_BE)
		dump_ir_graph(irg, "transformed");
}
static void amd64_before_ra(ir_graph *irg)
{
	be_sched_fix_flags(irg, &amd64_reg_classes[CLASS_amd64_flags], NULL, NULL);
}
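
/**
 * Rewrites a generic be_Reload node into an amd64 Load from the node's
 * frame entity, plus a result Proj carrying the reloaded register.
 */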
static void transform_Reload(ir_node *node)
{
	ir_graph  *irg    = get_irn_irg(node);
	ir_node   *block  = get_nodes_block(node);
	dbg_info  *dbgi   = get_irn_dbg_info(node);
	ir_node   *ptr    = get_irg_frame(irg);
	ir_node   *mem    = get_irn_n(node, n_be_Reload_mem);
	ir_mode   *mode   = get_irn_mode(node);
	ir_entity *entity = be_get_frame_entity(node);
	const arch_register_t *reg;
	ir_node   *load;
	ir_node   *proj;

	load = new_bd_amd64_Load(dbgi, block, ptr, mem, entity);
	sched_replace(node, load);

	proj = new_rd_Proj(dbgi, load, mode, pn_amd64_Load_res);

	reg = arch_get_irn_register(node);
	arch_set_irn_register(proj, reg);

	exchange(node, proj);
}
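
/**
 * Rewrites a generic be_Spill node into an amd64 Store of the spilled
 * value to its frame entity.
 */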
static void transform_Spill(ir_node *node)
{
	ir_graph  *irg    = get_irn_irg(node);
	ir_node   *block  = get_nodes_block(node);
	dbg_info  *dbgi   = get_irn_dbg_info(node);
	ir_node   *ptr    = get_irg_frame(irg);
	ir_node   *mem    = get_irg_no_mem(irg);
	ir_node   *val    = get_irn_n(node, n_be_Spill_val);
	ir_entity *entity = be_get_frame_entity(node);
	ir_node   *store;

	store = new_bd_amd64_Store(dbgi, block, ptr, val, mem, entity);
	sched_replace(node, store);

	exchange(node, store);
}
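
/**
 * Block walker: replaces all generic Spill/Reload nodes in the block's
 * schedule with their amd64 Store/Load counterparts.
 */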
static void amd64_after_ra_walker(ir_node *block, void *data)
{
	ir_node *node, *prev;
	(void) data;

	for (node = sched_last(block); !sched_is_begin(node); node = prev) {
		prev = sched_prev(node);

		if (be_is_Reload(node)) {
			transform_Reload(node);
		} else if (be_is_Spill(node)) {
			transform_Spill(node);
		}
	}
}
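
/**
 * Frame entity coalescer callback: assigns the chosen frame entity to a
 * Spill or Reload node.
 */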
static void amd64_set_frame_entity(ir_node *node, ir_entity *entity)
{
	assert(be_is_Reload(node) || be_is_Spill(node));
	be_node_set_frame_entity(node, entity);
}
/**
 * Collects nodes that need frame entities assigned.
 */
static void amd64_collect_frame_entity_nodes(ir_node *node, void *data)
{
	if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
		be_fec_env_t  *env   = (be_fec_env_t*)data;
		const ir_mode *mode  = get_irn_mode(node);
		int            align = get_mode_size_bytes(mode);
		be_node_needs_frame_entity(env, node, mode, align);
	}
}
/**
 * Called immediately before the emit phase.
 */
static void amd64_finish_irg(ir_graph *irg)
{
	be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
	bool               at_begin     = stack_layout->sp_relative;
	be_fec_env_t      *fec_env      = be_new_frame_entity_coalescer(irg);

	/* create and coalesce frame entities */
	irg_walk_graph(irg, NULL, amd64_collect_frame_entity_nodes, fec_env);
	be_assign_entities(fec_env, amd64_set_frame_entity, at_begin);
	be_free_frame_entity_coalescer(fec_env);

	irg_block_walk_graph(irg, NULL, amd64_after_ra_walker, NULL);

	/* fix stack entity offsets */
	be_abi_fix_stack_nodes(irg);
	be_abi_fix_stack_bias(irg);
}
extern const arch_isa_if_t amd64_isa_if;
static amd64_isa_t amd64_isa_template = {
	{
		&amd64_isa_if,             /* isa interface implementation */
		&amd64_registers[REG_RSP], /* stack pointer register */
		&amd64_registers[REG_RBP], /* base pointer register */
		3,                         /* power of two stack alignment for calls, 2^3 == 8 */
		7,                         /* costs for a spill instruction */
		5,                         /* costs for a reload instruction */
		false,                     /* no custom abi handling */
	},
};
static void amd64_init(void)
{
	amd64_register_init();
	amd64_create_opcodes(&amd64_irn_ops);
}
static void amd64_finish(void)
{
	amd64_free_opcodes();
}
static arch_env_t *amd64_begin_codegeneration(void)
{
	amd64_isa_t *isa = XMALLOC(amd64_isa_t);
	*isa = amd64_isa_template;

	return &isa->base;
}
/**
 * Frees the ISA structure.
 */
static void amd64_end_codegeneration(void *self)
{
	free(self);
}
/**
 * Get the between type for this backend.
 * @param irg The graph in question.
 * @return The between type for that graph.
 */
static ir_type *amd64_get_between_type(ir_graph *irg)
{
	static ir_type   *between_type = NULL;
	static ir_entity *old_bp_ent   = NULL;
	(void) irg;

	if (between_type == NULL) {
		ir_entity *ret_addr_ent;
		ir_type   *ret_addr_type = new_type_primitive(mode_P);
		ir_type   *old_bp_type   = new_type_primitive(mode_P);

		between_type = new_type_class(new_id_from_str("amd64_between_type"));
		old_bp_ent   = new_entity(between_type, new_id_from_str("old_bp"), old_bp_type);
		ret_addr_ent = new_entity(between_type, new_id_from_str("ret_addr"), ret_addr_type);

		set_entity_offset(old_bp_ent, 0);
		set_entity_offset(ret_addr_ent, get_type_size_bytes(old_bp_type));
		set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
	}

	return between_type;
}
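
/* The resulting between type describes the slots that sit between the local
 * variables and the incoming stack arguments. A sketch, derived from the
 * offsets set above (mode_P is 8 bytes on amd64):
 *
 *   offset 0:  old_bp    saved frame pointer
 *   offset 8:  ret_addr  return address pushed by the call
 */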
static const be_abi_callbacks_t amd64_abi_callbacks = {
	amd64_get_between_type,
};
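
/* Integer parameter registers in System V AMD64 ABI order. */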
static const arch_register_t *gpreg_param_reg_std[] = {
	&amd64_registers[REG_RDI],
	&amd64_registers[REG_RSI],
	&amd64_registers[REG_RDX],
	&amd64_registers[REG_RCX],
	&amd64_registers[REG_R8],
	&amd64_registers[REG_R9],
};
static const arch_register_t *amd64_get_RegParam_reg(int n)
{
	assert(n >= 0 && n < 6 && "register parameter index out of range");
	return gpreg_param_reg_std[n];
}
/**
 * Get the ABI restrictions for procedure calls.
 * @param method_type The type of the method (procedure) in question.
 * @param abi         The abi object to be modified.
 */
static void amd64_get_call_abi(ir_type *method_type, be_abi_call_t *abi)
{
	ir_type *tp;
	ir_mode *mode;
	int      i, n = get_method_n_params(method_type);
	int      no_reg = 0;

	/* set abi flags for calls */
	be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);
	call_flags.call_has_imm = true;
	be_abi_call_set_flags(abi, call_flags, &amd64_abi_callbacks);

	for (i = 0; i < n; i++) {
		tp   = get_method_param_type(method_type, i);
		mode = get_type_mode(tp);

		if (!no_reg && i < 6 && mode_is_data(mode)) {
			be_abi_call_param_reg(abi, i, amd64_get_RegParam_reg(i),
			                      ABI_CONTEXT_BOTH);
		/* default: all remaining parameters on stack */
		} else {
			no_reg = 1;
			be_abi_call_param_stack(abi, i, mode, 8, 0, 0, ABI_CONTEXT_BOTH);
		}
	}

	/* TODO: set correct return register */
	/* default: return value is in RAX */
	if (get_method_n_ress(method_type) > 0) {
		tp   = get_method_res_type(method_type, 0);
		mode = get_type_mode(tp);

		if (mode_is_float(mode))
			panic("float not supported yet");

		be_abi_call_res_reg(abi, 0,
		                    &amd64_registers[REG_RAX], ABI_CONTEXT_BOTH);
	}
}
static void amd64_lower_for_target(void)
{
	size_t i, n_irgs = get_irp_n_irgs();

	/* lower compound param handling */
	lower_calls_with_compounds(LF_RETURN_HIDDEN);

	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		/* Turn all small CopyBs into loads/stores, and turn all bigger
		 * CopyBs into memcpy calls, because we cannot handle CopyB nodes
		 * during code generation yet.
		 * TODO: Adapt this once custom CopyB handling is implemented. */
		lower_CopyB(irg, 64, 65, true);
	}
}
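
/* Note on the lower_CopyB thresholds above, assuming the small/large limits
 * are inclusive: copies of up to 64 bytes are expanded into load/store
 * sequences, and the min-large-size of 65 leaves no unlowered middle range,
 * so every bigger CopyB becomes a memcpy call. */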
static int amd64_is_mux_allowed(ir_node *sel, ir_node *mux_false,
                                ir_node *mux_true)
{
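	/* a minimal stub, assuming if-conversion is rejected entirely until
	 * the backend can emit conditional moves */
	(void) sel;
	(void) mux_false;
	(void) mux_true;
	return false;
}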
/**
 * Returns the libFirm configuration parameters for this backend.
 */
static const backend_params *amd64_get_backend_params(void) {
	static backend_params p = {
		0,    /* no inline assembly */
		1,    /* support Rotl nodes */
		0,    /* little endian */
		1,    /* modulo shift is efficient */
		0,    /* non-modulo shift is not efficient */
		NULL, /* will be set later */
		amd64_is_mux_allowed, /* parameter for if conversion */
		64,   /* machine size */
		NULL, /* float arithmetic mode */
		NULL, /* long long type */
		NULL, /* unsigned long long type */
		NULL, /* long double type (not supported yet) */
		0,    /* no trampoline support: size 0 */
		0,    /* no trampoline support: align 0 */
		NULL, /* no trampoline support: no trampoline builder */
		8     /* alignment of stack parameter: 8 bytes on 64-bit */
	};
	return &p;
}
static asm_constraint_flags_t amd64_parse_asm_constraint(const char **c)
{
	(void) c;
	return ASM_CONSTRAINT_FLAG_INVALID;
}
static int amd64_is_valid_clobber(const char *clobber)
{
	/* inline assembly is not supported, so no clobber is valid */
	(void) clobber;
	return 0;
}
static int amd64_register_saved_by(const arch_register_t *reg, int callee)
{
	switch (reg->global_index) {
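	/* A sketch of the expected cases, assuming the standard System V AMD64
	 * convention: RBX, RBP and R12-R15 are callee-saved, the remaining
	 * general-purpose registers are caller-saved. */
	case REG_RBX:
	case REG_RBP:
	case REG_R12:
	case REG_R13:
	case REG_R14:
	case REG_R15:
		return callee;
	default:
		return !callee;
	}
}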
const arch_isa_if_t amd64_isa_if = {
	amd64_init,
	amd64_finish,
	amd64_get_backend_params,
	amd64_lower_for_target,
	amd64_parse_asm_constraint,
	amd64_is_valid_clobber,

	amd64_begin_codegeneration,
	amd64_end_codegeneration,
	amd64_get_call_abi,
	NULL, /* mark remat */
	NULL, /* get_pic_base */
	amd64_register_saved_by,

	NULL, /* handle intrinsics */
	NULL, /* before_abi */
	amd64_prepare_graph,
	amd64_before_ra,
	amd64_finish_irg,
};
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_amd64)
void be_init_arch_amd64(void)
{
	be_register_isa_if("amd64", &amd64_isa_if);
	FIRM_DBG_REGISTER(dbg, "firm.be.amd64.cg");
	amd64_init_transform();
}