/*
 * This file is part of libFirm.
 * Copyright (C) 2012 University of Karlsruhe.
 */

/**
 * @file
 * @brief   The main amd64 backend driver file.
 */

#include "lower_calls.h"

#include "belistsched.h"
#include "bespillslots.h"
#include "bespillutil.h"

#include "bearch_amd64_t.h"

#include "amd64_new_nodes.h"
#include "gen_amd64_regalloc_if.h"
#include "amd64_transform.h"
#include "amd64_emitter.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

static ir_entity *amd64_get_frame_entity(const ir_node *node)
{
	if (is_amd64_FrameAddr(node)) {
		const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(node);
		return attr->entity;
	} else if (is_amd64_Store(node)) {
		const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(node);
		return attr->entity;
	} else if (is_amd64_Load(node)) {
		const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(node);
		return attr->entity;
	}

	/* TODO: return the ir_entity assigned to the frame */
	return NULL;
}

/**
 * This function is called by the generic backend to correct offsets for
 * nodes accessing the stack.
 */
static void amd64_set_frame_offset(ir_node *irn, int offset)
{
	if (is_amd64_FrameAddr(irn)) {
		amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr(irn);
		attr->fp_offset += offset;
	} else if (is_amd64_Store(irn)) {
		amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr(irn);
		attr->fp_offset += offset;
	} else if (is_amd64_Load(irn)) {
		amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr(irn);
		attr->fp_offset += offset;
	}
}

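/*
 * For illustration (a sketch, not code from this file): if the frame
 * entity of a reload ends up at offset -16 in the final frame layout,
 * the generic backend calls amd64_set_frame_offset(load, -16) on the
 * corresponding Load, and the emitter later prints the memory operand
 * with that bias, e.g. as -16(%rbp) in a frame-pointer-relative layout.
 */
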
static int amd64_get_sp_bias(const ir_node *irn)
{
	(void)irn;
	return 0;
}

/* fill register allocator interface */

static const arch_irn_ops_t amd64_irn_ops = {
	amd64_get_frame_entity,
	amd64_set_frame_offset,
	amd64_get_sp_bias,
	NULL,  /* get_op_estimated_cost   */
	NULL,  /* possible_memory_operand */
	NULL,  /* perform_memory_operand  */
};

/**
 * Transforms the standard firm graph into an amd64 firm graph.
 */
static void amd64_prepare_graph(ir_graph *irg)
{
	amd64_transform_graph(irg);

	if (be_options.dump_flags & DUMP_BE)
		dump_ir_graph(irg, "transformed");
}

static void amd64_before_ra(ir_graph *irg)
{
	/* fix up flag-consuming nodes before register allocation */
	be_sched_fix_flags(irg, &amd64_reg_classes[CLASS_amd64_flags], NULL, NULL);
}

static void transform_Reload(ir_node *node)
{
	ir_graph  *irg    = get_irn_irg(node);
	ir_node   *block  = get_nodes_block(node);
	dbg_info  *dbgi   = get_irn_dbg_info(node);
	ir_node   *ptr    = get_irg_frame(irg);
	ir_node   *mem    = get_irn_n(node, n_be_Reload_mem);
	ir_mode   *mode   = get_irn_mode(node);
	ir_entity *entity = be_get_frame_entity(node);
	const arch_register_t *reg;
	ir_node   *proj;
	ir_node   *load;

	load = new_bd_amd64_Load(dbgi, block, ptr, mem, entity);
	sched_replace(node, load);

	proj = new_rd_Proj(dbgi, load, mode, pn_amd64_Load_res);

	reg = arch_get_irn_register(node);
	arch_set_irn_register(proj, reg);

	exchange(node, proj);
}

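/*
 * Schematically (a sketch, not output produced by this file), the
 * rewrite above replaces the generic reload by a concrete amd64 load:
 *
 *     before:  r = be_Reload(frame, mem)          with frame entity e
 *     after:   l = amd64_Load(frame, mem, e)
 *              r = Proj(l, pn_amd64_Load_res)     in the same register
 */
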
static void transform_Spill(ir_node *node)
{
	ir_graph  *irg    = get_irn_irg(node);
	ir_node   *block  = get_nodes_block(node);
	dbg_info  *dbgi   = get_irn_dbg_info(node);
	ir_node   *ptr    = get_irg_frame(irg);
	ir_node   *mem    = get_irg_no_mem(irg);
	ir_node   *val    = get_irn_n(node, n_be_Spill_val);
	ir_entity *entity = be_get_frame_entity(node);
	ir_node   *store;

	store = new_bd_amd64_Store(dbgi, block, ptr, val, mem, entity);
	sched_replace(node, store);

	exchange(node, store);
}

static void amd64_after_ra_walker(ir_node *block, void *data)
{
	ir_node *node, *prev;
	(void)data;

	for (node = sched_last(block); !sched_is_begin(node); node = prev) {
		prev = sched_prev(node);

		if (be_is_Reload(node)) {
			transform_Reload(node);
		} else if (be_is_Spill(node)) {
			transform_Spill(node);
		}
	}
}

static void amd64_set_frame_entity(ir_node *node, ir_entity *entity)
{
	assert(be_is_Reload(node) || be_is_Spill(node));
	be_node_set_frame_entity(node, entity);
}

/**
 * Collects nodes that need frame entities assigned.
 */
static void amd64_collect_frame_entity_nodes(ir_node *node, void *data)
{
	if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
		be_fec_env_t  *env   = (be_fec_env_t*)data;
		const ir_mode *mode  = get_irn_mode(node);
		int            align = get_mode_size_bytes(mode);
		be_node_needs_frame_entity(env, node, mode, align);
	}
}

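/*
 * Note that the requested alignment equals the mode size, so e.g. a
 * 64-bit value gets an 8-byte slot aligned to 8 bytes; the frame
 * entity coalescer can then share slots between values whose live
 * ranges do not overlap.
 */
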
/**
 * Called immediately before the emit phase.
 */
static void amd64_finish_irg(ir_graph *irg)
{
	be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
	bool               at_begin     = stack_layout->sp_relative;
	be_fec_env_t      *fec_env      = be_new_frame_entity_coalescer(irg);

	/* create and coalesce frame entities */
	irg_walk_graph(irg, NULL, amd64_collect_frame_entity_nodes, fec_env);
	be_assign_entities(fec_env, amd64_set_frame_entity, at_begin);
	be_free_frame_entity_coalescer(fec_env);

	irg_block_walk_graph(irg, NULL, amd64_after_ra_walker, NULL);

	/* fix stack entity offsets */
	be_abi_fix_stack_nodes(irg);
	be_abi_fix_stack_bias(irg);
}

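/*
 * The order above matters: frame entities must be assigned before the
 * walker rewrites be_Spill/be_Reload into amd64 Store/Load nodes
 * (which consume the entity), and the stack bias can only be fixed
 * once every frame access is a concrete amd64 node.
 */
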
extern const arch_isa_if_t amd64_isa_if;
static amd64_isa_t amd64_isa_template = {
	&amd64_isa_if,              /* isa interface implementation */
	&amd64_registers[REG_RSP],  /* stack pointer register */
	&amd64_registers[REG_RBP],  /* base pointer register */
	3,                          /* power of two stack alignment for calls, 2^3 == 8 */
	7,                          /* costs for a spill instruction */
	5,                          /* costs for a reload instruction */
	false,                      /* no custom abi handling */
};

static void amd64_init(void)
{
	amd64_register_init();
	amd64_create_opcodes(&amd64_irn_ops);
}

static void amd64_finish(void)
{
	amd64_free_opcodes();
}

static arch_env_t *amd64_begin_codegeneration(void)
{
	amd64_isa_t *isa = XMALLOC(amd64_isa_t);
	*isa = amd64_isa_template;

	return &isa->base;
}

/**
 * Closes the output file and frees the ISA structure.
 */
static void amd64_end_codegeneration(void *self)
{
	free(self);
}

/**
 * Get the between type for that call.
 * @param irg  The graph in question.
 * @return The between type for that call.
 */
static ir_type *amd64_get_between_type(ir_graph *irg)
{
	static ir_type   *between_type = NULL;
	static ir_entity *old_bp_ent   = NULL;
	(void)irg;

	if (between_type == NULL) {
		ir_entity *ret_addr_ent;
		ir_type   *ret_addr_type = new_type_primitive(mode_P);
		ir_type   *old_bp_type   = new_type_primitive(mode_P);

		between_type = new_type_class(new_id_from_str("amd64_between_type"));
		old_bp_ent   = new_entity(between_type, new_id_from_str("old_bp"), old_bp_type);
		ret_addr_ent = new_entity(between_type, new_id_from_str("ret_addr"), ret_addr_type);

		set_entity_offset(old_bp_ent, 0);
		set_entity_offset(ret_addr_ent, get_type_size_bytes(old_bp_type));
		set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type)
		                    + get_type_size_bytes(ret_addr_type));
	}

	return between_type;
}

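/*
 * Layout of the between type built above (both slots are pointer-sized,
 * i.e. 8 bytes on amd64): old_bp lives at offset 0, ret_addr at offset 8,
 * for a total size of 16 bytes. It describes the frame slice between the
 * local variables and the incoming arguments:
 *
 *     | incoming args  |   higher addresses
 *     +----------------+
 *     | return address |   ret_addr, offset 8
 *     +----------------+
 *     | old base ptr   |   old_bp, offset 0
 *     +----------------+
 *     | locals, spills |   lower addresses
 */
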
static const be_abi_callbacks_t amd64_abi_callbacks = {
	amd64_get_between_type,
};

static const arch_register_t *gpreg_param_reg_std[] = {
	&amd64_registers[REG_RDI],
	&amd64_registers[REG_RSI],
	&amd64_registers[REG_RDX],
	&amd64_registers[REG_RCX],
	&amd64_registers[REG_R8],
	&amd64_registers[REG_R9],
};

static const arch_register_t *amd64_get_RegParam_reg(int n)
{
	assert(n >= 0 && n < 6 && "register param index out of range");
	return gpreg_param_reg_std[n];
}

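/*
 * This is the integer argument register sequence of the System V AMD64
 * ABI. For example (an illustration, not code from this file), a call
 *
 *     long f(long a, long b, long c, long d, long e, long g, long h);
 *
 * passes a..g in RDI, RSI, RDX, RCX, R8 and R9, while h falls through
 * to the stack case handled in amd64_get_call_abi below.
 */
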
/**
 * Get the ABI restrictions for procedure calls.
 * @param method_type  The type of the method (procedure) in question.
 * @param abi          The abi object to be modified.
 */
static void amd64_get_call_abi(ir_type *method_type, be_abi_call_t *abi)
{
	ir_type *tp;
	ir_mode *mode;
	int      i, n = get_method_n_params(method_type);
	int      no_reg = 0;

	/* set abi flags for calls */
	be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);
	call_flags.call_has_imm = true;
	be_abi_call_set_flags(abi, call_flags, &amd64_abi_callbacks);

	for (i = 0; i < n; i++) {
		tp   = get_method_param_type(method_type, i);
		mode = get_type_mode(tp);

		if (!no_reg && i < 6 && mode_is_data(mode)) {
			be_abi_call_param_reg(abi, i, amd64_get_RegParam_reg(i),
			                      ABI_CONTEXT_BOTH);
		} else {
			/* default: all parameters on stack */
			be_abi_call_param_stack(abi, i, mode, 8, 0, 0, ABI_CONTEXT_BOTH);
		}
	}

	/* TODO: set correct return register */
	/* default: the return value is passed in RAX */
	if (get_method_n_ress(method_type) > 0) {
		tp   = get_method_res_type(method_type, 0);
		mode = get_type_mode(tp);

		if (mode_is_float(mode))
			panic("float not supported yet");

		be_abi_call_res_reg(abi, 0,
		                    &amd64_registers[REG_RAX], ABI_CONTEXT_BOTH);
	}
}

static void amd64_lower_for_target(void)
{
	size_t i, n_irgs = get_irp_n_irgs();

	/* lower compound param handling */
	lower_calls_with_compounds(LF_RETURN_HIDDEN);

	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		/* Turn all small CopyBs into loads/stores, and turn all bigger
		 * CopyBs into memcpy calls, because we cannot handle CopyB nodes
		 * during code generation yet.
		 * TODO: Adapt this once custom CopyB handling is implemented. */
		lower_CopyB(irg, 64, 65, true);
	}
}

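/*
 * With the thresholds above, a CopyB of at most 64 bytes (say, a small
 * struct assignment) is expanded into individual load/store pairs,
 * while a copy of 65 bytes or more becomes a call to memcpy.
 */
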
static int amd64_is_mux_allowed(ir_node *sel, ir_node *mux_false,
                                ir_node *mux_true)
/**
 * Returns the libFirm configuration parameters for this backend.
 */
static const backend_params *amd64_get_backend_params(void) {
	static backend_params p = {
		0,     /* no inline assembly */
		1,     /* support Rotl nodes */
		0,     /* little endian */
		1,     /* modulo shift is efficient */
		0,     /* non-modulo shift is not efficient */
		NULL,  /* will be set later */
		amd64_is_mux_allowed,  /* parameter for if conversion */
		64,    /* machine size */
		NULL,  /* float arithmetic mode */
		NULL,  /* long long type */
		NULL,  /* unsigned long long type */
		NULL,  /* long double type (not supported yet) */
		0,     /* no trampoline support: size 0 */
		0,     /* no trampoline support: align 0 */
		NULL,  /* no trampoline support: no trampoline builder */
		8      /* alignment of stack parameters: 8 bytes on a 64-bit target */
	};
	return &p;
}

static asm_constraint_flags_t amd64_parse_asm_constraint(const char **c)
{
	(void)c;
	return ASM_CONSTRAINT_FLAG_INVALID;
}

static int amd64_is_valid_clobber(const char *clobber)
{
	(void)clobber;
	return 0;
}

static int amd64_register_saved_by(const arch_register_t *reg, int callee)
{
	switch (reg->global_index) {
const arch_isa_if_t amd64_isa_if = {
	amd64_get_backend_params,
	amd64_lower_for_target,
	amd64_parse_asm_constraint,
	amd64_is_valid_clobber,

	amd64_begin_codegeneration,
	amd64_end_codegeneration,

	NULL,  /* mark remat */
	NULL,  /* get_pic_base */

	amd64_register_saved_by,

	NULL,  /* handle intrinsics */
	NULL,  /* before_abi */

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_amd64)
void be_init_arch_amd64(void)
{
	be_register_isa_if("amd64", &amd64_isa_if);
	FIRM_DBG_REGISTER(dbg, "firm.be.amd64.cg");
	amd64_init_transform();
}