2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief emit assembler for a backend graph
23 * @version $Id: amd64_emitter.c 26746 2009-11-27 08:53:15Z matze $
39 #include "../besched.h"
40 #include "../begnuas.h"
41 #include "../beblocksched.h"
42 #include "../be_dbgout.h"
44 #include "amd64_emitter.h"
45 #include "gen_amd64_emitter.h"
46 #include "amd64_nodes_attr.h"
47 #include "amd64_new_nodes.h"
49 #define SNPRINTF_BUF_LEN 128
51 #include "../benode.h"
54 * Returns the register at in position pos.
56 static const arch_register_t *get_in_reg(const ir_node *node, int pos)
59 const arch_register_t *reg = NULL;
61 assert(get_irn_arity(node) > pos && "Invalid IN position");
63 /* The out register of the operator at position pos is the
64 in register we need. */
65 op = get_irn_n(node, pos);
67 reg = arch_get_irn_register(op);
69 assert(reg && "no in register found");
74 * Returns the register at out position pos.
76 static const arch_register_t *get_out_reg(const ir_node *node, int pos)
79 const arch_register_t *reg = NULL;
81 /* 1st case: irn is not of mode_T, so it has only */
82 /* one OUT register -> good */
83 /* 2nd case: irn is of mode_T -> collect all Projs and ask the */
84 /* Proj with the corresponding projnum for the register */
86 if (get_irn_mode(node) != mode_T) {
87 reg = arch_get_irn_register(node);
88 } else if (is_amd64_irn(node)) {
89 reg = arch_irn_get_register(node, pos);
91 const ir_edge_t *edge;
93 foreach_out_edge(node, edge) {
94 proj = get_edge_src_irn(edge);
95 assert(is_Proj(proj) && "non-Proj from mode_T node");
96 if (get_Proj_proj(proj) == pos) {
97 reg = arch_get_irn_register(proj);
103 assert(reg && "no out register found");
107 /*************************************************************
109 * (_) | | / _| | | | |
110 * _ __ _ __ _ _ __ | |_| |_ | |__ ___| |_ __ ___ _ __
111 * | '_ \| '__| | '_ \| __| _| | '_ \ / _ \ | '_ \ / _ \ '__|
112 * | |_) | | | | | | | |_| | | | | | __/ | |_) | __/ |
113 * | .__/|_| |_|_| |_|\__|_| |_| |_|\___|_| .__/ \___|_|
116 *************************************************************/
118 void amd64_emit_immediate(const ir_node *node)
120 const amd64_attr_t *attr = get_amd64_attr_const (node);
122 be_emit_irprintf("0x%X", attr->ext.imm_value);
125 void amd64_emit_source_register(const ir_node *node, int pos)
127 const arch_register_t *reg = get_in_reg(node, pos);
129 be_emit_string(arch_register_get_name(reg));
132 void amd64_emit_dest_register(const ir_node *node, int pos)
134 const arch_register_t *reg = get_out_reg(node, pos);
136 be_emit_string(arch_register_get_name(reg));
#if 0
/**
 * Returns the target label for a control flow node.
 *
 * NOTE(review): disabled — this is a duplicate definition of
 * amd64_emit_cfop_target; the be_gas_emit_block_name based version further
 * down is the one in use. Keeping two definitions of the same static
 * function is a compile error.
 */
static void amd64_emit_cfop_target(const ir_node *node)
{
	ir_node *block = get_irn_link(node);

	be_emit_irprintf("BLOCK_%ld", get_irn_node_nr(block));
}
#endif
151 /***********************************************************************************
154 * _ __ ___ __ _ _ _ __ | |_ _ __ __ _ _ __ ___ _____ _____ _ __| | __
155 * | '_ ` _ \ / _` | | '_ \ | _| '__/ _` | '_ ` _ \ / _ \ \ /\ / / _ \| '__| |/ /
156 * | | | | | | (_| | | | | | | | | | | (_| | | | | | | __/\ V V / (_) | | | <
157 * |_| |_| |_|\__,_|_|_| |_| |_| |_| \__,_|_| |_| |_|\___| \_/\_/ \___/|_| |_|\_\
159 ***********************************************************************************/
162 * Default emitter for anything that we don't want to generate code for.
164 static void emit_nothing(const ir_node *node)
172 static void emit_amd64_SymConst(const ir_node *irn)
174 const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(irn);
175 // sym_or_tv_t key, *entry;
178 // key.u.id = get_entity_ld_ident(attr->entity);
181 // entry = (sym_or_tv_t *)set_insert(sym_or_tv, &key, sizeof(key), HASH_PTR(key.u.generic));
182 // if (entry->label == 0) {
183 // /* allocate a label */
184 // entry->label = get_unique_label();
186 // label = entry->label;
188 be_gas_emit_entity(attr->entity);
190 be_emit_finish_line_gas(irn);
191 be_emit_cstring("\t.long 0x0");
192 be_emit_finish_line_gas(irn);
196 * Returns the next block in a block schedule.
198 static ir_node *sched_next_block(const ir_node *block)
200 return get_irn_link(block);
204 * Returns the target block for a control flow node.
206 static ir_node *get_cfop_target_block(const ir_node *irn)
208 return get_irn_link(irn);
212 * Emit the target label for a control flow node.
214 static void amd64_emit_cfop_target(const ir_node *irn)
216 ir_node *block = get_cfop_target_block(irn);
218 be_gas_emit_block_name(block);
224 static void emit_amd64_Jmp(const ir_node *node)
226 ir_node *block, *next_block;
228 /* for now, the code works for scheduled and non-schedules blocks */
229 block = get_nodes_block(node);
231 /* we have a block schedule */
232 next_block = sched_next_block(block);
233 if (get_cfop_target_block(node) != next_block) {
234 be_emit_cstring("\tjmp ");
235 amd64_emit_cfop_target(node);
237 be_emit_cstring("\t/* fallthrough to ");
238 amd64_emit_cfop_target(node);
239 be_emit_cstring(" */");
241 be_emit_finish_line_gas(node);
245 * Emit a Compare with conditional branch.
247 static void emit_amd64_Jcc(const ir_node *irn)
249 const ir_edge_t *edge;
250 const ir_node *proj_true = NULL;
251 const ir_node *proj_false = NULL;
252 const ir_node *block;
253 const ir_node *next_block;
255 const amd64_attr_t *attr = get_irn_generic_attr_const(irn);
256 int proj_num = attr->ext.pnc;
257 ir_node *op1 = get_irn_n(irn, 0);
258 const amd64_attr_t *cmp_attr = get_irn_generic_attr_const(op1);
259 bool is_signed = !cmp_attr->data.cmp_unsigned;
261 assert(is_amd64_Cmp(op1));
263 foreach_out_edge(irn, edge) {
264 ir_node *proj = get_edge_src_irn(edge);
265 long nr = get_Proj_proj(proj);
266 if (nr == pn_Cond_true) {
273 if (cmp_attr->data.ins_permuted) {
274 proj_num = get_mirrored_pnc(proj_num);
277 /* for now, the code works for scheduled and non-schedules blocks */
278 block = get_nodes_block(irn);
280 /* we have a block schedule */
281 next_block = sched_next_block(block);
283 assert(proj_num != pn_Cmp_False);
284 assert(proj_num != pn_Cmp_True);
286 if (get_cfop_target_block(proj_true) == next_block) {
287 /* exchange both proj's so the second one can be omitted */
288 const ir_node *t = proj_true;
290 proj_true = proj_false;
292 proj_num = get_negated_pnc(proj_num, mode_Iu);
296 case pn_Cmp_Eq: suffix = "e"; break;
297 case pn_Cmp_Lt: suffix = is_signed ? "l" : "b"; break;
298 case pn_Cmp_Le: suffix = is_signed ? "le" : "be"; break;
299 case pn_Cmp_Gt: suffix = is_signed ? "g" : "o"; break;
300 case pn_Cmp_Ge: suffix = is_signed ? "ge" : "oe"; break;
301 case pn_Cmp_Lg: suffix = "ne"; break;
302 case pn_Cmp_Leg: suffix = "mp"; break;
303 default: panic("Cmp has unsupported pnc");
306 /* emit the true proj */
307 be_emit_irprintf("\tj%s ", suffix);
308 amd64_emit_cfop_target(proj_true);
309 be_emit_finish_line_gas(proj_true);
311 if (get_cfop_target_block(proj_false) == next_block) {
312 be_emit_cstring("\t/* fallthrough to ");
313 amd64_emit_cfop_target(proj_false);
314 be_emit_cstring(" */");
315 be_emit_finish_line_gas(proj_false);
317 be_emit_cstring("\tjmp ");
318 amd64_emit_cfop_target(proj_false);
319 be_emit_finish_line_gas(proj_false);
324 * Emits code for a call.
326 static void emit_be_Call(const ir_node *node)
328 ir_entity *entity = be_Call_get_entity (node);
331 be_emit_cstring("\tcall ");
332 be_gas_emit_entity (be_Call_get_entity(node));
333 be_emit_finish_line_gas(node);
335 be_emit_pad_comment();
336 be_emit_cstring("/* FIXME: call NULL entity?! */\n");
343 static void emit_be_Copy(const ir_node *irn)
345 ir_mode *mode = get_irn_mode(irn);
347 if (get_in_reg(irn, 0) == get_out_reg(irn, 0)) {
352 if (mode_is_float(mode)) {
353 panic("emit_be_Copy: move not supported for FP");
354 } else if (mode_is_data(mode)) {
355 be_emit_cstring("\tmov ");
356 amd64_emit_source_register(irn, 0);
357 be_emit_cstring(", ");
358 amd64_emit_dest_register(irn, 0);
359 be_emit_finish_line_gas(irn);
361 panic("emit_be_Copy: move not supported for this mode");
366 * Emits code for a return.
368 static void emit_be_Return(const ir_node *node)
370 be_emit_cstring("\tret");
371 be_emit_finish_line_gas(node);
375 * The type of a emitter function.
377 typedef void (emit_func)(const ir_node *irn);
380 * Set a node emitter. Make it a bit more type safe.
382 static inline void set_emitter(ir_op *op, emit_func arm_emit_node)
384 op->ops.generic = (op_func)arm_emit_node;
388 * Enters the emitter functions for handled nodes into the generic
389 * pointer of an opcode.
391 static void amd64_register_emitters(void)
393 /* first clear the generic function pointer for all ops */
394 clear_irp_opcodes_generic_func();
396 /* register all emitter functions defined in spec */
397 amd64_register_spec_emitters();
399 set_emitter(op_amd64_SymConst, emit_amd64_SymConst);
400 set_emitter(op_amd64_Jmp, emit_amd64_Jmp);
401 set_emitter(op_amd64_Jcc, emit_amd64_Jcc);
402 set_emitter(op_be_Return, emit_be_Return);
403 set_emitter(op_be_Call, emit_be_Call);
404 set_emitter(op_be_Copy, emit_be_Copy);
406 set_emitter(op_be_Start, emit_nothing);
407 set_emitter(op_be_Keep, emit_nothing);
408 set_emitter(op_be_Barrier, emit_nothing);
409 set_emitter(op_be_IncSP, emit_nothing);
410 set_emitter(op_Phi, emit_nothing);
413 typedef void (*emit_func_ptr) (const ir_node *);
416 * Emits code for a node.
418 static void amd64_emit_node(const ir_node *node)
420 ir_op *op = get_irn_op(node);
422 if (op->ops.generic) {
423 emit_func_ptr func = (emit_func_ptr) op->ops.generic;
426 ir_fprintf(stderr, "No emitter for node %+F\n", node);
431 * Walks over the nodes in a block connected by scheduling edges
432 * and emits code for each node.
434 static void amd64_gen_block(ir_node *block, void *data)
439 if (! is_Block(block))
442 be_gas_emit_block_name(block);
445 be_emit_write_line();
447 sched_foreach(block, node) {
448 amd64_emit_node(node);
454 * Sets labels for control flow nodes (jump target)
455 * TODO: Jump optimization
457 static void amd64_gen_labels(ir_node *block, void *env)
460 int n = get_Block_n_cfgpreds(block);
463 for (n--; n >= 0; n--) {
464 pred = get_Block_cfgpred(block, n);
465 set_irn_link(pred, block);
472 void amd64_gen_routine(const amd64_code_gen_t *cg, ir_graph *irg)
474 ir_entity *entity = get_irg_entity(irg);
479 /* register all emitter functions */
480 amd64_register_emitters();
482 blk_sched = be_create_block_schedule(irg);
484 be_dbg_method_begin(entity, be_abi_get_stack_layout(cg->birg->abi));
485 be_gas_emit_function_prolog(entity, 4);
487 irg_block_walk_graph(irg, amd64_gen_labels, NULL, NULL);
489 n = ARR_LEN(blk_sched);
490 for (i = 0; i < n; i++) {
491 ir_node *block = blk_sched[i];
492 ir_node *next = (i + 1) < n ? blk_sched[i+1] : NULL;
494 set_irn_link(block, next);
497 for (i = 0; i < n; ++i) {
498 ir_node *block = blk_sched[i];
500 amd64_gen_block(block, 0);
503 be_gas_emit_function_epilog(entity);
506 be_emit_write_line();