/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   emit assembler for a backend graph
 * @version $Id: amd64_emitter.c 26746 2009-11-27 08:53:15Z matze $
 */
39 #include "../besched.h"
40 #include "../begnuas.h"
41 #include "../beblocksched.h"
42 #include "../be_dbgout.h"
44 #include "amd64_emitter.h"
45 #include "gen_amd64_emitter.h"
46 #include "gen_amd64_regalloc_if.h"
47 #include "amd64_nodes_attr.h"
48 #include "amd64_new_nodes.h"
50 #define SNPRINTF_BUF_LEN 128
52 #include "../benode.h"
55 * Returns the register at in position pos.
57 static const arch_register_t *get_in_reg(const ir_node *node, int pos)
60 const arch_register_t *reg = NULL;
62 assert(get_irn_arity(node) > pos && "Invalid IN position");
64 /* The out register of the operator at position pos is the
65 in register we need. */
66 op = get_irn_n(node, pos);
68 reg = arch_get_irn_register(op);
70 assert(reg && "no in register found");
75 * Returns the register at out position pos.
77 static const arch_register_t *get_out_reg(const ir_node *node, int pos)
80 const arch_register_t *reg = NULL;
82 /* 1st case: irn is not of mode_T, so it has only */
83 /* one OUT register -> good */
84 /* 2nd case: irn is of mode_T -> collect all Projs and ask the */
85 /* Proj with the corresponding projnum for the register */
87 if (get_irn_mode(node) != mode_T) {
88 reg = arch_get_irn_register(node);
89 } else if (is_amd64_irn(node)) {
90 reg = arch_irn_get_register(node, pos);
92 const ir_edge_t *edge;
94 foreach_out_edge(node, edge) {
95 proj = get_edge_src_irn(edge);
96 assert(is_Proj(proj) && "non-Proj from mode_T node");
97 if (get_Proj_proj(proj) == pos) {
98 reg = arch_get_irn_register(proj);
104 assert(reg && "no out register found");
108 /*************************************************************
110 * (_) | | / _| | | | |
111 * _ __ _ __ _ _ __ | |_| |_ | |__ ___| |_ __ ___ _ __
112 * | '_ \| '__| | '_ \| __| _| | '_ \ / _ \ | '_ \ / _ \ '__|
113 * | |_) | | | | | | | |_| | | | | | __/ | |_) | __/ |
114 * | .__/|_| |_|_| |_|\__|_| |_| |_|\___|_| .__/ \___|_|
117 *************************************************************/
119 void amd64_emit_register(const arch_register_t *reg)
122 be_emit_string(arch_register_get_name(reg));
125 void amd64_emit_immediate(const ir_node *node)
127 const amd64_attr_t *attr = get_amd64_attr_const (node);
129 be_emit_irprintf("0x%X", attr->ext.imm_value);
132 void amd64_emit_fp_offset(const ir_node *node)
134 const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(node);
136 be_emit_irprintf("%d", attr->fp_offset);
139 void amd64_emit_source_register(const ir_node *node, int pos)
141 amd64_emit_register(get_in_reg(node, pos));
144 void amd64_emit_dest_register(const ir_node *node, int pos)
146 amd64_emit_register(get_out_reg(node, pos));
150 * Returns the target label for a control flow node.
153 static void amd64_emit_cfop_target(const ir_node *node)
155 ir_node *block = get_irn_link(node);
157 be_emit_irprintf("BLOCK_%ld", get_irn_node_nr(block));
161 /***********************************************************************************
164 * _ __ ___ __ _ _ _ __ | |_ _ __ __ _ _ __ ___ _____ _____ _ __| | __
165 * | '_ ` _ \ / _` | | '_ \ | _| '__/ _` | '_ ` _ \ / _ \ \ /\ / / _ \| '__| |/ /
166 * | | | | | | (_| | | | | | | | | | | (_| | | | | | | __/\ V V / (_) | | | <
167 * |_| |_| |_|\__,_|_|_| |_| |_| |_| \__,_|_| |_| |_|\___| \_/\_/ \___/|_| |_|\_\
169 ***********************************************************************************/
172 * Default emitter for anything that we don't want to generate code for.
174 static void emit_nothing(const ir_node *node)
182 static void emit_amd64_SymConst(const ir_node *irn)
184 const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(irn);
185 // sym_or_tv_t key, *entry;
188 // key.u.id = get_entity_ld_ident(attr->entity);
191 // entry = (sym_or_tv_t *)set_insert(sym_or_tv, &key, sizeof(key), HASH_PTR(key.u.generic));
192 // if (entry->label == 0) {
193 // /* allocate a label */
194 // entry->label = get_unique_label();
196 // label = entry->label;
198 be_emit_cstring("\tmov $");
199 be_gas_emit_entity(attr->entity);
200 be_emit_cstring(", ");
201 amd64_emit_dest_register(irn, 0);
202 be_emit_finish_line_gas(irn);
208 static void emit_amd64_Conv(const ir_node *irn)
210 const amd64_attr_t *attr = get_irn_generic_attr_const(irn);
213 be_emit_cstring("\tmov ");
214 amd64_emit_source_register(irn, 0);
215 be_emit_cstring(", ");
216 amd64_emit_dest_register(irn, 0);
217 be_emit_finish_line_gas(irn);
222 * Returns the next block in a block schedule.
224 static ir_node *sched_next_block(const ir_node *block)
226 return get_irn_link(block);
230 * Returns the target block for a control flow node.
232 static ir_node *get_cfop_target_block(const ir_node *irn)
234 return get_irn_link(irn);
238 * Emit the target label for a control flow node.
240 static void amd64_emit_cfop_target(const ir_node *irn)
242 ir_node *block = get_cfop_target_block(irn);
244 be_gas_emit_block_name(block);
250 static void emit_amd64_Jmp(const ir_node *node)
252 ir_node *block, *next_block;
254 /* for now, the code works for scheduled and non-schedules blocks */
255 block = get_nodes_block(node);
257 /* we have a block schedule */
258 next_block = sched_next_block(block);
259 if (get_cfop_target_block(node) != next_block) {
260 be_emit_cstring("\tjmp ");
261 amd64_emit_cfop_target(node);
263 be_emit_cstring("\t/* fallthrough to ");
264 amd64_emit_cfop_target(node);
265 be_emit_cstring(" */");
267 be_emit_finish_line_gas(node);
271 * Emit a Compare with conditional branch.
273 static void emit_amd64_Jcc(const ir_node *irn)
275 const ir_edge_t *edge;
276 const ir_node *proj_true = NULL;
277 const ir_node *proj_false = NULL;
278 const ir_node *block;
279 const ir_node *next_block;
281 const amd64_attr_t *attr = get_irn_generic_attr_const(irn);
282 int proj_num = attr->ext.pnc;
283 ir_node *op1 = get_irn_n(irn, 0);
284 const amd64_attr_t *cmp_attr = get_irn_generic_attr_const(op1);
285 bool is_signed = !cmp_attr->data.cmp_unsigned;
287 assert(is_amd64_Cmp(op1));
289 foreach_out_edge(irn, edge) {
290 ir_node *proj = get_edge_src_irn(edge);
291 long nr = get_Proj_proj(proj);
292 if (nr == pn_Cond_true) {
299 if (cmp_attr->data.ins_permuted) {
300 proj_num = get_mirrored_pnc(proj_num);
303 /* for now, the code works for scheduled and non-schedules blocks */
304 block = get_nodes_block(irn);
306 /* we have a block schedule */
307 next_block = sched_next_block(block);
309 assert(proj_num != pn_Cmp_False);
310 assert(proj_num != pn_Cmp_True);
312 if (get_cfop_target_block(proj_true) == next_block) {
313 /* exchange both proj's so the second one can be omitted */
314 const ir_node *t = proj_true;
316 proj_true = proj_false;
318 proj_num = get_negated_pnc(proj_num, mode_Lu);
322 case pn_Cmp_Eq: suffix = "e"; break;
323 case pn_Cmp_Lt: suffix = is_signed ? "l" : "b"; break;
324 case pn_Cmp_Le: suffix = is_signed ? "le" : "be"; break;
325 case pn_Cmp_Gt: suffix = is_signed ? "g" : "a"; break;
326 case pn_Cmp_Ge: suffix = is_signed ? "ge" : "ae"; break;
327 case pn_Cmp_Lg: suffix = "ne"; break;
328 case pn_Cmp_Leg: suffix = "mp"; break;
329 default: panic("Cmp has unsupported pnc");
332 /* emit the true proj */
333 be_emit_irprintf("\tj%s ", suffix);
334 amd64_emit_cfop_target(proj_true);
335 be_emit_finish_line_gas(proj_true);
337 if (get_cfop_target_block(proj_false) == next_block) {
338 be_emit_cstring("\t/* fallthrough to ");
339 amd64_emit_cfop_target(proj_false);
340 be_emit_cstring(" */");
341 be_emit_finish_line_gas(proj_false);
343 be_emit_cstring("\tjmp ");
344 amd64_emit_cfop_target(proj_false);
345 be_emit_finish_line_gas(proj_false);
350 * Emits code for a call.
352 static void emit_be_Call(const ir_node *node)
354 ir_entity *entity = be_Call_get_entity(node);
356 /* %eax/%rax is used in AMD64 to pass the number of vector parameters for
357 * variable argument counts */
358 if (get_method_variadicity (be_Call_get_type((ir_node *) node))) {
359 /* But this still is a hack... */
360 be_emit_cstring("\txor %rax, %rax");
361 be_emit_finish_line_gas(node);
365 be_emit_cstring("\tcall ");
366 be_gas_emit_entity (be_Call_get_entity(node));
367 be_emit_finish_line_gas(node);
369 be_emit_pad_comment();
370 be_emit_cstring("/* FIXME: call NULL entity?! */\n");
377 static void emit_be_Copy(const ir_node *irn)
379 ir_mode *mode = get_irn_mode(irn);
381 if (get_in_reg(irn, 0) == get_out_reg(irn, 0)) {
386 if (mode_is_float(mode)) {
387 panic("emit_be_Copy: move not supported for FP");
388 } else if (mode_is_data(mode)) {
389 be_emit_cstring("\tmov ");
390 amd64_emit_source_register(irn, 0);
391 be_emit_cstring(", ");
392 amd64_emit_dest_register(irn, 0);
393 be_emit_finish_line_gas(irn);
395 panic("emit_be_Copy: move not supported for this mode");
399 static void emit_be_Perm(const ir_node *node)
401 const arch_register_t *in0, *in1;
402 const arch_register_class_t *cls0, *cls1;
404 in0 = arch_get_irn_register(get_irn_n(node, 0));
405 in1 = arch_get_irn_register(get_irn_n(node, 1));
407 cls0 = arch_register_get_class(in0);
408 cls1 = arch_register_get_class(in1);
410 assert(cls0 == cls1 && "Register class mismatch at Perm");
412 be_emit_cstring("\txchg ");
413 amd64_emit_register (in0);
414 be_emit_cstring(", ");
415 amd64_emit_register (in1);
416 be_emit_finish_line_gas(node);
418 if (cls0 != &amd64_reg_classes[CLASS_amd64_gp]) {
419 panic("unexpected register class in be_Perm (%+F)", node);
423 static void emit_amd64_FrameAddr(const ir_node *irn)
425 const amd64_SymConst_attr_t *attr = get_irn_generic_attr_const(irn);
427 be_emit_cstring("\tmov ");
428 amd64_emit_source_register(irn, 0);
429 be_emit_cstring(", ");
430 amd64_emit_dest_register(irn, 0);
431 be_emit_finish_line_gas(irn);
433 be_emit_cstring("\tadd ");
434 be_emit_irprintf("$0x%X", attr->fp_offset);
435 be_emit_cstring(", ");
436 amd64_emit_dest_register(irn, 0);
437 be_emit_finish_line_gas(irn);
441 * Emits code to increase stack pointer.
443 static void emit_be_IncSP(const ir_node *node)
445 int offs = be_get_IncSP_offset(node);
451 be_emit_irprintf("\tsub ");
452 be_emit_irprintf("$%u, ", offs);
453 amd64_emit_dest_register(node, 0);
454 be_emit_finish_line_gas(node);
456 be_emit_irprintf("\tadd ");
457 be_emit_irprintf("$%u, ", -offs);
458 amd64_emit_dest_register(node, 0);
459 be_emit_finish_line_gas(node);
464 * Emits code for a return.
466 static void emit_be_Return(const ir_node *node)
468 be_emit_cstring("\tret");
469 be_emit_finish_line_gas(node);
473 static void emit_amd64_binop_op(const ir_node *irn, int second_op)
475 if (irn->op == op_amd64_Add) {
476 be_emit_cstring("\tadd ");
477 amd64_emit_source_register(irn, second_op);
478 be_emit_cstring(", ");
479 amd64_emit_dest_register(irn, 0);
480 be_emit_finish_line_gas(irn);
481 } else if (irn->op == op_amd64_Sub) {
482 be_emit_cstring("\tneg ");
483 amd64_emit_source_register(irn, second_op);
484 be_emit_finish_line_gas(irn);
485 be_emit_cstring("\tadd ");
486 amd64_emit_source_register(irn, second_op);
487 be_emit_cstring(", ");
488 amd64_emit_dest_register(irn, 0);
489 be_emit_finish_line_gas(irn);
490 be_emit_cstring("\tneg ");
491 amd64_emit_source_register(irn, second_op);
492 be_emit_finish_line_gas(irn);
498 * Emits an arithmetic operation that handles arbitraty input registers.
500 static void emit_amd64_binop(const ir_node *irn)
502 const arch_register_t *reg_s1 = get_in_reg(irn, 0);
503 const arch_register_t *reg_s2 = get_in_reg(irn, 1);
504 const arch_register_t *reg_d1 = get_out_reg(irn, 0);
508 if (reg_d1 != reg_s1 && reg_d1 != reg_s2) {
509 be_emit_cstring("\tmov ");
510 amd64_emit_register(reg_s1);
511 be_emit_cstring(", ");
512 amd64_emit_register(reg_d1);
513 be_emit_finish_line_gas(irn);
516 } else if (reg_d1 == reg_s2 && reg_d1 != reg_s1) {
521 emit_amd64_binop_op(irn, second_op);
525 * The type of a emitter function.
527 typedef void (emit_func)(const ir_node *irn);
530 * Set a node emitter. Make it a bit more type safe.
532 static inline void set_emitter(ir_op *op, emit_func arm_emit_node)
534 op->ops.generic = (op_func)arm_emit_node;
538 * Enters the emitter functions for handled nodes into the generic
539 * pointer of an opcode.
541 static void amd64_register_emitters(void)
543 /* first clear the generic function pointer for all ops */
544 clear_irp_opcodes_generic_func();
546 /* register all emitter functions defined in spec */
547 amd64_register_spec_emitters();
549 set_emitter(op_amd64_SymConst, emit_amd64_SymConst);
550 set_emitter(op_amd64_Jmp, emit_amd64_Jmp);
551 set_emitter(op_amd64_Jcc, emit_amd64_Jcc);
552 set_emitter(op_amd64_Conv, emit_amd64_Conv);
553 set_emitter(op_amd64_FrameAddr, emit_amd64_FrameAddr);
554 set_emitter(op_be_Return, emit_be_Return);
555 set_emitter(op_be_Call, emit_be_Call);
556 set_emitter(op_be_Copy, emit_be_Copy);
557 set_emitter(op_be_IncSP, emit_be_IncSP);
558 set_emitter(op_be_Perm, emit_be_Perm);
560 set_emitter(op_amd64_Add, emit_amd64_binop);
561 set_emitter(op_amd64_Sub, emit_amd64_binop);
563 set_emitter(op_be_Start, emit_nothing);
564 set_emitter(op_be_Keep, emit_nothing);
565 set_emitter(op_be_Barrier, emit_nothing);
566 set_emitter(op_Phi, emit_nothing);
569 typedef void (*emit_func_ptr) (const ir_node *);
572 * Emits code for a node.
574 static void amd64_emit_node(const ir_node *node)
576 ir_op *op = get_irn_op(node);
578 if (op->ops.generic) {
579 emit_func_ptr func = (emit_func_ptr) op->ops.generic;
582 ir_fprintf(stderr, "No emitter for node %+F\n", node);
587 * Walks over the nodes in a block connected by scheduling edges
588 * and emits code for each node.
590 static void amd64_gen_block(ir_node *block, void *data)
595 if (! is_Block(block))
598 be_gas_emit_block_name(block);
601 be_emit_write_line();
603 sched_foreach(block, node) {
604 amd64_emit_node(node);
610 * Sets labels for control flow nodes (jump target)
611 * TODO: Jump optimization
613 static void amd64_gen_labels(ir_node *block, void *env)
616 int n = get_Block_n_cfgpreds(block);
619 for (n--; n >= 0; n--) {
620 pred = get_Block_cfgpred(block, n);
621 set_irn_link(pred, block);
628 void amd64_gen_routine(ir_graph *irg)
630 ir_entity *entity = get_irg_entity(irg);
634 /* register all emitter functions */
635 amd64_register_emitters();
637 blk_sched = be_create_block_schedule(irg);
639 be_dbg_method_begin(entity);
640 be_gas_emit_function_prolog(entity, 4);
642 irg_block_walk_graph(irg, amd64_gen_labels, NULL, NULL);
644 n = ARR_LEN(blk_sched);
645 for (i = 0; i < n; i++) {
646 ir_node *block = blk_sched[i];
647 ir_node *next = (i + 1) < n ? blk_sched[i+1] : NULL;
649 set_irn_link(block, next);
652 for (i = 0; i < n; ++i) {
653 ir_node *block = blk_sched[i];
655 amd64_gen_block(block, 0);
658 be_gas_emit_function_epilog(entity);
661 be_emit_write_line();