/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   emit assembler for a backend graph
 */

#include "beblocksched.h"

#include "amd64_emitter.h"
#include "gen_amd64_emitter.h"
#include "gen_amd64_regalloc_if.h"
#include "amd64_nodes_attr.h"
#include "amd64_new_nodes.h"

/*************************************************************
 *                      printf helper
 *************************************************************/

/**
 * Returns the target block for a control flow node.
 */
static ir_node *get_cfop_target_block(const ir_node *irn)
{
	return (ir_node*)get_irn_link(irn);
}

/**
 * Emits one assembler line for @p node. Recognized format conversions:
 *   %%     a literal '%'              %C     immediate of the node
 *   %D<n>  output register n          %E     entity (vararg)
 *   %L     jump target of the node    %O     frame offset of a SymConst
 *   %R     register (vararg)          %S<n>  input register n ('*' = vararg)
 *   %d/%u/%s  int/unsigned/string varargs
 */
void amd64_emitf(ir_node const *const node, char const *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);

	be_emit_char('\t');
	for (;;) {
		char const *start = fmt;
		while (*fmt != '%' && *fmt != '\n' && *fmt != '\0')
			++fmt;
		be_emit_string_len(start, fmt - start);

		if (*fmt == '\n') { /* start a fresh instruction line */
			be_emit_char('\n');
			be_emit_write_line();
			be_emit_char('\t');
			++fmt;
			continue;
		}
		if (*fmt == '\0')
			break;
		++fmt;

		switch (*fmt++) {
			arch_register_t const *reg;
		case '%': /* a literal '%' */
			be_emit_char('%');
			break;
		case 'C': { /* immediate value from the node attributes */
			amd64_attr_t const *const attr = get_amd64_attr_const(node);
			be_emit_irprintf("$0x%X", attr->ext.imm_value);
			break;
		}
		case 'D': /* output register, selected by a digit */
			if (*fmt < '0' || '9' <= *fmt)
				goto unknown;
			reg = arch_get_irn_register_out(node, *fmt++ - '0');
			goto emit_R;
		case 'E': { /* entity argument */
			ir_entity const *const ent = va_arg(ap, ir_entity const*);
			be_gas_emit_entity(ent);
			break;
		}
		case 'L': { /* jump target of this control flow node */
			ir_node *const block = get_cfop_target_block(node);
			be_gas_emit_block_name(block);
			break;
		}
		case 'O': { /* frame offset from the SymConst attributes */
			amd64_SymConst_attr_t const *const attr = get_amd64_SymConst_attr_const(node);
			be_emit_irprintf("%d", attr->fp_offset);
			break;
		}
		case 'R': /* register argument */
			reg = va_arg(ap, arch_register_t const*);
emit_R:
			be_emit_char('%');
			be_emit_string(reg->name);
			break;
		case 'S': { /* input register, by digit or '*' (int argument) */
			int pos;
			if ('0' <= *fmt && *fmt <= '9') {
				pos = *fmt++ - '0';
			} else if (*fmt == '*') {
				++fmt;
				pos = va_arg(ap, int);
			} else {
				goto unknown;
			}
			reg = arch_get_irn_register_in(node, pos);
			goto emit_R;
		}
		case 'd': { /* signed int argument */
			int const num = va_arg(ap, int);
			be_emit_irprintf("%d", num);
			break;
		}
		case 's': { /* string argument */
			char const *const str = va_arg(ap, char const*);
			be_emit_string(str);
			break;
		}
		case 'u': { /* unsigned int argument */
			unsigned const num = va_arg(ap, unsigned);
			be_emit_irprintf("%u", num);
			break;
		}
		default:
unknown:
			panic("unknown format conversion");
		}
	}

	be_emit_finish_line_gas(node);
	va_end(ap);
}

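/*
 * Usage sketch (illustrative only; register allocation assumed): for an Add
 * node whose input 1 was assigned %rbx and whose output 0 was assigned %rax,
 *
 *     amd64_emitf(node, "add %S1, %D0");
 *
 * writes the line "\tadd %rbx, %rax", while
 *
 *     amd64_emitf(node, "mov $%d, %R", 42, reg);
 *
 * pulls an int and an arch_register_t* from the varargs and writes, e.g.,
 * "\tmov $42, %rcx".
 */
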
/***********************************************************************************
 *                               main framework
 ***********************************************************************************/

/**
 * Default emitter for anything that we don't want to generate code for.
 */
static void emit_nothing(const ir_node *node)
{
	(void)node;
}

static void emit_amd64_SymConst(const ir_node *irn)
{
	const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(irn);
	sym_or_tv_t key, *entry;
	unsigned label;

	key.u.id = get_entity_ld_ident(attr->entity);
	entry = set_insert(sym_or_tv_t, sym_or_tv, &key, sizeof(key), hash_ptr(key.u.generic));
	if (entry->label == 0) {
		/* allocate a label */
		entry->label = get_unique_label();
	}
	label = entry->label;
	(void)label; /* the label is not used by the mov below (yet) */

	amd64_emitf(irn, "mov $%E, %D0", attr->entity);
}

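/* Example (entity and register names assumed): a SymConst referencing an
 * entity named "counter" with output register %rax becomes
 * "mov $counter, %rax". */
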
static void emit_amd64_Conv(const ir_node *irn)
{
	amd64_emitf(irn, "mov %S0, %D0");
}

/**
 * Returns the next block in a block schedule.
 */
static ir_node *sched_next_block(const ir_node *block)
{
	return (ir_node*)get_irn_link(block);
}

static void emit_amd64_Jmp(const ir_node *node)
{
	ir_node *block, *next_block;

	/* for now, the code works for scheduled and non-scheduled blocks */
	block = get_nodes_block(node);

	/* we have a block schedule */
	next_block = sched_next_block(block);
	if (get_cfop_target_block(node) != next_block) {
		amd64_emitf(node, "jmp %L");
	} else if (be_options.verbose_asm) {
		amd64_emitf(node, "/* fallthrough to %L */");
	}
}

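/* Example (label name illustrative): a Jmp at the end of block B1 whose
 * target B2 is not the next block in the schedule emits "jmp .L<B2>"; when
 * B2 follows directly, the jump is dropped and, with verbose assembler
 * output enabled, a fallthrough comment is emitted instead. */
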
/**
 * Emit a Compare with conditional branch.
 */
static void emit_amd64_Jcc(const ir_node *irn)
{
	const ir_node      *proj_true  = NULL;
	const ir_node      *proj_false = NULL;
	const ir_node      *block;
	const ir_node      *next_block;
	const char         *suffix;
	const amd64_attr_t *attr      = get_amd64_attr_const(irn);
	ir_relation         relation  = attr->ext.relation;
	ir_node            *op1       = get_irn_n(irn, 0);
	const amd64_attr_t *cmp_attr  = get_amd64_attr_const(op1);
	bool                is_signed = !cmp_attr->data.cmp_unsigned;

	assert(is_amd64_Cmp(op1));

	/* find the true and false Proj of the Cond */
	foreach_out_edge(irn, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		long nr = get_Proj_proj(proj);
		if (nr == pn_Cond_true) {
			proj_true = proj;
		} else {
			proj_false = proj;
		}
	}

	if (cmp_attr->data.ins_permuted) {
		relation = get_inversed_relation(relation);
	}

	/* for now, the code works for scheduled and non-scheduled blocks */
	block = get_nodes_block(irn);

	/* we have a block schedule */
	next_block = sched_next_block(block);

	assert(relation != ir_relation_false);
	assert(relation != ir_relation_true);

	if (get_cfop_target_block(proj_true) == next_block) {
		/* exchange both projs so the second one can be omitted */
		const ir_node *t = proj_true;

		proj_true  = proj_false;
		proj_false = t;
		relation   = get_negated_relation(relation);
	}

	switch (relation & ir_relation_less_equal_greater) {
	case ir_relation_equal:              suffix = "e";  break;
	case ir_relation_less:               suffix = is_signed ? "l"  : "b";  break;
	case ir_relation_less_equal:         suffix = is_signed ? "le" : "be"; break;
	case ir_relation_greater:            suffix = is_signed ? "g"  : "a";  break;
	case ir_relation_greater_equal:      suffix = is_signed ? "ge" : "ae"; break;
	case ir_relation_less_greater:       suffix = "ne"; break;
	case ir_relation_less_equal_greater: suffix = "mp"; break; /* "jmp": always taken */
	default: panic("Cmp has unsupported relation");
	}

	/* emit the true proj */
	amd64_emitf(proj_true, "j%s %L", suffix);

	if (get_cfop_target_block(proj_false) != next_block) {
		amd64_emitf(proj_false, "jmp %L");
	} else if (be_options.verbose_asm) {
		amd64_emitf(proj_false, "/* fallthrough to %L */");
	}
}

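/* Example (labels assumed): for a signed "less or equal" compare whose true
 * target is not the fallthrough block, this emits
 *     jle .L2
 *     jmp .L3
 * and omits the trailing jmp entirely when .L3 is the next block in the
 * schedule. */
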
/**
 * Emits code for a call.
 */
static void emit_be_Call(const ir_node *node)
{
	ir_entity *entity = be_Call_get_entity(node);

	/* the System V AMD64 ABI passes the number of vector registers used by
	 * a variadic call in %rax (really %al) */
	if (get_method_variadicity(be_Call_get_type((ir_node*)node))) {
		/* But this still is a hack... */
		amd64_emitf(node, "xor %%rax, %%rax");
	}

	if (entity != NULL) {
		amd64_emitf(node, "call %E", entity);
	} else {
		be_emit_pad_comment();
		be_emit_cstring("/* FIXME: call NULL entity?! */\n");
	}
}

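/* Example (symbol name assumed): a variadic call such as printf is emitted as
 *     xor %rax, %rax
 *     call printf
 * where the xor conservatively reports zero vector-register arguments. */
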
static void emit_be_Copy(const ir_node *irn)
{
	ir_mode *mode = get_irn_mode(irn);

	if (arch_get_irn_register_in(irn, 0) == arch_get_irn_register_out(irn, 0)) {
		/* source and destination coincide: omit the Copy */
		return;
	}

	if (mode_is_float(mode)) {
		panic("move not supported for FP");
	} else if (mode_is_data(mode)) {
		amd64_emitf(irn, "mov %S0, %D0");
	} else {
		panic("move not supported for this mode");
	}
}

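/* Example (register names assumed): a Copy of a 64-bit integer value from
 * %rdi to %rax emits "mov %rdi, %rax"; a Copy whose source and destination
 * registers already coincide emits nothing. */
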
static void emit_be_Perm(const ir_node *node)
{
	const arch_register_t *in0, *in1;

	in0 = arch_get_irn_register(get_irn_n(node, 0));
	in1 = arch_get_irn_register(get_irn_n(node, 1));

	arch_register_class_t const *const cls0 = in0->reg_class;
	assert(cls0 == in1->reg_class && "Register class mismatch at Perm");

	if (cls0 == &amd64_reg_classes[CLASS_amd64_gp]) {
		amd64_emitf(node, "xchg %R, %R", in0, in1);
	} else {
		panic("unexpected register class in be_Perm (%+F)", node);
	}
}

static void emit_amd64_FrameAddr(const ir_node *irn)
{
	const amd64_SymConst_attr_t *attr =
		(const amd64_SymConst_attr_t*)get_amd64_attr_const(irn);

	amd64_emitf(irn, "mov %S0, %D0");
	amd64_emitf(irn, "add $%d, %D0", attr->fp_offset);
}

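/* Example (offset and registers assumed): with a frame offset of 16, input 0
 * in %rbp and output 0 in %rax, this emits
 *     mov %rbp, %rax
 *     add $16, %rax
 */
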
/**
 * Emits code to increase the stack pointer.
 */
static void emit_be_IncSP(const ir_node *node)
{
	int offs = be_get_IncSP_offset(node);

	if (offs == 0)
		return;

	if (offs > 0) {
		amd64_emitf(node, "sub $%d, %D0", offs);
	} else {
		amd64_emitf(node, "add $%d, %D0", -offs);
	}
}

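/* Example: an IncSP with offset +32 emits "sub $32, %rsp" (the stack grows
 * downwards), offset -32 emits "add $32, %rsp", and offset 0 emits nothing. */
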
/**
 * Emits code for a return.
 */
static void emit_be_Return(const ir_node *node)
{
	be_emit_cstring("\tret");
	be_emit_finish_line_gas(node);
}

static void emit_amd64_binop_op(const ir_node *irn, int second_op)
{
	if (irn->op == op_amd64_Add) {
		amd64_emitf(irn, "add %S*, %D0", second_op);
	} else if (irn->op == op_amd64_Sub) {
		/* subtract by adding the negated operand, then restore it */
		amd64_emitf(irn, "neg %S*", second_op);
		amd64_emitf(irn, "add %S*, %D0", second_op);
		amd64_emitf(irn, "neg %S*", second_op);
	}
}

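/* Worked example for the Sub case (register names assumed): with the minuend
 * a in %rax (%D0) and the subtrahend b in %rbx,
 *     neg %rbx          %rbx = -b
 *     add %rbx, %rax    %rax = a + (-b) = a - b
 *     neg %rbx          %rbx = b again
 * so the operand register is left unchanged and no scratch register is
 * needed. */
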
/**
 * Emits an arithmetic operation that handles arbitrary input registers.
 */
static void emit_amd64_binop(const ir_node *irn)
{
	const arch_register_t *reg_s1 = arch_get_irn_register_in(irn, 0);
	const arch_register_t *reg_s2 = arch_get_irn_register_in(irn, 1);
	const arch_register_t *reg_d1 = arch_get_irn_register_out(irn, 0);

	/* by default, combine the destination with input 1 */
	int second_op = 1;

	if (reg_d1 != reg_s1 && reg_d1 != reg_s2) {
		/* destination is distinct from both sources: move the first
		 * source into place first */
		amd64_emitf(irn, "mov %R, %R", reg_s1, reg_d1);
	} else if (reg_d1 == reg_s2 && reg_d1 != reg_s1) {
		/* destination already holds input 1, so combine with input 0 */
		second_op = 0;
	}

	emit_amd64_binop_op(irn, second_op);
}

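/* Example (allocation assumed): for an Add with inputs in %rdi and %rsi and
 * the result in %rax this emits "mov %rdi, %rax" followed by
 * "add %rsi, %rax"; if the result register instead equals the second input
 * (%rsi), only "add %rdi, %rsi" is emitted, which is valid because add is
 * commutative. */
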
/**
 * The type of an emitter function.
 */
typedef void (emit_func)(const ir_node *irn);

/**
 * Set a node emitter. Make it a bit more type safe.
 */
static inline void set_emitter(ir_op *op, emit_func emit_node)
{
	op->ops.generic = (op_func)emit_node;
}

/**
 * Enters the emitter functions for handled nodes into the generic
 * pointer of an opcode.
 */
static void amd64_register_emitters(void)
{
	/* first clear the generic function pointer for all ops */
	ir_clear_opcodes_generic_func();

	/* register all emitter functions defined in spec */
	amd64_register_spec_emitters();

	set_emitter(op_amd64_SymConst,  emit_amd64_SymConst);
	set_emitter(op_amd64_Jmp,       emit_amd64_Jmp);
	set_emitter(op_amd64_Jcc,       emit_amd64_Jcc);
	set_emitter(op_amd64_Conv,      emit_amd64_Conv);
	set_emitter(op_amd64_FrameAddr, emit_amd64_FrameAddr);
	set_emitter(op_be_Return,       emit_be_Return);
	set_emitter(op_be_Call,         emit_be_Call);
	set_emitter(op_be_Copy,         emit_be_Copy);
	set_emitter(op_be_IncSP,        emit_be_IncSP);
	set_emitter(op_be_Perm,         emit_be_Perm);

	set_emitter(op_amd64_Add,       emit_amd64_binop);
	set_emitter(op_amd64_Sub,       emit_amd64_binop);

	set_emitter(op_be_Start,        emit_nothing);
	set_emitter(op_be_Keep,         emit_nothing);
	set_emitter(op_Phi,             emit_nothing);
}

typedef void (*emit_func_ptr)(const ir_node *);

/**
 * Emits code for a node.
 */
static void amd64_emit_node(const ir_node *node)
{
	ir_op *op = get_irn_op(node);

	if (op->ops.generic) {
		emit_func_ptr func = (emit_func_ptr)op->ops.generic;
		(*func)(node);
	} else {
		ir_fprintf(stderr, "No emitter for node %+F\n", node);
	}
}

/**
 * Walks over the nodes in a block connected by scheduling edges
 * and emits code for each node.
 */
static void amd64_gen_block(ir_node *block, void *data)
{
	(void)data;

	if (!is_Block(block))
		return;

	be_gas_begin_block(block, true);

	sched_foreach(block, node) {
		amd64_emit_node(node);
	}
}

/**
 * Sets labels for control flow nodes (jump targets).
 * TODO: Jump optimization
 */
static void amd64_gen_labels(ir_node *block, void *env)
{
	ir_node *pred;
	int n = get_Block_n_cfgpreds(block);
	(void)env;

	for (n--; n >= 0; n--) {
		pred = get_Block_cfgpred(block, n);
		set_irn_link(pred, block);
	}
}

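/* After this walk, every control flow node carries its target block in its
 * link field, which is what get_cfop_target_block() reads when the jump
 * emitters above print a %L operand. */
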
/**
 * Main driver: emits the code for one routine.
 */
void amd64_gen_routine(ir_graph *irg)
{
	ir_entity *entity = get_irg_entity(irg);
	ir_node  **blk_sched;
	size_t     i, n;

	/* register all emitter functions */
	amd64_register_emitters();

	blk_sched = be_create_block_schedule(irg);

	be_gas_emit_function_prolog(entity, 4, NULL);

	irg_block_walk_graph(irg, amd64_gen_labels, NULL, NULL);

	/* link each block to its schedule successor so that sched_next_block()
	 * and the fallthrough logic can query it */
	n = ARR_LEN(blk_sched);
	for (i = 0; i < n; ++i) {
		ir_node *block = blk_sched[i];
		ir_node *next  = (i + 1) < n ? blk_sched[i + 1] : NULL;

		set_irn_link(block, next);
	}

	for (i = 0; i < n; ++i) {
		ir_node *block = blk_sched[i];

		amd64_gen_block(block, NULL);
	}

	be_gas_emit_function_epilog(entity);
}