/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   emit assembler for a backend graph
 */
#include "config.h"

#include "error.h"
#include "iredges.h"
#include "irgwalk.h"
#include "irprintf.h"

#include "beemitter.h"
#include "begnuas.h"
#include "benode.h"
#include "besched.h"
#include "beblocksched.h"

#include "amd64_emitter.h"
#include "gen_amd64_emitter.h"
#include "gen_amd64_regalloc_if.h"
#include "amd64_nodes_attr.h"
#include "amd64_new_nodes.h"
/*************************************************************
 *             _       _    __   _          _
 *            (_)     | |  / _| | |        | |
 *  _ __  _ __ _ _ __ | |_| |_  | |__   ___| |_ __   ___ _ __
 * | '_ \| '__| | '_ \| __|  _| | '_ \ / _ \ | '_ \ / _ \ '__|
 * | |_) | |  | | | | | |_| |   | | | |  __/ | |_) |  __/ |
 * | .__/|_|  |_|_| |_|\__|_|   |_| |_|\___|_| .__/ \___|_|
 * | |                                       | |
 * |_|                                       |_|
 *************************************************************/
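/* Operand printing helpers. They are used by the generated emitter
 * functions (see gen_amd64_emitter) when the emit templates of the node
 * specification are expanded. */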
void amd64_emit_register(const arch_register_t *reg)
{
    be_emit_char('%');
    be_emit_string(arch_register_get_name(reg));
}

void amd64_emit_immediate(const ir_node *node)
{
    const amd64_attr_t *attr = get_amd64_attr_const(node);
    be_emit_char('$');
    be_emit_irprintf("0x%X", attr->ext.imm_value);
}

void amd64_emit_fp_offset(const ir_node *node)
{
    const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(node);
    be_emit_irprintf("%d", attr->fp_offset);
}

void amd64_emit_source_register(const ir_node *node, int pos)
{
    amd64_emit_register(arch_get_irn_register_in(node, pos));
}

void amd64_emit_dest_register(const ir_node *node, int pos)
{
    amd64_emit_register(arch_get_irn_register_out(node, pos));
}
/**
 * Returns the target label for a control flow node.
 *
 * Unused older variant that printed numeric block labels; the
 * amd64_emit_cfop_target() further below uses be_gas_emit_block_name()
 * instead.
 */
#if 0
static void amd64_emit_cfop_target(const ir_node *node)
{
    ir_node *block = get_irn_link(node);

    be_emit_irprintf("BLOCK_%ld", get_irn_node_nr(block));
}
#endif
/***********************************************************************************
 *                  _          __                                             _
 *                 (_)        / _|                                           | |
 *  _ __ ___   __ _ _ _ __   | |_ _ __ __ _ _ __ ___   _____      _____  _ __| | __
 * | '_ ` _ \ / _` | | '_ \  |  _| '__/ _` | '_ ` _ \ / _ \ \ /\ / / _ \| '__| |/ /
 * | | | | | | (_| | | | | | | | | | | (_| | | | | | |  __/\ V  V / (_) | |  |   <
 * |_| |_| |_|\__,_|_|_| |_| |_| |_|  \__,_|_| |_| |_|\___| \_/\_/ \___/|_|  |_|\_\
 *
 ***********************************************************************************/
/**
 * Default emitter for anything that we don't want to generate code for.
 */
static void emit_nothing(const ir_node *node)
{
    (void) node;
}
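/**
 * Emits code for a SymConst node: moves the address of the referenced
 * entity into the destination register.
 */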
static void emit_amd64_SymConst(const ir_node *irn)
{
    const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(irn);
#if 0
    /* Label bookkeeping; disabled because it relies on a sym_or_tv set and
     * a get_unique_label() helper that are not part of this file. */
    sym_or_tv_t key, *entry;
    unsigned label;

    key.u.id = get_entity_ld_ident(attr->entity);
    entry = set_insert(sym_or_tv_t, sym_or_tv, &key, sizeof(key), hash_ptr(key.u.generic));
    if (entry->label == 0) {
        /* allocate a label */
        entry->label = get_unique_label();
    }
    label = entry->label;
#endif

    be_emit_cstring("\tmov $");
    be_gas_emit_entity(attr->entity);
    be_emit_cstring(", ");
    amd64_emit_dest_register(irn, 0);
    be_emit_finish_line_gas(irn);
}
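/**
 * Emits code for a Conv node. For now this is a plain register-to-register
 * mov; no zero- or sign-extension is performed.
 */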
static void emit_amd64_Conv(const ir_node *irn)
{
    be_emit_cstring("\tmov ");
    amd64_emit_source_register(irn, 0);
    be_emit_cstring(", ");
    amd64_emit_dest_register(irn, 0);
    be_emit_finish_line_gas(irn);
}
/**
 * Returns the next block in a block schedule.
 */
static ir_node *sched_next_block(const ir_node *block)
{
    return (ir_node*)get_irn_link(block);
}

/**
 * Returns the target block for a control flow node.
 */
static ir_node *get_cfop_target_block(const ir_node *irn)
{
    return (ir_node*)get_irn_link(irn);
}

/**
 * Emits the target label for a control flow node.
 */
static void amd64_emit_cfop_target(const ir_node *irn)
{
    ir_node *block = get_cfop_target_block(irn);

    be_gas_emit_block_name(block);
}
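/**
 * Emits code for an unconditional Jmp. The jump is omitted (only a comment
 * is printed in verbose-asm mode) when the target block directly follows in
 * the block schedule.
 */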
static void emit_amd64_Jmp(const ir_node *node)
{
    ir_node *block, *next_block;

    /* for now, the code works for scheduled and non-scheduled blocks */
    block = get_nodes_block(node);

    /* we have a block schedule */
    next_block = sched_next_block(block);
    if (get_cfop_target_block(node) != next_block) {
        be_emit_cstring("\tjmp ");
        amd64_emit_cfop_target(node);
    } else {
        if (be_options.verbose_asm) {
            be_emit_cstring("\t/* fallthrough to ");
            amd64_emit_cfop_target(node);
            be_emit_cstring(" */");
        }
    }
    be_emit_finish_line_gas(node);
}
/**
 * Emits a Compare with conditional branch.
 */
static void emit_amd64_Jcc(const ir_node *irn)
{
    const ir_edge_t *edge;
    const ir_node   *proj_true  = NULL;
    const ir_node   *proj_false = NULL;
    const ir_node   *block;
    const ir_node   *next_block;
    const char      *suffix;
    const amd64_attr_t *attr = get_amd64_attr_const(irn);
    ir_relation relation = attr->ext.relation;
    ir_node *op1 = get_irn_n(irn, 0);
    const amd64_attr_t *cmp_attr = get_amd64_attr_const(op1);
    bool is_signed = !cmp_attr->data.cmp_unsigned;

    assert(is_amd64_Cmp(op1));

    foreach_out_edge(irn, edge) {
        ir_node *proj = get_edge_src_irn(edge);
        long nr = get_Proj_proj(proj);
        if (nr == pn_Cond_true) {
            proj_true = proj;
        } else {
            proj_false = proj;
        }
    }

    if (cmp_attr->data.ins_permuted) {
        relation = get_inversed_relation(relation);
    }

    /* for now, the code works for scheduled and non-scheduled blocks */
    block = get_nodes_block(irn);

    /* we have a block schedule */
    next_block = sched_next_block(block);

    assert(relation != ir_relation_false);
    assert(relation != ir_relation_true);

    if (get_cfop_target_block(proj_true) == next_block) {
        /* exchange both proj's so the second one can be omitted */
        const ir_node *t = proj_true;

        proj_true  = proj_false;
        proj_false = t;
        relation   = get_negated_relation(relation);
    }

    switch (relation & ir_relation_less_equal_greater) {
    case ir_relation_equal:              suffix = "e";  break;
    case ir_relation_less:               suffix = is_signed ? "l"  : "b";  break;
    case ir_relation_less_equal:         suffix = is_signed ? "le" : "be"; break;
    case ir_relation_greater:            suffix = is_signed ? "g"  : "a";  break;
    case ir_relation_greater_equal:      suffix = is_signed ? "ge" : "ae"; break;
    case ir_relation_less_greater:       suffix = "ne"; break;
    case ir_relation_less_equal_greater: suffix = "mp"; break;
    default: panic("Cmp has unsupported pnc");
    }

    /* emit the true proj */
    be_emit_irprintf("\tj%s ", suffix);
    amd64_emit_cfop_target(proj_true);
    be_emit_finish_line_gas(proj_true);

    if (get_cfop_target_block(proj_false) == next_block) {
        if (be_options.verbose_asm) {
            be_emit_cstring("\t/* fallthrough to ");
            amd64_emit_cfop_target(proj_false);
            be_emit_cstring(" */");
            be_emit_finish_line_gas(proj_false);
        }
    } else {
        be_emit_cstring("\tjmp ");
        amd64_emit_cfop_target(proj_false);
        be_emit_finish_line_gas(proj_false);
    }
}
/**
 * Emits code for a call.
 */
static void emit_be_Call(const ir_node *node)
{
    ir_entity *entity = be_Call_get_entity(node);

    /* %eax/%rax is used on AMD64 to pass the number of vector parameters for
     * variable argument counts */
    if (get_method_variadicity(be_Call_get_type((ir_node *) node))) {
        /* But this still is a hack... */
        be_emit_cstring("\txor %rax, %rax");
        be_emit_finish_line_gas(node);
    }

    if (entity) {
        be_emit_cstring("\tcall ");
        be_gas_emit_entity(be_Call_get_entity(node));
        be_emit_finish_line_gas(node);
    } else {
        be_emit_pad_comment();
        be_emit_cstring("/* FIXME: call NULL entity?! */\n");
    }
}
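/**
 * Emits code for a Copy node: a register-to-register mov, omitted entirely
 * when source and destination got the same register assigned.
 */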
static void emit_be_Copy(const ir_node *irn)
{
    ir_mode *mode = get_irn_mode(irn);

    if (arch_get_irn_register_in(irn, 0) == arch_get_irn_register_out(irn, 0)) {
        /* omitted Copy */
        return;
    }

    if (mode_is_float(mode)) {
        panic("emit_be_Copy: move not supported for FP");
    } else if (mode_is_data(mode)) {
        be_emit_cstring("\tmov ");
        amd64_emit_source_register(irn, 0);
        be_emit_cstring(", ");
        amd64_emit_dest_register(irn, 0);
        be_emit_finish_line_gas(irn);
    } else {
        panic("emit_be_Copy: move not supported for this mode");
    }
}
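/**
 * Emits code for a Perm node: exchanges two general purpose registers with
 * an xchg instruction.
 */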
static void emit_be_Perm(const ir_node *node)
{
    const arch_register_t       *in0, *in1;
    const arch_register_class_t *cls0, *cls1;

    in0 = arch_get_irn_register(get_irn_n(node, 0));
    in1 = arch_get_irn_register(get_irn_n(node, 1));

    cls0 = arch_register_get_class(in0);
    cls1 = arch_register_get_class(in1);

    assert(cls0 == cls1 && "Register class mismatch at Perm");

    be_emit_cstring("\txchg ");
    amd64_emit_register(in0);
    be_emit_cstring(", ");
    amd64_emit_register(in1);
    be_emit_finish_line_gas(node);

    if (cls0 != &amd64_reg_classes[CLASS_amd64_gp]) {
        panic("unexpected register class in be_Perm (%+F)", node);
    }
}
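/**
 * Emits code for a FrameAddr node: copies the register operand into the
 * destination and adds the frame entity's offset to it.
 */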
static void emit_amd64_FrameAddr(const ir_node *irn)
{
    const amd64_SymConst_attr_t *attr =
        (const amd64_SymConst_attr_t*) get_amd64_attr_const(irn);

    be_emit_cstring("\tmov ");
    amd64_emit_source_register(irn, 0);
    be_emit_cstring(", ");
    amd64_emit_dest_register(irn, 0);
    be_emit_finish_line_gas(irn);

    be_emit_cstring("\tadd ");
    be_emit_irprintf("$0x%X", attr->fp_offset);
    be_emit_cstring(", ");
    amd64_emit_dest_register(irn, 0);
    be_emit_finish_line_gas(irn);
}
/**
 * Emits code to increase the stack pointer.
 */
static void emit_be_IncSP(const ir_node *node)
{
    int offs = be_get_IncSP_offset(node);

    if (offs == 0)
        return;

    if (offs > 0) {
        be_emit_irprintf("\tsub ");
        be_emit_irprintf("$%u, ", offs);
        amd64_emit_dest_register(node, 0);
        be_emit_finish_line_gas(node);
    } else {
        be_emit_irprintf("\tadd ");
        be_emit_irprintf("$%u, ", -offs);
        amd64_emit_dest_register(node, 0);
        be_emit_finish_line_gas(node);
    }
}
/**
 * Emits code for a return.
 */
static void emit_be_Return(const ir_node *node)
{
    be_emit_cstring("\tret");
    be_emit_finish_line_gas(node);
}
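/**
 * Emits the actual arithmetic of a binop once the destination register
 * already holds the first operand. Add is a single instruction; Sub is
 * emitted as neg/add/neg, which subtracts the second operand from the
 * destination while restoring the second source register afterwards.
 */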
static void emit_amd64_binop_op(const ir_node *irn, int second_op)
{
    if (irn->op == op_amd64_Add) {
        be_emit_cstring("\tadd ");
        amd64_emit_source_register(irn, second_op);
        be_emit_cstring(", ");
        amd64_emit_dest_register(irn, 0);
        be_emit_finish_line_gas(irn);
    } else if (irn->op == op_amd64_Sub) {
        be_emit_cstring("\tneg ");
        amd64_emit_source_register(irn, second_op);
        be_emit_finish_line_gas(irn);
        be_emit_cstring("\tadd ");
        amd64_emit_source_register(irn, second_op);
        be_emit_cstring(", ");
        amd64_emit_dest_register(irn, 0);
        be_emit_finish_line_gas(irn);
        be_emit_cstring("\tneg ");
        amd64_emit_source_register(irn, second_op);
        be_emit_finish_line_gas(irn);
    }
}
/**
 * Emits an arithmetic operation that handles arbitrary input registers.
 */
static void emit_amd64_binop(const ir_node *irn)
{
    const arch_register_t *reg_s1 = arch_get_irn_register_in(irn, 0);
    const arch_register_t *reg_s2 = arch_get_irn_register_in(irn, 1);
    const arch_register_t *reg_d1 = arch_get_irn_register_out(irn, 0);

    /* index of the input that still has to be applied to the destination */
    int second_op = 1;

    if (reg_d1 != reg_s1 && reg_d1 != reg_s2) {
        /* destination is a third register: move the first operand there */
        be_emit_cstring("\tmov ");
        amd64_emit_register(reg_s1);
        be_emit_cstring(", ");
        amd64_emit_register(reg_d1);
        be_emit_finish_line_gas(irn);
        second_op = 1;
    } else if (reg_d1 == reg_s2 && reg_d1 != reg_s1) {
        /* destination already holds the second operand: apply the first one */
        second_op = 0;
    }

    emit_amd64_binop_op(irn, second_op);
}
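/* Example (hypothetical register assignment reg_d1 = %rax, reg_s1 = %rsi,
 * reg_s2 = %rdx) for an Add node:
 *     mov %rsi, %rax
 *     add %rdx, %rax
 */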
/**
 * The type of an emitter function.
 */
typedef void (emit_func)(const ir_node *irn);

/**
 * Sets a node emitter. Make it a bit more type safe.
 */
static inline void set_emitter(ir_op *op, emit_func emit_node)
{
    op->ops.generic = (op_func)emit_node;
}
/**
 * Enters the emitter functions for handled nodes into the generic
 * pointer of an opcode.
 */
static void amd64_register_emitters(void)
{
    /* first clear the generic function pointer for all ops */
    ir_clear_opcodes_generic_func();

    /* register all emitter functions defined in spec */
    amd64_register_spec_emitters();

    set_emitter(op_amd64_SymConst,  emit_amd64_SymConst);
    set_emitter(op_amd64_Jmp,       emit_amd64_Jmp);
    set_emitter(op_amd64_Jcc,       emit_amd64_Jcc);
    set_emitter(op_amd64_Conv,      emit_amd64_Conv);
    set_emitter(op_amd64_FrameAddr, emit_amd64_FrameAddr);
    set_emitter(op_be_Return,       emit_be_Return);
    set_emitter(op_be_Call,         emit_be_Call);
    set_emitter(op_be_Copy,         emit_be_Copy);
    set_emitter(op_be_IncSP,        emit_be_IncSP);
    set_emitter(op_be_Perm,         emit_be_Perm);

    set_emitter(op_amd64_Add,       emit_amd64_binop);
    set_emitter(op_amd64_Sub,       emit_amd64_binop);

    set_emitter(op_be_Start,        emit_nothing);
    set_emitter(op_be_Keep,         emit_nothing);
    set_emitter(op_Phi,             emit_nothing);
}
typedef void (*emit_func_ptr) (const ir_node *);

/**
 * Emits code for a node.
 */
static void amd64_emit_node(const ir_node *node)
{
    ir_op *op = get_irn_op(node);

    if (op->ops.generic) {
        emit_func_ptr func = (emit_func_ptr) op->ops.generic;
        func(node);
    } else {
        ir_fprintf(stderr, "No emitter for node %+F\n", node);
    }
}
/**
 * Walks over the nodes in a block connected by scheduling edges
 * and emits code for each node.
 */
static void amd64_gen_block(ir_node *block, void *data)
{
    (void) data;

    if (! is_Block(block))
        return;

    be_gas_begin_block(block, true);

    sched_foreach(block, node) {
        amd64_emit_node(node);
    }
}
/**
 * Sets labels for control flow nodes (jump targets).
 * TODO: Jump optimization
 */
static void amd64_gen_labels(ir_node *block, void *env)
{
    ir_node *pred;
    int n = get_Block_n_cfgpreds(block);
    (void) env;

    for (n--; n >= 0; n--) {
        pred = get_Block_cfgpred(block, n);
        set_irn_link(pred, block);
    }
}
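/**
 * Main driver: emits the code of one routine.
 */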
void amd64_gen_routine(ir_graph *irg)
{
    ir_entity *entity = get_irg_entity(irg);
    ir_node  **blk_sched;
    size_t     i, n;

    /* register all emitter functions */
    amd64_register_emitters();

    blk_sched = be_create_block_schedule(irg);

    be_gas_emit_function_prolog(entity, 4, NULL);

    irg_block_walk_graph(irg, amd64_gen_labels, NULL, NULL);

    /* link each block to its successor in the block schedule */
    n = ARR_LEN(blk_sched);
    for (i = 0; i < n; i++) {
        ir_node *block = blk_sched[i];
        ir_node *next  = (i + 1) < n ? blk_sched[i+1] : NULL;

        set_irn_link(block, next);
    }

    for (i = 0; i < n; ++i) {
        ir_node *block = blk_sched[i];

        amd64_gen_block(block, 0);
    }

    be_gas_emit_function_epilog(entity);
}