/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   code selection (transform FIRM into amd64 FIRM)
 */

#include <assert.h>
#include <stdbool.h>

#include "irmode_t.h"
#include "irnode_t.h"
#include "irgraph_t.h"
#include "ircons.h"
#include "tv.h"
#include "error.h"
#include "debug.h"

#include "benode.h"
#include "betranshlp.h"
#include "bearch_amd64_t.h"

#include "amd64_nodes_attr.h"
#include "amd64_transform.h"
#include "amd64_new_nodes.h"

#include "gen_amd64_regalloc_if.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

/* Some support functions: */

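/** Returns true if values of the given mode have to live in general purpose registers. */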
static inline int mode_needs_gp_reg(ir_mode *mode)
{
	return mode_is_int(mode) || mode_is_reference(mode);
}

/**
 * Create a DAG constructing a given Const.
 *
 * @param irn  a Firm const
 */
static ir_node *create_const_graph(ir_node *irn, ir_node *block)
{
	ir_tarval *tv   = get_Const_tarval(irn);
	ir_mode   *mode = get_tarval_mode(tv);
	dbg_info  *dbgi = get_irn_dbg_info(irn);
	long       value;

	if (mode_is_reference(mode)) {
		/* AMD64 is 64bit, so we can safely convert a reference tarval into Lu */
		assert(get_mode_size_bits(mode) == get_mode_size_bits(mode_Lu));
		tv = tarval_convert_to(tv, mode_Lu);
	}

	value = get_tarval_long(tv);

	return new_bd_amd64_Immediate(dbgi, block, value);
}

/* Op transformers: */

/**
 * Transforms a Const node.
 *
 * @return The transformed AMD64 node.
 */
static ir_node *gen_Const(ir_node *node) {
	ir_node *block = be_transform_node(get_nodes_block(node));
	ir_mode *mode  = get_irn_mode(node);
	ir_node *res   = create_const_graph(node, block);
	(void) mode;

	return res;
}

/**
 * Transforms a SymConst node.
 *
 * @return The transformed AMD64 node.
 */
static ir_node *gen_SymConst(ir_node *node)
{
	ir_node   *block  = be_transform_node(get_nodes_block(node));
	ir_entity *entity = get_SymConst_entity(node);
	dbg_info  *dbgi   = get_irn_dbg_info(node);
	ir_node   *new_node;

	new_node = new_bd_amd64_SymConst(dbgi, block, entity);
	return new_node;
}

/**
 * Transforms an Add node.
 *
 * @return The transformed AMD64 node.
 */
static ir_node *gen_Add(ir_node *node) {
	ir_node  *block = be_transform_node(get_nodes_block(node));
	/* ir_mode *mode = get_irn_mode(node); */
	ir_node  *op1     = get_Add_left(node);
	ir_node  *op2     = get_Add_right(node);
	dbg_info *dbgi    = get_irn_dbg_info(node);
	ir_node  *new_op1 = be_transform_node(op1);
	ir_node  *new_op2 = be_transform_node(op2);

	ir_node *res = new_bd_amd64_Add(dbgi, block, new_op1, new_op2);
	return res;
}

/**
 * Transforms a Sub node.
 *
 * @return The transformed AMD64 node.
 */
static ir_node *gen_Sub(ir_node *node) {
	ir_node  *block = be_transform_node(get_nodes_block(node));
	/* ir_mode *mode = get_irn_mode(node); */
	ir_node  *op1     = get_Sub_left(node);
	ir_node  *op2     = get_Sub_right(node);
	dbg_info *dbgi    = get_irn_dbg_info(node);
	ir_node  *new_op1 = be_transform_node(op1);
	ir_node  *new_op2 = be_transform_node(op2);

	ir_node *res = new_bd_amd64_Sub(dbgi, block, new_op1, new_op2);
	return res;
}

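/**
 * Transforms a Mul node.
 *
 * @return The transformed AMD64 node.
 */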
static ir_node *gen_Mul(ir_node *node) {
	ir_node  *block = be_transform_node(get_nodes_block(node));
	/* ir_mode *mode = get_irn_mode(node); */
	ir_node  *op1     = get_Mul_left(node);
	ir_node  *op2     = get_Mul_right(node);
	dbg_info *dbgi    = get_irn_dbg_info(node);
	ir_node  *new_op1 = be_transform_node(op1);
	ir_node  *new_op2 = be_transform_node(op2);

	ir_node *res = new_bd_amd64_Mul(dbgi, block, new_op1, new_op2);
	return res;
}

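/**
 * Transforms a Minus node into an AMD64 Neg.
 *
 * @return The transformed AMD64 node.
 */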
static ir_node *gen_Minus(ir_node *node)
{
	ir_node  *block = be_transform_node(get_nodes_block(node));
	ir_node  *val   = be_transform_node(get_Minus_op(node));
	dbg_info *dbgi  = get_irn_dbg_info(node);

	return new_bd_amd64_Neg(dbgi, block, val);
}

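/**
 * Transforms an unconditional Jmp into an AMD64 Jmp.
 */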
static ir_node *gen_Jmp(ir_node *node)
{
	ir_node  *block     = get_nodes_block(node);
	ir_node  *new_block = be_transform_node(block);
	dbg_info *dbgi      = get_irn_dbg_info(node);

	return new_bd_amd64_Jmp(dbgi, new_block);
}

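/**
 * Transforms a be_Call node: duplicate it and mark it as modifying the flags
 * register.
 */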
static ir_node *gen_be_Call(ir_node *node)
{
	ir_node *res = be_duplicate_node(node);
	arch_add_irn_flags(res, arch_irn_flags_modify_flags);

	return res;
}

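/**
 * Transforms a Cmp node.
 *
 * @return The transformed AMD64 Cmp node.
 */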
static ir_node *gen_Cmp(ir_node *node)
{
	ir_node  *block    = be_transform_node(get_nodes_block(node));
	ir_node  *op1      = get_Cmp_left(node);
	ir_node  *op2      = get_Cmp_right(node);
	ir_mode  *cmp_mode = get_irn_mode(op1);
	dbg_info *dbgi     = get_irn_dbg_info(node);
	ir_node  *new_op1;
	ir_node  *new_op2;
	bool      is_unsigned;

	if (mode_is_float(cmp_mode)) {
		panic("Floating point not implemented yet!");
	}

	assert(get_irn_mode(op2) == cmp_mode);
	is_unsigned = !mode_is_signed(cmp_mode);

	new_op1 = be_transform_node(op1);
	/* new_op1 = gen_extension(dbgi, block, new_op1, cmp_mode); */
	new_op2 = be_transform_node(op2);
	/* new_op2 = gen_extension(dbgi, block, new_op2, cmp_mode); */

	return new_bd_amd64_Cmp(dbgi, block, new_op1, new_op2, false,
	                        is_unsigned);
}

/**
 * Transforms a Cond node.
 *
 * @return the created AMD64 Cond node
 */
static ir_node *gen_Cond(ir_node *node)
{
	ir_node    *selector = get_Cond_selector(node);
	ir_mode    *mode     = get_irn_mode(selector);
	ir_node    *block;
	ir_node    *flag_node;
	dbg_info   *dbgi;
	ir_relation relation;

	if (mode != mode_b) {
		panic("create_Switch not implemented yet!");
		// return gen_SwitchJmp(node);
	}
	assert(is_Cmp(selector));

	block     = be_transform_node(get_nodes_block(node));
	dbgi      = get_irn_dbg_info(node);
	flag_node = be_transform_node(selector);
	relation  = get_Cmp_relation(selector);

	return new_bd_amd64_Jcc(dbgi, block, flag_node, relation);
}

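/* Note: the extension helpers below still use ARM node constructors
 * (new_bd_arm_*) and ARM shift constants; they have not been ported to
 * AMD64 nodes yet. */
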
/**
 * Create an And that will zero out upper bits.
 *
 * @param dbgi      debug info
 * @param block     the basic block
 * @param op        the original node
 * @param src_bits  number of lower bits that will remain
 */
static ir_node *gen_zero_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
                                   int src_bits)
{
	if (src_bits == 8) {
		return new_bd_arm_And_imm(dbgi, block, op, 0xFF, 0);
	} else if (src_bits == 16) {
		ir_node *lshift = new_bd_arm_Mov_reg_shift_imm(dbgi, block, op, ARM_SHF_LSL_IMM, 16);
		ir_node *rshift = new_bd_arm_Mov_reg_shift_imm(dbgi, block, lshift, ARM_SHF_LSR_IMM, 16);
		return rshift;
	} else {
		panic("zero extension only supported for 8 and 16 bits");
	}
}

/**
 * Generate code for a sign extension.
 */
static ir_node *gen_sign_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
                                   int src_bits)
{
	int      shift_width = 32 - src_bits;
	ir_node *lshift_node = new_bd_arm_Mov_reg_shift_imm(dbgi, block, op, ARM_SHF_LSL_IMM, shift_width);
	ir_node *rshift_node = new_bd_arm_Mov_reg_shift_imm(dbgi, block, lshift_node, ARM_SHF_ASR_IMM, shift_width);
	return rshift_node;
}

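/**
 * Generates a sign or zero extension of op, depending on the signedness of
 * orig_mode.
 */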
static ir_node *gen_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
                              ir_mode *orig_mode)
{
	int bits = get_mode_size_bits(orig_mode);

	if (mode_is_signed(orig_mode)) {
		return gen_sign_extension(dbgi, block, op, bits);
	} else {
		return gen_zero_extension(dbgi, block, op, bits);
	}
}

/**
 * Returns true if it is assured that the upper bits of a node are "clean",
 * which means for a 16 or 8 bit value that the upper bits in the register
 * are 0 for unsigned and a copy of the sign bit for signed numbers.
 */
static bool upper_bits_clean(ir_node *transformed_node, ir_mode *mode)
{
	(void) transformed_node;
	(void) mode;
	/* conservative default: assume the upper bits are not clean */
	return false;
}

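/**
 * Transforms a Phi node. Phis of integer/reference mode get the general
 * purpose register requirement, all others get no register requirement.
 */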
static ir_node *gen_Phi(ir_node *node)
{
	ir_mode                   *mode = get_irn_mode(node);
	const arch_register_req_t *req;

	if (mode_needs_gp_reg(mode)) {
		/* all integer operations are on 64bit registers now */
		req = amd64_reg_classes[CLASS_amd64_gp].class_req;
	} else {
		req = arch_no_register_req;
	}

	return be_transform_phi(node, req);
}

/**
 * Transforms a Conv node.
 *
 * @return The created AMD64 Conv node.
 */
static ir_node *gen_Conv(ir_node *node)
{
	ir_node  *block    = be_transform_node(get_nodes_block(node));
	ir_node  *op       = get_Conv_op(node);
	ir_node  *new_op   = be_transform_node(op);
	ir_mode  *src_mode = get_irn_mode(op);
	ir_mode  *dst_mode = get_irn_mode(node);
	dbg_info *dbgi     = get_irn_dbg_info(node);

	if (src_mode == dst_mode)
		return new_op;

	if (mode_is_float(src_mode) || mode_is_float(dst_mode)) {
		panic("float not supported yet");
	} else { /* complete in gp registers */
		int      src_bits = get_mode_size_bits(src_mode);
		int      dst_bits = get_mode_size_bits(dst_mode);
		ir_mode *min_mode;
		int      min_bits;

		if (src_bits == dst_bits) {
			/* kill unnecessary conv */
			return new_op;
		}
		if (src_bits < dst_bits) {
			min_bits = src_bits;
			min_mode = src_mode;
		} else {
			min_bits = dst_bits;
			min_mode = dst_mode;
		}
		return new_bd_amd64_Conv(dbgi, block, new_op, min_mode);

		/* note: the extension code below is currently unreachable */
		if (upper_bits_clean(new_op, min_mode)) {
			return new_op;
		}
		if (mode_is_signed(min_mode)) {
			return gen_sign_extension(dbgi, block, new_op, min_bits);
		} else {
			return gen_zero_extension(dbgi, block, new_op, min_bits);
		}
	}
}

/**
 * Transforms a Store.
 *
 * @return the created AMD64 Store node
 */
static ir_node *gen_Store(ir_node *node)
{
	ir_node  *block   = be_transform_node(get_nodes_block(node));
	ir_node  *ptr     = get_Store_ptr(node);
	ir_node  *new_ptr = be_transform_node(ptr);
	ir_node  *mem     = get_Store_mem(node);
	ir_node  *new_mem = be_transform_node(mem);
	ir_node  *val     = get_Store_value(node);
	ir_node  *new_val = be_transform_node(val);
	ir_mode  *mode    = get_irn_mode(val);
	dbg_info *dbgi    = get_irn_dbg_info(node);
	ir_node  *new_store = NULL;

	if (mode_is_float(mode)) {
		panic("Float not supported yet");
	} else {
		assert(mode_is_data(mode) && "unsupported mode for Store");
		new_store = new_bd_amd64_Store(dbgi, block, new_ptr, new_val, new_mem, 0);
	}
	set_irn_pinned(new_store, get_irn_pinned(node));
	return new_store;
}

/**
 * Transforms a Load.
 *
 * @return the created AMD64 Load node
 */
static ir_node *gen_Load(ir_node *node)
{
	ir_node  *block   = be_transform_node(get_nodes_block(node));
	ir_node  *ptr     = get_Load_ptr(node);
	ir_node  *new_ptr = be_transform_node(ptr);
	ir_node  *mem     = get_Load_mem(node);
	ir_node  *new_mem = be_transform_node(mem);
	ir_mode  *mode    = get_Load_mode(node);
	dbg_info *dbgi    = get_irn_dbg_info(node);
	ir_node  *new_load = NULL;

	if (mode_is_float(mode)) {
		panic("Float not supported yet");
	} else {
		assert(mode_is_data(mode) && "unsupported mode for Load");
		new_load = new_bd_amd64_Load(dbgi, block, new_ptr, new_mem, 0);
	}
	set_irn_pinned(new_load, get_irn_pinned(node));

	/* check for special case: the loaded value might not be used */
	if (be_get_Proj_for_pn(node, pn_Load_res) == NULL) {
		/* add a result proj and a Keep to produce a pseudo use */
		ir_node *proj = new_r_Proj(new_load, mode_Iu, pn_amd64_Load_res);
		be_new_Keep(block, 1, &proj);
	}

	return new_load;
}

/**
 * Transform a Proj from a Load.
 */
static ir_node *gen_Proj_Load(ir_node *node)
{
	ir_node  *load     = get_Proj_pred(node);
	ir_node  *new_load = be_transform_node(load);
	dbg_info *dbgi     = get_irn_dbg_info(node);
	long      proj     = get_Proj_proj(node);

	/* renumber the proj */
	switch (get_amd64_irn_opcode(new_load)) {
	case iro_amd64_Load:
		/* handle all gp loads equal: they have the same proj numbers. */
		if (proj == pn_Load_res) {
			return new_rd_Proj(dbgi, new_load, mode_Lu, pn_amd64_Load_res);
		} else if (proj == pn_Load_M) {
			return new_rd_Proj(dbgi, new_load, mode_M, pn_amd64_Load_M);
		}
		break;
#if 0
	/* left over from the sparc backend, no FP loads on amd64 yet */
	case iro_sparc_fpaLoad:
		panic("FP not implemented yet");
#endif
	default:
		panic("Unsupported Proj from Load");
	}

	return be_duplicate_node(node);
}

/**
 * Transform a Proj node.
 */
static ir_node *gen_Proj(ir_node *node)
{
	ir_graph *irg  = current_ir_graph;
	dbg_info *dbgi = get_irn_dbg_info(node);
	ir_node  *pred = get_Proj_pred(node);
	long      proj = get_Proj_proj(node);

	(void) irg;
	(void) dbgi;

	if (is_Store(pred)) {
		if (proj == pn_Store_M) {
			return be_transform_node(pred);
		} else {
			panic("Unsupported Proj from Store");
		}
	} else if (is_Load(pred)) {
		return gen_Proj_Load(node);
#if 0
	/* disabled: the helpers called below do not exist in this backend yet */
	} else if (be_is_SubSP(pred)) {
		//panic("gen_Proj not implemented for SubSP");
		return gen_Proj_be_SubSP(node);
	} else if (be_is_AddSP(pred)) {
		//panic("gen_Proj not implemented for AddSP");
		return gen_Proj_be_AddSP(node);
	} else if (is_Div(pred)) {
		return gen_Proj_Div(node);
#endif
	} else if (is_Start(pred)) {
#if 0
		if (node == get_irg_anchor(irg, anchor_tls)) {
			return gen_Proj_tls(node);
		}
#endif
	} else {
		ir_node *new_pred = be_transform_node(pred);
		ir_mode *mode     = get_irn_mode(node);
		if (mode_needs_gp_reg(mode)) {
			ir_node *new_proj = new_r_Proj(new_pred, mode_Iu, get_Proj_proj(node));
			new_proj->node_nr = node->node_nr;
			return new_proj;
		}
	}

	return be_duplicate_node(node);
}

/**
 * Transforms a be_FrameAddr into an AMD64 FrameAddr node.
 */
static ir_node *gen_be_FrameAddr(ir_node *node)
{
	ir_node   *block  = be_transform_node(get_nodes_block(node));
	ir_entity *ent    = be_get_frame_entity(node);
	ir_node   *fp     = be_get_FrameAddr_frame(node);
	ir_node   *new_fp = be_transform_node(fp);
	dbg_info  *dbgi   = get_irn_dbg_info(node);
	ir_node   *new_node;

	new_node = new_bd_amd64_FrameAddr(dbgi, block, new_fp, ent);
	return new_node;
}

/* Boilerplate code for transformation: */

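/**
 * Registers the amd64 transform functions for all Firm opcodes handled here.
 */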
static void amd64_register_transformers(void)
{
	be_start_transform_setup();

	be_set_transform_function(op_Const,        gen_Const);
	be_set_transform_function(op_SymConst,     gen_SymConst);
	be_set_transform_function(op_Add,          gen_Add);
	be_set_transform_function(op_Sub,          gen_Sub);
	be_set_transform_function(op_Mul,          gen_Mul);
	be_set_transform_function(op_be_Call,      gen_be_Call);
	be_set_transform_function(op_be_FrameAddr, gen_be_FrameAddr);
	be_set_transform_function(op_Conv,         gen_Conv);
	be_set_transform_function(op_Jmp,          gen_Jmp);
	be_set_transform_function(op_Cmp,          gen_Cmp);
	be_set_transform_function(op_Cond,         gen_Cond);
	be_set_transform_function(op_Phi,          gen_Phi);
	be_set_transform_function(op_Load,         gen_Load);
	be_set_transform_function(op_Store,        gen_Store);
	be_set_transform_function(op_Proj,         gen_Proj);
	be_set_transform_function(op_Minus,        gen_Minus);
}

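/**
 * Transforms the given graph into an AMD64 machine graph (code selection).
 */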
void amd64_transform_graph(ir_graph *irg)
{
	amd64_register_transformers();
	be_transform_graph(irg, NULL);
}

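/** Initializes the amd64 transform phase (registers the debug module). */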
void amd64_init_transform(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.be.amd64.transform");
}