/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   code selection (transform FIRM into amd64 FIRM)
 * @version $Id: amd64_transform.c 26673 2009-10-01 16:43:13Z matze $
 */
28 #include "irgraph_t.h"
38 #include "../benode.h"
39 #include "../betranshlp.h"
40 #include "../beutil.h"
41 #include "bearch_amd64_t.h"
43 #include "amd64_nodes_attr.h"
44 #include "amd64_transform.h"
45 #include "amd64_new_nodes.h"
47 #include "gen_amd64_regalloc_if.h"
/** debug module handle for this pass (present in debug builds only) */
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/** holds the current code generator during transformation */
static amd64_code_gen_t *env_cg;
///* its enough to have those once */
//static ir_node *nomem, *noreg_GP;
57 /* Some support functions: */
59 static inline int mode_needs_gp_reg(ir_mode *mode)
61 return mode_is_int(mode) || mode_is_reference(mode);
65 * Create a DAG constructing a given Const.
67 * @param irn a Firm const
69 static ir_node *create_const_graph(ir_node *irn, ir_node *block)
71 tarval *tv = get_Const_tarval(irn);
72 ir_mode *mode = get_tarval_mode(tv);
73 dbg_info *dbgi = get_irn_dbg_info(irn);
76 if (mode_is_reference(mode)) {
77 /* AMD64 is 64bit, so we can safely convert a reference tarval into Iu */
78 assert(get_mode_size_bits(mode) == get_mode_size_bits(mode_Lu));
79 tv = tarval_convert_to(tv, mode_Lu);
82 value = get_tarval_long(tv);
83 //d// printf ("TEST GENERATE %d\n", value);
85 return new_bd_amd64_Immediate(dbgi, block, value);
88 /* Op transformers: */
91 * Transforms a Const node.
93 * @return The transformed AMD64 node.
95 static ir_node *gen_Const(ir_node *node) {
96 ir_node *block = be_transform_node(get_nodes_block(node));
97 ir_mode *mode = get_irn_mode(node);
98 ir_node *res = create_const_graph(node, block);
101 be_dep_on_frame(res);
107 * Transforms a SymConst node.
109 * @return The transformed ARM node.
111 static ir_node *gen_SymConst(ir_node *node)
113 ir_node *block = be_transform_node(get_nodes_block(node));
114 ir_entity *entity = get_SymConst_entity(node);
115 dbg_info *dbgi = get_irn_dbg_info(node);
118 new_node = new_bd_amd64_SymConst(dbgi, block, entity);
119 be_dep_on_frame(new_node);
124 * Transforms an Add node.
126 * @return The transformed AMD64 node.
128 static ir_node *gen_Add(ir_node *node) {
129 ir_node *block = be_transform_node(get_nodes_block(node));
130 /* ir_mode *mode = get_irn_mode(node); */
131 ir_node *op1 = get_Add_left(node);
132 ir_node *op2 = get_Add_right(node);
133 dbg_info *dbgi = get_irn_dbg_info(node);
134 ir_node *new_op1 = be_transform_node(op1);
135 ir_node *new_op2 = be_transform_node(op2);
137 ir_node *res = new_bd_amd64_Add(dbgi, block, new_op1, new_op2);
138 be_dep_on_frame (res);
142 static ir_node *gen_Jmp(ir_node *node)
144 ir_node *block = get_nodes_block(node);
145 ir_node *new_block = be_transform_node(block);
146 dbg_info *dbgi = get_irn_dbg_info(node);
148 return new_bd_amd64_Jmp(dbgi, new_block);
151 static ir_node *gen_be_Call(ir_node *node)
153 ir_node *res = be_duplicate_node(node);
154 arch_irn_add_flags(res, arch_irn_flags_modify_flags);
159 static ir_node *gen_Cmp(ir_node *node)
161 ir_node *block = be_transform_node(get_nodes_block(node));
162 ir_node *op1 = get_Cmp_left(node);
163 ir_node *op2 = get_Cmp_right(node);
164 ir_mode *cmp_mode = get_irn_mode(op1);
165 dbg_info *dbgi = get_irn_dbg_info(node);
170 if (mode_is_float(cmp_mode)) {
171 panic("Floating point not implemented yet (in gen_Cmp)!");
174 assert(get_irn_mode(op2) == cmp_mode);
175 is_unsigned = !mode_is_signed(cmp_mode);
177 new_op1 = be_transform_node(op1);
178 // new_op1 = gen_extension(dbgi, block, new_op1, cmp_mode);
179 new_op2 = be_transform_node(op2);
180 // new_op2 = gen_extension(dbgi, block, new_op2, cmp_mode);
181 return new_bd_amd64_Cmp(dbgi, block, new_op1, new_op2, false,
188 * @return the created ARM Cond node
190 static ir_node *gen_Cond(ir_node *node)
192 ir_node *selector = get_Cond_selector(node);
193 ir_mode *mode = get_irn_mode(selector);
198 if (mode != mode_b) {
199 panic ("create_Switch not implemented yet!");
200 // return gen_SwitchJmp(node);
202 assert(is_Proj(selector));
204 block = be_transform_node(get_nodes_block(node));
205 dbgi = get_irn_dbg_info(node);
206 flag_node = be_transform_node(get_Proj_pred(selector));
208 return new_bd_amd64_Jcc(dbgi, block, flag_node, get_Proj_proj(selector));
212 // * Create an And that will zero out upper bits.
214 // * @param dbgi debug info
215 // * @param block the basic block
216 // * @param op the original node
217 // * param src_bits number of lower bits that will remain
219 //static ir_node *gen_zero_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
222 // if (src_bits == 8) {
223 // return new_bd_arm_And_imm(dbgi, block, op, 0xFF, 0);
224 // } else if (src_bits == 16) {
225 // ir_node *lshift = new_bd_arm_Mov_reg_shift_imm(dbgi, block, op, ARM_SHF_LSL_IMM, 16);
226 // ir_node *rshift = new_bd_arm_Mov_reg_shift_imm(dbgi, block, lshift, ARM_SHF_LSR_IMM, 16);
229 // panic("zero extension only supported for 8 and 16 bits");
234 // * Generate code for a sign extension.
236 //static ir_node *gen_sign_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
239 // int shift_width = 32 - src_bits;
240 // ir_node *lshift_node = new_bd_arm_Mov_reg_shift_imm(dbgi, block, op, ARM_SHF_LSL_IMM, shift_width);
241 // ir_node *rshift_node = new_bd_arm_Mov_reg_shift_imm(dbgi, block, lshift_node, ARM_SHF_ASR_IMM, shift_width);
242 // return rshift_node;
245 //static ir_node *gen_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
246 // ir_mode *orig_mode)
248 // int bits = get_mode_size_bits(orig_mode);
252 // if (mode_is_signed(orig_mode)) {
253 // return gen_sign_extension(dbgi, block, op, bits);
255 // return gen_zero_extension(dbgi, block, op, bits);
260 // * returns true if it is assured, that the upper bits of a node are "clean"
261 // * which means for a 16 or 8 bit value, that the upper bits in the register
262 // * are 0 for unsigned and a copy of the last significant bit for signed
265 //static bool upper_bits_clean(ir_node *transformed_node, ir_mode *mode)
267 // (void) transformed_node;
274 * Change some phi modes
276 static ir_node *gen_Phi(ir_node *node)
278 const arch_register_req_t *req;
279 ir_node *block = be_transform_node(get_nodes_block(node));
280 ir_graph *irg = current_ir_graph;
281 dbg_info *dbgi = get_irn_dbg_info(node);
282 ir_mode *mode = get_irn_mode(node);
285 if (mode_needs_gp_reg(mode)) {
286 /* all integer operations are on 64bit registers now */
288 req = amd64_reg_classes[CLASS_amd64_gp].class_req;
290 req = arch_no_register_req;
293 /* phi nodes allow loops, so we use the old arguments for now
294 * and fix this later */
295 phi = new_ir_node(dbgi, irg, block, op_Phi, mode, get_irn_arity(node),
296 get_irn_in(node) + 1);
297 copy_node_attr(irg, node, phi);
298 be_duplicate_deps(node, phi);
300 arch_set_out_register_req(phi, 0, req);
302 be_enqueue_preds(node);
310 * Transforms a Conv node.
312 * @return The created ia32 Conv node
314 static ir_node *gen_Conv(ir_node *node)
316 ir_node *block = be_transform_node(get_nodes_block(node));
317 ir_node *op = get_Conv_op(node);
318 ir_node *new_op = be_transform_node(op);
319 ir_mode *src_mode = get_irn_mode(op);
320 ir_mode *dst_mode = get_irn_mode(node);
321 dbg_info *dbgi = get_irn_dbg_info(node);
323 if (src_mode == dst_mode)
326 if (mode_is_float(src_mode) || mode_is_float(dst_mode)) {
327 panic("float not supported yet");
328 } else { /* complete in gp registers */
329 int src_bits = get_mode_size_bits(src_mode);
330 int dst_bits = get_mode_size_bits(dst_mode);
334 if (src_bits == dst_bits) {
335 /* kill unneccessary conv */
339 if (src_bits < dst_bits) {
347 return new_bd_amd64_Conv(dbgi, block, new_op, min_mode);
349 //if (upper_bits_clean(new_op, min_mode)) {
353 //if (mode_is_signed(min_mode)) {
354 // return gen_sign_extension(dbg, block, new_op, min_bits);
356 // return gen_zero_extension(dbg, block, new_op, min_bits);
362 * Transforms a Store.
364 * @return the created AMD64 Store node
366 static ir_node *gen_Store(ir_node *node)
368 ir_node *block = be_transform_node(get_nodes_block(node));
369 ir_node *ptr = get_Store_ptr(node);
370 ir_node *new_ptr = be_transform_node(ptr);
371 ir_node *mem = get_Store_mem(node);
372 ir_node *new_mem = be_transform_node(mem);
373 ir_node *val = get_Store_value(node);
374 ir_node *new_val = be_transform_node(val);
375 ir_mode *mode = get_irn_mode(val);
376 dbg_info *dbgi = get_irn_dbg_info(node);
377 ir_node *new_store = NULL;
379 if (mode_is_float(mode)) {
380 panic("Float not supported yet");
382 assert(mode_is_data(mode) && "unsupported mode for Store");
383 new_store = new_bd_amd64_Store(dbgi, block, new_ptr, new_val, new_mem);
385 set_irn_pinned(new_store, get_irn_pinned(node));
392 * @return the created AMD64 Load node
394 static ir_node *gen_Load(ir_node *node)
396 ir_node *block = be_transform_node(get_nodes_block(node));
397 ir_node *ptr = get_Load_ptr(node);
398 ir_node *new_ptr = be_transform_node(ptr);
399 ir_node *mem = get_Load_mem(node);
400 ir_node *new_mem = be_transform_node(mem);
401 ir_mode *mode = get_Load_mode(node);
402 dbg_info *dbgi = get_irn_dbg_info(node);
403 ir_node *new_load = NULL;
405 if (mode_is_float(mode)) {
406 panic("Float not supported yet");
408 assert(mode_is_data(mode) && "unsupported mode for Load");
409 new_load = new_bd_amd64_Load(dbgi, block, new_ptr, new_mem);
411 set_irn_pinned(new_load, get_irn_pinned(node));
413 /* check for special case: the loaded value might not be used */
414 // if (be_get_Proj_for_pn(node, pn_Load_res) == NULL) {
415 // /* add a result proj and a Keep to produce a pseudo use */
416 // ir_node *proj = new_r_Proj(new_load, mode_Iu, pn_amd64_Load_res);
417 // be_new_Keep(block, 1, &proj);
424 * Transform a Proj from a Load.
426 static ir_node *gen_Proj_Load(ir_node *node)
428 ir_node *load = get_Proj_pred(node);
429 ir_node *new_load = be_transform_node(load);
430 dbg_info *dbgi = get_irn_dbg_info(node);
431 long proj = get_Proj_proj(node);
433 /* renumber the proj */
434 switch (get_amd64_irn_opcode(new_load)) {
436 /* handle all gp loads equal: they have the same proj numbers. */
437 if (proj == pn_Load_res) {
438 return new_rd_Proj(dbgi, new_load, mode_Lu, pn_amd64_Load_res);
439 } else if (proj == pn_Load_M) {
440 return new_rd_Proj(dbgi, new_load, mode_M, pn_amd64_Load_M);
444 case iro_sparc_fpaLoad:
445 panic("FP not implemented yet");
449 panic("Unsupported Proj from Load");
452 return be_duplicate_node(node);
456 * Transform a Proj node.
458 static ir_node *gen_Proj(ir_node *node)
460 ir_graph *irg = current_ir_graph;
461 dbg_info *dbgi = get_irn_dbg_info(node);
462 ir_node *pred = get_Proj_pred(node);
463 long proj = get_Proj_proj(node);
468 if (is_Store(pred)) {
469 if (proj == pn_Store_M) {
470 return be_transform_node(pred);
472 panic("Unsupported Proj from Store");
474 } else if (is_Load(pred)) {
475 return gen_Proj_Load(node);
476 // } else if (be_is_SubSP(pred)) {
477 // //panic("gen_Proj not implemented for SubSP");
478 // return gen_Proj_be_SubSP(node);
479 // } else if (be_is_AddSP(pred)) {
480 // //panic("gen_Proj not implemented for AddSP");
481 // return gen_Proj_be_AddSP(node);
482 // } else if (is_Cmp(pred)) {
483 // //panic("gen_Proj not implemented for Cmp");
484 // return gen_Proj_Cmp(node);
485 // } else if (is_Div(pred)) {
486 // return gen_Proj_Div(node);
487 } else if (is_Start(pred)) {
489 // if (proj == pn_Start_X_initial_exec) {
490 // ir_node *block = get_nodes_block(pred);
493 // // we exchange the ProjX with a jump
494 // block = be_transform_node(block);
495 // jump = new_rd_Jmp(dbgi, block);
499 // if (node == get_irg_anchor(irg, anchor_tls)) {
500 // return gen_Proj_tls(node);
504 // ir_node *new_pred = be_transform_node(pred);
505 // ir_mode *mode = get_irn_mode(node);
506 // if (mode_needs_gp_reg(mode)) {
507 // ir_node *new_proj = new_r_Proj(new_pred, mode_Iu, get_Proj_proj(node));
508 // new_proj->node_nr = node->node_nr;
513 return be_duplicate_node(node);
517 * Transforms a FrameAddr into an AMD64 Add.
519 static ir_node *gen_be_FrameAddr(ir_node *node)
521 ir_node *block = be_transform_node(get_nodes_block(node));
522 ir_entity *ent = be_get_frame_entity(node);
523 ir_node *fp = be_get_FrameAddr_frame(node);
524 ir_node *new_fp = be_transform_node(fp);
525 dbg_info *dbgi = get_irn_dbg_info(node);
528 new_node = new_bd_amd64_FrameAddr(dbgi, block, new_fp, ent);
532 /* Boilerplate code for transformation: */
534 static void amd64_pretransform_node(void)
536 amd64_code_gen_t *cg = env_cg;
539 // nomem = get_irg_no_mem(current_ir_graph);
542 static void set_transformer(ir_op *op, be_transform_func amd64_transform_func)
544 op->ops.generic = (op_func)amd64_transform_func;
547 static void amd64_register_transformers(void)
549 clear_irp_opcodes_generic_func();
551 set_transformer(op_Const, gen_Const);
552 set_transformer(op_SymConst, gen_SymConst);
553 set_transformer(op_Add, gen_Add);
554 set_transformer(op_be_Call, gen_be_Call);
555 set_transformer(op_be_FrameAddr, gen_be_FrameAddr);
556 set_transformer(op_Conv, gen_Conv);
557 set_transformer(op_Jmp, gen_Jmp);
558 set_transformer(op_Cmp, gen_Cmp);
559 set_transformer(op_Cond, gen_Cond);
560 set_transformer(op_Phi, gen_Phi);
561 set_transformer(op_Load, gen_Load);
562 set_transformer(op_Store, gen_Store);
563 set_transformer(op_Proj, gen_Proj);
567 void amd64_transform_graph(amd64_code_gen_t *cg)
569 amd64_register_transformers();
571 be_transform_graph(cg->irg, amd64_pretransform_node);
574 void amd64_init_transform(void)
576 FIRM_DBG_REGISTER(dbg, "firm.be.amd64.transform");