/*
 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   code selection (transform FIRM into amd64 FIRM)
 * @version $Id: amd64_transform.c 26673 2009-10-01 16:43:13Z matze $
 */
28 #include "irgraph_t.h"
38 #include "../benode.h"
39 #include "../betranshlp.h"
40 #include "bearch_amd64_t.h"
42 #include "amd64_nodes_attr.h"
43 #include "amd64_transform.h"
44 #include "amd64_new_nodes.h"
46 #include "gen_amd64_regalloc_if.h"
/** debug module handle (compiled in only for debug builds) */
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

/** holds the current code generator during transformation */
static amd64_code_gen_t *env_cg;

///* its enough to have those once */
//static ir_node *nomem, *noreg_GP;
56 /* Some support functions: */
58 static inline int mode_needs_gp_reg(ir_mode *mode)
60 return mode_is_int(mode) || mode_is_reference(mode);
64 * Create a DAG constructing a given Const.
66 * @param irn a Firm const
68 static ir_node *create_const_graph(ir_node *irn, ir_node *block)
70 tarval *tv = get_Const_tarval(irn);
71 ir_mode *mode = get_tarval_mode(tv);
72 dbg_info *dbgi = get_irn_dbg_info(irn);
75 if (mode_is_reference(mode)) {
76 /* AMD64 is 64bit, so we can safely convert a reference tarval into Iu */
77 assert(get_mode_size_bits(mode) == get_mode_size_bits(mode_Iu));
78 tv = tarval_convert_to(tv, mode_Iu);
81 value = get_tarval_long(tv);
82 //d// printf ("TEST GENERATE %d\n", value);
84 return new_bd_amd64_Immediate(dbgi, block, value);
87 /* Op transformers: */
90 * Transforms a Const node.
92 * @return The transformed AMD64 node.
94 static ir_node *gen_Const(ir_node *node) {
95 ir_node *block = be_transform_node(get_nodes_block(node));
96 ir_mode *mode = get_irn_mode(node);
97 ir_node *res = create_const_graph(node, block);
100 be_dep_on_frame(res);
106 * Transforms a SymConst node.
108 * @return The transformed ARM node.
110 static ir_node *gen_SymConst(ir_node *node)
112 ir_node *block = be_transform_node(get_nodes_block(node));
113 ir_entity *entity = get_SymConst_entity(node);
114 dbg_info *dbgi = get_irn_dbg_info(node);
117 new_node = new_bd_amd64_SymConst(dbgi, block, entity);
118 be_dep_on_frame(new_node);
123 * Transforms an Add node.
125 * @return The transformed AMD64 node.
127 static ir_node *gen_Add(ir_node *node) {
128 ir_node *block = be_transform_node(get_nodes_block(node));
129 /* ir_mode *mode = get_irn_mode(node); */
130 ir_node *op1 = get_Add_left(node);
131 ir_node *op2 = get_Add_right(node);
132 dbg_info *dbgi = get_irn_dbg_info(node);
133 ir_node *new_op1 = be_transform_node(op1);
134 ir_node *new_op2 = be_transform_node(op2);
136 ir_node *res = new_bd_amd64_Add(dbgi, block, new_op1, new_op2);
137 be_dep_on_frame (res);
141 static ir_node *gen_Jmp(ir_node *node)
143 ir_node *block = get_nodes_block(node);
144 ir_node *new_block = be_transform_node(block);
145 dbg_info *dbgi = get_irn_dbg_info(node);
147 return new_bd_amd64_Jmp(dbgi, new_block);
150 static ir_node *gen_be_Call(ir_node *node)
152 ir_node *res = be_duplicate_node(node);
153 arch_irn_add_flags(res, arch_irn_flags_modify_flags);
158 static ir_node *gen_Cmp(ir_node *node)
160 ir_node *block = be_transform_node(get_nodes_block(node));
161 ir_node *op1 = get_Cmp_left(node);
162 ir_node *op2 = get_Cmp_right(node);
163 ir_mode *cmp_mode = get_irn_mode(op1);
164 dbg_info *dbgi = get_irn_dbg_info(node);
169 if (mode_is_float(cmp_mode)) {
170 panic("Floating point not implemented yet (in gen_Cmp)!");
173 assert(get_irn_mode(op2) == cmp_mode);
174 is_unsigned = !mode_is_signed(cmp_mode);
176 new_op1 = be_transform_node(op1);
177 // new_op1 = gen_extension(dbgi, block, new_op1, cmp_mode);
178 new_op2 = be_transform_node(op2);
179 // new_op2 = gen_extension(dbgi, block, new_op2, cmp_mode);
180 return new_bd_amd64_Cmp(dbgi, block, new_op1, new_op2, false,
187 * @return the created ARM Cond node
189 static ir_node *gen_Cond(ir_node *node)
191 ir_node *selector = get_Cond_selector(node);
192 ir_mode *mode = get_irn_mode(selector);
197 if (mode != mode_b) {
198 panic ("create_Switch not implemented yet!");
199 // return gen_SwitchJmp(node);
201 assert(is_Proj(selector));
203 block = be_transform_node(get_nodes_block(node));
204 dbgi = get_irn_dbg_info(node);
205 flag_node = be_transform_node(get_Proj_pred(selector));
207 return new_bd_amd64_Jcc(dbgi, block, flag_node, get_Proj_proj(selector));
211 // * Create an And that will zero out upper bits.
213 // * @param dbgi debug info
214 // * @param block the basic block
215 // * @param op the original node
216 // * param src_bits number of lower bits that will remain
218 //static ir_node *gen_zero_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
221 // if (src_bits == 8) {
222 // return new_bd_arm_And_imm(dbgi, block, op, 0xFF, 0);
223 // } else if (src_bits == 16) {
224 // ir_node *lshift = new_bd_arm_Mov_reg_shift_imm(dbgi, block, op, ARM_SHF_LSL_IMM, 16);
225 // ir_node *rshift = new_bd_arm_Mov_reg_shift_imm(dbgi, block, lshift, ARM_SHF_LSR_IMM, 16);
228 // panic("zero extension only supported for 8 and 16 bits");
233 // * Generate code for a sign extension.
235 //static ir_node *gen_sign_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
238 // int shift_width = 32 - src_bits;
239 // ir_node *lshift_node = new_bd_arm_Mov_reg_shift_imm(dbgi, block, op, ARM_SHF_LSL_IMM, shift_width);
240 // ir_node *rshift_node = new_bd_arm_Mov_reg_shift_imm(dbgi, block, lshift_node, ARM_SHF_ASR_IMM, shift_width);
241 // return rshift_node;
244 //static ir_node *gen_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
245 // ir_mode *orig_mode)
247 // int bits = get_mode_size_bits(orig_mode);
251 // if (mode_is_signed(orig_mode)) {
252 // return gen_sign_extension(dbgi, block, op, bits);
254 // return gen_zero_extension(dbgi, block, op, bits);
259 // * returns true if it is assured, that the upper bits of a node are "clean"
260 // * which means for a 16 or 8 bit value, that the upper bits in the register
261 // * are 0 for unsigned and a copy of the last significant bit for signed
264 //static bool upper_bits_clean(ir_node *transformed_node, ir_mode *mode)
266 // (void) transformed_node;
273 * Change some phi modes
275 static ir_node *gen_Phi(ir_node *node)
277 const arch_register_req_t *req;
278 ir_node *block = be_transform_node(get_nodes_block(node));
279 ir_graph *irg = current_ir_graph;
280 dbg_info *dbgi = get_irn_dbg_info(node);
281 ir_mode *mode = get_irn_mode(node);
284 if (mode_needs_gp_reg(mode)) {
285 /* all integer operations are on 32bit registers now */
287 req = amd64_reg_classes[CLASS_amd64_gp].class_req;
289 req = arch_no_register_req;
292 /* phi nodes allow loops, so we use the old arguments for now
293 * and fix this later */
294 phi = new_ir_node(dbgi, irg, block, op_Phi, mode, get_irn_arity(node),
295 get_irn_in(node) + 1);
296 copy_node_attr(irg, node, phi);
297 be_duplicate_deps(node, phi);
299 arch_set_out_register_req(phi, 0, req);
301 be_enqueue_preds(node);
309 * Transforms a Conv node.
311 * @return The created ia32 Conv node
313 static ir_node *gen_Conv(ir_node *node)
315 ir_node *block = be_transform_node(get_nodes_block(node));
316 ir_node *op = get_Conv_op(node);
317 ir_node *new_op = be_transform_node(op);
318 ir_mode *src_mode = get_irn_mode(op);
319 ir_mode *dst_mode = get_irn_mode(node);
320 dbg_info *dbgi = get_irn_dbg_info(node);
322 if (src_mode == dst_mode)
325 if (mode_is_float(src_mode) || mode_is_float(dst_mode)) {
326 panic("float not supported yet");
327 } else { /* complete in gp registers */
328 int src_bits = get_mode_size_bits(src_mode);
329 int dst_bits = get_mode_size_bits(dst_mode);
333 if (src_bits == dst_bits) {
334 /* kill unneccessary conv */
338 if (src_bits < dst_bits) {
346 return new_bd_amd64_Conv(dbgi, block, new_op, min_mode);
348 //if (upper_bits_clean(new_op, min_mode)) {
352 //if (mode_is_signed(min_mode)) {
353 // return gen_sign_extension(dbg, block, new_op, min_bits);
355 // return gen_zero_extension(dbg, block, new_op, min_bits);
360 /* Boilerplate code for transformation: */
362 static void amd64_pretransform_node(void)
364 amd64_code_gen_t *cg = env_cg;
367 // nomem = get_irg_no_mem(current_ir_graph);
370 static void set_transformer(ir_op *op, be_transform_func amd64_transform_func)
372 op->ops.generic = (op_func)amd64_transform_func;
375 static void amd64_register_transformers(void)
377 clear_irp_opcodes_generic_func();
379 set_transformer(op_Const, gen_Const);
380 set_transformer(op_SymConst, gen_SymConst);
381 set_transformer(op_Add, gen_Add);
382 set_transformer(op_be_Call, gen_be_Call);
383 set_transformer(op_Conv, gen_Conv);
384 set_transformer(op_Jmp, gen_Jmp);
385 set_transformer(op_Cmp, gen_Cmp);
386 set_transformer(op_Cond, gen_Cond);
387 set_transformer(op_Phi, gen_Phi);
391 void amd64_transform_graph(amd64_code_gen_t *cg)
393 amd64_register_transformers();
395 be_transform_graph(cg->irg, amd64_pretransform_node);
398 void amd64_init_transform(void)
400 FIRM_DBG_REGISTER(dbg, "firm.be.amd64.transform");