#include "irmode_t.h"
#include "irgmod.h"
#include "iredges.h"
-#include "irvrfy.h"
#include "ircons.h"
#include "iropt_t.h"
#include "error.h"
#include "../benode.h"
#include "../betranshlp.h"
+#include "../beutil.h"
#include "bearch_amd64_t.h"
#include "amd64_nodes_attr.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
-/** holds the current code generator during transformation */
-static amd64_code_gen_t *env_cg;
-
-///* its enough to have those once */
-//static ir_node *nomem, *noreg_GP;
-
/* Some support functions: */
static inline int mode_needs_gp_reg(ir_mode *mode)
*/
static ir_node *create_const_graph(ir_node *irn, ir_node *block)
{
- tarval *tv = get_Const_tarval(irn);
- ir_mode *mode = get_tarval_mode(tv);
- dbg_info *dbgi = get_irn_dbg_info(irn);
- unsigned value;
+ ir_tarval *tv = get_Const_tarval(irn);
+ ir_mode *mode = get_tarval_mode(tv);
+ dbg_info *dbgi = get_irn_dbg_info(irn);
+ unsigned value;
if (mode_is_reference(mode)) {
- /* AMD64 is 64bit, so we can safely convert a reference tarval into Iu */
+ /* AMD64 is 64bit, so we can safely convert a reference tarval into Lu */
- assert(get_mode_size_bits(mode) == get_mode_size_bits(mode_Iu));
- tv = tarval_convert_to(tv, mode_Iu);
+ assert(get_mode_size_bits(mode) == get_mode_size_bits(mode_Lu));
+ tv = tarval_convert_to(tv, mode_Lu);
}
value = get_tarval_long(tv);
ir_node *res = create_const_graph(node, block);
(void) mode;
- be_dep_on_frame(res);
-
return res;
}
ir_node *new_node;
new_node = new_bd_amd64_SymConst(dbgi, block, entity);
- be_dep_on_frame(new_node);
return new_node;
}
ir_node *new_op2 = be_transform_node(op2);
ir_node *res = new_bd_amd64_Add(dbgi, block, new_op1, new_op2);
- be_dep_on_frame (res);
return res;
}
+/**
+ * Transforms a Sub node.
+ *
+ * @return The transformed AMD64 node.
+ */
+static ir_node *gen_Sub(ir_node *node)
+{
+ ir_node *block = be_transform_node(get_nodes_block(node));
+ /* ir_mode *mode = get_irn_mode(node); */
+ ir_node *op1 = get_Sub_left(node);
+ ir_node *op2 = get_Sub_right(node);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *new_op1 = be_transform_node(op1);
+ ir_node *new_op2 = be_transform_node(op2);
+
+ ir_node *res = new_bd_amd64_Sub(dbgi, block, new_op1, new_op2);
+ return res;
+}
+
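+/**
+ * Transforms a Mul node.
+ *
+ * @return The transformed AMD64 node.
+ */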
+static ir_node *gen_Mul(ir_node *node)
+{
+ ir_node *block = be_transform_node(get_nodes_block(node));
+ /* ir_mode *mode = get_irn_mode(node); */
+ ir_node *op1 = get_Mul_left(node);
+ ir_node *op2 = get_Mul_right(node);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *new_op1 = be_transform_node(op1);
+ ir_node *new_op2 = be_transform_node(op2);
+
+ ir_node *res = new_bd_amd64_Mul(dbgi, block, new_op1, new_op2);
+ return res;
+}
+
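+/**
+ * Transforms a Minus node into an AMD64 Neg.
+ *
+ * @return The transformed AMD64 node.
+ */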
+static ir_node *gen_Minus(ir_node *node)
+{
+ ir_node *block = be_transform_node(get_nodes_block(node));
+ ir_node *val = be_transform_node(get_Minus_op(node));
+ dbg_info *dbgi = get_irn_dbg_info(node);
+
+ return new_bd_amd64_Neg(dbgi, block, val);
+}
+
static ir_node *gen_Jmp(ir_node *node)
{
ir_node *block = get_nodes_block(node);
static ir_node *gen_be_Call(ir_node *node)
{
ir_node *res = be_duplicate_node(node);
- arch_irn_add_flags(res, arch_irn_flags_modify_flags);
+ arch_add_irn_flags(res, arch_irn_flags_modify_flags);
return res;
}
is_unsigned = !mode_is_signed(cmp_mode);
new_op1 = be_transform_node(op1);
-// new_op1 = gen_extension(dbgi, block, new_op1, cmp_mode);
+ /* new_op1 = gen_extension(dbgi, block, new_op1, cmp_mode); */
new_op2 = be_transform_node(op2);
-// new_op2 = gen_extension(dbgi, block, new_op2, cmp_mode);
+ /* new_op2 = gen_extension(dbgi, block, new_op2, cmp_mode); */
return new_bd_amd64_Cmp(dbgi, block, new_op1, new_op2, false,
is_unsigned);
}
*/
static ir_node *gen_Cond(ir_node *node)
{
- ir_node *selector = get_Cond_selector(node);
- ir_mode *mode = get_irn_mode(selector);
- ir_node *block;
- ir_node *flag_node;
- dbg_info *dbgi;
+ ir_node *selector = get_Cond_selector(node);
+ ir_mode *mode = get_irn_mode(selector);
+ ir_node *block;
+ ir_node *flag_node;
+ ir_relation relation;
+ dbg_info *dbgi;
if (mode != mode_b) {
panic ("create_Switch not implemented yet!");
// return gen_SwitchJmp(node);
}
- assert(is_Proj(selector));
+ assert(is_Cmp(selector));
block = be_transform_node(get_nodes_block(node));
dbgi = get_irn_dbg_info(node);
- flag_node = be_transform_node(get_Proj_pred(selector));
-
- return new_bd_amd64_Jcc(dbgi, block, flag_node, get_Proj_proj(selector));
+ flag_node = be_transform_node(selector);
+ relation = get_Cmp_relation(selector);
+
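+ /* the relation of the Cmp selects the condition code of the jump */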
+ return new_bd_amd64_Jcc(dbgi, block, flag_node, relation);
+}
+
+#if 0
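+/* Note: these extension helpers still use arm node constructors (new_bd_arm_*)
+ * and are therefore left disabled here. */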
+/**
+ * Create an And that will zero out upper bits.
+ *
+ * @param dbgi debug info
+ * @param block the basic block
+ * @param op the original node
+ * @param src_bits number of lower bits that will remain
+ */
+static ir_node *gen_zero_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
+ int src_bits)
+{
+ if (src_bits == 8) {
+ return new_bd_arm_And_imm(dbgi, block, op, 0xFF, 0);
+ } else if (src_bits == 16) {
+ ir_node *lshift = new_bd_arm_Mov_reg_shift_imm(dbgi, block, op, ARM_SHF_LSL_IMM, 16);
+ ir_node *rshift = new_bd_arm_Mov_reg_shift_imm(dbgi, block, lshift, ARM_SHF_LSR_IMM, 16);
+ return rshift;
+ } else {
+ panic("zero extension only supported for 8 and 16 bits");
+ }
+}
+
+/**
+ * Generate code for a sign extension.
+ */
+static ir_node *gen_sign_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
+ int src_bits)
+{
+ int shift_width = 32 - src_bits;
+ ir_node *lshift_node = new_bd_arm_Mov_reg_shift_imm(dbgi, block, op, ARM_SHF_LSL_IMM, shift_width);
+ ir_node *rshift_node = new_bd_arm_Mov_reg_shift_imm(dbgi, block, lshift_node, ARM_SHF_ASR_IMM, shift_width);
+ return rshift_node;
+}
+
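+/**
+ * Extends a value to 32 bit, using sign or zero extension depending on the
+ * signedness of the original mode.
+ */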
+static ir_node *gen_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
+ ir_mode *orig_mode)
+{
+ int bits = get_mode_size_bits(orig_mode);
+ if (bits == 32)
+ return op;
+
+ if (mode_is_signed(orig_mode)) {
+ return gen_sign_extension(dbgi, block, op, bits);
+ } else {
+ return gen_zero_extension(dbgi, block, op, bits);
+ }
}
-///**
-// * Create an And that will zero out upper bits.
-// *
-// * @param dbgi debug info
-// * @param block the basic block
-// * @param op the original node
-// * param src_bits number of lower bits that will remain
-// */
-//static ir_node *gen_zero_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
-// int src_bits)
-//{
-// if (src_bits == 8) {
-// return new_bd_arm_And_imm(dbgi, block, op, 0xFF, 0);
-// } else if (src_bits == 16) {
-// ir_node *lshift = new_bd_arm_Mov_reg_shift_imm(dbgi, block, op, ARM_SHF_LSL_IMM, 16);
-// ir_node *rshift = new_bd_arm_Mov_reg_shift_imm(dbgi, block, lshift, ARM_SHF_LSR_IMM, 16);
-// return rshift;
-// } else {
-// panic("zero extension only supported for 8 and 16 bits");
-// }
-//}
-//
-///**
-// * Generate code for a sign extension.
-// */
-//static ir_node *gen_sign_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
-// int src_bits)
-//{
-// int shift_width = 32 - src_bits;
-// ir_node *lshift_node = new_bd_arm_Mov_reg_shift_imm(dbgi, block, op, ARM_SHF_LSL_IMM, shift_width);
-// ir_node *rshift_node = new_bd_arm_Mov_reg_shift_imm(dbgi, block, lshift_node, ARM_SHF_ASR_IMM, shift_width);
-// return rshift_node;
-//}
-//
-//static ir_node *gen_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
-// ir_mode *orig_mode)
-//{
-// int bits = get_mode_size_bits(orig_mode);
-// if (bits == 32)
-// return op;
-//
-// if (mode_is_signed(orig_mode)) {
-// return gen_sign_extension(dbgi, block, op, bits);
-// } else {
-// return gen_zero_extension(dbgi, block, op, bits);
-// }
-//}
-//
-///**
-// * returns true if it is assured, that the upper bits of a node are "clean"
-// * which means for a 16 or 8 bit value, that the upper bits in the register
-// * are 0 for unsigned and a copy of the last significant bit for signed
-// * numbers.
-// */
-//static bool upper_bits_clean(ir_node *transformed_node, ir_mode *mode)
-//{
-// (void) transformed_node;
-// (void) mode;
-// /* TODO */
-// return false;
-//}
+/**
+ * Returns true if it is assured that the upper bits of a node are "clean",
+ * which means that for a 16 or 8 bit value the upper bits in the register
+ * are 0 for unsigned and a copy of the most significant bit for signed
+ * numbers.
+ */
+static bool upper_bits_clean(ir_node *transformed_node, ir_mode *mode)
+{
+ (void) transformed_node;
+ (void) mode;
+ /* TODO */
+ return false;
+}
+#endif
/**
* Change some phi modes
ir_node *phi;
if (mode_needs_gp_reg(mode)) {
- /* all integer operations are on 32bit registers now */
- mode = mode_Iu;
+ /* all integer operations are on 64bit registers now */
+ mode = mode_Lu;
req = amd64_reg_classes[CLASS_amd64_gp].class_req;
} else {
req = arch_no_register_req;
copy_node_attr(irg, node, phi);
be_duplicate_deps(node, phi);
- arch_set_out_register_req(phi, 0, req);
+ arch_set_irn_register_req_out(phi, 0, req);
be_enqueue_preds(node);
} else { /* complete in gp registers */
int src_bits = get_mode_size_bits(src_mode);
int dst_bits = get_mode_size_bits(dst_mode);
- int min_bits;
ir_mode *min_mode;
if (src_bits == dst_bits) {
- /* kill unneccessary conv */
+ /* kill unnecessary conv */
return new_op;
}
if (src_bits < dst_bits) {
- min_bits = src_bits;
min_mode = src_mode;
} else {
- min_bits = dst_bits;
min_mode = dst_mode;
}
+
return new_bd_amd64_Conv(dbgi, block, new_op, min_mode);
- //if (upper_bits_clean(new_op, min_mode)) {
- // return new_op;
- //}
+#if 0
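+ /* note: this disabled code still uses min_bits, which was removed above */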
+ if (upper_bits_clean(new_op, min_mode)) {
+ return new_op;
+ }
- //if (mode_is_signed(min_mode)) {
- // return gen_sign_extension(dbg, block, new_op, min_bits);
- //} else {
- // return gen_zero_extension(dbg, block, new_op, min_bits);
- //}
+ if (mode_is_signed(min_mode)) {
+ return gen_sign_extension(dbg, block, new_op, min_bits);
+ } else {
+ return gen_zero_extension(dbg, block, new_op, min_bits);
+ }
+#endif
}
}
-/* Boilerplate code for transformation: */
+/**
+ * Transforms a Store.
+ *
+ * @return the created AMD64 Store node
+ */
+static ir_node *gen_Store(ir_node *node)
+{
+ ir_node *block = be_transform_node(get_nodes_block(node));
+ ir_node *ptr = get_Store_ptr(node);
+ ir_node *new_ptr = be_transform_node(ptr);
+ ir_node *mem = get_Store_mem(node);
+ ir_node *new_mem = be_transform_node(mem);
+ ir_node *val = get_Store_value(node);
+ ir_node *new_val = be_transform_node(val);
+ ir_mode *mode = get_irn_mode(val);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *new_store = NULL;
+
+ if (mode_is_float(mode)) {
+ panic("Float not supported yet");
+ } else {
+ assert(mode_is_data(mode) && "unsupported mode for Store");
+ new_store = new_bd_amd64_Store(dbgi, block, new_ptr, new_val, new_mem, 0);
+ }
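+ /* keep the pinned state of the original Store */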
+ set_irn_pinned(new_store, get_irn_pinned(node));
+ return new_store;
+}
+
+/**
+ * Transforms a Load.
+ *
+ * @return the created AMD64 Load node
+ */
+static ir_node *gen_Load(ir_node *node)
+{
+ ir_node *block = be_transform_node(get_nodes_block(node));
+ ir_node *ptr = get_Load_ptr(node);
+ ir_node *new_ptr = be_transform_node(ptr);
+ ir_node *mem = get_Load_mem(node);
+ ir_node *new_mem = be_transform_node(mem);
+ ir_mode *mode = get_Load_mode(node);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *new_load = NULL;
+
+ if (mode_is_float(mode)) {
+ panic("Float not supported yet");
+ } else {
+ assert(mode_is_data(mode) && "unsupported mode for Load");
+ new_load = new_bd_amd64_Load(dbgi, block, new_ptr, new_mem, 0);
+ }
+ set_irn_pinned(new_load, get_irn_pinned(node));
+
+#if 0
+ /* check for special case: the loaded value might not be used */
+ if (be_get_Proj_for_pn(node, pn_Load_res) == NULL) {
+ /* add a result proj and a Keep to produce a pseudo use */
+ ir_node *proj = new_r_Proj(new_load, mode_Iu, pn_amd64_Load_res);
+ be_new_Keep(block, 1, &proj);
+ }
+#endif
+
+ return new_load;
+}
-static void amd64_pretransform_node(void)
+/**
+ * Transform a Proj from a Load.
+ */
+static ir_node *gen_Proj_Load(ir_node *node)
+{
+ ir_node *load = get_Proj_pred(node);
+ ir_node *new_load = be_transform_node(load);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ long proj = get_Proj_proj(node);
+
+ /* renumber the proj */
+ switch (get_amd64_irn_opcode(new_load)) {
+ case iro_amd64_Load:
+ /* handle all gp loads equally: they have the same proj numbers. */
+ if (proj == pn_Load_res) {
+ return new_rd_Proj(dbgi, new_load, mode_Lu, pn_amd64_Load_res);
+ } else if (proj == pn_Load_M) {
+ return new_rd_Proj(dbgi, new_load, mode_M, pn_amd64_Load_M);
+ }
+ break;
+ /*
+ case iro_sparc_fpaLoad:
+ panic("FP not implemented yet");
+ break;
+ */
+ default:
+ panic("Unsupported Proj from Load");
+ }
+
+ return be_duplicate_node(node);
+}
+
+/**
+ * Transform a Proj node.
+ */
+static ir_node *gen_Proj(ir_node *node)
{
- amd64_code_gen_t *cg = env_cg;
- (void) cg;
+ ir_graph *irg = current_ir_graph;
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *pred = get_Proj_pred(node);
+ long proj = get_Proj_proj(node);
+
+ (void) irg;
+ (void) dbgi;
+
+ if (is_Store(pred)) {
+ if (proj == pn_Store_M) {
+ return be_transform_node(pred);
+ } else {
+ panic("Unsupported Proj from Store");
+ }
+ } else if (is_Load(pred)) {
+ return gen_Proj_Load(node);
+#if 0
+ } else if (be_is_SubSP(pred)) {
+ //panic("gen_Proj not implemented for SubSP");
+ return gen_Proj_be_SubSP(node);
+ } else if (be_is_AddSP(pred)) {
+ //panic("gen_Proj not implemented for AddSP");
+ return gen_Proj_be_AddSP(node);
+ } else if (is_Cmp(pred)) {
+ //panic("gen_Proj not implemented for Cmp");
+ return gen_Proj_Cmp(node);
+ } else if (is_Div(pred)) {
+ return gen_Proj_Div(node);
+#endif
+ } else if (is_Start(pred)) {
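+ /* nothing to do for Projs from Start yet; fall through to duplication */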
+#if 0
+ if (node == get_irg_anchor(irg, anchor_tls)) {
+ return gen_Proj_tls(node);
+ }
+ } else {
+ ir_node *new_pred = be_transform_node(pred);
+ ir_mode *mode = get_irn_mode(node);
+ if (mode_needs_gp_reg(mode)) {
+ ir_node *new_proj = new_r_Proj(new_pred, mode_Iu, get_Proj_proj(node));
+ new_proj->node_nr = node->node_nr;
+ return new_proj;
+ }
+#endif
+ }
-// nomem = get_irg_no_mem(current_ir_graph);
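+ /* all other Projs are simply duplicated */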
+ return be_duplicate_node(node);
}
-static void set_transformer(ir_op *op, be_transform_func amd64_transform_func)
+/**
+ * Transforms a be_FrameAddr into an AMD64 FrameAddr.
+ */
+static ir_node *gen_be_FrameAddr(ir_node *node)
{
- op->ops.generic = (op_func)amd64_transform_func;
+ ir_node *block = be_transform_node(get_nodes_block(node));
+ ir_entity *ent = be_get_frame_entity(node);
+ ir_node *fp = be_get_FrameAddr_frame(node);
+ ir_node *new_fp = be_transform_node(fp);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *new_node;
+
+ new_node = new_bd_amd64_FrameAddr(dbgi, block, new_fp, ent);
+ return new_node;
}
+/* Boilerplate code for transformation: */
+
static void amd64_register_transformers(void)
{
- clear_irp_opcodes_generic_func();
-
- set_transformer(op_Const, gen_Const);
- set_transformer(op_SymConst, gen_SymConst);
- set_transformer(op_Add, gen_Add);
- set_transformer(op_be_Call, gen_be_Call);
- set_transformer(op_Conv, gen_Conv);
- set_transformer(op_Jmp, gen_Jmp);
- set_transformer(op_Cmp, gen_Cmp);
- set_transformer(op_Cond, gen_Cond);
- set_transformer(op_Phi, gen_Phi);
+ be_start_transform_setup();
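+ /* reset the transformer table to its defaults before registering ours */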
+
+ be_set_transform_function(op_Const, gen_Const);
+ be_set_transform_function(op_SymConst, gen_SymConst);
+ be_set_transform_function(op_Add, gen_Add);
+ be_set_transform_function(op_Sub, gen_Sub);
+ be_set_transform_function(op_Mul, gen_Mul);
+ be_set_transform_function(op_be_Call, gen_be_Call);
+ be_set_transform_function(op_be_FrameAddr, gen_be_FrameAddr);
+ be_set_transform_function(op_Conv, gen_Conv);
+ be_set_transform_function(op_Jmp, gen_Jmp);
+ be_set_transform_function(op_Cmp, gen_Cmp);
+ be_set_transform_function(op_Cond, gen_Cond);
+ be_set_transform_function(op_Phi, gen_Phi);
+ be_set_transform_function(op_Load, gen_Load);
+ be_set_transform_function(op_Store, gen_Store);
+ be_set_transform_function(op_Proj, gen_Proj);
+ be_set_transform_function(op_Minus, gen_Minus);
}
-
-void amd64_transform_graph(amd64_code_gen_t *cg)
+void amd64_transform_graph(ir_graph *irg)
{
amd64_register_transformers();
- env_cg = cg;
- be_transform_graph(cg->irg, amd64_pretransform_node);
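+ /* the pre-transform hook was dropped, hence the NULL callback */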
+ be_transform_graph(irg, NULL);
}
void amd64_init_transform(void)