/*
- * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2010 University of Karlsruhe. All rights reserved.
*
* This file is part of libFirm.
*
/**
* @file
* @brief code selection (transform FIRM into SPARC FIRM)
- * @version $Id: TEMPLATE_transform.c 26673 2009-10-01 16:43:13Z matze $
+ * @version $Id$
*/
#include "config.h"
static sparc_code_gen_t *env_cg;
+static ir_node *gen_SymConst(ir_node *node);
+
+
static inline int mode_needs_gp_reg(ir_mode *mode)
{
return mode_is_int(mode) || mode_is_reference(mode);
}
+/**
+ * Create an And that will zero out upper bits.
+ *
+ * @param dbgi debug info
+ * @param block the basic block
+ * @param op the original node
+ * @param src_bits number of lower bits that will remain
+ * @return a node computing op with the upper (32 - src_bits) bits cleared
+ */
+static ir_node *gen_zero_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
+ int src_bits)
+{
+ if (src_bits == 8) {
+ /* 0xFF fits into a simm13 immediate, so a single And suffices */
+ return new_bd_sparc_And_imm(dbgi, block, op, 0xFF);
+ } else if (src_bits == 16) {
+ /* shift up then logically back down; presumably because 0xFFFF
+  * exceeds the simm13 immediate range -- TODO confirm */
+ ir_node *lshift = new_bd_sparc_ShiftLL_imm(dbgi, block, op, 16);
+ ir_node *rshift = new_bd_sparc_ShiftLR_imm(dbgi, block, lshift, 16);
+ return rshift;
+ } else {
+ panic("zero extension only supported for 8 and 16 bits");
+ }
+}
+
+/**
+ * Generate code for a sign extension.
+ *
+ * @param dbgi debug info
+ * @param block the basic block
+ * @param op the operand to extend
+ * @param src_bits number of significant lower bits (< 32)
+ * @return a node computing op sign-extended from src_bits to 32 bit
+ */
+static ir_node *gen_sign_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
+ int src_bits)
+{
+ int shift_width = 32 - src_bits;
+ /* shift left then arithmetically right: the arithmetic shift
+  * replicates the sign bit into the upper bits */
+ ir_node *lshift_node = new_bd_sparc_ShiftLL_imm(dbgi, block, op, shift_width);
+ ir_node *rshift_node = new_bd_sparc_ShiftRA_imm(dbgi, block, lshift_node, shift_width);
+ return rshift_node;
+}
+
+/**
+ * returns true if it is assured, that the upper bits of a node are "clean"
+ * which means for a 16 or 8 bit value, that the upper bits in the register
+ * are 0 for unsigned and a copy of the most significant (sign) bit for
+ * signed numbers.
+ */
+static bool upper_bits_clean(ir_node *transformed_node, ir_mode *mode)
+{
+ (void) transformed_node;
+ (void) mode;
+ /* TODO: conservative stub -- always answers "dirty", forcing callers
+  * to emit an explicit extension */
+ return false;
+}
+
+/**
+ * Extend a value to 32 bit: sign-extension for signed modes,
+ * zero-extension for unsigned ones; 32-bit values pass through unchanged.
+ *
+ * @param dbgi debug info
+ * @param block the basic block
+ * @param op the (already transformed) operand
+ * @param orig_mode the mode of the original (untransformed) value
+ */
+static ir_node *gen_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
+ ir_mode *orig_mode)
+{
+ int bits = get_mode_size_bits(orig_mode);
+ if (bits == 32)
+ return op;
+
+ if (mode_is_signed(orig_mode)) {
+ return gen_sign_extension(dbgi, block, op, bits);
+ } else {
+ return gen_zero_extension(dbgi, block, op, bits);
+ }
+}
+
+
/**
* Creates a possible DAG for a constant.
*/
{
ir_node *result;
- // TODO: find a better solution for this
- if (value < -4096 || value > 4096) {
- panic("FIXME: immediate value exceeds max. size of simm13 (13 bits signed)");
+ // we need to load hi & lo separately
+ if (value < -4096 || value > 4095) {
+ ir_node *hi = new_bd_sparc_HiImm(dbgi, block, (int) value);
+ result = new_bd_sparc_LoImm(dbgi, block, hi, value);
+ be_dep_on_frame(hi);
+ } else {
+ result = new_bd_sparc_Mov_imm(dbgi, block, (int) value);
+ be_dep_on_frame(result);
}
- result = new_bd_sparc_Mov_imm(dbgi, block, (int) value);
return result;
}
{
tarval *tv = get_Const_tarval(irn);
ir_mode *mode = get_tarval_mode(tv);
+ dbg_info *dbgi = get_irn_dbg_info(irn);
long value;
+
if (mode_is_reference(mode)) {
/* SPARC V8 is 32bit, so we can safely convert a reference tarval into Iu */
assert(get_mode_size_bits(mode) == get_mode_size_bits(mode_Iu));
tv = tarval_convert_to(tv, mode_Iu);
}
+
value = get_tarval_long(tv);
- return create_const_graph_value(get_irn_dbg_info(irn), block, value);
+ return create_const_graph_value(dbgi, block, value);
+}
+
+/**
+ * create a DAG to load fp constant. sparc only supports loading from global memory
+ *
+ * @note placeholder: panics until FP constant loading is implemented
+ */
+static ir_node *create_fp_const_graph(ir_node *irn, ir_node *block)
+{
+ (void) block;
+ (void) irn;
+ panic("FP constants not implemented");
+}
} match_flags_t;
typedef ir_node* (*new_binop_reg_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, ir_node *op2);
+typedef ir_node* (*new_binop_fp_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, ir_node *op2, ir_mode *mode);
typedef ir_node* (*new_binop_imm_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, int simm13);
/**
- * checks wether a node's value can be encoded as a immediate
- * TODO: pass a result pointer to fetch the encoded immediate
+ * checks if a node's value can be encoded as an immediate
+ * (a SPARC simm13: a signed 13-bit value, i.e. in the range [-4096, 4095])
*
*/
static bool is_imm_encodeable(const ir_node *node)
{
long val;
if (!is_Const(node))
return false;
val = get_tarval_long(get_Const_tarval(node));
- return !(val < -4096 || val > 4096);
+ return !(val < -4096 || val > 4095);
}
/**
ir_node *new_op2;
dbg_info *dbgi = get_irn_dbg_info(node);
-/*
- if (flags & MATCH_SIZE_NEUTRAL) {
- op1 = arm_skip_downconv(op1);
- op2 = arm_skip_downconv(op2);
- } else {
- assert(get_mode_size_bits(get_irn_mode(node)) == 32);
- }
-*/
if (is_imm_encodeable(op2)) {
ir_node *new_op1 = be_transform_node(op1);
return new_imm(dbgi, block, new_op1, get_tarval_long(get_Const_tarval(op2)));
return new_reg(dbgi, block, new_op1, new_op2);
}
+/**
+ * helper function for FP binop operations
+ *
+ * @param node the Firm binop node to transform
+ * @param new_reg constructor for the register-register SPARC FP node
+ * @return the created SPARC FP node
+ *
+ * NOTE(review): unlike gen_helper_binop, no immediate variant is tried --
+ * presumably SPARC FP instructions take registers only; confirm against
+ * the ISA.
+ */
+static ir_node *gen_helper_binfpop(ir_node *node, new_binop_fp_func new_reg)
+{
+ ir_node *block = be_transform_node(get_nodes_block(node));
+ ir_node *op1 = get_binop_left(node);
+ ir_node *new_op1;
+ ir_node *op2 = get_binop_right(node);
+ ir_node *new_op2;
+ dbg_info *dbgi = get_irn_dbg_info(node);
+
+ new_op2 = be_transform_node(op2);
+ new_op1 = be_transform_node(op1);
+ return new_reg(dbgi, block, new_op1, new_op2, get_irn_mode(node));
+}
+
/**
* Creates an sparc Add.
*
{
ir_mode *mode = get_irn_mode(node);
ir_node *block = be_transform_node(get_nodes_block(node));
- ir_node *op1 = get_Add_left(node);
- ir_node *op2 = get_Add_right(node);
dbg_info *dbgi = get_irn_dbg_info(node);
- ir_node *new_op1 = be_transform_node(op1);
- ir_node *new_op2 = be_transform_node(op2);
- (void) new_op1;
- (void) new_op2;
(void) block;
(void) dbgi;
{
ir_mode *mode = get_irn_mode(node);
ir_node *block = be_transform_node(get_nodes_block(node));
- ir_node *op1 = get_Add_left(node);
- ir_node *op2 = get_Add_right(node);
dbg_info *dbgi = get_irn_dbg_info(node);
- ir_node *new_op1 = be_transform_node(op1);
- ir_node *new_op2 = be_transform_node(op2);
- (void) new_op1;
- (void) new_op2;
- (void) block;
- (void) dbgi;
+ (void) block;
+ (void) dbgi;
if (mode_is_float(mode))
panic("FP not implemented yet");
return new_store;
}
+/**
+ * Creates an sparc Mul.
+ * returns the lower 32bits of the 64bit multiply result
+ *
+ * @return the created sparc Mul node
+ */
+static ir_node *gen_Mul(ir_node *node) {
+ ir_mode *mode = get_irn_mode(node);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+
+ ir_node *mul;
+ ir_node *proj_res_low;
+
+ if (mode_is_float(mode)) {
+ /* FP multiply yields its result directly -- no low/high proj */
+ mul = gen_helper_binfpop(node, new_bd_sparc_fMul);
+ return mul;
+ }
+
+ assert(mode_is_data(mode));
+ mul = gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_SIZE_NEUTRAL, new_bd_sparc_Mul_reg, new_bd_sparc_Mul_imm);
+ /* mark the node as modifying the flags register */
+ arch_irn_add_flags(mul, arch_irn_flags_modify_flags);
+
+ /* select the low 32 bit of the 64 bit result */
+ proj_res_low = new_rd_Proj(dbgi, mul, mode_Iu, pn_sparc_Mul_low);
+ return proj_res_low;
+}
+
+/**
+ * Creates an sparc Mulh.
+ * Mulh returns the upper 32bits of a mul instruction
+ *
+ * @return the created sparc Mulh node
+ */
+static ir_node *gen_Mulh(ir_node *node) {
+ ir_mode *mode = get_irn_mode(node);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+
+ ir_node *mul;
+ ir_node *proj_res_hi;
+
+ if (mode_is_float(mode))
+ panic("FP not supported yet");
+
+
+ assert(mode_is_data(mode));
+ mul = gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_SIZE_NEUTRAL, new_bd_sparc_Mulh_reg, new_bd_sparc_Mulh_imm);
+ //arch_irn_add_flags(mul, arch_irn_flags_modify_flags);
+ /* NOTE(review): this projects pn_sparc_Mulh_low although the function
+  * is documented to return the *upper* 32 bits -- verify the Mulh
+  * node's proj numbering (compare gen_Mul / pn_sparc_Mul_low) */
+ proj_res_hi = new_rd_Proj(dbgi, mul, mode_Iu, pn_sparc_Mulh_low);
+ return proj_res_hi;
+}
+
+/**
+ * Creates an sparc Div.
+ *
+ * @return the created sparc Div node; its result is consumed through
+ *         gen_Proj_Div (pn_Div_res only)
+ */
+static ir_node *gen_Div(ir_node *node) {
+
+ ir_mode *mode = get_irn_mode(node);
+
+ ir_node *div;
+
+ if (mode_is_float(mode))
+ panic("FP not supported yet");
+
+ //assert(mode_is_data(mode));
+ div = gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_Div_reg, new_bd_sparc_Div_imm);
+ return div;
+}
+
+
+/**
+ * transform abs node via the sign-mask trick:
+ *   mov  a, b          copy the operand
+ *   sra  b, 31, s      s = 0 for a >= 0, s = -1 for a < 0
+ *   xor  a, s, t       t = a ^ s (conditionally complements a)
+ *   sub  t, s          |a| = t - s
+ *
+ * @return the node computing the absolute value
+ */
+static ir_node *gen_Abs(ir_node *node) {
+ ir_node *block = be_transform_node(get_nodes_block(node));
+ ir_mode *mode = get_irn_mode(node);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *op = get_Abs_op(node);
+
+ ir_node *mov, *sra, *xor, *sub, *new_op;
+
+ if (mode_is_float(mode))
+ panic("FP not supported yet");
+
+ new_op = be_transform_node(op);
+
+ mov = new_bd_sparc_Mov_reg(dbgi, block, new_op);
+ sra = new_bd_sparc_ShiftRA_imm(dbgi, block, mov, 31);
+ xor = new_bd_sparc_Xor_reg(dbgi, block, new_op, sra);
+ /* |a| = (a ^ s) - s; note the operand order: subtracting xor from
+  * sra (as before) would compute -|a| instead */
+ sub = new_bd_sparc_Sub_reg(dbgi, block, xor, sra);
+
+ return sub;
+}
+
+/**
+ * Transforms a Not node.
+ *
+ * @return the created SPARC Not node
+ */
+static ir_node *gen_Not(ir_node *node)
+{
+ ir_node *block = be_transform_node(get_nodes_block(node));
+ ir_node *op = get_Not_op(node);
+ ir_node *new_op = be_transform_node(op);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+
+ return new_bd_sparc_Not(dbgi, block, new_op);
+}
+
+/**
+ * Transforms an And node into a SPARC And (register or immediate form).
+ *
+ * @return the created SPARC And node
+ */
+static ir_node *gen_And(ir_node *node)
+{
+ ir_mode *mode = get_irn_mode(node);
+
+ if (mode_is_float(mode))
+ panic("FP not implemented yet");
+
+ /* block and dbgi are resolved inside gen_helper_binop; the unused
+  * (void)-cast locals were dropped */
+ return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_And_reg, new_bd_sparc_And_imm);
+}
+
+/**
+ * Transforms an Or node into a SPARC Or (register or immediate form).
+ *
+ * @return the created SPARC Or node
+ */
+static ir_node *gen_Or(ir_node *node)
+{
+ ir_mode *mode = get_irn_mode(node);
+
+ if (mode_is_float(mode))
+ panic("FP not implemented yet");
+
+ /* block and dbgi are resolved inside gen_helper_binop; the unused
+  * (void)-cast locals were dropped */
+ return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_Or_reg, new_bd_sparc_Or_imm);
+}
+
+/**
+ * Transforms an Eor (xor) node into a SPARC Xor (register or immediate form).
+ *
+ * @return the created SPARC Xor node
+ */
+static ir_node *gen_Xor(ir_node *node)
+{
+ ir_mode *mode = get_irn_mode(node);
+
+ if (mode_is_float(mode))
+ panic("FP not implemented yet");
+
+ /* block and dbgi are resolved inside gen_helper_binop; the unused
+  * (void)-cast locals were dropped */
+ return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_Xor_reg, new_bd_sparc_Xor_imm);
+}
+
+/** Transforms a Shl into a SPARC logical shift-left (register or immediate form). */
+static ir_node *gen_Shl(ir_node *node)
+{
+ return gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_ShiftLL_reg, new_bd_sparc_ShiftLL_imm);
+}
+
+/** Transforms a Shr into a SPARC logical shift-right (register or immediate form). */
+static ir_node *gen_Shr(ir_node *node)
+{
+ return gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_ShiftLR_reg, new_bd_sparc_ShiftLR_imm);
+}
+
+/** Transforms a Shrs into a SPARC arithmetic shift-right (register or immediate form). */
+static ir_node *gen_Shra(ir_node *node)
+{
+ return gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_ShiftRA_reg, new_bd_sparc_ShiftRA_imm);
+}
/****** TRANSFORM GENERAL BACKEND NODES ********/
+/**
+ * Transforms a Minus node.
+ *
+ * @return the created SPARC Minus node (integer negation)
+ */
+static ir_node *gen_Minus(ir_node *node)
+{
+ ir_node *block = be_transform_node(get_nodes_block(node));
+ ir_node *op = get_Minus_op(node);
+ ir_node *new_op = be_transform_node(op);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_mode *mode = get_irn_mode(node);
+
+ if (mode_is_float(mode)) {
+ panic("FP not implemented yet");
+ }
+
+ assert(mode_is_data(mode));
+ return new_bd_sparc_Minus(dbgi, block, new_op);
+}
+
/**
* Transforms a Const node.
*
- * @param node the ir Store node
+ * @param node the ir Const node
* @return The transformed sparc node.
*/
static ir_node *gen_Const(ir_node *node)
(void) dbg;
if (mode_is_float(mode)) {
- panic("FP not supported yet");
+ return create_fp_const_graph(node, block);
}
+
return create_const_graph(node, block);
}
panic("FloatCmp not implemented");
}
+ /*
if (get_mode_size_bits(cmp_mode) != 32) {
panic("CmpMode != 32bit not supported yet");
}
+ */
assert(get_irn_mode(op2) == cmp_mode);
is_unsigned = !mode_is_signed(cmp_mode);
/* compare with 0 can be done with Tst */
+ /*
if (is_Const(op2) && tarval_is_null(get_Const_tarval(op2))) {
new_op1 = be_transform_node(op1);
return new_bd_sparc_Tst(dbgi, block, new_op1, false,
return new_bd_sparc_Tst(dbgi, block, new_op2, true,
is_unsigned);
}
+ */
/* integer compare */
new_op1 = be_transform_node(op1);
- //new_op1 = gen_extension(dbgi, block, new_op1, cmp_mode);
+ new_op1 = gen_extension(dbgi, block, new_op1, cmp_mode);
new_op2 = be_transform_node(op2);
- //new_op2 = gen_extension(dbgi, block, new_op2, cmp_mode);
+ new_op2 = gen_extension(dbgi, block, new_op2, cmp_mode);
return new_bd_sparc_Cmp_reg(dbgi, block, new_op1, new_op2, false, is_unsigned);
}
return new_node;
}
-/**
- * Create an And that will zero out upper bits.
- *
- * @param dbgi debug info
- * @param block the basic block
- * @param op the original node
- * @param src_bits number of lower bits that will remain
- */
-static ir_node *gen_zero_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
- int src_bits)
-{
- if (src_bits == 8) {
- return new_bd_sparc_And_imm(dbgi, block, op, 0xFF);
- } else if (src_bits == 16) {
- ir_node *lshift = new_bd_sparc_ShiftLL_imm(dbgi, block, op, 16);
- ir_node *rshift = new_bd_sparc_ShiftLR_imm(dbgi, block, lshift, 16);
- return rshift;
- } else {
- panic("zero extension only supported for 8 and 16 bits");
- }
-}
-
-/**
- * Generate code for a sign extension.
- */
-static ir_node *gen_sign_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
- int src_bits)
-{
- int shift_width = 32 - src_bits;
- ir_node *lshift_node = new_bd_sparc_ShiftLL_imm(dbgi, block, op, shift_width);
- ir_node *rshift_node = new_bd_sparc_ShiftRA_imm(dbgi, block, lshift_node, shift_width);
- return rshift_node;
-}
-
-/**
- * returns true if it is assured, that the upper bits of a node are "clean"
- * which means for a 16 or 8 bit value, that the upper bits in the register
- * are 0 for unsigned and a copy of the last significant bit for signed
- * numbers.
- */
-static bool upper_bits_clean(ir_node *transformed_node, ir_mode *mode)
-{
- (void) transformed_node;
- (void) mode;
- /* TODO */
- return false;
-}
-
/**
* Transforms a Conv node.
*
ir_mode *dst_mode = get_irn_mode(node);
dbg_info *dbg = get_irn_dbg_info(node);
+ int src_bits = get_mode_size_bits(src_mode);
+ int dst_bits = get_mode_size_bits(dst_mode);
+
if (src_mode == dst_mode)
return new_op;
if (mode_is_float(src_mode) || mode_is_float(dst_mode)) {
- panic("FP not implemented");
+ assert((src_bits <= 64 && dst_bits <= 64) && "quad FP not implemented");
+
+ if (mode_is_float(src_mode)) {
+ if (mode_is_float(dst_mode)) {
+ // float -> float conv
+ if (src_bits > dst_bits) {
+ return new_bd_sparc_FpDToFpS(dbg, block, new_op, dst_mode);
+ } else {
+ return new_bd_sparc_FpSToFpD(dbg, block, new_op, dst_mode);
+ }
+ } else {
+ // float -> int conv
+ switch (dst_bits) {
+ case 32:
+ return new_bd_sparc_FpSToInt(dbg, block, new_op, dst_mode);
+ case 64:
+ return new_bd_sparc_FpDToInt(dbg, block, new_op, dst_mode);
+ default:
+ panic("quad FP not implemented");
+ }
+ }
+ } else {
+ // int -> float conv
+ switch (dst_bits) {
+ case 32:
+ return new_bd_sparc_IntToFpS(dbg, block, new_op, src_mode);
+ case 64:
+ return new_bd_sparc_IntToFpD(dbg, block, new_op, src_mode);
+ default:
+ panic("quad FP not implemented");
+ }
+ }
} else { /* complete in gp registers */
- int src_bits = get_mode_size_bits(src_mode);
- int dst_bits = get_mode_size_bits(dst_mode);
int min_bits;
ir_mode *min_mode;
}
}
+/**
+ * Transforms an Unknown node by materializing a 0 in a gp register.
+ * FP Unknowns are not supported yet.
+ */
+static ir_node *gen_Unknown(ir_node *node)
+{
+ ir_node *block = get_nodes_block(node);
+ ir_node *new_block = be_transform_node(block);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+
+ ir_mode *mode = get_irn_mode(node);
+ if (mode_is_float(mode)) {
+ /* unreachable code after the panic was removed */
+ panic("FP not implemented");
+ } else if (mode_needs_gp_reg(mode)) {
+ /* just produce a 0 */
+ return create_const_graph_value(dbgi, new_block, 0);
+ }
+
+ panic("Unexpected Unknown mode");
+}
+
/**
* Transform some Phi nodes
*/
panic("Unsupported Proj from Load");
}
- return be_duplicate_node(node);
+ return be_duplicate_node(node);
}
/**
long proj = get_Proj_proj(node);
if (proj == pn_be_AddSP_sp) {
- // TODO: check for correct pn_sparc_* flags
ir_node *res = new_rd_Proj(dbgi, new_pred, mode_Iu,
pn_sparc_SubSP_stack);
arch_set_irn_register(res, &sparc_gp_regs[REG_SP]);
return res;
} else if (proj == pn_be_AddSP_res) {
- // TODO: check for correct pn_sparc_* flags
return new_rd_Proj(dbgi, new_pred, mode_Iu, pn_sparc_SubSP_stack);
} else if (proj == pn_be_AddSP_M) {
return new_rd_Proj(dbgi, new_pred, mode_M, pn_sparc_SubSP_M);
panic("not implemented");
}
+/**
+ * transform Projs from a Div
+ *
+ * NOTE(review): only pn_Div_res is handled; any other proj (e.g. the
+ * memory proj) falls through to the panic below -- confirm this is
+ * intended.
+ */
+static ir_node *gen_Proj_Div(ir_node *node)
+{
+ ir_node *pred = get_Proj_pred(node);
+ ir_node *new_pred = be_transform_node(pred);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_mode *mode = get_irn_mode(node);
+ long proj = get_Proj_proj(node);
+
+ switch (proj) {
+ case pn_Div_res:
+ if (is_sparc_Div(new_pred)) {
+ return new_rd_Proj(dbgi, new_pred, mode, pn_sparc_Div_res);
+ }
+ break;
+ default:
+ break;
+ }
+ panic("Unsupported Proj from Div");
+}
+
/**
* Transform a Proj node.
long proj = get_Proj_proj(node);
(void) irg;
- (void) dbgi;
+ (void) dbgi;
if (is_Store(pred)) {
if (proj == pn_Store_M) {
} else if (is_Cmp(pred)) {
//panic("gen_Proj not implemented for Cmp");
return gen_Proj_Cmp(node);
+ } else if (is_Div(pred)) {
+ return gen_Proj_Div(node);
} else if (is_Start(pred)) {
/*
if (proj == pn_Start_X_initial_exec) {
}
}
- return be_duplicate_node(node);
+ return be_duplicate_node(node);
}
+
/**
* transform a Jmp
*/
return new_bd_sparc_Jmp(dbgi, new_block);
}
-/**
- * the BAD transformer.
- */
-static ir_node *bad_transform(ir_node *irn)
-{
- panic("SPARC backend: Not implemented: %+F", irn);
-}
-
-/**
- * Set a node emitter. Make it a bit more type safe.
- */
-static void set_transformer(ir_op *op, be_transform_func sparc_transform_func)
-{
- op->ops.generic = (op_func)sparc_transform_func;
-}
-
/**
* configure transformation callbacks
*/
void sparc_register_transformers(void)
{
- clear_irp_opcodes_generic_func();
- set_transformer(op_Add, gen_Add);
- set_transformer(op_Store, gen_Store);
- set_transformer(op_Const, gen_Const);
- set_transformer(op_Load, gen_Load);
- set_transformer(op_Sub, gen_Sub);
-
- set_transformer(op_be_AddSP, gen_be_AddSP);
- set_transformer(op_be_SubSP, gen_be_SubSP);
- set_transformer(op_be_Copy, gen_be_Copy);
- set_transformer(op_be_Call, gen_be_Call);
- set_transformer(op_be_FrameAddr, gen_be_FrameAddr);
-
- set_transformer(op_Cond, gen_Cond);
- set_transformer(op_Cmp, gen_Cmp);
-
- set_transformer(op_SymConst, gen_SymConst);
-
- set_transformer(op_Phi, gen_Phi);
- set_transformer(op_Proj, gen_Proj);
-
- set_transformer(op_Conv, gen_Conv);
- set_transformer(op_Jmp, gen_Jmp);
-
- /* node list */
- /*
- set_transformer(op_Abs, gen_Abs);
- set_transformer(op_Add, gen_Add);
- set_transformer(op_And, gen_And);
- set_transformer(op_Const, gen_Const);
- set_transformer(op_Conv, gen_Conv);
- set_transformer(op_CopyB, gen_CopyB);
- set_transformer(op_Eor, gen_Eor);
- set_transformer(op_Jmp, gen_Jmp);
- set_transformer(op_Load, gen_Load);
- set_transformer(op_Minus, gen_Minus);
- set_transformer(op_Mul, gen_Mul);
- set_transformer(op_Not, gen_Not);
- set_transformer(op_Or, gen_Or);
- set_transformer(op_Quot, gen_Quot);
- set_transformer(op_Rotl, gen_Rotl);
- set_transformer(op_Shl, gen_Shl);
- set_transformer(op_Shr, gen_Shr);
- set_transformer(op_Shrs, gen_Shrs);
- set_transformer(op_Store, gen_Store);
- set_transformer(op_Sub, gen_Sub);
- set_transformer(op_Unknown, gen_Unknown);
- */
-
- set_transformer(op_ASM, bad_transform);
- set_transformer(op_Builtin, bad_transform);
- set_transformer(op_CallBegin, bad_transform);
- set_transformer(op_Cast, bad_transform);
- set_transformer(op_Confirm, bad_transform);
- set_transformer(op_DivMod, bad_transform);
- set_transformer(op_EndExcept, bad_transform);
- set_transformer(op_EndReg, bad_transform);
- set_transformer(op_Filter, bad_transform);
- set_transformer(op_Free, bad_transform);
- set_transformer(op_Id, bad_transform);
- set_transformer(op_InstOf, bad_transform);
- set_transformer(op_Mulh, bad_transform);
- set_transformer(op_Mux, bad_transform);
- set_transformer(op_Raise, bad_transform);
- set_transformer(op_Sel, bad_transform);
- set_transformer(op_Tuple, bad_transform);
-}
-
-
-/**
- * Pre-transform all unknown nodes.
- */
-static void sparc_pretransform_node(void)
-{
- sparc_code_gen_t *cg = env_cg;
- (void) cg;
- //cg->unknown_gp = be_pre_transform_node(cg->unknown_gp);
- //cg->unknown_fpa = be_pre_transform_node(cg->unknown_fpa);
+ be_start_transform_setup();
+
+ /* transform callbacks, registered in alphabetical op order */
+ be_set_transform_function(op_Abs, gen_Abs);
+ be_set_transform_function(op_Add, gen_Add);
+ be_set_transform_function(op_And, gen_And);
+ be_set_transform_function(op_be_AddSP, gen_be_AddSP);
+ be_set_transform_function(op_be_Call, gen_be_Call);
+ be_set_transform_function(op_be_Copy, gen_be_Copy);
+ be_set_transform_function(op_be_FrameAddr, gen_be_FrameAddr);
+ be_set_transform_function(op_be_SubSP, gen_be_SubSP);
+ be_set_transform_function(op_Cmp, gen_Cmp);
+ be_set_transform_function(op_Cond, gen_Cond);
+ be_set_transform_function(op_Const, gen_Const);
+ be_set_transform_function(op_Conv, gen_Conv);
+ be_set_transform_function(op_Div, gen_Div);
+ be_set_transform_function(op_Eor, gen_Xor);
+ be_set_transform_function(op_Jmp, gen_Jmp);
+ be_set_transform_function(op_Load, gen_Load);
+ be_set_transform_function(op_Minus, gen_Minus);
+ be_set_transform_function(op_Mul, gen_Mul);
+ be_set_transform_function(op_Mulh, gen_Mulh);
+ be_set_transform_function(op_Not, gen_Not);
+ be_set_transform_function(op_Or, gen_Or);
+ be_set_transform_function(op_Phi, gen_Phi);
+ be_set_transform_function(op_Proj, gen_Proj);
+ be_set_transform_function(op_Shl, gen_Shl);
+ be_set_transform_function(op_Shr, gen_Shr);
+ be_set_transform_function(op_Shrs, gen_Shra);
+ be_set_transform_function(op_Store, gen_Store);
+ be_set_transform_function(op_Sub, gen_Sub);
+ be_set_transform_function(op_SymConst, gen_SymConst);
+ be_set_transform_function(op_Unknown, gen_Unknown);
+
+ /* backend-specific Save node just needs duplication */
+ be_set_transform_function(op_sparc_Save, be_duplicate_node);
}
/**
{
sparc_register_transformers();
env_cg = cg;
- be_transform_graph(cg->birg, sparc_pretransform_node);
+ be_transform_graph(cg->irg, NULL);
}
void sparc_init_transform(void)