return new_bd_sparc_And_imm(dbgi, block, op, NULL, 0xFF);
} else if (src_bits == 16) {
ir_node *lshift = new_bd_sparc_Sll_imm(dbgi, block, op, NULL, 16);
- ir_node *rshift = new_bd_sparc_Slr_imm(dbgi, block, lshift, NULL, 16);
+ ir_node *rshift = new_bd_sparc_Srl_imm(dbgi, block, lshift, NULL, 16);
return rshift;
} else {
panic("zero extension only supported for 8 and 16 bits");
typedef enum {
MATCH_NONE = 0,
- MATCH_COMMUTATIVE = 1 << 0, /**< commutative operation. */
+ MATCH_COMMUTATIVE = 1U << 0, /**< commutative operation. */
+ MATCH_MODE_NEUTRAL = 1U << 1, /**< the higher bits of the inputs don't
+ influence the significant lower bits
+ at all (for cases where mode < 32bit) */
} match_flags_t;
typedef ir_node* (*new_binop_reg_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, ir_node *op2);
return is_value_imm_encodeable(value);
}
+static bool needs_extension(ir_mode *mode)
+{
+ return get_mode_size_bits(mode) < get_mode_size_bits(mode_gp);
+}
+
+/**
+ * Check if a given node is a Down-Conv, i.e. an integer Conv
+ * from a mode with more bits to a mode with fewer bits.
+ * NOTE(review): an earlier version of this comment also required the node
+ * to have at most 1 user, but no such user-count check is performed
+ * below — confirm whether one is intended.
+ *
+ * @param node the node
+ * @return true if node is a Down-Conv
+ */
+static bool is_downconv(const ir_node *node)
+{
+ ir_mode *src_mode;
+ ir_mode *dest_mode;
+
+ if (!is_Conv(node))
+ return false;
+
+ src_mode = get_irn_mode(get_Conv_op(node));
+ dest_mode = get_irn_mode(node);
+ return
+ mode_needs_gp_reg(src_mode) &&
+ mode_needs_gp_reg(dest_mode) &&
+ get_mode_size_bits(dest_mode) <= get_mode_size_bits(src_mode);
+}
+
+static ir_node *sparc_skip_downconv(ir_node *node)
+{
+ while (is_downconv(node)) {
+ node = get_Conv_op(node);
+ }
+ return node;
+}
+
/**
* helper function for binop operations
*
new_binop_reg_func new_reg,
new_binop_imm_func new_imm)
{
- dbg_info *dbgi = get_irn_dbg_info(node);
- ir_node *block = be_transform_node(get_nodes_block(node));
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *new_op1;
ir_node *new_op2;
+ ir_mode *mode1;
+ ir_mode *mode2;
+
+ if (flags & MATCH_MODE_NEUTRAL) {
+ op1 = sparc_skip_downconv(op1);
+ op2 = sparc_skip_downconv(op2);
+ }
+ mode1 = get_irn_mode(op1);
+ mode2 = get_irn_mode(op2);
if (is_imm_encodeable(op2)) {
- ir_node *new_op1 = be_transform_node(op1);
- int32_t immediate = get_tarval_long(get_Const_tarval(op2));
+ ir_node *new_op1 = be_transform_node(op1);
+ int32_t immediate = get_tarval_long(get_Const_tarval(op2));
+ if (! (flags & MATCH_MODE_NEUTRAL) && needs_extension(mode1)) {
+ new_op1 = gen_extension(dbgi, block, new_op1, mode1);
+ }
return new_imm(dbgi, block, new_op1, NULL, immediate);
}
new_op2 = be_transform_node(op2);
+ if (! (flags & MATCH_MODE_NEUTRAL) && needs_extension(mode2)) {
+ new_op2 = gen_extension(dbgi, block, new_op2, mode2);
+ }
if ((flags & MATCH_COMMUTATIVE) && is_imm_encodeable(op1)) {
int32_t immediate = get_tarval_long(get_Const_tarval(op1));
return new_imm(dbgi, block, new_op2, NULL, immediate);
}
- new_op1 = be_transform_node(op1);
+ new_op1 = be_transform_node(op1);
+ if (! (flags & MATCH_MODE_NEUTRAL) && needs_extension(mode1)) {
+ new_op1 = gen_extension(dbgi, block, new_op1, mode1);
+ }
return new_reg(dbgi, block, new_op1, new_op2);
}
panic("unsupported mode %+F for float op", mode);
}
+static ir_node *get_g0(void)
+{
+ return be_prolog_get_reg_value(abihelper, &sparc_gp_regs[REG_G0]);
+}
+
typedef struct address_t {
- ir_node *base;
+ ir_node *ptr;
+ ir_node *ptr2;
ir_entity *entity;
int32_t offset;
} address_t;
-static void match_address(ir_node *ptr, address_t *address)
+/**
+ * Match a load/store address
+ */
+static void match_address(ir_node *ptr, address_t *address, bool use_ptr2)
{
ir_node *base = ptr;
+ ir_node *ptr2 = NULL;
int32_t offset = 0;
ir_entity *entity = NULL;
ir_node *new_block = be_transform_node(block);
entity = get_SymConst_entity(base);
base = new_bd_sparc_SetHi(dbgi, new_block, entity, offset);
+ } else if (use_ptr2 && is_Add(base) && entity == NULL && offset == 0) {
+ ptr2 = be_transform_node(get_Add_right(base));
+ base = be_transform_node(get_Add_left(base));
} else {
if (is_value_imm_encodeable(offset)) {
base = be_transform_node(base);
}
}
- address->base = base;
+ address->ptr = base;
+ address->ptr2 = ptr2;
address->entity = entity;
address->offset = offset;
}
ir_node *left = get_Add_left(node);
/* is this simple address arithmetic? then we can let the linker do
* the calculation. */
- if (is_SymConst(left)) {
+ if (is_SymConst(left) && get_irn_n_edges(left) == 1) {
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = be_transform_node(get_nodes_block(node));
address_t address;
- match_address(node, &address);
- assert(is_sparc_SetHi(address.base));
- return new_bd_sparc_Or_imm(dbgi, block, address.base,
+ /* the value of use_ptr2 shouldn't matter here */
+ match_address(node, &address, false);
+ assert(is_sparc_SetHi(address.ptr));
+ return new_bd_sparc_Or_imm(dbgi, block, address.ptr,
address.entity, address.offset);
}
}
}
- return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_Add_reg,
- new_bd_sparc_Add_imm);
+ return gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_MODE_NEUTRAL,
+ new_bd_sparc_Add_reg, new_bd_sparc_Add_imm);
}
/**
}
}
-static ir_node *create_stf(dbg_info *dbgi, ir_node *block, ir_node *ptr,
- ir_node *value, ir_node *mem, ir_mode *mode,
+static ir_node *create_stf(dbg_info *dbgi, ir_node *block, ir_node *value,
+ ir_node *ptr, ir_node *mem, ir_mode *mode,
ir_entity *entity, long offset,
bool is_frame_entity)
{
unsigned bits = get_mode_size_bits(mode);
assert(mode_is_float(mode));
if (bits == 32) {
- return new_bd_sparc_Stf_s(dbgi, block, ptr, value, mem, mode, entity,
+ return new_bd_sparc_Stf_s(dbgi, block, value, ptr, mem, mode, entity,
offset, is_frame_entity);
} else if (bits == 64) {
- return new_bd_sparc_Stf_d(dbgi, block, ptr, value, mem, mode, entity,
+ return new_bd_sparc_Stf_d(dbgi, block, value, ptr, mem, mode, entity,
offset, is_frame_entity);
} else {
assert(bits == 128);
- return new_bd_sparc_Stf_q(dbgi, block, ptr, value, mem, mode, entity,
+ return new_bd_sparc_Stf_q(dbgi, block, value, ptr, mem, mode, entity,
offset, is_frame_entity);
}
}
ir_node *new_load = NULL;
address_t address;
- match_address(ptr, &address);
-
if (mode_is_float(mode)) {
- new_load = create_ldf(dbgi, block, address.base, new_mem, mode,
+ match_address(ptr, &address, false);
+ new_load = create_ldf(dbgi, block, address.ptr, new_mem, mode,
address.entity, address.offset, false);
} else {
- new_load = new_bd_sparc_Ld(dbgi, block, address.base, new_mem, mode,
- address.entity, address.offset, false);
+ match_address(ptr, &address, true);
+ if (address.ptr2 != NULL) {
+ assert(address.entity == NULL && address.offset == 0);
+ new_load = new_bd_sparc_Ld_reg(dbgi, block, address.ptr,
+ address.ptr2, new_mem, mode);
+ } else {
+ new_load = new_bd_sparc_Ld_imm(dbgi, block, address.ptr, new_mem,
+ mode, address.entity, address.offset,
+ false);
+ }
}
set_irn_pinned(new_load, get_irn_pinned(node));
ir_node *new_store = NULL;
address_t address;
- match_address(ptr, &address);
-
if (mode_is_float(mode)) {
- new_store = create_stf(dbgi, block, address.base, new_val, new_mem,
+ /* TODO: variants with reg+reg address mode */
+ match_address(ptr, &address, false);
+ new_store = create_stf(dbgi, block, new_val, address.ptr, new_mem,
mode, address.entity, address.offset, false);
} else {
- new_store = new_bd_sparc_St(dbgi, block, address.base, new_val, new_mem,
- mode, address.entity, address.offset,
- false);
+ match_address(ptr, &address, true);
+ if (address.ptr2 != NULL) {
+ assert(address.entity == NULL && address.offset == 0);
+ new_store = new_bd_sparc_St_reg(dbgi, block, new_val, address.ptr,
+ address.ptr2, new_mem, mode);
+ } else {
+ new_store = new_bd_sparc_St_imm(dbgi, block, new_val, address.ptr,
+ new_mem, mode, address.entity,
+ address.offset, false);
+ }
}
set_irn_pinned(new_store, get_irn_pinned(node));
new_bd_sparc_fmul_d, new_bd_sparc_fmul_q);
}
- assert(mode_is_data(mode));
- return gen_helper_binop(node, MATCH_COMMUTATIVE,
+ return gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_MODE_NEUTRAL,
new_bd_sparc_Mul_reg, new_bd_sparc_Mul_imm);
}
{
ir_mode *mode = get_irn_mode(node);
ir_node *mul;
- ir_node *proj_res_hi;
if (mode_is_float(mode))
panic("FP not supported yet");
-
- assert(mode_is_data(mode));
mul = gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_Mulh_reg, new_bd_sparc_Mulh_imm);
- //arch_irn_add_flags(mul, arch_irn_flags_modify_flags);
- proj_res_hi = new_r_Proj(mul, mode_gp, pn_sparc_Mulh_low);
- return proj_res_hi;
+ return new_r_Proj(mul, mode_gp, pn_sparc_Mulh_low);
+}
+
+static ir_node *gen_sign_extension_value(ir_node *node)
+{
+ ir_node *block = get_nodes_block(node);
+ ir_node *new_block = be_transform_node(block);
+ ir_node *new_node = be_transform_node(node);
+ /* TODO: we could take shortcuts for some value types, e.g. for
+ * constants or other cases where the sign bit is known in
+ * advance. */
+ return new_bd_sparc_Sra_imm(NULL, new_block, new_node, NULL, 31);
}
/**
*/
static ir_node *gen_Div(ir_node *node)
{
- ir_mode *mode = get_Div_resmode(node);
- ir_node *res;
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *block = get_nodes_block(node);
+ ir_node *new_block = be_transform_node(block);
+ ir_mode *mode = get_Div_resmode(node);
+ ir_node *left = get_Div_left(node);
+ ir_node *left_low = be_transform_node(left);
+ ir_node *right = get_Div_right(node);
+ ir_node *res;
assert(!mode_is_float(mode));
if (mode_is_signed(mode)) {
- res = gen_helper_binop(node, 0, new_bd_sparc_SDiv_reg,
- new_bd_sparc_SDiv_imm);
+ ir_node *left_high = gen_sign_extension_value(left);
+
+ if (is_imm_encodeable(right)) {
+ int32_t immediate = get_tarval_long(get_Const_tarval(right));
+ res = new_bd_sparc_SDiv_imm(dbgi, new_block, left_high, left_low,
+ NULL, immediate);
+ } else {
+ ir_node *new_right = be_transform_node(right);
+ res = new_bd_sparc_SDiv_reg(dbgi, new_block, left_high, left_low,
+ new_right);
+ }
} else {
- res = gen_helper_binop(node, 0, new_bd_sparc_UDiv_reg,
- new_bd_sparc_UDiv_imm);
+ ir_node *left_high = get_g0();
+ if (is_imm_encodeable(right)) {
+ int32_t immediate = get_tarval_long(get_Const_tarval(right));
+ res = new_bd_sparc_UDiv_imm(dbgi, new_block, left_high, left_low,
+ NULL, immediate);
+ } else {
+ ir_node *new_right = be_transform_node(right);
+ res = new_bd_sparc_UDiv_reg(dbgi, new_block, left_high, left_low,
+ new_right);
+ }
}
+
return res;
}
new_bd_sparc_fdiv_d, new_bd_sparc_fdiv_q);
}
+#if 0
static ir_node *gen_Abs(ir_node *node)
{
ir_mode *const mode = get_irn_mode(node);
return sub;
}
}
-
-static ir_node *get_g0(void)
-{
- return be_prolog_get_reg_value(abihelper, &sparc_gp_regs[REG_G0]);
-}
+#endif
/**
* Transforms a Not node.
return new_bd_sparc_XNor_reg(dbgi, block, zero, new_op);
}
-static ir_node *gen_And(ir_node *node)
+static ir_node *gen_helper_bitop(ir_node *node,
+ new_binop_reg_func new_reg,
+ new_binop_imm_func new_imm,
+ new_binop_reg_func new_not_reg,
+ new_binop_imm_func new_not_imm)
{
- ir_node *left = get_And_left(node);
- ir_node *right = get_And_right(node);
-
- if (is_Not(right)) {
- ir_node *not_op = get_Not_op(right);
- return gen_helper_binop_args(node, left, not_op, MATCH_NONE,
- new_bd_sparc_AndN_reg,
- new_bd_sparc_AndN_imm);
+ ir_node *op1 = get_binop_left(node);
+ ir_node *op2 = get_binop_right(node);
+ if (is_Not(op1)) {
+ return gen_helper_binop_args(node, op2, get_Not_op(op1),
+ MATCH_MODE_NEUTRAL,
+ new_not_reg, new_not_imm);
}
- if (is_Not(left)) {
- ir_node *not_op = get_Not_op(left);
- return gen_helper_binop_args(node, right, not_op, MATCH_NONE,
- new_bd_sparc_AndN_reg,
- new_bd_sparc_AndN_imm);
+ if (is_Not(op2)) {
+ return gen_helper_binop_args(node, op1, get_Not_op(op2),
+ MATCH_MODE_NEUTRAL,
+ new_not_reg, new_not_imm);
}
+ return gen_helper_binop_args(node, op1, op2,
+ MATCH_MODE_NEUTRAL | MATCH_COMMUTATIVE,
+ new_reg, new_imm);
+}
- return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_And_reg,
- new_bd_sparc_And_imm);
+static ir_node *gen_And(ir_node *node)
+{
+ return gen_helper_bitop(node,
+ new_bd_sparc_And_reg,
+ new_bd_sparc_And_imm,
+ new_bd_sparc_AndN_reg,
+ new_bd_sparc_AndN_imm);
}
static ir_node *gen_Or(ir_node *node)
{
- ir_node *left = get_Or_left(node);
- ir_node *right = get_Or_right(node);
-
- if (is_Not(right)) {
- ir_node *not_op = get_Not_op(right);
- return gen_helper_binop_args(node, left, not_op, MATCH_NONE,
- new_bd_sparc_OrN_reg,
- new_bd_sparc_OrN_imm);
- }
- if (is_Not(left)) {
- ir_node *not_op = get_Not_op(left);
- return gen_helper_binop_args(node, right, not_op, MATCH_NONE,
- new_bd_sparc_OrN_reg,
- new_bd_sparc_OrN_imm);
- }
-
- return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_Or_reg,
- new_bd_sparc_Or_imm);
+ return gen_helper_bitop(node,
+ new_bd_sparc_Or_reg,
+ new_bd_sparc_Or_imm,
+ new_bd_sparc_OrN_reg,
+ new_bd_sparc_OrN_imm);
}
static ir_node *gen_Eor(ir_node *node)
{
- ir_node *left = get_Eor_left(node);
- ir_node *right = get_Eor_right(node);
-
- if (is_Not(right)) {
- ir_node *not_op = get_Not_op(right);
- return gen_helper_binop_args(node, left, not_op, MATCH_COMMUTATIVE,
- new_bd_sparc_XNor_reg,
- new_bd_sparc_XNor_imm);
- }
- if (is_Not(left)) {
- ir_node *not_op = get_Not_op(left);
- return gen_helper_binop_args(node, not_op, right,
- MATCH_COMMUTATIVE | MATCH_MODE_NEUTRAL,
- new_bd_sparc_XNor_reg,
- new_bd_sparc_XNor_imm);
- }
-
- return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_Xor_reg,
- new_bd_sparc_Xor_imm);
+ return gen_helper_bitop(node,
+ new_bd_sparc_Xor_reg,
+ new_bd_sparc_Xor_imm,
+ new_bd_sparc_XNor_reg,
+ new_bd_sparc_XNor_imm);
}
static ir_node *gen_Shl(ir_node *node)
static ir_node *gen_Shr(ir_node *node)
{
- return gen_helper_binop(node, MATCH_NONE, new_bd_sparc_Slr_reg, new_bd_sparc_Slr_imm);
+ return gen_helper_binop(node, MATCH_NONE, new_bd_sparc_Srl_reg, new_bd_sparc_Srl_imm);
}
static ir_node *gen_Shrs(ir_node *node)
return get_irn_mode(op);
}
-/**
- * Transform Cond nodes
- */
+static ir_node *make_address(dbg_info *dbgi, ir_node *block, ir_entity *entity,
+ int32_t offset)
+{
+ ir_node *hi = new_bd_sparc_SetHi(dbgi, block, entity, offset);
+ ir_node *low = new_bd_sparc_Or_imm(dbgi, block, hi, entity, offset);
+ be_dep_on_frame(hi);
+ return low;
+}
+
+static ir_node *gen_SwitchJmp(ir_node *node)
+{
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *block = be_transform_node(get_nodes_block(node));
+ ir_node *selector = get_Cond_selector(node);
+ ir_node *new_selector = be_transform_node(selector);
+ long default_pn = get_Cond_default_proj(node);
+ ir_entity *entity;
+ ir_node *table_address;
+ ir_node *index;
+ ir_node *load;
+ ir_node *address;
+
+ /* switch with smaller mode not implemented yet */
+ assert(get_mode_size_bits(get_irn_mode(selector)) == 32);
+
+ entity = new_entity(NULL, id_unique("TBL%u"), get_unknown_type());
+ set_entity_visibility(entity, ir_visibility_private);
+ add_entity_linkage(entity, IR_LINKAGE_CONSTANT);
+
+ /* TODO: no out-of-bounds check for the jump-table access is
+ * generated yet. This should probably be moved into a switch-lowering
+ * phase, which would also enable some additional optimisations. */
+
+ /* construct base address */
+ table_address = make_address(dbgi, block, entity, 0);
+ /* scale index */
+ index = new_bd_sparc_Sll_imm(dbgi, block, new_selector, NULL, 2);
+ /* load from jumptable */
+ load = new_bd_sparc_Ld_reg(dbgi, block, table_address, index, new_NoMem(),
+ mode_gp);
+ address = new_r_Proj(load, mode_gp, pn_sparc_Ld_res);
+
+ return new_bd_sparc_SwitchJmp(dbgi, block, address, default_pn, entity);
+}
+
static ir_node *gen_Cond(ir_node *node)
{
ir_node *selector = get_Cond_selector(node);
// switch/case jumps
if (mode != mode_b) {
- panic("SwitchJump not implemented yet");
+ return gen_SwitchJmp(node);
}
// regular if/else jumps
*/
static ir_node *gen_Cmp(ir_node *node)
{
- ir_node *block = be_transform_node(get_nodes_block(node));
- ir_node *op1 = get_Cmp_left(node);
- ir_node *op2 = get_Cmp_right(node);
- ir_mode *cmp_mode = get_irn_mode(op1);
- dbg_info *dbgi = get_irn_dbg_info(node);
- ir_node *new_op1 = be_transform_node(op1);
- ir_node *new_op2 = be_transform_node(op2);
+ ir_node *op1 = get_Cmp_left(node);
+ ir_node *op2 = get_Cmp_right(node);
+ ir_mode *cmp_mode = get_irn_mode(op1);
assert(get_irn_mode(op2) == cmp_mode);
if (mode_is_float(cmp_mode)) {
- unsigned bits = get_mode_size_bits(cmp_mode);
+ ir_node *block = be_transform_node(get_nodes_block(node));
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *new_op1 = be_transform_node(op1);
+ ir_node *new_op2 = be_transform_node(op2);
+ unsigned bits = get_mode_size_bits(cmp_mode);
if (bits == 32) {
return new_bd_sparc_fcmp_s(dbgi, block, new_op1, new_op2, cmp_mode);
} else if (bits == 64) {
}
}
+ /* When we compare a bitop like And, Or, ... against 0 we can directly
+ * use the CC-setting (bitopcc) variant.
+ * Currently we only do this when the Cmp is the bitop's sole user...
+ */
+ if (is_Const(op2) && is_Const_null(op2) && get_irn_n_edges(op1) == 1) {
+ if (is_And(op1)) {
+ return gen_helper_bitop(op1,
+ new_bd_sparc_AndCCZero_reg,
+ new_bd_sparc_AndCCZero_imm,
+ new_bd_sparc_AndNCCZero_reg,
+ new_bd_sparc_AndNCCZero_imm);
+ } else if (is_Or(op1)) {
+ return gen_helper_bitop(op1,
+ new_bd_sparc_OrCCZero_reg,
+ new_bd_sparc_OrCCZero_imm,
+ new_bd_sparc_OrNCCZero_reg,
+ new_bd_sparc_OrNCCZero_imm);
+ } else if (is_Eor(op1)) {
+ return gen_helper_bitop(op1,
+ new_bd_sparc_XorCCZero_reg,
+ new_bd_sparc_XorCCZero_imm,
+ new_bd_sparc_XNorCCZero_reg,
+ new_bd_sparc_XNorCCZero_imm);
+ }
+ }
+
/* integer compare */
- new_op1 = gen_extension(dbgi, block, new_op1, cmp_mode);
- new_op2 = gen_extension(dbgi, block, new_op2, cmp_mode);
- return new_bd_sparc_Cmp_reg(dbgi, block, new_op1, new_op2);
+ return gen_helper_binop_args(node, op1, op2, MATCH_NONE,
+ new_bd_sparc_Cmp_reg, new_bd_sparc_Cmp_imm);
}
/**
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_node *new_block = be_transform_node(block);
- ir_node *hi = new_bd_sparc_SetHi(dbgi, new_block, entity, 0);
- ir_node *low = new_bd_sparc_Or_imm(dbgi, new_block, hi, entity, 0);
- be_dep_on_frame(hi);
-
- return low;
+ return make_address(dbgi, new_block, entity, 0);
}
static ir_node *create_fftof(dbg_info *dbgi, ir_node *block, ir_node *op,
static ir_node *create_ftoi(dbg_info *dbgi, ir_node *block, ir_node *op,
ir_mode *src_mode)
{
- unsigned bits = get_mode_size_bits(src_mode);
+ ir_node *ftoi;
+ unsigned bits = get_mode_size_bits(src_mode);
if (bits == 32) {
- return new_bd_sparc_fftoi_s(dbgi, block, op, src_mode);
+ ftoi = new_bd_sparc_fftoi_s(dbgi, block, op, src_mode);
} else if (bits == 64) {
- return new_bd_sparc_fftoi_d(dbgi, block, op, src_mode);
+ ftoi = new_bd_sparc_fftoi_d(dbgi, block, op, src_mode);
} else {
assert(bits == 128);
- return new_bd_sparc_fftoi_q(dbgi, block, op, src_mode);
+ ftoi = new_bd_sparc_fftoi_q(dbgi, block, op, src_mode);
+ }
+
+ {
+ ir_graph *irg = get_irn_irg(block);
+ ir_node *sp = get_irg_frame(irg);
+ ir_node *nomem = new_r_NoMem(irg);
+ ir_node *stf = create_stf(dbgi, block, ftoi, sp, nomem, src_mode,
+ NULL, 0, true);
+ ir_node *ld = new_bd_sparc_Ld_imm(dbgi, block, sp, stf, mode_gp,
+ NULL, 0, true);
+ ir_node *res = new_r_Proj(ld, mode_gp, pn_sparc_Ld_res);
+ set_irn_pinned(stf, op_pin_state_floats);
+ set_irn_pinned(ld, op_pin_state_floats);
+ return res;
}
}
static ir_node *create_itof(dbg_info *dbgi, ir_node *block, ir_node *op,
ir_mode *dst_mode)
{
- unsigned bits = get_mode_size_bits(dst_mode);
+ ir_graph *irg = get_irn_irg(block);
+ ir_node *sp = get_irg_frame(irg);
+ ir_node *nomem = new_r_NoMem(irg);
+ ir_node *st = new_bd_sparc_St_imm(dbgi, block, op, sp, nomem,
+ mode_gp, NULL, 0, true);
+ ir_node *ldf = new_bd_sparc_Ldf_s(dbgi, block, sp, st, mode_fp,
+ NULL, 0, true);
+ ir_node *res = new_r_Proj(ldf, mode_fp, pn_sparc_Ldf_res);
+ unsigned bits = get_mode_size_bits(dst_mode);
+ set_irn_pinned(st, op_pin_state_floats);
+ set_irn_pinned(ldf, op_pin_state_floats);
+
if (bits == 32) {
- return new_bd_sparc_fitof_s(dbgi, block, op, dst_mode);
+ return new_bd_sparc_fitof_s(dbgi, block, res, dst_mode);
} else if (bits == 64) {
- return new_bd_sparc_fitof_d(dbgi, block, op, dst_mode);
+ return new_bd_sparc_fitof_d(dbgi, block, res, dst_mode);
} else {
assert(bits == 128);
- return new_bd_sparc_fitof_q(dbgi, block, op, dst_mode);
+ return new_bd_sparc_fitof_q(dbgi, block, res, dst_mode);
}
}
-/**
- * Transforms a Conv node.
- *
- */
static ir_node *gen_Conv(ir_node *node)
{
ir_node *block = be_transform_node(get_nodes_block(node));
}
} else {
/* int -> float conv */
- if (!mode_is_signed(src_mode))
- panic("unsigned to float not implemented yet");
+ if (src_bits < 32) {
+ new_op = gen_extension(dbg, block, new_op, src_mode);
+ } else if (src_bits == 32 && !mode_is_signed(src_mode)) {
+ panic("unsigned to float not lowered!");
+ }
return create_itof(dbg, block, new_op, dst_mode);
}
} else { /* complete in gp registers */
ir_graph *irg = current_ir_graph;
ir_node *sp = get_irg_frame(irg);
ir_node *nomem = new_NoMem();
- ir_node *st = new_bd_sparc_St(dbgi, block, sp, value0, nomem, mode_gp,
- NULL, 0, true);
+ ir_node *st = new_bd_sparc_St_imm(dbgi, block, value0, sp, nomem,
+ mode_gp, NULL, 0, true);
ir_mode *mode;
ir_node *ldf;
ir_node *mem;
set_irn_pinned(st, op_pin_state_floats);
if (value1 != NULL) {
- ir_node *st1 = new_bd_sparc_St(dbgi, block, sp, value1, nomem, mode_gp,
- NULL, 4, true);
+ ir_node *st1 = new_bd_sparc_St_imm(dbgi, block, value1, sp, nomem,
+ mode_gp, NULL, 4, true);
ir_node *in[2] = { st, st1 };
ir_node *sync = new_r_Sync(block, 2, in);
set_irn_pinned(st1, op_pin_state_floats);
ir_graph *irg = current_ir_graph;
ir_node *stack = get_irg_frame(irg);
ir_node *nomem = new_NoMem();
- ir_node *stf = create_stf(dbgi, block, stack, node, nomem, float_mode,
+ ir_node *stf = create_stf(dbgi, block, node, stack, nomem, float_mode,
NULL, 0, true);
int bits = get_mode_size_bits(float_mode);
ir_node *ld;
set_irn_pinned(stf, op_pin_state_floats);
- ld = new_bd_sparc_Ld(dbgi, block, stack, stf, mode_gp, NULL, 0, true);
+ ld = new_bd_sparc_Ld_imm(dbgi, block, stack, stf, mode_gp, NULL, 0, true);
set_irn_pinned(ld, op_pin_state_floats);
result[0] = new_Proj(ld, mode_gp, pn_sparc_Ld_res);
if (bits == 64) {
- ir_node *ld2 = new_bd_sparc_Ld(dbgi, block, stack, stf, mode_gp,
- NULL, 4, true);
+ ir_node *ld2 = new_bd_sparc_Ld_imm(dbgi, block, stack, stf, mode_gp,
+ NULL, 4, true);
set_irn_pinned(ld, op_pin_state_floats);
result[1] = new_Proj(ld2, mode_gp, pn_sparc_Ld_res);
/* max inputs: memory, callee, register arguments */
int max_inputs = 2 + n_param_regs;
ir_node **in = ALLOCAN(ir_node*, max_inputs);
- ir_node **sync_ins = ALLOCAN(ir_node*, max_inputs);
+ ir_node **sync_ins = ALLOCAN(ir_node*, n_params);
struct obstack *obst = be_get_be_obst(irg);
const arch_register_req_t **in_req
= OALLOCNZ(obst, const arch_register_req_t*, max_inputs);
/* create a parameter frame if necessary */
if (mode_is_float(mode)) {
- str = create_stf(dbgi, new_block, incsp, new_value, new_mem,
+ str = create_stf(dbgi, new_block, new_value, incsp, new_mem,
mode, NULL, param->offset, true);
} else {
- str = new_bd_sparc_St(dbgi, new_block, incsp, new_value, new_mem,
- mode, NULL, param->offset, true);
+ str = new_bd_sparc_St_imm(dbgi, new_block, new_value, incsp,
+ new_mem, mode, NULL, param->offset, true);
}
set_irn_pinned(str, op_pin_state_floats);
sync_ins[sync_arity++] = str;
} else {
res = new_bd_sparc_Call_reg(dbgi, new_block, in_arity, in, out_arity);
}
- set_sparc_in_req_all(res, in_req);
+ arch_set_in_register_reqs(res, in_req);
/* create output register reqs */
o = 0;
} else if (param->entity != NULL) {
ir_node *fp = be_prolog_get_reg_value(abihelper, fp_reg);
ir_node *mem = be_prolog_get_memory(abihelper);
- ir_node *ld = new_bd_sparc_Ld(NULL, new_block, fp, mem,
- mode_gp, param->entity,
- 0, true);
+ ir_node *ld = new_bd_sparc_Ld_imm(NULL, new_block, fp, mem,
+ mode_gp, param->entity,
+ 0, true);
value1 = new_Proj(ld, mode_gp, pn_sparc_Ld_res);
}
param->entity, 0, true);
value = new_r_Proj(load, mode_fp, pn_sparc_Ldf_res);
} else {
- load = new_bd_sparc_Ld(NULL, new_block, fp, mem, mode,
- param->entity, 0, true);
+ load = new_bd_sparc_Ld_imm(NULL, new_block, fp, mem, mode,
+ param->entity, 0, true);
value = new_r_Proj(load, mode_gp, pn_sparc_Ld_res);
}
set_irn_pinned(load, op_pin_state_floats);
{
be_start_transform_setup();
- be_set_transform_function(op_Abs, gen_Abs);
be_set_transform_function(op_Add, gen_Add);
be_set_transform_function(op_And, gen_And);
be_set_transform_function(op_Call, gen_Call);