typedef enum {
MATCH_NONE = 0,
- MATCH_COMMUTATIVE = 1 << 0, /**< commutative operation. */
+ MATCH_COMMUTATIVE = 1U << 0, /**< commutative operation. */
+ MATCH_MODE_NEUTRAL = 1U << 1, /**< the higher bits of the inputs don't
+ influence the significant lower bits at
+ all (for cases where mode < 32bit) */
} match_flags_t;
typedef ir_node* (*new_binop_reg_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, ir_node *op2);
return is_value_imm_encodeable(value);
}
+/**
+ * Returns true if @p mode is narrower than the general-purpose register
+ * mode (mode_gp), i.e. values of this mode may need an explicit
+ * sign-/zero-extension before use in a full-width operation.
+ */
+static bool needs_extension(ir_mode *mode)
+{
+ return get_mode_size_bits(mode) < get_mode_size_bits(mode_gp);
+}
+
+/**
+ * Check if a given node is a Down-Conv, i.e. an integer Conv from a mode
+ * with more bits to a mode with the same number of bits or less.
+ * NOTE(review): unlike similar helpers in other backends, this does NOT
+ * check the number of users of the node — the code below has no edge
+ * count check; confirm whether a single-user restriction is needed.
+ *
+ * @param node the node
+ * @return non-zero if node is a Down-Conv
+ */
+static bool is_downconv(const ir_node *node)
+{
+ ir_mode *src_mode;
+ ir_mode *dest_mode;
+
+ if (!is_Conv(node))
+ return false;
+
+ src_mode = get_irn_mode(get_Conv_op(node));
+ dest_mode = get_irn_mode(node);
+ return
+ mode_needs_gp_reg(src_mode) &&
+ mode_needs_gp_reg(dest_mode) &&
+ get_mode_size_bits(dest_mode) <= get_mode_size_bits(src_mode);
+}
+
+/**
+ * Skip over a chain of Down-Convs and return the unconverted operand.
+ * Used for mode-neutral operations where the narrowing conversions are
+ * irrelevant to the significant lower bits of the result.
+ */
+static ir_node *sparc_skip_downconv(ir_node *node)
+{
+ while (is_downconv(node)) {
+ node = get_Conv_op(node);
+ }
+ return node;
+}
+
/**
* helper function for binop operations
*
new_binop_reg_func new_reg,
new_binop_imm_func new_imm)
{
- dbg_info *dbgi = get_irn_dbg_info(node);
- ir_node *block = be_transform_node(get_nodes_block(node));
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *new_op1;
ir_node *new_op2;
+ ir_mode *mode1;
+ ir_mode *mode2;
+
+ if (flags & MATCH_MODE_NEUTRAL) {
+ op1 = sparc_skip_downconv(op1);
+ op2 = sparc_skip_downconv(op2);
+ }
+ mode1 = get_irn_mode(op1);
+ mode2 = get_irn_mode(op2);
if (is_imm_encodeable(op2)) {
- ir_node *new_op1 = be_transform_node(op1);
- int32_t immediate = get_tarval_long(get_Const_tarval(op2));
+ ir_node *new_op1 = be_transform_node(op1);
+ int32_t immediate = get_tarval_long(get_Const_tarval(op2));
+ if (! (flags & MATCH_MODE_NEUTRAL) && needs_extension(mode1)) {
+ new_op1 = gen_extension(dbgi, block, new_op1, mode1);
+ }
return new_imm(dbgi, block, new_op1, NULL, immediate);
}
new_op2 = be_transform_node(op2);
+ if (! (flags & MATCH_MODE_NEUTRAL) && needs_extension(mode2)) {
+ new_op2 = gen_extension(dbgi, block, new_op2, mode2);
+ }
if ((flags & MATCH_COMMUTATIVE) && is_imm_encodeable(op1)) {
int32_t immediate = get_tarval_long(get_Const_tarval(op1));
return new_imm(dbgi, block, new_op2, NULL, immediate);
}
- new_op1 = be_transform_node(op1);
+ new_op1 = be_transform_node(op1);
+ if (! (flags & MATCH_MODE_NEUTRAL) && needs_extension(mode1)) {
+ new_op1 = gen_extension(dbgi, block, new_op1, mode1);
+ }
return new_reg(dbgi, block, new_op1, new_op2);
}
ir_node *left = get_Add_left(node);
/* is this simple address arithmetic? then we can let the linker do
* the calculation. */
- if (is_SymConst(left)) {
+ if (is_SymConst(left) && get_irn_n_edges(left) == 1) {
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = be_transform_node(get_nodes_block(node));
address_t address;
}
}
- return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_Add_reg,
- new_bd_sparc_Add_imm);
+ return gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_MODE_NEUTRAL,
+ new_bd_sparc_Add_reg, new_bd_sparc_Add_imm);
}
/**
new_bd_sparc_fmul_d, new_bd_sparc_fmul_q);
}
- assert(mode_is_data(mode));
- return gen_helper_binop(node, MATCH_COMMUTATIVE,
+ return gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_MODE_NEUTRAL,
new_bd_sparc_Mul_reg, new_bd_sparc_Mul_imm);
}
{
ir_mode *mode = get_irn_mode(node);
ir_node *mul;
- ir_node *proj_res_hi;
if (mode_is_float(mode))
panic("FP not supported yet");
-
- assert(mode_is_data(mode));
mul = gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_Mulh_reg, new_bd_sparc_Mulh_imm);
- //arch_irn_add_flags(mul, arch_irn_flags_modify_flags);
- proj_res_hi = new_r_Proj(mul, mode_gp, pn_sparc_Mulh_low);
- return proj_res_hi;
+ return new_r_Proj(mul, mode_gp, pn_sparc_Mulh_low);
}
static ir_node *gen_sign_extension_value(ir_node *node)
new_bd_sparc_fdiv_d, new_bd_sparc_fdiv_q);
}
+#if 0
static ir_node *gen_Abs(ir_node *node)
{
ir_mode *const mode = get_irn_mode(node);
return sub;
}
}
+#endif
/**
* Transforms a Not node.
return new_bd_sparc_XNor_reg(dbgi, block, zero, new_op);
}
-static ir_node *gen_And(ir_node *node)
+/**
+ * Generic transformer for bitwise binops (And/Or/Eor). If one operand is
+ * a Not, the Not is folded into the operation by emitting the
+ * inverted-operand instruction variant instead (new_not_reg/new_not_imm,
+ * e.g. AndN instead of And); note the operands are swapped so the
+ * non-inverted operand comes first. All bitops are matched mode-neutral.
+ *
+ * @param new_reg     constructor for the register-register form
+ * @param new_imm     constructor for the register-immediate form
+ * @param new_not_reg constructor for the inverted-second-operand reg form
+ * @param new_not_imm constructor for the inverted-second-operand imm form
+ */
+static ir_node *gen_helper_bitop(ir_node *node,
+ new_binop_reg_func new_reg,
+ new_binop_imm_func new_imm,
+ new_binop_reg_func new_not_reg,
+ new_binop_imm_func new_not_imm)
{
- ir_node *left = get_And_left(node);
- ir_node *right = get_And_right(node);
-
- if (is_Not(right)) {
- ir_node *not_op = get_Not_op(right);
- return gen_helper_binop_args(node, left, not_op, MATCH_NONE,
- new_bd_sparc_AndN_reg,
- new_bd_sparc_AndN_imm);
+ ir_node *op1 = get_binop_left(node);
+ ir_node *op2 = get_binop_right(node);
+ if (is_Not(op1)) {
+ return gen_helper_binop_args(node, op2, get_Not_op(op1),
+ MATCH_MODE_NEUTRAL,
+ new_not_reg, new_not_imm);
}
- if (is_Not(left)) {
- ir_node *not_op = get_Not_op(left);
- return gen_helper_binop_args(node, right, not_op, MATCH_NONE,
- new_bd_sparc_AndN_reg,
- new_bd_sparc_AndN_imm);
+ if (is_Not(op2)) {
+ return gen_helper_binop_args(node, op1, get_Not_op(op2),
+ MATCH_MODE_NEUTRAL,
+ new_not_reg, new_not_imm);
}
+ /* no Not operand: plain commutative, mode-neutral bitop */
+ return gen_helper_binop_args(node, op1, op2,
+ MATCH_MODE_NEUTRAL | MATCH_COMMUTATIVE,
+ new_reg, new_imm);
+}
- return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_And_reg,
- new_bd_sparc_And_imm);
+/** Transform And: delegates to the bitop helper; a Not operand folds
+ * into AndN. */
+static ir_node *gen_And(ir_node *node)
+{
+ return gen_helper_bitop(node,
+ new_bd_sparc_And_reg,
+ new_bd_sparc_And_imm,
+ new_bd_sparc_AndN_reg,
+ new_bd_sparc_AndN_imm);
}
static ir_node *gen_Or(ir_node *node)
{
- ir_node *left = get_Or_left(node);
- ir_node *right = get_Or_right(node);
-
- if (is_Not(right)) {
- ir_node *not_op = get_Not_op(right);
- return gen_helper_binop_args(node, left, not_op, MATCH_NONE,
- new_bd_sparc_OrN_reg,
- new_bd_sparc_OrN_imm);
- }
- if (is_Not(left)) {
- ir_node *not_op = get_Not_op(left);
- return gen_helper_binop_args(node, right, not_op, MATCH_NONE,
- new_bd_sparc_OrN_reg,
- new_bd_sparc_OrN_imm);
- }
-
- return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_Or_reg,
- new_bd_sparc_Or_imm);
+ /* delegate to the bitop helper; a Not operand folds into OrN */
+ return gen_helper_bitop(node,
+ new_bd_sparc_Or_reg,
+ new_bd_sparc_Or_imm,
+ new_bd_sparc_OrN_reg,
+ new_bd_sparc_OrN_imm);
}
static ir_node *gen_Eor(ir_node *node)
{
- ir_node *left = get_Eor_left(node);
- ir_node *right = get_Eor_right(node);
-
- if (is_Not(right)) {
- ir_node *not_op = get_Not_op(right);
- return gen_helper_binop_args(node, left, not_op, MATCH_COMMUTATIVE,
- new_bd_sparc_XNor_reg,
- new_bd_sparc_XNor_imm);
- }
- if (is_Not(left)) {
- ir_node *not_op = get_Not_op(left);
- return gen_helper_binop_args(node, not_op, right,
- MATCH_COMMUTATIVE | MATCH_MODE_NEUTRAL,
- new_bd_sparc_XNor_reg,
- new_bd_sparc_XNor_imm);
- }
-
- return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_Xor_reg,
- new_bd_sparc_Xor_imm);
+ /* x ^ ~y == ~(x ^ y), so a Not operand folds into XNor */
+ return gen_helper_bitop(node,
+ new_bd_sparc_Xor_reg,
+ new_bd_sparc_Xor_imm,
+ new_bd_sparc_XNor_reg,
+ new_bd_sparc_XNor_imm);
}
static ir_node *gen_Shl(ir_node *node)
return get_irn_mode(op);
}
-/**
- * Transform Cond nodes
- */
+/**
+ * Construct the address of @p entity + @p offset as a SetHi (upper bits)
+ * combined with an Or_imm (lower bits) — presumably the SPARC
+ * %hi/%lo sethi+or idiom; the linker/emitter resolves the split
+ * (TODO confirm against the SetHi/Or_imm emitters).
+ */
+static ir_node *make_address(dbg_info *dbgi, ir_node *block, ir_entity *entity,
+ int32_t offset)
+{
+ ir_node *hi = new_bd_sparc_SetHi(dbgi, block, entity, offset);
+ ir_node *low = new_bd_sparc_Or_imm(dbgi, block, hi, entity, offset);
+ be_dep_on_frame(hi);
+ return low;
+}
+
+/**
+ * Transform a Cond with an integer (non-mode_b) selector into a
+ * jump-table based SwitchJmp: create a private constant table entity,
+ * scale the selector, load the target address from the table and emit an
+ * indirect jump.
+ */
+static ir_node *gen_SwitchJmp(ir_node *node)
+{
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *block = be_transform_node(get_nodes_block(node));
+ ir_node *selector = get_Cond_selector(node);
+ ir_node *new_selector = be_transform_node(selector);
+ long switch_min = LONG_MAX;
+ long switch_max = LONG_MIN;
+ long default_pn = get_Cond_default_proj(node);
+ ir_entity *entity;
+ ir_node *table_address;
+ ir_node *index;
+ ir_node *load;
+ ir_node *address;
+ unsigned length;
+ const ir_edge_t *edge;
+
+ /* switch with smaller mode not implemented yet */
+ assert(get_mode_size_bits(get_irn_mode(selector)) == 32);
+
+ /* determine the range of non-default case numbers */
+ foreach_out_edge(node, edge) {
+ ir_node *proj = get_edge_src_irn(edge);
+ long pn = get_Proj_proj(proj);
+ if (pn == default_pn)
+ continue;
+
+ switch_min = pn<switch_min ? pn : switch_min;
+ switch_max = pn>switch_max ? pn : switch_max;
+ }
+ /* NOTE(review): this is max-min, not the number of cases (max-min+1);
+ * it is only used for the sanity limit below. It also assumes at
+ * least one non-default Proj exists, otherwise the subtraction of the
+ * LONG_MAX/LONG_MIN sentinels underflows — TODO confirm callers
+ * guarantee this. */
+ length = (unsigned long) (switch_max - switch_min);
+ if (length > 16000) {
+ panic("Size of switch %+F bigger than 16000", node);
+ }
+
+ entity = new_entity(NULL, id_unique("TBL%u"), get_unknown_type());
+ set_entity_visibility(entity, ir_visibility_private);
+ add_entity_linkage(entity, IR_LINKAGE_CONSTANT);
+
+ /* TODO: this code does not construct code to check for access
+ * out-of bounds of the jumptable yet. I think we should put this stuff
+ * into the switch_lowering phase to get some additional optimisations
+ * done. */
+
+ /* construct base address, biased by -switch_min entries so the raw
+ * selector can index it directly */
+ table_address = make_address(dbgi, block, entity,
+ -switch_min * get_mode_size_bytes(mode_gp));
+ /* scale index; NOTE(review): shift of 2 hard-codes 4-byte entries —
+ * presumably matching get_mode_size_bytes(mode_gp) above; verify. */
+ index = new_bd_sparc_Sll_imm(dbgi, block, new_selector, NULL, 2);
+ /* load target address from jumptable */
+ load = new_bd_sparc_Ld_reg(dbgi, block, table_address, index, new_NoMem(),
+ mode_gp);
+ address = new_r_Proj(load, mode_gp, pn_sparc_Ld_res);
+
+ return new_bd_sparc_SwitchJmp(dbgi, block, address, default_pn, entity);
+}
+
static ir_node *gen_Cond(ir_node *node)
{
ir_node *selector = get_Cond_selector(node);
// switch/case jumps
if (mode != mode_b) {
- panic("SwitchJump not implemented yet");
+ return gen_SwitchJmp(node);
}
// regular if/else jumps
*/
static ir_node *gen_Cmp(ir_node *node)
{
- ir_node *block = be_transform_node(get_nodes_block(node));
- ir_node *op1 = get_Cmp_left(node);
- ir_node *op2 = get_Cmp_right(node);
- ir_mode *cmp_mode = get_irn_mode(op1);
- dbg_info *dbgi = get_irn_dbg_info(node);
- ir_node *new_op1 = be_transform_node(op1);
- ir_node *new_op2 = be_transform_node(op2);
+ ir_node *op1 = get_Cmp_left(node);
+ ir_node *op2 = get_Cmp_right(node);
+ ir_mode *cmp_mode = get_irn_mode(op1);
assert(get_irn_mode(op2) == cmp_mode);
if (mode_is_float(cmp_mode)) {
- unsigned bits = get_mode_size_bits(cmp_mode);
+ ir_node *block = be_transform_node(get_nodes_block(node));
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *new_op1 = be_transform_node(op1);
+ ir_node *new_op2 = be_transform_node(op2);
+ unsigned bits = get_mode_size_bits(cmp_mode);
if (bits == 32) {
return new_bd_sparc_fcmp_s(dbgi, block, new_op1, new_op2, cmp_mode);
} else if (bits == 64) {
}
}
+ /* when we compare a bitop like and,or,... with 0 then we can directly use
+ * the bitopcc variant.
+ * Currently we only do this when we're the only user of the node...
+ */
+ if (is_Const(op2) && is_Const_null(op2) && get_irn_n_edges(op1) == 1) {
+ if (is_And(op1)) {
+ return gen_helper_bitop(op1,
+ new_bd_sparc_AndCCZero_reg,
+ new_bd_sparc_AndCCZero_imm,
+ new_bd_sparc_AndNCCZero_reg,
+ new_bd_sparc_AndNCCZero_imm);
+ } else if (is_Or(op1)) {
+ return gen_helper_bitop(op1,
+ new_bd_sparc_OrCCZero_reg,
+ new_bd_sparc_OrCCZero_imm,
+ new_bd_sparc_OrNCCZero_reg,
+ new_bd_sparc_OrNCCZero_imm);
+ } else if (is_Eor(op1)) {
+ return gen_helper_bitop(op1,
+ new_bd_sparc_XorCCZero_reg,
+ new_bd_sparc_XorCCZero_imm,
+ new_bd_sparc_XNorCCZero_reg,
+ new_bd_sparc_XNorCCZero_imm);
+ }
+ }
+
/* integer compare */
- new_op1 = gen_extension(dbgi, block, new_op1, cmp_mode);
- new_op2 = gen_extension(dbgi, block, new_op2, cmp_mode);
- return new_bd_sparc_Cmp_reg(dbgi, block, new_op1, new_op2);
+ return gen_helper_binop_args(node, op1, op2, MATCH_NONE,
+ new_bd_sparc_Cmp_reg, new_bd_sparc_Cmp_imm);
}
/**
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_node *new_block = be_transform_node(block);
- ir_node *hi = new_bd_sparc_SetHi(dbgi, new_block, entity, 0);
- ir_node *low = new_bd_sparc_Or_imm(dbgi, new_block, hi, entity, 0);
- be_dep_on_frame(hi);
-
- return low;
+ return make_address(dbgi, new_block, entity, 0);
}
static ir_node *create_fftof(dbg_info *dbgi, ir_node *block, ir_node *op,
{
be_start_transform_setup();
- be_set_transform_function(op_Abs, gen_Abs);
be_set_transform_function(op_Add, gen_Add);
be_set_transform_function(op_And, gen_And);
be_set_transform_function(op_Call, gen_Call);