* @brief This file implements the IR transformation from Firm into
* ia32-Firm.
* @author Christian Wuerdig, Matthias Braun
- * @version $Id$
*/
#include "config.h"
{
ir_graph *irg = current_ir_graph;
- if (be_get_irg_options(irg)->pic) {
+ if (be_options.pic) {
const arch_env_t *arch_env = be_get_irg_arch_env(irg);
return arch_env->impl->get_pic_base(irg);
}
case 0: mode = mode_Iu; break;
case 1: mode = mode_Lu; break;
case 2: mode = mode_F; break;
- default: panic("internal compiler error (ia32_gen_fp_known_const)");
+ default: panic("internal compiler error");
}
tv = new_tarval_from_str(cnst_str, strlen(cnst_str), mode);
* input here, for unary operations use NULL).
*/
static int ia32_use_source_address_mode(ir_node *block, ir_node *node,
- ir_node *other, ir_node *other2, match_flags_t flags)
+ ir_node *other, ir_node *other2,
+ match_flags_t flags)
{
ir_node *load;
+ ir_mode *mode;
long pn;
/* float constants are always available */
if (is_Const(node)) {
- ir_mode *mode = get_irn_mode(node);
+ mode = get_irn_mode(node);
if (mode_is_float(mode)) {
+ ir_tarval *tv = get_Const_tarval(node);
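+ /* refuse constants that cannot be represented losslessly as a double,
+ * folding them as a memory operand would lose precision */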
+ if (!tarval_ieee754_can_conv_lossless(tv, mode_D))
+ return 0;
if (ia32_cg_config.use_sse2) {
if (is_simple_sse_Const(node))
return 0;
return 0;
return 1;
}
+ return 0;
}
if (!is_Proj(node))
return 0;
if (get_nodes_block(load) != block)
return 0;
+ mode = get_irn_mode(node);
+ /* we can't fold mode_E AM */
+ if (mode == ia32_mode_E)
+ return 0;
/* we only use address mode if we're the only user of the load */
if (get_irn_n_edges(node) != (flags & match_two_users ? 2 : 1))
return 0;
if (!is_Conv(node))
return 0;
- /* we only want to skip the conv when we're the only user
- * (because this test is used in the context of address-mode selection
- * and we don't want to use address mode for multiple users) */
- if (get_irn_n_edges(node) > 1)
- return 0;
-
src_mode = get_irn_mode(get_Conv_op(node));
dest_mode = get_irn_mode(node);
return
/** Skip all Down-Conv's on a given node and return the resulting node. */
ir_node *ia32_skip_downconv(ir_node *node)
{
- while (is_downconv(node))
+ while (is_downconv(node)) {
+ /* we only want to skip the conv when we're the only user
+ * (because this test is used in the context of address-mode selection
+ * and we don't want to use address mode for multiple users) */
+ if (get_irn_n_edges(node) > 1)
+ break;
+
node = get_Conv_op(node);
+ }
return node;
}
/** Skip all signedness convs */
static ir_node *ia32_skip_sameconv(ir_node *node)
{
- while (is_sameconv(node))
+ while (is_sameconv(node)) {
node = get_Conv_op(node);
+ }
return node;
}
-static ir_node *create_upconv(ir_node *node, ir_node *orig_node)
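+/** Create a sign extension of @p node to 32 bit (mode_Is). */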
+static ir_node *transform_sext(ir_node *node, ir_node *orig_node)
{
- ir_mode *mode = get_irn_mode(node);
- ir_node *block;
- ir_mode *tgt_mode;
- dbg_info *dbgi;
+ ir_mode *mode = get_irn_mode(node);
+ ir_node *block = get_nodes_block(node);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ return create_I2I_Conv(mode, mode_Is, dbgi, block, node, orig_node);
+}
+
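+/** Create a zero extension of @p node to 32 bit (mode_Iu). */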
+static ir_node *transform_zext(ir_node *node, ir_node *orig_node)
+{
+ ir_mode *mode = get_irn_mode(node);
+ ir_node *block = get_nodes_block(node);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ /* normalize to an unsigned mode */
+ switch (get_mode_size_bits(mode)) {
+ case 8: mode = mode_Bu; break;
+ case 16: mode = mode_Hu; break;
+ default:
+ panic("ia32: invalid mode in zest: %+F", node);
+ }
+ return create_I2I_Conv(mode, mode_Iu, dbgi, block, node, orig_node);
+}
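+
+/** Extend @p node to 32 bit, sign- or zero-extending depending on the
+ * signedness of its mode. */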
+static ir_node *transform_upconv(ir_node *node, ir_node *orig_node)
+{
+ ir_mode *mode = get_irn_mode(node);
if (mode_is_signed(mode)) {
- tgt_mode = mode_Is;
+ return transform_sext(node, orig_node);
} else {
- tgt_mode = mode_Iu;
+ return transform_zext(node, orig_node);
}
- block = get_nodes_block(node);
- dbgi = get_irn_dbg_info(node);
-
- return create_I2I_Conv(mode, tgt_mode, dbgi, block, node, orig_node);
}
/**
}
mode = get_irn_mode(op2);
- if (flags & match_upconv_32 && get_mode_size_bits(mode) != 32) {
- new_op1 = (op1 == NULL ? NULL : create_upconv(op1, NULL));
- if (new_op2 == NULL)
- new_op2 = create_upconv(op2, NULL);
- am->ls_mode = mode_Iu;
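+ /* operands narrower than 32 bit are widened according to the match
+ * flags: match_upconv sign- or zero-extends depending on the operand
+ * mode, match_zero_ext always zero-extends and match_mode_neutral
+ * uses the value unchanged */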
+ if (get_mode_size_bits(mode) != 32
+ && (flags & (match_mode_neutral | match_upconv | match_zero_ext))) {
+ if (flags & match_upconv) {
+ new_op1 = (op1 == NULL ? NULL : transform_upconv(op1, op1));
+ if (new_op2 == NULL)
+ new_op2 = transform_upconv(op2, op2);
+ } else if (flags & match_zero_ext) {
+ new_op1 = (op1 == NULL ? NULL : transform_zext(op1, op1));
+ if (new_op2 == NULL)
+ new_op2 = transform_zext(op2, op2);
+ } else {
+ new_op1 = (op1 == NULL ? NULL : be_transform_node(op1));
+ if (new_op2 == NULL)
+ new_op2 = be_transform_node(op2);
+ assert(flags & match_mode_neutral);
+ }
+ mode = mode_Iu;
} else {
new_op1 = (op1 == NULL ? NULL : be_transform_node(op1));
if (new_op2 == NULL)
new_op2 = be_transform_node(op2);
- am->ls_mode = (flags & match_mode_neutral) ? mode_Iu : mode;
}
+ am->ls_mode = mode;
}
if (addr->base == NULL)
addr->base = noreg_GP;
return initial_fpcw;
}
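+
+/** Skip float up-Convs (from an equal or smaller float mode), as long as
+ * the Conv has no other users, so the address mode matcher still sees
+ * correct user counts. */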
+static ir_node *skip_float_upconv(ir_node *node)
+{
+ ir_mode *mode = get_irn_mode(node);
+ assert(mode_is_float(mode));
+
+ while (is_Conv(node)) {
+ ir_node *pred = get_Conv_op(node);
+ ir_mode *pred_mode = get_irn_mode(pred);
+
+ /*
+ * suboptimal, but without this check the address mode matcher
+ * can incorrectly think that something has only 1 user
+ */
+ if (get_irn_n_edges(node) > 1)
+ break;
+
+ if (!mode_is_float(pred_mode)
+ || get_mode_size_bits(pred_mode) > get_mode_size_bits(mode))
+ break;
+ node = pred;
+ mode = pred_mode;
+ }
+ return node;
+}
+
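+/** The x87 transformation assumes that all float values have already been
+ * lowered to the x86 extended float mode; complain otherwise. */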
+static void check_x87_floatmode(ir_mode *mode)
+{
+ if (mode != ia32_mode_E) {
+ panic("ia32: x87 only supports x86 extended float mode");
+ }
+}
+
/**
* Construct a standard binary operation, set AM and immediate if required.
*
static ir_node *gen_binop_x87_float(ir_node *node, ir_node *op1, ir_node *op2,
construct_binop_float_func *func)
{
- ir_mode *mode = get_irn_mode(node);
dbg_info *dbgi;
- ir_node *block, *new_block, *new_node;
+ ir_node *block;
+ ir_node *new_block;
+ ir_node *new_node;
ia32_address_mode_t am;
ia32_address_t *addr = &am.addr;
ia32_x87_attr_t *attr;
/* All operations are considered commutative, because there are reverse
* variants */
- match_flags_t flags = match_commutative;
+ match_flags_t flags = match_commutative | match_am;
+ ir_mode *mode
+ = is_Div(node) ? get_Div_resmode(node) : get_irn_mode(node);
+ check_x87_floatmode(mode);
- /* happens for div nodes... */
- if (mode == mode_T) {
- if (is_Div(node))
- mode = get_Div_resmode(node);
- else
- panic("can't determine mode");
- }
-
- /* cannot use address mode with long double on x87 */
- if (get_mode_size_bits(mode) <= 64)
- flags |= match_am;
+ op1 = skip_float_upconv(op1);
+ op2 = skip_float_upconv(op2);
block = get_nodes_block(node);
match_arguments(&am, block, op1, op2, NULL, flags);
construct_shift_func *func,
match_flags_t flags)
{
- dbg_info *dbgi;
- ir_node *block, *new_block, *new_op1, *new_op2, *new_node;
- ir_mode *mode = get_irn_mode(node);
+ ir_mode *mode = get_irn_mode(node);
assert(! mode_is_float(mode));
assert(flags & match_immediate);
- assert((flags & ~(match_mode_neutral | match_immediate)) == 0);
+ assert((flags & ~(match_mode_neutral | match_zero_ext | match_upconv | match_immediate)) == 0);
- if (get_mode_modulo_shift(mode) != 32)
+ if (get_mode_modulo_shift(mode) != 32) {
+ /* TODO: implement special cases for non-modulo shifts */
panic("modulo shift!=32 not supported by ia32 backend");
+ }
+ ir_node *new_op1;
+ ir_node *new_op2;
if (flags & match_mode_neutral) {
op1 = ia32_skip_downconv(op1);
new_op1 = be_transform_node(op1);
- } else if (get_mode_size_bits(mode) != 32) {
- new_op1 = create_upconv(op1, node);
} else {
- new_op1 = be_transform_node(op1);
+ op1 = ia32_skip_sameconv(op1);
+ if (get_mode_size_bits(mode) != 32) {
+ if (flags & match_upconv) {
+ new_op1 = transform_upconv(op1, node);
+ } else if (flags & match_zero_ext) {
+ new_op1 = transform_zext(op1, node);
+ } else {
+ /* match_mode_neutral not handled here because it makes no
+ * sense for shift operations */
+ panic("ia32 code selection failed for %+F", node);
+ }
+ } else {
+ new_op1 = be_transform_node(op1);
+ }
}
/* the shift amount can be any mode that is bigger than 5 bits, since all
}
new_op2 = create_immediate_or_transform(op2, 0);
- dbgi = get_irn_dbg_info(node);
- block = get_nodes_block(node);
- new_block = be_transform_node(block);
- new_node = func(dbgi, new_block, new_op1, new_op2);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *block = get_nodes_block(node);
+ ir_node *new_block = be_transform_node(block);
+ ir_node *new_node = func(dbgi, new_block, new_op1, new_op2);
SET_IA32_ORIG_NODE(new_node, node);
/* lowered shift instruction may have a dependency operand, handle it here */
ia32_mark_non_am(node);
- op2 = ia32_skip_downconv(op2);
- op1 = ia32_skip_downconv(op1);
-
/**
* Rules for an Add:
* 0. Immediate Trees (example Add(Symconst, Const) -> Const)
panic("invalid divmod node %+F", node);
}
- match_arguments(&am, block, op1, op2, NULL, match_am | match_upconv_32);
+ match_arguments(&am, block, op1, op2, NULL, match_am | match_upconv);
/* Beware: We don't need a Sync, if the memory predecessor of the Div node
is the memory of the consumed address. We can have only the second op as address
ir_node *left = get_Shr_left(node);
ir_node *right = get_Shr_right(node);
- return gen_shift_binop(node, left, right, new_bd_ia32_Shr, match_immediate);
+ return gen_shift_binop(node, left, right, new_bd_ia32_Shr,
+ match_immediate | match_zero_ext);
}
-
-
/**
* Creates an ia32 Sar.
*
}
}
- return gen_shift_binop(node, left, right, new_bd_ia32_Sar, match_immediate);
+ return gen_shift_binop(node, left, right, new_bd_ia32_Sar,
+ match_immediate | match_upconv);
}
set_ia32_op_type(new_node, ia32_AddrModeS);
set_ia32_ls_mode(new_node, mode);
} else {
+ check_x87_floatmode(mode);
new_node = new_bd_ia32_vfchs(dbgi, block, new_op);
}
} else {
/* TODO, implement -Abs case */
assert(!negate);
} else {
+ check_x87_floatmode(mode);
new_node = new_bd_ia32_vfabs(dbgi, new_block, new_op);
SET_IA32_ORIG_NODE(new_node, node);
if (negate) {
}
static ia32_condition_code_t relation_to_condition_code(ir_relation relation,
- ir_mode *mode)
+ ir_mode *mode,
+ bool overflow_possible)
{
if (mode_is_float(mode)) {
switch (relation) {
case ir_relation_unordered_equal:
case ir_relation_equal: return ia32_cc_equal;
case ir_relation_unordered_less:
- case ir_relation_less: return ia32_cc_less;
+ case ir_relation_less:
+ return overflow_possible ? ia32_cc_less : ia32_cc_sign;
case ir_relation_unordered_less_equal:
case ir_relation_less_equal: return ia32_cc_less_equal;
case ir_relation_unordered_greater:
case ir_relation_greater: return ia32_cc_greater;
case ir_relation_unordered_greater_equal:
- case ir_relation_greater_equal: return ia32_cc_greater_equal;
+ case ir_relation_greater_equal:
+ return overflow_possible ? ia32_cc_greater_equal : ia32_cc_not_sign;
case ir_relation_unordered_less_greater:
case ir_relation_less_greater: return ia32_cc_not_equal;
case ir_relation_less_equal_greater:
}
}
-static ir_node *get_flags_node_cmp(ir_node *cmp, ia32_condition_code_t *cc_out)
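+/**
+ * Transform a Cmp into a flags-producing node and return the condition
+ * code to test in @p cc_out.
+ */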
+static ir_node *get_flags_node(ir_node *cmp, ia32_condition_code_t *cc_out)
{
/* must have a Cmp as input */
ir_relation relation = get_Cmp_relation(cmp);
- ir_relation possible;
ir_node *l = get_Cmp_left(cmp);
ir_node *r = get_Cmp_right(cmp);
ir_mode *mode = get_irn_mode(l);
+ bool overflow_possible;
ir_node *flags;
/* check for bit-test */
- if (ia32_cg_config.use_bt && (relation == ir_relation_equal
- || (mode_is_signed(mode) && relation == ir_relation_less_greater)
- || (!mode_is_signed(mode) && ((relation & ir_relation_greater_equal) == ir_relation_greater)))
- && is_And(l)) {
+ if (ia32_cg_config.use_bt
+ && (relation == ir_relation_equal
+ || (mode_is_signed(mode) && relation == ir_relation_less_greater)
+ || (!mode_is_signed(mode) && ((relation & ir_relation_greater_equal) == ir_relation_greater)))
+ && is_And(l)) {
ir_node *la = get_And_left(l);
ir_node *ra = get_And_right(l);
if (is_Shl(ra)) {
if (is_Const_1(c) && is_Const_0(r)) {
/* (1 << n) & ra) */
ir_node *n = get_Shl_right(la);
- flags = gen_bt(cmp, ra, n);
+ flags = gen_bt(cmp, ra, n);
/* the bit is copied into the CF flag */
if (relation & ir_relation_equal)
*cc_out = ia32_cc_above_equal; /* test for CF=0 */
}
}
- /* the middle-end tries to eliminate impossible relations, so a ptr != 0
+ /* the middle-end tries to eliminate impossible relations, so a ptr <> 0
* test becomes ptr > 0. But for x86 an equal comparison is preferable to
* a >0 (we can sometimes eliminate the cmp in favor of flags produced by
- * a predecessor node). So add the < bit */
- possible = ir_get_possible_cmp_relations(l, r);
- if (((relation & ir_relation_less) && !(possible & ir_relation_greater))
- || ((relation & ir_relation_greater) && !(possible & ir_relation_less)))
- relation |= ir_relation_less_greater;
+ * a predecessor node). So add the < bit.
+ * (Note that we do not want to produce <=> (which can happen for
+ * unoptimized code), because no x86 flag can represent that.) */
+ if (!(relation & ir_relation_equal) && relation & ir_relation_less_greater)
+ relation |= get_negated_relation(ir_get_possible_cmp_relations(l, r)) & ir_relation_less_greater;
+
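+ /* a compare against constant 0 cannot overflow, which lets
+ * relation_to_condition_code() use the sign flag for less/greater_equal */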
+ overflow_possible = true;
+ if (is_Const(r) && is_Const_null(r))
+ overflow_possible = false;
/* just do a normal transformation of the Cmp */
- *cc_out = relation_to_condition_code(relation, mode);
+ *cc_out = relation_to_condition_code(relation, mode, overflow_possible);
flags = be_transform_node(cmp);
return flags;
}
-/**
- * Transform a node returning a "flag" result.
- *
- * @param node the node to transform
- * @param cc_out the compare mode to use
- */
-static ir_node *get_flags_node(ir_node *node, ia32_condition_code_t *cc_out)
-{
- assert(is_Cmp(node));
- return get_flags_node_cmp(node, cc_out);
-}
-
/**
* Transforms a Load.
*
addr.mem = be_transform_node(mem);
if (mode_is_float(mode)) {
- /* Convs (and strict-Convs) before stores are unnecessary if the mode
- is the same. */
- while (is_Conv(val) && mode == get_irn_mode(val)) {
- ir_node *op = get_Conv_op(val);
- if (!mode_is_float(get_irn_mode(op)))
- break;
- val = op;
- }
new_val = be_transform_node(val);
if (ia32_cg_config.use_sse2) {
new_node = new_bd_ia32_xStore(dbgi, new_block, addr.base,
addr.index, addr.mem, new_val, mode);
}
} else if (!ia32_cg_config.use_sse2 && is_float_to_int_conv(val)) {
- val = get_Conv_op(val);
-
- /* TODO: is this optimisation still necessary at all (middleend)? */
- /* We can skip ALL float->float up-Convs (and strict-up-Convs) before
- * stores. */
- while (is_Conv(val)) {
- ir_node *op = get_Conv_op(val);
- if (!mode_is_float(get_irn_mode(op)))
- break;
- if (get_mode_size_bits(get_irn_mode(op)) > get_mode_size_bits(get_irn_mode(val)))
- break;
- val = op;
- }
+ val = get_Conv_op(val);
new_val = be_transform_node(val);
new_node = gen_vfist(dbgi, new_block, addr.base, addr.index, addr.mem, new_val);
} else {
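+ /* the store only writes the lower bits anyway, so down-Convs to a mode
+ * at least as wide as the stored mode can be skipped */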
+ unsigned dest_bits = get_mode_size_bits(mode);
+ while (is_downconv(val)
+ && get_mode_size_bits(get_irn_mode(val)) >= dest_bits) {
+ val = get_Conv_op(val);
+ }
new_val = create_immediate_or_transform(val, 0);
assert(mode != mode_b);
- if (get_mode_size_bits(mode) == 8) {
+ if (dest_bits == 8) {
new_node = new_bd_ia32_Store8Bit(dbgi, new_block, addr.base,
addr.index, addr.mem, new_val);
} else {
ir_node *new_node;
ir_entity *entity;
- assert(get_mode_size_bits(get_irn_mode(sel)) <= 32);
- if (get_mode_size_bits(sel_mode) != 32)
- new_sel = create_upconv(new_sel, sel);
+ assert(get_mode_size_bits(sel_mode) <= 32);
+ assert(!mode_is_float(sel_mode));
+ sel = ia32_skip_sameconv(sel);
+ if (get_mode_size_bits(sel_mode) < 32)
+ new_sel = transform_upconv(sel, node);
entity = new_entity(NULL, id_unique("TBL%u"), get_unknown_type());
set_entity_visibility(entity, ir_visibility_private);
ir_node *left = get_Cmp_left(node);
ir_node *new_left = be_transform_node(left);
ir_node *right = get_Cmp_right(node);
+ ir_mode *cmp_mode = get_irn_mode(left);
ir_node *new_right;
ir_node *new_node;
+ check_x87_floatmode(cmp_mode);
if (ia32_cg_config.use_fucomi) {
new_right = be_transform_node(right);
set_ia32_commutative(new_node);
SET_IA32_ORIG_NODE(new_node, node);
} else {
- if (ia32_cg_config.use_ftst && is_Const_0(right)) {
+ if (is_Const_0(right)) {
new_node = new_bd_ia32_vFtstFnstsw(dbgi, new_block, new_left, 0);
} else {
new_right = be_transform_node(right);
new_node = new_bd_ia32_vFucomFnstsw(dbgi, new_block, new_left, new_right, 0);
+ set_ia32_commutative(new_node);
}
- set_ia32_commutative(new_node);
-
SET_IA32_ORIG_NODE(new_node, node);
new_node = new_bd_ia32_Sahf(dbgi, new_block, new_node);
return new_node;
}
-/**
- * returns true if it is assured, that the upper bits of a node are "clean"
- * which means for a 16 or 8 bit value, that the upper bits in the register
- * are 0 for unsigned and a copy of the last significant bit for signed
- * numbers.
- */
-static bool upper_bits_clean(ir_node *transformed_node, ir_mode *mode)
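+/** Checks whether the result of a Mux has clean upper bits; registered as
+ * callback via be_set_upper_bits_clean_function(). */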
+static bool ia32_mux_upper_bits_clean(const ir_node *node, ir_mode *mode)
{
- assert(ia32_mode_needs_gp_reg(mode));
- if (get_mode_size_bits(mode) >= 32)
- return true;
-
- if (is_Proj(transformed_node))
- return upper_bits_clean(get_Proj_pred(transformed_node), mode);
-
- switch (get_ia32_irn_opcode(transformed_node)) {
- case iro_ia32_Conv_I2I:
- case iro_ia32_Conv_I2I8Bit: {
- ir_mode *smaller_mode = get_ia32_ls_mode(transformed_node);
- if (mode_is_signed(smaller_mode) != mode_is_signed(mode))
- return false;
- if (get_mode_size_bits(smaller_mode) > get_mode_size_bits(mode))
- return false;
-
- return true;
- }
-
- case iro_ia32_Shr:
- if (mode_is_signed(mode)) {
- return false; /* TODO handle signed modes */
- } else {
- ir_node *right = get_irn_n(transformed_node, n_ia32_Shr_count);
- if (is_ia32_Immediate(right) || is_ia32_Const(right)) {
- const ia32_immediate_attr_t *attr
- = get_ia32_immediate_attr_const(right);
- if (attr->symconst == 0 &&
- (unsigned)attr->offset >= 32 - get_mode_size_bits(mode)) {
- return true;
- }
- }
- return upper_bits_clean(get_irn_n(transformed_node, n_ia32_Shr_val), mode);
- }
-
- case iro_ia32_Sar:
- /* TODO too conservative if shift amount is constant */
- return upper_bits_clean(get_irn_n(transformed_node, n_ia32_Sar_val), mode);
-
- case iro_ia32_And:
- if (!mode_is_signed(mode)) {
- return
- upper_bits_clean(get_irn_n(transformed_node, n_ia32_And_right), mode) ||
- upper_bits_clean(get_irn_n(transformed_node, n_ia32_And_left), mode);
- }
- /* TODO if one is known to be zero extended, then || is sufficient */
- /* FALLTHROUGH */
- case iro_ia32_Or:
- case iro_ia32_Xor:
- return
- upper_bits_clean(get_irn_n(transformed_node, n_ia32_binary_right), mode) &&
- upper_bits_clean(get_irn_n(transformed_node, n_ia32_binary_left), mode);
-
- case iro_ia32_Const:
- case iro_ia32_Immediate: {
- const ia32_immediate_attr_t *attr =
- get_ia32_immediate_attr_const(transformed_node);
- if (mode_is_signed(mode)) {
- long shifted = attr->offset >> (get_mode_size_bits(mode) - 1);
- return shifted == 0 || shifted == -1;
- } else {
- unsigned long shifted = (unsigned long)attr->offset;
- shifted >>= get_mode_size_bits(mode)-1;
- shifted >>= 1;
- return shifted == 0;
- }
- }
-
- default:
- return false;
+ ir_node *mux_true = get_Mux_true(node);
+ ir_node *mux_false = get_Mux_false(node);
+ ir_mode *mux_mode = get_irn_mode(node);
+ /* mux nodes which get transformed to the set instruction are not clean */
+ if (is_Const(mux_true) && is_Const(mux_false)
+ && get_mode_size_bits(mux_mode) == 8) {
+ return false;
}
+ return be_upper_bits_clean(mux_true, mode)
+ && be_upper_bits_clean(mux_false, mode);
}
/**
assert(get_irn_mode(and_left) == cmp_mode);
match_arguments(&am, block, and_left, and_right, NULL,
- match_commutative |
- match_am | match_8bit_am | match_16bit_am |
- match_am_and_immediates | match_immediate);
+ match_commutative |
+ match_am | match_8bit_am | match_16bit_am |
+ match_am_and_immediates | match_immediate);
/* use 32bit compare mode if possible since the opcode is smaller */
- if (upper_bits_clean(am.new_op1, cmp_mode) &&
- upper_bits_clean(am.new_op2, cmp_mode)) {
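+ /* (only without address mode: a folded memory operand must keep its
+ * original access width) */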
+ if (am.op_type == ia32_Normal &&
+ be_upper_bits_clean(and_left, cmp_mode) &&
+ be_upper_bits_clean(and_right, cmp_mode)) {
cmp_mode = mode_is_signed(cmp_mode) ? mode_Is : mode_Iu;
}
if (get_mode_size_bits(cmp_mode) == 8) {
new_node = new_bd_ia32_Test8Bit(dbgi, new_block, addr->base,
- addr->index, addr->mem, am.new_op1, am.new_op2, am.ins_permuted);
+ addr->index, addr->mem,
+ am.new_op1, am.new_op2,
+ am.ins_permuted);
} else {
- new_node = new_bd_ia32_Test(dbgi, new_block, addr->base, addr->index,
- addr->mem, am.new_op1, am.new_op2, am.ins_permuted);
+ new_node = new_bd_ia32_Test(dbgi, new_block, addr->base,
+ addr->index, addr->mem, am.new_op1,
+ am.new_op2, am.ins_permuted);
}
} else {
/* Cmp(left, right) */
match_arguments(&am, block, left, right, NULL,
- match_commutative | match_am | match_8bit_am |
- match_16bit_am | match_am_and_immediates |
- match_immediate);
+ match_commutative |
+ match_am | match_8bit_am | match_16bit_am |
+ match_am_and_immediates | match_immediate);
/* use 32bit compare mode if possible since the opcode is smaller */
- if (upper_bits_clean(am.new_op1, cmp_mode) &&
- upper_bits_clean(am.new_op2, cmp_mode)) {
+ if (am.op_type == ia32_Normal &&
+ be_upper_bits_clean(left, cmp_mode) &&
+ be_upper_bits_clean(right, cmp_mode)) {
cmp_mode = mode_is_signed(cmp_mode) ? mode_Is : mode_Iu;
}
am.new_op2, am.ins_permuted);
} else {
new_node = new_bd_ia32_Cmp(dbgi, new_block, addr->base, addr->index,
- addr->mem, am.new_op1, am.new_op2, am.ins_permuted);
+ addr->mem, am.new_op1, am.new_op2,
+ am.ins_permuted);
}
}
set_am_attributes(new_node, &am);
dbgi = get_irn_dbg_info(psi);
sbb = new_bd_ia32_Sbb0(dbgi, block, eflags);
+ set_ia32_ls_mode(sbb, mode_Iu);
notn = new_bd_ia32_Not(dbgi, block, sbb);
new_node = new_bd_ia32_And(dbgi, block, noreg_GP, noreg_GP, nomem, new_node, notn);
+ set_ia32_ls_mode(new_node, mode_Iu);
set_ia32_commutative(new_node);
return new_node;
}
SETCC_TR_NOT,
SETCC_TR_AND,
SETCC_TR_SET,
- SETCC_TR_SBB,
};
typedef struct setcc_transform {
case SETCC_TR_SET:
new_node = create_set_32bit(dbgi, new_block, flags, res.cc, node);
break;
- case SETCC_TR_SBB:
- new_node = new_bd_ia32_Sbb0(dbgi, new_block, flags);
- break;
default:
panic("unknown setcc transform");
}
}
/**
- * Creates a x87 strict Conv by placing a Store and a Load
+ * Creates a x87 Conv by placing a Store and a Load
*/
-static ir_node *gen_x87_strict_conv(ir_mode *tgt_mode, ir_node *node)
+static ir_node *gen_x87_conv(ir_mode *tgt_mode, ir_node *node)
{
ir_node *block = get_nodes_block(node);
ir_graph *irg = get_Block_irg(block);
if (possible_int_mode_for_fp(src_mode)) {
ia32_address_mode_t am;
- match_arguments(&am, src_block, NULL, op, NULL, match_am | match_try_am | match_16bit_am);
+ match_arguments(&am, src_block, NULL, op, NULL, match_am | match_try_am | match_16bit_am | match_upconv);
if (am.op_type == ia32_AddrModeS) {
ia32_address_t *addr = &am.addr;
/* first convert to 32 bit signed if necessary */
if (get_mode_size_bits(src_mode) < 32) {
- if (!upper_bits_clean(new_op, src_mode)) {
+ if (!be_upper_bits_clean(op, src_mode)) {
new_op = create_Conv_I2I(dbgi, block, noreg_GP, noreg_GP, nomem, new_op, src_mode);
SET_IA32_ORIG_NODE(new_op, node);
}
{
ir_node *new_block = be_transform_node(block);
ir_node *new_node;
- ir_mode *smaller_mode;
ia32_address_mode_t am;
ia32_address_t *addr = &am.addr;
(void) node;
- if (get_mode_size_bits(src_mode) < get_mode_size_bits(tgt_mode)) {
- smaller_mode = src_mode;
- } else {
- smaller_mode = tgt_mode;
- }
+ assert(get_mode_size_bits(src_mode) < get_mode_size_bits(tgt_mode));
#ifdef DEBUG_libfirm
if (is_Const(op)) {
}
#endif
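+ /* if the upper bits are already clean the value can simply be used in
+ * the wider mode and no Conv instruction is necessary */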
+ if (be_upper_bits_clean(op, src_mode)) {
+ return be_transform_node(op);
+ }
+
match_arguments(&am, block, NULL, op, NULL,
match_am | match_8bit_am | match_16bit_am);
- if (upper_bits_clean(am.new_op2, smaller_mode)) {
- /* unnecessary conv. in theory it shouldn't have been AM */
- assert(is_ia32_NoReg_GP(addr->base));
- assert(is_ia32_NoReg_GP(addr->index));
- assert(is_NoMem(addr->mem));
- assert(am.addr.offset == 0);
- assert(am.addr.symconst_ent == NULL);
- return am.new_op2;
- }
-
new_node = create_Conv_I2I(dbgi, new_block, addr->base, addr->index,
- addr->mem, am.new_op2, smaller_mode);
+ addr->mem, am.new_op2, src_mode);
set_am_attributes(new_node, &am);
/* match_arguments assume that out-mode = in-mode, this isn't true here
* so fix it */
- set_ia32_ls_mode(new_node, smaller_mode);
+ set_ia32_ls_mode(new_node, src_mode);
SET_IA32_ORIG_NODE(new_node, node);
new_node = fix_mem_proj(new_node, &am);
return new_node;
}
if (src_mode == tgt_mode) {
- if (get_Conv_strict(node)) {
- if (ia32_cg_config.use_sse2) {
- /* when we are in SSE mode, we can kill all strict no-op conversion */
- return be_transform_node(op);
- }
- } else {
- /* this should be optimized already, but who knows... */
- DEBUG_ONLY(ir_fprintf(stderr, "Debug warning: conv %+F is pointless\n", node);)
+ /* this should be optimized already, but who knows... */
+ DEBUG_ONLY(ir_fprintf(stderr, "Debug warning: conv %+F is pointless\n", node);)
DB((dbg, LEVEL_1, "killed Conv(mode, mode) ..."));
- return be_transform_node(op);
- }
+ return be_transform_node(op);
}
if (mode_is_float(src_mode)) {
nomem, new_op);
set_ia32_ls_mode(res, tgt_mode);
} else {
- if (get_Conv_strict(node)) {
- /* if fp_no_float_fold is not set then we assume that we
- * don't have any float operations in a non
- * mode_float_arithmetic mode and can skip strict upconvs */
- if (src_bits < tgt_bits) {
- DB((dbg, LEVEL_1, "killed Conv(float, float) ..."));
- return new_op;
- } else {
- res = gen_x87_strict_conv(tgt_mode, new_op);
- SET_IA32_ORIG_NODE(get_Proj_pred(res), node);
- return res;
- }
+ if (src_bits < tgt_bits) {
+ DB((dbg, LEVEL_1, "killed Conv(float, float) ..."));
+ return new_op;
+ } else {
+ res = gen_x87_conv(tgt_mode, new_op);
+ SET_IA32_ORIG_NODE(get_Proj_pred(res), node);
+ return res;
}
- DB((dbg, LEVEL_1, "killed Conv(float, float) ..."));
- return new_op;
}
} else {
/* ... to int */
unsigned float_mantissa = get_mode_mantissa_size(tgt_mode);
res = gen_x87_gp_to_fp(node, src_mode);
- /* we need a strict-Conv, if the int mode has more bits than the
+ /* we need a float-conv, if the int mode has more bits than the
* float mantissa */
if (float_mantissa < int_mantissa) {
- res = gen_x87_strict_conv(tgt_mode, res);
+ res = gen_x87_conv(tgt_mode, res);
SET_IA32_ORIG_NODE(get_Proj_pred(res), node);
}
return res;
return be_transform_node(op);
} else {
/* to int */
- if (src_bits == tgt_bits) {
+ if (src_bits >= tgt_bits) {
DB((dbg, LEVEL_1, "omitting unnecessary Conv(%+F, %+F) ...",
src_mode, tgt_mode));
return be_transform_node(op);
return new_node;
}
-/**
- * Change some phi modes
- */
static ir_node *gen_Phi(ir_node *node)
{
+ ir_mode *mode = get_irn_mode(node);
const arch_register_req_t *req;
- ir_node *block = be_transform_node(get_nodes_block(node));
- ir_graph *irg = current_ir_graph;
- dbg_info *dbgi = get_irn_dbg_info(node);
- ir_mode *mode = get_irn_mode(node);
- ir_node *phi;
-
if (ia32_mode_needs_gp_reg(mode)) {
/* we shouldn't have any 64bit stuff around anymore */
assert(get_mode_size_bits(mode) <= 32);
req = arch_no_register_req;
}
- /* phi nodes allow loops, so we use the old arguments for now
- * and fix this later */
- phi = new_ir_node(dbgi, irg, block, op_Phi, mode, get_irn_arity(node),
- get_irn_in(node) + 1);
- copy_node_attr(irg, node, phi);
- be_duplicate_deps(node, phi);
-
- arch_set_irn_register_req_out(phi, 0, req);
-
- be_enqueue_preds(node);
-
- return phi;
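+ /* be_transform_phi() keeps the old Phi arguments for now (Phis may be
+ * part of loops) and enqueues the predecessors to be fixed up later */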
+ return be_transform_phi(node, req);
}
static ir_node *gen_Jmp(ir_node *node)
assert(get_irn_mode(op) == mode_P);
- match_arguments(&am, block, NULL, op, NULL, match_am | match_immediate);
+ match_arguments(&am, block, NULL, op, NULL,
+ match_am | match_immediate | match_upconv);
new_node = new_bd_ia32_IJmp(dbgi, new_block, addr->base, addr->index,
- addr->mem, am.new_op2);
+ addr->mem, am.new_op2);
set_am_attributes(new_node, &am);
SET_IA32_ORIG_NODE(new_node, node);
ir_node *mem_high;
if (ia32_cg_config.use_sse2) {
- panic("ia32_l_LLtoFloat not implemented for SSE2");
+ panic("not implemented for SSE2");
}
/* do a store */
if (pn == pn_Store_M) {
return new_pred;
}
- panic("exception control flow for gen_float_const_Store not implemented yet");
+ panic("exception control flow not implemented yet");
} else if (get_ia32_op_type(new_pred) == ia32_AddrModeD) {
/* destination address mode */
if (pn == pn_Store_M) {
/* special case for PIC trampoline calls */
old_no_pic_adjust = ia32_no_pic_adjust;
- ia32_no_pic_adjust = be_get_irg_options(current_ir_graph)->pic;
+ ia32_no_pic_adjust = be_options.pic;
match_arguments(&am, src_block, NULL, src_ptr, src_mem,
- match_am | match_immediate);
+ match_am | match_immediate | match_upconv);
ia32_no_pic_adjust = old_no_pic_adjust;
/* or */
orn = new_bd_ia32_Or(dbgi, block, noreg_GP, noreg_GP, nomem, bsf, neg);
+ set_ia32_ls_mode(orn, mode_Iu);
set_ia32_commutative(orn);
/* add 1 */
* operations)
*/
ir_node *count = ia32_create_Immediate(NULL, 0, 16);
- ir_node *shr = new_bd_ia32_Shr(dbgi, new_block, new_param, count);
- ir_node *xor = new_bd_ia32_Xor(dbgi, new_block, noreg_GP, noreg_GP, nomem,
- shr, new_param);
- ir_node *xor2 = new_bd_ia32_XorHighLow(dbgi, new_block, xor);
+ ir_node *shr = new_bd_ia32_Shr(dbgi, new_block, new_param, count);
+ ir_node *xorn = new_bd_ia32_Xor(dbgi, new_block, noreg_GP, noreg_GP, nomem,
+ shr, new_param);
+ ir_node *xor2 = new_bd_ia32_XorHighLow(dbgi, new_block, xorn);
ir_node *flags;
- set_ia32_commutative(xor);
+ set_ia32_ls_mode(xorn, mode_Iu);
+ set_ia32_commutative(xorn);
set_irn_mode(xor2, mode_T);
flags = new_r_Proj(xor2, mode_Iu, pn_ia32_XorHighLow_flags);
ia32_address_t *addr = &am.addr;
ir_node *cnt;
- match_arguments(&am, block, NULL, param, NULL, match_am | match_16bit_am);
+ match_arguments(&am, block, NULL, param, NULL, match_am | match_16bit_am | match_upconv);
cnt = new_bd_ia32_Popcnt(dbgi, new_block, addr->base, addr->index, addr->mem, am.new_op2);
set_am_attributes(cnt, &am);
ir_node *new_block = be_transform_node(block);
ir_mode *mode = get_irn_mode(param);
unsigned size = get_mode_size_bits(mode);
- ir_node *m1, *m2, *m3, *m4, *s1, *s2, *s3, *s4;
switch (size) {
case 32:
- if (ia32_cg_config.use_i486) {
+ if (ia32_cg_config.use_bswap) {
/* swap available */
return new_bd_ia32_Bswap(dbgi, new_block, param);
+ } else {
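+ /* no bswap instruction available: swap the bytes of the lower half,
+ * rotate the register by 16 bit and swap the bytes of the new lower
+ * half */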
+ ir_node *i8 = ia32_create_Immediate(NULL, 0, 8);
+ ir_node *rol1 = new_bd_ia32_Rol(dbgi, new_block, param, i8);
+ ir_node *i16 = ia32_create_Immediate(NULL, 0, 16);
+ ir_node *rol2 = new_bd_ia32_Rol(dbgi, new_block, rol1, i16);
+ ir_node *rol3 = new_bd_ia32_Rol(dbgi, new_block, rol2, i8);
+ set_ia32_ls_mode(rol1, mode_Hu);
+ set_ia32_ls_mode(rol2, mode_Iu);
+ set_ia32_ls_mode(rol3, mode_Hu);
+ return rol3;
}
- s1 = new_bd_ia32_Shl(dbgi, new_block, param, ia32_create_Immediate(NULL, 0, 24));
- s2 = new_bd_ia32_Shl(dbgi, new_block, param, ia32_create_Immediate(NULL, 0, 8));
-
- m1 = new_bd_ia32_And(dbgi, new_block, noreg_GP, noreg_GP, nomem, s2, ia32_create_Immediate(NULL, 0, 0xFF00));
- m2 = new_bd_ia32_Lea(dbgi, new_block, s1, m1);
-
- s3 = new_bd_ia32_Shr(dbgi, new_block, param, ia32_create_Immediate(NULL, 0, 8));
-
- m3 = new_bd_ia32_And(dbgi, new_block, noreg_GP, noreg_GP, nomem, s3, ia32_create_Immediate(NULL, 0, 0xFF0000));
- m4 = new_bd_ia32_Lea(dbgi, new_block, m2, m3);
-
- s4 = new_bd_ia32_Shr(dbgi, new_block, param, ia32_create_Immediate(NULL, 0, 24));
- return new_bd_ia32_Lea(dbgi, new_block, m4, s4);
case 16:
/* swap16 always available */
case ir_bk_inner_trampoline:
return gen_inner_trampoline(node);
}
- panic("Builtin %s not implemented in IA32", get_builtin_kind_name(kind));
+ panic("Builtin %s not implemented", get_builtin_kind_name(kind));
}
/**
return get_Tuple_pred(new_node, 0);
}
}
- panic("Builtin %s not implemented in IA32", get_builtin_kind_name(kind));
+ panic("Builtin %s not implemented", get_builtin_kind_name(kind));
}
static ir_node *gen_be_IncSP(ir_node *node)
return res;
}
-/**
- * Transform the Projs from a Cmp.
- */
-static ir_node *gen_Proj_Cmp(ir_node *node)
-{
- /* this probably means not all mode_b nodes were lowered... */
- panic("trying to directly transform Proj_Cmp %+F (mode_b not lowered?)",
- node);
-}
-
static ir_node *gen_Proj_ASM(ir_node *node)
{
ir_mode *mode = get_irn_mode(node);
return gen_Proj_be_AddSP(node);
case beo_Call:
return gen_Proj_be_Call(node);
- case iro_Cmp:
- return gen_Proj_Cmp(node);
case iro_Start:
proj = get_Proj_proj(node);
switch (proj) {
be_set_transform_function(op_Switch, gen_Switch);
be_set_transform_function(op_SymConst, gen_SymConst);
be_set_transform_function(op_Unknown, ia32_gen_Unknown);
+
+ be_set_upper_bits_clean_function(op_Mux, ia32_mux_upper_bits_clean);
}
/**
for (j = get_method_n_ress(mtp) - 1; j >= 0; --j) {
ir_type *res_tp = get_method_res_type(mtp, j);
ir_node *res, *new_res;
- const ir_edge_t *edge, *next;
ir_mode *res_mode;
if (! is_atomic_type(res_tp)) {
new_res = NULL;
/* now patch the users */
- foreach_out_edge_safe(res, edge, next) {
+ foreach_out_edge_safe(res, edge) {
ir_node *succ = get_edge_src_irn(edge);
/* ignore Keeps */