#include "irprintf.h"
#include "debug.h"
#include "irdom.h"
-#include "archop.h"
#include "error.h"
#include "array_t.h"
#include "height.h"
#define SFP_ABS "0x7FFFFFFF"
#define DFP_ABS "0x7FFFFFFFFFFFFFFF"
#define DFP_INTMAX "9223372036854775807"
+#define ULL_BIAS "18446744073709551616"
-#define TP_SFP_SIGN "ia32_sfp_sign"
-#define TP_DFP_SIGN "ia32_dfp_sign"
-#define TP_SFP_ABS "ia32_sfp_abs"
-#define TP_DFP_ABS "ia32_dfp_abs"
-#define TP_INT_MAX "ia32_int_max"
-
-#define ENT_SFP_SIGN "IA32_SFP_SIGN"
-#define ENT_DFP_SIGN "IA32_DFP_SIGN"
-#define ENT_SFP_ABS "IA32_SFP_ABS"
-#define ENT_DFP_ABS "IA32_DFP_ABS"
-#define ENT_INT_MAX "IA32_INT_MAX"
+#define ENT_SFP_SIGN ".LC_ia32_sfp_sign"
+#define ENT_DFP_SIGN ".LC_ia32_dfp_sign"
+#define ENT_SFP_ABS ".LC_ia32_sfp_abs"
+#define ENT_DFP_ABS ".LC_ia32_dfp_abs"
+#define ENT_ULL_BIAS ".LC_ia32_ull_bias"
#define mode_vfp (ia32_reg_classes[CLASS_ia32_vfp].mode)
#define mode_xmm (ia32_reg_classes[CLASS_ia32_xmm].mode)
static ir_node *initial_fpcw = NULL;
-extern ir_op *get_op_Mulh(void);
-
typedef ir_node *construct_binop_func(dbg_info *db, ir_node *block,
ir_node *base, ir_node *index, ir_node *mem, ir_node *op1,
ir_node *op2);
res = load;
set_ia32_ls_mode(load, mode);
} else {
+ ir_mode *ls_mode;
+
floatent = create_float_const_entity(node);
+ /* create_float_const_ent is smart and sometimes creates
+ smaller entities */
+ ls_mode = get_type_mode(get_entity_type(floatent));
- load = new_bd_ia32_vfld(dbgi, block, noreg, noreg, nomem, mode);
+ load = new_bd_ia32_vfld(dbgi, block, noreg, noreg, nomem,
+ ls_mode);
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_am_sc(load, floatent);
arch_irn_add_flags(load, arch_irn_flags_rematerializable);
res = new_r_Proj(current_ir_graph, block, load, mode_vfp, pn_ia32_vfld_res);
- /* take the mode from the entity */
- set_ia32_ls_mode(load, get_type_mode(get_entity_type(floatent)));
}
}
end:
return cnst;
}
+/**
+ * Create a float type for the given mode and cache it.
+ *
+ * @param mode the mode for the float type (might be integer mode for SSE2 types)
+ * @param align alignment
+ */
+static ir_type *ia32_create_float_type(ir_mode *mode, unsigned align) {
+ char buf[32];
+ ir_type *tp;
+
+ assert(align <= 16);
+
+ if (mode == mode_Iu) {
+ static ir_type *int_Iu[16] = {NULL, };
+
+ if (int_Iu[align] == NULL) {
+ snprintf(buf, sizeof(buf), "int_Iu_%u", align);
+ int_Iu[align] = tp = new_type_primitive(new_id_from_str(buf), mode);
+ /* set the specified alignment */
+ set_type_alignment_bytes(tp, align);
+ }
+ return int_Iu[align];
+ } else if (mode == mode_Lu) {
+ static ir_type *int_Lu[16] = {NULL, };
+
+ if (int_Lu[align] == NULL) {
+ snprintf(buf, sizeof(buf), "int_Lu_%u", align);
+ int_Lu[align] = tp = new_type_primitive(new_id_from_str(buf), mode);
+ /* set the specified alignment */
+ set_type_alignment_bytes(tp, align);
+ }
+ return int_Lu[align];
+ } else if (mode == mode_F) {
+ static ir_type *float_F[16] = {NULL, };
+
+ if (float_F[align] == NULL) {
+ snprintf(buf, sizeof(buf), "float_F_%u", align);
+ float_F[align] = tp = new_type_primitive(new_id_from_str(buf), mode);
+ /* set the specified alignment */
+ set_type_alignment_bytes(tp, align);
+ }
+ return float_F[align];
+ } else if (mode == mode_D) {
+ static ir_type *float_D[16] = {NULL, };
+
+ if (float_D[align] == NULL) {
+ snprintf(buf, sizeof(buf), "float_D_%u", align);
+ float_D[align] = tp = new_type_primitive(new_id_from_str(buf), mode);
+ /* set the specified alignment */
+ set_type_alignment_bytes(tp, align);
+ }
+ return float_D[align];
+ } else {
+ static ir_type *float_E[16] = {NULL, };
+
+ if (float_E[align] == NULL) {
+ snprintf(buf, sizeof(buf), "float_E_%u", align);
+ float_E[align] = tp = new_type_primitive(new_id_from_str(buf), mode);
+ /* set the specified alignment */
+ set_type_alignment_bytes(tp, align);
+ }
+ return float_E[align];
+ }
+}
+
+/**
+ * Create a float[2] array type for the given atomic type.
+ *
+ * @param tp the atomic type
+ */
+static ir_type *ia32_create_float_array(ir_type *tp) {
+ char buf[32];
+ ir_mode *mode = get_type_mode(tp);
+ unsigned align = get_type_alignment_bytes(tp);
+ ir_type *arr;
+
+ assert(align <= 16);
+
+ if (mode == mode_F) {
+ static ir_type *float_F[16] = {NULL, };
+
+ if (float_F[align] != NULL)
+ return float_F[align];
+ snprintf(buf, sizeof(buf), "arr_float_F_%u", align);
+ arr = float_F[align] = new_type_array(new_id_from_str(buf), 1, tp);
+ } else if (mode == mode_D) {
+ static ir_type *float_D[16] = {NULL, };
+
+ if (float_D[align] != NULL)
+ return float_D[align];
+ snprintf(buf, sizeof(buf), "arr_float_D_%u", align);
+ arr = float_D[align] = new_type_array(new_id_from_str(buf), 1, tp);
+ } else {
+ static ir_type *float_E[16] = {NULL, };
+
+ if (float_E[align] != NULL)
+ return float_E[align];
+ snprintf(buf, sizeof(buf), "arr_float_E_%u", align);
+ arr = float_E[align] = new_type_array(new_id_from_str(buf), 1, tp);
+ }
+ set_type_alignment_bytes(arr, align);
+ set_type_size_bytes(arr, 2 * get_type_size_bytes(tp));
+ set_type_state(arr, layout_fixed);
+ return arr;
+}
+
/* Generates an entity for a known FP const (used for FP Neg + Abs) */
ir_entity *ia32_gen_fp_known_const(ia32_known_const_t kct)
{
static const struct {
- const char *tp_name;
const char *ent_name;
const char *cnst_str;
char mode;
- char align;
+ unsigned char align;
} names [ia32_known_const_max] = {
- { TP_SFP_SIGN, ENT_SFP_SIGN, SFP_SIGN, 0, 16 }, /* ia32_SSIGN */
- { TP_DFP_SIGN, ENT_DFP_SIGN, DFP_SIGN, 1, 16 }, /* ia32_DSIGN */
- { TP_SFP_ABS, ENT_SFP_ABS, SFP_ABS, 0, 16 }, /* ia32_SABS */
- { TP_DFP_ABS, ENT_DFP_ABS, DFP_ABS, 1, 16 }, /* ia32_DABS */
- { TP_INT_MAX, ENT_INT_MAX, DFP_INTMAX, 2, 4 } /* ia32_INTMAX */
+ { ENT_SFP_SIGN, SFP_SIGN, 0, 16 }, /* ia32_SSIGN */
+ { ENT_DFP_SIGN, DFP_SIGN, 1, 16 }, /* ia32_DSIGN */
+ { ENT_SFP_ABS, SFP_ABS, 0, 16 }, /* ia32_SABS */
+ { ENT_DFP_ABS, DFP_ABS, 1, 16 }, /* ia32_DABS */
+ { ENT_ULL_BIAS, ULL_BIAS, 2, 4 } /* ia32_ULLBIAS */
};
static ir_entity *ent_cache[ia32_known_const_max];
- const char *tp_name, *ent_name, *cnst_str;
+ const char *ent_name, *cnst_str;
ir_type *tp;
- ir_node *cnst;
- ir_graph *rem;
ir_entity *ent;
tarval *tv;
ir_mode *mode;
ent_name = names[kct].ent_name;
if (! ent_cache[kct]) {
- tp_name = names[kct].tp_name;
cnst_str = names[kct].cnst_str;
switch (names[kct].mode) {
case 0: mode = mode_Iu; break;
case 1: mode = mode_Lu; break;
- default: mode = mode_F; break;
+ default: mode = mode_F; break;
}
tv = new_tarval_from_str(cnst_str, strlen(cnst_str), mode);
- tp = new_type_primitive(new_id_from_str(tp_name), mode);
- /* set the specified alignment */
- set_type_alignment_bytes(tp, names[kct].align);
+ tp = ia32_create_float_type(mode, names[kct].align);
+ if (kct == ia32_ULLBIAS)
+ tp = ia32_create_float_array(tp);
ent = new_entity(get_glob_type(), new_id_from_str(ent_name), tp);
set_entity_ld_ident(ent, get_entity_ident(ent));
set_entity_variability(ent, variability_constant);
set_entity_allocation(ent, allocation_static);
- /* we create a new entity here: It's initialization must resist on the
- const code irg */
- rem = current_ir_graph;
- current_ir_graph = get_const_code_irg();
- cnst = new_Const(mode, tv);
- current_ir_graph = rem;
+ if (kct == ia32_ULLBIAS) {
+ ir_initializer_t *initializer = create_initializer_compound(2);
- set_atomic_ent_value(ent, cnst);
+ set_initializer_compound_value(initializer, 0,
+ create_initializer_tarval(get_tarval_null(mode)));
+ set_initializer_compound_value(initializer, 1,
+ create_initializer_tarval(tv));
+
+ set_entity_initializer(ent, initializer);
+ } else {
+ set_entity_initializer(ent, create_initializer_tarval(tv));
+ }
/* cache the entry */
ent_cache[kct] = ent;
src_mode = get_irn_mode(get_Conv_op(node));
dest_mode = get_irn_mode(node);
- return ia32_mode_needs_gp_reg(src_mode)
- && ia32_mode_needs_gp_reg(dest_mode)
- && get_mode_size_bits(dest_mode) < get_mode_size_bits(src_mode);
+ return
+ ia32_mode_needs_gp_reg(src_mode) &&
+ ia32_mode_needs_gp_reg(dest_mode) &&
+ get_mode_size_bits(dest_mode) <= get_mode_size_bits(src_mode);
}
/* Skip all Down-Conv's on a given node and return the resulting node. */
assert(use_am || !(flags & match_8bit_am));
assert(use_am || !(flags & match_16bit_am));
- if (mode_bits == 8) {
- if (!(flags & match_8bit_am))
- use_am = 0;
- /* we don't automatically add upconvs yet */
- assert((flags & match_mode_neutral) || (flags & match_8bit));
- } else if (mode_bits == 16) {
- if (!(flags & match_16bit_am))
- use_am = 0;
- /* we don't automatically add upconvs yet */
- assert((flags & match_mode_neutral) || (flags & match_16bit));
+ if ((mode_bits == 8 && !(flags & match_8bit_am)) ||
+ (mode_bits == 16 && !(flags & match_16bit_am))) {
+ use_am = 0;
}
/* we can simply skip downconvs for mode neutral nodes: the upper bits
{
ir_node *left = get_Shrs_left(node);
ir_node *right = get_Shrs_right(node);
- ir_mode *mode = get_irn_mode(node);
- if (is_Const(right) && mode == mode_Is) {
+ if (is_Const(right)) {
tarval *tv = get_Const_tarval(right);
long val = get_tarval_long(tv);
if (val == 31) {
}
/* 8 or 16 bit sign extension? */
- if (is_Const(right) && is_Shl(left) && mode == mode_Is) {
+ if (is_Const(right) && is_Shl(left)) {
ir_node *shl_left = get_Shl_left(left);
ir_node *shl_right = get_Shl_right(left);
if (is_Const(shl_right)) {
ia32_address_t *addr = &am.addr;
memset(&am, 0, sizeof(am));
- assert(flags & match_dest_am);
assert(flags & match_immediate); /* there is no destam node without... */
commutative = (flags & match_commutative) != 0;
}
new_node = dest_am_binop(val, op1, op2, mem, ptr, mode,
new_bd_ia32_AddMem, new_bd_ia32_AddMem8Bit,
- match_dest_am | match_commutative |
- match_immediate);
+ match_commutative | match_immediate);
break;
case iro_Sub:
op1 = get_Sub_left(val);
}
new_node = dest_am_binop(val, op1, op2, mem, ptr, mode,
new_bd_ia32_SubMem, new_bd_ia32_SubMem8Bit,
- match_dest_am | match_immediate);
+ match_immediate);
break;
case iro_And:
op1 = get_And_left(val);
op2 = get_And_right(val);
new_node = dest_am_binop(val, op1, op2, mem, ptr, mode,
new_bd_ia32_AndMem, new_bd_ia32_AndMem8Bit,
- match_dest_am | match_commutative |
- match_immediate);
+ match_commutative | match_immediate);
break;
case iro_Or:
op1 = get_Or_left(val);
op2 = get_Or_right(val);
new_node = dest_am_binop(val, op1, op2, mem, ptr, mode,
new_bd_ia32_OrMem, new_bd_ia32_OrMem8Bit,
- match_dest_am | match_commutative |
- match_immediate);
+ match_commutative | match_immediate);
break;
case iro_Eor:
op1 = get_Eor_left(val);
op2 = get_Eor_right(val);
new_node = dest_am_binop(val, op1, op2, mem, ptr, mode,
new_bd_ia32_XorMem, new_bd_ia32_XorMem8Bit,
- match_dest_am | match_commutative |
- match_immediate);
+ match_commutative | match_immediate);
break;
case iro_Shl:
op1 = get_Shl_left(val);
op2 = get_Shl_right(val);
new_node = dest_am_binop(val, op1, op2, mem, ptr, mode,
new_bd_ia32_ShlMem, new_bd_ia32_ShlMem,
- match_dest_am | match_immediate);
+ match_immediate);
break;
case iro_Shr:
op1 = get_Shr_left(val);
op2 = get_Shr_right(val);
new_node = dest_am_binop(val, op1, op2, mem, ptr, mode,
new_bd_ia32_ShrMem, new_bd_ia32_ShrMem,
- match_dest_am | match_immediate);
+ match_immediate);
break;
case iro_Shrs:
op1 = get_Shrs_left(val);
op2 = get_Shrs_right(val);
new_node = dest_am_binop(val, op1, op2, mem, ptr, mode,
new_bd_ia32_SarMem, new_bd_ia32_SarMem,
- match_dest_am | match_immediate);
+ match_immediate);
break;
case iro_Rotl:
op1 = get_Rotl_left(val);
op2 = get_Rotl_right(val);
new_node = dest_am_binop(val, op1, op2, mem, ptr, mode,
new_bd_ia32_RolMem, new_bd_ia32_RolMem,
- match_dest_am | match_immediate);
+ match_immediate);
break;
/* TODO: match ROR patterns... */
case iro_Mux:
return new_node;
}
+static bool possible_int_mode_for_fp(ir_mode *mode)
+{
+ unsigned size;
+
+ if (!mode_is_signed(mode))
+ return false;
+ size = get_mode_size_bits(mode);
+ if (size != 16 && size != 32)
+ return false;
+ return true;
+}
+
static int is_float_to_int_conv(const ir_node *node)
{
ir_mode *mode = get_irn_mode(node);
ir_node *conv_op;
ir_mode *conv_mode;
- if (mode != mode_Is && mode != mode_Hs)
+ if (!possible_int_mode_for_fp(mode))
return 0;
if (!is_Conv(node))
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *sel = get_Cond_selector(node);
ir_node *new_sel = be_transform_node(sel);
- int switch_min = INT_MAX;
- int switch_max = INT_MIN;
+ long switch_min = LONG_MAX;
+ long switch_max = LONG_MIN;
long default_pn = get_Cond_defaultProj(node);
ir_node *new_node;
const ir_edge_t *edge;
switch_max = pn;
}
- if ((unsigned) (switch_max - switch_min) > 256000) {
+ if ((unsigned long) (switch_max - switch_min) > 256000) {
panic("Size of switch %+F bigger than 256000", node);
}
match_arguments(&am, block, and_left, and_right, NULL,
match_commutative |
match_am | match_8bit_am | match_16bit_am |
- match_am_and_immediates | match_immediate |
- match_8bit | match_16bit);
+ match_am_and_immediates | match_immediate);
/* use 32bit compare mode if possible since the opcode is smaller */
if (upper_bits_clean(am.new_op1, cmp_mode) &&
match_arguments(&am, block, left, right, NULL,
match_commutative | match_am | match_8bit_am |
match_16bit_am | match_am_and_immediates |
- match_immediate | match_8bit | match_16bit);
+ match_immediate);
/* use 32bit compare mode if possible since the opcode is smaller */
if (upper_bits_clean(am.new_op1, cmp_mode) &&
upper_bits_clean(am.new_op2, cmp_mode)) {
ir_node *val_true = get_Mux_true(node);
ir_node *val_false = get_Mux_false(node);
ir_node *new_node;
- match_flags_t match_flags;
ia32_address_mode_t am;
ia32_address_t *addr;
addr = &am.addr;
- match_flags = match_commutative | match_am | match_16bit_am |
- match_mode_neutral;
-
- match_arguments(&am, block, val_false, val_true, flags, match_flags);
+ match_arguments(&am, block, val_false, val_true, flags,
+ match_commutative | match_am | match_16bit_am | match_mode_neutral);
new_node = new_bd_ia32_CMov(dbgi, new_block, addr->base, addr->index,
addr->mem, am.new_op1, am.new_op2, new_flags,
int ins_permuted)
{
ir_node *noreg = ia32_new_NoReg_gp(env_cg);
- ir_node *nomem = new_NoMem();
ir_mode *mode = get_irn_mode(orig_node);
ir_node *new_node;
/* we might need to conv the result up */
if (get_mode_size_bits(mode) > 8) {
+ ir_node *nomem = new_NoMem();
new_node = new_bd_ia32_Conv_I2I8Bit(dbgi, new_block, noreg, noreg,
nomem, new_node, mode_Bu);
SET_IA32_ORIG_NODE(new_node, orig_node);
{
ir_graph *irg = current_ir_graph;
ir_mode *mode = get_irn_mode(psi);
- ir_node *new_node, *sub, *sbb, *eflags, *block, *noreg, *tmpreg, *nomem;
+ ir_node *nomem = new_NoMem();
+ ir_node *new_node, *sub, *sbb, *eflags, *block, *noreg;
+
dbg_info *dbgi;
new_node = gen_binop(psi, a, b, new_bd_ia32_Sub,
eflags = new_rd_Proj(NULL, irg, block, sub, mode_Iu, pn_ia32_Sub_flags);
dbgi = get_irn_dbg_info(psi);
- noreg = ia32_new_NoReg_gp(env_cg);
- tmpreg = new_bd_ia32_ProduceVal(dbgi, block);
- nomem = new_NoMem();
- sbb = new_bd_ia32_Sbb(dbgi, block, noreg, noreg, nomem, tmpreg, tmpreg, eflags);
+ sbb = new_bd_ia32_Sbb0(dbgi, block, eflags);
+ noreg = ia32_new_NoReg_gp(env_cg);
new_node = new_bd_ia32_And(dbgi, block, noreg, noreg, nomem, new_node, sbb);
set_ia32_commutative(new_node);
return new_node;
}
/**
 * Create a const array of two float consts.
 *
 * Builds a new static, local, constant global entity of type float[2]
 * initialized with { c0, c1 } (in that order).
 *
 * @param c0        the first constant
 * @param c1        the second constant
 * @param new_mode  IN/OUT for the mode of the constants, if NULL
 *                  smallest possible mode will be used
 */
static ir_entity *ia32_create_const_array(ir_node *c0, ir_node *c1, ir_mode **new_mode) {
	ir_entity *ent;
	ir_mode *mode = *new_mode;
	ir_type *tp;
	ir_initializer_t *initializer;
	tarval *tv0 = get_Const_tarval(c0);
	tarval *tv1 = get_Const_tarval(c1);

	if (mode == NULL) {
		/* detect the best mode for the constants: try to shrink both
		 * values losslessly to single precision first, then to double */
		mode = get_tarval_mode(tv0);

		if (mode != mode_F) {
			if (tarval_ieee754_can_conv_lossless(tv0, mode_F) &&
			    tarval_ieee754_can_conv_lossless(tv1, mode_F)) {
				mode = mode_F;
				tv0 = tarval_convert_to(tv0, mode);
				tv1 = tarval_convert_to(tv1, mode);
			} else if (mode != mode_D) {
				if (tarval_ieee754_can_conv_lossless(tv0, mode_D) &&
				    tarval_ieee754_can_conv_lossless(tv1, mode_D)) {
					mode = mode_D;
					tv0 = tarval_convert_to(tv0, mode);
					tv1 = tarval_convert_to(tv1, mode);
				}
			}
		}

	}

	/* element type with 4 byte alignment, then the float[2] array around it */
	tp = ia32_create_float_type(mode, 4);
	tp = ia32_create_float_array(tp);

	/* anonymous, assembler-local constant (".LC<n>" label) */
	ent = new_entity(get_glob_type(), ia32_unique_id(".LC%u"), tp);

	set_entity_ld_ident(ent, get_entity_ident(ent));
	set_entity_visibility(ent, visibility_local);
	set_entity_variability(ent, variability_constant);
	set_entity_allocation(ent, allocation_static);

	initializer = create_initializer_compound(2);

	set_initializer_compound_value(initializer, 0, create_initializer_tarval(tv0));
	set_initializer_compound_value(initializer, 1, create_initializer_tarval(tv1));

	set_entity_initializer(ent, initializer);

	/* report the (possibly shrunk) mode back to the caller */
	*new_mode = mode;
	return ent;
}
+
/**
* Transforms a Mux node into CMov.
*
ir_node *mux_false = get_Mux_false(node);
ir_node *cond = get_Mux_sel(node);
ir_mode *mode = get_irn_mode(node);
+ ir_node *flags;
+ ir_node *new_node;
pn_Cmp pnc;
assert(get_irn_mode(cond) == mode_b);
}
}
}
+ if (is_Const(mux_true) && is_Const(mux_false)) {
+ ia32_address_mode_t am;
+ ir_node *noreg = ia32_new_NoReg_gp(env_cg);
+ ir_node *nomem = new_NoMem();
+ ir_node *load;
+ ir_mode *new_mode;
+ unsigned scale;
+
+ flags = get_flags_node(cond, &pnc);
+ new_node = create_set_32bit(dbgi, new_block, flags, pnc, node, /*is_premuted=*/0);
+
+ if (ia32_cg_config.use_sse2) {
+ /* cannot load from different mode on SSE */
+ new_mode = mode;
+ } else {
+ /* x87 can load any mode */
+ new_mode = NULL;
+ }
+
+ am.addr.symconst_ent = ia32_create_const_array(mux_false, mux_true, &new_mode);
+
+ switch (get_mode_size_bytes(new_mode)) {
+ case 4:
+ scale = 2;
+ break;
+ case 8:
+ scale = 3;
+ break;
+ case 10:
+ /* use 2 * 5 */
+ scale = 1;
+ new_node = new_bd_ia32_Lea(dbgi, new_block, new_node, new_node);
+ set_ia32_am_scale(new_node, 2);
+ break;
+ case 12:
+ /* use 4 * 3 */
+ scale = 2;
+ new_node = new_bd_ia32_Lea(dbgi, new_block, new_node, new_node);
+ set_ia32_am_scale(new_node, 1);
+ break;
+ case 16:
+ /* arg, shift 16 NOT supported */
+ scale = 3;
+ new_node = new_bd_ia32_Add(dbgi, new_block, noreg, noreg, nomem, new_node, new_node);
+ break;
+ default:
+ panic("Unsupported constant size");
+ }
+
+ am.ls_mode = new_mode;
+ am.addr.base = noreg;
+ am.addr.index = new_node;
+ am.addr.mem = nomem;
+ am.addr.offset = 0;
+ am.addr.scale = scale;
+ am.addr.use_frame = 0;
+ am.addr.frame_entity = NULL;
+ am.addr.symconst_sign = 0;
+ am.mem_proj = am.addr.mem;
+ am.op_type = ia32_AddrModeS;
+ am.new_op1 = NULL;
+ am.new_op2 = NULL;
+ am.pinned = op_pin_state_floats;
+ am.commutative = 1;
+ am.ins_permuted = 0;
+
+ if (ia32_cg_config.use_sse2)
+ load = new_bd_ia32_xLoad(dbgi, block, am.addr.base, am.addr.index, am.addr.mem, new_mode);
+ else
+ load = new_bd_ia32_vfld(dbgi, block, am.addr.base, am.addr.index, am.addr.mem, new_mode);
+ set_am_attributes(load, &am);
+
+ return new_rd_Proj(NULL, current_ir_graph, block, load, mode_vfp, pn_ia32_res);
+ }
panic("cannot transform floating point Mux");
} else {
- ir_node *flags;
- ir_node *new_node;
-
assert(ia32_mode_needs_gp_reg(mode));
if (is_Proj(cond)) {
ir_node *new_node;
/* fild can use source AM if the operand is a signed 16bit or 32bit integer */
- if (src_mode == mode_Is || src_mode == mode_Hs) {
+ if (possible_int_mode_for_fp(src_mode)) {
ia32_address_mode_t am;
- match_arguments(&am, src_block, NULL, op, NULL,
- match_am | match_try_am | match_16bit | match_16bit_am);
+ match_arguments(&am, src_block, NULL, op, NULL, match_am | match_try_am | match_16bit_am);
if (am.op_type == ia32_AddrModeS) {
ia32_address_t *addr = &am.addr;
/* first convert to 32 bit signed if necessary */
if (get_mode_size_bits(src_mode) < 32) {
- new_op = create_Conv_I2I(dbgi, block, noreg, noreg, nomem, new_op, src_mode);
- SET_IA32_ORIG_NODE(new_op, node);
+ if (!upper_bits_clean(new_op, src_mode)) {
+ new_op = create_Conv_I2I(dbgi, block, noreg, noreg, nomem, new_op, src_mode);
+ SET_IA32_ORIG_NODE(new_op, node);
+ }
mode = mode_Is;
}
#endif
match_arguments(&am, block, NULL, op, NULL,
- match_8bit | match_16bit |
match_am | match_8bit_am | match_16bit_am);
if (upper_bits_clean(am.new_op2, smaller_mode)) {
ir_node *nomem = new_NoMem();
ir_node *res = NULL;
+ assert(!mode_is_int(src_mode) || src_bits <= 32);
+ assert(!mode_is_int(tgt_mode) || tgt_bits <= 32);
+
if (src_mode == mode_b) {
assert(mode_is_int(tgt_mode) || mode_is_reference(tgt_mode));
/* nothing to do, we already model bools as 0/1 ints */
new_op = be_transform_node(op);
/* we convert from float ... */
if (mode_is_float(tgt_mode)) {
+#if 0
+ /* Matze: I'm a bit unsure what the following is for? seems wrong
+ * to me... */
if (src_mode == mode_E && tgt_mode == mode_D
&& !get_Conv_strict(node)) {
DB((dbg, LEVEL_1, "killed Conv(mode, mode) ..."));
return new_op;
}
+#endif
/* ... to float */
if (ia32_cg_config.use_sse2) {
set_ia32_ls_mode(res, tgt_mode);
} else {
if (get_Conv_strict(node)) {
- res = gen_x87_strict_conv(tgt_mode, new_op);
- SET_IA32_ORIG_NODE(get_Proj_pred(res), node);
- return res;
+ /* if fp_no_float_fold is not set then we assume that we
+ * don't have any float operations in a non
+ * mode_float_arithmetic mode and can skip strict upconvs */
+ if (src_bits < tgt_bits
+ && !(get_irg_fp_model(current_ir_graph) & fp_no_float_fold)) {
+ DB((dbg, LEVEL_1, "killed Conv(float, float) ..."));
+ return new_op;
+ } else {
+ res = gen_x87_strict_conv(tgt_mode, new_op);
+ SET_IA32_ORIG_NODE(get_Proj_pred(res), node);
+ return res;
+ }
}
DB((dbg, LEVEL_1, "killed Conv(float, float) ..."));
return new_op;
nomem, new_op);
set_ia32_ls_mode(res, tgt_mode);
} else {
+ unsigned int_mantissa = get_mode_size_bits(src_mode) - (mode_is_signed(src_mode) ? 1 : 0);
+ unsigned float_mantissa = tarval_ieee754_get_mantissa_size(tgt_mode);
res = gen_x87_gp_to_fp(node, src_mode);
- if (get_Conv_strict(node)) {
- /* The strict-Conv is only necessary, if the int mode has more bits
- * than the float mantissa */
- size_t int_mantissa = get_mode_size_bits(src_mode) - (mode_is_signed(src_mode) ? 1 : 0);
- size_t float_mantissa;
- /* FIXME There is no way to get the mantissa size of a mode */
- switch (get_mode_size_bits(tgt_mode)) {
- case 32: float_mantissa = 23 + 1; break; // + 1 for implicit 1
- case 64: float_mantissa = 52 + 1; break;
- case 80:
- case 96: float_mantissa = 64; break;
- default: float_mantissa = 0; break;
- }
- if (float_mantissa < int_mantissa) {
- res = gen_x87_strict_conv(tgt_mode, res);
- SET_IA32_ORIG_NODE(get_Proj_pred(res), node);
- }
+
+ /* we need a strict-Conv, if the int mode has more bits than the
+ * float mantissa */
+ if (float_mantissa < int_mantissa) {
+ res = gen_x87_strict_conv(tgt_mode, res);
+ SET_IA32_ORIG_NODE(get_Proj_pred(res), node);
}
return res;
}
assert(get_irn_mode(op) == mode_P);
- match_arguments(&am, block, NULL, op, NULL,
- match_am | match_8bit_am | match_16bit_am |
- match_immediate | match_8bit | match_16bit);
+ match_arguments(&am, block, NULL, op, NULL, match_am | match_immediate);
new_node = new_bd_ia32_IJmp(dbgi, new_block, addr->base, addr->index,
addr->mem, am.new_op2);
ir_node *new_val_low = be_transform_node(val_low);
ir_node *new_val_high = be_transform_node(val_high);
ir_node *in[2];
- ir_node *sync;
- ir_node *fild;
- ir_node *store_low;
- ir_node *store_high;
+ ir_node *sync, *fild, *res;
+ ir_node *store_low, *store_high;
- if (!mode_is_signed(get_irn_mode(val_high))) {
- panic("unsigned long long -> float not supported yet (%+F)", node);
+ if (ia32_cg_config.use_sse2) {
+ panic("ia32_l_LLtoFloat not implemented for SSE2");
}
/* do a store */
SET_IA32_ORIG_NODE(fild, node);
- return new_r_Proj(irg, block, fild, mode_vfp, pn_ia32_vfild_res);
+ res = new_r_Proj(irg, block, fild, mode_vfp, pn_ia32_vfild_res);
+
+ if (! mode_is_signed(get_irn_mode(val_high))) {
+ ia32_address_mode_t am;
+
+ ir_node *count = create_Immediate(NULL, 0, 31);
+ ir_node *fadd;
+
+ am.addr.base = ia32_new_NoReg_gp(env_cg);
+ am.addr.index = new_bd_ia32_Shr(dbgi, block, new_val_high, count);
+ am.addr.mem = nomem;
+ am.addr.offset = 0;
+ am.addr.scale = 2;
+ am.addr.symconst_ent = ia32_gen_fp_known_const(ia32_ULLBIAS);
+ am.addr.use_frame = 0;
+ am.addr.frame_entity = NULL;
+ am.addr.symconst_sign = 0;
+ am.ls_mode = mode_F;
+ am.mem_proj = nomem;
+ am.op_type = ia32_AddrModeS;
+ am.new_op1 = res;
+ am.new_op2 = ia32_new_NoReg_vfp(env_cg);
+ am.pinned = op_pin_state_floats;
+ am.commutative = 1;
+ am.ins_permuted = 0;
+
+ fadd = new_bd_ia32_vfadd(dbgi, block, am.addr.base, am.addr.index, am.addr.mem,
+ am.new_op1, am.new_op2, get_fpcw());
+ set_am_attributes(fadd, &am);
+
+ set_irn_mode(fadd, mode_T);
+ res = new_rd_Proj(NULL, irg, block, fadd, mode_vfp, pn_ia32_res);
+ }
+ return res;
}
static ir_node *gen_ia32_l_FloattoLL(ir_node *node)
return call;
}
/**
 * Transform Builtin return_address
 *
 * Param 0 is the (constant) frame level, param 1 the current frame pointer.
 * For level 0 the return address is loaded directly from the current frame;
 * for level > 0 a ClimbFrame node walks up the frame chain first.
 */
static ir_node *gen_return_address(ir_node *node) {
	ir_node *param = get_Builtin_param(node, 0);
	ir_node *frame = get_Builtin_param(node, 1);
	dbg_info *dbgi = get_irn_dbg_info(node);
	tarval *tv = get_Const_tarval(param);
	unsigned long value = get_tarval_long(tv);

	ir_node *block = be_transform_node(get_nodes_block(node));
	ir_node *ptr = be_transform_node(frame);
	ir_node *noreg = ia32_new_NoReg_gp(env_cg);
	ir_node *load;

	if (value > 0) {
		/* walk `value` frames up the frame-pointer chain */
		ir_node *cnt = new_bd_ia32_ProduceVal(dbgi, block);
		ir_node *res = new_bd_ia32_ProduceVal(dbgi, block);
		ptr = new_bd_ia32_ClimbFrame(dbgi, block, ptr, cnt, res, value);
	}

	/* load the return address from this frame */
	load = new_bd_ia32_Load(dbgi, block, ptr, noreg, get_irg_no_mem(current_ir_graph));

	set_irn_pinned(load, get_irn_pinned(node));
	set_ia32_op_type(load, ia32_AddrModeS);
	set_ia32_ls_mode(load, mode_Iu);

	/* address the return-address slot relative to the frame entity */
	set_ia32_am_offs_int(load, 0);
	set_ia32_use_frame(load);
	set_ia32_frame_ent(load, ia32_get_return_address_entity());

	if (get_irn_pinned(node) == op_pin_state_floats) {
		/* rematerialization relies on all load variants using the same
		 * result proj number */
		assert(pn_ia32_xLoad_res == pn_ia32_vfld_res
				&& pn_ia32_vfld_res == pn_ia32_Load_res
				&& pn_ia32_Load_res == pn_ia32_res);
		arch_irn_add_flags(load, arch_irn_flags_rematerializable);
	}

	SET_IA32_ORIG_NODE(load, node);
	return new_r_Proj(current_ir_graph, block, load, mode_Iu, pn_ia32_Load_res);
}
+
/**
 * Transform Builtin frame_address
 *
 * Param 0 is the (constant) frame level, param 1 the current frame pointer.
 * Analogous to gen_return_address, but loads the saved frame pointer.
 */
static ir_node *gen_frame_address(ir_node *node) {
	ir_node *param = get_Builtin_param(node, 0);
	ir_node *frame = get_Builtin_param(node, 1);
	dbg_info *dbgi = get_irn_dbg_info(node);
	tarval *tv = get_Const_tarval(param);
	unsigned long value = get_tarval_long(tv);

	ir_node *block = be_transform_node(get_nodes_block(node));
	ir_node *ptr = be_transform_node(frame);
	ir_node *noreg = ia32_new_NoReg_gp(env_cg);
	ir_node *load;
	ir_entity *ent;

	if (value > 0) {
		/* walk `value` frames up the frame-pointer chain */
		ir_node *cnt = new_bd_ia32_ProduceVal(dbgi, block);
		ir_node *res = new_bd_ia32_ProduceVal(dbgi, block);
		ptr = new_bd_ia32_ClimbFrame(dbgi, block, ptr, cnt, res, value);
	}

	/* load the frame address from this frame */
	load = new_bd_ia32_Load(dbgi, block, ptr, noreg, get_irg_no_mem(current_ir_graph));

	set_irn_pinned(load, get_irn_pinned(node));
	set_ia32_op_type(load, ia32_AddrModeS);
	set_ia32_ls_mode(load, mode_Iu);

	ent = ia32_get_frame_address_entity();
	if (ent != NULL) {
		set_ia32_am_offs_int(load, 0);
		set_ia32_use_frame(load);
		set_ia32_frame_ent(load, ent);
	} else {
		/* will fail anyway, but gcc does this: */
		set_ia32_am_offs_int(load, 0);
	}

	if (get_irn_pinned(node) == op_pin_state_floats) {
		/* rematerialization relies on all load variants using the same
		 * result proj number */
		assert(pn_ia32_xLoad_res == pn_ia32_vfld_res
				&& pn_ia32_vfld_res == pn_ia32_Load_res
				&& pn_ia32_Load_res == pn_ia32_res);
		arch_irn_add_flags(load, arch_irn_flags_rematerializable);
	}

	SET_IA32_ORIG_NODE(load, node);
	return new_r_Proj(current_ir_graph, block, load, mode_Iu, pn_ia32_Load_res);
}
+
/**
 * Transform Builtin prefetch
 *
 * Param 0 is the address, param 1 the read/write flag (1 = write) and
 * param 2 the locality hint (0..3).  Emits an SSE or 3DNow! prefetch
 * depending on the target configuration; if neither is available the
 * builtin degenerates to its memory operand.
 */
static ir_node *gen_prefetch(ir_node *node) {
	dbg_info *dbgi;
	ir_node *ptr, *block, *mem, *noreg, *base, *index;
	ir_node *param, *new_node;
	long rw, locality;
	tarval *tv;
	ia32_address_t addr;

	if (!ia32_cg_config.use_sse_prefetch && !ia32_cg_config.use_3dnow_prefetch) {
		/* no prefetch at all, route memory */
		return be_transform_node(get_Builtin_mem(node));
	}

	/* read/write hint (constant) */
	param = get_Builtin_param(node, 1);
	tv = get_Const_tarval(param);
	rw = get_tarval_long(tv);

	/* construct load address */
	memset(&addr, 0, sizeof(addr));
	ptr = get_Builtin_param(node, 0);
	ia32_create_address_mode(&addr, ptr, 0);
	base = addr.base;
	index = addr.index;

	noreg = ia32_new_NoReg_gp(env_cg);
	if (base == NULL) {
		base = noreg;
	} else {
		base = be_transform_node(base);
	}

	if (index == NULL) {
		index = noreg;
	} else {
		index = be_transform_node(index);
	}

	dbgi = get_irn_dbg_info(node);
	block = be_transform_node(get_nodes_block(node));
	mem = be_transform_node(get_Builtin_mem(node));

	if (rw == 1 && ia32_cg_config.use_3dnow_prefetch) {
		/* we have 3DNow!, this was already checked above */
		new_node = new_bd_ia32_PrefetchW(dbgi, block, base, index, mem);
	} else if (ia32_cg_config.use_sse_prefetch) {
		/* note: rw == 1 is IGNORED in that case */
		param = get_Builtin_param(node, 2);
		tv = get_Const_tarval(param);
		locality = get_tarval_long(tv);

		/* SSE style prefetch: pick the variant matching the locality hint */
		switch (locality) {
		case 0:
			new_node = new_bd_ia32_PrefetchNTA(dbgi, block, base, index, mem);
			break;
		case 1:
			new_node = new_bd_ia32_Prefetch2(dbgi, block, base, index, mem);
			break;
		case 2:
			new_node = new_bd_ia32_Prefetch1(dbgi, block, base, index, mem);
			break;
		default:
			new_node = new_bd_ia32_Prefetch0(dbgi, block, base, index, mem);
			break;
		}
	} else {
		assert(ia32_cg_config.use_3dnow_prefetch);
		/* 3DNow! style prefetch */
		new_node = new_bd_ia32_Prefetch(dbgi, block, base, index, mem);
	}

	set_irn_pinned(new_node, get_irn_pinned(node));
	set_ia32_op_type(new_node, ia32_AddrModeS);
	set_ia32_ls_mode(new_node, mode_Bu);
	set_address(new_node, &addr);

	SET_IA32_ORIG_NODE(new_node, node);

	be_dep_on_frame(new_node);
	return new_r_Proj(current_ir_graph, block, new_node, mode_M, pn_ia32_Prefetch_M);
}
+
/**
 * Transform a builtin with one operand into an ia32 node constructed by
 * @p func, matching the operand as a source address mode where possible
 * (used for bit-scan style builtins such as bsf/bsr).
 */
static ir_node *gen_unop_dest(ir_node *node, construct_binop_dest_func *func) {
	ir_node *param = get_Builtin_param(node, 0);
	dbg_info *dbgi = get_irn_dbg_info(node);

	ir_node *block = get_nodes_block(node);
	ir_node *new_block = be_transform_node(block);

	ia32_address_mode_t am;
	ia32_address_t *addr = &am.addr;
	ir_node *cnt;

	/* the single operand ends up in am.new_op2 */
	match_arguments(&am, block, NULL, param, NULL, match_am | match_16bit_am);

	cnt = (*func)(dbgi, new_block, addr->base, addr->index, addr->mem, am.new_op2);
	set_am_attributes(cnt, &am);
	set_ia32_ls_mode(cnt, get_irn_mode(param));

	SET_IA32_ORIG_NODE(cnt, node);
	return fix_mem_proj(cnt, &am);
}
+
+/**
+ * Transform builtin ffs (find first set bit).
+ *
+ * ffs(x) is the 1-based index of the least significant set bit of x,
+ * or 0 when x == 0.  Lowered as:
+ *     bsf x              (sets ZF iff x == 0; result undefined then)
+ *     sete + zero-extend -> 1 iff x == 0, else 0
+ *     neg                -> 0xFFFFFFFF iff x == 0, else 0
+ *     or  bsf, neg       -> bsf result, or all-ones when x == 0
+ *     add 1              -> bsf + 1, or 0 when x == 0
+ */
+static ir_node *gen_ffs(ir_node *node) {
+	ir_node  *bsf   = gen_unop_dest(node, new_bd_ia32_Bsf);
+	ir_node  *real  = skip_Proj(bsf);
+	dbg_info *dbgi  = get_irn_dbg_info(real);
+	ir_node  *block = get_nodes_block(real);
+	ir_node  *noreg = ia32_new_NoReg_gp(env_cg);
+	ir_node  *nomem = new_NoMem();
+	ir_node  *flag, *set, *conv, *neg, *or;
+
+	/* bsf x: make sure we can project both the result and the flags */
+	if (get_irn_mode(real) != mode_T) {
+		set_irn_mode(real, mode_T);
+		bsf = new_r_Proj(current_ir_graph, block, real, mode_Iu, pn_ia32_res);
+	}
+
+	flag = new_r_Proj(current_ir_graph, block, real, mode_b, pn_ia32_flags);
+
+	/* sete: 1 iff the input was zero */
+	set = new_bd_ia32_Set(dbgi, block, flag, pn_Cmp_Eq, 0);
+	SET_IA32_ORIG_NODE(set, node);
+
+	/* conv to 32bit */
+	conv = new_bd_ia32_Conv_I2I8Bit(dbgi, block, noreg, noreg, nomem, set, mode_Bu);
+	SET_IA32_ORIG_NODE(conv, node);
+
+	/* neg: all-ones mask iff the input was zero */
+	neg = new_bd_ia32_Neg(dbgi, block, conv);
+
+	/* or */
+	or = new_bd_ia32_Or(dbgi, block, noreg, noreg, nomem, bsf, neg);
+	set_ia32_commutative(or);
+
+	/* add 1 */
+	return new_bd_ia32_Add(dbgi, block, noreg, noreg, nomem, or, create_Immediate(NULL, 0, 1));
+}
+
+/**
+ * Transform builtin clz (count leading zeros).
+ *
+ * Bsr yields the index of the highest set bit; for a non-zero 32-bit
+ * value clz(x) == 31 - bsr(x), which equals bsr(x) ^ 31.
+ */
+static ir_node *gen_clz(ir_node *node) {
+	ir_node  *bsr    = gen_unop_dest(node, new_bd_ia32_Bsr);
+	ir_node  *real   = skip_Proj(bsr);
+	dbg_info *dbgi   = get_irn_dbg_info(real);
+	ir_node  *block  = get_nodes_block(real);
+	ir_node  *noreg  = ia32_new_NoReg_gp(env_cg);
+	ir_node  *mask31 = create_Immediate(NULL, 0, 31);
+
+	/* result = bsr ^ 31 */
+	return new_bd_ia32_Xor(dbgi, block, noreg, noreg, new_NoMem(), bsr, mask31);
+}
+
+/**
+ * Transform builtin ctz (count trailing zeros).
+ *
+ * Bsf directly returns the index of the lowest set bit, which is the
+ * trailing-zero count for a non-zero input (result undefined for 0,
+ * matching the builtin's contract -- TODO confirm against frontend).
+ */
+static ir_node *gen_ctz(ir_node *node) {
+	return gen_unop_dest(node, new_bd_ia32_Bsf);
+}
+
+/**
+ * Transform builtin parity.
+ *
+ * Compares the parameter against 0 and materializes the parity flag of
+ * that comparison via setp, then zero-extends the byte result to 32bit.
+ *
+ * NOTE(review): the x86 parity flag reflects only the low 8 bits of the
+ * cmp result -- verify this actually yields the parity of the full
+ * 32-bit operand, or whether the halves must be folded (xor) first.
+ */
+static ir_node *gen_parity(ir_node *node) {
+	ir_node *param = get_Builtin_param(node, 0);
+	dbg_info *dbgi = get_irn_dbg_info(node);
+
+	ir_node *block = get_nodes_block(node);
+
+	ir_node *new_block = be_transform_node(block);
+	ir_node *noreg = ia32_new_NoReg_gp(env_cg);
+	ir_node *imm, *cmp, *new_node;
+
+	ia32_address_mode_t am;
+	ia32_address_t *addr = &am.addr;
+
+
+	/* cmp param, 0 */
+	match_arguments(&am, block, NULL, param, NULL, match_am);
+	imm = create_Immediate(NULL, 0, 0);
+	cmp = new_bd_ia32_Cmp(dbgi, new_block, addr->base, addr->index,
+	                      addr->mem, imm, am.new_op2, am.ins_permuted, 0);
+	set_am_attributes(cmp, &am);
+	set_ia32_ls_mode(cmp, mode_Iu);
+
+	SET_IA32_ORIG_NODE(cmp, node);
+
+	cmp = fix_mem_proj(cmp, &am);
+
+	/* setp */
+	new_node = new_bd_ia32_Set(dbgi, new_block, cmp, ia32_pn_Cmp_parity, 0);
+	SET_IA32_ORIG_NODE(new_node, node);
+
+	/* conv to 32bit */
+	new_node = new_bd_ia32_Conv_I2I8Bit(dbgi, new_block, noreg, noreg,
+	                                    new_NoMem(), new_node, mode_Bu);
+	SET_IA32_ORIG_NODE(new_node, node);
+	return new_node;
+}
+
+/**
+ * Transform builtin popcount.
+ *
+ * Uses the popcnt instruction when available (SSE4.2/SSE4a); otherwise
+ * emits the classic parallel bit-count: pair up bits, then 2-bit, 4-bit,
+ * 8-bit and 16-bit partial sums, masking and shifting RIGHT at each
+ * step.  (The previous version erroneously used Shl here; the algorithm
+ * requires logical shift right, as the step comments already said.)
+ */
+static ir_node *gen_popcount(ir_node *node) {
+	ir_node *param = get_Builtin_param(node, 0);
+	dbg_info *dbgi = get_irn_dbg_info(node);
+
+	ir_node *block = get_nodes_block(node);
+	ir_node *new_block = be_transform_node(block);
+
+	ir_node *noreg, *nomem, *new_param;
+	ir_node *imm, *simm, *m1, *s1, *s2, *s3, *s4, *s5, *m2, *m3, *m4, *m5, *m6, *m7, *m8, *m9, *m10, *m11, *m12, *m13;
+
+	/* check for SSE4.2 or SSE4a and use the popcnt instruction */
+	if (ia32_cg_config.use_popcnt) {
+		ia32_address_mode_t am;
+		ia32_address_t *addr = &am.addr;
+		ir_node *cnt;
+
+		match_arguments(&am, block, NULL, param, NULL, match_am | match_16bit_am);
+
+		cnt = new_bd_ia32_Popcnt(dbgi, new_block, addr->base, addr->index, addr->mem, am.new_op2);
+		set_am_attributes(cnt, &am);
+		set_ia32_ls_mode(cnt, get_irn_mode(param));
+
+		SET_IA32_ORIG_NODE(cnt, node);
+		return fix_mem_proj(cnt, &am);
+	}
+
+	noreg = ia32_new_NoReg_gp(env_cg);
+	nomem = new_NoMem();
+	new_param = be_transform_node(param);
+
+	/* do the standard popcount algo */
+
+	/* m1 = x & 0x55555555 */
+	imm = create_Immediate(NULL, 0, 0x55555555);
+	m1 = new_bd_ia32_And(dbgi, new_block, noreg, noreg, nomem, new_param, imm);
+
+	/* s1 = x >> 1 */
+	simm = create_Immediate(NULL, 0, 1);
+	s1 = new_bd_ia32_Shr(dbgi, new_block, new_param, simm);
+
+	/* m2 = s1 & 0x55555555 */
+	m2 = new_bd_ia32_And(dbgi, new_block, noreg, noreg, nomem, s1, imm);
+
+	/* m3 = m1 + m2 */
+	m3 = new_bd_ia32_Lea(dbgi, new_block, m2, m1);
+
+	/* m4 = m3 & 0x33333333 */
+	imm = create_Immediate(NULL, 0, 0x33333333);
+	m4 = new_bd_ia32_And(dbgi, new_block, noreg, noreg, nomem, m3, imm);
+
+	/* s2 = m3 >> 2 */
+	simm = create_Immediate(NULL, 0, 2);
+	s2 = new_bd_ia32_Shr(dbgi, new_block, m3, simm);
+
+	/* m5 = s2 & 0x33333333 */
+	m5 = new_bd_ia32_And(dbgi, new_block, noreg, noreg, nomem, s2, imm);
+
+	/* m6 = m4 + m5 */
+	m6 = new_bd_ia32_Lea(dbgi, new_block, m4, m5);
+
+	/* m7 = m6 & 0x0F0F0F0F */
+	imm = create_Immediate(NULL, 0, 0x0F0F0F0F);
+	m7 = new_bd_ia32_And(dbgi, new_block, noreg, noreg, nomem, m6, imm);
+
+	/* s3 = m6 >> 4 */
+	simm = create_Immediate(NULL, 0, 4);
+	s3 = new_bd_ia32_Shr(dbgi, new_block, m6, simm);
+
+	/* m8 = s3 & 0x0F0F0F0F */
+	m8 = new_bd_ia32_And(dbgi, new_block, noreg, noreg, nomem, s3, imm);
+
+	/* m9 = m7 + m8 */
+	m9 = new_bd_ia32_Lea(dbgi, new_block, m7, m8);
+
+	/* m10 = m9 & 0x00FF00FF */
+	imm = create_Immediate(NULL, 0, 0x00FF00FF);
+	m10 = new_bd_ia32_And(dbgi, new_block, noreg, noreg, nomem, m9, imm);
+
+	/* s4 = m9 >> 8 */
+	simm = create_Immediate(NULL, 0, 8);
+	s4 = new_bd_ia32_Shr(dbgi, new_block, m9, simm);
+
+	/* m11 = s4 & 0x00FF00FF */
+	m11 = new_bd_ia32_And(dbgi, new_block, noreg, noreg, nomem, s4, imm);
+
+	/* m12 = m10 + m11 */
+	m12 = new_bd_ia32_Lea(dbgi, new_block, m10, m11);
+
+	/* m13 = m12 & 0x0000FFFF */
+	imm = create_Immediate(NULL, 0, 0x0000FFFF);
+	m13 = new_bd_ia32_And(dbgi, new_block, noreg, noreg, nomem, m12, imm);
+
+	/* s5 = m12 >> 16 */
+	simm = create_Immediate(NULL, 0, 16);
+	s5 = new_bd_ia32_Shr(dbgi, new_block, m12, simm);
+
+	/* res = m13 + s5 */
+	return new_bd_ia32_Lea(dbgi, new_block, m13, s5);
+}
+
+/**
+ * Transform Builtin node.
+ *
+ * Dispatches on the builtin kind to the matching gen_* lowering.  No
+ * default case: any kind not handled above falls through to panic().
+ */
+static ir_node *gen_Builtin(ir_node *node) {
+	ir_builtin_kind kind = get_Builtin_kind(node);
+
+	switch (kind) {
+	case ir_bk_return_address:
+		return gen_return_address(node);
+	case ir_bk_frame_addess:
+		return gen_frame_address(node);
+	case ir_bk_prefetch:
+		return gen_prefetch(node);
+	case ir_bk_ffs:
+		return gen_ffs(node);
+	case ir_bk_clz:
+		return gen_clz(node);
+	case ir_bk_ctz:
+		return gen_ctz(node);
+	case ir_bk_parity:
+		return gen_parity(node);
+	case ir_bk_popcount:
+		return gen_popcount(node);
+	}
+	panic("Builtin %s not implemented in IA32", get_builtin_kind_name(kind));
+}
+
+/**
+ * Transform Proj(Builtin) node.
+ *
+ * All value-producing builtins above return their result directly from
+ * gen_Builtin, so the (only) result Proj maps to the transformed node
+ * itself; prefetch produces only a memory Proj.
+ */
+static ir_node *gen_Proj_Builtin(ir_node *proj) {
+	ir_node *node = get_Proj_pred(proj);
+	ir_node *new_node = be_transform_node(node);
+	ir_builtin_kind kind = get_Builtin_kind(node);
+
+	switch (kind) {
+	case ir_bk_return_address:
+	case ir_bk_frame_addess:
+	case ir_bk_ffs:
+	case ir_bk_clz:
+	case ir_bk_ctz:
+	case ir_bk_parity:
+	case ir_bk_popcount:
+		assert(get_Proj_proj(proj) == pn_Builtin_1_result);
+		return new_node;
+	case ir_bk_prefetch:
+		assert(get_Proj_proj(proj) == pn_Builtin_M);
+		return new_node;
+	}
+	panic("Builtin %s not implemented in IA32", get_builtin_kind_name(kind));
+}
+
static ir_node *gen_be_IncSP(ir_node *node)
{
ir_node *res = be_duplicate_node(node);
return gen_Proj_Load(node);
case iro_ASM:
return gen_Proj_ASM(node);
+ case iro_Builtin:
+ return gen_Proj_Builtin(node);
case iro_Div:
case iro_Mod:
case iro_DivMod:
*/
static void register_transformers(void)
{
- ir_op *op_Mulh;
-
/* first clear the generic function pointer for all ops */
clear_irp_opcodes_generic_func();
GEN(Add);
GEN(Sub);
GEN(Mul);
+ GEN(Mulh);
GEN(And);
GEN(Or);
GEN(Eor);
BAD(EndReg);
BAD(EndExcept);
+ /* handle builtins */
+ GEN(Builtin);
+
/* handle generic backend nodes */
GEN(be_FrameAddr);
GEN(be_Call);
GEN(be_SubSP);
GEN(be_Copy);
- op_Mulh = get_op_Mulh();
- if (op_Mulh)
- GEN(Mulh);
-
#undef GEN
#undef BAD
}