#define DFP_INTMAX "9223372036854775807"
#define ULL_BIAS "18446744073709551616"
-#define TP_SFP_SIGN "ia32_sfp_sign"
-#define TP_DFP_SIGN "ia32_dfp_sign"
-#define TP_SFP_ABS "ia32_sfp_abs"
-#define TP_DFP_ABS "ia32_dfp_abs"
-#define TP_ULL_BIAS "ia32_ull_bias"
-
#define ENT_SFP_SIGN ".LC_ia32_sfp_sign"
#define ENT_DFP_SIGN ".LC_ia32_dfp_sign"
#define ENT_SFP_ABS ".LC_ia32_sfp_abs"
static ir_node *initial_fpcw = NULL;
-extern ir_op *get_op_Mulh(void);
-
typedef ir_node *construct_binop_func(dbg_info *db, ir_node *block,
ir_node *base, ir_node *index, ir_node *mem, ir_node *op1,
ir_node *op2);
res = load;
set_ia32_ls_mode(load, mode);
} else {
+ ir_mode *ls_mode;
+
floatent = create_float_const_entity(node);
+ /* create_float_const_ent is smart and sometimes creates
+ smaller entities */
+ ls_mode = get_type_mode(get_entity_type(floatent));
- load = new_bd_ia32_vfld(dbgi, block, noreg, noreg, nomem, mode);
+ load = new_bd_ia32_vfld(dbgi, block, noreg, noreg, nomem,
+ ls_mode);
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_am_sc(load, floatent);
arch_irn_add_flags(load, arch_irn_flags_rematerializable);
res = new_r_Proj(current_ir_graph, block, load, mode_vfp, pn_ia32_vfld_res);
- /* take the mode from the entity */
- set_ia32_ls_mode(load, get_type_mode(get_entity_type(floatent)));
}
}
end:
return cnst;
}
+/**
+ * Create a float type for the given mode and cache it.
+ *
+ * @param mode the mode for the float type (might be integer mode for SSE2 types)
+ * @param align alignment in bytes (at most 16)
+ */
+static ir_type *ia32_create_float_type(ir_mode *mode, unsigned align) {
+	char buf[32];
+	ir_type *tp;
+
+	/* align is used directly as a cache index below, so the caches need
+	 * 17 slots (indices 0..16); an array of 16 would be overrun for
+	 * align == 16, which callers do pass (e.g. the SSE sign/abs consts) */
+	assert(align <= 16);
+
+	if (mode == mode_Iu) {
+		static ir_type *int_Iu[17] = {NULL, };
+
+		if (int_Iu[align] == NULL) {
+			snprintf(buf, sizeof(buf), "int_Iu_%u", align);
+			int_Iu[align] = tp = new_type_primitive(new_id_from_str(buf), mode);
+			/* set the specified alignment */
+			set_type_alignment_bytes(tp, align);
+		}
+		return int_Iu[align];
+	} else if (mode == mode_Lu) {
+		static ir_type *int_Lu[17] = {NULL, };
+
+		if (int_Lu[align] == NULL) {
+			snprintf(buf, sizeof(buf), "int_Lu_%u", align);
+			int_Lu[align] = tp = new_type_primitive(new_id_from_str(buf), mode);
+			/* set the specified alignment */
+			set_type_alignment_bytes(tp, align);
+		}
+		return int_Lu[align];
+	} else if (mode == mode_F) {
+		static ir_type *float_F[17] = {NULL, };
+
+		if (float_F[align] == NULL) {
+			snprintf(buf, sizeof(buf), "float_F_%u", align);
+			float_F[align] = tp = new_type_primitive(new_id_from_str(buf), mode);
+			/* set the specified alignment */
+			set_type_alignment_bytes(tp, align);
+		}
+		return float_F[align];
+	} else if (mode == mode_D) {
+		static ir_type *float_D[17] = {NULL, };
+
+		if (float_D[align] == NULL) {
+			snprintf(buf, sizeof(buf), "float_D_%u", align);
+			float_D[align] = tp = new_type_primitive(new_id_from_str(buf), mode);
+			/* set the specified alignment */
+			set_type_alignment_bytes(tp, align);
+		}
+		return float_D[align];
+	} else {
+		/* every other (float) mode is cached in the extended slot */
+		static ir_type *float_E[17] = {NULL, };
+
+		if (float_E[align] == NULL) {
+			snprintf(buf, sizeof(buf), "float_E_%u", align);
+			float_E[align] = tp = new_type_primitive(new_id_from_str(buf), mode);
+			/* set the specified alignment */
+			set_type_alignment_bytes(tp, align);
+		}
+		return float_E[align];
+	}
+}
+
+/**
+ * Create a float[2] array type for the given atomic type.
+ *
+ * @param tp the atomic type (its mode selects the cache, its alignment
+ *           is reused for the array)
+ */
+static ir_type *ia32_create_float_array(ir_type *tp) {
+	char buf[32];
+	ir_mode *mode = get_type_mode(tp);
+	unsigned align = get_type_alignment_bytes(tp);
+	ir_type *arr;
+
+	/* align is used as a cache index: the caches hold 17 entries
+	 * (0..16) so that align == 16 does not overrun them */
+	assert(align <= 16);
+
+	if (mode == mode_F) {
+		static ir_type *float_F[17] = {NULL, };
+
+		if (float_F[align] != NULL)
+			return float_F[align];
+		snprintf(buf, sizeof(buf), "arr_float_F_%u", align);
+		arr = float_F[align] = new_type_array(new_id_from_str(buf), 1, tp);
+	} else if (mode == mode_D) {
+		static ir_type *float_D[17] = {NULL, };
+
+		if (float_D[align] != NULL)
+			return float_D[align];
+		snprintf(buf, sizeof(buf), "arr_float_D_%u", align);
+		arr = float_D[align] = new_type_array(new_id_from_str(buf), 1, tp);
+	} else {
+		static ir_type *float_E[17] = {NULL, };
+
+		if (float_E[align] != NULL)
+			return float_E[align];
+		snprintf(buf, sizeof(buf), "arr_float_E_%u", align);
+		arr = float_E[align] = new_type_array(new_id_from_str(buf), 1, tp);
+	}
+	/* fix the layout: two elements, same alignment as the element type */
+	set_type_alignment_bytes(arr, align);
+	set_type_size_bytes(arr, 2 * get_type_size_bytes(tp));
+	set_type_state(arr, layout_fixed);
+	return arr;
+}
+
/* Generates an entity for a known FP const (used for FP Neg + Abs) */
ir_entity *ia32_gen_fp_known_const(ia32_known_const_t kct)
{
static const struct {
- const char *tp_name;
const char *ent_name;
const char *cnst_str;
char mode;
- char align;
+ unsigned char align;
} names [ia32_known_const_max] = {
- { TP_SFP_SIGN, ENT_SFP_SIGN, SFP_SIGN, 0, 16 }, /* ia32_SSIGN */
- { TP_DFP_SIGN, ENT_DFP_SIGN, DFP_SIGN, 1, 16 }, /* ia32_DSIGN */
- { TP_SFP_ABS, ENT_SFP_ABS, SFP_ABS, 0, 16 }, /* ia32_SABS */
- { TP_DFP_ABS, ENT_DFP_ABS, DFP_ABS, 1, 16 }, /* ia32_DABS */
- { TP_ULL_BIAS, ENT_ULL_BIAS, ULL_BIAS, 2, 4 } /* ia32_ULLBIAS */
+ { ENT_SFP_SIGN, SFP_SIGN, 0, 16 }, /* ia32_SSIGN */
+ { ENT_DFP_SIGN, DFP_SIGN, 1, 16 }, /* ia32_DSIGN */
+ { ENT_SFP_ABS, SFP_ABS, 0, 16 }, /* ia32_SABS */
+ { ENT_DFP_ABS, DFP_ABS, 1, 16 }, /* ia32_DABS */
+ { ENT_ULL_BIAS, ULL_BIAS, 2, 4 } /* ia32_ULLBIAS */
};
static ir_entity *ent_cache[ia32_known_const_max];
- const char *tp_name, *ent_name, *cnst_str;
+ const char *ent_name, *cnst_str;
ir_type *tp;
ir_entity *ent;
tarval *tv;
ent_name = names[kct].ent_name;
if (! ent_cache[kct]) {
- tp_name = names[kct].tp_name;
cnst_str = names[kct].cnst_str;
switch (names[kct].mode) {
case 0: mode = mode_Iu; break;
case 1: mode = mode_Lu; break;
- default: mode = mode_F; break;
+ default: mode = mode_F; break;
}
tv = new_tarval_from_str(cnst_str, strlen(cnst_str), mode);
- tp = new_type_primitive(new_id_from_str(tp_name), mode);
- /* set the specified alignment */
- set_type_alignment_bytes(tp, names[kct].align);
+ tp = ia32_create_float_type(mode, names[kct].align);
- if (kct == ia32_ULLBIAS) {
- /* we are in the backend, construct a fixed type here */
- unsigned size = get_type_size_bytes(tp);
- tp = new_type_array(new_id_from_str(tp_name), 1, tp);
- set_type_alignment_bytes(tp, names[kct].align);
- set_type_size_bytes(tp, 2 * size);
- set_type_state(tp, layout_fixed);
- }
+ if (kct == ia32_ULLBIAS)
+ tp = ia32_create_float_array(tp);
ent = new_entity(get_glob_type(), new_id_from_str(ent_name), tp);
set_entity_ld_ident(ent, get_entity_ident(ent));
assert(use_am || !(flags & match_16bit_am));
if ((mode_bits == 8 && !(flags & match_8bit_am)) ||
- (mode_bits == 16 && !(flags & match_16bit_am))) {
+ (mode_bits == 16 && !(flags & match_16bit_am))) {
use_am = 0;
}
(void)orig;
if (ia32_cg_config.use_short_sex_eax) {
- const arch_register_class_t *reg_class = &ia32_reg_classes[CLASS_ia32_gp];
- ir_node *in[2];
-
- res = new_bd_ia32_Cltd(dbgi, block, val);
- in[0] = res;
- in[1] = val;
- be_new_Keep(reg_class, current_ir_graph, block, 2, in);
+ ir_node *pval = new_bd_ia32_ProduceVal(dbgi, block);
+ be_dep_on_frame(pval);
+ res = new_bd_ia32_Cltd(dbgi, block, val, pval);
} else {
ir_node *imm31 = create_Immediate(NULL, 0, 31);
res = new_bd_ia32_Sar(dbgi, block, val, imm31);
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *sel = get_Cond_selector(node);
ir_node *new_sel = be_transform_node(sel);
- int switch_min = INT_MAX;
- int switch_max = INT_MIN;
+ long switch_min = LONG_MAX;
+ long switch_max = LONG_MIN;
long default_pn = get_Cond_defaultProj(node);
ir_node *new_node;
const ir_edge_t *edge;
switch_max = pn;
}
- if ((unsigned) (switch_max - switch_min) > 256000) {
+ if ((unsigned long) (switch_max - switch_min) > 256000) {
panic("Size of switch %+F bigger than 256000", node);
}
int ins_permuted)
{
ir_node *noreg = ia32_new_NoReg_gp(env_cg);
- ir_node *nomem = new_NoMem();
ir_mode *mode = get_irn_mode(orig_node);
ir_node *new_node;
/* we might need to conv the result up */
if (get_mode_size_bits(mode) > 8) {
+ ir_node *nomem = new_NoMem();
new_node = new_bd_ia32_Conv_I2I8Bit(dbgi, new_block, noreg, noreg,
nomem, new_node, mode_Bu);
SET_IA32_ORIG_NODE(new_node, orig_node);
{
ir_graph *irg = current_ir_graph;
ir_mode *mode = get_irn_mode(psi);
- ir_node *new_node, *sub, *sbb, *eflags, *block, *noreg, *nomem;
+ ir_node *nomem = new_NoMem();
+ ir_node *new_node, *sub, *sbb, *eflags, *block, *noreg;
+
dbg_info *dbgi;
new_node = gen_binop(psi, a, b, new_bd_ia32_Sub,
return new_node;
}
+/**
+ * Create a constant array holding two float constants.
+ *
+ * @param c0       the first constant (must be a Const node)
+ * @param c1       the second constant (must be a Const node)
+ * @param new_mode IN/OUT for the mode of the constants; if NULL the
+ *                 smallest possible (lossless) mode will be used
+ *
+ * @return a new static, local, constant global entity initialized with
+ *         the two values; the caller owns no memory (entity lives in
+ *         the global type)
+ */
+static ir_entity *ia32_create_const_array(ir_node *c0, ir_node *c1, ir_mode **new_mode) {
+	ir_entity *ent;
+	ir_mode *mode = *new_mode;
+	ir_type *tp;
+	ir_initializer_t *initializer;
+	tarval *tv0 = get_Const_tarval(c0);
+	tarval *tv1 = get_Const_tarval(c1);
+
+	if (mode == NULL) {
+		/* detect the best mode for the constants: try to narrow to
+		 * mode_F, then mode_D, if both values convert losslessly */
+		mode = get_tarval_mode(tv0);
+
+		if (mode != mode_F) {
+			if (tarval_ieee754_can_conv_lossless(tv0, mode_F) &&
+			    tarval_ieee754_can_conv_lossless(tv1, mode_F)) {
+				mode = mode_F;
+				tv0 = tarval_convert_to(tv0, mode);
+				tv1 = tarval_convert_to(tv1, mode);
+			} else if (mode != mode_D) {
+				if (tarval_ieee754_can_conv_lossless(tv0, mode_D) &&
+				    tarval_ieee754_can_conv_lossless(tv1, mode_D)) {
+					mode = mode_D;
+					tv0 = tarval_convert_to(tv0, mode);
+					tv1 = tarval_convert_to(tv1, mode);
+				}
+			}
+		}
+
+	}
+
+	/* NOTE(review): alignment is fixed at 4 here regardless of mode —
+	 * confirm this is intended for mode_D/mode_E entries */
+	tp = ia32_create_float_type(mode, 4);
+	tp = ia32_create_float_array(tp);
+
+	ent = new_entity(get_glob_type(), ia32_unique_id(".LC%u"), tp);
+
+	set_entity_ld_ident(ent, get_entity_ident(ent));
+	set_entity_visibility(ent, visibility_local);
+	set_entity_variability(ent, variability_constant);
+	set_entity_allocation(ent, allocation_static);
+
+	initializer = create_initializer_compound(2);
+
+	set_initializer_compound_value(initializer, 0, create_initializer_tarval(tv0));
+	set_initializer_compound_value(initializer, 1, create_initializer_tarval(tv1));
+
+	set_entity_initializer(ent, initializer);
+
+	*new_mode = mode;
+	return ent;
+}
+
/**
* Transforms a Mux node into CMov.
*
ir_node *mux_false = get_Mux_false(node);
ir_node *cond = get_Mux_sel(node);
ir_mode *mode = get_irn_mode(node);
+ ir_node *flags;
+ ir_node *new_node;
pn_Cmp pnc;
assert(get_irn_mode(cond) == mode_b);
}
}
}
+ if (is_Const(mux_true) && is_Const(mux_false)) {
+ ia32_address_mode_t am;
+ ir_node *noreg = ia32_new_NoReg_gp(env_cg);
+ ir_node *nomem = new_NoMem();
+ ir_node *load;
+ ir_mode *new_mode;
+ unsigned scale;
+
+ flags = get_flags_node(cond, &pnc);
+ new_node = create_set_32bit(dbgi, new_block, flags, pnc, node, /*is_premuted=*/0);
+
+ if (ia32_cg_config.use_sse2) {
+ /* cannot load from different mode on SSE */
+ new_mode = mode;
+ } else {
+ /* x87 can load any mode */
+ new_mode = NULL;
+ }
+
+ am.addr.symconst_ent = ia32_create_const_array(mux_false, mux_true, &new_mode);
+
+ switch (get_mode_size_bytes(new_mode)) {
+ case 4:
+ scale = 2;
+ break;
+ case 8:
+ scale = 3;
+ break;
+ case 10:
+ /* use 2 * 5 */
+ scale = 1;
+ new_node = new_bd_ia32_Lea(dbgi, new_block, new_node, new_node);
+ set_ia32_am_scale(new_node, 2);
+ break;
+ case 12:
+ /* use 4 * 3 */
+ scale = 2;
+ new_node = new_bd_ia32_Lea(dbgi, new_block, new_node, new_node);
+ set_ia32_am_scale(new_node, 1);
+ break;
+ case 16:
+ /* arg, shift 16 NOT supported */
+ scale = 3;
+ new_node = new_bd_ia32_Add(dbgi, new_block, noreg, noreg, nomem, new_node, new_node);
+ break;
+ default:
+ panic("Unsupported constant size");
+ }
+
+ am.ls_mode = new_mode;
+ am.addr.base = noreg;
+ am.addr.index = new_node;
+ am.addr.mem = nomem;
+ am.addr.offset = 0;
+ am.addr.scale = scale;
+ am.addr.use_frame = 0;
+ am.addr.frame_entity = NULL;
+ am.addr.symconst_sign = 0;
+ am.mem_proj = am.addr.mem;
+ am.op_type = ia32_AddrModeS;
+ am.new_op1 = NULL;
+ am.new_op2 = NULL;
+ am.pinned = op_pin_state_floats;
+ am.commutative = 1;
+ am.ins_permuted = 0;
+
+ if (ia32_cg_config.use_sse2)
+ load = new_bd_ia32_xLoad(dbgi, block, am.addr.base, am.addr.index, am.addr.mem, new_mode);
+ else
+ load = new_bd_ia32_vfld(dbgi, block, am.addr.base, am.addr.index, am.addr.mem, new_mode);
+ set_am_attributes(load, &am);
+
+ return new_rd_Proj(NULL, current_ir_graph, block, load, mode_vfp, pn_ia32_res);
+ }
panic("cannot transform floating point Mux");
} else {
- ir_node *flags;
- ir_node *new_node;
-
assert(ia32_mode_needs_gp_reg(mode));
if (is_Proj(cond)) {
new_op = be_transform_node(op);
/* we convert from float ... */
if (mode_is_float(tgt_mode)) {
+#if 0
+ /* Matze: I'm a bit unsure what the following is for? seems wrong
+ * to me... */
if (src_mode == mode_E && tgt_mode == mode_D
&& !get_Conv_strict(node)) {
DB((dbg, LEVEL_1, "killed Conv(mode, mode) ..."));
return new_op;
}
+#endif
/* ... to float */
if (ia32_cg_config.use_sse2) {
set_ia32_ls_mode(res, tgt_mode);
} else {
if (get_Conv_strict(node)) {
- res = gen_x87_strict_conv(tgt_mode, new_op);
- SET_IA32_ORIG_NODE(get_Proj_pred(res), node);
- return res;
+ /* if fp_no_float_fold is not set then we assume that we
+ * don't have any float operations in a non
+ * mode_float_arithmetic mode and can skip strict upconvs */
+ if (src_bits < tgt_bits
+ && !(get_irg_fp_model(current_ir_graph) & fp_no_float_fold)) {
+ DB((dbg, LEVEL_1, "killed Conv(float, float) ..."));
+ return new_op;
+ } else {
+ res = gen_x87_strict_conv(tgt_mode, new_op);
+ SET_IA32_ORIG_NODE(get_Proj_pred(res), node);
+ return res;
+ }
}
DB((dbg, LEVEL_1, "killed Conv(float, float) ..."));
return new_op;
nomem, new_op);
set_ia32_ls_mode(res, tgt_mode);
} else {
+ unsigned int_mantissa = get_mode_size_bits(src_mode) - (mode_is_signed(src_mode) ? 1 : 0);
+ unsigned float_mantissa = tarval_ieee754_get_mantissa_size(tgt_mode);
res = gen_x87_gp_to_fp(node, src_mode);
- if (get_Conv_strict(node)) {
- /* The strict-Conv is only necessary, if the int mode has more bits
- * than the float mantissa */
- size_t int_mantissa = get_mode_size_bits(src_mode) - (mode_is_signed(src_mode) ? 1 : 0);
- size_t float_mantissa;
- /* FIXME There is no way to get the mantissa size of a mode */
- switch (get_mode_size_bits(tgt_mode)) {
- case 32: float_mantissa = 23 + 1; break; // + 1 for implicit 1
- case 64: float_mantissa = 52 + 1; break;
- case 80:
- case 96: float_mantissa = 64; break;
- default: float_mantissa = 0; break;
- }
- if (float_mantissa < int_mantissa) {
- res = gen_x87_strict_conv(tgt_mode, res);
- SET_IA32_ORIG_NODE(get_Proj_pred(res), node);
- }
+
+ /* we need a strict-Conv, if the int mode has more bits than the
+ * float mantissa */
+ if (float_mantissa < int_mantissa) {
+ res = gen_x87_strict_conv(tgt_mode, res);
+ SET_IA32_ORIG_NODE(get_Proj_pred(res), node);
}
return res;
}
return call;
}
+/**
+ * Transform Builtin trap.
+ *
+ * @return an ia32 UD2 node threaded on the builtin's memory input
+ */
+static ir_node *gen_trap(ir_node *node) {
+	dbg_info *dbgi = get_irn_dbg_info(node);
+	ir_node *block = be_transform_node(get_nodes_block(node));
+	ir_node *mem = be_transform_node(get_Builtin_mem(node));
+
+	return new_bd_ia32_UD2(dbgi, block, mem);
+}
+
+/**
+ * Transform Builtin debugbreak.
+ *
+ * @return an ia32 Breakpoint node (presumably int3 — see the emitter)
+ *         threaded on the builtin's memory input
+ */
+static ir_node *gen_debugbreak(ir_node *node) {
+	dbg_info *dbgi = get_irn_dbg_info(node);
+	ir_node *block = be_transform_node(get_nodes_block(node));
+	ir_node *mem = be_transform_node(get_Builtin_mem(node));
+
+	return new_bd_ia32_Breakpoint(dbgi, block, mem);
+}
+
+/**
+ * Transform Builtin return_address.
+ *
+ * Parameter 0 must be a Const (the frame level to walk up),
+ * parameter 1 is the frame pointer to start from.
+ */
+static ir_node *gen_return_address(ir_node *node) {
+	ir_node *param = get_Builtin_param(node, 0);
+	ir_node *frame = get_Builtin_param(node, 1);
+	dbg_info *dbgi = get_irn_dbg_info(node);
+	tarval *tv = get_Const_tarval(param);
+	unsigned long value = get_tarval_long(tv);
+
+	ir_node *block = be_transform_node(get_nodes_block(node));
+	ir_node *ptr = be_transform_node(frame);
+	ir_node *noreg = ia32_new_NoReg_gp(env_cg);
+	ir_node *load;
+
+	if (value > 0) {
+		/* level > 0: climb up 'value' stack frames first */
+		ir_node *cnt = new_bd_ia32_ProduceVal(dbgi, block);
+		ir_node *res = new_bd_ia32_ProduceVal(dbgi, block);
+		ptr = new_bd_ia32_ClimbFrame(dbgi, block, ptr, cnt, res, value);
+	}
+
+	/* load the return address from this frame */
+	load = new_bd_ia32_Load(dbgi, block, ptr, noreg, get_irg_no_mem(current_ir_graph));
+
+	set_irn_pinned(load, get_irn_pinned(node));
+	set_ia32_op_type(load, ia32_AddrModeS);
+	set_ia32_ls_mode(load, mode_Iu);
+
+	set_ia32_am_offs_int(load, 0);
+	set_ia32_use_frame(load);
+	set_ia32_frame_ent(load, ia32_get_return_address_entity());
+
+	if (get_irn_pinned(node) == op_pin_state_floats) {
+		/* unpinned loads from a constant address may be rematerialized;
+		 * all load variants share the same result proj number */
+		assert(pn_ia32_xLoad_res == pn_ia32_vfld_res
+				&& pn_ia32_vfld_res == pn_ia32_Load_res
+				&& pn_ia32_Load_res == pn_ia32_res);
+		arch_irn_add_flags(load, arch_irn_flags_rematerializable);
+	}
+
+	SET_IA32_ORIG_NODE(load, node);
+	return new_r_Proj(current_ir_graph, block, load, mode_Iu, pn_ia32_Load_res);
+}
+
+/**
+ * Transform Builtin frame_address.
+ *
+ * Parameter 0 must be a Const (the frame level to walk up),
+ * parameter 1 is the frame pointer to start from.
+ */
+static ir_node *gen_frame_address(ir_node *node) {
+	ir_node *param = get_Builtin_param(node, 0);
+	ir_node *frame = get_Builtin_param(node, 1);
+	dbg_info *dbgi = get_irn_dbg_info(node);
+	tarval *tv = get_Const_tarval(param);
+	unsigned long value = get_tarval_long(tv);
+
+	ir_node *block = be_transform_node(get_nodes_block(node));
+	ir_node *ptr = be_transform_node(frame);
+	ir_node *noreg = ia32_new_NoReg_gp(env_cg);
+	ir_node *load;
+	ir_entity *ent;
+
+	if (value > 0) {
+		/* level > 0: climb up 'value' stack frames first */
+		ir_node *cnt = new_bd_ia32_ProduceVal(dbgi, block);
+		ir_node *res = new_bd_ia32_ProduceVal(dbgi, block);
+		ptr = new_bd_ia32_ClimbFrame(dbgi, block, ptr, cnt, res, value);
+	}
+
+	/* load the frame address from this frame */
+	load = new_bd_ia32_Load(dbgi, block, ptr, noreg, get_irg_no_mem(current_ir_graph));
+
+	set_irn_pinned(load, get_irn_pinned(node));
+	set_ia32_op_type(load, ia32_AddrModeS);
+	set_ia32_ls_mode(load, mode_Iu);
+
+	ent = ia32_get_frame_address_entity();
+	if (ent != NULL) {
+		set_ia32_am_offs_int(load, 0);
+		set_ia32_use_frame(load);
+		set_ia32_frame_ent(load, ent);
+	} else {
+		/* will fail anyway, but gcc does this: */
+		set_ia32_am_offs_int(load, 0);
+	}
+
+	if (get_irn_pinned(node) == op_pin_state_floats) {
+		/* unpinned loads may be rematerialized; all load variants share
+		 * the same result proj number */
+		assert(pn_ia32_xLoad_res == pn_ia32_vfld_res
+				&& pn_ia32_vfld_res == pn_ia32_Load_res
+				&& pn_ia32_Load_res == pn_ia32_res);
+		arch_irn_add_flags(load, arch_irn_flags_rematerializable);
+	}
+
+	SET_IA32_ORIG_NODE(load, node);
+	return new_r_Proj(current_ir_graph, block, load, mode_Iu, pn_ia32_Load_res);
+}
+
+/**
+ * Transform Builtin prefetch.
+ *
+ * Parameter 0 is the address, parameter 1 the read/write flag
+ * (1 == prefetch for write), parameter 2 the locality hint (0..3).
+ * If no prefetch instruction is available the builtin degrades to a
+ * pass-through of its memory input.
+ */
+static ir_node *gen_prefetch(ir_node *node) {
+	dbg_info *dbgi;
+	ir_node *ptr, *block, *mem, *noreg, *base, *index;
+	ir_node *param, *new_node;
+	long rw, locality;
+	tarval *tv;
+	ia32_address_t addr;
+
+	if (!ia32_cg_config.use_sse_prefetch && !ia32_cg_config.use_3dnow_prefetch) {
+		/* no prefetch at all, route memory */
+		return be_transform_node(get_Builtin_mem(node));
+	}
+
+	/* the rw flag must be a Const */
+	param = get_Builtin_param(node, 1);
+	tv = get_Const_tarval(param);
+	rw = get_tarval_long(tv);
+
+	/* construct load address */
+	memset(&addr, 0, sizeof(addr));
+	ptr = get_Builtin_param(node, 0);
+	ia32_create_address_mode(&addr, ptr, 0);
+	base = addr.base;
+	index = addr.index;
+
+	noreg = ia32_new_NoReg_gp(env_cg);
+	if (base == NULL) {
+		base = noreg;
+	} else {
+		base = be_transform_node(base);
+	}
+
+	if (index == NULL) {
+		index = noreg;
+	} else {
+		index = be_transform_node(index);
+	}
+
+	dbgi = get_irn_dbg_info(node);
+	block = be_transform_node(get_nodes_block(node));
+	mem = be_transform_node(get_Builtin_mem(node));
+
+	if (rw == 1 && ia32_cg_config.use_3dnow_prefetch) {
+		/* we have 3DNow!, this was already checked above */
+		new_node = new_bd_ia32_PrefetchW(dbgi, block, base, index, mem);
+	} else if (ia32_cg_config.use_sse_prefetch) {
+		/* note: rw == 1 is IGNORED in that case */
+		param = get_Builtin_param(node, 2);
+		tv = get_Const_tarval(param);
+		locality = get_tarval_long(tv);
+
+		/* SSE style prefetch */
+		switch (locality) {
+		case 0:
+			new_node = new_bd_ia32_PrefetchNTA(dbgi, block, base, index, mem);
+			break;
+		case 1:
+			new_node = new_bd_ia32_Prefetch2(dbgi, block, base, index, mem);
+			break;
+		case 2:
+			new_node = new_bd_ia32_Prefetch1(dbgi, block, base, index, mem);
+			break;
+		default:
+			new_node = new_bd_ia32_Prefetch0(dbgi, block, base, index, mem);
+			break;
+		}
+	} else {
+		assert(ia32_cg_config.use_3dnow_prefetch);
+		/* 3DNow! style prefetch */
+		new_node = new_bd_ia32_Prefetch(dbgi, block, base, index, mem);
+	}
+
+	set_irn_pinned(new_node, get_irn_pinned(node));
+	set_ia32_op_type(new_node, ia32_AddrModeS);
+	set_ia32_ls_mode(new_node, mode_Bu);
+	set_address(new_node, &addr);
+
+	SET_IA32_ORIG_NODE(new_node, node);
+
+	be_dep_on_frame(new_node);
+	return new_r_Proj(current_ir_graph, block, new_node, mode_M, pn_ia32_Prefetch_M);
+}
+
+/**
+ * Transform a bsf-like builtin: a unary instruction whose single operand
+ * may be folded into an address mode (bsf, bsr, ...).
+ *
+ * @param node the Builtin node (its parameter 0 is the operand)
+ * @param func constructor for the concrete ia32 instruction
+ */
+static ir_node *gen_unop_AM(ir_node *node, construct_binop_dest_func *func)
+{
+	ir_node *param = get_Builtin_param(node, 0);
+	dbg_info *dbgi = get_irn_dbg_info(node);
+
+	ir_node *block = get_nodes_block(node);
+	ir_node *new_block = be_transform_node(block);
+
+	ia32_address_mode_t am;
+	ia32_address_t *addr = &am.addr;
+	ir_node *cnt;
+
+	match_arguments(&am, block, NULL, param, NULL, match_am);
+
+	cnt = func(dbgi, new_block, addr->base, addr->index, addr->mem, am.new_op2);
+	set_am_attributes(cnt, &am);
+	set_ia32_ls_mode(cnt, get_irn_mode(param));
+
+	SET_IA32_ORIG_NODE(cnt, node);
+	return fix_mem_proj(cnt, &am);
+}
+
+/**
+ * Transform builtin ffs (find first set: 1-based index of the lowest
+ * set bit, 0 if the input is zero).
+ *
+ * Computed branch-free as (bsf(x) | -(x == 0)) + 1: for x == 0 the OR
+ * forces the result to -1, so the final add yields 0.
+ */
+static ir_node *gen_ffs(ir_node *node)
+{
+	ir_node *bsf = gen_unop_AM(node, new_bd_ia32_Bsf);
+	ir_node *real = skip_Proj(bsf);
+	dbg_info *dbgi = get_irn_dbg_info(real);
+	ir_node *block = get_nodes_block(real);
+	ir_node *noreg = ia32_new_NoReg_gp(env_cg);
+	ir_node *nomem = new_NoMem();
+	ir_node *flag, *set, *conv, *neg, *or;
+
+	/* bsf x */
+	if (get_irn_mode(real) != mode_T) {
+		set_irn_mode(real, mode_T);
+		bsf = new_r_Proj(current_ir_graph, block, real, mode_Iu, pn_ia32_res);
+	}
+
+	flag = new_r_Proj(current_ir_graph, block, real, mode_b, pn_ia32_flags);
+
+	/* sete: 1 iff the bsf input was zero (ZF set) */
+	set = new_bd_ia32_Set(dbgi, block, flag, pn_Cmp_Eq, 0);
+	SET_IA32_ORIG_NODE(set, node);
+
+	/* conv to 32bit */
+	conv = new_bd_ia32_Conv_I2I8Bit(dbgi, block, noreg, noreg, nomem, set, mode_Bu);
+	SET_IA32_ORIG_NODE(conv, node);
+
+	/* neg: 0 -> 0, 1 -> -1 (all bits set) */
+	neg = new_bd_ia32_Neg(dbgi, block, conv);
+
+	/* or: bsf result, or -1 if the input was zero */
+	or = new_bd_ia32_Or(dbgi, block, noreg, noreg, nomem, bsf, neg);
+	set_ia32_commutative(or);
+
+	/* add 1: ffs(0) == 0, otherwise bit index + 1 */
+	return new_bd_ia32_Add(dbgi, block, noreg, noreg, nomem, or, create_Immediate(NULL, 0, 1));
+}
+
+/**
+ * Transform builtin clz (count leading zeros).
+ *
+ * clz(x) = 31 - bsr(x); since bsr(x) is in [0,31] this equals
+ * 31 ^ bsr(x). NOTE(review): like bsr itself the result is undefined
+ * for input 0 — confirm that matches the builtin's contract here.
+ */
+static ir_node *gen_clz(ir_node *node)
+{
+	ir_node *bsr = gen_unop_AM(node, new_bd_ia32_Bsr);
+	ir_node *real = skip_Proj(bsr);
+	dbg_info *dbgi = get_irn_dbg_info(real);
+	ir_node *block = get_nodes_block(real);
+	ir_node *imm = create_Immediate(NULL, 0, 31);
+	ir_node *noreg = ia32_new_NoReg_gp(env_cg);
+
+	return new_bd_ia32_Xor(dbgi, block, noreg, noreg, new_NoMem(), bsr, imm);
+}
+
+/**
+ * Transform builtin ctz (count trailing zeros): maps directly to bsf.
+ */
+static ir_node *gen_ctz(ir_node *node)
+{
+	return gen_unop_AM(node, new_bd_ia32_Bsf);
+}
+
+/**
+ * Transform builtin parity.
+ *
+ * NOTE(review): the x86 parity flag only reflects the least-significant
+ * byte of the result, so cmp + setp gives the parity of the low byte
+ * only, not of the whole 32-bit word — verify this is what the builtin
+ * is specified to compute here.
+ */
+static ir_node *gen_parity(ir_node *node)
+{
+	ir_node *param = get_Builtin_param(node, 0);
+	dbg_info *dbgi = get_irn_dbg_info(node);
+
+	ir_node *block = get_nodes_block(node);
+
+	ir_node *new_block = be_transform_node(block);
+	ir_node *noreg = ia32_new_NoReg_gp(env_cg);
+	ir_node *imm, *cmp, *new_node;
+
+	ia32_address_mode_t am;
+	ia32_address_t *addr = &am.addr;
+
+
+	/* cmp param, 0 */
+	match_arguments(&am, block, NULL, param, NULL, match_am);
+	imm = create_Immediate(NULL, 0, 0);
+	cmp = new_bd_ia32_Cmp(dbgi, new_block, addr->base, addr->index,
+	                      addr->mem, imm, am.new_op2, am.ins_permuted, 0);
+	set_am_attributes(cmp, &am);
+	set_ia32_ls_mode(cmp, mode_Iu);
+
+	SET_IA32_ORIG_NODE(cmp, node);
+
+	cmp = fix_mem_proj(cmp, &am);
+
+	/* setp */
+	new_node = new_bd_ia32_Set(dbgi, new_block, cmp, ia32_pn_Cmp_parity, 0);
+	SET_IA32_ORIG_NODE(new_node, node);
+
+	/* conv to 32bit */
+	new_node = new_bd_ia32_Conv_I2I8Bit(dbgi, new_block, noreg, noreg,
+	                                    new_NoMem(), new_node, mode_Bu);
+	SET_IA32_ORIG_NODE(new_node, node);
+	return new_node;
+}
+
+/**
+ * Transform builtin popcount (number of set bits).
+ *
+ * Uses the hardware popcnt instruction when available, otherwise the
+ * classic parallel (SWAR) bit-count. Note that all the "x >> n" steps
+ * of that algorithm must be logical RIGHT shifts (Shr).
+ */
+static ir_node *gen_popcount(ir_node *node) {
+	ir_node *param = get_Builtin_param(node, 0);
+	dbg_info *dbgi = get_irn_dbg_info(node);
+
+	ir_node *block = get_nodes_block(node);
+	ir_node *new_block = be_transform_node(block);
+
+	ir_node *noreg, *nomem, *new_param;
+	ir_node *imm, *simm, *m1, *s1, *s2, *s3, *s4, *s5, *m2, *m3, *m4, *m5, *m6, *m7, *m8, *m9, *m10, *m11, *m12, *m13;
+
+	/* check for SSE4.2 or SSE4a and use the popcnt instruction */
+	if (ia32_cg_config.use_popcnt) {
+		ia32_address_mode_t am;
+		ia32_address_t *addr = &am.addr;
+		ir_node *cnt;
+
+		match_arguments(&am, block, NULL, param, NULL, match_am | match_16bit_am);
+
+		cnt = new_bd_ia32_Popcnt(dbgi, new_block, addr->base, addr->index, addr->mem, am.new_op2);
+		set_am_attributes(cnt, &am);
+		set_ia32_ls_mode(cnt, get_irn_mode(param));
+
+		SET_IA32_ORIG_NODE(cnt, node);
+		return fix_mem_proj(cnt, &am);
+	}
+
+	noreg = ia32_new_NoReg_gp(env_cg);
+	nomem = new_NoMem();
+	new_param = be_transform_node(param);
+
+	/* do the standard popcount algo */
+
+	/* m1 = x & 0x55555555 */
+	imm = create_Immediate(NULL, 0, 0x55555555);
+	m1 = new_bd_ia32_And(dbgi, new_block, noreg, noreg, nomem, new_param, imm);
+
+	/* s1 = x >> 1 */
+	simm = create_Immediate(NULL, 0, 1);
+	s1 = new_bd_ia32_Shr(dbgi, new_block, new_param, simm);
+
+	/* m2 = s1 & 0x55555555 */
+	m2 = new_bd_ia32_And(dbgi, new_block, noreg, noreg, nomem, s1, imm);
+
+	/* m3 = m1 + m2 */
+	m3 = new_bd_ia32_Lea(dbgi, new_block, m2, m1);
+
+	/* m4 = m3 & 0x33333333 */
+	imm = create_Immediate(NULL, 0, 0x33333333);
+	m4 = new_bd_ia32_And(dbgi, new_block, noreg, noreg, nomem, m3, imm);
+
+	/* s2 = m3 >> 2 */
+	simm = create_Immediate(NULL, 0, 2);
+	s2 = new_bd_ia32_Shr(dbgi, new_block, m3, simm);
+
+	/* m5 = s2 & 0x33333333 */
+	m5 = new_bd_ia32_And(dbgi, new_block, noreg, noreg, nomem, s2, imm);
+
+	/* m6 = m4 + m5 */
+	m6 = new_bd_ia32_Lea(dbgi, new_block, m4, m5);
+
+	/* m7 = m6 & 0x0F0F0F0F */
+	imm = create_Immediate(NULL, 0, 0x0F0F0F0F);
+	m7 = new_bd_ia32_And(dbgi, new_block, noreg, noreg, nomem, m6, imm);
+
+	/* s3 = m6 >> 4 */
+	simm = create_Immediate(NULL, 0, 4);
+	s3 = new_bd_ia32_Shr(dbgi, new_block, m6, simm);
+
+	/* m8 = s3 & 0x0F0F0F0F */
+	m8 = new_bd_ia32_And(dbgi, new_block, noreg, noreg, nomem, s3, imm);
+
+	/* m9 = m7 + m8 */
+	m9 = new_bd_ia32_Lea(dbgi, new_block, m7, m8);
+
+	/* m10 = m9 & 0x00FF00FF */
+	imm = create_Immediate(NULL, 0, 0x00FF00FF);
+	m10 = new_bd_ia32_And(dbgi, new_block, noreg, noreg, nomem, m9, imm);
+
+	/* s4 = m9 >> 8 */
+	simm = create_Immediate(NULL, 0, 8);
+	s4 = new_bd_ia32_Shr(dbgi, new_block, m9, simm);
+
+	/* m11 = s4 & 0x00FF00FF */
+	m11 = new_bd_ia32_And(dbgi, new_block, noreg, noreg, nomem, s4, imm);
+
+	/* m12 = m10 + m11 */
+	m12 = new_bd_ia32_Lea(dbgi, new_block, m10, m11);
+
+	/* m13 = m12 & 0x0000FFFF */
+	imm = create_Immediate(NULL, 0, 0x0000FFFF);
+	m13 = new_bd_ia32_And(dbgi, new_block, noreg, noreg, nomem, m12, imm);
+
+	/* s5 = m12 >> 16 */
+	simm = create_Immediate(NULL, 0, 16);
+	s5 = new_bd_ia32_Shr(dbgi, new_block, m12, simm);
+
+	/* res = m13 + s5 */
+	return new_bd_ia32_Lea(dbgi, new_block, m13, s5);
+}
+
+/**
+ * Transform builtin byte swap (reverse the operand's byte order).
+ */
+static ir_node *gen_bswap(ir_node *node) {
+	ir_node *param = be_transform_node(get_Builtin_param(node, 0));
+	dbg_info *dbgi = get_irn_dbg_info(node);
+
+	ir_node *block = get_nodes_block(node);
+	ir_node *new_block = be_transform_node(block);
+	ir_mode *mode = get_irn_mode(param);
+	unsigned size = get_mode_size_bits(mode);
+	ir_node *m1, *m2, *m3, *m4, *s1, *s2, *s3, *s4, *noreg, *nomem;
+
+	switch (size) {
+	case 32:
+		if (ia32_cg_config.use_i486) {
+			/* swap available */
+			return new_bd_ia32_Bswap(dbgi, new_block, param);
+		}
+		/* no bswap instruction (i386): compose the result as
+		 *   (x << 24) | ((x << 8) & 0x00FF0000)
+		 * | ((x >> 8) & 0x0000FF00) | (x >> 24) */
+		s1 = new_bd_ia32_Shl(dbgi, new_block, param, create_Immediate(NULL, 0, 24));
+		s2 = new_bd_ia32_Shl(dbgi, new_block, param, create_Immediate(NULL, 0, 8));
+
+		noreg = ia32_new_NoReg_gp(env_cg);
+		nomem = new_NoMem();
+
+		/* (x << 8) keeps byte 1 at bits 23..16 -> mask 0x00FF0000 */
+		m1 = new_bd_ia32_And(dbgi, new_block, noreg, noreg, nomem, s2, create_Immediate(NULL, 0, 0xFF0000));
+		m2 = new_bd_ia32_Lea(dbgi, new_block, s1, m1);
+
+		s3 = new_bd_ia32_Shr(dbgi, new_block, param, create_Immediate(NULL, 0, 8));
+
+		/* (x >> 8) keeps byte 2 at bits 15..8 -> mask 0x0000FF00 */
+		m3 = new_bd_ia32_And(dbgi, new_block, noreg, noreg, nomem, s3, create_Immediate(NULL, 0, 0xFF00));
+		m4 = new_bd_ia32_Lea(dbgi, new_block, m2, m3);
+
+		s4 = new_bd_ia32_Shr(dbgi, new_block, param, create_Immediate(NULL, 0, 24));
+		return new_bd_ia32_Lea(dbgi, new_block, m4, s4);
+
+	case 16:
+		/* swap16 always available */
+		return new_bd_ia32_Bswap16(dbgi, new_block, param);
+
+	default:
+		panic("Invalid bswap size (%d)", size);
+	}
+}
+
+/**
+ * Transform Builtin node: dispatch on the builtin kind.
+ *
+ * Deliberately no default case — a new ir_bk_ value should produce a
+ * compiler warning here; unhandled kinds fall through to the panic.
+ */
+static ir_node *gen_Builtin(ir_node *node) {
+	ir_builtin_kind kind = get_Builtin_kind(node);
+
+	switch (kind) {
+	case ir_bk_trap:
+		return gen_trap(node);
+	case ir_bk_debugbreak:
+		return gen_debugbreak(node);
+	case ir_bk_return_address:
+		return gen_return_address(node);
+	case ir_bk_frame_addess:
+		return gen_frame_address(node);
+	case ir_bk_prefetch:
+		return gen_prefetch(node);
+	case ir_bk_ffs:
+		return gen_ffs(node);
+	case ir_bk_clz:
+		return gen_clz(node);
+	case ir_bk_ctz:
+		return gen_ctz(node);
+	case ir_bk_parity:
+		return gen_parity(node);
+	case ir_bk_popcount:
+		return gen_popcount(node);
+	case ir_bk_bswap:
+		return gen_bswap(node);
+	}
+	panic("Builtin %s not implemented in IA32", get_builtin_kind_name(kind));
+}
+
+/**
+ * Transform Proj(Builtin) node.
+ *
+ * All handled builtins produce a single node, so the Proj simply maps
+ * to the transformed builtin itself (result Projs for value builtins,
+ * memory Projs for the side-effect-only ones).
+ */
+static ir_node *gen_Proj_Builtin(ir_node *proj) {
+	ir_node *node = get_Proj_pred(proj);
+	ir_node *new_node = be_transform_node(node);
+	ir_builtin_kind kind = get_Builtin_kind(node);
+
+	switch (kind) {
+	case ir_bk_return_address:
+	case ir_bk_frame_addess: /* sic: spelling matches the enum */
+	case ir_bk_ffs:
+	case ir_bk_clz:
+	case ir_bk_ctz:
+	case ir_bk_parity:
+	case ir_bk_popcount:
+	case ir_bk_bswap:
+		assert(get_Proj_proj(proj) == pn_Builtin_1_result);
+		return new_node;
+	case ir_bk_trap:
+	case ir_bk_debugbreak:
+	case ir_bk_prefetch:
+		assert(get_Proj_proj(proj) == pn_Builtin_M);
+		return new_node;
+	}
+	panic("Builtin %s not implemented in IA32", get_builtin_kind_name(kind));
+}
+
static ir_node *gen_be_IncSP(ir_node *node)
{
ir_node *res = be_duplicate_node(node);
static ir_node *gen_Proj_ASM(ir_node *node)
{
-	ir_node *pred;
-	ir_node *new_pred;
-	ir_node *block;
-
-	if (get_irn_mode(node) != mode_M)
-		return be_duplicate_node(node);
+	ir_mode *mode = get_irn_mode(node);
+	ir_node *pred = get_Proj_pred(node);
+	ir_node *new_pred = be_transform_node(pred);
+	ir_node *block = get_nodes_block(new_pred);
+	long pos = get_Proj_proj(node);
+
+	if (mode == mode_M) {
+		/* NOTE(review): assumes the memory output is numbered one past
+		 * the register outputs of the new ASM node — confirm against
+		 * the ASM construction */
+		pos = arch_irn_get_n_outs(new_pred) + 1;
+	} else if (mode_is_int(mode) || mode_is_reference(mode)) {
+		/* int/pointer results all live in gp registers (mode_Iu) */
+		mode = mode_Iu;
+	} else if (mode_is_float(mode)) {
+		/* float results live in x87 registers (mode_E) */
+		mode = mode_E;
+	} else {
+		panic("unexpected proj mode at ASM");
+	}
-	pred = get_Proj_pred(node);
-	new_pred = be_transform_node(pred);
-	block = get_nodes_block(new_pred);
-	return new_r_Proj(current_ir_graph, block, new_pred, mode_M,
-	                  arch_irn_get_n_outs(new_pred) + 1);
+	return new_r_Proj(current_ir_graph, block, new_pred, mode, pos);
}
/**
return gen_Proj_Load(node);
case iro_ASM:
return gen_Proj_ASM(node);
+ case iro_Builtin:
+ return gen_Proj_Builtin(node);
case iro_Div:
case iro_Mod:
case iro_DivMod:
*/
static void register_transformers(void)
{
- ir_op *op_Mulh;
-
/* first clear the generic function pointer for all ops */
clear_irp_opcodes_generic_func();
GEN(Add);
GEN(Sub);
GEN(Mul);
+ GEN(Mulh);
GEN(And);
GEN(Or);
GEN(Eor);
BAD(EndReg);
BAD(EndExcept);
+ /* handle builtins */
+ GEN(Builtin);
+
/* handle generic backend nodes */
GEN(be_FrameAddr);
GEN(be_Call);
GEN(be_SubSP);
GEN(be_Copy);
- op_Mulh = get_op_Mulh();
- if (op_Mulh)
- GEN(Mulh);
-
#undef GEN
#undef BAD
}