return res;
}
+/**
+ * Creates an immediate.
+ *
+ * The node is materialized in the start block and bound to the virtual
+ * NOREG gp register, so it can be attached as an extra operand to any
+ * node without creating register pressure.
+ *
+ * @param symconst if set, create a SymConst immediate
+ * @param symconst_sign sign for the symconst
+ * @param val integer value for the immediate
+ *
+ * @return the new ia32 Immediate node
+ */
+static ir_node *create_Immediate(ir_entity *symconst, int symconst_sign, long val)
+{
+ ir_graph *irg = current_ir_graph;
+ ir_node *start_block = get_irg_start_block(irg);
+ ir_node *immediate = new_rd_ia32_Immediate(NULL, irg, start_block,
+ symconst, symconst_sign, val);
+ arch_set_irn_register(env_cg->arch_env, immediate, &ia32_gp_regs[REG_GP_NOREG]);
+
+ return immediate;
+}
+
/**
* Get an atomic entity that is initialized with a tarval
*/
ir_entity *res;
ir_graph *rem;
- if (! e) {
+ if (e == NULL) {
ir_mode *mode = get_irn_mode(cnst);
ir_type *tp = get_Const_type(cnst);
if (tp == firm_unknown_type)
{
tarval *tv = get_Const_tarval(node);
- if(tarval_is_null(tv) || tarval_is_one(tv))
+ if (tarval_is_null(tv) || tarval_is_one(tv))
+ return 1;
+
+ /* TODO: match all the other float constants */
+ return 0;
+}
+
+/**
+ * returns true if constant can be created with a simple float command
+ */
+static int is_simple_sse_Const(ir_node *node)
+{
+ tarval *tv = get_Const_tarval(node);
+
+ if (get_tarval_mode(tv) == mode_F)
+ return 1;
+
+ if (tarval_is_null(tv) || tarval_is_one(tv))
return 1;
/* TODO: match all the other float constants */
ir_entity *floatent;
if (ia32_cg_config.use_sse2) {
- if (is_Const_null(node)) {
+ tarval *tv = get_Const_tarval(node);
+ if (tarval_is_null(tv)) {
load = new_rd_ia32_xZero(dbgi, irg, block);
set_ia32_ls_mode(load, mode);
res = load;
+ } else if (tarval_is_one(tv)) {
+ int cnst = mode == mode_F ? 26 : 55;
+ ir_node *imm1 = create_Immediate(NULL, 0, cnst);
+ ir_node *imm2 = create_Immediate(NULL, 0, 2);
+ ir_node *pslld, *psrld;
+
+ load = new_rd_ia32_xAllOnes(dbgi, irg, block);
+ set_ia32_ls_mode(load, mode);
+ pslld = new_rd_ia32_xPslld(dbgi, irg, block, load, imm1);
+ set_ia32_ls_mode(pslld, mode);
+ psrld = new_rd_ia32_xPsrld(dbgi, irg, block, pslld, imm2);
+ set_ia32_ls_mode(psrld, mode);
+ res = psrld;
+ } else if (mode == mode_F) {
+ /* we can place any 32bit constant by using a movd gp, sse */
+ unsigned val = get_tarval_sub_bits(tv, 0) |
+ (get_tarval_sub_bits(tv, 1) << 8) |
+ (get_tarval_sub_bits(tv, 2) << 16) |
+ (get_tarval_sub_bits(tv, 3) << 24);
+ ir_node *cnst = new_rd_ia32_Const(dbgi, irg, block, NULL, 0, val);
+ load = new_rd_ia32_xMovd(dbgi, irg, block, cnst);
+ set_ia32_ls_mode(load, mode);
+ res = load;
} else {
floatent = create_float_const_entity(node);
load = new_rd_ia32_xLoad(dbgi, irg, block, noreg, noreg, nomem,
- mode);
+ mode);
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_am_sc(load, floatent);
set_ia32_flags(load, get_ia32_flags(load) | arch_irn_flags_rematerializable);
tv = tarval_convert_to(tv, mode_Iu);
- if(tv == get_tarval_bad() || tv == get_tarval_undefined()
- || tv == NULL) {
+ if (tv == get_tarval_bad() || tv == get_tarval_undefined() ||
+ tv == NULL) {
panic("couldn't convert constant tarval (%+F)", node);
}
val = get_tarval_long(tv);
long pn;
/* float constants are always available */
- if(is_Const(node) && mode_is_float(mode)) {
- if(!is_simple_x87_Const(node))
- return 0;
- if(get_irn_n_edges(node) > 1)
+ if (is_Const(node) && mode_is_float(mode)) {
+ if (ia32_cg_config.use_sse2) {
+ if (is_simple_sse_Const(node))
+ return 0;
+ } else {
+ if (is_simple_x87_Const(node))
+ return 0;
+ }
+ if (get_irn_n_edges(node) > 1)
return 0;
return 1;
}
ir_node *mem;
ir_node *new_mem;
- if(is_Const(node)) {
+ if (is_Const(node)) {
ir_entity *entity = create_float_const_entity(node);
addr->base = noreg_gp;
addr->index = noreg_gp;
assert(use_am || !(flags & match_8bit_am));
assert(use_am || !(flags & match_16bit_am));
- if(mode_bits == 8) {
- if (! (flags & match_8bit_am))
+ if (mode_bits == 8) {
+ if (!(flags & match_8bit_am))
use_am = 0;
/* we don't automatically add upconvs yet */
assert((flags & match_mode_neutral) || (flags & match_8bit));
- } else if(mode_bits == 16) {
- if(! (flags & match_16bit_am))
+ } else if (mode_bits == 16) {
+ if (!(flags & match_16bit_am))
use_am = 0;
/* we don't automatically add upconvs yet */
assert((flags & match_mode_neutral) || (flags & match_16bit));
/* we can simply skip downconvs for mode neutral nodes: the upper bits
* can be random for these operations */
- if(flags & match_mode_neutral) {
+ if (flags & match_mode_neutral) {
op2 = ia32_skip_downconv(op2);
- if(op1 != NULL) {
+ if (op1 != NULL) {
op1 = ia32_skip_downconv(op1);
}
}
/* match immediates. firm nodes are normalized: constants are always on the
* op2 input */
new_op2 = NULL;
- if(! (flags & match_try_am) && use_immediate) {
+ if (!(flags & match_try_am) && use_immediate) {
new_op2 = try_create_Immediate(op2, 0);
}
- if(new_op2 == NULL
- && use_am && ia32_use_source_address_mode(block, op2, op1, other_op)) {
+ if (new_op2 == NULL &&
+ use_am && ia32_use_source_address_mode(block, op2, op1, other_op)) {
build_address(am, op2);
new_op1 = (op1 == NULL ? NULL : be_transform_node(op1));
if(mode_is_float(mode)) {
new_op2 = noreg_gp;
}
am->op_type = ia32_AddrModeS;
- } else if(commutative && (new_op2 == NULL || use_am_and_immediates) &&
- use_am
- && ia32_use_source_address_mode(block, op1, op2, other_op)) {
+ } else if (commutative && (new_op2 == NULL || use_am_and_immediates) &&
+ use_am &&
+ ia32_use_source_address_mode(block, op1, op2, other_op)) {
ir_node *noreg;
build_address(am, op1);
- if(mode_is_float(mode)) {
+ if (mode_is_float(mode)) {
noreg = ia32_new_NoReg_vfp(env_cg);
} else {
noreg = noreg_gp;
ir_node *op1 = get_Mul_left(node);
ir_node *op2 = get_Mul_right(node);
ir_mode *mode = get_irn_mode(node);
+ unsigned flags;
if (mode_is_float(mode)) {
if (ia32_cg_config.use_sse2)
/* for the lower 32bit of the result it doesn't matter whether we use
* signed or unsigned multiplication so we use IMul as it has fewer
* constraints */
- return gen_binop(node, op1, op2, new_rd_ia32_IMul,
- match_commutative | match_am | match_mode_neutral |
- match_immediate | match_am_and_immediates);
+ flags = match_commutative | match_am | match_mode_neutral | match_immediate;
+ if (ia32_cg_config.use_imul_mem_imm32)
+ flags |= match_am_and_immediates;
+ return gen_binop(node, op1, op2, new_rd_ia32_IMul, flags);
}
/**
return new_node;
}
+/**
+ * Transform a Cond node.
+ */
static ir_node *gen_Cond(ir_node *node) {
ir_node *block = get_nodes_block(node);
ir_node *new_block = be_transform_node(block);
return new_node;
}
-static ir_node *create_Immediate(ir_entity *symconst, int symconst_sign, long val)
-{
- ir_graph *irg = current_ir_graph;
- ir_node *start_block = get_irg_start_block(irg);
- ir_node *immediate = new_rd_ia32_Immediate(NULL, irg, start_block,
- symconst, symconst_sign, val);
- arch_set_irn_register(env_cg->arch_env, immediate, &ia32_gp_regs[REG_GP_NOREG]);
-
- return immediate;
-}
-
/**
* Create a conversion from general purpose to x87 register
*/
ir_node *res = NULL;
if (src_mode == mode_b) {
- assert(mode_is_int(tgt_mode));
+ assert(mode_is_int(tgt_mode) || mode_is_reference(tgt_mode));
/* nothing to do, we already model bools as 0/1 ints */
return be_transform_node(op);
}
}
+/**
+ * Transform an ia32_l_FloattoLL node: convert an x87 float value to a
+ * 64bit integer by storing it with a fist (in truncating rounding mode)
+ * into a frame slot.  The two 32bit result halves are produced by the
+ * corresponding Proj transformer, which loads them back from the slot.
+ */
static ir_node *gen_ia32_l_FloattoLL(ir_node *node) {
- (void) node;
- panic("LLtoFloat NIY");
+ ir_node *src_block = get_nodes_block(node);
+ ir_node *block = be_transform_node(src_block);
+ ir_graph *irg = current_ir_graph;
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *frame = get_irg_frame(irg);
+ ir_node *noreg = ia32_new_NoReg_gp(env_cg);
+ ir_node *nomem = new_NoMem();
+ ir_node *val = get_irn_n(node, n_ia32_l_FloattoLL_val);
+ ir_node *new_val = be_transform_node(val);
+ ir_node *trunc_mode = ia32_new_Fpu_truncate(env_cg);
+
+ ir_node *fist;
+
+ /* do a fist: destination addressing mode, mode_Ls so the full 64bit
+ * value lands in the frame slot */
+ fist = new_rd_ia32_vfist(dbgi, irg, block, frame, noreg, nomem, new_val,
+ trunc_mode);
+ SET_IA32_ORIG_NODE(fist, ia32_get_old_node_name(env_cg, node));
+ set_ia32_use_frame(fist);
+ set_ia32_op_type(fist, ia32_AddrModeD);
+ set_ia32_ls_mode(fist, mode_Ls);
+
+ return fist;
}
/**
return NULL;
}
+/**
+ * Transform a Proj of an ia32_l_FloattoLL node: load one 32bit half of
+ * the 64bit integer that the fist stored into the frame slot.  The high
+ * half is read at offset 4, the low half at offset 0.
+ */
+static ir_node *gen_Proj_l_FloattoLL(ir_node *node) {
+ ir_graph *irg = current_ir_graph;
+ ir_node *block = be_transform_node(get_nodes_block(node));
+ ir_node *pred = get_Proj_pred(node);
+ ir_node *new_pred = be_transform_node(pred);
+ ir_node *frame = get_irg_frame(irg);
+ ir_node *noreg = ia32_new_NoReg_gp(env_cg);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ long pn = get_Proj_proj(node);
+ ir_node *load;
+ ir_node *proj;
+ ia32_attr_t *attr;
+
+ load = new_rd_ia32_Load(dbgi, irg, block, frame, noreg, new_pred);
+ SET_IA32_ORIG_NODE(load, ia32_get_old_node_name(env_cg, node));
+ set_ia32_use_frame(load);
+ set_ia32_op_type(load, ia32_AddrModeS);
+ set_ia32_ls_mode(load, mode_Iu);
+ /* we need a 64bit stackslot (fist stores 64bit) even though we only load
+ * 32 bit from it with this particular load */
+ attr = get_ia32_attr(load);
+ attr->data.need_64bit_stackent = 1;
+
+ /* select which half of the 64bit slot this Proj reads */
+ if (pn == pn_ia32_l_FloattoLL_res_high) {
+ add_ia32_am_offs_int(load, 4);
+ } else {
+ assert(pn == pn_ia32_l_FloattoLL_res_low);
+ }
+
+ proj = new_r_Proj(irg, block, load, mode_Iu, pn_ia32_Load_res);
+
+ return proj;
+}
+
/**
* Transform the Projs of an AddSP.
*/
if (node == be_get_old_anchor(anchor_tls)) {
return gen_Proj_tls(node);
}
+ } else if (is_ia32_l_FloattoLL(pred)) {
+ return gen_Proj_l_FloattoLL(node);
#ifdef FIRM_EXT_GRS
} else if(!is_ia32_irn(pred)) { // Quick hack for SIMD optimization
#else