}
/**
- * Get an entity that is initialized with a tarval
+ * Get an atomic entity that is initialized with a tarval
*/
-static ir_entity *get_entity_for_tv(ia32_code_gen_t *cg, ir_node *cnst)
+static ir_entity *ia32_get_entity_for_tv(ia32_isa_t *isa, ir_node *cnst)
{
tarval *tv = get_Const_tarval(cnst);
- pmap_entry *e = pmap_find(cg->isa->tv_ent, tv);
+ pmap_entry *e = pmap_find(isa->tv_ent, tv);
ir_entity *res;
ir_graph *rem;
ir_mode *mode = get_irn_mode(cnst);
ir_type *tp = get_Const_type(cnst);
if (tp == firm_unknown_type)
- tp = get_prim_type(cg->isa->types, mode);
+ tp = get_prim_type(isa->types, mode);
res = new_entity(get_glob_type(), unique_id(".LC%u"), tp);
set_atomic_ent_value(res, new_Const_type(tv, tp));
current_ir_graph = rem;
- pmap_insert(cg->isa->tv_ent, tv, res);
+ pmap_insert(isa->tv_ent, tv, res);
} else {
res = e->value;
}
ir_node *nomem = new_NoMem();
ir_node *load;
ir_entity *floatent;
+ cnst_classify_t clss = classify_Const(node);
- if (! USE_SSE2(env_cg)) {
- cnst_classify_t clss = classify_Const(node);
+ if (USE_SSE2(env_cg)) {
+ if (clss == CNST_NULL) {
+ load = new_rd_ia32_xZero(dbgi, irg, block);
+ set_ia32_ls_mode(load, mode);
+ res = load;
+ } else {
+ floatent = ia32_get_entity_for_tv(env_cg->isa, node);
+ load = new_rd_ia32_xLoad(dbgi, irg, block, noreg, noreg, nomem,
+ mode);
+ set_ia32_op_type(load, ia32_AddrModeS);
+ set_ia32_am_sc(load, floatent);
+ set_ia32_flags(load, get_ia32_flags(load) | arch_irn_flags_rematerializable);
+ res = new_r_Proj(irg, block, load, mode_xmm, pn_ia32_xLoad_res);
+ }
+ } else {
if (clss == CNST_NULL) {
load = new_rd_ia32_vfldz(dbgi, irg, block);
res = load;
load = new_rd_ia32_vfld1(dbgi, irg, block);
res = load;
} else {
- floatent = get_entity_for_tv(env_cg, node);
+ floatent = ia32_get_entity_for_tv(env_cg->isa, node);
load = new_rd_ia32_vfld(dbgi, irg, block, noreg, noreg, nomem, mode);
set_ia32_op_type(load, ia32_AddrModeS);
res = new_r_Proj(irg, block, load, mode_vfp, pn_ia32_vfld_res);
}
set_ia32_ls_mode(load, mode);
- } else {
- floatent = get_entity_for_tv(env_cg, node);
-
- load = new_rd_ia32_xLoad(dbgi, irg, block, noreg, noreg, nomem,
- mode);
- set_ia32_op_type(load, ia32_AddrModeS);
- set_ia32_am_sc(load, floatent);
- set_ia32_flags(load, get_ia32_flags(load) | arch_irn_flags_rematerializable);
-
- res = new_r_Proj(irg, block, load, mode_xmm, pn_ia32_xLoad_res);
}
SET_IA32_ORIG_NODE(load, ia32_get_old_node_name(env_cg, node));
static void match_arguments(ia32_address_mode_t *am, ir_node *block,
ir_node *op1, ir_node *op2, int commutative,
- int use_am_and_immediates)
+ int use_am_and_immediates, int use_am)
{
ia32_address_t *addr = &am->addr;
ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg);
memset(am, 0, sizeof(am[0]));
new_op2 = try_create_Immediate(op2, 0);
- if(new_op2 == NULL && use_source_address_mode(block, op2, op1)) {
+ if(new_op2 == NULL && use_am && use_source_address_mode(block, op2, op1)) {
build_address(am, op2);
new_op1 = be_transform_node(op1);
new_op2 = noreg_gp;
am->op_type = ia32_AddrModeS;
} else if(commutative && (new_op2 == NULL || use_am_and_immediates) &&
- use_source_address_mode(block, op1, op2)) {
+ use_am && use_source_address_mode(block, op1, op2)) {
build_address(am, op1);
if(new_op2 != NULL) {
new_op1 = noreg_gp;
ia32_address_mode_t am;
ia32_address_t *addr = &am.addr;
- match_arguments(&am, src_block, op1, op2, commutative, 0);
+ match_arguments(&am, src_block, op1, op2, commutative, 0, 1);
new_node = func(dbgi, irg, block, addr->base, addr->index, am.new_op1,
am.new_op2, addr->mem);
if(new_op != NULL)
return new_op;
- /* construct load address */
+ /* construct store address */
memset(&addr, 0, sizeof(addr));
ia32_create_address_mode(&addr, ptr, 0);
base = addr.base;
}
static ir_node *try_create_TestJmp(ir_node *block, dbg_info *dbgi, long pnc,
- ir_node *cmp_left, ir_node *cmp_right)
+ ir_node *cmp_left, ir_node *cmp_right,
+ int use_am)
{
ir_node *arg_left;
ir_node *arg_right;
mode = mode_Iu;
assert(get_mode_size_bits(mode) <= 32);
- match_arguments(&am, block, arg_left, arg_right, 1, 1);
+ match_arguments(&am, block, arg_left, arg_right, 1, 1, use_am);
if(am.flipped)
pnc = get_inversed_pnc(pnc);
ir_node *new_cmp_b;
ir_mode *cmp_mode;
long pnc;
+ int use_am;
if (sel_mode != mode_b) {
return create_Switch(node);
if(!is_Proj(sel) || !is_Cmp(get_Proj_pred(sel))) {
/* it's some mode_b value but not a direct comparison -> create a
* testjmp */
- res = try_create_TestJmp(block, dbgi, pn_Cmp_Lg, sel, NULL);
+ res = try_create_TestJmp(block, dbgi, pn_Cmp_Lg, sel, NULL, 1);
SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node));
return res;
}
+ /* address mode only makes sense when we're the only user of the cmp */
+ use_am = get_irn_n_edges(node) <= 1;
+
cmp = get_Proj_pred(sel);
cmp_a = get_Cmp_left(cmp);
cmp_b = get_Cmp_right(cmp);
}
if(mode_needs_gp_reg(cmp_mode)) {
- res = try_create_TestJmp(block, dbgi, pnc, cmp_a, cmp_b);
+ res = try_create_TestJmp(block, dbgi, pnc, cmp_a, cmp_b, use_am);
if(res != NULL) {
SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node));
return res;
} else {
ia32_address_mode_t am;
ia32_address_t *addr = &am.addr;
- match_arguments(&am, src_block, cmp_a, cmp_b, 1, 1);
+ match_arguments(&am, src_block, cmp_a, cmp_b, 1, 1, use_am);
if(am.flipped)
pnc = get_inversed_pnc(pnc);
static ir_node *create_set(long pnc, ir_node *cmp_left, ir_node *cmp_right,
- dbg_info *dbgi, ir_node *block)
+ dbg_info *dbgi, ir_node *block, int use_am)
{
ir_graph *irg = current_ir_graph;
ir_node *new_block = be_transform_node(block);
assert(get_mode_size_bits(mode) <= 32);
- match_arguments(&am, block, arg_left, arg_right, 1, 1);
+ match_arguments(&am, block, arg_left, arg_right, 1, 1, use_am);
if(am.flipped)
pnc = get_inversed_pnc(pnc);
mode = get_irn_mode(cmp_left);
assert(get_mode_size_bits(mode) <= 32);
- match_arguments(&am, block, cmp_left, cmp_right, 1, 1);
+ match_arguments(&am, block, cmp_left, cmp_right, 1, 1, use_am);
if(am.flipped)
pnc = get_inversed_pnc(pnc);
}
if(is_Const_1(psi_true) && is_Const_0(psi_default)) {
- new_op = create_set(pnc, cmp_left, cmp_right, dbgi, block);
+ new_op = create_set(pnc, cmp_left, cmp_right, dbgi, block, 1);
} else if(is_Const_0(psi_true) && is_Const_1(psi_default)) {
pnc = get_negated_pnc(pnc, cmp_mode);
- new_op = create_set(pnc, cmp_left, cmp_right, dbgi, block);
+ new_op = create_set(pnc, cmp_left, cmp_right, dbgi, block, 1);
} else {
new_op = create_cmov(pnc, cmp_left, cmp_right, psi_true, psi_default,
dbgi, block);
ir_node *muls = new_rd_ia32_IMul1OP(dbgi, irg, block, noreg, noreg, new_left,
new_right, new_NoMem());
clear_ia32_commutative(muls);
- set_ia32_am_support(muls, ia32_am_Source, ia32_am_binary);
SET_IA32_ORIG_NODE(muls, ia32_get_old_node_name(env_cg, node));
return muls;
}
+static ir_node *gen_ia32_Add64Bit(ir_node *node)
+{
+ ir_node *a_l = be_transform_node(get_irn_n(node, 0));
+ ir_node *a_h = be_transform_node(get_irn_n(node, 1));
+ ir_node *b_l = create_immediate_or_transform(get_irn_n(node, 2), 0);
+ ir_node *b_h = create_immediate_or_transform(get_irn_n(node, 3), 0);
+ ir_node *block = be_transform_node(get_nodes_block(node));
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_graph *irg = current_ir_graph;
+ ir_node *new_op = new_rd_ia32_Add64Bit(dbgi, irg, block, a_l, a_h, b_l, b_h);
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node));
+ return new_op;
+}
+
/**
* Transforms a l_ShlD/l_ShrD into a ShlD/ShrD. Those nodes have 3 data inputs:
* op1 - target to be shifted
ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *new_op1 = be_transform_node(op1);
- ir_node *new_op2 = create_immediate_or_transform(op2, 'I');
- ir_node *new_count = be_transform_node(count);
+ ir_node *new_op2 = be_transform_node(op2);
+ ir_node *new_count = create_immediate_or_transform(count, 'I');
/* TODO proper AM support */
dbg_info *dbgi = get_irn_dbg_info(cmp);
ir_node *block = get_nodes_block(node);
ir_node *res;
+ int use_am;
assert(!mode_is_float(cmp_mode));
pnc |= ia32_pn_Cmp_Unsigned;
}
- res = create_set(pnc, cmp_left, cmp_right, dbgi, block);
+ /* address mode only makes sense when we'll be the only node using the cmp */
+ use_am = get_irn_n_edges(cmp) <= 1;
+
+ res = create_set(pnc, cmp_left, cmp_right, dbgi, block, use_am);
SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, cmp));
return res;
GEN(IJmp);
/* transform ops from intrinsic lowering */
+ GEN(ia32_Add64Bit);
GEN(ia32_l_Add);
GEN(ia32_l_Adc);
GEN(ia32_l_Sub);