mode);
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_am_sc(load, floatent);
- set_ia32_flags(load, get_ia32_flags(load) | arch_irn_flags_rematerializable);
+ arch_irn_add_flags(load, arch_irn_flags_rematerializable);
res = new_r_Proj(irg, block, load, mode_xmm, pn_ia32_xLoad_res);
}
} else {
load = new_rd_ia32_vfld(dbgi, irg, block, noreg, noreg, nomem, mode);
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_am_sc(load, floatent);
- set_ia32_flags(load, get_ia32_flags(load) | arch_irn_flags_rematerializable);
+ arch_irn_add_flags(load, arch_irn_flags_rematerializable);
res = new_r_Proj(irg, block, load, mode_vfp, pn_ia32_vfld_res);
/* take the mode from the entity */
set_ia32_ls_mode(load, get_type_mode(get_entity_type(floatent)));
}
}
end:
- SET_IA32_ORIG_NODE(load, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(load, node);
be_dep_on_frame(load);
return res;
val = get_tarval_long(tv);
cnst = new_rd_ia32_Const(dbgi, irg, block, NULL, 0, val);
- SET_IA32_ORIG_NODE(cnst, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(cnst, node);
be_dep_on_frame(cnst);
return cnst;
cnst = new_rd_ia32_Const(dbgi, irg, block, entity, 0, 0);
}
- SET_IA32_ORIG_NODE(cnst, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(cnst, node);
be_dep_on_frame(cnst);
return cnst;
/* construct load address */
memset(addr, 0, sizeof(addr[0]));
- ia32_create_address_mode(addr, ptr, /*force=*/0);
+ ia32_create_address_mode(addr, ptr, 0);
noreg_gp = ia32_new_NoReg_gp(env_cg);
addr->base = addr->base ? be_transform_node(addr->base) : noreg_gp;
addr->mem = be_transform_node(mem);
}
-static void build_address(ia32_address_mode_t *am, ir_node *node)
+static void build_address(ia32_address_mode_t *am, ir_node *node,
+ ia32_create_am_flags_t flags)
{
ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg);
ia32_address_t *addr = &am->addr;
am->am_node = node;
/* construct load address */
- ia32_create_address_mode(addr, ptr, /*force=*/0);
+ ia32_create_address_mode(addr, ptr, flags);
addr->base = addr->base ? be_transform_node(addr->base) : noreg_gp;
addr->index = addr->index ? be_transform_node(addr->index) : noreg_gp;
noreg_gp = ia32_new_NoReg_gp(env_cg);
if (new_op2 == NULL &&
use_am && ia32_use_source_address_mode(block, op2, op1, other_op, flags)) {
- build_address(am, op2);
+ build_address(am, op2, 0);
new_op1 = (op1 == NULL ? NULL : be_transform_node(op1));
if (mode_is_float(mode)) {
new_op2 = ia32_new_NoReg_vfp(env_cg);
use_am &&
ia32_use_source_address_mode(block, op1, op2, other_op, flags)) {
ir_node *noreg;
- build_address(am, op1);
+ build_address(am, op1, 0);
if (mode_is_float(mode)) {
noreg = ia32_new_NoReg_vfp(env_cg);
if (!(flags & match_am_and_immediates) &&
(is_ia32_Immediate(am.new_op1) || is_ia32_Immediate(am.new_op2)))
set_ia32_am_support(new_node, ia32_am_none);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
new_node = fix_mem_proj(new_node, &am);
if (!(flags & match_am_and_immediates) &&
(is_ia32_Immediate(am.new_op1) || is_ia32_Immediate(am.new_op2)))
set_ia32_am_support(new_node, ia32_am_none);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
new_node = fix_mem_proj(new_node, &am);
attr = get_ia32_x87_attr(new_node);
attr->attr.data.ins_permuted = am.ins_permuted;
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
new_node = fix_mem_proj(new_node, &am);
block = get_nodes_block(node);
new_block = be_transform_node(block);
new_node = func(dbgi, current_ir_graph, new_block, new_op1, new_op2);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
/* lowered shift instruction may have a dependency operand, handle it here */
if (get_irn_arity(node) == 3) {
new_block = be_transform_node(block);
new_node = func(dbgi, current_ir_graph, new_block, new_op);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
return new_node;
}
* 3. Otherwise -> Lea
*/
memset(&addr, 0, sizeof(addr));
- ia32_create_address_mode(&addr, node, /*force=*/1);
+ ia32_create_address_mode(&addr, node, ia32_create_am_force);
add_immediate_op = NULL;
dbgi = get_irn_dbg_info(node);
new_node = new_rd_ia32_Const(dbgi, irg, new_block, addr.symconst_ent,
addr.symconst_sign, addr.offset);
be_dep_on_frame(new_node);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
return new_node;
}
/* add with immediate? */
}
new_node = create_lea_from_address(dbgi, new_block, &addr);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
return new_node;
}
am_addr->index, am_addr->mem, am.new_op1,
am.new_op2);
set_am_attributes(new_node, &am);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
new_node = fix_mem_proj(new_node, &am);
/* otherwise construct a lea */
new_node = create_lea_from_address(dbgi, new_block, &addr);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
return new_node;
}
NEW_ARR_A(ir_node*, ins, arity + 1);
+ /* NOTE: This sometimes produces dead-code because the old sync in
+ * src_mem might not be used anymore, we should detect this case
+ * and kill the sync... */
for (i = arity - 1; i >= 0; --i) {
ir_node *const pred = get_Sync_pred(src_mem, i);
}
}
+/**
+ * Build a 32->64 bit sign extension of @p val (the "upper half" node).
+ *
+ * Two equivalent encodings, selected by the backend config:
+ *  - cltd/cdq when use_short_sex_eax is set (shorter encoding, but pins
+ *    the value to eax/edx via the register constraints of Cltd);
+ *  - "sar val, 31" otherwise, which replicates the sign bit into every
+ *    bit of the result and works on any gp register.
+ *
+ * @param dbgi   debug info to attach to the created nodes
+ * @param irg    the graph the nodes are created in
+ * @param block  block to place the new nodes in
+ * @param val    the (already transformed) 32 bit value to sign-extend
+ * @param orig   original (pre-transform) node, used only for the
+ *               SET_IA32_ORIG_NODE debugging annotation
+ * @return the node producing the sign-extension result
+ *
+ * NOTE(review): the (void)orig cast presumably silences an unused-parameter
+ * warning in builds where SET_IA32_ORIG_NODE expands to nothing -- confirm.
+ */
+static ir_node *create_sex_32_64(dbg_info *dbgi, ir_graph *irg, ir_node *block,
+ ir_node *val, const ir_node *orig)
+{
+ ir_node *res;
+
+ (void)orig;
+ if (ia32_cg_config.use_short_sex_eax) {
+ /* ProduceVal fakes a (undefined) input value for Cltd's eax operand */
+ ir_node *pval = new_rd_ia32_ProduceVal(dbgi, irg, block);
+ be_dep_on_frame(pval);
+ res = new_rd_ia32_Cltd(dbgi, irg, block, val, pval);
+ } else {
+ /* arithmetic shift right by 31 fills the result with the sign bit */
+ ir_node *imm31 = create_Immediate(NULL, 0, 31);
+ res = new_rd_ia32_Sar(dbgi, irg, block, val, imm31);
+ }
+ SET_IA32_ORIG_NODE(res, orig);
+ return res;
+}
+
/**
* Generates an ia32 DivMod with additional infrastructure for the
* register allocator if needed.
new_mem = transform_AM_mem(irg, block, op2, mem, addr->mem);
if (mode_is_signed(mode)) {
- ir_node *produceval = new_rd_ia32_ProduceVal(dbgi, irg, new_block);
- be_dep_on_frame(produceval);
- sign_extension = new_rd_ia32_Cltd(dbgi, irg, new_block, am.new_op1,
- produceval);
-
- new_node = new_rd_ia32_IDiv(dbgi, irg, new_block, addr->base,
- addr->index, new_mem, am.new_op2,
- am.new_op1, sign_extension);
+ sign_extension = create_sex_32_64(dbgi, irg, new_block, am.new_op1, node);
+ new_node = new_rd_ia32_IDiv(dbgi, irg, new_block, addr->base,
+ addr->index, new_mem, am.new_op2, am.new_op1, sign_extension);
} else {
sign_extension = new_rd_ia32_Const(dbgi, irg, new_block, NULL, 0, 0);
be_dep_on_frame(sign_extension);
set_irn_pinned(new_node, get_irn_pinned(node));
set_am_attributes(new_node, &am);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
new_node = fix_mem_proj(new_node, &am);
long val = get_tarval_long(tv);
if (val == 31) {
/* this is a sign extension */
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = be_transform_node(get_nodes_block(node));
- ir_node *op = left;
- ir_node *new_op = be_transform_node(op);
- ir_node *pval = new_rd_ia32_ProduceVal(dbgi, irg, block);
+ ir_node *new_op = be_transform_node(left);
- be_dep_on_frame(pval);
- return new_rd_ia32_Cltd(dbgi, irg, block, new_op, pval);
+ return create_sex_32_64(dbgi, current_ir_graph, block, new_op, node);
}
}
new_node = gen_unop(node, op, new_rd_ia32_Neg, match_mode_neutral);
}
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
return new_node;
}
set_ia32_am_sc(new_node, ent);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
set_ia32_op_type(new_node, ia32_AddrModeS);
set_ia32_ls_mode(new_node, mode);
} else {
new_node = new_rd_ia32_vfabs(dbgi, irg, new_block, new_op);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
}
} else {
- ir_node *xor, *pval, *sign_extension;
+ ir_node *xor, *sign_extension;
if (get_mode_size_bits(mode) == 32) {
new_op = be_transform_node(op);
new_op = create_I2I_Conv(mode, mode_Is, dbgi, block, op, node);
}
- pval = new_rd_ia32_ProduceVal(dbgi, irg, new_block);
- sign_extension = new_rd_ia32_Cltd(dbgi, irg, new_block,
- new_op, pval);
-
- be_dep_on_frame(pval);
- SET_IA32_ORIG_NODE(sign_extension,ia32_get_old_node_name(env_cg, node));
+ sign_extension = create_sex_32_64(dbgi, irg, new_block, new_op, node);
xor = new_rd_ia32_Xor(dbgi, irg, new_block, noreg_gp, noreg_gp,
nomem, new_op, sign_extension);
- SET_IA32_ORIG_NODE(xor, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(xor, node);
new_node = new_rd_ia32_Sub(dbgi, irg, new_block, noreg_gp, noreg_gp,
nomem, xor, sign_extension);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
}
return new_node;
/* construct load address */
memset(&addr, 0, sizeof(addr));
- ia32_create_address_mode(&addr, ptr, /*force=*/0);
+ ia32_create_address_mode(&addr, ptr, 0);
base = addr.base;
index = addr.index;
set_address(new_node, &addr);
if (get_irn_pinned(node) == op_pin_state_floats) {
- add_ia32_flags(new_node, arch_irn_flags_rematerializable);
+ assert(pn_ia32_xLoad_res == pn_ia32_vfld_res
+ && pn_ia32_vfld_res == pn_ia32_Load_res
+ && pn_ia32_Load_res == pn_ia32_res);
+ arch_irn_add_flags(new_node, arch_irn_flags_rematerializable);
}
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
be_dep_on_frame(new_node);
return new_node;
return 0;
}
- if (is_Sync(mem)) {
- int i;
-
- for (i = get_Sync_n_preds(mem) - 1; i >= 0; --i) {
- ir_node *const pred = get_Sync_pred(mem, i);
-
- if (is_Proj(pred) && get_Proj_pred(pred) == load)
- continue;
-
- if (get_nodes_block(pred) == block &&
- heights_reachable_in_block(heights, pred, load)) {
- return 0;
- }
- }
- } else {
- /* Store should be attached to the load */
- if (!is_Proj(mem) || get_Proj_pred(mem) != load)
- return 0;
- }
+ if (prevents_AM(block, load, mem))
+ return 0;
+ /* Store should be attached to the load via mem */
+ assert(heights_reachable_in_block(heights, mem, load));
return 1;
}
commutative = (flags & match_commutative) != 0;
if (use_dest_am(src_block, op1, mem, ptr, op2)) {
- build_address(&am, op1);
+ build_address(&am, op1, ia32_create_am_double_use);
new_op = create_immediate_or_transform(op2, 0);
} else if (commutative && use_dest_am(src_block, op2, mem, ptr, op1)) {
- build_address(&am, op2);
+ build_address(&am, op2, ia32_create_am_double_use);
new_op = create_immediate_or_transform(op1, 0);
} else {
return NULL;
set_address(new_node, addr);
set_ia32_op_type(new_node, ia32_AddrModeD);
set_ia32_ls_mode(new_node, mode);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
be_set_transformed_node(get_Proj_pred(am.mem_proj), new_node);
mem_proj = be_transform_node(am.mem_proj);
ir_node *mem_proj;
ia32_address_mode_t am;
ia32_address_t *addr = &am.addr;
- memset(&am, 0, sizeof(am));
if (!use_dest_am(src_block, op, mem, ptr, NULL))
return NULL;
- build_address(&am, op);
+ memset(&am, 0, sizeof(am));
+ build_address(&am, op, ia32_create_am_double_use);
dbgi = get_irn_dbg_info(node);
block = be_transform_node(src_block);
set_address(new_node, addr);
set_ia32_op_type(new_node, ia32_AddrModeD);
set_ia32_ls_mode(new_node, mode);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
be_set_transformed_node(get_Proj_pred(am.mem_proj), new_node);
mem_proj = be_transform_node(am.mem_proj);
set_address(new_node, &addr);
set_ia32_op_type(new_node, ia32_AddrModeD);
set_ia32_ls_mode(new_node, mode);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
return new_node;
}
}
new_node = dest_am_binop(val, op1, op2, mem, ptr, mode,
new_rd_ia32_SubMem, new_rd_ia32_SubMem8Bit,
- match_dest_am | match_immediate |
- match_immediate);
+ match_dest_am | match_immediate);
break;
case iro_And:
op1 = get_And_left(val);
}
/**
- * Transform a Store(floatConst).
+ * Transform a Store(floatConst) into a sequence of
+ * integer stores.
*
* @return the created ia32 Store node
*/
set_ia32_op_type(new_node, ia32_AddrModeD);
set_ia32_ls_mode(new_node, mode_Iu);
set_address(new_node, &addr);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
+ assert(i < 4);
ins[i++] = new_node;
size -= 4;
addr.offset += 4;
} while (size != 0);
- return i == 1 ? ins[0] : new_rd_Sync(dbgi, irg, new_block, i, ins);
+ if (i > 1) {
+ return new_rd_Sync(dbgi, irg, new_block, i, ins);
+ } else {
+ return ins[0];
+ }
}
/**
return new_node;
}
/**
- * Transforms a normal Store.
+ * Transforms a general (no special case) Store.
*
* @return the created ia32 Store node
*/
-static ir_node *gen_normal_Store(ir_node *node)
+static ir_node *gen_general_Store(ir_node *node)
{
ir_node *val = get_Store_value(node);
ir_mode *mode = get_irn_mode(val);
/* construct store address */
memset(&addr, 0, sizeof(addr));
- ia32_create_address_mode(&addr, ptr, /*force=*/0);
+ ia32_create_address_mode(&addr, ptr, 0);
if (addr.base == NULL) {
addr.base = noreg;
set_ia32_ls_mode(store, mode);
set_address(store, &addr);
- SET_IA32_ORIG_NODE(store, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(store, node);
return new_node;
}
ir_mode *mode = get_irn_mode(val);
if (mode_is_float(mode) && is_Const(val)) {
- int transform;
-
- /* we are storing a floating point constant */
- if (ia32_cg_config.use_sse2) {
- transform = !is_simple_sse_Const(val);
- } else {
- transform = !is_simple_x87_Const(val);
- }
- if (transform)
- return gen_float_const_Store(node, val);
+ /* We can transform every floating const store
+ into a sequence of integer stores.
+ If the constant is already in a register,
+ it would be better to use it, but we don't
+ have this information here. */
+ return gen_float_const_Store(node, val);
}
- return gen_normal_Store(node);
+ return gen_general_Store(node);
}
/**
add_ia32_am_offs_int(new_sel, -switch_min);
set_ia32_op_type(new_sel, ia32_AddrModeS);
- SET_IA32_ORIG_NODE(new_sel, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_sel, node);
}
new_node = new_rd_ia32_SwitchJmp(dbgi, irg, block, new_sel, default_pn);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
return new_node;
}
flags = get_flags_node(sel, &pnc);
new_node = new_rd_ia32_Jcc(dbgi, irg, new_block, flags, pnc);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
return new_node;
}
new_node = new_rd_ia32_vFucomi(dbgi, irg, new_block, new_left,
new_right, 0);
set_ia32_commutative(new_node);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
} else {
if (ia32_cg_config.use_ftst && is_Const_0(right)) {
new_node = new_rd_ia32_vFtstFnstsw(dbgi, irg, new_block, new_left,
set_ia32_commutative(new_node);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
new_node = new_rd_ia32_Sahf(dbgi, irg, new_block, new_node);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
}
return new_node;
am.ins_permuted);
set_am_attributes(new_node, &am);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
new_node = fix_mem_proj(new_node, &am);
set_am_attributes(new_node, &am);
set_ia32_ls_mode(new_node, cmp_mode);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
new_node = fix_mem_proj(new_node, &am);
am.ins_permuted, pnc);
set_am_attributes(new_node, &am);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
new_node = fix_mem_proj(new_node, &am);
ir_node *new_node;
new_node = new_rd_ia32_Set(dbgi, irg, new_block, flags, pnc, ins_permuted);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, orig_node));
+ SET_IA32_ORIG_NODE(new_node, orig_node);
/* we might need to conv the result up */
if (get_mode_size_bits(mode) > 8) {
new_node = new_rd_ia32_Conv_I2I8Bit(dbgi, irg, new_block, noreg, noreg,
nomem, new_node, mode_Bu);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, orig_node));
+ SET_IA32_ORIG_NODE(new_node, orig_node);
}
return new_node;
} else {
set_ia32_ls_mode(fist, mode_Is);
}
- SET_IA32_ORIG_NODE(fist, ia32_get_old_node_name(cg, node));
+ SET_IA32_ORIG_NODE(fist, node);
/* do a Load */
load = new_rd_ia32_Load(dbgi, irg, block, get_irg_frame(irg), noreg, mem);
ia32_attr_t *attr = get_ia32_attr(load);
attr->data.need_32bit_stackent = 1;
}
- SET_IA32_ORIG_NODE(load, ia32_get_old_node_name(cg, node));
+ SET_IA32_ORIG_NODE(load, node);
return new_r_Proj(irg, block, load, mode_Iu, pn_ia32_Load_res);
}
tgt_mode);
set_ia32_use_frame(store);
set_ia32_op_type(store, ia32_AddrModeD);
- SET_IA32_ORIG_NODE(store, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(store, node);
load = new_rd_ia32_vfld(dbgi, irg, block, frame, noreg, store,
tgt_mode);
set_ia32_use_frame(load);
set_ia32_op_type(load, ia32_AddrModeS);
- SET_IA32_ORIG_NODE(load, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(load, node);
new_node = new_r_Proj(irg, block, load, mode_E, pn_ia32_vfld_res);
return new_node;
pn_ia32_vfild_res);
set_am_attributes(fild, &am);
- SET_IA32_ORIG_NODE(fild, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(fild, node);
fix_mem_proj(fild, &am);
if (src_bits == 8) {
new_op = new_rd_ia32_Conv_I2I8Bit(dbgi, irg, block, noreg, noreg, nomem,
new_op, src_mode);
- SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_op, node);
mode = mode_Is;
} else if (src_bits < 32) {
new_op = new_rd_ia32_Conv_I2I(dbgi, irg, block, noreg, noreg, nomem,
new_op, src_mode);
- SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_op, node);
mode = mode_Is;
}
/* match_arguments assume that out-mode = in-mode, this isn't true here
* so fix it */
set_ia32_ls_mode(new_node, smaller_mode);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
new_node = fix_mem_proj(new_node, &am);
return new_node;
}
} else {
if (get_Conv_strict(node)) {
res = gen_x87_strict_conv(tgt_mode, new_op);
- SET_IA32_ORIG_NODE(get_Proj_pred(res), ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(get_Proj_pred(res), node);
return res;
}
DB((dbg, LEVEL_1, "killed Conv(float, float) ..."));
}
if (float_mantissa < int_mantissa) {
res = gen_x87_strict_conv(tgt_mode, res);
- SET_IA32_ORIG_NODE(get_Proj_pred(res), ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(get_Proj_pred(res), node);
}
}
return res;
set_ia32_frame_ent(new_node, arch_get_frame_entity(node));
set_ia32_use_frame(new_node);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
return new_node;
}
/* create a new barrier */
arity = get_irn_arity(barrier);
- in = alloca(arity * sizeof(in[0]));
+ in = ALLOCAN(ir_node*, arity);
for (i = 0; i < arity; ++i) {
ir_node *new_in;
addr->base, addr->index, addr->mem,
am.new_op2);
set_am_attributes(new_node, &am);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
new_node = fix_mem_proj(new_node, &am);
}
flags = new_rd_Proj(NULL, irg, block, sub, mode_Iu, pn_ia32_Sub_flags);
new_node = new_rd_ia32_Jcc(dbgi, irg, block, flags, pn_Cmp_Lt | ia32_pn_Cmp_unsigned);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
} else {
panic("generic Bound not supported in ia32 Backend");
}
new_node = new_rd_ia32_ShrD(dbgi, irg, new_block, new_high, new_low,
new_count);
}
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
return new_node;
}
new_val_low);
store_high = new_rd_ia32_Store(dbgi, irg, block, frame, noreg, nomem,
new_val_high);
- SET_IA32_ORIG_NODE(store_low, ia32_get_old_node_name(env_cg, node));
- SET_IA32_ORIG_NODE(store_high, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(store_low, node);
+ SET_IA32_ORIG_NODE(store_high, node);
set_ia32_use_frame(store_low);
set_ia32_use_frame(store_high);
set_ia32_op_type(fild, ia32_AddrModeS);
set_ia32_ls_mode(fild, mode_Ls);
- SET_IA32_ORIG_NODE(fild, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(fild, node);
return new_r_Proj(irg, block, fild, mode_vfp, pn_ia32_vfild_res);
}
ir_node *fist, *mem;
mem = gen_vfist(dbgi, irg, block, frame, noreg, nomem, new_val, &fist);
- SET_IA32_ORIG_NODE(fist, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(fist, node);
set_ia32_use_frame(fist);
set_ia32_op_type(fist, ia32_AddrModeD);
set_ia32_ls_mode(fist, mode_Ls);
ia32_attr_t *attr;
load = new_rd_ia32_Load(dbgi, irg, block, frame, noreg, new_pred);
- SET_IA32_ORIG_NODE(load, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(load, node);
set_ia32_use_frame(load);
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_ls_mode(load, mode_Iu);
if (get_irn_pinned(node) == op_pin_state_pinned)
set_irn_pinned(call, op_pin_state_pinned);
- SET_IA32_ORIG_NODE(call, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(call, node);
return call;
}
static ir_node *gen_be_IncSP(ir_node *node)
{
ir_node *res = be_duplicate_node(node);
- be_node_add_flags(res, -1, arch_irn_flags_modify_flags);
+ arch_irn_add_flags(res, arch_irn_flags_modify_flags);
return res;
}
long proj = get_Proj_proj(node);
ir_mode *mode = get_irn_mode(node);
ir_node *sse_load;
- const arch_register_class_t *cls;
- ir_node *res;
+ ir_node *res;
/* The following is kinda tricky: If we're using SSE, then we have to
* move the result value of the call in floating point registers to an
/* transform call modes */
if (mode_is_data(mode)) {
- cls = arch_get_irn_reg_class(node, -1);
+ const arch_register_class_t *cls = arch_get_irn_reg_class_out(node);
mode = cls->mode;
}
} else if (proj == pn_be_Call_M_regular) {
proj = pn_ia32_Call_M;
} else {
- arch_register_req_t const *const req = arch_get_register_req(node, BE_OUT_POS(proj));
- int const n_outs = get_ia32_n_res(new_call);
+ arch_register_req_t const *const req = arch_get_register_req_out(node);
+ int const n_outs = arch_irn_get_n_outs(new_call);
int i;
assert(proj >= pn_be_Call_first_res);
- assert(req->type == arch_register_req_type_limited);
+ assert(req->type & arch_register_req_type_limited);
for (i = 0; i < n_outs; ++i) {
arch_register_req_t const *const new_req = get_ia32_out_req(new_call, i);
- if (new_req->type != arch_register_req_type_limited ||
- new_req->cls != req->cls ||
+ if (!(new_req->type & arch_register_req_type_limited) ||
+ new_req->cls != req->cls ||
*new_req->limited != *req->limited)
continue;
new_pred = be_transform_node(pred);
block = get_nodes_block(new_pred);
return new_r_Proj(current_ir_graph, block, new_pred, mode_M,
- get_ia32_n_res(new_pred) + 1);
+ arch_irn_get_n_outs(new_pred) + 1);
}
/**
if (!is_ia32_irn(node))
return;
- n_outs = get_ia32_n_res(node);
+ n_outs = arch_irn_get_n_outs(node);
if (n_outs <= 0)
return;
if (is_ia32_SwitchJmp(node))