mode);
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_am_sc(load, floatent);
- set_ia32_flags(load, get_ia32_flags(load) | arch_irn_flags_rematerializable);
+ arch_irn_add_flags(load, arch_irn_flags_rematerializable);
res = new_r_Proj(irg, block, load, mode_xmm, pn_ia32_xLoad_res);
}
} else {
load = new_rd_ia32_vfld(dbgi, irg, block, noreg, noreg, nomem, mode);
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_am_sc(load, floatent);
- set_ia32_flags(load, get_ia32_flags(load) | arch_irn_flags_rematerializable);
+ arch_irn_add_flags(load, arch_irn_flags_rematerializable);
res = new_r_Proj(irg, block, load, mode_vfp, pn_ia32_vfld_res);
/* take the mode from the entity */
set_ia32_ls_mode(load, get_type_mode(get_entity_type(floatent)));
addr->mem = be_transform_node(mem);
}
-static void build_address(ia32_address_mode_t *am, ir_node *node)
+static void build_address(ia32_address_mode_t *am, ir_node *node,
+ ia32_create_am_flags_t flags)
{
ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg);
ia32_address_t *addr = &am->addr;
am->am_node = node;
/* construct load address */
- ia32_create_address_mode(addr, ptr, 0);
+ ia32_create_address_mode(addr, ptr, flags);
addr->base = addr->base ? be_transform_node(addr->base) : noreg_gp;
addr->index = addr->index ? be_transform_node(addr->index) : noreg_gp;
noreg_gp = ia32_new_NoReg_gp(env_cg);
if (new_op2 == NULL &&
use_am && ia32_use_source_address_mode(block, op2, op1, other_op, flags)) {
- build_address(am, op2);
+ build_address(am, op2, 0);
new_op1 = (op1 == NULL ? NULL : be_transform_node(op1));
if (mode_is_float(mode)) {
new_op2 = ia32_new_NoReg_vfp(env_cg);
use_am &&
ia32_use_source_address_mode(block, op1, op2, other_op, flags)) {
ir_node *noreg;
- build_address(am, op1);
+ build_address(am, op1, 0);
if (mode_is_float(mode)) {
noreg = ia32_new_NoReg_vfp(env_cg);
NEW_ARR_A(ir_node*, ins, arity + 1);
+ /* NOTE: This sometimes produces dead code, because the old Sync in
+ * src_mem might not be used anymore; we should detect this case
+ * and kill the Sync... */
for (i = arity - 1; i >= 0; --i) {
ir_node *const pred = get_Sync_pred(src_mem, i);
set_address(new_node, &addr);
if (get_irn_pinned(node) == op_pin_state_floats) {
- add_ia32_flags(new_node, arch_irn_flags_rematerializable);
+ assert(pn_ia32_xLoad_res == pn_ia32_vfld_res
+ && pn_ia32_vfld_res == pn_ia32_Load_res
+ && pn_ia32_Load_res == pn_ia32_res);
+ arch_irn_add_flags(new_node, arch_irn_flags_rematerializable);
}
SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
commutative = (flags & match_commutative) != 0;
if (use_dest_am(src_block, op1, mem, ptr, op2)) {
- build_address(&am, op1);
+ build_address(&am, op1, ia32_create_am_double_use);
new_op = create_immediate_or_transform(op2, 0);
} else if (commutative && use_dest_am(src_block, op2, mem, ptr, op1)) {
- build_address(&am, op2);
+ build_address(&am, op2, ia32_create_am_double_use);
new_op = create_immediate_or_transform(op1, 0);
} else {
return NULL;
return NULL;
memset(&am, 0, sizeof(am));
- build_address(&am, op);
+ build_address(&am, op, ia32_create_am_double_use);
dbgi = get_irn_dbg_info(node);
block = be_transform_node(src_block);
}
/**
- * Transform a Store(floatConst).
+ * Transform a Store(floatConst) into a sequence of
+ * integer stores.
*
* @return the created ia32 Store node
*/
set_address(new_node, &addr);
SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ assert(i < 4);
ins[i++] = new_node;
size -= 4;
addr.offset += 4;
} while (size != 0);
- return i == 1 ? ins[0] : new_rd_Sync(dbgi, irg, new_block, i, ins);
+ if (i > 1) {
+ return new_rd_Sync(dbgi, irg, new_block, i, ins);
+ } else {
+ return ins[0];
+ }
}
/**
return new_node;
}
/**
- * Transforms a normal Store.
+ * Transforms a general (no special case) Store.
*
* @return the created ia32 Store node
*/
-static ir_node *gen_normal_Store(ir_node *node)
+static ir_node *gen_general_Store(ir_node *node)
{
ir_node *val = get_Store_value(node);
ir_mode *mode = get_irn_mode(val);
ir_mode *mode = get_irn_mode(val);
if (mode_is_float(mode) && is_Const(val)) {
- int transform;
-
- /* we are storing a floating point constant */
- if (ia32_cg_config.use_sse2) {
- transform = !is_simple_sse_Const(val);
- } else {
- transform = !is_simple_x87_Const(val);
- }
- if (transform)
- return gen_float_const_Store(node, val);
+ /* We can transform every floating-point constant Store
+ into a sequence of integer stores.
+ If the constant is already in a register,
+ it would be better to use it, but we don't
+ have this information here. */
+ return gen_float_const_Store(node, val);
}
- return gen_normal_Store(node);
+ return gen_general_Store(node);
}
/**
/* create a new barrier */
arity = get_irn_arity(barrier);
- in = alloca(arity * sizeof(in[0]));
+ in = ALLOCAN(ir_node*, arity);
for (i = 0; i < arity; ++i) {
ir_node *new_in;
static ir_node *gen_be_IncSP(ir_node *node)
{
ir_node *res = be_duplicate_node(node);
- be_node_add_flags(res, -1, arch_irn_flags_modify_flags);
+ arch_irn_add_flags(res, arch_irn_flags_modify_flags);
return res;
}
long proj = get_Proj_proj(node);
ir_mode *mode = get_irn_mode(node);
ir_node *sse_load;
- const arch_register_class_t *cls;
- ir_node *res;
+ ir_node *res;
/* The following is kinda tricky: If we're using SSE, then we have to
* move the result value of the call in floating point registers to an
/* transform call modes */
if (mode_is_data(mode)) {
- cls = arch_get_irn_reg_class(node, -1);
+ const arch_register_class_t *cls = arch_get_irn_reg_class_out(node);
mode = cls->mode;
}
} else if (proj == pn_be_Call_M_regular) {
proj = pn_ia32_Call_M;
} else {
- arch_register_req_t const *const req = arch_get_register_req(node, BE_OUT_POS(proj));
- int const n_outs = get_ia32_n_res(new_call);
+ arch_register_req_t const *const req = arch_get_register_req_out(node);
+ int const n_outs = arch_irn_get_n_outs(new_call);
int i;
assert(proj >= pn_be_Call_first_res);
- assert(req->type == arch_register_req_type_limited);
+ assert(req->type & arch_register_req_type_limited);
for (i = 0; i < n_outs; ++i) {
arch_register_req_t const *const new_req = get_ia32_out_req(new_call, i);
- if (new_req->type != arch_register_req_type_limited ||
- new_req->cls != req->cls ||
+ if (!(new_req->type & arch_register_req_type_limited) ||
+ new_req->cls != req->cls ||
*new_req->limited != *req->limited)
continue;
new_pred = be_transform_node(pred);
block = get_nodes_block(new_pred);
return new_r_Proj(current_ir_graph, block, new_pred, mode_M,
- get_ia32_n_res(new_pred) + 1);
+ arch_irn_get_n_outs(new_pred) + 1);
}
/**
if (!is_ia32_irn(node))
return;
- n_outs = get_ia32_n_res(node);
+ n_outs = arch_irn_get_n_outs(node);
if (n_outs <= 0)
return;
if (is_ia32_SwitchJmp(node))