ia32_address_t addr;
ir_mode *ls_mode;
ir_node *mem_proj;
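+ /* the folded node itself (the Proj of the Load); kept so transform_AM_mem
+  * can later identify which memory dependencies stem from this Load */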
+ ir_node *am_node;
ia32_op_type_t op_type;
ir_node *new_op1;
ir_node *new_op2;
am->pinned = get_irn_pinned(load);
am->ls_mode = get_Load_mode(load);
am->mem_proj = be_get_Proj_for_pn(load, pn_Load_M);
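+ /* remember which node was folded so its memory edges can be fixed up later */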
+ am->am_node = node;
/* construct load address */
ia32_create_address_mode(addr, ptr, /*force=*/0);
ir_node *src_block = get_nodes_block(node);
ir_node *op1 = get_irn_n(node, n_ia32_l_binop_left);
ir_node *op2 = get_irn_n(node, n_ia32_l_binop_right);
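+ /* eflags must be available before the operands are matched */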
+ ir_node *eflags = get_irn_n(node, n_ia32_l_binop_eflags);
dbg_info *dbgi;
- ir_node *block, *new_node, *eflags, *new_eflags;
+ ir_node *block, *new_node, *new_eflags;
ia32_address_mode_t am;
ia32_address_t *addr = &am.addr;
- match_arguments(&am, src_block, op1, op2, NULL, flags);
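+ /* pass eflags as the "other" node: address mode must not be used if
+  * eflags depends on the load */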
+ match_arguments(&am, src_block, op1, op2, eflags, flags);
dbgi = get_irn_dbg_info(node);
block = be_transform_node(src_block);
- eflags = get_irn_n(node, n_ia32_l_binop_eflags);
new_eflags = be_transform_node(eflags);
new_node = func(dbgi, current_ir_graph, block, addr->base, addr->index,
addr->mem, am.new_op1, am.new_op2, new_eflags);
{
ir_node *load;
- if(!is_Proj(node))
+ if (!is_Proj(node))
return 0;
/* we only use address mode if we're the only user of the load's result */
- if(get_irn_n_edges(node) > 1)
+ if (get_irn_n_edges(node) > 1)
return 0;
load = get_Proj_pred(node);
- if(!is_Load(load))
+ if (!is_Load(load))
return 0;
- if(get_nodes_block(load) != block)
+ if (get_nodes_block(load) != block)
return 0;
- /* Store should be attached to the load */
- if(!is_Proj(mem) || get_Proj_pred(mem) != load)
- return 0;
/* store should have the same pointer as the load */
- if(get_Load_ptr(load) != ptr)
+ if (get_Load_ptr(load) != ptr)
return 0;
/* don't do AM if other node inputs depend on the load (via mem-proj) */
- if(other != NULL && get_nodes_block(other) == block
- && heights_reachable_in_block(heights, other, load))
+ if (other != NULL &&
+ get_nodes_block(other) == block &&
+ heights_reachable_in_block(heights, other, load)) {
return 0;
+ }
+
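+ /* a Sync merges several memory dependencies; the Load's own memory Proj
+  * may be one of them, but no other predecessor in this block may reach
+  * the Load, or folding it would reorder memory accesses */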
+ if (is_Sync(mem)) {
+ int i;
+
+ for (i = get_Sync_n_preds(mem) - 1; i >= 0; --i) {
+ ir_node *const pred = get_Sync_pred(mem, i);
+
+ if (is_Proj(pred) && get_Proj_pred(pred) == load)
+ continue;
+
+ if (get_nodes_block(pred) == block &&
+ heights_reachable_in_block(heights, pred, load)) {
+ return 0;
+ }
+ }
+ } else {
+ /* Store should be attached to the load */
+ if (!is_Proj(mem) || get_Proj_pred(mem) != load)
+ return 0;
+ }
return 1;
}
ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg);
ir_graph *irg = current_ir_graph;
dbg_info *dbgi;
+ ir_node *new_mem;
ir_node *new_node;
ir_node *new_op;
ir_node *mem_proj;
if(addr->mem == NULL)
addr->mem = new_NoMem();
- dbgi = get_irn_dbg_info(node);
- block = be_transform_node(src_block);
+ dbgi = get_irn_dbg_info(node);
+ block = be_transform_node(src_block);
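+ /* the result takes its memory from the folded Load; transform_AM_mem is
+  * expected to fold any additional Sync predecessors of mem in as well */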
+ new_mem = transform_AM_mem(irg, block, am.am_node, mem, addr->mem);
+
if(get_mode_size_bits(mode) == 8) {
new_node = func8bit(dbgi, irg, block, addr->base, addr->index,
- addr->mem, new_op);
+ new_mem, new_op);
} else {
- new_node = func(dbgi, irg, block, addr->base, addr->index, addr->mem,
+ new_node = func(dbgi, irg, block, addr->base, addr->index, new_mem,
new_op);
}
set_address(new_node, addr);
ir_node *ptr, ir_mode *mode,
construct_unop_dest_func *func)
{
- ir_graph *irg = current_ir_graph;
- ir_node *src_block = get_nodes_block(node);
- ir_node *block;
+ ir_graph *irg = current_ir_graph;
+ ir_node *src_block = get_nodes_block(node);
+ ir_node *block;
dbg_info *dbgi;
- ir_node *new_node;
- ir_node *mem_proj;
+ ir_node *new_mem;
+ ir_node *new_node;
+ ir_node *mem_proj;
ia32_address_mode_t am;
ia32_address_t *addr = &am.addr;
memset(&am, 0, sizeof(am));
dbgi = get_irn_dbg_info(node);
block = be_transform_node(src_block);
- new_node = func(dbgi, irg, block, addr->base, addr->index, addr->mem);
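+ /* same memory handling as in the binop case above */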
+ new_mem = transform_AM_mem(irg, block, am.am_node, mem, addr->mem);
+ new_node = func(dbgi, irg, block, addr->base, addr->index, new_mem);
set_address(new_node, addr);
set_ia32_op_type(new_node, ia32_AddrModeD);
set_ia32_ls_mode(new_node, mode);
if(is_Conv(val)) {
ir_node *conv_op = get_Conv_op(val);
ir_mode *pred_mode = get_irn_mode(conv_op);
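+ /* a Conv from a non-GP mode (e.g. float) is a real conversion and must
+  * not be skipped */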
+ if (!ia32_mode_needs_gp_reg(pred_mode))
+ break;
if(pred_mode == mode_b || bits <= get_mode_size_bits(pred_mode)) {
val = conv_op;
continue;
case iro_Sub:
op1 = get_Sub_left(val);
op2 = get_Sub_right(val);
- if(is_Const(op2)) {
- ir_fprintf(stderr, "Optimisation warning: not-normalize sub ,C"
- "found\n");
+ if (is_Const(op2)) {
+ ir_fprintf(stderr, "Optimisation warning: not-normalized Sub(x, Const) found\n");
}
new_node = dest_am_binop(val, op1, op2, mem, ptr, mode,
new_rd_ia32_SubMem, new_rd_ia32_SubMem8Bit,
}
/**
- * helper function: checks wether all Cmp projs are Lg or Eq which is needed
+ * helper function: checks whether all Cmp projs are Lg or Eq, which is needed
* to fold an and into a test node
*/
static int can_fold_test_and(ir_node *node)
switch (get_mode_size_bits(tgt_mode)) {
case 32: float_mantissa = 23 + 1; break; // + 1 for implicit 1
case 64: float_mantissa = 52 + 1; break;
- case 80: float_mantissa = 64 + 1; break;
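+ /* x87 extended precision stores the integer bit explicitly: 64 mantissa
+  * bits without an implicit one; 96 bit is the same format padded in memory */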
+ case 80:
+ case 96: float_mantissa = 64; break;
default: float_mantissa = 0; break;
}
if (float_mantissa < int_mantissa) {
/* the shift amount can be any mode that is at least 5 bits wide, since
 * all other bits are ignored anyway */
- while (is_Conv(count) && get_irn_n_edges(count) == 1) {
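+ /* only Convs producing an integer mode may be skipped; others actually
+  * change the value */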
+ while (is_Conv(count) &&
+ get_irn_n_edges(count) == 1 &&
+ mode_is_int(get_irn_mode(count))) {
assert(get_mode_size_bits(get_irn_mode(count)) >= 5);
count = get_Conv_op(count);
}