#include "ia32_dbg_stat.h"
#include "ia32_util.h"
+#define AGGRESSIVE_AM
+
typedef enum {
IA32_AM_CAND_NONE = 0,
IA32_AM_CAND_LEFT = 1,
if (mode_is_float(mode)) {
FP_USED(env->cg);
if (USE_SSE2(env->cg))
- cnst = new_rd_ia32_xConst(dbg, irg, block, get_irg_no_mem(irg), mode);
+ cnst = new_rd_ia32_xConst(dbg, irg, block, mode);
else
- cnst = new_rd_ia32_vfConst(dbg, irg, block, get_irg_no_mem(irg), mode);
+ cnst = new_rd_ia32_vfConst(dbg, irg, block, mode);
}
else
- cnst = new_rd_ia32_Const(dbg, irg, block, get_irg_no_mem(irg), mode);
+ cnst = new_rd_ia32_Const(dbg, irg, block, mode);
set_ia32_Const_attr(cnst, env->irn);
env->irn = cnst;
env->mode = mode_P;
cnst = gen_SymConst(env);
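+ /* depend on the start barrier so the Const cannot be scheduled before the prologue */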
+ add_irn_dep(cnst, be_abi_get_start_barrier(env->cg->birg->abi));
set_Load_ptr(get_Proj_pred(load), cnst);
cnst = load;
}
else {
- cnst = new_rd_ia32_Const(dbg, irg, block, get_irg_no_mem(irg), get_irn_mode(node));
+ cnst = new_rd_ia32_Const(dbg, irg, block, get_irn_mode(node));
+ add_irn_dep(cnst, be_abi_get_start_barrier(env->cg->birg->abi));
set_ia32_Const_attr(cnst, node);
}
+
return cnst;
}
tenv.irn = irn;
DEBUG_ONLY(tenv.mod = cg->mod;)
+#if 1
/* place const either in the smallest dominator of all its users or the original block */
if (cg->opt & IA32_OPT_PLACECNST)
tenv.block = node_users_smallest_common_dominator(irn, 1);
else
tenv.block = get_nodes_block(irn);
+#else
+ /* Actually, there is no real sense in placing
+ * the Consts in the successor of the start block. */
+ {
+ ir_node *afterstart = NULL;
+ ir_node *startblock = get_irg_start_block(tenv.irg);
+ const ir_edge_t *edge;
+
+ foreach_block_succ(startblock, edge) {
+ ir_node *block = get_edge_src_irn(edge);
+ if (block != startblock) {
+ afterstart = block;
+ break;
+ }
+ }
+ assert(afterstart != NULL);
+ tenv.block = afterstart;
+ }
+#endif
switch (get_irn_opcode(irn)) {
case iro_Const:
static void ia32_place_consts_walker(ir_node *irn, void *env) {
ia32_code_gen_t *cg = env;
- if(!is_Const(irn) && !is_SymConst(irn))
+ if (! is_Const(irn) && ! is_SymConst(irn))
return;
ia32_transform_const(irn, cg);
}
}
-#if 0
-/**
- * Creates a Push from Store(IncSP(gp_reg_size))
- */
-static void ia32_create_Push(ir_node *irn, ia32_code_gen_t *cg) {
- ir_node *sp = get_irn_n(irn, 0);
- ir_graph *irg = cg->irg;
- ir_node *val, *next, *push, *bl, *proj_M, *proj_res, *old_proj_M, *mem;
- const ir_edge_t *edge;
- heights_t *h;
-
- /* do not create push if store has already an offset assigned or base is not a IncSP */
- if (get_ia32_am_offs(irn) || ! be_is_IncSP(sp))
- return;
-
- /* do not create push if index is not NOREG */
- if (arch_get_irn_register(cg->arch_env, get_irn_n(irn, 1)) !=
- &ia32_gp_regs[REG_GP_NOREG])
- return;
-
- /* do not create push for floating point */
- val = get_irn_n(irn, 2);
- if (mode_is_float(get_irn_mode(val)))
- return;
-
- /* do not create push if IncSp doesn't expand stack or expand size is different from register size */
- if (be_get_IncSP_offset(sp) != get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode))
- return;
-
- /* do not create push, if there is a path (inside the block) from the push value to IncSP */
- h = heights_new(cg->irg);
- if (get_nodes_block(val) == get_nodes_block(sp) &&
- heights_reachable_in_block(h, val, sp))
- {
- heights_free(h);
- return;
- }
- heights_free(h);
-
- /* ok, translate into Push */
- edge = get_irn_out_edge_first(irn);
- old_proj_M = get_edge_src_irn(edge);
- bl = get_nodes_block(irn);
-
- next = sched_next(irn);
- sched_remove(irn);
- sched_remove(sp);
-
- /*
- build memory input:
- if the IncSP points to NoMem -> just use the memory input from store
- if IncSP points to somewhere else -> sync memory of IncSP and Store
- */
- mem = get_irn_n(irn, 3);
- push = new_rd_ia32_Push(NULL, irg, bl, be_get_IncSP_pred(sp), val, mem);
- proj_res = new_r_Proj(irg, bl, push, get_irn_mode(sp), pn_ia32_Push_stack);
- proj_M = new_r_Proj(irg, bl, push, mode_M, pn_ia32_Push_M);
-
- /* copy a possible constant from the store */
- set_ia32_id_cnst(push, get_ia32_id_cnst(irn));
- set_ia32_immop_type(push, get_ia32_immop_type(irn));
-
- /* the push must have SP out register */
- arch_set_irn_register(cg->arch_env, push, arch_get_irn_register(cg->arch_env, sp));
-
- exchange(old_proj_M, proj_M);
- exchange(sp, proj_res);
- sched_add_before(next, push);
- sched_add_after(push, proj_res);
-}
-#endif
-
// only optimize up to 48 stores behind IncSPs
#define MAXPUSH_OPTIMIZE 48
ir_node *push;
ir_node *val, *mem;
ir_node *store = stores[i];
+ ir_node *noreg = ia32_new_NoReg_gp(cg);
if(store == NULL || is_Bad(store))
break;
spreg = arch_get_irn_register(cg->arch_env, curr_sp);
// create a push
- push = new_rd_ia32_Push(NULL, irg, block, curr_sp, val, mem);
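+ /* the Push constructor now takes explicit base/index operands; both are unused here, so pass NoReg */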
+ push = new_rd_ia32_Push(NULL, irg, block, noreg, noreg, val, curr_sp, mem);
if(get_ia32_immop_type(store) != ia32_ImmNone) {
copy_ia32_Immop_attr(push, store);
}
set_irn_n(succ, 0, push);
}
- // we can remove the store from schedule now
+ // we can remove the store now
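+ // Bad the inputs first so the dead Store no longer keeps its operands alive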
+ set_irn_n(store, 0, new_Bad());
+ set_irn_n(store, 1, new_Bad());
+ set_irn_n(store, 2, new_Bad());
+ set_irn_n(store, 3, new_Bad());
sched_remove(store);
offset -= 4;
if(offset == 0) {
const ir_edge_t *edge, *next;
- sched_remove(irn);
- set_irn_n(irn, 0, new_Bad());
-
foreach_out_edge_safe(irn, edge, next) {
ir_node *arg = get_edge_src_irn(edge);
int pos = get_edge_src_pos(edge);
set_irn_n(arg, pos, curr_sp);
}
- sched_remove(irn);
set_irn_n(irn, 0, new_Bad());
+ sched_remove(irn);
} else {
set_irn_n(irn, 0, curr_sp);
}
/* Omit the optimized IncSP */
be_set_IncSP_pred(irn, be_get_IncSP_pred(prev));
+
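+ /* detach the omitted IncSP from its operand before unscheduling it */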
+ set_irn_n(prev, 0, new_Bad());
sched_remove(prev);
}
}
if (be_is_IncSP(irn)) {
// optimize_IncSP doesn't respect dependency edges yet...
//ia32_optimize_IncSP(irn, cg);
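+ // reference the function so it doesn't trigger an unused-function warning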
+ (void) ia32_optimize_IncSP;
ia32_create_Pushs(irn, cg);
}
}
irg_walk_graph(irg, ia32_peephole_optimize_node, NULL, cg);
}
-
/******************************************************************
*              _     _                   __  __           _
*     /\      | |   | |                 |  \/  |         | |
in = left;
+#ifndef AGGRESSIVE_AM
if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
n = ia32_get_irn_n_edges(in);
is_cand = (n == 1) ? 0 : is_cand; /* load with only one user: don't create LEA */
}

in = right;

if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
n = ia32_get_irn_n_edges(in);
is_cand = (n == 1) ? 0 : is_cand; /* load with only one user: don't create LEA */
}
+#else
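+ /* n is only used by the non-aggressive checks above */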
+ (void) n;
+#endif
is_cand = get_ia32_frame_ent(irn) ? 1 : is_cand;
*/
static ia32_am_cand_t is_am_candidate(ia32_code_gen_t *cg, heights_t *h, const ir_node *block, ir_node *irn) {
ir_node *in, *load, *other, *left, *right;
- int n, is_cand = 0, cand;
+ int is_cand = 0, cand;
if (is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn) || is_ia32_vfild(irn) || is_ia32_vfist(irn) ||
is_ia32_GetST0(irn) || is_ia32_SetST0(irn) || is_ia32_xStoreSimple(irn))
in = left;
if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
+#ifndef AGGRESSIVE_AM
+ int n;
n = ia32_get_irn_n_edges(in);
is_cand = (n == 1) ? 1 : is_cand; /* load with more than one user: no AM */
+#else
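+ /* aggressive mode: always consider folding the Load */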
+ is_cand = 1;
+#endif
load = get_Proj_pred(in);
other = right;
- /* 8bit Loads are not supported, they cannot be used with every register */
- if (get_mode_size_bits(get_ia32_ls_mode(load)) < 16)
+ /* 8bit Loads are not supported (for binary ops),
+ * they cannot be used with every register */
+ if (get_irn_arity(irn) != 4 && get_mode_size_bits(get_ia32_ls_mode(load)) < 16) {
+ assert(get_irn_arity(irn) == 5);
is_cand = 0;
+ }
/* If there is a data dependency of other irn from load: cannot use AM */
if (is_cand && get_nodes_block(other) == block) {
is_cand = 0;
if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
+#ifndef AGGRESSIVE_AM
+ int n;
n = ia32_get_irn_n_edges(in);
is_cand = (n == 1) ? 1 : is_cand; /* load with more than one user: no AM */
+#else
+ is_cand = 1;
+#endif
load = get_Proj_pred(in);
other = left;
* @param irn The irn to be removed from schedule
*/
static INLINE void try_remove_from_sched(ir_node *irn) {
+ int i, arity;
+
if (sched_is_scheduled(irn)) {
if (get_irn_mode(irn) == mode_T) {
- const ir_edge_t *edge;
- foreach_out_edge(irn, edge) {
+ const ir_edge_t *edge, *next;
+ /* Bad-ing the Proj's input removes it from irn's out edges, so iterate safely */
+ foreach_out_edge_safe(irn, edge, next) {
ir_node *proj = get_edge_src_irn(edge);
- if (sched_is_scheduled(proj))
+ if (sched_is_scheduled(proj)) {
+ set_irn_n(proj, 0, new_Bad());
sched_remove(proj);
+ }
}
}
+
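+ /* rewire all inputs to Bad so the removed node keeps no operands alive */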
+ arity = get_irn_arity(irn);
+ for(i = 0; i < arity; ++i) {
+ set_irn_n(irn, i, new_Bad());
+ }
sched_remove(irn);
}
}
}
}
-
/**
* Checks for address mode patterns and performs the
* necessary transformations.
ia32_am_opt_env_t *am_opt_env = env;
ia32_code_gen_t *cg = am_opt_env->cg;
heights_t *h = am_opt_env->h;
- ir_node *block, *noreg_gp, *noreg_fp;
- ir_node *left, *right;
+ ir_node *block, *left, *right;
ir_node *store, *load, *mem_proj;
ir_node *succ, *addr_b, *addr_i;
int check_am_src = 0;
if (! is_ia32_irn(irn) || is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn))
return;
- block = get_nodes_block(irn);
- noreg_gp = ia32_new_NoReg_gp(cg);
- noreg_fp = ia32_new_NoReg_fp(cg);
+ block = get_nodes_block(irn);
DBG((mod, LEVEL_1, "checking for AM\n"));
if (get_irn_arity(irn) == 4) {
/* it's an "unary" operation */
right = left;
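+ /* left == right here, so one candidate check covers both operand positions */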
+ cand = IA32_AM_CAND_BOTH;
}
else {
right = get_irn_n(irn, 3);
if (get_irn_arity(irn) == 5) {
/* binary AMop */
set_irn_n(irn, 4, get_irn_n(load, 2));
- set_irn_n(irn, 2, noreg_gp);
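+ /* use the NoReg matching the register class admissible at this input */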
+ set_irn_n(irn, 2, ia32_get_admissible_noreg(cg, irn, 2));
}
else {
/* unary AMop */
set_irn_n(irn, 3, get_irn_n(load, 2));
- set_irn_n(irn, 2, noreg_gp);
+ set_irn_n(irn, 2, ia32_get_admissible_noreg(cg, irn, 2));
}
/* connect the memory Proj of the Store to the op */
/* and right operand is a Load which is only used by this irn */
if (check_am_src &&
(cand & IA32_AM_CAND_RIGHT) &&
- (get_irn_arity(irn) == 5) &&
(ia32_get_irn_n_edges(right) == 1))
{
- right = get_Proj_pred(right);
+ ir_node *load = get_Proj_pred(right);
- addr_b = get_irn_n(right, 0);
- addr_i = get_irn_n(right, 1);
+ addr_b = get_irn_n(load, 0);
+ addr_i = get_irn_n(load, 1);
/* set new base, index and attributes */
set_irn_n(irn, 0, addr_b);
set_irn_n(irn, 1, addr_i);
- add_ia32_am_offs(irn, get_ia32_am_offs(right));
- set_ia32_am_scale(irn, get_ia32_am_scale(right));
- set_ia32_am_flavour(irn, get_ia32_am_flavour(right));
+ add_ia32_am_offs(irn, get_ia32_am_offs(load));
+ set_ia32_am_scale(irn, get_ia32_am_scale(load));
+ set_ia32_am_flavour(irn, get_ia32_am_flavour(load));
set_ia32_op_type(irn, ia32_AddrModeS);
- set_ia32_frame_ent(irn, get_ia32_frame_ent(right));
- set_ia32_ls_mode(irn, get_ia32_ls_mode(right));
+ set_ia32_frame_ent(irn, get_ia32_frame_ent(load));
+ set_ia32_ls_mode(irn, get_ia32_ls_mode(load));
- set_ia32_am_sc(irn, get_ia32_am_sc(right));
- if (is_ia32_am_sc_sign(right))
+ set_ia32_am_sc(irn, get_ia32_am_sc(load));
+ if (is_ia32_am_sc_sign(load))
set_ia32_am_sc_sign(irn);
/* clear remat flag */
set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);
- if (is_ia32_use_frame(right))
+ if (is_ia32_use_frame(load))
set_ia32_use_frame(irn);
- /* connect to Load memory */
- set_irn_n(irn, 4, get_irn_n(right, 2));
+ /* connect to Load memory and disconnect Load */
+ if (get_irn_arity(irn) == 5) {
+ /* binary AMop */
+ set_irn_n(irn, 4, get_irn_n(load, 2));
+ set_irn_n(irn, 3, ia32_get_admissible_noreg(cg, irn, 3));
+ } else {
+ assert(get_irn_arity(irn) == 4);
+ /* unary AMop */
+ set_irn_n(irn, 3, get_irn_n(load, 2));
+ set_irn_n(irn, 2, ia32_get_admissible_noreg(cg, irn, 2));
+ }
/* this is only needed for Compares, but currently ALL nodes
* have this attribute :-) */
set_ia32_pncode(irn, get_inversed_pnc(get_ia32_pncode(irn)));
- /* disconnect from Load */
- set_irn_n(irn, 3, noreg_gp);
-
- DBG_OPT_AM_S(right, irn);
+ DBG_OPT_AM_S(load, irn);
/* If Load has a memory Proj, connect it to the op */
- mem_proj = ia32_get_proj_for_mode(right, mode_M);
+ mem_proj = ia32_get_proj_for_mode(load, mode_M);
if (mem_proj) {
set_Proj_pred(mem_proj, irn);
set_Proj_proj(mem_proj, 1);
}
- try_remove_from_sched(right);
+ try_remove_from_sched(load);
- DB((mod, LEVEL_1, "merged with %+F into source AM\n", right));
+ DB((mod, LEVEL_1, "merged with %+F into source AM\n", load));
}
else {
/* was exchanged but optimize failed: exchange back */