* @date 06.04.2006
* @version $Id$
*/
-#ifdef HAVE_CONFIG_H
#include "config.h"
-#endif
#ifdef WITH_ILP
#include <math.h>
+#include "array_t.h"
#include "hashptr.h"
#include "debug.h"
#include "obst.h"
static INLINE int
has_reg_class(const spill_ilp_t * si, const ir_node * irn)
{
- return arch_irn_consider_in_reg_alloc(si->birg->main_env->arch_env,
- si->cls, irn);
+ return arch_irn_consider_in_reg_alloc(si->cls, irn);
}
#if 0
#define pset_foreach(s,i) for((i)=pset_first((s)); (i); (i)=pset_next((s)))
#define set_foreach(s,i) for((i)=set_first((s)); (i); (i)=set_next((s)))
#define foreach_post_remat(s,i) for((i)=next_post_remat((s)); (i); (i)=next_post_remat((i)))
-#define foreach_pre_remat(si,s,i) for((i)=next_pre_remat((si),(s)); (i); (i)=next_pre_remat((si),(i)))
+#define foreach_pre_remat(s,i) for((i)=next_pre_remat((s)); (i); (i)=next_pre_remat((i)))
#define sched_foreach_op(s,i) for((i)=sched_next_op((s));!sched_is_end((i));(i)=sched_next_op((i)))
static int
#endif
}
-static double
-get_cost(const spill_ilp_t * si, const ir_node * irn)
+static double get_cost(const ir_node *irn)
{
if(be_is_Spill(irn)) {
return opt_cost_spill;
} else if(be_is_Reload(irn)){
return opt_cost_reload;
} else {
- return arch_get_op_estimated_cost(si->birg->main_env->arch_env, irn);
+ return arch_get_op_estimated_cost(irn);
}
}
static INLINE int
is_rematerializable(const spill_ilp_t * si, const ir_node * irn)
{
- int n;
- const arch_env_t *arch_env = si->birg->main_env->arch_env;
- int remat = (arch_irn_get_flags(arch_env, irn) & arch_irn_flags_rematerializable) != 0;
+ int n;
+ int remat = (arch_irn_get_flags(irn) & arch_irn_flags_rematerializable) != 0;
#if 0
if(!remat)
for (n = get_irn_arity(irn)-1; n>=0 && remat; --n) {
ir_node *op = get_irn_n(irn, n);
- remat &= has_reg_class(si, op) || arch_irn_get_flags(arch_env, op) & arch_irn_flags_ignore || is_NoMem(op);
+ remat &= has_reg_class(si, op) || arch_irn_get_flags(op) & arch_irn_flags_ignore || is_NoMem(op);
// if(!remat)
// ir_fprintf(stderr, " Argument %d (%+F) of Node %+F has wrong regclass\n", i, op, irn);
remat = obstack_alloc(si->obst, sizeof(*remat));
remat->op = op;
- remat->cost = (int)get_cost(si, op);
+ remat->cost = (int)get_cost(op);
remat->value = dest_value;
remat->proj = proj;
remat->inverse = 0;
return ret;
}
-static INLINE ir_node *
-sched_block_last_noncf(const spill_ilp_t * si, const ir_node * bb)
+static INLINE ir_node *sched_block_last_noncf(const ir_node * bb)
{
- return sched_skip((ir_node*)bb, 0, sched_skip_cf_predicator, (void *) si->birg->main_env->arch_env);
+ return sched_skip((ir_node*)bb, 0, sched_skip_cf_predicator, NULL);
}
/**
sched_add_before(insert, irn);
}
-static void
-sched_put_before(const spill_ilp_t * si, ir_node * insert, ir_node * irn)
+static void sched_put_before(ir_node * insert, ir_node * irn)
{
if(is_Block(insert)) {
- insert = sched_block_last_noncf(si, insert);
+ insert = sched_block_last_noncf(insert);
} else {
insert = sched_next_nonproj(insert, 0);
insert = sched_prev(insert);
}
-static ir_node *
-next_pre_remat(const spill_ilp_t * si, const ir_node * irn)
+static ir_node *next_pre_remat(const ir_node * irn)
{
op_t *op;
ir_node *ret;
if(is_Block(irn)) {
- ret = sched_block_last_noncf(si, irn);
+ ret = sched_block_last_noncf(irn);
ret = sched_next(ret);
ret = sched_prev_op(ret);
} else {
res = 1;
if(is_Block(pos)) {
- prev = sched_block_last_noncf(si, pos);
+ prev = sched_block_last_noncf(pos);
prev = sched_next_nonproj(prev, 0);
} else {
prev = sched_prev_op(pos);
set_phi_class(si->pc, copy, NULL);
set_nodes_block(copy, bb);
- sched_put_before(si, pos, copy);
+ sched_put_before(pos, copy);
return copy;
}
sched_foreach_reverse(bb, irn) {
int i;
- if(!sched_skip_cf_predicator(irn, si->birg->main_env->arch_env)) break;
+ if (!sched_skip_cf_predicator(irn, NULL)) break;
for(i=get_irn_arity(irn)-1; i>=0; --i) {
ir_node *arg = get_irn_n(irn,i);
* find values that are used by remats at end of block
* and insert them into live set
*/
- foreach_pre_remat(si, bb, irn) {
+ foreach_pre_remat(bb, irn) {
int n;
for (n=get_irn_arity(irn)-1; n>=0; --n) {
if(!has_reg_class(si, phi_arg)) {
ir_node *copy = be_new_Copy(si->cls, si->birg->irg, bb, phi_arg);
- ir_node *pos = sched_block_last_noncf(si, bb);
+ ir_node *pos = sched_block_last_noncf(bb);
op_t *op = obstack_alloc(si->obst, sizeof(*op));
DBG((si->dbg, LEVEL_2, "\t copy to my regclass for arg %+F of %+F\n", phi_arg, irn));
}
/* do not place post remats after jumps */
-	if(sched_skip_cf_predicator(irn, si->birg->main_env->arch_env)) {
+			if (sched_skip_cf_predicator(irn, NULL)) {
del_pset(used);
del_pset(args);
break;
* find values that are used by remats at end of block
* and insert them into live set
*/
- foreach_pre_remat(si, bb, irn) {
+ foreach_pre_remat(bb, irn) {
int n;
for (n=get_irn_arity(irn)-1; n>=0; --n) {
sched_foreach_reverse(bb, irn) {
int n;
-		if(!sched_skip_cf_predicator(irn, si->birg->main_env->arch_env)) break;
+		if (!sched_skip_cf_predicator(irn, NULL)) break;
for (n=get_irn_arity(irn)-1; n>=0; --n) {
ir_node *irn_arg = get_irn_n(irn, n);
lpp_set_factor_fast(si->lpp, cst, to_copy_spill->reg_out, -1.0);
if(reload != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
lpp_set_factor_fast(si->lpp, cst, to_copy_op->attr.live_range.ilp, -1.0);
- foreach_pre_remat(si, block, tmp) {
+ foreach_pre_remat(block, tmp) {
op_t *remat_op = get_irn_link(tmp);
if(remat_op->attr.remat.remat->value == to_copy) {
lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
static void
luke_blockwalker(ir_node * bb, void * data)
{
- spill_ilp_t *si = (spill_ilp_t*)data;
- ir_node *irn;
- pset *live;
- char buf[256];
- ilp_cst_t cst;
- spill_bb_t *spill_bb = get_irn_link(bb);
- ir_node *tmp;
- spill_t *spill;
- pset *defs = pset_new_ptr_default();
- const arch_env_t *arch_env = si->birg->main_env->arch_env;
+ spill_ilp_t *si = (spill_ilp_t*)data;
+ ir_node *irn;
+ pset *live;
+ char buf[256];
+ ilp_cst_t cst;
+ spill_bb_t *spill_bb = get_irn_link(bb);
+ ir_node *tmp;
+ spill_t *spill;
+ pset *defs = pset_new_ptr_default();
live = pset_new_ptr_default();
lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
if(reload != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, -1.0);
- foreach_pre_remat(si, bb, tmp) {
+ foreach_pre_remat(bb, tmp) {
op_t *remat_op = get_irn_link(tmp);
if(remat_op->attr.remat.remat->value == irn) {
lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
if(reload != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, -1.0);
- foreach_pre_remat(si, bb, tmp) {
+ foreach_pre_remat(bb, tmp) {
op_t *remat_op = get_irn_link(tmp);
if(remat_op->attr.remat.remat->value == irn) {
lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
/*
* assure the remat args are available
*/
- foreach_pre_remat(si, bb, tmp) {
+ foreach_pre_remat(bb, tmp) {
op_t *remat_op = get_irn_link(tmp);
int n;
* B A S I C B L O C K B O D Y
**************************************/
- sched_foreach_reverse_from(sched_block_last_noncf(si, bb), irn) {
+ sched_foreach_reverse_from(sched_block_last_noncf(bb), irn) {
op_t *op;
op_t *tmp_op;
int n,
}
}
}
- foreach_pre_remat(si, irn, tmp) {
+ foreach_pre_remat(irn, tmp) {
for (n=get_irn_arity(tmp)-1; n>=0; --n) {
ir_node *remat_arg = get_irn_n(tmp, n);
if(has_reg_class(si, remat_arg)) {
if(opt_memoperands && (!is_start_block(bb) || be_is_Barrier(irn))) {
for(n = get_irn_arity(irn)-1; n>=0; --n) {
- if(get_irn_n(irn, n) == arg && arch_possible_memory_operand(arch_env, irn, n)) {
+ if (get_irn_n(irn, n) == arg &&
+ arch_possible_memory_operand(irn, n)) {
ilp_var_t memoperand;
ir_snprintf(buf, sizeof(buf), "memoperand_%N_%d", irn, n);
assert(spill);
ir_snprintf(buf, sizeof(buf), "delete_%N", tmp);
- delete = lpp_add_var_default(si->lpp, buf, lpp_binary, -1.0*get_cost(si, irn)*execution_frequency(si, bb), 0.0);
+ delete = lpp_add_var_default(si->lpp, buf, lpp_binary, -1.0 * get_cost(irn) * execution_frequency(si, bb), 0.0);
/* op may not be killed if its first live_range is 1 */
ir_snprintf(buf, sizeof(buf), "killorig-lr_%N", tmp);
assert(spill);
ir_snprintf(buf, sizeof(buf), "keep_%N", tmp);
- keep = lpp_add_var_default(si->lpp, buf, lpp_binary, get_cost(si, irn)*execution_frequency(si, bb), 1.0);
+ keep = lpp_add_var_default(si->lpp, buf, lpp_binary, get_cost(irn) * execution_frequency(si, bb), 1.0);
/* op may not be killed if its first live_range is 1 */
ir_snprintf(buf, sizeof(buf), "killorig-lr_%N", tmp);
lpp_set_factor_fast(si->lpp, requirements, arg_op->attr.live_range.ilp, 1.0);
lpp_set_factor_fast(si->lpp, requirements, op->attr.live_range.args.reloads[i], 1.0);
- foreach_pre_remat(si, irn, tmp) {
+ foreach_pre_remat(irn, tmp) {
op_t *remat_op = get_irn_link(tmp);
if(remat_op->attr.remat.remat->value == arg) {
lpp_set_factor_fast(si->lpp, requirements, remat_op->attr.remat.ilp, 1.0);
}
}
for(n = get_irn_arity(irn)-1; n>=0; --n) {
- if(get_irn_n(irn, n) == arg && arch_possible_memory_operand(arch_env, irn, n)) {
+ if (get_irn_n(irn, n) == arg &&
+ arch_possible_memory_operand(irn, n)) {
memoperand_t *memoperand;
memoperand = set_find_memoperand(si->memoperands, irn, n);
}
/* requirements for remats */
- foreach_pre_remat(si, irn, tmp) {
+ foreach_pre_remat(irn, tmp) {
op_t *remat_op = get_irn_link(tmp);
int n;
}
/** insert a spill at an arbitrary position */
-ir_node *be_spill2(const arch_env_t *arch_env, ir_node *irn, ir_node *insert)
+static ir_node *be_spill2(ir_node *irn, ir_node *insert)
{
ir_node *bl = is_Block(insert) ? insert : get_nodes_block(insert);
ir_graph *irg = get_irn_irg(bl);
ir_node *frame = get_irg_frame(irg);
ir_node *spill;
ir_node *next;
- const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, irn, -1);
- const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
+ const arch_register_class_t *cls = arch_get_irn_reg_class(irn, -1);
+ const arch_register_class_t *cls_frame = arch_get_irn_reg_class(frame, -1);
spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn);
{
defs_t *defs;
ir_node *spill;
- const arch_env_t *arch_env = si->birg->main_env->arch_env;
DBG((si->dbg, LEVEL_3, "\t inserting spill for value %+F after %+F\n", irn, before));
- spill = be_spill2(arch_env, irn, before);
+ spill = be_spill2(irn, before);
defs = set_insert_def(si->values, value);
assert(defs);
defs_t *defs;
ir_node *reload,
*spill;
- const arch_env_t *arch_env = si->birg->main_env->arch_env;
DBG((si->dbg, LEVEL_3, "\t inserting reload for value %+F before %+F\n", value, after));
spill = defs->spills;
assert(spill && "no spill placed before reload");
- reload = be_reload(arch_env, si->cls, after, get_irn_mode(value), spill);
+ reload = be_reload(si->cls, after, get_irn_mode(value), spill);
/* enter into the linked list */
set_irn_link(reload, defs->remats);
void perform_memory_operand(spill_ilp_t * si, memoperand_t * memoperand)
{
- defs_t *defs;
- ir_node *value = get_irn_n(memoperand->irn, memoperand->pos);
- ir_node *spill;
- const arch_env_t *arch_env = si->birg->main_env->arch_env;
+ defs_t *defs;
+ ir_node *value = get_irn_n(memoperand->irn, memoperand->pos);
+ ir_node *spill;
DBG((si->dbg, LEVEL_2, "\t inserting memory operand for value %+F at %+F\n", value, memoperand->irn));
spill = defs->spills;
assert(spill && "no spill placed before reload");
- arch_perform_memory_operand(arch_env, memoperand->irn, spill, memoperand->pos);
+ arch_perform_memory_operand(memoperand->irn, spill, memoperand->pos);
}
void insert_memoperands(spill_ilp_t * si)
{
ir_node *insert_pos = bb;
ir_node *spill;
- const arch_env_t *arch_env = si->birg->main_env->arch_env;
/* find last definition of arg value in block */
ir_node *next;
DBG((si->dbg, LEVEL_2, "\t inserting mem copy for value %+F after %+F\n", value, insert_pos));
- spill = be_spill2(arch_env, is_Block(insert_pos)?value:insert_pos, insert_pos);
+ spill = be_spill2(is_Block(insert_pos)?value:insert_pos, insert_pos);
return spill;
}
if(!is_zero(name->value)) {
ir_node *reload;
ir_node *insert_pos = bb;
- ir_node *prev = sched_block_last_noncf(si, bb);
+ ir_node *prev = sched_block_last_noncf(bb);
op_t *prev_op = get_irn_link(prev);
while(be_is_Spill(prev)) {
if(!bitset_is_set(kh->used, get_irn_idx(irn))) {
if(be_is_Spill(irn) || be_is_Reload(irn)) {
- DBG((kh->si->dbg, LEVEL_1, "\t SUBOPTIMAL! %+F IS UNUSED (cost: %g)\n", irn, get_cost(kh->si, irn)*execution_frequency(kh->si, bb)));
+ DBG((kh->si->dbg, LEVEL_1, "\t SUBOPTIMAL! %+F IS UNUSED (cost: %g)\n", irn, get_cost(irn) * execution_frequency(kh->si, bb)));
#if 0
assert(lpp_get_sol_state(kh->si->lpp) != lpp_optimal && "optimal solution is suboptimal?");
#endif
bitset_t *arch_regs = bitset_malloc(arch_n_regs);
bitset_t *abi_regs = bitset_malloc(arch_n_regs);
- arch_put_non_ignore_regs(si->birg->main_env->arch_env, si->cls, arch_regs);
- be_abi_put_ignore_regs(si->birg->abi, si->cls, abi_regs);
+ arch_put_non_ignore_regs(si->cls, arch_regs);
+ be_abi_put_ignore_regs(si->birg->abi, si->cls, abi_regs);
bitset_andnot(arch_regs, abi_regs);
arch_n_regs = bitset_popcnt(arch_regs);