#include "bechordal_t.h"
-#define BIGM 100000.0
-
-#define DUMP_SOLUTION
-#define DUMP_ILP
+//#define DUMP_SOLUTION
+//#define DUMP_ILP
//#define KEEPALIVE /* keep alive all inserted remats and dump graph with remats */
#define COLLECT_REMATS /* enable rematerialization */
#define COLLECT_INVERSE_REMATS /* enable placement of inverse remats */
+//#define ONLY_BRIGGS_REMATS /* only remats without parameters (or only with ignored params) */
#define REMAT_WHILE_LIVE /* only remat values that are live */
//#define NO_ENLARGE_L1V3N355 /* do not remat after the death of some operand */
//#define EXECFREQ_LOOPDEPH /* compute execution frequency from loop depth only */
#define GOODWIN_REDUCTION
//#define NO_MEMCOPIES
//#define VERIFY_DOMINANCE
+#define WITH_MEMOPERANDS
#define SOLVE
//#define SOLVE_LOCAL
#define LPP_SERVER "i44pc52"
#define LPP_SOLVER "cplex"
-#define COST_LOAD 8
-#define COST_STORE 50
-#define COST_REMAT 1
+#define COST_LOAD 8
+#define COST_MEMOPERAND 7
+#define COST_STORE 50
+#define COST_REMAT 1
#define ILP_TIMEOUT 300
#define MAX_PATHS 16
pset *spills;
set *interferences;
ir_node *m_unknown;
+#ifdef WITH_MEMOPERANDS
+ set *memoperands;
+#endif
DEBUG_ONLY(firm_dbg_module_t * dbg);
} spill_ilp_t;
/** Description of one rematerialisation opportunity: an operation that can
 *  recompute a value instead of reloading it from the spill slot. */
typedef struct _remat_t {
	const ir_node *op;      /**< for copy_irn */
	const ir_node *value;   /**< the value which is being recomputed by this remat */
	const ir_node *proj;    /**< not NULL if the above op produces a tuple */
	int            cost;    /**< cost of this remat */
	int            inverse; /**< nonzero if this is an inverse remat */
} remat_t;
union {
struct {
ilp_var_t ilp;
- remat_t *remat; /** the remat this op belongs to */
+ const remat_t *remat; /** the remat this op belongs to */
int pre; /** 1, if this is a pressure-increasing remat */
} remat;
struct {
} op_t;
typedef struct _defs_t {
- ir_node *value;
- ir_node *spills; /**< points to the first spill for this value (linked by link field) */
- ir_node *remats; /**< points to the first definition for this value (linked by link field) */
+ const ir_node *value;
+ ir_node *spills; /**< points to the first spill for this value (linked by link field) */
+ ir_node *remats; /**< points to the first definition for this value (linked by link field) */
} defs_t;
typedef struct _remat_info_t {
} keyval_t;
typedef struct _spill_t {
- ir_node *irn;
- ilp_var_t reg_in;
- ilp_var_t mem_in;
- ilp_var_t reg_out;
- ilp_var_t mem_out;
- ilp_var_t spill;
+ ir_node *irn;
+ ilp_var_t reg_in;
+ ilp_var_t mem_in;
+ ilp_var_t reg_out;
+ ilp_var_t mem_out;
+ ilp_var_t spill;
} spill_t;
#ifdef WITH_MEMOPERANDS
/** One (instruction, argument position) pair that could be folded into a
 *  memory operand, together with its ILP decision variable. */
typedef struct _memoperand_t {
	ir_node      *irn; /**< the irn */
	unsigned int  pos; /**< the position of the argument */
	ilp_var_t     ilp; /**< the ilp var for this memory operand */
} memoperand_t;
#endif
+
static INLINE int
has_reg_class(const spill_ilp_t * si, const ir_node * irn)
{
return !(p->irn == q->irn);
}
#ifdef WITH_MEMOPERANDS
/**
 * set compare callback for memoperand_t entries.
 *
 * Two entries are considered equal iff they refer to the same node at the
 * same argument position.  Returns 0 on equality (set convention).
 */
static int
cmp_memoperands(const void *a, const void *b, size_t size)
{
	const memoperand_t *p = a;
	const memoperand_t *q = b;

	/* equivalent to !(irn equal && pos equal), written via De Morgan */
	return (p->irn != q->irn) || (p->pos != q->pos);
}
#endif
+
static keyval_t *
-set_find_keyval(set * set, void * key)
+set_find_keyval(set * set, const void * key)
{
keyval_t query;
}
static defs_t *
-set_find_def(set * set, ir_node * value)
+set_find_def(set * set, const ir_node * value)
{
defs_t query;
}
static defs_t *
-set_insert_def(set * set, ir_node * value)
+set_insert_def(set * set, const ir_node * value)
{
defs_t query;
return set_insert(set, &query, sizeof(query), HASH_PTR(value));
}
+#ifdef WITH_MEMOPERANDS
+static memoperand_t *
+set_insert_memoperand(set * set, ir_node * irn, unsigned int pos, ilp_var_t ilp)
+{
+ memoperand_t query;
+
+ query.irn = irn;
+ query.pos = pos;
+ query.ilp = ilp;
+ return set_insert(set, &query, sizeof(query), HASH_PTR(irn)+pos);
+}
+
+static memoperand_t *
+set_find_memoperand(set * set, const ir_node * irn, unsigned int pos)
+{
+ memoperand_t query;
+
+ query.irn = (ir_node*)irn;
+ query.pos = pos;
+ return set_find(set, &query, sizeof(query), HASH_PTR(irn)+pos);
+}
+#endif
+
+
static spill_t *
-set_find_spill(set * set, ir_node * value)
+set_find_spill(set * set, const ir_node * value)
{
spill_t query;
- query.irn = value;
+ query.irn = (ir_node*)value;
return set_find(set, &query, sizeof(query), HASH_PTR(value));
}
}
}
+#ifdef NO_SINGLE_USE_REMATS
static int
get_irn_n_nonremat_edges(const spill_ilp_t * si, const ir_node * irn)
{
return i;
}
+#endif
+
#ifdef ONLY_BRIGGS_REMATS
/**
 * Count the arguments of @p irn that belong to the register class under
 * consideration (i.e. the non-ignored arguments).
 *
 * Bug fix: the previous version tested has_reg_class(si, irn) -- the node
 * itself -- on every loop iteration, so the result was always either 0 or
 * the full arity instead of the number of relevant arguments.  Each
 * argument must be tested individually.
 */
static int
get_irn_n_nonignore_args(const spill_ilp_t * si, const ir_node * irn)
{
	int          n;
	unsigned int ret = 0;

	for(n = get_irn_arity(irn) - 1; n >= 0; --n) {
		if(has_reg_class(si, get_irn_n(irn, n))) ++ret;
	}

	return ret;
}
#endif
static INLINE void
get_remats_from_op(spill_ilp_t * si, const ir_node * op)
int n;
remat_t *remat;
+ if( has_reg_class(si, op)
#ifdef NO_SINGLE_USE_REMATS
- if(has_reg_class(si, op) && (get_irn_n_nonremat_edges(si, op) > 1)) {
-#else
- if(has_reg_class(si, op)) {
+ && (get_irn_n_nonremat_edges(si, op) > 1)
#endif
+#ifdef ONLY_BRIGGS_REMATS
+ && (get_irn_n_nonignore_args(si, op) == 0)
+#endif
+ ) {
remat = get_remat_from_op(si, op, op);
if(remat) {
add_remat(si, remat);
}
}
-#ifdef COLLECT_INVERSE_REMATS
+#if defined(COLLECT_INVERSE_REMATS) && !defined(ONLY_BRIGGS_REMATS)
/* repeat the whole stuff for each remat retrieved by get_remat_from_op(op, arg)
for each arg */
for (n = get_irn_arity(op)-1; n>=0; --n) {
}
static ir_node *
-insert_remat_after(spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
+insert_remat_after(spill_ilp_t * si, const remat_t * remat, ir_node * pos, const pset * live)
{
char buf[256];
}
static ir_node *
-insert_remat_before(spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
+insert_remat_before(spill_ilp_t * si, const remat_t * remat, ir_node * pos, const pset * live)
{
char buf[256];
DBG((si->dbg, LEVEL_3, "\t Entering %+F\n\n", bb));
- be_lv_foreach(si->chordal_env->lv, bb, be_lv_state_end, i) {
- ir_node *value = be_lv_get_irn(si->chordal_env->lv, bb, i);
+ be_lv_foreach(si->lv, bb, be_lv_state_end, i) {
+ ir_node *value = be_lv_get_irn(si->lv, bb, i);
/* add remats at end of block */
if (has_reg_class(si, value)) {
irn = next;
}
- be_lv_foreach(si->chordal_env->lv, bb, be_lv_state_end | be_lv_state_in, i) {
- ir_node *value = be_lv_get_irn(si->chordal_env->lv, bb, i);
+ be_lv_foreach(si->lv, bb, be_lv_state_end | be_lv_state_in, i) {
+ ir_node *value = be_lv_get_irn(si->lv, bb, i);
/* add remats at end if successor has multiple predecessors */
if(is_merge_edge(bb)) {
/* add remats at end of block */
- if (be_is_live_end(si->chordal_env->lv, bb, value) && has_reg_class(si, value)) {
+ if (be_is_live_end(si->lv, bb, value) && has_reg_class(si, value)) {
remat_info_t *remat_info,
query;
remat_t *remat;
}
if(is_diverge_edge(bb)) {
/* add remat2s at beginning of block */
- if ((be_is_live_in(si->chordal_env->lv, bb, value) || (is_Phi(value) && get_nodes_block(value)==bb)) && has_reg_class(si, value)) {
+ if ((be_is_live_in(si->lv, bb, value) || (is_Phi(value) && get_nodes_block(value)==bb)) && has_reg_class(si, value)) {
remat_info_t *remat_info,
query;
remat_t *remat;
live = pset_new_ptr_default();
use_end = pset_new_ptr_default();
- be_lv_foreach(si->chordal_env->lv, bb, be_lv_state_end, i) {
- irn = be_lv_get_irn(si->chordal_env->lv, bb, i);
+ be_lv_foreach(si->lv, bb, be_lv_state_end, i) {
+ irn = be_lv_get_irn(si->lv, bb, i);
if (has_reg_class(si, irn) && !pset_find_ptr(si->all_possible_remats, irn)) {
op_t *op;
}
ir_snprintf(buf, sizeof(buf), "check_end_%N", bb);
- //cst = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs);
- cst = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs - pset_count(use_end));
+ //cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, si->n_regs);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, si->n_regs - pset_count(use_end));
spill_bb->ilp = new_set(cmp_spill, pset_count(live)+pset_count(use_end));
set_insert_keyval(spill_bb->reloads, irn, INT_TO_PTR(reload));
/* reload <= mem_out */
- rel_cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
+ rel_cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
lpp_set_factor_fast(si->lpp, rel_cst, reload, 1.0);
lpp_set_factor_fast(si->lpp, rel_cst, spill->mem_out, -1.0);
- }
+ }
spill->reg_in = ILP_UNDEF;
spill->mem_in = ILP_UNDEF;
set_insert_keyval(spill_bb->reloads, irn, INT_TO_PTR(reload));
/* reload <= mem_out */
- rel_cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
+ rel_cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
lpp_set_factor_fast(si->lpp, rel_cst, reload, 1.0);
lpp_set_factor_fast(si->lpp, rel_cst, spill->mem_out, -1.0);
spill->mem_in = ILP_UNDEF;
ir_snprintf(buf, sizeof(buf), "req_cf_end_%N_%N", irn, bb);
- end_use_req = lpp_add_cst(si->lpp, buf, lpp_equal, 1);
+ end_use_req = lpp_add_cst_uniq(si->lpp, buf, lpp_equal, 1);
lpp_set_factor_fast(si->lpp, end_use_req, spill->reg_out, 1.0);
}
next_post_remat(const ir_node * irn)
{
op_t *op;
+ ir_node *next;
if(is_Block(irn)) {
- irn = sched_block_first_nonphi(irn);
+ next = sched_block_first_nonphi(irn);
} else {
- irn = sched_next_op(irn);
+ next = sched_next_op(irn);
}
- if(sched_is_end(irn))
+ if(sched_is_end(next))
return NULL;
- op = (op_t*)get_irn_link(irn);
+ op = get_irn_link(next);
if(op->is_remat && !op->attr.remat.pre) {
- return irn;
+ return next;
}
return NULL;
copyreg = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
ir_snprintf(buf, sizeof(buf), "check_copyreg_%N", block);
- cst = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, si->n_regs);
pset_foreach(live, tmp) {
spill_t *spill;
}
ir_snprintf(buf, sizeof(buf), "req_copy_%N_%N_%N", block, phi, to_copy);
- cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
/* copy - reg_out - reload - remat - live_range <= 0 */
lpp_set_factor_fast(si->lpp, cst, phi_op->attr.live_range.args.copies[pos], 1.0);
}
ir_snprintf(buf, sizeof(buf), "copyreg_%N_%N_%N", block, phi, to_copy);
- cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
/* copy - reg_out - copyreg <= 0 */
lpp_set_factor_fast(si->lpp, cst, phi_op->attr.live_range.args.copies[pos], 1.0);
ir_node *tmp;
spill_t *spill;
pset *defs = pset_new_ptr_default();
+#ifdef WITH_MEMOPERANDS
+ const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
+#endif
live = pset_new_ptr_default();
op->attr.live_range.op = bb;
ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", bb, irn);
- cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
/* reg_out - reload - remat - live_range <= 0 */
lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
pset *used;
pset *remat_defs;
keyval_t *keyval;
+#ifdef WITH_MEMOPERANDS
+ ilp_cst_t one_memoperand;
+#endif
/* iterate only until first phi */
if(is_Phi(irn))
if(n_remats == 0) {
/* sum remat2s <= 1 + n_remats*live_range */
ir_snprintf(buf, sizeof(buf), "dying_lr_%N_%N", value, irn);
- cst = lpp_add_cst(si->lpp, buf, lpp_less, 1.0);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 1.0);
}
n_remats++;
/* remat + \sum live_range(remat_arg) <= |args| */
ir_snprintf(buf, sizeof(buf), "one_must_die_%+F", tmp);
- cst = lpp_add_cst(si->lpp, buf, lpp_less, pset_count(remat_args));
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, pset_count(remat_args));
lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
pset_foreach(remat_args, remat_arg) {
/* next_live_range <= prev_live_range + sum remat2s */
ir_snprintf(buf, sizeof(buf), "next_lr_%N_%N", value, irn);
- cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
ir_snprintf(buf, sizeof(buf), "lr_%N_%N", value, irn);
prev_lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
/* check the register pressure in the epilog */
/* sum_{L\U'} lr + sum_{U'} post_use <= k - |D| */
ir_snprintf(buf, sizeof(buf), "check_post_%N", irn);
- check_post = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs - d);
+ check_post = lpp_add_cst_uniq(si->lpp, buf, lpp_less, si->n_regs - d);
/* add L\U' to check_post */
pset_foreach(live, tmp) {
/* post_use >= next_lr + remat */
ir_snprintf(buf, sizeof(buf), "post_use_%N_%N-%d", arg, irn, p++);
- cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
lpp_set_factor_fast(si->lpp, cst, post_use, -1.0);
lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
if(!set_find_keyval(args, arg)) {
/* post_use <= prev_lr */
ir_snprintf(buf, sizeof(buf), "req_post_use_%N_%N", arg, irn);
- cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
lpp_set_factor_fast(si->lpp, cst, post_use, 1.0);
lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
if(!pset_find_ptr(remat_defs, arg) && pset_find_ptr(live, arg)) {
/* next_lr <= prev_lr */
ir_snprintf(buf, sizeof(buf), "next_lr_%N_%N", arg, irn);
- cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
}
}
-
/* forall post remat which use arg add a similar cst */
foreach_post_remat(irn, remat) {
int n;
DBG((si->dbg, LEVEL_3, "\t found remat with arg %+F in epilog of %+F\n", arg, irn));
ir_snprintf(buf, sizeof(buf), "post_use_%N_%N-%d", arg, irn, p++);
- cst = lpp_add_cst(si->lpp, buf, lpp_greater, 0.0);
- lpp_set_factor_fast(si->lpp, cst, post_use, 1.0);
- lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
+ lpp_set_factor_fast(si->lpp, cst, post_use, -1.0);
+ lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
}
}
}
+#ifdef WITH_MEMOPERANDS
+ for(n = get_irn_arity(irn)-1; n>=0; --n) {
+ if(get_irn_n(irn, n) == arg && arch_possible_memory_operand(arch_env, irn, n)) {
+ ilp_var_t memoperand;
+
+ ir_snprintf(buf, sizeof(buf), "memoperand_%N_%d", irn, n);
+ memoperand = lpp_add_var(si->lpp, buf, lpp_binary, COST_MEMOPERAND*execution_frequency(si, bb));
+ set_insert_memoperand(si->memoperands, irn, n, memoperand);
+
+ ir_snprintf(buf, sizeof(buf), "nolivepost_%N_%d", irn, n);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 1.0);
+
+ lpp_set_factor_fast(si->lpp, cst, memoperand, 1.0);
+ lpp_set_factor_fast(si->lpp, cst, post_use, 1.0);
+// if(arg_op->attr.live_range.ilp != ILP_UNDEF)
+// lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
+ }
+ }
+#endif
+
/* new live range begins for each used value */
arg_op->attr.live_range.ilp = prev_lr;
arg_op->attr.live_range.op = irn;
/* check the register pressure in the prolog */
/* sum_{L\U} lr <= k - |U| */
ir_snprintf(buf, sizeof(buf), "check_pre_%N", irn);
- check_pre = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs - u);
+ check_pre = lpp_add_cst_uniq(si->lpp, buf, lpp_less, si->n_regs - u);
/* for the prolog remove defined values from the live set */
pset_foreach(defs, tmp) {
pset_remove_ptr(live, tmp);
}
+#ifdef WITH_MEMOPERANDS
+ ir_snprintf(buf, sizeof(buf), "one_memoperand_%N", irn);
+ one_memoperand = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 1.0);
+#endif
+
/***********************************************************
* I T E R A T I O N O V E R A R G S F O R P R O L O G
**********************************************************/
set_foreach(args, keyval) {
- spill_t *spill;
- ir_node *arg = keyval->key;
- int i = PTR_TO_INT(keyval->val);
- op_t *arg_op = get_irn_link(arg);
+ spill_t *spill;
+ const ir_node *arg = keyval->key;
+ int i = PTR_TO_INT(keyval->val);
+ op_t *arg_op = get_irn_link(arg);
+ ilp_cst_t requirements;
+#ifdef WITH_MEMOPERANDS
+ int n_memoperands;
+#endif
spill = set_find_spill(spill_bb->ilp, arg);
assert(spill);
/* reload <= mem_out */
ir_snprintf(buf, sizeof(buf), "req_reload_%N_%N", arg, irn);
- cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[i], 1.0);
lpp_set_factor_fast(si->lpp, cst, spill->mem_out, -1.0);
/* requirement: arg must be in register for use */
/* reload + remat + live_range == 1 */
ir_snprintf(buf, sizeof(buf), "req_%N_%N", irn, arg);
- cst = lpp_add_cst(si->lpp, buf, lpp_equal, 1.0);
+ requirements = lpp_add_cst_uniq(si->lpp, buf, lpp_equal, 1.0);
- lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
- lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[i], 1.0);
+ lpp_set_factor_fast(si->lpp, requirements, arg_op->attr.live_range.ilp, 1.0);
+ lpp_set_factor_fast(si->lpp, requirements, op->attr.live_range.args.reloads[i], 1.0);
foreach_pre_remat(si, irn, tmp) {
op_t *remat_op = get_irn_link(tmp);
if(remat_op->attr.remat.remat->value == arg) {
- lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
+ lpp_set_factor_fast(si->lpp, requirements, remat_op->attr.remat.ilp, 1.0);
}
}
+
+#ifdef WITH_MEMOPERANDS
+ n_memoperands = 0;
+ for(n = get_irn_arity(irn)-1; n>=0; --n) {
+ if(get_irn_n(irn, n) == arg) {
+ n_memoperands++;
+ }
+ }
+ for(n = get_irn_arity(irn)-1; n>=0; --n) {
+ if(get_irn_n(irn, n) == arg && arch_possible_memory_operand(arch_env, irn, n)) {
+ memoperand_t *memoperand;
+ memoperand = set_find_memoperand(si->memoperands, irn, n);
+
+ /* memoperand <= mem_out */
+ ir_snprintf(buf, sizeof(buf), "req_memoperand_%N_%d", irn, n);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
+ lpp_set_factor_fast(si->lpp, cst, memoperand->ilp, 1.0);
+ lpp_set_factor_fast(si->lpp, cst, spill->mem_out, -1.0);
+
+ /* the memoperand is only sufficient if it is used once by the op */
+ if(n_memoperands == 1)
+ lpp_set_factor_fast(si->lpp, requirements, memoperand->ilp, 1.0);
+
+ lpp_set_factor_fast(si->lpp, one_memoperand, memoperand->ilp, 1.0);
+
+ /* we have one more free register if we use a memory operand */
+ lpp_set_factor_fast(si->lpp, check_pre, memoperand->ilp, -1.0);
+ }
+ }
+#endif
}
/* iterate over L\U */
for (n=get_irn_arity(tmp)-1; n>=0; --n) {
ir_node *remat_arg = get_irn_n(tmp, n);
op_t *arg_op = get_irn_link(remat_arg);
- ilp_var_t prev_lr;
if(!has_reg_class(si, remat_arg)) continue;
set_foreach(spill_bb->ilp, spill) {
ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", spill->irn, bb);
- cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
lpp_set_factor_fast(si->lpp, cst, spill->mem_out, 1.0);
lpp_set_factor_fast(si->lpp, cst, spill->spill, -1.0);
/* copy <= mem_in */
ir_snprintf(buf, sizeof(buf), "nocopy_%N_%N", arg, spill->irn);
- cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
lpp_set_factor_fast(si->lpp, cst, var, 1.0);
lpp_set_factor_fast(si->lpp, cst, spill->mem_in, -1.0);
}
* including remats
*/
ir_snprintf(buf, sizeof(buf), "check_start_%N", bb);
- cst = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, si->n_regs);
pset_foreach(live, irn) {
ilp_cst_t nospill;
/* spill + mem_in <= 1 */
ir_snprintf(buf, sizeof(buf), "nospill_%N_%N", irn, bb);
- nospill = lpp_add_cst(si->lpp, buf, lpp_less, 1);
+ nospill = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 1);
lpp_set_factor_fast(si->lpp, nospill, spill->mem_in, 1.0);
lpp_set_factor_fast(si->lpp, nospill, spill->spill, 1.0);
if(has_reg_class(si, phi_arg)) {
/* mem_in < mem_out_arg + copy */
ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N-%d", irn, bb, p);
- mem_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
+ mem_in = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
/* reg_in < reg_out_arg */
ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N-%d", irn, bb, p++);
- reg_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
+ reg_in = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
lpp_set_factor_fast(si->lpp, mem_in, spill->mem_in, 1.0);
lpp_set_factor_fast(si->lpp, reg_in, spill->reg_in, 1.0);
spill_t *spill_p;
ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N-%d", irn, bb, p);
- mem_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
+ mem_in = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N-%d", irn, bb, p++);
- reg_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
+ reg_in = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
lpp_set_factor_fast(si->lpp, mem_in, spill->mem_in, 1.0);
lpp_set_factor_fast(si->lpp, reg_in, spill->reg_in, 1.0);
assert(spill && spill->irn == irn);
ir_snprintf(buf, sizeof(buf), "first_lr_%N_%N", irn, bb);
- cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, 1.0);
lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
assert(spill);
ir_snprintf(buf, sizeof(buf), "req_spill_%N_%N", irn, bb);
- cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
lpp_set_factor_fast(si->lpp, cst, spill->spill, 1.0);
if(is_diverge_edge(bb)) lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
assert(!is_Proj(tmp));
if(op->is_remat) {
- ir_node *value = op->attr.remat.remat->value;
+ const ir_node *value = op->attr.remat.remat->value;
if(value == irn) {
/* only collect remats up to the first real use of a value */
}
}
}
-next_live:
+next_live: ;
}
del_pset(live);
}
static int
-values_interfere_in_block(ir_node * bb, ir_node * a, ir_node * b)
+values_interfere_in_block(const spill_ilp_t * si, const ir_node * bb, const ir_node * a, const ir_node * b)
{
const ir_edge_t *edge;
/* the following code is stolen from bera.c */
- if(is_live_end(bb, a))
+ if(be_is_live_end(si->lv, bb, a))
return 1;
foreach_out_edge(a, edge) {
static void
luke_interferencewalker(ir_node * bb, void * data)
{
-#if 0 /* rewrite! */
spill_ilp_t *si = (spill_ilp_t*)data;
- irn_live_t *li1,
- *li2;
+ int l1, l2;
- be_lv_foreach(si->lv, bb, be_lv_state_end | be_lv_state_out | be_lv_state_in, i) {
- ir_node *a = be_lv_foreach(si->lv, bb, i);
+ be_lv_foreach(si->lv, bb, be_lv_state_end | be_lv_state_out | be_lv_state_in, l1) {
+ ir_node *a = be_lv_get_irn(si->lv, bb, l1);
op_t *a_op = get_irn_link(a);
if(a_op->is_remat) continue;
/* a is only interesting if it is in my register class and if it is inside a phi class */
if (has_reg_class(si, a) && get_phi_class(a)) {
- for(li2=li1->next; li2; li2 = li2->next) {
- ir_node *b = (ir_node *) li2->irn;
+ for(l2=_be_lv_next_irn(si->lv, bb, 0xff, l1+1); l2>=0; l2=_be_lv_next_irn(si->lv, bb, 0xff, l2+1)) {
+ ir_node *b = be_lv_get_irn(si->lv, bb, l2);
op_t *b_op = get_irn_link(b);
if(b_op->is_remat) continue;
/* a and b are only interesting if they are in the same phi class */
if(has_reg_class(si, b) && get_phi_class(a) == get_phi_class(b)) {
- if(values_interfere_in_block(bb, a, b)) {
+ if(values_interfere_in_block(si, bb, a, b)) {
DBG((si->dbg, LEVEL_4, "\tvalues interfere in %+F: %+F, %+F\n", bb, a, b));
set_insert_interference(si, si->interferences, a, b, bb);
}
}
}
}
-#endif
}
static unsigned int copy_path_id = 0;
void *ptr;
ir_snprintf(buf, sizeof(buf), "copy_path-%d", copy_path_id++);
- cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0);
lpp_set_factor_fast(si->lpp, cst, any_interfere, 1.0);
* @parameter visited contains a set of nodes already visited on this path
*/
static int
-find_copy_path(spill_ilp_t * si, ir_node * irn, ir_node * target, ilp_var_t any_interfere, pset * copies, pset * visited)
+find_copy_path(spill_ilp_t * si, const ir_node * irn, const ir_node * target, ilp_var_t any_interfere, pset * copies, pset * visited)
{
- ir_edge_t *edge;
- op_t *op = get_irn_link(irn);
- pset *visited_users = pset_new_ptr_default();
- int paths = 0;
+ const ir_edge_t *edge;
+ op_t *op = get_irn_link(irn);
+ pset *visited_users = pset_new_ptr_default();
+ int paths = 0;
- if(op->is_remat) return;
+ if(op->is_remat) return 0;
pset_insert_ptr(visited, irn);
char buf[256];
ir_snprintf(buf, sizeof(buf), "always_copy-%d-%d", any_interfere, copy);
- cst = lpp_add_cst(si->lpp, buf, lpp_equal, 0);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_equal, 0);
lpp_set_factor_fast(si->lpp, cst, any_interfere, -1.0);
lpp_set_factor_fast(si->lpp, cst, copy, 1.0);
DBG((si->dbg, LEVEL_1, "ALWAYS COPYING %d FOR INTERFERENCE %d\n", copy, any_interfere));
char buf[256];
ir_snprintf(buf, sizeof(buf), "always_copy-%d-%d", any_interfere, copy);
- cst = lpp_add_cst(si->lpp, buf, lpp_equal, 0);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_equal, 0);
lpp_set_factor_fast(si->lpp, cst, any_interfere, -1.0);
lpp_set_factor_fast(si->lpp, cst, copy, 1.0);
DBG((si->dbg, LEVEL_1, "ALWAYS COPYING %d FOR INTERFERENCE %d\n", copy, any_interfere));
}
static void
-gen_copy_constraints(spill_ilp_t * si, ir_node * a, ir_node * b, ilp_var_t any_interfere)
+gen_copy_constraints(spill_ilp_t * si, const ir_node * a, const ir_node * b, ilp_var_t any_interfere)
{
pset * copies = pset_new_ptr_default();
pset * visited = pset_new_ptr_default();
/* any_interf <= \sum interf */
ir_snprintf(buf, sizeof(buf), "interfere_%N_%N", a, b);
- any_interfere_cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
+ any_interfere_cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0);
any_interfere = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
lpp_set_factor_fast(si->lpp, any_interfere_cst, any_interfere, 1.0);
const ir_node *bb = irnlist->irn;
spill_bb_t *spill_bb = get_irn_link(bb);
spill_t *spilla,
- *spillb,
- query;
+ *spillb;
char buf[256];
- query.irn = a;
spilla = set_find_spill(spill_bb->ilp, a);
assert(spilla);
- query.irn = b;
spillb = set_find_spill(spill_bb->ilp, b);
assert(spillb);
interfere = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-1", bb, a, b);
- cst = lpp_add_cst(si->lpp, buf, lpp_less, 1);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 1);
lpp_set_factor_fast(si->lpp, cst, interfere, -1.0);
if(spilla->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spilla->mem_in, 1.0);
lpp_set_factor_fast(si->lpp, cst, spillb->spill, 1.0);
ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-2", bb, a, b);
- cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0);
lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
if(spilla->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spilla->mem_in, -1.0);
lpp_set_factor_fast(si->lpp, cst, spilla->spill, -1.0);
ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-3", bb, a, b);
- cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0);
lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
if(spillb->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spillb->mem_in, -1.0);
/* any_interfere >= interf */
ir_snprintf(buf, sizeof(buf), "interfere_%N_%N-%N", a, b, bb);
- cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0);
lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
lpp_set_factor_fast(si->lpp, cst, any_interfere, -1.0);
}
/** insert a spill at an arbitrary position */
-ir_node *be_spill2(const arch_env_t *arch_env, ir_node *irn, ir_node *insert, ir_node *ctx)
+ir_node *be_spill2(const arch_env_t *arch_env, ir_node *irn, ir_node *insert)
{
ir_node *bl = is_Block(insert)?insert:get_nodes_block(insert);
ir_graph *irg = get_irn_irg(bl);
const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, irn, -1);
const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
- spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn, ctx);
+ spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn);
/*
* search the right insertion point. a spill of a phi cannot be put
pset_foreach(remat_info->remats, remat)
{
if(remat->proj && get_irn_n_edges(remat->proj) == 0) {
- set_irn_n(remat->proj, -1, bad);
- set_irn_n(remat->proj, 0, bad);
+ set_irn_n((ir_node*)remat->proj, -1, bad);
+ set_irn_n((ir_node*)remat->proj, 0, bad);
}
if(get_irn_n_edges(remat->op) == 0) {
for (n=get_irn_arity(remat->op)-1; n>=-1; --n) {
- set_irn_n(remat->op, n, bad);
+ set_irn_n((ir_node*)remat->op, n, bad);
}
}
}
}
static pset *
-get_spills_for_value(spill_ilp_t * si, ir_node * value)
+get_spills_for_value(spill_ilp_t * si, const ir_node * value)
{
pset *spills = pset_new_ptr_default();
- ir_node *next;
- defs_t *defs;
+ const ir_node *next;
+ defs_t *defs;
defs = set_find_def(si->values, value);
return spills;
}
-static pset *
-get_remats_for_value(spill_ilp_t * si, ir_node * value)
-{
- pset *remats = pset_new_ptr_default();
-
- ir_node *next;
- defs_t *defs;
-
- pset_insert_ptr(remats, value);
- defs = set_find_def(si->values, value);
-
- if(defs && defs->remats) {
- for(next = defs->remats; next; next = get_irn_link(next)) {
- pset_insert_ptr(remats, next);
- }
- }
-
- return remats;
-}
-
-
/**
* @param before The node after which the spill will be placed in the schedule
*/
/* TODO set context properly */
static ir_node *
-insert_spill(spill_ilp_t * si, ir_node * irn, ir_node * value, ir_node * before)
+insert_spill(spill_ilp_t * si, ir_node * irn, const ir_node * value, ir_node * before)
{
defs_t *defs;
ir_node *spill;
DBG((si->dbg, LEVEL_3, "\t inserting spill for value %+F after %+F\n", irn, before));
- spill = be_spill2(arch_env, irn, before, irn);
+ spill = be_spill2(arch_env, irn, before);
defs = set_insert_def(si->values, value);
assert(defs);
* @param before The Phi node which has to be spilled
*/
static ir_node *
-insert_mem_phi(spill_ilp_t * si, const ir_node * phi)
+insert_mem_phi(spill_ilp_t * si, ir_node * phi)
{
ir_node *mem_phi;
ir_node **ins;
defs_t *defs;
int n;
- op_t *op = get_irn_link(phi);
NEW_ARR_A(ir_node*, ins, get_irn_arity(phi));
* Add reload before operation and add to list of defs
*/
static ir_node *
-insert_reload(spill_ilp_t * si, const ir_node * value, const ir_node * after)
+insert_reload(spill_ilp_t * si, const ir_node * value, ir_node * after)
{
defs_t *defs;
ir_node *reload,
return reload;
}
+#ifdef WITH_MEMOPERANDS
+void perform_memory_operand(spill_ilp_t * si, memoperand_t * memoperand)
+{
+ defs_t *defs;
+ ir_node *reload;
+ ir_node *value = get_irn_n(memoperand->irn, memoperand->pos);
+ ir_node *spill;
+ const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
+
+ DBG((si->dbg, LEVEL_2, "\t inserting memory operand for value %+F at %+F\n", value, memoperand->irn));
+
+ defs = set_find_def(si->values, value);
+
+ spill = defs->spills;
+ assert(spill && "no spill placed before reload");
+
+ reload = be_reload(arch_env, si->cls, memoperand->irn, get_irn_mode(value), spill);
+
+ arch_perform_memory_operand(arch_env, memoperand->irn, reload, memoperand->pos);
+ sched_remove(reload);
+}
+
+void insert_memoperands(spill_ilp_t * si)
+{
+ memoperand_t *memoperand;
+ lpp_name_t *name;
+
+ set_foreach(si->memoperands, memoperand) {
+ name = si->lpp->vars[memoperand->ilp];
+ if(!is_zero(name->value)) {
+ perform_memory_operand(si, memoperand);
+ }
+ }
+}
+#endif
+
static void
walker_spill_placer(ir_node * bb, void * data) {
spill_ilp_t *si = (spill_ilp_t*)data;
}
static ir_node *
-insert_mem_copy(spill_ilp_t * si, const ir_node * bb, const ir_node * value)
+insert_mem_copy(spill_ilp_t * si, ir_node * bb, ir_node * value)
{
ir_node *insert_pos = bb;
ir_node *spill;
DBG((si->dbg, LEVEL_2, "\t inserting mem copy for value %+F after %+F\n", value, insert_pos));
- spill = be_spill2(arch_env, is_Block(insert_pos)?value:insert_pos, insert_pos, value);
+ spill = be_spill2(arch_env, is_Block(insert_pos)?value:insert_pos, insert_pos);
return spill;
}
if(!phi_m) continue;
for(n=get_irn_arity(phi)-1; n>=0; --n) {
- const ir_node *value = get_irn_n(phi, n);
+ ir_node *value = get_irn_n(phi, n);
defs_t *val_defs = set_find_def(si->values, value);
- ir_node *arg = get_irn_n(phi_m, n);
/* a spill of this value */
ir_node *spill;
spill_ilp_t *si = (spill_ilp_t*)data;
ir_node *irn;
spill_bb_t *spill_bb = get_irn_link(bb);
- int i;
/* reloads at end of block */
if(spill_bb->reloads) {
walker_kill_unused(ir_node * bb, void * data)
{
struct kill_helper *kh = data;
- const ir_node *bad = get_irg_bad(get_irn_irg(bb));
+ ir_node *bad = get_irg_bad(get_irn_irg(bb));
ir_node *irn;
lc_bitset_free(kh.used);
}
-static void
+void
print_irn_pset(pset * p)
{
ir_node *irn;
}
}
-static void
+void
dump_phi_class(spill_ilp_t * si, pset * phiclass, const char * file)
{
FILE *f = fopen(file, "w");
/* then fix uses of spills */
set_foreach(si->values, defs) {
- pset *reloads;
- pset *spills;
- ir_node *next = defs->remats;
+ pset *reloads;
+ pset *spills;
+ const ir_node *next = defs->remats;
int remats = 0;
reloads = pset_new_ptr_default();
// print_irn_pset(spills);
// print_irn_pset(reloads);
- be_ssa_constr_set_ignore(dfi, spills, ignore);
+ be_ssa_constr_set_ignore(dfi, si->lv, spills, ignore);
}
del_pset(reloads);
/* first fix uses of remats and reloads */
set_foreach(si->values, defs) {
- pset *nodes;
- ir_node *next = defs->remats;
+ pset *nodes;
+ const ir_node *next = defs->remats;
if(next) {
nodes = pset_new_ptr_default();
if(pset_count(nodes) > 1) {
DBG((si->dbg, LEVEL_4, "\t %d new definitions for value %+F\n", pset_count(nodes)-1, defs->value));
- be_ssa_constr_set(dfi, nodes);
+ be_ssa_constr_set(dfi, si->lv, nodes);
}
del_pset(nodes);
si->m_unknown = new_r_Unknown(si->chordal_env->irg, mode_M);
irg_block_walk_graph(si->chordal_env->irg, walker_spill_placer, NULL, si);
irg_block_walk_graph(si->chordal_env->irg, walker_reload_placer, NULL, si);
+#ifdef WITH_MEMOPERANDS
+ insert_memoperands(si);
+#endif
phim_fixer(si);
/* clean the remat info! there are still back-edges leading there! */
if( sched_is_end(irn) ||
(be_is_Reload(irn) && has_reg_class(si, irn)) ||
/* do not move reload before its spill */
- (irn == be_get_Reload_mem(reload)) ) break;
+ (irn == be_get_Reload_mem(reload)) ||
+ /* do not move before phi */
+ is_Phi(irn)) break;
set_irn_link(irn, INT_TO_PTR(pressure+1));
DBG((si->dbg, LEVEL_5, "new regpressure before %+F: %d\n", irn, pressure+1));
luke_meminterferencechecker(ir_node * bb, void * data)
{
spill_ilp_t *si = (spill_ilp_t*)data;
- irn_live_t *li1,
- *li2;
+ int l1, l2;
- live_foreach(bb, li1) {
- ir_node *a = (ir_node *) li1->irn;
+ be_lv_foreach(si->lv, bb, be_lv_state_end | be_lv_state_out | be_lv_state_in, l1) {
+ ir_node *a = be_lv_get_irn(si->lv, bb, l1);
if(!be_is_Spill(a) && (!is_Phi(a) || get_irn_mode(a) != mode_T)) continue;
- /* a is only interesting if it is inside a phi class */
- if (get_phi_class(a)) {
- for(li2=li1->next; li2; li2 = li2->next) {
- ir_node *b = (ir_node *) li2->irn;
+ /* a is only interesting if it is in my register class and if it is inside a phi class */
+ if (has_reg_class(si, a) && get_phi_class(a)) {
+ for(l2=_be_lv_next_irn(si->lv, bb, 0xff, l1+1); l2>=0; l2=_be_lv_next_irn(si->lv, bb, 0xff, l2+1)) {
+ ir_node *b = be_lv_get_irn(si->lv, bb, l2);
if(!be_is_Spill(b) && (!is_Phi(b) || get_irn_mode(b) != mode_T)) continue;
/* a and b are only interesting if they are in the same phi class */
- if(get_phi_class(a) == get_phi_class(b)) {
- if(values_interfere_in_block(bb, a, b)) {
+ if(has_reg_class(si, b) && get_phi_class(a) == get_phi_class(b)) {
+ if(values_interfere_in_block(si, bb, a, b)) {
ir_fprintf(stderr, "$$ Spills interfere in %+F: %+F, %+F \t$$\n", bb, a, b);
}
}
static void
walker_spillslotassigner(ir_node * irn, void * data)
{
- spill_ilp_t *si = (spill_ilp_t*)data;
void *cls;
if(!be_is_Spill(irn)) return;
/* set spill context to phi class if it has one ;) */
+#if 0
+ // Matze: not needed anymore
cls = get_phi_class(irn);
if(cls)
be_set_Spill_context(irn, cls);
else
be_set_Spill_context(irn, irn);
+#endif
}
si.lpp = new_lpp(problem_name, lpp_minimize);
si.remat_info = new_set(cmp_remat_info, 4096);
si.interferences = new_set(cmp_interference, 32);
+#ifdef WITH_MEMOPERANDS
+ si.memoperands = new_set(cmp_memoperands, 128);
+#endif
si.all_possible_remats = pset_new_ptr_default();
si.spills = pset_new_ptr_default();
si.inverse_ops = pset_new_ptr_default();
+ si.lv = chordal_env->lv;
#ifdef KEEPALIVE
si.keep = NULL;
#endif
/* recompute liveness */
DBG((si.dbg, LEVEL_1, "Recomputing liveness\n"));
- be_liveness(chordal_env->irg);
+ be_liveness_recompute(si.lv);
/* build the ILP */
#endif
#ifdef SOLVE
- DBG((si.dbg, LEVEL_1, "\tSolving %F\n", chordal_env->irg));
+ DBG((si.dbg, LEVEL_1, "\tSolving %s (%d variables, %d constraints)\n", problem_name, si.lpp->var_next, si.lpp->cst_next));
#ifdef ILP_TIMEOUT
lpp_set_time_limit(si.lpp, ILP_TIMEOUT);
#endif
#endif
// move reloads upwards
- be_liveness(chordal_env->irg);
+ be_liveness_recompute(si.lv);
irg_block_walk_graph(chordal_env->irg, walker_pressure_annotator, NULL, &si);
move_reloads_upward(&si);
del_set(si.interferences);
del_pset(si.inverse_ops);
del_pset(si.all_possible_remats);
+#ifdef WITH_MEMOPERANDS
+ del_set(si.memoperands);
+#endif
del_pset(si.spills);
free_lpp(si.lpp);
obstack_free(&obst, NULL);