-/** vim: set sw=4 ts=4:
- * @file bespillremat.c
- * @date 2006-04-06
- * @author Adam M. Szalkowski & Sebastian Hack
+/*
+ * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
*
- * ILP based spilling & rematerialization
+ * This file is part of libFirm.
*
- * Copyright (C) 2006 Universitaet Karlsruhe
- * Released under the GPL
+ * This file may be distributed and/or modified under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation and appearing in the file LICENSE.GPL included in the
+ * packaging of this file.
+ *
+ * Licensees holding valid libFirm Professional Edition licenses may use
+ * this file in accordance with the libFirm Commercial License
+ * Agreement provided with the Software.
+ *
+ * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/**
+ * @file
+ * @brief ILP based spilling & rematerialization
+ * @author Adam M. Szalkowski
+ * @date 06.04.2006
+ * @version $Id$
*/
-#ifdef HAVE_CONFIG_H
#include "config.h"
-#endif
#ifdef WITH_ILP
#include <math.h>
+#include "array_t.h"
#include "hashptr.h"
#include "debug.h"
#include "obst.h"
#include "irnode_t.h"
#include "ircons_t.h"
#include "irloop_t.h"
+#include "irnodeset.h"
#include "phiclass.h"
-#include "iredges.h"
+#include "iredges_t.h"
#include "execfreq.h"
#include "irvrfy.h"
#include "irbackedge_t.h"
+#include "irprofile.h"
#include <lpp/lpp.h>
#include <lpp/mps.h>
#include <lpp/lpp_net.h>
#include <lpp/lpp_cplex.h>
-//#include <lc_pset.h>
-//#include <libcore/lc_bitset.h>
#include "be_t.h"
+#include "beirg_t.h"
#include "belive_t.h"
#include "besched_t.h"
-#include "beirgmod.h"
-#include "bearch.h"
+#include "bessaconstr.h"
+#include "bearch_t.h"
+#include "beintlive_t.h"
#include "beabi.h"
#include "benode_t.h"
#include "beutil.h"
#include "bespillremat.h"
#include "bespill.h"
#include "bepressurestat.h"
-#include "beprofile.h"
#include "bespilloptions.h"
-
#include "bechordal_t.h"
+#include "bemodule.h"
-#ifdef WITH_LIBCORE
-#include <libcore/lc_opts.h>
-#include <libcore/lc_opts_enum.h>
-#endif /* WITH_LIBCORE */
+#include "lc_opts.h"
+#include "lc_opts_enum.h"
#define DUMP_PROBLEM 1
#define DUMP_MPS 2
#define REMATS_NOINVERSE 2
#define REMATS_ALL 3
-static int opt_dump_flags = 0;
+static unsigned opt_dump_flags = 0;
static int opt_log = 0;
-static int opt_keep_alive = 0;
+static unsigned opt_keep_alive = 0;
static int opt_goodwin = 1;
static int opt_memcopies = 1;
static int opt_memoperands = 1;
static int opt_verify = VERIFY_MEMINTERF;
-static int opt_remats = REMATS_ALL;
+static unsigned opt_remats = REMATS_ALL;
static int opt_repair_schedule = 0;
static int opt_no_enlarge_liveness = 0;
static int opt_remat_while_live = 1;
static double opt_cost_remat = 1.0;
-#ifdef WITH_LIBCORE
static const lc_opt_enum_mask_items_t dump_items[] = {
{ "problem", DUMP_PROBLEM },
{ "mps", DUMP_MPS },
LC_OPT_ENT_DBL ("cost_memoperand", "cost of a memory operand", &opt_cost_memoperand),
LC_OPT_ENT_DBL ("cost_spill", "cost of a spill instruction", &opt_cost_spill),
LC_OPT_ENT_DBL ("cost_remat", "cost of a rematerialization", &opt_cost_remat),
- { NULL }
+ LC_OPT_LAST
};
-#endif
-
-
//#define EXECFREQ_LOOPDEPH /* compute execution frequency from loop depth only */
//#define SCHEDULE_PHIM /* insert phim nodes into schedule */
ilp_var_t ilp; /**< the ilp var for this memory operand */
} memoperand_t;
-static INLINE int
+static inline int
has_reg_class(const spill_ilp_t * si, const ir_node * irn)
{
- return arch_irn_consider_in_reg_alloc(si->birg->main_env->arch_env,
- si->cls, irn);
+ return arch_irn_consider_in_reg_alloc(si->cls, irn);
}
#if 0
cmp_remat(const void *a, const void *b)
{
const remat_t *r = a;
- const remat_t *s = a;
+ const remat_t *s = b;
return !(r == s || r->op == s->op);
}
{
const spill_t *p = a;
const spill_t *q = b;
+ (void) size;
// return !(p->irn == q->irn && p->bb == q->bb);
return !(p->irn == q->irn);
{
const memoperand_t *p = a;
const memoperand_t *q = b;
+ (void) size;
return !(p->irn == q->irn && p->pos == q->pos);
}
#define pset_foreach(s,i) for((i)=pset_first((s)); (i); (i)=pset_next((s)))
#define set_foreach(s,i) for((i)=set_first((s)); (i); (i)=set_next((s)))
#define foreach_post_remat(s,i) for((i)=next_post_remat((s)); (i); (i)=next_post_remat((i)))
-#define foreach_pre_remat(si,s,i) for((i)=next_pre_remat((si),(s)); (i); (i)=next_pre_remat((si),(i)))
+#define foreach_pre_remat(s,i) for((i)=next_pre_remat((s)); (i); (i)=next_pre_remat((i)))
#define sched_foreach_op(s,i) for((i)=sched_next_op((s));!sched_is_end((i));(i)=sched_next_op((i)))
static int
{
const remat_info_t *p = a;
const remat_info_t *q = b;
+ (void) size;
return !(p->irn == q->irn);
}
{
const defs_t *p = a;
const defs_t *q = b;
+ (void) size;
return !(p->value == q->value);
}
{
const keyval_t *p = a;
const keyval_t *q = b;
+ (void) size;
return !(p->key == q->key);
}
execution_frequency(const spill_ilp_t *si, const ir_node * irn)
{
#define FUDGE 0.001
- if(be_profile_has_data())
- return ((double)be_profile_get_block_execcount(get_block(irn))) + FUDGE;
+ if(ir_profile_has_data())
+ return ((double)ir_profile_get_block_execcount(get_block_const(irn))) + FUDGE;
#ifndef EXECFREQ_LOOPDEPH
- return get_block_execfreq(si->birg->exec_freq, get_block(irn)) + FUDGE;
+ return get_block_execfreq(si->birg->exec_freq, get_block_const(irn)) + FUDGE;
#else
if(is_Block(irn))
return exp(get_loop_depth(get_irn_loop(irn)) * log(10)) + FUDGE;
#endif
}
-static double
-get_cost(const spill_ilp_t * si, const ir_node * irn)
+static double get_cost(const ir_node *irn)
{
if(be_is_Spill(irn)) {
return opt_cost_spill;
} else if(be_is_Reload(irn)){
return opt_cost_reload;
} else {
- return arch_get_op_estimated_cost(si->birg->main_env->arch_env, irn);
+ return arch_get_op_estimated_cost(irn);
}
}
/**
* Checks, whether node and its operands have suitable reg classes
*/
-static INLINE int
+static inline int
is_rematerializable(const spill_ilp_t * si, const ir_node * irn)
{
- int n;
- const arch_env_t *arch_env = si->birg->main_env->arch_env;
- int remat = (arch_irn_get_flags(arch_env, irn) & arch_irn_flags_rematerializable) != 0;
+ int n;
+ int remat = (arch_irn_get_flags(irn) & arch_irn_flags_rematerializable) != 0;
#if 0
if(!remat)
for (n = get_irn_arity(irn)-1; n>=0 && remat; --n) {
ir_node *op = get_irn_n(irn, n);
- remat &= has_reg_class(si, op) || arch_irn_get_flags(arch_env, op) & arch_irn_flags_ignore || (get_irn_op(op) == op_NoMem);
+ remat &= has_reg_class(si, op) || arch_irn_get_flags(op) & arch_irn_flags_ignore || is_NoMem(op);
// if(!remat)
// ir_fprintf(stderr, " Argument %d (%+F) of Node %+F has wrong regclass\n", i, op, irn);
/**
* Try to create a remat from @p op with destination value @p dest_value
*/
-static INLINE remat_t *
+static inline remat_t *
get_remat_from_op(spill_ilp_t * si, const ir_node * dest_value, const ir_node * op)
{
remat_t *remat = NULL;
remat = obstack_alloc(si->obst, sizeof(*remat));
remat->op = op;
- remat->cost = (int)get_cost(si, op);
+ remat->cost = (int)get_cost(op);
remat->value = dest_value;
remat->proj = proj;
remat->inverse = 0;
DBG((si->dbg, LEVEL_5, "\t requesting inverse op for argument %d of op %+F\n", n, op));
/* else ask the backend to give an inverse op */
- if(arch_get_inverse(si->birg->main_env->arch_env, op, n, &inverse, si->obst)) {
+ if(arch_get_inverse(op, n, &inverse, si->obst)) {
int i;
DBG((si->dbg, LEVEL_4, "\t backend gave us an inverse op with %d nodes and cost %d\n", inverse.n, inverse.costs));
}
-static INLINE void
+static inline void
add_remat(const spill_ilp_t * si, const remat_t * remat)
{
remat_info_t *remat_info,
return ret;
}
-static INLINE void
+static inline void
get_remats_from_op(spill_ilp_t * si, const ir_node * op)
{
int n;
}
}
-static INLINE int
+static inline int
value_is_defined_before(const spill_ilp_t * si, const ir_node * pos, const ir_node * val)
{
ir_node *block;
ir_node *def_block = get_nodes_block(val);
int ret;
+ (void) si;
if(val == pos)
return 0;
return ret;
}
-static INLINE ir_node *
-sched_block_last_noncf(const spill_ilp_t * si, const ir_node * bb)
+static inline ir_node *sched_block_last_noncf(const ir_node * bb)
{
- return sched_skip((ir_node*)bb, 0, sched_skip_cf_predicator, (void *) si->birg->main_env->arch_env);
+ return sched_skip((ir_node*)bb, 0, sched_skip_cf_predicator, NULL);
}
/**
* Returns first non-Phi node of block @p bb
*/
-static INLINE ir_node *
+static inline ir_node *
sched_block_first_nonphi(const ir_node * bb)
{
return sched_skip((ir_node*)bb, 1, sched_skip_phi_predicator, NULL);
static int
sched_skip_proj_predicator(const ir_node * irn, void * data)
{
+ (void) data;
return (is_Proj(irn));
}
-static INLINE ir_node *
+static inline ir_node *
sched_next_nonproj(const ir_node * irn, int forward)
{
return sched_skip((ir_node*)irn, forward, sched_skip_proj_predicator, NULL);
* Returns next operation node (non-Proj) after @p irn
* or the basic block of this node
*/
-static INLINE ir_node *
+static inline ir_node *
sched_next_op(const ir_node * irn)
{
ir_node *next = sched_next(irn);
* Returns previous operation node (non-Proj) before @p irn
* or the basic block of this node
*/
-static INLINE ir_node *
+static inline ir_node *
sched_prev_op(const ir_node * irn)
{
ir_node *prev = sched_prev(irn);
sched_add_before(insert, irn);
}
-static void
-sched_put_before(const spill_ilp_t * si, ir_node * insert, ir_node * irn)
+static void sched_put_before(ir_node * insert, ir_node * irn)
{
if(is_Block(insert)) {
- insert = sched_block_last_noncf(si, insert);
+ insert = sched_block_last_noncf(insert);
} else {
insert = sched_next_nonproj(insert, 0);
insert = sched_prev(insert);
}
-static ir_node *
-next_pre_remat(const spill_ilp_t * si, const ir_node * irn)
+static ir_node *next_pre_remat(const ir_node * irn)
{
op_t *op;
ir_node *ret;
if(is_Block(irn)) {
- ret = sched_block_last_noncf(si, irn);
+ ret = sched_block_last_noncf(irn);
ret = sched_next(ret);
ret = sched_prev_op(ret);
} else {
/**
* Tells you whether a @p remat can be placed before the irn @p pos
*/
-static INLINE int
+static inline int
can_remat_before(const spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
{
const ir_node *op = remat->op;
res = 1;
if(is_Block(pos)) {
- prev = sched_block_last_noncf(si, pos);
+ prev = sched_block_last_noncf(pos);
prev = sched_next_nonproj(prev, 0);
} else {
prev = sched_prev_op(pos);
/**
* Tells you whether a @p remat can be placed after the irn @p pos
*/
-static INLINE int
+static inline int
can_remat_after(const spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
{
if(is_Block(pos)) {
set_phi_class(si->pc, copy, NULL);
set_nodes_block(copy, bb);
- sched_put_before(si, pos, copy);
+ sched_put_before(pos, copy);
return copy;
}
sched_foreach_reverse(bb, irn) {
int i;
- if(!sched_skip_cf_predicator(irn, si->birg->main_env->arch_env)) break;
+ if (!sched_skip_cf_predicator(irn, NULL)) break;
for(i=get_irn_arity(irn)-1; i>=0; --i) {
ir_node *arg = get_irn_n(irn,i);
* find values that are used by remats at end of block
* and insert them into live set
*/
- foreach_pre_remat(si, bb, irn) {
+ foreach_pre_remat(bb, irn) {
int n;
for (n=get_irn_arity(irn)-1; n>=0; --n) {
if(!has_reg_class(si, phi_arg)) {
ir_node *copy = be_new_Copy(si->cls, si->birg->irg, bb, phi_arg);
- ir_node *pos = sched_block_last_noncf(si, bb);
+ ir_node *pos = sched_block_last_noncf(bb);
op_t *op = obstack_alloc(si->obst, sizeof(*op));
DBG((si->dbg, LEVEL_2, "\t copy to my regclass for arg %+F of %+F\n", phi_arg, irn));
}
/* do not place post remats after jumps */
- if(sched_skip_cf_predicator(irn, si->birg->main_env->arch_env)) {
+ if (sched_skip_cf_predicator(irn, si->birg->main_env->arch_env)) {
del_pset(used);
del_pset(args);
break;
* find values that are used by remats at end of block
* and insert them into live set
*/
- foreach_pre_remat(si, bb, irn) {
+ foreach_pre_remat(bb, irn) {
int n;
for (n=get_irn_arity(irn)-1; n>=0; --n) {
sched_foreach_reverse(bb, irn) {
int n;
- if(!sched_skip_cf_predicator(irn, si->birg->main_env->arch_env)) break;
+ if (!sched_skip_cf_predicator(irn, si->birg->main_env->arch_env)) break;
for (n=get_irn_arity(irn)-1; n>=0; --n) {
ir_node *irn_arg = get_irn_n(irn, n);
del_pset(use_end);
}
+#ifndef NDEBUG
/**
* Find a remat of value @p value in the epilog of @p pos
*/
return NULL;
}
+#endif
static spill_t *
add_to_spill_bb(spill_ilp_t * si, ir_node * bb, ir_node * irn)
lpp_set_factor_fast(si->lpp, cst, to_copy_spill->reg_out, -1.0);
if(reload != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
lpp_set_factor_fast(si->lpp, cst, to_copy_op->attr.live_range.ilp, -1.0);
- foreach_pre_remat(si, block, tmp) {
+ foreach_pre_remat(block, tmp) {
op_t *remat_op = get_irn_link(tmp);
if(remat_op->attr.remat.remat->value == to_copy) {
lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
static void
luke_blockwalker(ir_node * bb, void * data)
{
- spill_ilp_t *si = (spill_ilp_t*)data;
- ir_node *irn;
- pset *live;
- char buf[256];
- ilp_cst_t cst;
- spill_bb_t *spill_bb = get_irn_link(bb);
- ir_node *tmp;
- spill_t *spill;
- pset *defs = pset_new_ptr_default();
- const arch_env_t *arch_env = si->birg->main_env->arch_env;
+ spill_ilp_t *si = (spill_ilp_t*)data;
+ ir_node *irn;
+ pset *live;
+ char buf[256];
+ ilp_cst_t cst;
+ spill_bb_t *spill_bb = get_irn_link(bb);
+ ir_node *tmp;
+ spill_t *spill;
+ pset *defs = pset_new_ptr_default();
live = pset_new_ptr_default();
lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
if(reload != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, -1.0);
- foreach_pre_remat(si, bb, tmp) {
+ foreach_pre_remat(bb, tmp) {
op_t *remat_op = get_irn_link(tmp);
if(remat_op->attr.remat.remat->value == irn) {
lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
if(reload != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, -1.0);
- foreach_pre_remat(si, bb, tmp) {
+ foreach_pre_remat(bb, tmp) {
op_t *remat_op = get_irn_link(tmp);
if(remat_op->attr.remat.remat->value == irn) {
lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
/*
* assure the remat args are available
*/
- foreach_pre_remat(si, bb, tmp) {
+ foreach_pre_remat(bb, tmp) {
op_t *remat_op = get_irn_link(tmp);
int n;
* B A S I C B L O C K B O D Y
**************************************/
- sched_foreach_reverse_from(sched_block_last_noncf(si, bb), irn) {
+ sched_foreach_reverse_from(sched_block_last_noncf(bb), irn) {
op_t *op;
op_t *tmp_op;
int n,
pset *used;
pset *remat_defs;
keyval_t *keyval;
- ilp_cst_t one_memoperand;
+ ilp_cst_t one_memoperand = -1;
/* iterate only until first phi */
if(is_Phi(irn))
}
}
}
- foreach_pre_remat(si, irn, tmp) {
+ foreach_pre_remat(irn, tmp) {
for (n=get_irn_arity(tmp)-1; n>=0; --n) {
ir_node *remat_arg = get_irn_n(tmp, n);
if(has_reg_class(si, remat_arg)) {
if(opt_memoperands && (!is_start_block(bb) || be_is_Barrier(irn))) {
for(n = get_irn_arity(irn)-1; n>=0; --n) {
- if(get_irn_n(irn, n) == arg && arch_possible_memory_operand(arch_env, irn, n)) {
+ if (get_irn_n(irn, n) == arg &&
+ arch_possible_memory_operand(irn, n)) {
ilp_var_t memoperand;
ir_snprintf(buf, sizeof(buf), "memoperand_%N_%d", irn, n);
assert(spill);
ir_snprintf(buf, sizeof(buf), "delete_%N", tmp);
- delete = lpp_add_var_default(si->lpp, buf, lpp_binary, -1.0*get_cost(si, irn)*execution_frequency(si, bb), 0.0);
+ delete = lpp_add_var_default(si->lpp, buf, lpp_binary, -1.0 * get_cost(irn) * execution_frequency(si, bb), 0.0);
/* op may not be killed if its first live_range is 1 */
ir_snprintf(buf, sizeof(buf), "killorig-lr_%N", tmp);
assert(spill);
ir_snprintf(buf, sizeof(buf), "keep_%N", tmp);
- keep = lpp_add_var_default(si->lpp, buf, lpp_binary, get_cost(si, irn)*execution_frequency(si, bb), 1.0);
+ keep = lpp_add_var_default(si->lpp, buf, lpp_binary, get_cost(irn) * execution_frequency(si, bb), 1.0);
/* op may not be killed if its first live_range is 1 */
ir_snprintf(buf, sizeof(buf), "killorig-lr_%N", tmp);
lpp_set_factor_fast(si->lpp, requirements, arg_op->attr.live_range.ilp, 1.0);
lpp_set_factor_fast(si->lpp, requirements, op->attr.live_range.args.reloads[i], 1.0);
- foreach_pre_remat(si, irn, tmp) {
+ foreach_pre_remat(irn, tmp) {
op_t *remat_op = get_irn_link(tmp);
if(remat_op->attr.remat.remat->value == arg) {
lpp_set_factor_fast(si->lpp, requirements, remat_op->attr.remat.ilp, 1.0);
}
}
for(n = get_irn_arity(irn)-1; n>=0; --n) {
- if(get_irn_n(irn, n) == arg && arch_possible_memory_operand(arch_env, irn, n)) {
+ if (get_irn_n(irn, n) == arg &&
+ arch_possible_memory_operand(irn, n)) {
memoperand_t *memoperand;
memoperand = set_find_memoperand(si->memoperands, irn, n);
}
/* requirements for remats */
- foreach_pre_remat(si, irn, tmp) {
+ foreach_pre_remat(irn, tmp) {
op_t *remat_op = get_irn_link(tmp);
int n;
assert(has_reg_class(si, tmp));
}
+#ifndef NDEBUG
for (n=get_irn_arity(irn)-1; n>=0; --n) {
ir_node *arg = get_irn_n(irn, n);
assert(!find_post_remat(arg, irn) && "there should be no post remat for an argument of an op");
}
+#endif
del_pset(remat_defs);
del_pset(used);
{
const interference_t *p = a;
const interference_t *q = b;
+ (void) size;
return !(p->a == q->a && p->b == q->b);
}
return result;
}
-static int
-values_interfere_in_block(const spill_ilp_t * si, const ir_node * bb, const ir_node * a, const ir_node * b)
+static
+int values_interfere_in_block(const spill_ilp_t *si, const ir_node *bb, const ir_node *a, const ir_node *b)
{
const ir_edge_t *edge;
- if(get_nodes_block(a) != bb && get_nodes_block(b) != bb) {
+ if (get_nodes_block(a) != bb && get_nodes_block(b) != bb) {
/* both values are live in, so they interfere */
return 1;
}
/* ensure a dominates b */
- if(value_dominates(b,a)) {
- const ir_node * t;
+ if (value_dominates(b, a)) {
+ const ir_node *t;
t = b;
b = a;
a = t;
/* the following code is stolen from bera.c */
- if(be_is_live_end(si->lv, bb, a))
+ if (be_is_live_end(si->lv, bb, a))
return 1;
foreach_out_edge(a, edge) {
const ir_node *user = edge->src;
- if(get_nodes_block(user) == bb
- && !is_Phi(user)
+ if (get_nodes_block(user) == bb
+ && ! is_Phi(user)
&& b != user
- && !pset_find_ptr(si->inverse_ops, user)
+ && ! pset_find_ptr(si->inverse_ops, user)
&& value_dominates(b, user))
return 1;
}
}
-static INLINE int
+static inline int
is_zero(double x)
{
return fabs(x) < 0.00001;
}
+/**
+ * node attribute hook for changing colors
+ */
static int mark_remat_nodes_hook(FILE *F, ir_node *n, ir_node *l)
{
spill_ilp_t *si = get_irg_link(current_ir_graph);
+ (void) l;
if(pset_find_ptr(si->all_possible_remats, n)) {
op_t *op = (op_t*)get_irn_link(n);
}
/** insert a spill at an arbitrary position */
-ir_node *be_spill2(const arch_env_t *arch_env, ir_node *irn, ir_node *insert)
+static ir_node *be_spill2(ir_node *irn, ir_node *insert)
{
ir_node *bl = is_Block(insert) ? insert : get_nodes_block(insert);
ir_graph *irg = get_irn_irg(bl);
ir_node *frame = get_irg_frame(irg);
ir_node *spill;
ir_node *next;
- const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, irn, -1);
- const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
+ const arch_register_class_t *cls = arch_get_irn_reg_class_out(irn);
+ const arch_register_class_t *cls_frame = arch_get_irn_reg_class_out(frame);
spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn);
assert( get_irn_arity(block) == arity );
res = new_ir_node(NULL, irg, block, op_Phi, mode_M, arity, in);
- res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
+ res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
return res;
}
{
defs_t *defs;
ir_node *spill;
- const arch_env_t *arch_env = si->birg->main_env->arch_env;
DBG((si->dbg, LEVEL_3, "\t inserting spill for value %+F after %+F\n", irn, before));
- spill = be_spill2(arch_env, irn, before);
+ spill = be_spill2(irn, before);
defs = set_insert_def(si->values, value);
assert(defs);
defs_t *defs;
ir_node *reload,
*spill;
- const arch_env_t *arch_env = si->birg->main_env->arch_env;
DBG((si->dbg, LEVEL_3, "\t inserting reload for value %+F before %+F\n", value, after));
spill = defs->spills;
assert(spill && "no spill placed before reload");
- reload = be_reload(arch_env, si->cls, after, get_irn_mode(value), spill);
+ reload = be_reload(si->cls, after, get_irn_mode(value), spill);
/* enter into the linked list */
set_irn_link(reload, defs->remats);
void perform_memory_operand(spill_ilp_t * si, memoperand_t * memoperand)
{
- defs_t *defs;
- ir_node *value = get_irn_n(memoperand->irn, memoperand->pos);
- ir_node *spill;
- const arch_env_t *arch_env = si->birg->main_env->arch_env;
+ defs_t *defs;
+ ir_node *value = get_irn_n(memoperand->irn, memoperand->pos);
+ ir_node *spill;
DBG((si->dbg, LEVEL_2, "\t inserting memory operand for value %+F at %+F\n", value, memoperand->irn));
spill = defs->spills;
assert(spill && "no spill placed before reload");
- arch_perform_memory_operand(arch_env, memoperand->irn, spill, memoperand->pos);
+ arch_perform_memory_operand(memoperand->irn, spill, memoperand->pos);
}
void insert_memoperands(spill_ilp_t * si)
{
ir_node *insert_pos = bb;
ir_node *spill;
- const arch_env_t *arch_env = si->birg->main_env->arch_env;
/* find last definition of arg value in block */
ir_node *next;
DBG((si->dbg, LEVEL_2, "\t inserting mem copy for value %+F after %+F\n", value, insert_pos));
- spill = be_spill2(arch_env, is_Block(insert_pos)?value:insert_pos, insert_pos);
+ spill = be_spill2(is_Block(insert_pos)?value:insert_pos, insert_pos);
return spill;
}
if(!is_zero(name->value)) {
ir_node *reload;
ir_node *insert_pos = bb;
- ir_node *prev = sched_block_last_noncf(si, bb);
+ ir_node *prev = sched_block_last_noncf(bb);
op_t *prev_op = get_irn_link(prev);
while(be_is_Spill(prev)) {
if(!bitset_is_set(kh->used, get_irn_idx(irn))) {
if(be_is_Spill(irn) || be_is_Reload(irn)) {
- DBG((kh->si->dbg, LEVEL_1, "\t SUBOPTIMAL! %+F IS UNUSED (cost: %g)\n", irn, get_cost(kh->si, irn)*execution_frequency(kh->si, bb)));
+ DBG((kh->si->dbg, LEVEL_1, "\t SUBOPTIMAL! %+F IS UNUSED (cost: %g)\n", irn, get_cost(irn) * execution_frequency(kh->si, bb)));
#if 0
assert(lpp_get_sol_state(kh->si->lpp) != lpp_optimal && "optimal solution is suboptimal?");
#endif
rewire_uses(spill_ilp_t * si)
{
defs_t *defs;
- pset *ignore = pset_new_ptr(1);
- be_dom_front_info_t *dom_front = si->birg->dom_front;
+ ir_nodeset_t ignore;
- pset_insert_ptr(ignore, get_irg_end(si->birg->irg));
+ ir_nodeset_init(&ignore);
+ ir_nodeset_insert(&ignore, get_irg_end(si->birg->irg));
/* then fix uses of spills */
set_foreach(si->values, defs) {
spills = get_spills_for_value(si, defs->value);
DBG((si->dbg, LEVEL_2, "\t %d remats, %d reloads, and %d spills for value %+F\n", remats, pset_count(reloads), pset_count(spills), defs->value));
if(pset_count(spills) > 1) {
+ be_ssa_construction_env_t senv;
+ ir_node *node;
//assert(pset_count(reloads) > 0);
// print_irn_pset(spills);
// print_irn_pset(reloads);
- be_ssa_constr_set_ignore(dom_front, si->lv, spills, ignore);
+ be_ssa_construction_init(&senv, si->birg);
+ be_ssa_construction_set_ignore_uses(&senv, &ignore);
+ pset_foreach(spills, node) {
+ be_ssa_construction_add_copy(&senv, node);
+ }
+ pset_foreach(spills, node) {
+ be_ssa_construction_fix_users(&senv, node);
+ }
+ be_ssa_construction_update_liveness_phis(&senv, si->lv);
+ pset_foreach(spills, node) {
+ be_liveness_update(si->lv, node);
+ }
+ be_ssa_construction_destroy(&senv);
}
del_pset(reloads);
/* first fix uses of remats and reloads */
set_foreach(si->values, defs) {
- pset *nodes;
const ir_node *next = defs->remats;
int orig_kept = 0;
if(next) {
- nodes = pset_new_ptr_default();
+ be_ssa_construction_env_t senv;
+
+ be_ssa_construction_init(&senv, si->birg);
+
if(sched_is_scheduled(defs->value)) {
- pset_insert_ptr(nodes, defs->value);
+ be_ssa_construction_add_copy(&senv, (ir_node*) defs->value);
orig_kept = 1;
}
+ next = defs->remats;
+ while(next) {
+ be_ssa_construction_add_copy(&senv, (ir_node*) next);
+ next = get_irn_link(next);
+ }
+
+ if(sched_is_scheduled(defs->value)) {
+ be_ssa_construction_fix_users(&senv, (ir_node*) defs->value);
+ }
+
+ next = defs->remats;
while(next) {
- pset_insert_ptr(nodes, next);
+ be_ssa_construction_fix_users(&senv, (ir_node*) next);
next = get_irn_link(next);
}
- DBG((si->dbg, LEVEL_4, "\t %d new definitions for value %+F\n", pset_count(nodes)-orig_kept, defs->value));
- be_ssa_constr_set(dom_front, si->lv, nodes);
+ be_ssa_construction_update_liveness_phis(&senv, si->lv);
+ if(sched_is_scheduled(defs->value)) {
+ be_liveness_update(si->lv, (ir_node*) defs->value);
+ }
+
+ next = defs->remats;
+ while(next) {
+ be_liveness_update(si->lv, (ir_node*) next);
+ next = get_irn_link(next);
+ }
- del_pset(nodes);
+ be_ssa_construction_destroy(&senv);
}
}
+ ir_nodeset_destroy(&ignore);
// remove_unused_defs(si);
}
bitset_t *arch_regs = bitset_malloc(arch_n_regs);
bitset_t *abi_regs = bitset_malloc(arch_n_regs);
- arch_put_non_ignore_regs(si->birg->main_env->arch_env, si->cls, arch_regs);
- be_abi_put_ignore_regs(si->birg->abi, si->cls, abi_regs);
+ arch_put_non_ignore_regs(si->cls, arch_regs);
+ be_abi_put_ignore_regs(si->birg->abi, si->cls, abi_regs);
bitset_andnot(arch_regs, abi_regs);
arch_n_regs = bitset_popcnt(arch_regs);
if(opt_verify & VERIFY_DOMINANCE)
be_check_dominance(irg);
- be_assure_dom_front(birg);
be_assure_liveness(birg);
obstack_init(&obst);
#else /* WITH_ILP */
-static void INLINE
-only_that_you_can_compile_without_WITH_ILP_defined(void)
+static __attribute__((unused))
+void only_that_you_can_compile_without_WITH_ILP_defined(void)
{
}