#include "bearch_t.h"
#include "be_t.h"
#include "bemachine.h"
+#include "beirg.h"
struct _be_node_factory_t;
 * The backend may provide its own spiller.
 * This spiller needs to spill all register classes.
 */
- void (*spill)(void *self, void *env);
+ void (*spill)(void *self, be_irg_t *birg);
/**
* Called before scheduling.
#define arch_code_generator_after_ra(cg) _arch_cg_call(cg, after_ra)
#define arch_code_generator_finish(cg) _arch_cg_call(cg, finish)
#define arch_code_generator_done(cg) _arch_cg_call(cg, done)
-#define arch_code_generator_spill(cg, env) _arch_cg_call_env(cg, env, spill)
+#define arch_code_generator_spill(cg, birg) _arch_cg_call_env(cg, birg, spill)
#define arch_code_generator_has_spiller(cg) ((cg)->impl->spill != NULL)
/**
static int be_elr_split = 0;
typedef struct _post_spill_env_t {
- be_chordal_env_t cenv;
- double pre_spill_cost;
+ be_chordal_env_t cenv;
+ be_irg_t *birg;
+ const arch_register_class_t *cls;
+ double pre_spill_cost;
} post_spill_env_t;
static be_ra_timer_t ra_timer = {
} node_stat_t;
struct node_stat_walker {
- node_stat_t *stat;
- const be_chordal_env_t *cenv;
- bitset_t *mem_phis;
+ node_stat_t *stat;
+ const arch_env_t *arch_env;
+ bitset_t *mem_phis;
+ const arch_register_class_t *cls;
};
static void node_stat_walker(ir_node *irn, void *data)
{
- struct node_stat_walker *env = data;
- const arch_env_t *aenv = env->cenv->birg->main_env->arch_env;
+ struct node_stat_walker *env = data;
+ const arch_env_t *aenv = env->arch_env;
- if(arch_irn_consider_in_reg_alloc(aenv, env->cenv->cls, irn)) {
+ if (arch_irn_consider_in_reg_alloc(aenv, env->cls, irn)) {
/* if the node is a normal phi */
if(is_Phi(irn))
}
}
/* NOTE(review): diff hunk — '-' lines are the removed old version, '+' lines
 * the replacement.  node_stats() walks the whole irg with node_stat_walker and
 * accumulates per-register-class node counts into *stat.  The hunk replaces
 * the chordal-env parameter with an explicit (birg, cls) pair so the walker
 * no longer depends on a be_chordal_env_t. */
-static void node_stats(const be_chordal_env_t *cenv, node_stat_t *stat)
+static void node_stats(be_irg_t *birg, const arch_register_class_t *cls, node_stat_t *stat)
{
	struct node_stat_walker env;
	memset(stat, 0, sizeof(stat[0]));
-	env.cenv = cenv;
-	env.mem_phis = bitset_irg_malloc(cenv->irg);
+	env.arch_env = birg->main_env->arch_env;
+	env.mem_phis = bitset_irg_malloc(birg->irg);
	env.stat = stat;
-	irg_walk_graph(cenv->irg, NULL, node_stat_walker, &env);
+	env.cls = cls;
+	irg_walk_graph(birg->irg, NULL, node_stat_walker, &env);
	bitset_free(env.mem_phis);
}
*/
/* NOTE(review): diff hunk — '-' = removed, '+' = added lines.
 * pre_spill(): per-register-class setup before the spill phase — selects the
 * class for cls_idx, allocates border_heads / ignore_colors, collects the
 * ignore registers, prepares spill constraints and, with FIRM_STATISTICS,
 * records node stats and the pre-spill cost estimate into *pse.  The hunk
 * moves the class/birg references from chordal_env into post_spill_env_t. */
static void pre_spill(const arch_isa_t *isa, int cls_idx, post_spill_env_t *pse) {
	be_chordal_env_t *chordal_env = &pse->cenv;
+	be_irg_t *birg = pse->birg;
	node_stat_t node_stat;
-	chordal_env->cls = arch_isa_get_reg_class(isa, cls_idx);
+	pse->cls = arch_isa_get_reg_class(isa, cls_idx);
+	chordal_env->cls = pse->cls;
	chordal_env->border_heads = pmap_create();
	chordal_env->ignore_colors = bitset_malloc(chordal_env->cls->n_regs);
#ifdef FIRM_STATISTICS
	if (be_stat_ev_is_active()) {
-		be_stat_tags[STAT_TAG_CLS] = chordal_env->cls->name;
+		be_stat_tags[STAT_TAG_CLS] = pse->cls->name;
		be_stat_ev_push(be_stat_tags, STAT_TAG_LAST, be_stat_file);
		/* perform some node statistics. */
-		node_stats(chordal_env, &node_stat);
+		node_stats(birg, pse->cls, &node_stat);
		be_stat_ev("phis_before_spill", node_stat.n_phis);
	}
#endif /* FIRM_STATISTICS */
	/* put all ignore registers into the ignore register set. */
-	be_put_ignore_regs(chordal_env->birg, chordal_env->cls, chordal_env->ignore_colors);
+	be_put_ignore_regs(birg, pse->cls, chordal_env->ignore_colors);
	be_pre_spill_prepare_constr(chordal_env);
-	dump(BE_CH_DUMP_CONSTR, chordal_env->irg, chordal_env->cls, "-constr-pre", dump_ir_block_graph_sched);
+	dump(BE_CH_DUMP_CONSTR, birg->irg, pse->cls, "-constr-pre", dump_ir_block_graph_sched);
#ifdef FIRM_STATISTICS
	if (be_stat_ev_is_active()) {
-		pse->pre_spill_cost = be_estimate_irg_costs(chordal_env->irg,
-			chordal_env->birg->main_env->arch_env, chordal_env->birg->exec_freq);
+		pse->pre_spill_cost = be_estimate_irg_costs(birg->irg,
+			birg->main_env->arch_env, birg->exec_freq);
	}
#endif /* FIRM_STATISTICS */
}
*/
static void post_spill(post_spill_env_t *pse) {
be_chordal_env_t *chordal_env = &pse->cenv;
- ir_graph *irg = chordal_env->irg;
- be_irg_t *birg = chordal_env->birg;
+ be_irg_t *birg = pse->birg;
+ ir_graph *irg = birg->irg;
const be_main_env_t *main_env = birg->main_env;
be_options_t *main_opts = main_env->options;
static int splitted = 0;
be_stat_ev_l("spillcosts", (long) spillcosts);
- node_stats(chordal_env, &node_stat);
+ node_stats(birg, pse->cls, &node_stat);
be_stat_ev("phis_after_spill", node_stat.n_phis);
be_stat_ev("mem_phis", node_stat.n_mem_phis);
be_stat_ev("reloads", node_stat.n_reloads);
/* verify schedule and register pressure */
if (chordal_env->opts->vrfy_option == BE_CH_VRFY_WARN) {
be_verify_schedule(irg);
- be_verify_register_pressure(chordal_env->birg, chordal_env->cls, irg);
+ be_verify_register_pressure(birg, pse->cls, irg);
}
else if (chordal_env->opts->vrfy_option == BE_CH_VRFY_ASSERT) {
assert(be_verify_schedule(irg) && "Schedule verification failed");
- assert(be_verify_register_pressure(chordal_env->birg, chordal_env->cls, irg)
+ assert(be_verify_register_pressure(birg, pse->cls, irg)
&& "Register pressure verification failed");
}
BE_TIMER_POP(ra_timer.t_verify);
be_ra_chordal_color(chordal_env);
BE_TIMER_POP(ra_timer.t_color);
- dump(BE_CH_DUMP_CONSTR, irg, chordal_env->cls, "-color", dump_ir_block_graph_sched);
+ dump(BE_CH_DUMP_CONSTR, irg, pse->cls, "-color", dump_ir_block_graph_sched);
/* Create the ifg with the selected flavor */
BE_TIMER_PUSH(ra_timer.t_ifg);
#ifdef FIRM_STATISTICS
if (be_stat_ev_is_active()) {
be_ifg_stat_t stat;
- be_ifg_stat(chordal_env, &stat);
+
+ be_ifg_stat(birg, chordal_env->ifg, &stat);
be_stat_ev("ifg_nodes", stat.n_nodes);
be_stat_ev("ifg_edges", stat.n_edges);
be_stat_ev("ifg_comps", stat.n_comps);
- node_stats(chordal_env, &node_stat);
+ node_stats(birg, pse->cls, &node_stat);
be_stat_ev("perms_before_coal", node_stat.n_perms);
be_stat_ev("copies_before_coal", node_stat.n_copies);
}
co_driver(chordal_env);
BE_TIMER_POP(ra_timer.t_copymin);
- dump(BE_CH_DUMP_COPYMIN, irg, chordal_env->cls, "-copymin", dump_ir_block_graph_sched);
+ dump(BE_CH_DUMP_COPYMIN, irg, pse->cls, "-copymin", dump_ir_block_graph_sched);
BE_TIMER_PUSH(ra_timer.t_ssa);
BE_TIMER_POP(ra_timer.t_ssa);
- dump(BE_CH_DUMP_SSADESTR, irg, chordal_env->cls, "-ssadestr", dump_ir_block_graph_sched);
+ dump(BE_CH_DUMP_SSADESTR, irg, pse->cls, "-ssadestr", dump_ir_block_graph_sched);
BE_TIMER_PUSH(ra_timer.t_verify);
if (chordal_env->opts->vrfy_option != BE_CH_VRFY_OFF) {
}
BE_TIMER_POP(ra_timer.t_verify);
+ /* free some data structures */
be_ifg_free(chordal_env->ifg);
pmap_destroy(chordal_env->border_heads);
bitset_free(chordal_env->ignore_colors);
#ifdef FIRM_STATISTICS
if (be_stat_ev_is_active()) {
- node_stats(chordal_env, &node_stat);
+ node_stats(birg, pse->cls, &node_stat);
be_stat_ev("perms_after_coal", node_stat.n_perms);
be_stat_ev("copies_after_coal", node_stat.n_copies);
be_stat_ev_pop();
post_spill_env_t pse;
memcpy(&pse.cenv, &chordal_env, sizeof(chordal_env));
+ pse.birg = birg;
pre_spill(isa, j, &pse);
BE_TIMER_PUSH(ra_timer.t_spill);
be_do_spill(&pse.cenv);
BE_TIMER_POP(ra_timer.t_spill);
- dump(BE_CH_DUMP_SPILL, irg, pse.cenv.cls, "-spill", dump_ir_block_graph_sched);
+ dump(BE_CH_DUMP_SPILL, irg, pse.cls, "-spill", dump_ir_block_graph_sched);
post_spill(&pse);
}
for (j = 0; j < m; ++j) {
memcpy(&pse[j].cenv, &chordal_env, sizeof(chordal_env));
+ pse[j].birg = birg;
pre_spill(isa, j, &pse[j]);
}
BE_TIMER_PUSH(ra_timer.t_spill);
- arch_code_generator_spill(birg->cg, &chordal_env);
+ arch_code_generator_spill(birg->cg, birg);
BE_TIMER_POP(ra_timer.t_spill);
dump(BE_CH_DUMP_SPILL, irg, NULL, "-spill", dump_ir_block_graph_sched);
bitset_free(nodes);
}
/* NOTE(review): diff hunk — '-' = removed, '+' = added lines.
 * int_comp_rec(): DFS helper for int_component_stat; marks in `seen` every
 * non-ignore node reachable from n in the interference graph.  The hunk
 * changes the parameters from a chordal env to an explicit (birg, ifg) pair;
 * recursion depth is bounded by the component size — presumably acceptable
 * for IFG sizes here, TODO confirm. */
-static void int_comp_rec(const be_chordal_env_t *cenv, ir_node *n, bitset_t *seen)
+static void int_comp_rec(be_irg_t *birg, be_ifg_t *ifg, ir_node *n, bitset_t *seen)
{
-	void *neigh_it = be_ifg_neighbours_iter_alloca(cenv->ifg);
+	void *neigh_it = be_ifg_neighbours_iter_alloca(ifg);
	ir_node *m;
-	be_ifg_foreach_neighbour(cenv->ifg, neigh_it, n, m) {
-		if(!bitset_contains_irn(seen, m) && !arch_irn_is(cenv->birg->main_env->arch_env, m, ignore)) {
+	be_ifg_foreach_neighbour(ifg, neigh_it, n, m) {
+		if(!bitset_contains_irn(seen, m) && !arch_irn_is(birg->main_env->arch_env, m, ignore)) {
			bitset_add_irn(seen, m);
-			int_comp_rec(cenv, m, seen);
+			int_comp_rec(birg, ifg, m, seen);
		}
	}
}
/* NOTE(review): diff hunk — '-' = removed, '+' = added lines.
 * int_component_stat(): counts the connected components of the interference
 * graph, skipping nodes the architecture marks as `ignore`.  Each unseen node
 * starts a new component and int_comp_rec() floods the rest of it. */
-static int int_component_stat(const be_chordal_env_t *cenv)
+static int int_component_stat(be_irg_t *birg, be_ifg_t *ifg)
{
-	int n_comp = 0;
-	void *nodes_it = be_ifg_nodes_iter_alloca(cenv->ifg);
-	bitset_t *seen = bitset_irg_malloc(cenv->irg);
+	int n_comp = 0;
+	void *nodes_it = be_ifg_nodes_iter_alloca(ifg);
+	bitset_t *seen = bitset_irg_malloc(birg->irg);
	ir_node *n;
-	be_ifg_foreach_node(cenv->ifg, nodes_it, n) {
-		if(!bitset_contains_irn(seen, n) && !arch_irn_is(cenv->birg->main_env->arch_env, n, ignore)) {
+	be_ifg_foreach_node(ifg, nodes_it, n) {
+		if (! bitset_contains_irn(seen, n) && ! arch_irn_is(birg->main_env->arch_env, n, ignore)) {
			++n_comp;
			bitset_add_irn(seen, n);
-			int_comp_rec(cenv, n, seen);
+			int_comp_rec(birg, ifg, n, seen);
		}
	}
	return n_comp;
}
/* NOTE(review): diff hunk — '-' = removed, '+' = added lines.
 * be_ifg_stat(): fills *stat with node, edge and component counts of the
 * interference graph.  Edges are counted once: a neighbour m already present
 * in the `nodes` bitset was visited as a source node before, so the edge was
 * counted from the other side.  The `seen` bitset in int_component_stat leaks
 * in the old version as well — NOTE(review): not freed here either, verify
 * upstream whether bitset_irg_malloc is obstack-backed. */
-void be_ifg_stat(const be_chordal_env_t *cenv, be_ifg_stat_t *stat)
+void be_ifg_stat(be_irg_t *birg, be_ifg_t *ifg, be_ifg_stat_t *stat)
{
-	void *nodes_it = be_ifg_nodes_iter_alloca(cenv->ifg);
-	void *neigh_it = be_ifg_neighbours_iter_alloca(cenv->ifg);
-	bitset_t *nodes = bitset_irg_malloc(cenv->irg);
-
-	ir_node *n, *m;
+	void *nodes_it = be_ifg_nodes_iter_alloca(ifg);
+	void *neigh_it = be_ifg_neighbours_iter_alloca(ifg);
+	bitset_t *nodes = bitset_irg_malloc(birg->irg);
+	ir_node *n, *m;
	memset(stat, 0, sizeof(stat[0]));
-	be_ifg_foreach_node(cenv->ifg, nodes_it, n) {
+
+	be_ifg_foreach_node(ifg, nodes_it, n) {
		stat->n_nodes += 1;
-		be_ifg_foreach_neighbour(cenv->ifg, neigh_it, n, m) {
+		be_ifg_foreach_neighbour(ifg, neigh_it, n, m) {
			bitset_add_irn(nodes, n);
			stat->n_edges += !bitset_contains_irn(nodes, m);
		}
	}
-	stat->n_comps = int_component_stat(cenv);
+	stat->n_comps = int_component_stat(birg, ifg);
	bitset_free(nodes);
}
int n_comps;
} be_ifg_stat_t;
-void be_ifg_stat(const be_chordal_env_t *cenv, be_ifg_stat_t *stat);
+void be_ifg_stat(be_irg_t *birg, be_ifg_t *ifg, be_ifg_stat_t *stat);
be_ifg_t *be_create_ifg(const be_chordal_env_t *env);
static void
regpressureanawalker(ir_node * bb, void * data)
{
- regpressure_ana_t *ra = data;
+ regpressure_ana_t *ra = data;
pset *live = pset_new_ptr_default();
const ir_node *irn;
unsigned int *stat = ra->stat;
- int i;
- be_lv_t *lv = ra->lv;
+ int i;
+ const be_lv_t *lv = ra->lv;
be_lv_foreach(lv, bb, be_lv_state_end, i) {
ir_node *value = be_lv_get_irn(lv, bb, i);
struct _reloader_t {
reloader_t *next;
- ir_node *reloader;
- ir_node *rematted_node;
+ ir_node *reloader;
+ ir_node *rematted_node;
+ int allow_remat; /**< the node may be rematted instead of reloaded if global remat option is on */
};
typedef struct _spill_info_t {
spill_info = get_spillinfo(env, to_spill);
- reloader = obstack_alloc(&env->obst, sizeof(reloader[0]));
- reloader->next = spill_info->reloaders;
- reloader->reloader = before;
+ /* add the remat information */
+ reloader = obstack_alloc(&env->obst, sizeof(reloader[0]));
+ reloader->next = spill_info->reloaders;
+ reloader->reloader = before;
reloader->rematted_node = rematted_node;
+ reloader->allow_remat = 1;
- spill_info->reloaders = reloader;
+ spill_info->reloaders = reloader;
assert(spill_info->reload_cls == NULL || spill_info->reload_cls == reload_cls);
spill_info->reload_cls = reload_cls;
}
-void be_add_reload(spill_env_t *env, ir_node *to_spill, ir_node *before, const arch_register_class_t *reload_cls) {
+void be_add_reload(spill_env_t *env, ir_node *to_spill, ir_node *before,
+ const arch_register_class_t *reload_cls, int allow_remat)
+{
spill_info_t *info;
reloader_t *rel;
info = get_spillinfo(env, to_spill);
- if(is_Phi(to_spill)) {
+ if (is_Phi(to_spill)) {
int i, arity;
/* create spillinfos for the phi arguments */
// spill node and adds a reload for that spill node, problem is the
// reload gets attach to that same spill (and is totally unnecessary)
if (info->old_spill != NULL &&
- (before == info->old_spill || value_dominates(before, info->old_spill))) {
+ (before == info->old_spill || value_dominates(before, info->old_spill)))
+ {
printf("spilledphi hack was needed...\n");
before = sched_next(info->old_spill);
}
}
/* put reload into list */
- rel = obstack_alloc(&env->obst, sizeof(rel[0]));
- rel->next = info->reloaders;
- rel->reloader = before;
+ rel = obstack_alloc(&env->obst, sizeof(rel[0]));
+ rel->next = info->reloaders;
+ rel->reloader = before;
rel->rematted_node = NULL;
+ rel->allow_remat = allow_remat;
info->reloaders = rel;
assert(info->reload_cls == NULL || info->reload_cls == reload_cls);
return last;
}
/* NOTE(review): diff hunk — '-' = removed, '+' = added lines.
 * be_add_reload_on_edge(): convenience wrapper around be_add_reload that
 * places the reload on the control-flow edge entering `block` at predecessor
 * position `pos`.  The hunk threads the new `allow_remat` flag through. */
-void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill,
-		ir_node *block, int pos, const arch_register_class_t *reload_cls)
+void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block, int pos,
+		const arch_register_class_t *reload_cls, int allow_remat)
{
	ir_node *before = get_reload_insertion_point(block, pos);
-	be_add_reload(env, to_spill, before, reload_cls);
+	be_add_reload(env, to_spill, before, reload_cls, allow_remat);
}
void be_spill_phi(spill_env_t *env, ir_node *node) {
for (rld = si->reloaders; rld; rld = rld->next) {
ir_node *new_val;
- if(rld->rematted_node != NULL) {
+ if (rld->rematted_node != NULL) {
new_val = rld->rematted_node;
remats++;
sched_add_before(rld->reloader, new_val);
- } else if (be_do_remats && check_remat_conditions(env, si->spilled_node, rld->reloader)) {
+ }
+ else if (be_do_remats && rld->allow_remat && check_remat_conditions(env, si->spilled_node, rld->reloader)) {
new_val = do_remat(env, si->spilled_node, rld->reloader);
remats++;
}
* explicitly create spill nodes, they will be created automatically after
* the definition of a value as soon as a reload is created. (we should add a
* possibility for explicit spill placement in the future)
+ *
+ * @param senv The spill environment
+ * @param to_spill The node which is about to be spilled
+ * @param before The node before the reload should be added
+ * @param reload_cls The register class the reloaded value will be put into
+ * @param allow_remat Set to 1 if the node may be rematerialized instead of reloaded
*/
-void be_add_reload(spill_env_t *senv, ir_node *to_spill, ir_node *before, const arch_register_class_t *reload_cls);
+void be_add_reload(spill_env_t *senv, ir_node *to_spill, ir_node *before,
+ const arch_register_class_t *reload_cls, int allow_remat);
/**
* Analog to be_add_reload, but places the reload "on an edge" between 2 blocks
+ * @see be_add_reload
*/
-void be_add_reload_on_edge(spill_env_t *senv, ir_node *to_spill, ir_node *bl, int pos, const arch_register_class_t *reload_cls);
+void be_add_reload_on_edge(spill_env_t *senv, ir_node *to_spill, ir_node *bl, int pos,
+ const arch_register_class_t *reload_cls, int allow_remat);
/**
* Analog to be_add_reload but adds an already created rematerialized node.
to_insert[demand++] = val;
if (is_usage) {
DBG((dbg, DBG_SPILL, "Reload %+F before %+F\n", val, env->instr));
- be_add_reload(env->senv, val, env->instr, env->cls);
+ be_add_reload(env->senv, val, env->instr, env->cls, 1);
}
}
else {
/* irnb is not in memory at the end of pred, so we have to reload it */
DBG((dbg, DBG_FIX, " reload %+F\n", irnb));
DBG((dbg, DBG_SPILL, "Reload %+F before %+F,%d\n", irnb, block, i));
- be_add_reload_on_edge(env->senv, irnb, block, i, env->cls);
+ be_add_reload_on_edge(env->senv, irnb, block, i, env->cls, 1);
next_value:
/*epsilon statement :)*/;
bitset_t *livethrough_unused;
} loop_attr_t;
-typedef struct block_attr {
+typedef struct morgan_block_attr {
const ir_node *block;
/** set of all values that are live in the block but not used in the block */
bitset_t *livethrough_unused;
DBG((dbg, DBG_CHOOSE, "Spilling %+F ", to_spill));
for(edge = set_first(loop_attr->out_edges); edge != NULL; edge = set_next(loop_attr->out_edges)) {
- be_add_reload_on_edge(env->senv, to_spill, edge->block, edge->pos, env->cls);
+ be_add_reload_on_edge(env->senv, to_spill, edge->block, edge->pos, env->cls, 1);
}
}
}
/* NOTE(review): diff hunk — '-' = removed, '+' = added line.
 * Adapter to the old spiller interface; forwards to be_spill_remat.  The hunk
 * drops the `return` keyword, which was invalid on a void-returning call in a
 * void function. */
static void be_spill_remat_oldinterface(const be_chordal_env_t *cenv)
{
-	return be_spill_remat(cenv->birg, cenv->cls);
+	be_spill_remat(cenv->birg, cenv->cls);
}
void be_init_spillremat(void)