* Copyright: (c) Universitaet Karlsruhe
* Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
*/
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+#ifdef HAVE_ALLOCA_H
#include <alloca.h>
+#endif
+
+#ifdef HAVE_MALLOC_H
+#include <malloc.h>
+#endif
+
#include "obst.h"
#include "set.h"
#include "pset.h"
#include "irgraph.h"
#include "irnode.h"
#include "irmode.h"
-#include "ircons.h"
#include "irgwalk.h"
#include "iredges_t.h"
+#include "ircons_t.h"
#include "beutil.h"
#include "bearch.h"
#include "beirgmod.h"
#include "belive_t.h"
#include "benode_t.h"
+#include "bechordal_t.h"
+
+#define DBG_SPILL 1
+#define DBG_WSETS 2
+#define DBG_FIX 4
+#define DBG_DECIDE 8
+#define DBG_START 16
+#define DBG_TRACE 64
+#define DEBUG_LVL SET_LEVEL_0 //(DBG_START | DBG_DECIDE | DBG_WSETS | DBG_FIX | DBG_SPILL)
+static firm_dbg_module_t *dbg = NULL;
#define MIN(a,b) (((a)<(b))?(a):(b))
-
-#define DBG_DECIDE 1
-#define DBG_WSETS 2
-#define DBG_FIX 4
-#define DBG_SPILL 8
-#define DBG_START 16
-#define DBG_TRACE 32
-#define DEBUG_LVL (DBG_START | DBG_DECIDE | DBG_WSETS | DBG_FIX | DBG_SPILL)
-static firm_dbg_module_t *dbg = NULL;
+#define SINGLE_START_PROJS
typedef struct _workset_t workset_t;
*/
static INLINE void workset_insert(workset_t *ws, ir_node *val) {
int i;
- assert(ws->len < ws->bel->n_regs && "Workset already full!");
/* check for current regclass */
+ /* Values of a foreign register class are silently dropped; the register
+  * class is now queried with position -1 (the node's result). */
- if (arch_get_irn_reg_class(ws->bel->arch, val, 0) != ws->bel->cls) {
- DBG((dbg, 0, "Dropped %+F\n", val));
+ if (arch_get_irn_reg_class(ws->bel->arch, val, -1) != ws->bel->cls) {
+ DBG((dbg, DBG_DECIDE, "Dropped %+F\n", val));
return;
}
+ /* NOTE(review): the duplicate-check loop between here and the insert is
+  * elided diff context; the bare return below belongs to it. */
return;
/* insert val */
+ assert(ws->len < ws->bel->n_regs && "Workset already full!");
ws->vals[ws->len++].irn = val;
}
*/
static INLINE void workset_bulk_insert(workset_t *ws, int cnt, ir_node **vals) {
int i, o;
- assert(ws->len + cnt <= ws->bel->n_regs && "Workset does not have enough room!");
+ /* Each value is checked individually, so the bulk room-assert above is
+  * replaced by a per-insert assert below. */
for(o=0; o<cnt; ++o) {
ir_node *val = vals[o];
DBG((dbg, DBG_TRACE, "Bulk insert %+F\n", val));
/* check for current regclass */
- if (arch_get_irn_reg_class(ws->bel->arch, val, 0) != ws->bel->cls) {
+ if (arch_get_irn_reg_class(ws->bel->arch, val, -1) != ws->bel->cls) {
DBG((dbg, DBG_TRACE, "Wrong reg class\n"));
goto no_insert;
}
}
+ /* NOTE(review): the duplicate-scan loop and the no_insert label are
+  * elided diff context here; the function's closing brace is also elided. */
/* insert val */
+ assert(ws->len < ws->bel->n_regs && "Workset does not have enough room!");
ws->vals[ws->len++].irn = val;
DBG((dbg, DBG_TRACE, "Inserted\n"));
#define workset_sort(ws) qsort((ws)->vals, (ws)->len, sizeof((ws)->vals[0]), loc_compare);
+/**
+ * Spill-env callback: a Phi is a memory phi iff its result is not held in a
+ * register at the start of its block, i.e. it is absent from the block's
+ * start workset (ws_start stored via the block's irn link).
+ */
+static int is_mem_phi(const ir_node *irn, void *data) {
+ workset_t *sws;
+ ir_node *blk = get_nodes_block(irn);
+
+ DBG((dbg, DBG_SPILL, "Is %+F a mem-phi?\n", irn));
+ sws = ((block_info_t *) get_irn_link(blk))->ws_start;
+ DBG((dbg, DBG_SPILL, " %d\n", !workset_contains(sws, irn)));
+ return !workset_contains(sws, irn);
+}
+
/**
* Collects all values live-in at block @p blk and all phi results in this block.
* Then it adds the best values (at most n_regs) to the ws.
first = sched_first(blk);
count = 0;
sched_foreach(blk, irn)
- if (is_Phi(irn) && arch_get_irn_reg_class(bel->arch, irn, 0) == bel->cls) {
+ if (is_Phi(irn) && arch_get_irn_reg_class(bel->arch, irn, -1) == bel->cls) {
loc.irn = irn;
loc.time = be_get_next_use(bel->uses, first, 0, irn, 0);
DBG((dbg, DBG_START, " %+F next-use %d\n", loc.irn, loc.time));
break;
live_foreach(blk, li)
- if (live_is_in(li) && arch_get_irn_reg_class(bel->arch, li->irn, 0) == bel->cls) {
+ if (live_is_in(li) && arch_get_irn_reg_class(bel->arch, li->irn, -1) == bel->cls) {
loc.irn = (ir_node *)li->irn;
loc.time = be_get_next_use(bel->uses, first, 0, li->irn, 0);
DBG((dbg, DBG_START, " %+F next-use %d\n", loc.irn, loc.time));
if (len > max_allowed) {
/* get current next-use distance */
for (i=0; i<ws->len; ++i)
- workset_set_time(ws, i, be_get_next_use(bel->uses, bel->instr, bel->instr_nr, workset_get_val(ws, i), is_usage));
+ workset_set_time(ws, i, be_get_next_use(bel->uses, bel->instr, bel->instr_nr, workset_get_val(ws, i), !is_usage));
/* sort entries by increasing nextuse-distance*/
workset_sort(ws);
belady_env_t *bel = env;
workset_t *new_vals;
ir_node *irn;
+#ifdef SINGLE_START_PROJS
+ ir_node *start_blk = get_irg_start_block(get_irn_irg(blk));
+#endif
block_info_t *blk_info = obstack_alloc(&bel->ob, sizeof(*blk_info));
set_irn_link(blk, blk_info);
bel->instr_nr = 0;
new_vals = new_workset(&bel->ob, bel);
sched_foreach(blk, irn) {
- ir_node *iii;
- DBG((dbg, DBG_WSETS, "Current workset for %+F:\n", blk));
- workset_foreach(bel->ws, iii)
- DBG((dbg, DBG_WSETS, " %+F\n", iii));
assert(workset_get_length(bel->ws) <= bel->n_regs && "Too much values in workset!");
- DBG((dbg, DBG_DECIDE, " ...%+F\n", irn));
+#ifdef SINGLE_START_PROJS
+ if (is_Phi(irn) ||
+ (is_Proj(irn) && blk!=start_blk) ||
+ (get_irn_mode(irn) == mode_T && blk==start_blk)) {
+ DBG((dbg, DBG_DECIDE, " ...%+F skipped\n", irn));
+ continue;
+ }
+#else
/* projs are handled with the tuple value.
* Phis are no real instr (see insert_starters)
* instr_nr does not increase */
- if (is_Proj(irn) || is_Phi(irn))
+ if (is_Proj(irn) || is_Phi(irn)) {
+ DBG((dbg, DBG_DECIDE, " ...%+F skipped\n", irn));
continue;
+ }
+#endif
+ DBG((dbg, DBG_DECIDE, " ...%+F\n", irn));
/* set instruction in the workset */
bel->instr = irn;
}
/**
- * 'decide' is block-local and makes assumtions
+ * 'decide' is block-local and makes assumptions
 * about the set of live-ins. Thus we must adapt the
* live-outs to the live-ins at each block-border.
*/
static void fix_block_borders(ir_node *blk, void *env) {
+ workset_t *wsb;
+ /* declaration hoisted above the first statement (C89 compliance) */
belady_env_t *bel = env;
int i, max;
DBG((dbg, DBG_FIX, "\n"));
DBG((dbg, DBG_FIX, "Fixing %+F\n", blk));
- workset_t *wsb = ((block_info_t *)get_irn_link(blk))->ws_start;
+ wsb = ((block_info_t *)get_irn_link(blk))->ws_start;
/* process all pred blocks */
for (i=0, max=get_irn_arity(blk); i<max; ++i) {
+ /* NOTE(review): the lines fetching the pred's end-workset (wsp) and the
+  * initial irnb are elided diff context here. */
if(is_Phi(irnb) && blk == get_nodes_block(irnb))
irnb = get_irn_n(irnb, i);
+ /* Unknowns are available everywhere */
+ if(get_irn_opcode(irnb) == iro_Unknown)
+ continue;
+
/* check if irnb is in a register at end of pred */
workset_foreach(wsp, irnp)
if (irnb == irnp)
goto next_value;
/* irnb is in memory at the end of pred, so we have to reload it */
+ DBG((dbg, DBG_FIX, " reload %+F\n", irnb));
be_add_reload_on_edge(bel->senv, irnb, blk, i);
next_value:
* The remaining nodes in bel->reloads will be removed from the graph.
*/
static void rescue_used_reloads(ir_node *irn, void *env) {
- pset *rlds = ((belady_env_t *)env)->reloads;
- if (pset_find_ptr(rlds, irn)) {
- DBG((dbg, DBG_SPILL, "Removing %+F in %+F\n", irn, get_nodes_block(irn)));
+ /* env is now the reload pset itself, not the whole belady env; a reload
+  * reached by the graph walk is still used, so un-mark it. The nodes left
+  * in the set afterwards are the unused reloads. */
+ pset *rlds = (pset *)env;
+ if (pset_find_ptr(rlds, irn))
pset_remove_ptr(rlds, irn);
- }
}
-static int is_mem_phi(const ir_node *irn, void *data) {
- ir_node *blk = get_nodes_block(irn);
- workset_t *sws = ((block_info_t *)get_irn_link(blk))->ws_start;
- return !workset_contains(sws, irn);
+/**
+ * Find all unused reloads and remove them from the schedule.
+ * Also removes a spill if it is no longer used after its reloads are gone.
+ */
+static void remove_unused_reloads(ir_graph *irg, belady_env_t *bel) {
+ ir_node *irn;
+
+ /* the walk removes every still-used reload from bel->reloads, leaving
+ * only the unused ones in the set */
+ irg_walk_graph(irg, rescue_used_reloads, NULL, bel->reloads);
+ for(irn = pset_first(bel->reloads); irn; irn = pset_next(bel->reloads)) {
+ ir_node *spill;
+ DBG((dbg, DBG_SPILL, "Removing %+F before %+F in %+F\n", irn, sched_next(irn), get_nodes_block(irn)));
+
+ spill = get_irn_n(irn, 0);
+
+ /* remove reload */
+ set_irn_n(irn, 0, new_Bad());
+ sched_remove(irn);
+
+ /* if spill not used anymore, remove it too
+ * test of regclass is necessary since spill may be a phi-M */
+ if (get_irn_n_edges(spill) == 0 && bel->cls == arch_get_irn_reg_class(bel->arch, spill, -1)) {
+ set_irn_n(spill, 0, new_Bad());
+ sched_remove(spill);
+ }
+ }
}
-void be_spill_belady(const be_main_session_env_t *session, const arch_register_class_t *cls) {
- ir_node *irn;
+/**
+ * Main entry point: run Belady spilling for the register class of the
+ * given chordal environment.
+ */
+void be_spill_belady(const be_chordal_env_t *chordal_env) {
+ belady_env_t bel; /* stack object replaces the old alloca'd env */
dbg = firm_dbg_register("ir.be.spillbelady");
firm_dbg_set_mask(dbg, DEBUG_LVL);
/* init belady env */
- belady_env_t *bel = alloca(sizeof(*bel));
- obstack_init(&bel->ob);
- bel->factory = session->main_env->node_factory;
- bel->arch = session->main_env->arch_env;
- bel->cls = cls;
- bel->n_regs = arch_register_class_n_regs(cls);
- bel->ws = new_workset(&bel->ob, bel);
- bel->uses = be_begin_uses(session->irg, session->main_env->arch_env, cls);
- bel->senv = be_new_spill_env(dbg, session, cls);
- bel->reloads = pset_new_ptr_default();
+ obstack_init(&bel.ob);
+ bel.factory = chordal_env->main_env->node_factory;
+ bel.arch = chordal_env->main_env->arch_env;
+ bel.cls = chordal_env->cls;
+ bel.n_regs = arch_register_class_n_regs(bel.cls);
+ bel.ws = new_workset(&bel.ob, &bel);
+ bel.uses = be_begin_uses(chordal_env->irg, chordal_env->main_env->arch_env, bel.cls);
+ /* the mem-phi decision callback is now passed at spill-env creation
+  * instead of to be_insert_spills_reloads */
+ bel.senv = be_new_spill_env(dbg, chordal_env, is_mem_phi, NULL);
+ bel.reloads = pset_new_ptr_default();
/* do the work */
- irg_block_walk_graph(session->irg, decide, NULL, bel);
- irg_block_walk_graph(session->irg, fix_block_borders, NULL, bel);
- be_insert_spills_reloads(bel->senv, bel->reloads, is_mem_phi, NULL);
-
- /* find all unused reloads and remove them from the schedule */
- irg_walk_graph(session->irg, rescue_used_reloads, NULL, bel);
- for(irn = pset_first(bel->reloads); irn; irn = pset_next(bel->reloads))
- sched_remove(irn);
+ irg_block_walk_graph(chordal_env->irg, decide, NULL, &bel);
+ irg_block_walk_graph(chordal_env->irg, fix_block_borders, NULL, &bel);
+ be_insert_spills_reloads(bel.senv, bel.reloads);
+ remove_unused_reloads(chordal_env->irg, &bel);
/* clean up */
- del_pset(bel->reloads);
- be_delete_spill_env(bel->senv);
- be_end_uses(bel->uses);
- obstack_free(&bel->ob, NULL);
+ del_pset(bel.reloads);
+ be_delete_spill_env(bel.senv);
+ be_end_uses(bel.uses);
+ obstack_free(&bel.ob, NULL);
}