/*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
*
* This file is part of libFirm.
*
* capacity of the blocks to let global variables live through
* them.
*/
-#ifdef HAVE_CONFIG_H
#include "config.h"
-#endif
#include <math.h>
#include <limits.h>
#include "irprintf.h"
#include "execfreq.h"
#include "dfs_t.h"
-#include "xmalloc.h"
#include "beutil.h"
#include "bearch_t.h"
-#include "bespillbelady.h"
#include "besched_t.h"
#include "beirgmod.h"
#include "belive_t.h"
#include "beloopana.h"
#include "beirg_t.h"
#include "bemodule.h"
+#include "bespill.h"
-#include <libcore/lc_opts.h>
-#include <libcore/lc_opts_enum.h>
-#include <libcore/lc_timing.h>
+#include "lc_opts.h"
+#include "lc_opts_enum.h"
#define DBG_SPILL 1
#define DBG_WSETS 2
be_lv_t *lv;
ir_exec_freq *ef;
- ir_node **blocks; /**< Array of all blocks. */
- int n_blocks; /**< Number of blocks in the graph. */
- int n_regs; /**< number of regs in this reg-class */
- workset_t *ws; /**< the main workset used while processing a block. ob-allocated */
- ir_node *instr; /**< current instruction */
- int instr_nr; /**< current instruction number (relative to block start) */
+ ir_node **blocks; /**< Array of all blocks. */
+ int n_blocks; /**< Number of blocks in the graph. */
+ int n_regs; /**< number of regs in this reg-class */
+ workset_t *ws; /**< the main workset used while processing a block. ob-allocated */
+ ir_node *instr; /**< current instruction */
+ int instr_nr; /**< current instruction number (relative to block start) */
- spill_env_t *senv; /**< see bespill.h */
- bitset_t *spilled; /**< bitset to keep all the irns which have already been spilled. */
+ spill_env_t *senv; /**< see bespill.h */
+ bitset_t *spilled; /**< bitset to keep all the irns which have already been spilled. */
+ ir_nodeset_t *extra_spilled; /** All nodes for which a special spill location has been computed. */
} belady_env_t;
static INLINE void workset_insert(belady_env_t *env, workset_t *ws, ir_node *val) {
int i;
/* check for current regclass */
- if (!arch_irn_consider_in_reg_alloc(env->arch, env->cls, val)) {
+ if (!arch_irn_consider_in_reg_alloc(env->cls, val)) {
// DBG((dbg, DBG_WORKSET, "Skipped %+F\n", val));
return;
}
real (non-phi) node. At the beginning
of the global pass, this is 0. */
struct list_head br_head; /**< List head for all bring_in variables. */
+ int free_at_jump; /**< registers free at jump. */
} block_info_t;
res->bl = bl;
res->id = id;
res->exec_freq = get_block_execfreq(bel->ef, bl);
- res->reload_cost = bel->arch->isa->reload_cost * res->exec_freq;
+ res->reload_cost = bel->arch->reload_cost * res->exec_freq;
+ res->free_at_jump = bel->n_regs;
INIT_LIST_HEAD(&res->br_head);
set_irn_link(bl, res);
return res;
or NULL. */
} next_use_t;
-static void *next_use_init(ir_phase *phase, ir_node *irn, void *old)
+static void *next_use_init(ir_phase *phase, const ir_node *irn, void *old)
{
(void) phase;
(void) irn;
int is_remat : 1; /**< Is rematerializable. */
int sect_pressure; /**< Offset to maximum pressure in block. */
struct list_head list;
+ struct list_head sect_list;
+ bring_in_t *sect_head;
};
static INLINE bring_in_t *new_bring_in(block_info_t *bi, ir_node *irn, const next_use_t *use)
br->is_remat = be_is_rematerializable(bi->bel->senv, irn, use->irn);
br->pressure_so_far = bi->pressure;
br->sect_pressure = bi->front_pressure;
+ br->sect_head = br;
INIT_LIST_HEAD(&br->list);
+ INIT_LIST_HEAD(&br->sect_list);
list_add_tail(&br->list, &bi->br_head);
return br;
}
belady_env_t *env = bi->bel;
sched_timestep_t curr_step = sched_get_time_step(env->instr);
next_use_t *use = get_current_use(bi, irn);
- int flags = arch_irn_get_flags(env->arch, irn);
+ int flags = arch_irn_get_flags(irn);
assert(!(flags & arch_irn_flags_ignore));
}
}
} else {
- assert(is_usage || "Defined value already in workset?!?");
+ assert(is_usage && "Defined value already in workset?!?");
DBG((dbg, DBG_DECIDE, "\t\tskip %+F\n", val));
}
}
DBG((dbg, DBG_DECIDE, "\t* defs\n"));
displace(block_info, new_vals, 0);
+ if (is_op_forking(get_irn_op(env->instr))) {
+ for (i = get_irn_arity(env->instr) - 1; i >= 0; --i) {
+ ir_node *op = get_irn_n(env->instr, i);
+ block_info->free_at_jump -= arch_irn_consider_in_reg_alloc(env->cls, op);
+ }
+ }
+
env->instr_nr++;
}
*/
{
- int n_regs = bi->bel->n_regs;
+ int n_regs = bi->free_at_jump;
int len = workset_get_length(end);
int slot = -1;
int i;
* there might be unknowns as operands of phis in that case
* we set the costs to zero, since they won't get spilled.
*/
- if (arch_irn_consider_in_reg_alloc(env->arch, env->cls, op))
+ if (arch_irn_consider_in_reg_alloc(env->cls, op))
c = can_make_available_at_end(ges, pr, op, limit - glob_costs, level + 1);
else
c = 0.0;
for (ia = ges->ia_top; ia != NULL; ia = ia->next) {
switch (ia->act) {
case irn_act_live_through:
- if (is_local_phi(ia->bl, ia->irn)) {
- bitset_add_irn(ges->succ_phis, ia->irn);
- DBG((dbg, DBG_GLOBAL, "\t\tlive through phi kept alive: %+F\n", ia->irn));
+ {
+ block_info_t *bi = get_block_info(ia->bl);
+ bring_in_t *iter;
+
+ if (is_local_phi(ia->bl, ia->irn)) {
+ bitset_add_irn(ges->succ_phis, ia->irn);
+ DBG((dbg, DBG_GLOBAL, "\t\tlive through phi kept alive: %+F\n", ia->irn));
+ }
+
+ list_for_each_entry_reverse(bring_in_t, iter, &bi->br_head, list)
+ ++iter->sect_pressure;
+ ++bi->front_pressure;
}
break;
case irn_act_reload:
DBG((dbg, DBG_GLOBAL, "\t\told pressure: %d, new pressure: %d, end length: %d\n",
bi->pressure, bs->pressure, workset_get_length(bs->end_state)));
bi->pressure = bs->pressure;
- /* TODO: commit front pressure */
bitset_set(ges->committed, bi->id);
}
}
#define block_last_bring_in(bi) list_entry((bi)->br_head.prev, bring_in_t, list)
+#if 0
/**
 * Compute the maximal register pressure observed in a block:
 * the maximum of the pressure recorded up to the block's last
 * bring-in entry and the block's overall end pressure.
 * NOTE(review): this helper is fenced by #if 0 in the surrounding
 * patch, i.e. currently unused — confirm before removing entirely.
 */
static int get_block_max_pressure(const block_info_t *bi)
{
int max = get_max_pressure_so_far(bi, block_last_bring_in(bi));
return MAX(bi->pressure, max);
}
+#endif
/**
* Try to bring a variable into a block.
*/
static void optimize_variable(global_end_state_t *ges, bring_in_t *br)
{
- block_info_t *bi = br->bi;
+ block_info_t *bi = br->bi;
ir_node *irn = br->irn;
ir_node *bl = bi->bl;
belady_env_t *env = ges->env;
assert(front_pressure <= k);
assert(pressure_upto_use <= k);
- DBG((dbg, DBG_GLOBAL, "fixing %+F at %+F (%f), front pr: %d, pr to use: %d, first use: %u\n",
+ DBG((dbg, DBG_GLOBAL, "fixing %+F at %+F (%f), front pr: %d, pr to use: %d, first use: %x\n",
irn, bl, bi->exec_freq, front_pressure, pressure_upto_use, br->first_use));
+ // assert(!is_local_phi(bl, irn) || !bitset_contains_irn(ges->succ_phis, irn));
+
/*
* if we cannot bring the value to the use, let's see if it would be worthwhile
* to bring the value to the beginning of the block to have a better spill
* better _spilled_here will return a node where the value can be spilled after
* or NULL if this block does not provide a better spill location.
*/
- if (pressure_upto_use >= k && front_pressure < k)
+#if 1
+ if (pressure_upto_use >= k && front_pressure < k && !bitset_contains_irn(env->spilled, irn))
better_spill_loc = better_spilled_here(br);
+#endif
/*
* If either we can bring the value to the use or we should try
/* the costs were acceptable... */
if (bring_in_costs < local_costs) {
+ int check = 0;
bring_in_t *iter;
/*
if (is_local_phi(bl, irn))
bitset_add_irn(ges->succ_phis, irn);
- pressure_inc = bi->pressure - pressure_inc;
- assert(pressure_inc >= 0);
-
- DBG((dbg, DBG_GLOBAL, "\t-> bring it in\n"));
+ DBG((dbg, DBG_GLOBAL, "\t-> bring it in.", pressure_inc));
/* second half of case 2 */
if (pressure_upto_use >= k) {
DBG((dbg, DBG_GLOBAL, "\t-> use blocked. local reload: %+F, try spill at: %+F\n",
br->first_use, better_spill_loc));
- be_add_reload2(env->senv, irn, br->first_use, better_spill_loc, env->cls, 1);
+ be_add_reload(env->senv, irn, br->first_use, env->cls, 1);
+ be_add_spill(env->senv, irn, better_spill_loc);
+ ir_nodeset_insert(env->extra_spilled, irn);
}
/*
* the pressure by one more as the brought-in value starts to count.
* Finally, adjust the front pressure as well.
*/
+ pressure_inc = 0;
list_for_each_entry_reverse(bring_in_t, iter, &bi->br_head, list) {
if (iter == br)
pressure_inc += pressure_upto_use < k;
iter->sect_pressure += pressure_inc;
+ check = MAX(check, iter->sect_pressure);
+ DBG((dbg, DBG_GLOBAL, "\tinc section pressure of %+F by %d to %d\n", iter->first_use, pressure_inc, iter->sect_pressure));
}
bi->front_pressure += pressure_inc;
+ assert(MAX(check, bi->front_pressure) <= bi->pressure);
+ DBG((dbg, DBG_GLOBAL, "\t-> result: p: %d, fp: %d\n", bi->pressure, bi->front_pressure));
}
/* case 3: nothing worked. insert normal reload and rollback. */
else {
DBG((dbg, DBG_GLOBAL, "\t-> bring in was too expensive. local reload: %+F\n", br->first_use));
be_add_reload(env->senv, irn, br->first_use, env->cls, 1);
+ bitset_add_irn(env->spilled, irn);
trans_rollback(ges, &trans);
}
}
else {
DBG((dbg, DBG_GLOBAL, "\t-> can\'t do anything but reload before %+F\n", br->first_use));
be_add_reload(env->senv, irn, br->first_use, env->cls, 1);
+ bitset_add_irn(env->spilled, irn);
}
DBG((dbg, DBG_GLOBAL, "\n"));
return res;
}
+
+
+
static void global_assign(belady_env_t *env)
{
+ ir_nodeset_iterator_t iter;
global_end_state_t ges;
bring_in_t **br;
+ ir_node *irn;
int i, j;
/*
if (!is_Phi(irn))
break;
- if (arch_irn_consider_in_reg_alloc(env->arch, env->cls, irn)
+ if (arch_irn_consider_in_reg_alloc(env->cls, irn)
&& !bitset_contains_irn(ges.succ_phis, irn))
be_spill_phi(env->senv, irn);
}
}
+
+ /* check dominance for specially spilled nodes. */
+ foreach_ir_nodeset (env->extra_spilled, irn, iter)
+ make_spill_locations_dominate_irn(env->senv, irn);
}
static void collect_blocks(ir_node *bl, void *data)
env.senv = be_new_spill_env(birg);
env.ef = be_get_birg_exec_freq(birg);
env.spilled = bitset_irg_obstack_alloc(&env.ob, irg);
+ env.extra_spilled = ir_nodeset_new(64);
env.n_blocks = 0;
irg_block_walk_graph(irg, NULL, collect_blocks, &env);
global_assign(&env);
+ /* check dominance for specially spilled nodes. */
+ {
+ ir_nodeset_iterator_t iter;
+ ir_node *irn;
+
+ foreach_ir_nodeset (env.extra_spilled, irn, iter)
+ make_spill_locations_dominate_irn(env.senv, irn);
+ }
+
/* Insert spill/reload nodes into the graph and fix usages */
be_insert_spills_reloads(env.senv);
/* clean up */
be_delete_spill_env(env.senv);
+ ir_nodeset_del(env.extra_spilled);
obstack_free(&env.ob, NULL);
}