/*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
*
* This file is part of libFirm.
*
#include "benode_t.h"
#include "bechordal_t.h"
#include "bejavacoal.h"
-#include "benodesets.h"
#include "bespilloptions.h"
#include "bestatevent.h"
#include "bessaconstr.h"
#include "beirg_t.h"
#include "beintlive_t.h"
#include "bemodule.h"
+#include "be_t.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
static spill_info_t *get_spillinfo(const spill_env_t *env, ir_node *value)
{
spill_info_t info, *res;
- int hash = nodeset_hash(value);
+ int hash = hash_irn(value);
info.to_spill = value;
res = set_find(env->spills, &info, sizeof(info), hash);
void be_add_spill(spill_env_t *env, ir_node *to_spill, ir_node *before)
{
+#if 1
spill_info_t *spill_info = get_spillinfo(env, to_spill);
spill_t *spill;
spill_t *s;
spill_t *last;
+ assert(! arch_irn_is(env->arch_env, to_spill, dont_spill));
DB((dbg, LEVEL_1, "Add spill of %+F before %+F\n", to_spill, before));
+ /* Just for safety make sure that we do not insert the spill in front of a phi */
+ for (; is_Phi(before); before = sched_next(before));
+
/* spills that are dominated by others are not needed */
last = NULL;
s = spill_info->spills;
spill->spill = NULL;
spill_info->spills = spill;
+#endif
}
void be_add_remat(spill_env_t *env, ir_node *to_spill, ir_node *before,
spill_info_t *info;
reloader_t *rel;
+ assert(! arch_irn_is(env->arch_env, to_spill, dont_spill));
+
info = get_spillinfo(env, to_spill);
if (is_Phi(to_spill)) {
return last;
}
+/**
+ * Advance in the schedule past @p node, skipping any Phi and be_Keep
+ * nodes, and return the first "real" node after them.  Used to find a
+ * legal insertion point for a spill after a definition, since spills
+ * must not be placed between a block's Phis/Keeps.
+ */
+static ir_node *skip_keeps_phis(ir_node *node)
+{
+ node = sched_next(node);
+ while(is_Phi(node) || be_is_Keep(node)) {
+ node = sched_next(node);
+ }
+ return node;
+}
+
/**
* Returns the point at which you can insert a node that should be executed
* before block @p block when coming from pred @p pos.
spill = get_spillinfo(env, node);
for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
ir_node *arg = get_irn_n(node, i);
- ir_node *pred_block = get_Block_cfgpred_block(block, i);
- ir_node *insert = be_get_end_of_block_insertion_point(pred_block);
+ ir_node *insert;
//get_spillinfo(env, arg);
+
+ /* some backends have virtual noreg/unknown nodes that are not scheduled
+ * and simply always available. */
+ if(!sched_is_scheduled(arg)) {
+ ir_node *pred_block = get_Block_cfgpred_block(block, i);
+ insert = be_get_end_of_block_insertion_point(pred_block);
+ } else {
+ insert = skip_keeps_phis(arg);
+ }
+
be_add_spill(env, arg, insert);
}
}
* |_|
*/
-static ir_node *skip_keeps_phis(ir_node *node)
-{
- node = sched_next(node);
- while(is_Phi(node) || be_is_Keep(node)) {
- node = sched_next(node);
- }
- return node;
-}
-
static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo);
/**
return;
}
- DBG((dbg, LEVEL_1, "spilling %+F ... ", to_spill));
+ DBG((dbg, LEVEL_1, "spilling %+F ... \n", to_spill));
spill = spillinfo->spills;
for( ; spill != NULL; spill = spill->next) {
ir_node *block = get_block(spill->before);
ir_node *before = spill->before;
/* place all spills before the reloads (as we can't guarantee the
- * same order as the be_add_spill and be_add_reload calls */
- while(get_irn_idx(sched_prev(before)) > env->new_nodes_idx) {
+ * same order as the be_add_spill and be_add_reload calls.
+ * Also make sure that we do not run into Phis when going up. */
+ while(get_irn_idx(sched_prev(before)) > env->new_nodes_idx && !is_Phi(sched_prev(before))) {
before = sched_prev(before);
}
spill->spill = be_spill(env->arch_env, block, to_spill);
sched_add_before(before, spill->spill);
- DB((dbg, LEVEL_1, "\t%+F before %+F,", spill->spill, before));
+ DB((dbg, LEVEL_1, "\t%+F before %+F\n", spill->spill, before));
#ifdef FIRM_STATISTICS
env->spill_count++;
#endif
return env->spill_cost * freq;
}
+/**
+ * Return the cost of making @p to_spill available again before @p before,
+ * WITHOUT any execution-frequency weighting (compare be_get_reload_costs,
+ * which multiplies by block frequency).
+ *
+ * If rematerialization is enabled and rematerializing the value before
+ * @p before is cheaper than a plain reload, the remat cost is returned;
+ * otherwise the environment's flat reload cost.
+ */
+unsigned be_get_reload_costs_no_weight(spill_env_t *env, const ir_node *to_spill,
+ const ir_node *before)
+{
+ if(be_do_remats) {
+ /* is the node rematerializable? */
+ unsigned costs = check_remat_conditions_costs(env, to_spill, before, 0);
+ if(costs < (unsigned) env->reload_cost)
+ return costs;
+ }
+
+ return env->reload_cost;
+}
+
double be_get_reload_costs(spill_env_t *env, ir_node *to_spill, ir_node *before)
{
ir_node *block = get_nodes_block(before);
DB((dbg, LEVEL_1, "spill %+F after definition\n", to_spill));
}
+/**
+ * Ensure that every reload of @p irn is dominated by some spill of it.
+ *
+ * Works on dominance pre-order numbers: mark the block of each reload in
+ * a bitset, then for each existing spill clear the whole pre-order range
+ * of blocks it dominates.  Any bit left set means a reload is reachable
+ * without passing a spill, in which case an extra spill is added right
+ * after the definition of @p irn (which dominates all its uses).
+ *
+ * NOTE(review): assumes dominance information (pre-order numbers) has
+ * been computed for the graph before this is called — confirm at callers.
+ */
+void make_spill_locations_dominate_irn(spill_env_t *env, ir_node *irn)
+{
+ const spill_info_t *si = get_spillinfo(env, irn);
+ ir_node *start_block = get_irg_start_block(get_irn_irg(irn));
+ int n_blocks = get_Block_dom_max_subtree_pre_num(start_block);
+ bitset_t *reloads = bitset_alloca(n_blocks);
+ reloader_t *r;
+ spill_t *s;
+
+ if (si == NULL)
+ return;
+
+ /* Fill the bitset with the dominance pre-order numbers
+ * of the blocks the reloads are located in. */
+ for (r = si->reloaders; r != NULL; r = r->next) {
+ ir_node *bl = get_nodes_block(r->reloader);
+ bitset_set(reloads, get_Block_dom_tree_pre_num(bl));
+ }
+
+ /* Now, cancel out all the blocks that are dominated by each spill.
+ * If the bitset is not empty after that, we have reloads that are
+ * not dominated by any spill. */
+ for (s = si->spills; s != NULL; s = s->next) {
+ ir_node *bl = get_nodes_block(s->before);
+ int start = get_Block_dom_tree_pre_num(bl);
+ int end = get_Block_dom_max_subtree_pre_num(bl);
+
+ bitset_clear_range(reloads, start, end);
+ }
+
+ if (!bitset_is_empty(reloads))
+ be_add_spill(env, si->to_spill, sched_next(si->to_spill));
+}
+
void be_insert_spills_reloads(spill_env_t *env)
{
ir_graph *irg = env->irg;
ir_nodeset_iterator_t iter;
ir_node *node;
+ BE_TIMER_PUSH(t_ra_spill_apply);
+
env->new_nodes_idx = get_irg_last_idx(irg);
/* create all phi-ms first, this is needed so, that phis, hanging on
be_liveness_invalidate(env->birg->lv);
be_remove_dead_nodes_from_schedule(env->birg);
+
+ BE_TIMER_POP(t_ra_spill_apply);
}
void be_init_spill(void)