* @date 20.10.2004
* @author Sebastian Hack
*/
+
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <string.h>
#include <limits.h>
-#include "fourcc.h"
+#include "benode_t.h"
+
#include "obst.h"
#include "list.h"
#include "iterator.h"
}
#endif
+/**
+ * The trivial selector (new, reduced callback signature: per-block
+ * environment and ready set only): simply hand back the first node the
+ * ready-set iteration yields, aborting the iteration before returning.
+ */
-static ir_node *trivial_select(void *env, void *block_env,
-				const struct list_head *sched_head,
-				int curr_time, pset *ready_set)
+static ir_node *trivial_select(void *block_env, pset *ready_set)
{
	ir_node *res;
-#if 0
-	int i, n = pset_count(ready_set);
-	ir_node *irn;
-	ir_node **ready = alloca(n * sizeof(ready[0]));
-
-	for(irn = pset_first(ready_set); irn; irn = pset_next(ready_set))
-		ready[i++] = irn;
-#endif
-
	res = pset_first(ready_set);
	pset_break(ready_set);
	return res;
}
+/**
+ * Decide whether a node must show up in the schedule list.
+ * The selector's optional to_appear_in_schedule() hook is consulted first;
+ * independently of its answer, nodes passing the generic
+ * to_appear_in_schedule() test and Keep nodes are always scheduled.
+ */
+static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
+{
+	int res = 0;
+
+	if(sel->to_appear_in_schedule)
+		res = sel->to_appear_in_schedule(block_env, irn);
+
+	return res || to_appear_in_schedule(irn) || be_is_Keep(irn);
+}
+
+/* Vtable of the trivial selector: only select() is implemented. The extra
+ * NULL added below accounts for a newly introduced callback slot in
+ * list_sched_selector_t (NOTE(review): verify its position matches the
+ * struct declaration, which is not visible in this hunk). */
static const list_sched_selector_t trivial_selector_struct = {
	NULL,
	NULL,
	trivial_select,
	NULL,
+	NULL,
	NULL
};
struct obstack obst;
usage_stats_t *root;
pset *already_scheduled;
+ const list_sched_selector_t *vtab;
} reg_pressure_selector_env_t;
static INLINE usage_stats_t *get_or_set_usage_stats(reg_pressure_selector_env_t *env, ir_node *irn)
return us;
}
-static int max_hops_walker(ir_node *irn, ir_node *tgt, int depth, unsigned visited_nr)
+static int max_hops_walker(reg_pressure_selector_env_t *env, ir_node *irn, ir_node *curr_bl, int depth, unsigned visited_nr)
{
-	int i, n;
-	int res = 0;
-
-	if(irn != tgt) {
-		res = INT_MAX;
+	ir_node *bl = get_nodes_block(irn);
+	/*
+	 * If the reached node is not in the block desired,
+	 * return the value passed for this situation.
+	 */
+	/* NOTE(review): 'bl' was just set to get_nodes_block(irn), so this
+	 * condition is always false — it presumably should read
+	 * 'bl != curr_bl'. Note also that compute_max_hops() passes a node
+	 * (irn), not a block, as curr_bl; verify the block_dominates() call
+	 * against the full file before relying on this path. */
+	if(get_nodes_block(irn) != bl)
+		return block_dominates(bl, curr_bl) ? 0 : INT_MAX;
+	/*
+	 * If the node is in the current block but not
+	 * yet scheduled, we keep on searching from that node.
+	 */
+	if(!pset_find_ptr(env->already_scheduled, irn)) {
+		int i, n;
+		int res = 0;
		for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
			ir_node *operand = get_irn_n(irn, i);
			int tmp;
			set_irn_visited(operand, visited_nr);
-			tmp = max_hops_walker(operand, tgt, depth + 1, visited_nr);
+			tmp = max_hops_walker(env, operand, bl, depth + 1, visited_nr);
			res = MAX(tmp, res);
		}
	}
+
+	return res;
}
-	return res;
+	/* NOTE(review): the hunk appears to close the function above and then
+	 * continue with further statements — the brace structure is
+	 * inconsistent; re-check this hunk against the complete file. */
+	/*
+	 * If the node is in the current block and scheduled, return
+	 * the depth which indicates the number of steps to the
+	 * region of scheduled nodes.
+	 */
+	return depth;
}
+/**
+ * Estimate how far irn's users are from the already-scheduled region:
+ * walk every user (one fresh visited number per user) and take the maximum
+ * value max_hops_walker() reports. The baseline changes from INT_MAX to 0,
+ * so a node without users now yields cost 0.
+ */
static int compute_max_hops(reg_pressure_selector_env_t *env, ir_node *irn)
{
	ir_node *bl = get_nodes_block(irn);
	ir_graph *irg = get_irn_irg(bl);
-	int res = INT_MAX;
+	int res = 0;
	const ir_edge_t *edge;
	foreach_out_edge(irn, edge) {
-		ir_node *user = get_edge_src_irn(edge);
+		ir_node *user       = get_edge_src_irn(edge);
+		unsigned visited_nr = get_irg_visited(irg) + 1;
+		int max_hops;
-		if(get_nodes_block(user) == bl && !pset_find_ptr(env->already_scheduled, user)) {
-			unsigned visited_nr = get_irg_visited(irg) + 1;
-			int max_hops;
-
-			set_irg_visited(irg, visited_nr);
-			max_hops = max_hops_walker(user, irn, 0, visited_nr);
-			res = MAX(res, max_hops);
-		}
+		set_irg_visited(irg, visited_nr);
+		max_hops = max_hops_walker(env, user, irn, 0, visited_nr);
+		res = MAX(res, max_hops);
	}
	return res;
}
+/**
+ * Graph init for the register-pressure selector: clear all link fields and
+ * pass the selector vtable through as the graph environment (it is picked
+ * up again as graph_env in reg_pressure_block_init()).
+ */
-static void *reg_pressure_graph_init(const arch_isa_t *isa, ir_graph *irg)
+static void *reg_pressure_graph_init(const list_sched_selector_t *vtab, const arch_isa_t *isa, ir_graph *irg)
{
	irg_walk_graph(irg, firm_clear_link, NULL, NULL);
-	return NULL;
+	return (void *) vtab;
}
+/**
+ * Per-block setup for the register-pressure selector: allocate the block
+ * environment, remember the selector vtable (arrives via graph_env from
+ * reg_pressure_graph_init()) and collect usage statistics for the
+ * schedulable operands of every node in the block.
+ */
static void *reg_pressure_block_init(void *graph_env, ir_node *bl)
{
	ir_node *irn;
-	reg_pressure_selector_env_t *env = xmalloc(sizeof(env[0]));
+	reg_pressure_selector_env_t *env = xmalloc(sizeof(env[0]));
	obstack_init(&env->obst);
-	env->root = NULL;
	env->already_scheduled = pset_new_ptr(32);
+	env->root = NULL;
+	env->vtab = graph_env;
	/*
	 * Collect usage statistics.
	 */
	sched_foreach(bl, irn) {
-		if(to_appear_in_schedule(irn)) {
+		if(must_appear_in_schedule(env->vtab, env, irn)) {
			int i, n;
			for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
				ir_node *op = get_irn_n(irn, i);
-				if(to_appear_in_schedule(op)) {
+				if(must_appear_in_schedule(env->vtab, env, op)) { /* test the operand, not irn */
					usage_stats_t *us = get_or_set_usage_stats(env, irn);
					if(is_live_end(bl, op))
						us->uses_in_block = 99999;
	return env;
}
+/* Tear down the per-block environment of the register-pressure selector
+ * (new signature: only the block environment is passed). */
-static void reg_pressure_block_free(void *graph_env, void *block_env, ir_node *bl)
+static void reg_pressure_block_free(void *block_env)
{
	reg_pressure_selector_env_t *env = block_env;
	usage_stats_t *us;
+	/* NOTE(review): only the env struct itself is freed in the lines
+	 * visible here; if env->obst and env->already_scheduled are not
+	 * released in context lines outside this hunk, this leaks — verify
+	 * against the full function. */
	free(env);
}
+/**
+ * Sum up the max-hops cost of a node's data results.
+ * For a mode_T node, recurse into its users (normally Proj nodes) and add
+ * their sums; for a data-mode node, use compute_max_hops() directly.
+ * Nodes of any other mode contribute 0.
+ */
+static int get_result_hops_sum(reg_pressure_selector_env_t *env, ir_node *irn)
+{
+	int res = 0;
+	if(get_irn_mode(irn) == mode_T) {
+		const ir_edge_t *edge;
+
+		foreach_out_edge(irn, edge)
+			res += get_result_hops_sum(env, get_edge_src_irn(edge));
+	}
+
+	else if(mode_is_data(get_irn_mode(irn)))
+		res = compute_max_hops(env, irn);
+
+
+	return res;
+}
+
+/* Cost of scheduling irn now: max-hops of every schedulable operand plus
+ * the hops sum of the node's own results. NOTE(review): 'sum' is declared
+ * and initialized in context lines not visible in this hunk. */
static INLINE int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
{
	int i, n;
	for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
		ir_node *op = get_irn_n(irn, i);
-		if(to_appear_in_schedule(op))
+		if(must_appear_in_schedule(env->vtab, env, op))
			sum += compute_max_hops(env, op);
	}
+	sum += get_result_hops_sum(env, irn);
+
	return sum;
}
+/* Register-pressure driven select callback; now static and using the new,
+ * reduced signature (block env + ready set). Body continues past this
+ * hunk. */
-ir_node *reg_pressure_select(void *graph_env, void *block_env,
-			const struct list_head *sched_head,
-			int curr_time, pset *ready_set)
+static ir_node *reg_pressure_select(void *block_env, pset *ready_set)
{
	reg_pressure_selector_env_t *env = block_env;
	ir_node *irn, *res = NULL;
reg_pressure_graph_init,
reg_pressure_block_init,
reg_pressure_select,
+ NULL,
reg_pressure_block_free,
NULL
};
memset(&env, 0, sizeof(env));
selector = env.selector = isa->impl->get_list_sched_selector(isa);
- env.selector_env = selector->init_graph ? selector->init_graph(isa, irg) : NULL;
+ env.selector_env = selector->init_graph ? selector->init_graph(selector, isa, irg) : NULL;
env.irg = irg;
/* Assure, that the out edges are computed */
irg_block_walk_graph(irg, list_sched_block, NULL, &env);
if(selector->finish_graph)
- selector->finish_graph(env.selector_env, irg);
+ selector->finish_graph(env.selector_env);
}
pset *already_scheduled;
ir_node *block;
firm_dbg_module_t *dbg;
+ const list_sched_selector_t *selector;
+ void *selector_block_env;
} block_sched_env_t;
/**
/**
* Append an instruction to a schedule.
- * @param env The block scheduleing environment.
+ * @param env The block scheduling environment.
* @param irn The node to add to the schedule.
- * @return The given node.
+ * @return The given node.
*/
static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
{
	/* If the node consumes/produces data, it is appended to the schedule
	 * list, otherwise, it is not put into the list */
+	/* The selector-aware predicate is consulted now, so a selector's
+	 * to_appear_in_schedule() hook can force extra nodes into the list. */
-	if(to_appear_in_schedule(irn)) {
+	if(must_appear_in_schedule(env->selector, env->selector_block_env, irn)) {
		sched_info_t *info = get_irn_sched_info(irn);
		INIT_LIST_HEAD(&info->list);
		info->scheduled = 1;
	return irn;
}
-
/**
* Add the proj nodes of a tuple-mode irn to the schedule immediately
* after the tuple-moded irn. By pinning the projs after the irn, no
}
}
+/**
+ * Pick the next node to schedule: ready Keep nodes take absolute priority
+ * (the pset iteration is aborted as soon as one is found); only if no Keep
+ * is ready is the selector's select() callback asked.
+ */
+static ir_node *select_node(block_sched_env_t *be)
+{
+	ir_node *irn;
+
+	for(irn = pset_first(be->ready_set); irn; irn = pset_next(be->ready_set)) {
+		if(be_is_Keep(irn)) {
+			pset_break(be->ready_set);
+			return irn;
+		}
+	}
+
+	return be->selector->select(be->selector_block_env, be->ready_set);
+}
+
/**
* Perform list scheduling on a block.
*
*/
static void list_sched_block(ir_node *block, void *env_ptr)
{
-	void *block_env = NULL;
	sched_env_t *env = env_ptr;
	block_sched_env_t be;
	const list_sched_selector_t *selector = env->selector;
	const ir_edge_t *edge;
	ir_node *irn;
+	ir_node *start_node = get_irg_start(get_irn_irg(block));
+	ir_node *final_jmp = NULL;
	int j, m;
	int phi_seen = 0;
	sched_info_t *info = get_irn_sched_info(block);
	INIT_LIST_HEAD(&info->list);
	/* Initialize the block scheduling environment */
-	be.dbg = firm_dbg_register("firm.be.sched");
-	be.block = block;
-	be.curr_time = 0;
-	be.ready_set = new_pset(node_cmp_func, get_irn_n_edges(block));
+	be.dbg               = firm_dbg_register("firm.be.sched");
+	be.block             = block;
+	be.curr_time         = 0;
+	be.ready_set         = new_pset(node_cmp_func, get_irn_n_edges(block));
	be.already_scheduled = new_pset(node_cmp_func, get_irn_n_edges(block));
-
-	firm_dbg_set_mask(be.dbg, 0);
+	be.selector = selector;
	if(selector->init_block)
-		block_env = selector->init_block(env->selector_env, block);
+		be.selector_block_env = selector->init_block(env->selector_env, block);
+	/* NOTE(review): if selector->init_block is NULL, be.selector_block_env
+	 * is never written in the visible lines — verify that 'be' is
+	 * zero-initialized outside this hunk before the env is passed to
+	 * must_appear_in_schedule()/select_node(). */
	DBG((be.dbg, LEVEL_1, "scheduling %+F\n", block));
		phi_seen = 1;
	}
+	/* The Start node is scheduled immediately, together with its Projs. */
+	else if(irn == start_node) {
+		add_to_sched(&be, irn);
+		add_tuple_projs(&be, irn);
+	}
+
+	/* A block-terminating Jmp is remembered and appended after all other
+	 * nodes have been scheduled (see below). */
+	else if(get_irn_opcode(irn) == iro_Jmp) {
+		final_jmp = irn;
+	}
+
	/* Other nodes must have all operands in other blocks to be made
	 * ready */
	else {
	while(pset_count(be.ready_set) > 0) {
		/* select a node to be scheduled and check if it was ready */
-		irn = selector->select(env->selector_env, block_env, &info->list, be.curr_time, be.ready_set);
+		irn = select_node(&be);
		DBG((be.dbg, LEVEL_3, "\tpicked node %+F\n", irn));
	}
	if(selector->finish_block)
-		selector->finish_block(env->selector_env, block_env, block);
+		selector->finish_block(be.selector_block_env);
+
+	if(final_jmp)
+		add_to_sched(&be, final_jmp);
	del_pset(be.ready_set);
	del_pset(be.already_scheduled);