* @brief Implements a trace scheduler as presented in Muchnik[TM].
* @author Michael Beck
* @date 28.08.2006
- * @version $Id$
*/
#include "config.h"
#include <stdlib.h>
#include "iredges_t.h"
-
+#include "beirg.h"
#include "besched.h"
#include "belistsched.h"
#include "benode.h"
#include "belive.h"
+#include "bemodule.h"
/* we need a special mark: only its unique address is used as a sentinel value,
 * never its contents.
 * NOTE(review): file-scope identifiers beginning with '_' are reserved by the
 * C standard (C11 7.1.3); consider renaming, but confirm no other TU uses it. */
static char _mark;
+/** Per-graph state of the trace scheduler (the old backend-selector callback
+ * fields were removed together with the vtable-based selector interface). */
typedef struct trace_env {
trace_irn_t *sched_info; /**< trace scheduling information about the nodes */
sched_timestep_t curr_time; /**< current time of the scheduler */
- void *selector_env; /**< the backend selector environment */
- const list_sched_selector_t *selector; /**< the actual backend selector */
be_lv_t *liveness; /**< The liveness for the irg */
DEBUG_ONLY(firm_dbg_module_t *dbg;)
} trace_env_t;
*/
static ir_node *get_nodeset_node(const ir_nodeset_t *nodeset)
{
- ir_nodeset_iterator_t iter;
-
- ir_nodeset_iterator_init(&iter, nodeset);
- return ir_nodeset_iterator_next(&iter);
+ /* ir_nodeset_first() returns an arbitrary ("first") element of the set,
+  * or NULL when it is empty -- replaces the manual init/next iterator
+  * sequence of the old API. */
+ return ir_nodeset_first(nodeset);
}
/**
*/
+/* All accessors below index env->sched_info by node index.  get_irn_idx()
+ * returns an unsigned value, so 'idx' is declared unsigned const to keep the
+ * ARR_LEN bounds assertion free of signed/unsigned comparison warnings. */
static inline unsigned is_root_node(trace_env_t *env, ir_node *n)
{
- int idx = get_irn_idx(n);
+ unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
return env->sched_info[idx].is_root;
*/
static inline void mark_root_node(trace_env_t *env, ir_node *n)
{
- int idx = get_irn_idx(n);
+ unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
env->sched_info[idx].is_root = 1;
*/
static inline sched_timestep_t get_irn_delay(trace_env_t *env, ir_node *n)
{
- int idx = get_irn_idx(n);
+ unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
return env->sched_info[idx].delay;
*/
static inline void set_irn_delay(trace_env_t *env, ir_node *n, sched_timestep_t delay)
{
- int idx = get_irn_idx(n);
+ unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
env->sched_info[idx].delay = delay;
*/
static inline sched_timestep_t get_irn_etime(trace_env_t *env, ir_node *n)
{
- int idx = get_irn_idx(n);
+ unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
return env->sched_info[idx].etime;
*/
static inline void set_irn_etime(trace_env_t *env, ir_node *n, sched_timestep_t etime)
{
- int idx = get_irn_idx(n);
+ unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
env->sched_info[idx].etime = etime;
*/
static inline unsigned get_irn_num_user(trace_env_t *env, ir_node *n)
{
- int idx = get_irn_idx(n);
+ unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
return env->sched_info[idx].num_user;
*/
static inline void set_irn_num_user(trace_env_t *env, ir_node *n, unsigned num_user)
{
- int idx = get_irn_idx(n);
+ unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
env->sched_info[idx].num_user = num_user;
*/
static inline int get_irn_reg_diff(trace_env_t *env, ir_node *n)
{
- int idx = get_irn_idx(n);
+ unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
return env->sched_info[idx].reg_diff;
*/
static inline void set_irn_reg_diff(trace_env_t *env, ir_node *n, int reg_diff)
{
- int idx = get_irn_idx(n);
+ unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
env->sched_info[idx].reg_diff = reg_diff;
*/
static inline int get_irn_preorder(trace_env_t *env, ir_node *n)
{
- int idx = get_irn_idx(n);
+ unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
return env->sched_info[idx].preorder;
*/
static inline void set_irn_preorder(trace_env_t *env, ir_node *n, int pos)
{
- int idx = get_irn_idx(n);
+ unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
env->sched_info[idx].preorder = pos;
*/
static inline unsigned get_irn_critical_path_len(trace_env_t *env, ir_node *n)
{
- int idx = get_irn_idx(n);
+ unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
return env->sched_info[idx].critical_path_len;
*/
static inline void set_irn_critical_path_len(trace_env_t *env, ir_node *n, unsigned len)
{
- int idx = get_irn_idx(n);
+ unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
env->sched_info[idx].critical_path_len = len;
*/
static sched_timestep_t exectime(trace_env_t *env, ir_node *n)
{
+ /* env is only needed by the disabled target-specific callback below; the
+  * cast silences the unused-parameter warning while that code is #if 0'd. */
+ (void) env;
if (be_is_Keep(n) || is_Proj(n))
return 0;
+#if 0
if (env->selector->exectime)
return env->selector->exectime(env->selector_env, n);
+#endif
return 1;
}
*/
static sched_timestep_t latency(trace_env_t *env, ir_node *pred, int pred_cycle, ir_node *curr, int curr_cycle)
{
+ /* The cycle arguments are only consumed by the disabled target latency
+  * callback below (#if 0); the casts silence unused-parameter warnings
+  * while keeping the signature intact for when it is reinstated. */
+ (void) pred_cycle;
+ (void) curr_cycle;
/* a Keep hides a root */
if (be_is_Keep(curr))
return exectime(env, pred);
if (is_Proj(curr))
return 0;
+#if 0
/* predecessors Proj's must be skipped */
if (is_Proj(pred))
pred = get_Proj_pred(pred);
if (env->selector->latency)
return env->selector->latency(env->selector_env, pred, pred_cycle, curr, curr_cycle);
+#endif
+
return 1;
}
static int get_num_successors(ir_node *irn)
{
int sum = 0;
- const ir_edge_t *edge;
if (get_irn_mode(irn) == mode_T) {
/* for mode_T nodes: count the users of all Projs */
if (get_irn_mode(irn) == mode_T) {
/* mode_T nodes: num out regs == num Projs with mode datab */
- const ir_edge_t *edge;
+ /* NOTE(review): both manual 'edge' declarations are dropped, so the
+  * foreach_out_edge macro presumably declares its iteration variable
+  * itself now -- confirm against the current iredges_t.h. */
foreach_out_edge(irn, edge) {
ir_node *proj = get_edge_src_irn(edge);
if (mode_is_datab(get_irn_mode(proj)))
*/
static int is_root(ir_node *root, ir_node *block)
{
- const ir_edge_t *edge;
-
foreach_out_edge(root, edge) {
ir_node *succ = get_edge_src_irn(edge);
ir_node *root = NULL, *preord = NULL;
ir_node *curr, *irn;
int cur_pos;
- const ir_edge_t *edge;
/* First step: Find the root set. */
foreach_out_edge(block, edge) {
env->curr_time = 0;
env->sched_info = NEW_ARR_F(trace_irn_t, nn);
- env->liveness = be_liveness(irg);
+ /* Liveness is now fetched from (and owned by) the irg instead of being
+  * computed and owned by this scheduler -- see the matching removal of
+  * be_liveness_free() in trace_free(). */
+ env->liveness = be_get_irg_liveness(irg);
FIRM_DBG_REGISTER(env->dbg, "firm.be.sched.trace");
- be_liveness_assure_chk(env->liveness);
+ be_assure_live_chk(irg);
memset(env->sched_info, 0, nn * sizeof(*(env->sched_info)));
return env;
static void trace_free(void *data)
{
trace_env_t *env = (trace_env_t*)data;
- be_liveness_free(env->liveness);
+ /* liveness is owned by the irg now (be_get_irg_liveness), so it must not
+  * be freed here -- only the per-run scheduling data is ours. */
DEL_ARR_F(env->sched_info);
free(env);
}
*/
static ir_node *basic_selection(ir_nodeset_t *ready_set)
{
- ir_node *irn = NULL;
- ir_nodeset_iterator_t iter;
-
/* assure that branches and constants are executed last */
+ /* NOTE(review): 'irn' and 'iter' are no longer declared manually, so the
+  * foreach_ir_nodeset macro presumably declares them itself -- verify
+  * against the current irnodeset.h. */
foreach_ir_nodeset(ready_set, irn, iter) {
if (!is_cfop(irn)) {
}
/* at last: schedule branches */
- irn = get_nodeset_node(ready_set);
-
- return irn;
+ return get_nodeset_node(ready_set);
}
/**
* The muchnik selector.
+ * (The live_set parameter was dropped from the selector interface; it was
+ * always unused here, as the removed "(void) live_set;" cast shows.)
*/
-static ir_node *muchnik_select(void *block_env, ir_nodeset_t *ready_set, ir_nodeset_t *live_set)
+static ir_node *muchnik_select(void *block_env, ir_nodeset_t *ready_set)
{
trace_env_t *env = (trace_env_t*)block_env;
ir_nodeset_t mcands, ecands;
- ir_nodeset_iterator_t iter;
sched_timestep_t max_delay = 0;
- ir_node *irn;
- (void) live_set;
/* calculate the max delay of all candidates */
foreach_ir_nodeset(ready_set, irn, iter) {
}
/* select a node */
+ /* declared at first use (C99 style) now that the loop above relies on the
+  * variables introduced by foreach_ir_nodeset itself */
+ ir_node *irn;
if (ir_nodeset_size(&mcands) == 1) {
irn = get_nodeset_node(&mcands);
DB((env->dbg, LEVEL_3, "\tirn = %+F, mcand = 1, max_delay = %u\n", irn, max_delay));
return irn;
}
-static void *muchnik_init_graph(const list_sched_selector_t *vtab, ir_graph *irg)
+static void *muchnik_init_graph(ir_graph *irg)
{
trace_env_t *env = trace_init(irg);
- env->selector = vtab;
- env->selector_env = (void*) be_get_irg_arch_env(irg);
+ /* the selector vtable/env are no longer stored: the corresponding fields
+  * were removed from trace_env_t along with the callback interface */
return (void *)env;
}
return graph_env;
}
-const list_sched_selector_t muchnik_selector = {
- muchnik_init_graph,
- muchnik_init_block,
- muchnik_select,
- trace_node_ready, /* node_ready */
- trace_update_time, /* node_selected */
- NULL, /* exectime */
- NULL, /* latency */
- NULL, /* finish_block */
- trace_free /* finish_graph */
-};
+/**
+ * Schedules irg with the Muchnik trace selector.  The vtable, formerly a
+ * globally exported symbol, is now a function-local static handed to
+ * be_list_sched_graph(); the exectime/latency slots no longer exist in
+ * list_sched_selector_t.
+ */
+static void sched_muchnik(ir_graph *irg)
+{
+ static const list_sched_selector_t muchnik_selector = {
+ muchnik_init_graph,
+ muchnik_init_block,
+ muchnik_select,
+ trace_node_ready, /* node_ready */
+ trace_update_time, /* node_selected */
+ NULL, /* finish_block */
+ trace_free /* finish_graph */
+ };
+ be_list_sched_graph(irg, &muchnik_selector);
+}
/**
* Execute the heuristic function.
+ * (The lv live-set parameter was removed together with the interface change;
+ * register pressure is temporarily stubbed out below.)
*/
-static ir_node *heuristic_select(void *block_env, ir_nodeset_t *ns, ir_nodeset_t *lv)
+static ir_node *heuristic_select(void *block_env, ir_nodeset_t *ns)
{
trace_env_t *trace_env = (trace_env_t*)block_env;
- ir_node *irn, *cand = NULL;
+ ir_node *cand = NULL;
int max_prio = INT_MIN;
int cur_prio = INT_MIN;
- int cur_pressure = ir_nodeset_size(lv);
- int reg_fact, cand_reg_fact;
- ir_nodeset_iterator_t iter;
+ int reg_fact;
+ /* Note: register pressure calculation needs an overhaul, you need correct
+ * tracking for each register class individually and weight by each class
+ int cur_pressure = ir_nodeset_size(lv); */
+ /* NOTE(review): with cur_pressure fixed at 1, reg_fact == chg below and the
+  * overflow clamp can never fire -- placeholder until pressure tracking
+  * returns. */
+ int cur_pressure = 1;
/* prefer instructions which can be scheduled early */
#define PRIO_TIME 3
int sign = rdiff < 0;
int chg = (rdiff < 0 ? -rdiff : rdiff) << PRIO_CHG_PRESS;
- /* reg_fact = chg << cur_pressure; */
reg_fact = chg * cur_pressure;
if (reg_fact < chg)
reg_fact = INT_MAX - 2;
if (cur_prio > max_prio) {
cand = irn;
max_prio = cur_prio;
- cand_reg_fact = reg_fact;
}
DBG((trace_env->dbg, LEVEL_4, "checked NODE %+F\n", irn));
return cand;
}
-const list_sched_selector_t heuristic_selector = {
- muchnik_init_graph,
- muchnik_init_block,
- heuristic_select,
- trace_node_ready, /* node_ready */
- trace_update_time, /* node_selected */
- NULL, /* exectime */
- NULL, /* latency */
- NULL, /* finish_block */
- trace_free /* finish_graph */
-};
+/**
+ * Schedules irg with the priority-heuristic selector.  Mirrors sched_muchnik:
+ * the formerly exported vtable becomes a function-local static passed to
+ * be_list_sched_graph().
+ */
+static void sched_heuristic(ir_graph *irg)
+{
+ static const list_sched_selector_t heuristic_selector = {
+ muchnik_init_graph,
+ muchnik_init_block,
+ heuristic_select,
+ trace_node_ready, /* node_ready */
+ trace_update_time, /* node_selected */
+ NULL, /* finish_block */
+ trace_free /* finish_graph */
+ };
+ be_list_sched_graph(irg, &heuristic_selector);
+}
+
+/** Module constructor: registers both trace schedulers under the names used
+ * on the command line ("heur", "muchnik"). */
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_sched_trace)
+void be_init_sched_trace(void)
+{
+ be_register_scheduler("heur", sched_heuristic);
+ be_register_scheduler("muchnik", sched_muchnik);
+}