/**
* Scheduling algorithms.
 * A simple list scheduling algorithm.
- * @date 20.10.2004
+ * @date 20.10.2004
* @author Sebastian Hack
+ * @cvs-id $Id$
*/
#ifdef HAVE_CONFIG_H
#include <limits.h>
#include "benode_t.h"
+#include "be_t.h"
#include "obst.h"
#include "list.h"
* All scheduling info needed per node.
*/
typedef struct _sched_irn_t {
- sched_timestep_t delay; /**< The delay for this node if already calculated, else 0. */
- sched_timestep_t etime; /**< The earliest time of this node. */
- unsigned already_sched : 1; /**< Set if this node is already scheduled */
- unsigned is_root : 1; /**< is a root node of a block */
+ sched_timestep_t delay; /**< The delay for this node if already calculated, else 0. */
+ sched_timestep_t etime; /**< The earliest time of this node. */
+ unsigned num_user; /**< The number of real users (mode datab) of this node */
+ unsigned num_not_sched_user; /**< The number of not yet scheduled users of this node */
+ int reg_diff; /**< The difference of num(out registers) - num(in registers) */
+ int preorder; /**< The pre-order position */
+ unsigned critical_path_len; /**< The weighted length of the longest critical path */
+ unsigned already_sched : 1; /**< Set if this node is already scheduled */
+ unsigned is_root : 1; /**< is a root node of a block */
} sched_irn_t;
/**
const arch_env_t *arch_env; /**< The architecture environment. */
const ir_graph *irg; /**< The graph to schedule. */
void *selector_env; /**< A pointer to give to the selector. */
+ int sel_strategy; /**< Node selection strategy (Muchnick, Mueller, ISA) */
} sched_env_t;
#if 0
{
const arch_env_t *arch_env = block_env;
ir_node *irn = NULL;
- int const_last = 0;
/* assure that branches and constants are executed last */
for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
- arch_irn_class_t irn_class = arch_irn_classify(arch_env, irn);
-
- if (irn_class != arch_irn_class_branch && (const_last ? (irn_class != arch_irn_class_const) : 1)) {
+ if (! arch_irn_class_is(arch_env, irn, branch)) {
nodeset_break(ready_set);
return irn;
}
}
- /* assure that constants are executed before branches */
- if (const_last) {
- for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
- if (arch_irn_classify(arch_env, irn) != arch_irn_class_branch) {
- nodeset_break(ready_set);
- return irn;
- }
- }
- }
-
/* at last: schedule branches */
irn = nodeset_first(ready_set);
int i, n;
for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
- ir_node *op = get_irn_n(irn, i);
+ //ir_node *op = get_irn_n(irn, i);
if(must_appear_in_schedule(env->main_env->vtab, env, irn)) {
usage_stats_t *us = get_or_set_usage_stats(env, irn);
+#if 0 /* Liveness is not computed here! */
if(is_live_end(bl, op))
us->uses_in_block = 99999;
else
+#endif
us->uses_in_block++;
}
}
Ignore branch instructions for the time being.
They should only be scheduled if there is nothing else.
*/
- if (arch_irn_classify(env->main_env->arch_env, irn) != arch_irn_class_branch) {
+ if (! arch_irn_class_is(env->main_env->arch_env, irn, branch)) {
int costs = reg_pr_costs(env, irn);
if (costs <= curr_cost) {
res = irn;
nodeset *cands; /**< the set of candidates */
ir_node *block; /**< the current block */
sched_env_t *sched_env; /**< the scheduler environment */
+ nodeset *live; /**< simple liveness during scheduling */
const list_sched_selector_t *selector;
void *selector_block_env;
DEBUG_ONLY(firm_dbg_module_t *dbg;)
/**
* Get the current delay.
*/
-static sched_timestep_t get_irn_delay(block_sched_env_t *env, ir_node *n) {
+static INLINE sched_timestep_t get_irn_delay(block_sched_env_t *env, ir_node *n) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Set the current delay.
*/
-static void set_irn_delay(block_sched_env_t *env, ir_node *n, sched_timestep_t delay) {
+static INLINE void set_irn_delay(block_sched_env_t *env, ir_node *n, sched_timestep_t delay) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Get the current etime.
*/
-static sched_timestep_t get_irn_etime(block_sched_env_t *env, ir_node *n) {
+static INLINE sched_timestep_t get_irn_etime(block_sched_env_t *env, ir_node *n) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Set the current etime.
*/
-static void set_irn_etime(block_sched_env_t *env, ir_node *n, sched_timestep_t etime) {
+static INLINE void set_irn_etime(block_sched_env_t *env, ir_node *n, sched_timestep_t etime) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
env->sched_info[idx].etime = etime;
}
+/**
+ * Get the number of users.
+ */
+static INLINE unsigned get_irn_num_user(block_sched_env_t *env, ir_node *n) {
+ int idx = get_irn_idx(n);
+
+ assert(idx < ARR_LEN(env->sched_info));
+ return env->sched_info[idx].num_user;
+}
+
+/**
+ * Set the number of users.
+ */
+static INLINE void set_irn_num_user(block_sched_env_t *env, ir_node *n, unsigned num_user) {
+ int idx = get_irn_idx(n);
+
+ assert(idx < ARR_LEN(env->sched_info));
+ env->sched_info[idx].num_user = num_user;
+}
+
+/**
+ * Get the register difference.
+ */
+static INLINE int get_irn_reg_diff(block_sched_env_t *env, ir_node *n) {
+ int idx = get_irn_idx(n);
+
+ assert(idx < ARR_LEN(env->sched_info));
+ return env->sched_info[idx].reg_diff;
+}
+
+/**
+ * Set the register difference.
+ */
+static INLINE void set_irn_reg_diff(block_sched_env_t *env, ir_node *n, int reg_diff) {
+ int idx = get_irn_idx(n);
+
+ assert(idx < ARR_LEN(env->sched_info));
+ env->sched_info[idx].reg_diff = reg_diff;
+}
+
+/**
+ * Get the pre-order position.
+ */
+static INLINE int get_irn_preorder(block_sched_env_t *env, ir_node *n) {
+ int idx = get_irn_idx(n);
+
+ assert(idx < ARR_LEN(env->sched_info));
+ return env->sched_info[idx].preorder;
+}
+
+/**
+ * Set the pre-order position.
+ */
+static INLINE void set_irn_preorder(block_sched_env_t *env, ir_node *n, int pos) {
+ int idx = get_irn_idx(n);
+
+ assert(idx < ARR_LEN(env->sched_info));
+ env->sched_info[idx].preorder = pos;
+}
+
+/**
+ * Get the critical path length.
+ */
+static INLINE unsigned get_irn_critical_path_len(block_sched_env_t *env, ir_node *n) {
+ int idx = get_irn_idx(n);
+
+ assert(idx < ARR_LEN(env->sched_info));
+ return env->sched_info[idx].critical_path_len;
+}
+
+/**
+ * Set the critical path length.
+ */
+static INLINE void set_irn_critical_path_len(block_sched_env_t *env, ir_node *n, unsigned len) {
+ int idx = get_irn_idx(n);
+
+ assert(idx < ARR_LEN(env->sched_info));
+ env->sched_info[idx].critical_path_len = len;
+}
+
/**
 * Returns the exec-time for node n.
*/
*/
static sched_timestep_t latency(sched_env_t *env, ir_node *pred, int pred_cycle, ir_node *curr, int curr_cycle) {
/* a Keep hides a root */
- if (be_is_Keep(curr))
+ if (be_is_Keep(curr))
return exectime(env, pred);
/* Proj's are executed immediately */
if (is_Proj(curr))
- return 0;
+ return 0;
/* predecessors Proj's must be skipped */
- if (is_Proj(pred))
- pred = get_Proj_pred(pred);
+ if (is_Proj(pred))
+ pred = get_Proj_pred(pred);
if (env->selector->latency)
return env->selector->latency(env->selector_env, pred, pred_cycle, curr, curr_cycle);
*/
static INLINE int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
{
- int i, n;
- sched_timestep_t etime_p, etime;
+ int i, n;
+ sched_timestep_t etime_p, etime;
/* Blocks cannot be scheduled. */
if (is_Block(irn))
nodeset_insert(env->cands, irn);
- /* calculate the etime of this node */
- etime = env->curr_time;
- if (pred) {
- etime_p = get_irn_etime(env, pred);
- etime += latency(env->sched_env, pred, 1, irn, 0);
+ /* calculate the etime of this node */
+ etime = env->curr_time;
+ if (pred) {
+ etime_p = get_irn_etime(env, pred);
+ etime += latency(env->sched_env, pred, 1, irn, 0);
- etime = etime_p > etime ? etime_p : etime;
- }
+ etime = etime_p > etime ? etime_p : etime;
+ }
- set_irn_etime(env, irn, etime);
+ set_irn_etime(env, irn, etime);
DB((env->dbg, LEVEL_2, "\tmaking ready: %+F etime %u\n", irn, etime));
}
/**
- * Compare to nodes using pointer equality.
- * @param p1 Node one.
- * @param p2 Node two.
- * @return 0 if they are identical.
+ * Returns the number of not yet scheduled users.
*/
-static int node_cmp_func(const void *p1, const void *p2)
-{
- return p1 != p2;
+static INLINE int get_irn_not_sched_user(block_sched_env_t *env, ir_node *n) {
+ int idx = get_irn_idx(n);
+
+ assert(idx < ARR_LEN(env->sched_info));
+ return env->sched_info[idx].num_not_sched_user;
+}
+
+/**
+ * Sets the number of not yet scheduled users.
+ */
+static INLINE void set_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
+ int idx = get_irn_idx(n);
+
+ assert(idx < ARR_LEN(env->sched_info));
+ env->sched_info[idx].num_not_sched_user = num;
+}
+
+/**
+ * Adds @p num to the number of not yet scheduled users and returns the result.
+ */
+static INLINE int add_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
+ int idx = get_irn_idx(n);
+
+ assert(idx < ARR_LEN(env->sched_info));
+ env->sched_info[idx].num_not_sched_user += num;
+ return env->sched_info[idx].num_not_sched_user;
+}
+
+/**
+ * Returns the number of users of a node having mode datab.
+ */
+static int get_num_successors(ir_node *irn) {
+ int sum = 0;
+ const ir_edge_t *edge;
+
+ if (get_irn_mode(irn) == mode_T) {
+ /* for mode_T nodes: count the users of all Projs */
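+ /* (e.g. a Load yields a mode_T tuple: the users of its data Proj are counted, its mode_M Proj is skipped) */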
+ foreach_out_edge(irn, edge) {
+ ir_node *proj = get_edge_src_irn(edge);
+ ir_mode *mode = get_irn_mode(proj);
+
+ if (mode == mode_T)
+ sum += get_num_successors(proj);
+ else if (mode_is_datab(mode))
+ sum += get_irn_n_edges(proj);
+ }
+ }
+ else {
+ /* do not count keep-alive edges */
+ foreach_out_edge(irn, edge) {
+ if (get_irn_opcode(get_edge_src_irn(edge)) != iro_End)
+ sum++;
+ }
+ }
+
+ return sum;
+}
+
+/**
+ * Adds @p irn to the live set, notifies all of its operands that one of
+ * their users has been scheduled, and counts its not yet scheduled users.
+ */
+static void update_sched_liveness(block_sched_env_t *env, ir_node *irn) {
+ int i;
+
+ /* ignore Projs */
+ if (is_Proj(irn))
+ return;
+
+ for (i = get_irn_arity(irn) - 1; i >= 0; i--) {
+ ir_node *in = get_irn_n(irn, i);
+
+ /* if in is a proj: update predecessor */
+ while (is_Proj(in))
+ in = get_Proj_pred(in);
+
+ /* if in is still in the live set: reduce number of users by one */
+ if (nodeset_find(env->live, in)) {
+ if (add_irn_not_sched_user(env, in, -1) <= 0)
+ nodeset_remove(env->live, in);
+ }
+ }
+
+ /*
+ get_num_successors returns the number of all users. This includes
+ users in different blocks as well. Since each block is scheduled
+ separately, the liveness info of those users is never updated, so
+ these users keep up the register pressure, as desired.
+ */
+ i = get_num_successors(irn);
+ if (i > 0) {
+ set_irn_not_sched_user(env, irn, i);
+ nodeset_insert(env->live, irn);
+ }
+}
+
+/**
+ * Returns the current register pressure for the current block
+ * while scheduling.
+ * This is the number of already scheduled nodes that still have
+ * not yet scheduled users.
+ */
+static INLINE int get_cur_reg_pressure(block_sched_env_t *env) {
+ /*
+ Nodes with all users scheduled are removed from live set,
+ so the number of nodes in this set represent the current
+ register pressure in this block.
+ */
+ return nodeset_count(env->live);
}
/**
sched_info_t *info = get_irn_sched_info(irn);
INIT_LIST_HEAD(&info->list);
info->scheduled = 1;
+ update_sched_liveness(env, irn);
sched_add_before(env->block, irn);
DBG((env->dbg, LEVEL_2, "\tadding %+F\n", irn));
}
}
+/**
+ * Returns the register difference of @p irn, i.e. regs_output - regs_input.
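+ * For example, an Add with two register operands and one result yields
+ * 1 - 2 = -1, i.e. scheduling it tends to decrease the pressure by one.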
+ */
+static int get_reg_difference(block_sched_env_t *be, ir_node *irn) {
+ int num_out = 0;
+ int num_in = 0;
+ int i;
+
+ if (get_irn_mode(irn) == mode_T) {
+ /* mode_T nodes: num out regs == num Projs with mode datab */
+ const ir_edge_t *edge;
+ foreach_out_edge(irn, edge) {
+ ir_node *proj = get_edge_src_irn(edge);
+ if (mode_is_datab(get_irn_mode(proj)))
+ num_out++;
+ }
+ }
+ else
+ num_out = 1;
+
+ /* num in regs: number of ins with mode datab and not ignore */
+ for (i = get_irn_arity(irn) - 1; i >= 0; i--) {
+ ir_node *in = get_irn_n(irn, i);
+ if (mode_is_datab(get_irn_mode(in)) && ! arch_irn_is(be->sched_env->arch_env, in, ignore))
+ num_in++;
+ }
+
+ return num_out - num_in;
+}
+
/**
 * Execute the heuristic function.
*/
static ir_node *select_node_heuristic(block_sched_env_t *be, nodeset *ns)
{
- ir_node *irn;
-
- for (irn = nodeset_first(ns); irn; irn = nodeset_next(ns)) {
+ ir_node *irn, *cand = NULL;
+
+/* prefer instructions which can be scheduled early */
+#define PRIO_TIME 16
+/* prefer instructions with lots of successors */
+#define PRIO_NUMSUCCS 8
+/* prefer instructions with long critical path */
+#define PRIO_LEVEL 12
+/* prefer instructions coming early in preorder */
+#define PRIO_PREORD 8
+/* weight of current register pressure */
+#define PRIO_CUR_PRESS 20
+/* weight of register pressure difference */
+#define PRIO_CHG_PRESS 8
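+
+/*
+ * The priority of a candidate computed below is roughly:
+ *   prio(n) = (critical_path_len(n) << PRIO_LEVEL)
+ *           + (num_user(n)          << PRIO_NUMSUCCS)
+ *           - (etime(n)             << PRIO_TIME)
+ *           - reg_fact(n)
+ *           + (preorder(n)          << PRIO_PREORD)
+ * where reg_fact(n) weights the node's register pressure change by the
+ * current pressure; the candidate with the maximum priority is selected.
+ */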
+
+ /* schedule Keeps as early as possible */
+ foreach_nodeset(ns, irn) {
if (be_is_Keep(irn)) {
nodeset_break(ns);
return irn;
}
}
- return be->selector->select(be->selector_block_env, ns);
+ if (be->sched_env->sel_strategy & BE_SCHED_SELECT_HEUR) {
+ int max_prio = INT_MIN;
+ int cur_prio = INT_MIN;
+ int cur_pressure = get_cur_reg_pressure(be);
+ int reg_fact;
+
+ /* priority-based selection, heuristic inspired by Mueller's dissertation */
+ foreach_nodeset(ns, irn) {
+ /* make sure that branches are scheduled last */
+ if (! arch_irn_class_is(be->sched_env->arch_env, irn, branch)) {
+ int rdiff = get_irn_reg_diff(be, irn);
+ int sign = rdiff < 0;
+ int chg = (rdiff < 0 ? -rdiff : rdiff) << PRIO_CHG_PRESS;
+
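+ /* scale the pressure change exponentially with the current pressure; saturate if the shift overflows */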
+ reg_fact = chg << cur_pressure;
+ if (reg_fact < chg)
+ reg_fact = INT_MAX - 2;
+ reg_fact = sign ? -reg_fact : reg_fact;
+
+ cur_prio = (get_irn_critical_path_len(be, irn) << PRIO_LEVEL)
+ //- (get_irn_delay(be, irn) << PRIO_LEVEL)
+ + (get_irn_num_user(be, irn) << PRIO_NUMSUCCS)
+ - (get_irn_etime(be, irn) << PRIO_TIME)
+// - ((get_irn_reg_diff(be, irn) >> PRIO_CHG_PRESS) << ((cur_pressure >> PRIO_CUR_PRESS) - 3))
+ - reg_fact
+ + (get_irn_preorder(be, irn) << PRIO_PREORD); /* high preorder means early schedule */
+ if (cur_prio > max_prio) {
+ cand = irn;
+ max_prio = cur_prio;
+ }
+
+ DBG((be->dbg, LEVEL_4, "checked NODE %+F\n", irn));
+ DBG((be->dbg, LEVEL_4, "\tpriority: %d\n", cur_prio));
+ DBG((be->dbg, LEVEL_4, "\tpath len: %d (%d)\n", get_irn_critical_path_len(be, irn), get_irn_critical_path_len(be, irn) << PRIO_LEVEL));
+ DBG((be->dbg, LEVEL_4, "\tdelay: %d (%d)\n", get_irn_delay(be, irn), get_irn_delay(be, irn) << PRIO_LEVEL));
+ DBG((be->dbg, LEVEL_4, "\t#user: %d (%d)\n", get_irn_num_user(be, irn), get_irn_num_user(be, irn) << PRIO_NUMSUCCS));
+ DBG((be->dbg, LEVEL_4, "\tetime: %d (%d)\n", get_irn_etime(be, irn), -(get_irn_etime(be, irn) << PRIO_TIME)));
+ DBG((be->dbg, LEVEL_4, "\tpreorder: %d (%d)\n", get_irn_preorder(be, irn), get_irn_preorder(be, irn) << PRIO_PREORD));
+ DBG((be->dbg, LEVEL_4, "\treg diff: %d (%d)\n", get_irn_reg_diff(be, irn), -cand_reg_fact));
+ DBG((be->dbg, LEVEL_4, "\tpressure: %d\n", cur_pressure));
+ }
+ }
+
+ if (cand) {
+ DBG((be->dbg, LEVEL_4, "heuristic selected %+F:\n", cand));
+ }
+ else {
+ cand = nodeset_first(ns);
+ }
+ }
+ else {
+ /* use backend selector */
+ cand = be->selector->select(be->selector_block_env, ns);
+ }
+
+ return cand;
}
/**
static char _mark;
#define MARK &_mark
+static firm_dbg_module_t *xxxdbg;
+
/**
* descent into a dag and create a pre-order list.
*/
-static void descent(ir_node *root, ir_node *block, ir_node **list) {
+static void descent(ir_node *root, ir_node *block, ir_node **list, block_sched_env_t *env, unsigned path_len) {
int i;
if (! is_Phi(root)) {
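+ /* accumulate the execution times along this DAG path and remember the maximum as the node's critical path length */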
+ path_len += exectime(env->sched_env, root);
+ if (get_irn_critical_path_len(env, root) < path_len) {
+ set_irn_critical_path_len(env, root, path_len);
+ }
+
/* Phi nodes always leave the block */
for (i = get_irn_arity(root) - 1; i >= 0; --i) {
ir_node *pred = get_irn_n(root, i);
+ DBG((xxxdbg, LEVEL_3, " node %+F\n", pred));
/* Blocks may happen as predecessors of End nodes */
if (is_Block(pred))
continue;
if (get_nodes_block(pred) != block)
continue;
- descent(pred, block, list);
+ set_irn_link(pred, NULL);
+
+ descent(pred, block, list, env, path_len);
}
}
set_irn_link(root, *list);
block_sched_env_t be;
const ir_edge_t *edge;
ir_node *irn;
- int j, m;
+ int j, m, cur_pos;
ir_node *root = NULL, *preord = NULL;
ir_node *curr;
be.block = block;
be.curr_time = 0;
be.cands = new_nodeset(get_irn_n_edges(block));
+ be.live = new_nodeset(get_irn_n_edges(block));
be.selector = selector;
be.sched_env = env;
FIRM_DBG_REGISTER(be.dbg, "firm.be.sched");
+ FIRM_DBG_REGISTER(xxxdbg, "firm.be.sched");
-// firm_dbg_set_mask(be.dbg, SET_LEVEL_3);
+ // firm_dbg_set_mask(be.dbg, SET_LEVEL_3);
if (selector->init_block)
be.selector_block_env = selector->init_block(env->selector_env, block);
preord = NULL;
for (curr = root; curr; curr = irn) {
irn = get_irn_link(curr);
- descent(curr, block, &preord);
+ DBG((be.dbg, LEVEL_2, " DAG root %+F\n", curr));
+ descent(curr, block, &preord, &be, 0);
}
root = preord;
/* Third step: calculate the Delay. Note that our
- * list is now in pre-order, starting at root
- */
- for (curr = root; curr; curr = get_irn_link(curr)) {
+ * list is now in pre-order, starting at root
+ */
+ for (cur_pos = 0, curr = root; curr; curr = get_irn_link(curr), cur_pos++) {
sched_timestep_t d;
- if (arch_irn_classify(env->arch_env, curr) == arch_irn_class_branch) {
+ if (arch_irn_class_is(env->arch_env, curr, branch)) {
 /* assure that branches can be executed last */
d = 0;
}
/* set the etime of all nodes to 0 */
set_irn_etime(&be, curr, 0);
+
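+ /* remember the pre-order position, it is used by the heuristic */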
+ set_irn_preorder(&be, curr, cur_pos);
}
if (is_Phi(irn)) {
/* Phi functions are scheduled immediately, since they only transfer
- * data flow from the predecessors to this block. */
+ * data flow from the predecessors to this block. */
/* Increase the time step. */
be.curr_time += get_irn_etime(&be, irn);
}
else {
/* Other nodes must have all operands in other blocks to be made
- * ready */
+ * ready */
int ready = 1;
/* Check, if the operands of a node are not local to this block */
ready = 0;
break;
}
+ else {
+ /* live in values increase register pressure */
+ update_sched_liveness(&be, operand);
+ }
}
/* Make the node ready, if all operands live in a foreign block */
DBG((be.dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
make_ready(&be, NULL, irn);
}
+
+ /* calculate number of users (needed for heuristic) */
+ set_irn_num_user(&be, irn, get_num_successors(irn));
+
+ /* calculate register difference (needed for heuristic) */
+ set_irn_reg_diff(&be, irn, get_reg_difference(&be, irn));
}
}
while (nodeset_count(be.cands) > 0) {
nodeset *mcands; /**< the set of candidates with maximum delay time */
nodeset *ecands; /**< the set of nodes in mcands whose etime <= curr_time */
+ nodeset *local_cands;
sched_timestep_t max_delay = 0;
/* collect statistics about amount of ready nodes */
max_delay = d > max_delay ? d : max_delay;
}
- mcands = new_nodeset(8);
- ecands = new_nodeset(8);
+
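+ /* The Muchnick strategy partitions the candidates into mcands (maximum
+    delay) and ecands (ready in time); the other strategies select from
+    a single flat candidate set. */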
+ if (be.sched_env->sel_strategy & BE_SCHED_SELECT_MUCHNIK) {
+ mcands = new_nodeset(8);
+ ecands = new_nodeset(8);
+ }
+ else {
+ local_cands = new_nodeset(8);
+ }
/* calculate mcands and ecands */
foreach_nodeset(be.cands, irn) {
- if (be_is_Keep(irn)) {
- nodeset_break(be.cands);
- break;
- }
- if (get_irn_delay(&be, irn) == max_delay) {
- nodeset_insert(mcands, irn);
- if (get_irn_etime(&be, irn) <= be.curr_time)
- nodeset_insert(ecands, irn);
+ if (be_is_Keep(irn)) {
+ nodeset_break(be.cands);
+ break;
+ }
+ if (be.sched_env->sel_strategy & BE_SCHED_SELECT_MUCHNIK) {
+ if (get_irn_delay(&be, irn) == max_delay) {
+ nodeset_insert(mcands, irn);
+ if (get_irn_etime(&be, irn) <= be.curr_time)
+ nodeset_insert(ecands, irn);
+ }
+ }
+ else {
+ nodeset_insert(local_cands, irn);
}
}
- if (irn) {
- /* Keeps must be immediately scheduled */
- }
- else {
- DB((be.dbg, LEVEL_2, "\tbe.curr_time = %u\n", be.curr_time));
-
- /* select a node to be scheduled and check if it was ready */
- if (nodeset_count(mcands) == 1) {
- DB((be.dbg, LEVEL_3, "\tmcand = 1, max_delay = %u\n", max_delay));
- irn = nodeset_first(mcands);
- }
- else {
- int cnt = nodeset_count(ecands);
- if (cnt == 1) {
- arch_irn_class_t irn_class;
-
- irn = nodeset_first(ecands);
- irn_class = arch_irn_classify(env->arch_env, irn);
-
- if (irn_class == arch_irn_class_branch) {
+ if (irn) {
+ /* Keeps must be immediately scheduled */
+ }
+ else {
+ DB((be.dbg, LEVEL_2, "\tbe.curr_time = %u\n", be.curr_time));
+
+ /* select a node to be scheduled and check if it was ready */
+ if (! (be.sched_env->sel_strategy & BE_SCHED_SELECT_MUCHNIK)) {
+ DB((be.dbg, LEVEL_3, "\tlocal_cands = %d\n", nodeset_count(local_cands)));
+ irn = select_node_heuristic(&be, local_cands);
+ }
+ else if (nodeset_count(mcands) == 1) {
+ irn = nodeset_first(mcands);
+ DB((be.dbg, LEVEL_3, "\tirn = %+F, mcand = 1, max_delay = %u\n", irn, max_delay));
+ }
+ else {
+ int cnt = nodeset_count(ecands);
+ if (cnt == 1) {
+ irn = nodeset_first(ecands);
+
+ if (arch_irn_class_is(env->arch_env, irn, branch)) {
/* BEWARE: don't select a JUMP if others are still possible */
goto force_mcands;
}
- DB((be.dbg, LEVEL_3, "\tecand = 1, max_delay = %u\n", max_delay));
- }
- else if (cnt > 1) {
- DB((be.dbg, LEVEL_3, "\tecand = %d, max_delay = %u\n", cnt, max_delay));
- irn = select_node_heuristic(&be, ecands);
- }
- else {
+ DB((be.dbg, LEVEL_3, "\tirn = %+F, ecand = 1, max_delay = %u\n", irn, max_delay));
+ }
+ else if (cnt > 1) {
+ DB((be.dbg, LEVEL_3, "\tecand = %d, max_delay = %u\n", cnt, max_delay));
+ irn = select_node_heuristic(&be, ecands);
+ }
+ else {
force_mcands:
- DB((be.dbg, LEVEL_3, "\tmcand = %d\n", nodeset_count(mcands)));
- irn = select_node_heuristic(&be, mcands);
- }
- }
- }
- del_nodeset(mcands);
- del_nodeset(ecands);
+ DB((be.dbg, LEVEL_3, "\tmcand = %d\n", nodeset_count(mcands)));
+ irn = select_node_heuristic(&be, mcands);
+ }
+ }
+ }
+
+ if (be.sched_env->sel_strategy & BE_SCHED_SELECT_MUCHNIK) {
+ del_nodeset(mcands);
+ del_nodeset(ecands);
+ }
+ else {
+ del_nodeset(local_cands);
+ }
DB((be.dbg, LEVEL_2, "\tpicked node %+F\n", irn));
selector->finish_block(be.selector_block_env);
del_nodeset(be.cands);
+ del_nodeset(be.live);
}
static const list_sched_selector_t reg_pressure_selector_struct = {
const list_sched_selector_t *reg_pressure_selector = &reg_pressure_selector_struct;
/* List schedule a graph. */
-void list_sched(const be_irg_t *birg, int enable_mris)
+void list_sched(const be_irg_t *birg, be_options_t *be_opts)
{
const arch_env_t *arch_env = birg->main_env->arch_env;
ir_graph *irg = birg->irg;
/* Assure, that the out edges are computed */
edges_assure(irg);
- if(enable_mris)
+ if(be_opts->mris)
mris = be_sched_mris_preprocess(birg);
num_nodes = get_irg_last_idx(irg);
memset(&env, 0, sizeof(env));
- env.selector = arch_env->isa->impl->get_list_sched_selector(arch_env->isa);
- env.arch_env = arch_env;
- env.irg = irg;
- env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);
+ env.selector = arch_env->isa->impl->get_list_sched_selector(arch_env->isa);
+ env.arch_env = arch_env;
+ env.irg = irg;
+ env.sel_strategy = be_opts->sched_select;
+ env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);
memset(env.sched_info, 0, num_nodes * sizeof(*env.sched_info));
if (env.selector->finish_graph)
env.selector->finish_graph(env.selector_env);
- if(enable_mris)
+ if(be_opts->mris)
be_sched_mris_free(mris);
DEL_ARR_F(env.sched_info);