// attach last nop to end node (so that firm doesn't discard it)
if(sched_env->last_nop != NULL) {
ir_node* end = get_irg_end(get_irn_irg(sched_env->block));
+ (void) end;
+ // TODO
}
sched_env->block = NULL;
}
return 1;
}
// List-scheduler "select" callback for the MIPS backend: picks the next node
// to schedule from the ready set, deferring branches until nothing else is left.
// NOTE(review): this is a diff hunk — unchanged context lines are elided, so the
// body below is NOT complete: the declarations of 'node' and 'condjmp', and the
// code between the loop body and the branch fallback, are not visible here.
-static ir_node *mips_scheduler_select(void *block_env, nodeset *ready_set)
// Signature change: the selector callback interface gained a 'live_set'
// parameter; it is not referenced in the visible body — presumably unused by
// this backend, TODO confirm against the list_sched_selector_t contract.
+static ir_node *mips_scheduler_select(void *block_env, nodeset *ready_set, nodeset *live_set)
{
mips_sched_env_t *sched_env = (mips_sched_env_t*) block_env;
const arch_env_t *arch_env = (const arch_env_t*) sched_env->arch_env;
// test all nodes in the ready set and take the first non-branch that
// is allowed
for (node = nodeset_first(ready_set); node != NULL; node = nodeset_next(ready_set)) {
// Patch replaces the direct classification compare with the
// arch_irn_class_is() test macro for the same "is a branch" check.
-	if (arch_irn_classify(arch_env, node) == arch_irn_class_branch) {
+	if (arch_irn_class_is(arch_env, node, branch)) {
// Branches are skipped here; a forking branch (conditional jump) is
// remembered so it can be emitted after all non-branch nodes.
if (is_irn_forking(node))
condjmp = node;
continue;
// NOTE(review): 'return condjmp;' directly after 'continue;' would be
// unreachable — almost certainly elided context lines (the 'if' close and a
// 'condjmp != NULL' guard) sit between these two lines; verify in full patch.
return condjmp;
}
// Only branches remain in the ready set: take the first one.
node = nodeset_first(ready_set);
-	assert(arch_irn_classify(arch_env, node) == arch_irn_class_branch);
+	assert(arch_irn_class_is(arch_env, node, branch));
nodeset_break(ready_set);
return node;
}
/**
 * Returns the reg_pressure scheduler with to_appear_in_schedule() overloaded
 */
// Interface change: the caller now passes in a 'selector' struct, and this
// function returns that pointer instead of the file-static selector.
-const list_sched_selector_t *mips_get_list_sched_selector(const void *self)
+const list_sched_selector_t *mips_get_list_sched_selector(const void *self, list_sched_selector_t *selector)
{
memset(&mips_sched_selector, 0, sizeof(mips_sched_selector));
mips_sched_selector.init_graph = mips_scheduler_init_graph;
mips_sched_selector.to_appear_in_schedule = mips_scheduler_to_appear_in_schedule;
mips_sched_selector.finish_block = mips_scheduler_finish_block;
mips_sched_selector.finish_graph = mips_scheduler_finish_graph;
// NOTE(review): as shown, the static 'mips_sched_selector' is still zeroed and
// filled above but is no longer what the caller receives — the function now
// returns the caller-provided 'selector' untouched, making the assignments
// above dead with respect to the return value. Presumably the intent is for
// the overrides to be copied into (or applied to) *selector, or the caller
// pre-initializes 'selector' from a base (reg-pressure) selector elsewhere —
// TODO confirm against the caller; otherwise this drops the MIPS callbacks.
-	return &mips_sched_selector;
+	//return &mips_sched_selector;
+	return selector;
+}
+
// New function added by this patch: the MIPS backend provides no ILP
// (integer-linear-programming) instruction scheduler. Returning NULL —
// presumably the framework treats NULL as "ILP scheduling unsupported";
// TODO confirm against the backend interface that queries this.
+const ilp_sched_selector_t *mips_get_ilp_sched_selector(const void *self) {
+	return NULL;
}