1 /* Mips implementation of list scheduler selector */
#include "mips_scheduler.h"

#include <stdlib.h>

#include "../besched_t.h"

#include "gen_mips_regalloc_if.h"
#include "mips_new_nodes.h"
/* Selector vtable handed out by mips_get_list_sched_selector();
 * (re)filled there before every use. */
list_sched_selector_t mips_sched_selector;

/* File-scope arch_env.
 * NOTE(review): the visible code only reads sched_env->arch_env, so this
 * global appears unused here — confirm against the full source. */
const arch_env_t* arch_env;
* This array holds an entry for each register that specifies how many cycles
* must pass before we can access that register again
* (because mips writes the register value back in the WB phase of the pipeline)
33 int busy_registers[N_mips_general_purpose_REGS];
/**
 * List-scheduler callback: allocate and initialise the per-graph
 * scheduling environment.
 *
 * @param vtab      the selector vtable (unused in the visible code)
 * @param arch_env  architecture environment, stored for later queries
 * @param irg       graph to be scheduled (unused in the visible code)
 * @return the freshly allocated, zero-initialised mips_sched_env_t,
 *         handed back as graph_env to the other callbacks
 */
static void *mips_scheduler_init_graph(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
	mips_sched_env_t *sched_env = xmalloc(sizeof(sched_env[0]));
	memset(sched_env, 0, sizeof(sched_env[0]));
	sched_env->arch_env = arch_env;
	/* set of mfhi/mflo users that must be scheduled before the next div/mult */
	sched_env->div_set = new_pset(pset_default_ptr_cmp, 4);
	/* NOTE(review): dump is truncated here — presumably `return sched_env;`
	 * follows; confirm against the full source. */
50 static void mips_scheduler_finish_graph(void* graph_env)
52 mips_sched_env_t *sched_env = (mips_sched_env_t*) graph_env;
53 del_pset(sched_env->div_set);
/**
 * List-scheduler callback: prepare per-block scheduling state.
 * Resets the register-busy table and the trailing-nop bookkeeping and
 * remembers the current block.
 *
 * @param graph_env  the per-graph mips_sched_env_t
 * @param block      the block about to be scheduled
 * @return the block environment (NOTE(review): dump is truncated — the
 *         return statement is not visible; presumably graph_env is returned
 *         so the same env doubles as block_env. Confirm.)
 */
static void *mips_scheduler_init_block(void *graph_env, ir_node *block)
	mips_sched_env_t *sched_env = (mips_sched_env_t*) graph_env;
	/* every mfhi/mflo pending from the previous block must be scheduled by now */
	assert(pset_count(sched_env->div_set) == 0);
	// TODO later we might have blocks that don't end in a jump
	memset(&sched_env->busy_registers, 0, sizeof(sched_env->busy_registers));
	sched_env->block = block;
	sched_env->last_nop = NULL;
/**
 * List-scheduler callback: finish the current block.
 * If a nop was emitted as the last instruction, attach it to the graph's
 * End node so firm does not discard it.
 *
 * @param graph_env  the per-graph mips_sched_env_t
 */
static void mips_scheduler_finish_block(void* graph_env)
	mips_sched_env_t *sched_env = (mips_sched_env_t*) graph_env;
	// attach last nop to end node (so that firm doesn't discard it)
	if(sched_env->last_nop != NULL) {
		ir_node* end = get_irg_end(get_irn_irg(sched_env->block));
		/* NOTE(review): dump is truncated — `end` is computed but the call
		 * using it (presumably add_End_keepalive(end, sched_env->last_nop))
		 * is not visible; confirm against the full source. */
	sched_env->block = NULL;
78 static int mips_scheduler_to_appear_in_schedule(void *block_env, const ir_node *irn)
80 return is_mips_irn(irn) && !is_mips_zero(irn) && !is_mips_reinterpret_conv(irn) && !is_mips_fallthrough(irn);
/**
 * Collect into @p set all mfhi/mflo nodes that consume the result of
 * @p node (a mips div or mult). These readers must be scheduled before
 * the next div/mult, because that would overwrite the hi/lo registers.
 *
 * div results are reached through an intermediate Proj; mult results are
 * consumed directly (as the two branches below show).
 */
static void mips_collect_mflohis(pset* set, ir_node* node) {
	// construct a list of nodes that need to be scheduled before
	// we are allowed to schedule another div or mul instruction
	const ir_edge_t *edge, *edge2;
	if(is_mips_div(node)) {
		/* div: hi/lo readers sit one Proj behind the node */
		foreach_out_edge(node, edge) {
			const ir_node* node2 = get_edge_src_irn(edge);
			assert(is_Proj(node2));
			foreach_out_edge(node2, edge2) {
				const ir_node* node3 = get_edge_src_irn(edge2);
				if(is_mips_mfhi(node3) || is_mips_mflo(node3))
					pset_insert_ptr(set, node3);
	} else if(is_mips_mult(node)) {
		/* mult: hi/lo readers are direct successors */
		foreach_out_edge(node, edge) {
			const ir_node* node2 = get_edge_src_irn(edge);
			if(is_mips_mfhi(node2) || is_mips_mflo(node2))
				pset_insert_ptr(set, node2);
/**
 * Returns whether @p node may be scheduled right now.
 * A div/mult is blocked while mfhi/mflo readers of an earlier div/mult are
 * still pending in sched_env->div_set (scheduling it would clobber hi/lo).
 * NOTE(review): dump is truncated — the return statements are not visible;
 * presumably 0 inside the if-branch and 1 otherwise. Confirm.
 */
static int mips_scheduler_node_allowed(mips_sched_env_t *sched_env, ir_node* node)
	if(pset_count(sched_env->div_set) != 0 && (is_mips_div(node) || is_mips_mult(node))) {
/**
 * List-scheduler callback: pick the next node to schedule from @p ready_set.
 *
 * Policy visible in this (truncated) dump:
 *  - prefer any allowed non-branch node; branches are deferred,
 *  - a div/mult is blocked while mfhi/mflo readers of an earlier div/mult
 *    are pending (mips_scheduler_node_allowed),
 *  - when only branches remain, a conditional branch goes before an
 *    unconditional one,
 *  - as a last resort a nop is emitted.
 *
 * @param block_env  the per-block environment (the mips_sched_env_t)
 * @param ready_set  set of nodes whose operands are all scheduled
 * @return the chosen node
 */
static ir_node *mips_scheduler_select(void *block_env, pset *ready_set)
	mips_sched_env_t *sched_env = (mips_sched_env_t*) block_env;
	const arch_env_t *arch_env = (const arch_env_t*) sched_env->arch_env;
	ir_node *node = NULL;
	ir_node *block = sched_env->block;
	ir_node *condjmp = NULL;	/* remembered conditional branch, if any */
	ir_graph *irg = get_irn_irg(block);
	int have_non_branch_nodes = 0;
	// test all nodes in the ready set and take the first non-branch that
	for(node = pset_first(ready_set); node != NULL; node = pset_next(ready_set)) {
		if(arch_irn_classify(arch_env, node) == arch_irn_class_branch) {
			/* NOTE(review): dump is truncated — a forking (conditional)
			 * branch is presumably remembered in condjmp here; confirm. */
			if(is_irn_forking(node))
		have_non_branch_nodes = 1;
		if(mips_scheduler_node_allowed(sched_env, node))
			pset_break(ready_set);
			// TODO update busy_registers
			if(is_mips_div(node) || is_mips_mult(node)) {
				/* remember hi/lo readers that must precede the next div/mult */
				mips_collect_mflohis(sched_env->div_set, node);
			} else if(is_mips_mflo(node) || is_mips_mfhi(node)) {
				/* one pending hi/lo read is now satisfied */
				pset_remove_ptr(sched_env->div_set, node);
	// if we arrive here no non-branch node was found that we can emit
	// return a branch if there are just branches left
	if(!have_non_branch_nodes) {
		// schedule conditional branches before non-conditional ones
		if(condjmp != NULL) {
			/* NOTE(review): truncated — presumably returns condjmp here */
		node = pset_first(ready_set);
		assert(arch_irn_classify(arch_env, node) == arch_irn_class_branch);
		pset_break(ready_set);
	/* nothing else is schedulable: emit a nop as filler */
	node = new_rd_mips_nop(NULL, irg, block, mode_M);
* Builds and returns the mips-specific list scheduler selector
/**
 * Fills the (static) mips_sched_selector vtable with the callbacks defined
 * in this file and returns it. Callbacks not assigned below stay NULL
 * after the memset.
 *
 * @param self  unused in the visible code
 * @return pointer to the filled-in selector vtable
 */
const list_sched_selector_t *mips_get_list_sched_selector(const void *self)
	memset(&mips_sched_selector, 0, sizeof(mips_sched_selector));
	mips_sched_selector.init_graph = mips_scheduler_init_graph;
	mips_sched_selector.init_block = mips_scheduler_init_block;
	mips_sched_selector.select = mips_scheduler_select;
	mips_sched_selector.to_appear_in_schedule = mips_scheduler_to_appear_in_schedule;
	mips_sched_selector.finish_block = mips_scheduler_finish_block;
	mips_sched_selector.finish_graph = mips_scheduler_finish_graph;
	return &mips_sched_selector;