/* Mips implementation of list scheduler selector */
/* $Id$ */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif

#include "mips_scheduler.h"

#include "../besched_t.h"
#include "../be.h"
#include "../beabi.h"
#include "iredges.h"
#include "ircons.h"
#include "gen_mips_regalloc_if.h"

#include "mips_new_nodes.h"

list_sched_selector_t mips_sched_selector;

typedef struct {
        const arch_env_t* arch_env;
        pset *div_set;
        /**
         * This array holds an entry for each register specifying how many cycles
         * have to pass before we can access that register again
         * (because MIPS writes the register value back in the WB phase of the pipeline)
         */
        int busy_registers[N_mips_gp_REGS];
        /// current block
        ir_node* block;
        ir_node* last_nop;
} mips_sched_env_t;

/* Matze: deprecated and totally broken */
#if 0

static void *mips_scheduler_init_graph(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
{
        mips_sched_env_t *sched_env = xmalloc(sizeof(sched_env[0]));
        memset(sched_env, 0, sizeof(sched_env[0]));

        sched_env->arch_env = arch_env;
        sched_env->div_set = new_pset(pset_default_ptr_cmp, 4);

        return sched_env;
}

static void mips_scheduler_finish_graph(void* graph_env)
{
        mips_sched_env_t *sched_env = (mips_sched_env_t*) graph_env;
        del_pset(sched_env->div_set);
}

static void *mips_scheduler_init_block(void *graph_env, ir_node *block)
{
        mips_sched_env_t *sched_env = (mips_sched_env_t*) graph_env;
        assert(pset_count(sched_env->div_set) == 0);
        srand(12234);
        // TODO later we might have blocks that don't end in a jump
        memset(&sched_env->busy_registers, 0, sizeof(sched_env->busy_registers));
        sched_env->block = block;
        sched_env->last_nop = NULL;
        return sched_env;
}

static void mips_scheduler_finish_block(void* graph_env)
{
        mips_sched_env_t *sched_env = (mips_sched_env_t*) graph_env;
        // attach last nop to end node (so that firm doesn't discard it)
        if(sched_env->last_nop != NULL) {
                ir_node* end = get_irg_end(get_irn_irg(sched_env->block));
                add_End_keepalive(end, sched_env->last_nop);
        }
        sched_env->block = NULL;
}

/**
 * Only real mips instructions that emit code appear in the schedule;
 * pseudo instructions like zero, reinterpret_conv and fallthrough do not.
 */
static int mips_scheduler_to_appear_in_schedule(void *block_env, const ir_node *irn)
{
        return is_mips_irn(irn) && !is_mips_zero(irn) && !is_mips_reinterpret_conv(irn)
            && !is_mips_fallthrough(irn);
}

static void mips_collect_mflohis(pset* set, ir_node* node)
{
        // construct a list of nodes that need to be scheduled before
        // we are allowed to schedule another div or mul instruction
        const ir_edge_t *edge, *edge2;

        if(is_mips_div(node)) {
                // div results are reached through Proj nodes, so the
                // mfhi/mflo users sit one edge further out
                foreach_out_edge(node, edge) {
                        const ir_node* node2 = get_edge_src_irn(edge);

                        assert(is_Proj(node2));
                        foreach_out_edge(node2, edge2) {
                                const ir_node* node3 = get_edge_src_irn(edge2);
                                if(is_mips_mfhi(node3) || is_mips_mflo(node3))
                                        pset_insert_ptr(set, node3);
                        }
                }
        } else if(is_mips_mult(node)) {
                // mult users read HI/LO directly
                foreach_out_edge(node, edge) {
                        const ir_node* node2 = get_edge_src_irn(edge);

                        if(is_mips_mfhi(node2) || is_mips_mflo(node2))
                                pset_insert_ptr(set, node2);
                }
        }
}

/**
 * A div or mult must not be scheduled while the mfhi/mflo results of a
 * previous div/mult are still pending, because it would clobber HI/LO.
 */
static int mips_scheduler_node_allowed(mips_sched_env_t *sched_env, ir_node* node)
{
        if(pset_count(sched_env->div_set) != 0 && (is_mips_div(node) || is_mips_mult(node))) {
                return 0;
        }

        return 1;
}
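
/*
 * Sketch (not part of the original file): one possible implementation of the
 * "TODO update busy_registers" in mips_scheduler_select below. It assumes a
 * fixed write-back latency and that the scheduled node's result register can
 * be queried with arch_get_irn_register(); both the constant and the helper
 * name are made up for illustration.
 */
#define MIPS_WB_LATENCY 2 /* assumed write-back delay in cycles */

static void mips_update_busy_registers(mips_sched_env_t *sched_env, const ir_node *node)
{
        const arch_register_t *reg = arch_get_irn_register(sched_env->arch_env, node);
        int i;

        /* one node is scheduled per cycle, so age all pending writes */
        for(i = 0; i < N_mips_gp_REGS; ++i) {
                if(sched_env->busy_registers[i] > 0)
                        sched_env->busy_registers[i]--;
        }

        /* the register written by this node only becomes readable again
         * after the write-back stage */
        if(reg != NULL)
                sched_env->busy_registers[arch_register_get_index(reg)] = MIPS_WB_LATENCY;
}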

static ir_node *mips_scheduler_select(void *block_env, nodeset *ready_set, nodeset *live_set)
{
        mips_sched_env_t *sched_env = (mips_sched_env_t*) block_env;
        const arch_env_t *arch_env = (const arch_env_t*) sched_env->arch_env;
        ir_node *node = NULL;
        ir_node *block = sched_env->block;
        ir_node *condjmp = NULL;
        ir_graph *irg = get_irn_irg(block);
        int have_non_branch_nodes = 0;

        // test all nodes in the ready set and take the first non-branch that
        // is allowed
        for (node = nodeset_first(ready_set); node != NULL; node = nodeset_next(ready_set)) {
                if (arch_irn_class_is(arch_env, node, branch)) {
                        // remember a conditional jump so we can prefer it over
                        // unconditional ones below
                        if (is_irn_forking(node))
                                condjmp = node;
                        continue;
                }

                have_non_branch_nodes = 1;

                if (mips_scheduler_node_allowed(sched_env, node))
                {
                        nodeset_break(ready_set);

                        // TODO update busy_registers (see the
                        // mips_update_busy_registers sketch above)

                        if (is_mips_div(node) || is_mips_mult(node)) {
                                mips_collect_mflohis(sched_env->div_set, node);
                        } else if(is_mips_mflo(node) || is_mips_mfhi(node)) {
                                pset_remove_ptr(sched_env->div_set, node);
                        }

                        return node;
                }
        }

        // if we arrive here, no non-branch node was found that we could emit

        // return a branch if there are only branches left
        if(!have_non_branch_nodes) {
                // schedule conditional branches before unconditional ones
                if(condjmp != NULL) {
                        return condjmp;
                }
                node = nodeset_first(ready_set);
                assert(arch_irn_class_is(arch_env, node, branch));
                nodeset_break(ready_set);
                return node;
        }

        // nothing schedulable is ready, so emit a nop and remember it so
        // finish_block can attach it to the end node
        node = new_rd_mips_nop(NULL, irg, block, mode_M);
        keep_alive(node);
        sched_env->last_nop = node;
        return node;
}

#endif

/**
 * Returns the list scheduler selector. The mips specific selector is
 * deprecated and disabled above, so the given selector is returned unchanged.
 */
const list_sched_selector_t *mips_get_list_sched_selector(const void *self, list_sched_selector_t *selector)
{
#if 0
        memset(&mips_sched_selector, 0, sizeof(mips_sched_selector));
        mips_sched_selector.init_graph = mips_scheduler_init_graph;
        mips_sched_selector.init_block = mips_scheduler_init_block;
        mips_sched_selector.select = mips_scheduler_select;
        mips_sched_selector.to_appear_in_schedule = mips_scheduler_to_appear_in_schedule;
        mips_sched_selector.finish_block = mips_scheduler_finish_block;
        mips_sched_selector.finish_graph = mips_scheduler_finish_graph;
        //return &mips_sched_selector;
#endif
        return selector;
}
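
/*
 * Sketch (an assumption, not part of the original file): if the mips specific
 * selector above were revived, one way to keep the generic defaults and only
 * overload the mips specific hooks would be to copy the selector passed in
 * before overriding it; mips_override_selector is a made-up name.
 */
#if 0
static const list_sched_selector_t *mips_override_selector(list_sched_selector_t *selector)
{
        mips_sched_selector = *selector;
        mips_sched_selector.select = mips_scheduler_select;
        mips_sched_selector.to_appear_in_schedule = mips_scheduler_to_appear_in_schedule;
        return &mips_sched_selector;
}
#endif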

const ilp_sched_selector_t *mips_get_ilp_sched_selector(const void *self)
{
        return NULL;
}