/* Mips implementation of list scheduler selector */
/* $Id$ */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif

#include <assert.h>
#include <string.h>

#include "mips_scheduler.h"

#include "../besched_t.h"
#include "../be.h"
#include "../beabi.h"
#include "iredges.h"
#include "ircons.h"
#include "gen_mips_regalloc_if.h"

#include "mips_new_nodes.h"

list_sched_selector_t mips_sched_selector;

typedef struct {
	const arch_env_t* arch_env;
	pset *div_set;
	/**
	 * This array holds an entry for each register that specifies how many
	 * cycles have to pass before we can access that register again
	 * (because mips only writes the register value back in the WB stage
	 * of its pipeline)
	 */
	int busy_registers[N_mips_gp_REGS];
	/// current block
	ir_node* block;
	/// last emitted nop, attached to the end node in finish_block
	ir_node* last_nop;
} mips_sched_env_t;
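
/*
 * Note: busy_registers is not maintained yet (see the TODO in
 * mips_scheduler_select). A minimal sketch of the intended bookkeeping
 * could look like the code below; the helper names and the assumed
 * write-back latency of 2 cycles are illustrative guesses, not part of
 * the current implementation.
 */
#if 0
/* mark the result register of a just scheduled node as busy */
static void mips_mark_register_busy(mips_sched_env_t *sched_env, int reg_index)
{
	/* assumption: the value is usable 2 cycles later, after the WB stage */
	sched_env->busy_registers[reg_index] = 2;
}

/* age all busy registers by one cycle; to be called per scheduled node */
static void mips_advance_cycle(mips_sched_env_t *sched_env)
{
	int i;
	for (i = 0; i < N_mips_gp_REGS; ++i) {
		if (sched_env->busy_registers[i] > 0)
			sched_env->busy_registers[i]--;
	}
}
#endif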

static void *mips_scheduler_init_graph(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
{
	mips_sched_env_t *sched_env = xmalloc(sizeof(sched_env[0]));
	memset(sched_env, 0, sizeof(sched_env[0]));

	sched_env->arch_env = arch_env;
	sched_env->div_set = new_pset(pset_default_ptr_cmp, 4);

	return sched_env;
}

static void mips_scheduler_finish_graph(void* graph_env)
{
	mips_sched_env_t *sched_env = (mips_sched_env_t*) graph_env;
	del_pset(sched_env->div_set);
}

static void *mips_scheduler_init_block(void *graph_env, ir_node *block)
{
	mips_sched_env_t *sched_env = (mips_sched_env_t*) graph_env;
	assert(pset_count(sched_env->div_set) == 0);
	srand(12234);
	// TODO later we might have blocks that don't end in a jump
	memset(&sched_env->busy_registers, 0, sizeof(sched_env->busy_registers));
	sched_env->block = block;
	sched_env->last_nop = NULL;
	return sched_env;
}

static void mips_scheduler_finish_block(void* graph_env)
{
	mips_sched_env_t *sched_env = (mips_sched_env_t*) graph_env;
	// attach the last nop to the end node (so that firm doesn't discard it)
	if(sched_env->last_nop != NULL) {
		ir_node* end = get_irg_end(get_irn_irg(sched_env->block));
		add_End_keepalive(end, sched_env->last_nop);
		sched_env->last_nop = NULL;
	}
	sched_env->block = NULL;
}
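
/*
 * Virtual nodes such as mips_zero (presumably a use of the hardwired
 * $zero register), reinterpret_conv and fallthrough produce no machine
 * code, so they must not appear in the schedule.
 */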
static int mips_scheduler_to_appear_in_schedule(void *block_env, const ir_node *irn)
{
	return is_mips_irn(irn) && !is_mips_zero(irn) && !is_mips_reinterpret_conv(irn) && !is_mips_fallthrough(irn);
}
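
/*
 * A mips_div produces a tuple, so its mfhi/mflo readers are reached
 * through Proj nodes, while the readers of a mips_mult are direct users;
 * the two branches below mirror that difference.
 */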
static void mips_collect_mflohis(pset* set, ir_node* node) {
	// construct a list of nodes that need to be scheduled before
	// we are allowed to schedule another div or mul instruction
	const ir_edge_t *edge, *edge2;

	if(is_mips_div(node)) {
		foreach_out_edge(node, edge) {
			const ir_node* node2 = get_edge_src_irn(edge);

			assert(is_Proj(node2));
			foreach_out_edge(node2, edge2) {
				ir_node* node3 = get_edge_src_irn(edge2);
				if(is_mips_mfhi(node3) || is_mips_mflo(node3))
					pset_insert_ptr(set, node3);
			}
		}
	} else if(is_mips_mult(node)) {
		foreach_out_edge(node, edge) {
			ir_node* node2 = get_edge_src_irn(edge);

			if(is_mips_mfhi(node2) || is_mips_mflo(node2))
				pset_insert_ptr(set, node2);
		}
	}
}
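
/*
 * mult and div write the special HI/LO register pair. Starting another
 * mult or div before all pending mfhi/mflo nodes have read the previous
 * result would clobber it, so such nodes are held back while div_set is
 * non-empty.
 */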
static int mips_scheduler_node_allowed(mips_sched_env_t *sched_env, ir_node* node)
{
	if(pset_count(sched_env->div_set) != 0 && (is_mips_div(node) || is_mips_mult(node))) {
		return 0;
	}

	return 1;
}
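
/*
 * Selection strategy: take the first ready non-branch node that is
 * currently allowed. Branches are held back until only branches remain
 * (conditional ones first). If non-branch nodes are ready but none may
 * be scheduled yet, a nop is emitted to bridge the wait cycles.
 */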
static ir_node *mips_scheduler_select(void *block_env, nodeset *ready_set)
{
	mips_sched_env_t *sched_env = (mips_sched_env_t*) block_env;
	const arch_env_t *arch_env = (const arch_env_t*) sched_env->arch_env;
	ir_node *node = NULL;
	ir_node *block = sched_env->block;
	ir_node *condjmp = NULL;
	ir_graph *irg = get_irn_irg(block);
	int have_non_branch_nodes = 0;

	// test all nodes in the ready set and take the first non-branch that
	// is allowed
	for (node = nodeset_first(ready_set); node != NULL; node = nodeset_next(ready_set)) {
		if (arch_irn_classify(arch_env, node) == arch_irn_class_branch) {
			if (is_irn_forking(node))
				condjmp = node;
			continue;
		}

		have_non_branch_nodes = 1;

		if (mips_scheduler_node_allowed(sched_env, node))
		{
			nodeset_break(ready_set);

			// TODO update busy_registers

			if (is_mips_div(node) || is_mips_mult(node)) {
				mips_collect_mflohis(sched_env->div_set, node);
			} else if(is_mips_mflo(node) || is_mips_mfhi(node)) {
				pset_remove_ptr(sched_env->div_set, node);
			}

			return node;
		}
	}

	// if we arrive here, no non-branch node was found that we can emit

	// return a branch if there are only branches left
	if(!have_non_branch_nodes) {
		// schedule conditional branches before unconditional ones
		if(condjmp != NULL) {
			return condjmp;
		}
		node = nodeset_first(ready_set);
		assert(arch_irn_classify(arch_env, node) == arch_irn_class_branch);
		nodeset_break(ready_set);
		return node;
	}

	// non-branch nodes are ready but none is allowed yet: emit a nop
	node = new_rd_mips_nop(NULL, irg, block, mode_M);
	keep_alive(node);
	// remember the nop so finish_block can attach it to the end node
	sched_env->last_nop = node;
	return node;
}

/**
 * Returns the mips list scheduler selector with the backend specific
 * callbacks (select, to_appear_in_schedule, ...) filled in
 */
const list_sched_selector_t *mips_get_list_sched_selector(const void *self)
{
	memset(&mips_sched_selector, 0, sizeof(mips_sched_selector));
	mips_sched_selector.init_graph = mips_scheduler_init_graph;
	mips_sched_selector.init_block = mips_scheduler_init_block;
	mips_sched_selector.select = mips_scheduler_select;
	mips_sched_selector.to_appear_in_schedule = mips_scheduler_to_appear_in_schedule;
	mips_sched_selector.finish_block = mips_scheduler_finish_block;
	mips_sched_selector.finish_graph = mips_scheduler_finish_graph;
	return &mips_sched_selector;
}