2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Mips implementation of list scheduler selector
23 * @author Matthias Braun, Mehdi
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include "mips_scheduler.h"

#include "../besched.h"
#include "../belistsched.h"

#include "gen_mips_regalloc_if.h"
#include "mips_new_nodes.h"
/** Selector vtable for the deprecated custom MIPS scheduler below.
 *  NOTE(review): it is filled in mips_get_list_sched_selector() but the
 *  return of it is commented out there — presumably dead; confirm before
 *  removing. */
list_sched_selector_t mips_sched_selector;
45 * This array holds an entry for each register that specifies how much cycles
46 * have to pass before we can access that register again
47 * (because mips will write the register value back in the WB phase of the pipeline)
49 int busy_registers[N_mips_gp_REGS];
55 /* Matze: deprecated and totally broken */
58 static void *mips_scheduler_init_graph(const list_sched_selector_t *vtab, ir_graph *irg)
60 mips_sched_env_t *sched_env = XMALLOCZ(mips_sched_env_t);
62 sched_env->div_set = new_pset(pset_default_ptr_cmp, 4);
67 static void mips_scheduler_finish_graph(void* graph_env)
69 mips_sched_env_t *sched_env = (mips_sched_env_t*) graph_env;
70 del_pset(sched_env->div_set);
73 static void *mips_scheduler_init_block(void *graph_env, ir_node *block)
75 mips_sched_env_t *sched_env = (mips_sched_env_t*) graph_env;
76 assert(pset_count(sched_env->div_set) == 0);
78 // TODO later we might have blocks that don't end in a jump
79 memset(&sched_env->busy_registers, 0, sizeof(sched_env->busy_registers));
80 sched_env->block = block;
81 sched_env->last_nop = NULL;
85 static void mips_scheduler_finish_block(void* graph_env)
87 mips_sched_env_t *sched_env = (mips_sched_env_t*) graph_env;
88 // attach last nop to end node (so that firm doesn't discard it)
89 if(sched_env->last_nop != NULL) {
90 ir_node* end = get_irg_end(get_irn_irg(sched_env->block));
94 sched_env->block = NULL;
97 static int mips_scheduler_to_appear_in_schedule(void *block_env, const ir_node *irn)
99 return is_mips_irn(irn) && !is_mips_zero(irn) && !is_mips_reinterpret_conv(irn) && !is_mips_fallthrough(irn);
102 static void mips_collect_mflohis(pset* set, ir_node* node) {
103 // construct a list of nodes that need to be scheduled before
104 // we are allowed to schedule another div or mul instruction
105 const ir_edge_t *edge, *edge2;
107 if(is_mips_div(node)) {
108 foreach_out_edge(node, edge) {
109 const ir_node* node2 = get_edge_src_irn(edge);
111 assert(is_Proj(node2));
112 foreach_out_edge(node2, edge2) {
113 const ir_node* node3 = get_edge_src_irn(edge2);
114 if(is_mips_mfhi(node3) || is_mips_mflo(node3))
115 pset_insert_ptr(set, node3);
118 } else if(is_mips_mult(node)) {
119 foreach_out_edge(node, edge) {
120 const ir_node* node2 = get_edge_src_irn(edge);
122 if(is_mips_mfhi(node2) || is_mips_mflo(node2))
123 pset_insert_ptr(set, node2);
128 static int mips_scheduler_node_allowed(mips_sched_env_t *sched_env, ir_node* node)
130 if(pset_count(sched_env->div_set) != 0 && (is_mips_div(node) || is_mips_mult(node))) {
137 static ir_node *mips_scheduler_select(void *block_env, nodeset *ready_set, nodeset *live_set)
139 mips_sched_env_t *sched_env = (mips_sched_env_t*) block_env;
140 ir_node *node = NULL;
141 ir_node *block = sched_env->block;
142 ir_node *condjmp = NULL;
143 ir_graph *irg = get_irn_irg(block);
144 int have_non_branch_nodes = 0;
146 // test all nodes in the ready set and take the first non-branch that
148 for (node = nodeset_first(ready_set); node != NULL; node = nodeset_next(ready_set)) {
149 if (arch_irn_class_is(node, branch)) {
150 if (is_irn_forking(node))
155 have_non_branch_nodes = 1;
157 if (mips_scheduler_node_allowed(sched_env, node))
159 nodeset_break(ready_set);
161 // TODO update busy_registers
163 if (is_mips_div(node) || is_mips_mult(node)) {
164 mips_collect_mflohis(sched_env->div_set, node);
165 } else if(is_mips_mflo(node) || is_mips_mfhi(node)) {
166 pset_remove_ptr(sched_env->div_set, node);
173 // if we arrive here no non-branch node was found that we can emit
175 // return a branch if there are just branches left
176 if(!have_non_branch_nodes) {
177 // schedule conditional branches before non-conditional ones
178 if(condjmp != NULL) {
181 node = nodeset_first(ready_set);
182 assert(arch_irn_class_is(node, branch));
183 nodeset_break(ready_set);
188 node = new_rd_mips_nop(NULL, irg, block, mode_M);
196 int mips_to_appear_in_schedule(void *block_env, const ir_node *node)
200 if(!is_mips_irn(node))
202 if(is_mips_zero(node) || is_mips_Immediate(node))
/** The selector actually handed out to the backend: a copy of the generic
 *  base selector with to_appear_in_schedule() overridden (filled in
 *  mips_get_list_sched_selector()). */
list_sched_selector_t mips_selector;
211 * Returns the reg_pressure scheduler with to_appear_in_schedule() overloaded
213 const list_sched_selector_t *mips_get_list_sched_selector(const void *self,
214 list_sched_selector_t *selector)
218 memset(&mips_sched_selector, 0, sizeof(mips_sched_selector));
219 mips_sched_selector.init_graph = mips_scheduler_init_graph;
220 mips_sched_selector.init_block = mips_scheduler_init_block;
221 mips_sched_selector.select = mips_scheduler_select;
222 mips_sched_selector.to_appear_in_schedule = mips_scheduler_to_appear_in_schedule;
223 mips_sched_selector.finish_block = mips_scheduler_finish_block;
224 mips_sched_selector.finish_graph = mips_scheduler_finish_graph;
225 //return &mips_sched_selector;
227 memcpy(&mips_selector, selector, sizeof(mips_selector));
228 mips_selector.to_appear_in_schedule = mips_to_appear_in_schedule;
230 return &mips_selector;
233 const ilp_sched_selector_t *mips_get_ilp_sched_selector(const void *self)