2 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
21 * @brief Use the strong normal form theorem (though it does not hold)
22 * @author Christoph Mallon
31 #include "besched_t.h"
32 #include "belistsched.h"
39 // XXX there is no one time init for schedulers
/* Architecture environment of the graph currently being scheduled.
 * Set once per irg in normal_init_graph() and read by the cost and
 * selection helpers below (module-global because the scheduler
 * callbacks have no per-graph init hook for it — see XXX above). */
43 static const arch_env_t *cur_arch_env;
/**
 * Selector callback for the list scheduler: pick the next node to issue.
 *
 * The per-block schedule order was precomputed by normal_sched_block()
 * and stored as a flexible array in the block's link field; this scans
 * that order and returns the first node that is both in ready_set and
 * not a branch, so branch nodes are deferred to the end of the block.
 */
46 static ir_node *normal_select(void *block_env, ir_nodeset_t *ready_set,
47 ir_nodeset_t *live_set)
49 ir_nodeset_iterator_t iter;
/* Any ready node will do to identify the current block. */
58 ir_nodeset_iterator_init(&iter, ready_set);
59 irn = ir_nodeset_iterator_next(&iter);
60 block = get_nodes_block(irn);
/* Precomputed schedule array hangs on the block's link field. */
61 sched = get_irn_link(block);
62 sched_count = ARR_LEN(sched);
/* Walk the precomputed order; take the first ready non-branch node. */
63 for (; sched_count-- != 0; ++sched) {
64 ir_node* irn = *sched;
65 if (ir_nodeset_contains(ready_set, irn) &&
66 !arch_irn_class_is(cur_arch_env, irn, branch)) {
67 #if defined NORMAL_DBG
68 ir_fprintf(stderr, "scheduling %+F\n", irn);
/* Pairs a node with its register-use cost as computed by
 * normal_tree_cost(); arrays of these are sorted with cost_cmp(). */
78 typedef struct irn_cost_pair {
84 static int cost_cmp(const void* a, const void* b)
86 const irn_cost_pair* a1 = a;
87 const irn_cost_pair* b1 = b;
88 return b1->cost - a1->cost;
/* Per-node scheduling record, stored in the node's link field by
 * normal_tree_cost(): flags (e.g. no_root) followed by a flexible
 * array member holding one cost pair per operand, kept sorted by
 * descending cost. */
92 typedef struct flag_and_cost {
/* one entry per operand; sorted with cost_cmp() */
94 irn_cost_pair costs[];
/* Return whether irn produces a register result: its mode must be a
 * value mode and the backend must not mark the node "ignore".
 * Used as the floor for a node's register-use cost. */
98 static int count_result(const ir_node* irn)
100 const ir_mode* mode = get_irn_mode(irn);
104 !arch_irn_is(cur_arch_env, irn, ignore);
/**
 * Recursively compute the register-use cost ("reguse", in the sense of
 * the Sethi-Ullman / strong-normal-form numbering) of the expression
 * tree rooted at irn, memoized via the node's link field.
 *
 * Side effects: allocates and attaches a flag_and_cost record to irn,
 * sorts its operand costs descending, and marks same-block operands as
 * non-roots (their values are consumed inside this tree).
 */
108 static int normal_tree_cost(ir_node* irn)
110 flag_and_cost* fc = get_irn_link(irn);
111 ir_node* block = get_nodes_block(irn);
112 int arity = get_irn_arity(irn);
121 irn_cost_pair* costs;
/* NOTE(review): malloc result is not checked before use — TODO confirm
 * the project's OOM policy (an xmalloc wrapper may be intended here). */
124 fc = malloc(sizeof(*fc) + sizeof(*fc->costs) * arity);
/* First visit: compute a cost for every operand. */
128 for (i = 0; i < arity; ++i) {
129 ir_node* pred = get_irn_n(irn, i);
/* Phi inputs, memory edges and block inputs carry no register cost. */
132 if (is_Phi(irn) || get_irn_mode(pred) == mode_M || is_Block(pred)) {
134 } else if (get_nodes_block(pred) != block) {
137 flag_and_cost* pred_fc;
/* Same-block operand: recurse, then mark it as consumed here. */
139 cost = normal_tree_cost(pred);
140 if (be_is_Barrier(pred)) cost = 1; // XXX hack: the barrier causes all users to have a reguse of #regs
141 pred_fc = get_irn_link(pred);
142 pred_fc->no_root = 1;
143 #if defined NORMAL_DBG
144 ir_fprintf(stderr, "%+F says that %+F is no root\n", irn, pred);
149 costs[i].cost = cost;
/* Track the maximum operand cost and how often it occurs. */
151 if (cost > cost_max) {
154 } else if (cost == cost_max) {
/* Evaluate most expensive operands first (descending order). */
159 qsort(costs, arity, sizeof(*costs), cost_cmp);
160 set_irn_link(irn, fc);
/* Memoized path: costs were already computed on an earlier visit. */
162 irn_cost_pair* costs = fc->costs;
166 cost_max = costs[0].cost;
168 for (i = 0; i < arity; ++i) {
169 if (costs[i].cost < cost_max) break;
/* Combine operand costs: the i-th evaluated operand must coexist with
 * the results of the operands evaluated before it (n_op_res of them). */
176 for (i = 0; i < arity; ++i) {
177 if (get_irn_mode(fc->costs[i].irn) == mode_M) continue;
178 if (arch_irn_is(cur_arch_env, fc->costs[i].irn, ignore)) continue;
179 cost = MAX(fc->costs[i].cost + n_op_res, cost);
/* A node needs at least as many registers as it produces results. */
182 n_res = count_result(irn);
183 cost = MAX(n_res, cost);
185 #if defined NORMAL_DBG
186 ir_fprintf(stderr, "reguse of %+F is %d\n", irn, cost);
/* irg_walk_graph() callback: compute the tree cost of every non-block
 * node (blocks carry no cost and are skipped). */
193 static void normal_cost_walker(ir_node* irn, void* env)
197 #if defined NORMAL_DBG
198 ir_fprintf(stderr, "cost walking node %+F\n", irn);
200 if (is_Block(irn)) return;
201 normal_tree_cost(irn);
/* irg_walk_graph() callback: gather every node still flagged as a tree
 * root (no_root not set by normal_tree_cost) into a per-block flexible
 * array stored in the block's link field. */
205 static void collect_roots(ir_node* irn, void* env)
211 if (is_Block(irn)) return;
/* flag_and_cost record was attached by normal_tree_cost(). */
213 fc = get_irn_link(irn);
215 #if defined NORMAL_DBG
216 ir_fprintf(stderr, "%+F is %sroot\n", irn, fc->no_root ? "no " : "");
220 ir_node* block = get_nodes_block(irn);
221 ir_node** roots = get_irn_link(block);
/* First root of this block: start a fresh array. */
223 roots = NEW_ARR_F(ir_node*, 0);
225 ARR_APP1(ir_node*, roots, irn);
226 set_irn_link(block, roots);
/**
 * Depth-first emit of the expression tree rooted at irn into the
 * growing schedule array: operands are emitted in their precomputed
 * (descending-cost) order before the node itself.  Returns the
 * (possibly reallocated) schedule array.  Visited nodes and End are
 * skipped; operands from other blocks and memory operands are not
 * scheduled here.
 */
231 static ir_node** sched_node(ir_node** sched, ir_node* irn)
233 ir_node* block = get_nodes_block(irn);
234 flag_and_cost* fc = get_irn_link(irn);
/* Operand order as sorted by normal_tree_cost(). */
235 irn_cost_pair* irns = fc->costs;
236 int arity = get_irn_arity(irn);
239 if (irn_visited(irn)) return sched;
241 if (is_End(irn)) return sched;
/* Recurse into same-block, non-memory operands first (postorder). */
244 for (i = 0; i < arity; ++i) {
245 ir_node* pred = irns[i].irn;
246 if (get_nodes_block(pred) != block) continue;
247 if (get_irn_mode(pred) == mode_M) continue;
248 sched = sched_node(sched, pred);
/* All operands placed: append the node itself. */
252 mark_irn_visited(irn);
253 ARR_APP1(ir_node*, sched, irn);
258 static void normal_sched_block(ir_node* block, void* env)
260 ir_node** roots = get_irn_link(block);
262 irn_cost_pair* root_costs;
268 #if defined NORMAL_DBG
269 ir_fprintf(stderr, "sched walking block %+F\n", block);
273 #if defined NORMAL_DBG
274 fprintf(stderr, "has no roots\n");
279 root_count = ARR_LEN(roots);
280 NEW_ARR_A(irn_cost_pair, root_costs, root_count);
281 for (i = 0; i < root_count; ++i) {
282 root_costs[i].irn = roots[i];
283 root_costs[i].cost = normal_tree_cost(roots[i]);
285 qsort(root_costs, root_count, sizeof(*root_costs), cost_cmp);
287 sched = NEW_ARR_F(ir_node*, 0);
288 for (i = 0; i < root_count; ++i) {
289 ir_node* irn = root_costs[i].irn;
290 sched = sched_node(sched, irn);
292 set_irn_link(block, sched);
295 #if defined NORMAL_DBG
297 int n = ARR_LEN(sched);
300 ir_fprintf(stderr, "Scheduling of %+F:\n", block);
301 for (i = 0; i < n; ++i) {
302 ir_fprintf(stderr, " %+F\n", sched[i]);
304 fprintf(stderr, "\n");
/**
 * Per-graph init hook for this scheduler: remember the arch env, then
 * run the three phases — cost computation, root collection, and
 * per-block schedule construction (with a fresh visited epoch for the
 * depth-first emission in sched_node()).
 */
310 static void *normal_init_graph(const list_sched_selector_t *vtab,
311 const be_irg_t *birg)
313 ir_graph* irg = be_get_birg_irg(birg);
317 cur_arch_env = be_get_birg_arch_env(birg);
/* Phase 1: compute tree costs; phase 2: collect roots per block. */
321 irg_walk_graph(irg, normal_cost_walker, NULL, NULL);
322 irg_walk_graph(irg, collect_roots, NULL, NULL);
/* Phase 3: build each block's schedule (sched_node uses visited marks). */
323 inc_irg_visited(irg);
324 irg_block_walk_graph(irg, normal_sched_block, NULL, NULL);
/* Per-block init hook of the selector interface; no per-block state is
 * needed here beyond what normal_init_graph() already prepared. */
330 static void *normal_init_block(void *graph_env, ir_node *block)
/* Selector vtable wiring this scheduler into the generic list
 * scheduler; unused optional callbacks are left NULL. */
339 const list_sched_selector_t normal_selector = {
343 NULL, /* to_appear_in_schedule */
344 NULL, /* node_ready */
345 NULL, /* node_selected */
348 NULL, /* finish_block */
349 NULL /* finish_graph */