2 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * @brief Use the strong normal form theorem (though it does not hold)
22 * @author Christoph Mallon
31 #include "besched_t.h"
32 #include "belistsched.h"
40 // XXX there is no one time init for schedulers
44 static const arch_env_t *cur_arch_env;
47 static ir_node *normal_select(void *block_env, ir_nodeset_t *ready_set,
48 ir_nodeset_t *live_set)
50 ir_nodeset_iterator_t iter;
59 ir_nodeset_iterator_init(&iter, ready_set);
60 irn = ir_nodeset_iterator_next(&iter);
61 block = get_nodes_block(irn);
62 sched = get_irn_link(block);
63 sched_count = ARR_LEN(sched);
64 for (; sched_count-- != 0; ++sched) {
65 ir_node* irn = *sched;
66 if (ir_nodeset_contains(ready_set, irn) &&
67 !arch_irn_class_is(cur_arch_env, irn, branch)) {
68 #if defined NORMAL_DBG
69 ir_fprintf(stderr, "scheduling %+F\n", irn);
79 typedef struct irn_cost_pair {
85 static int cost_cmp(const void* a, const void* b)
87 const irn_cost_pair* a1 = a;
88 const irn_cost_pair* b1 = b;
89 return b1->cost - a1->cost;
93 typedef struct flag_and_cost {
95 irn_cost_pair costs[];
99 static int count_result(const ir_node* irn)
101 const ir_mode* mode = get_irn_mode(irn);
105 !arch_irn_is(cur_arch_env, irn, ignore);
109 /* TODO high cost for store trees
113 static int normal_tree_cost(ir_node* irn)
115 flag_and_cost* fc = get_irn_link(irn);
116 ir_node* block = get_nodes_block(irn);
117 int arity = get_irn_arity(irn);
126 irn_cost_pair* costs;
129 fc = malloc(sizeof(*fc) + sizeof(*fc->costs) * arity);
133 for (i = 0; i < arity; ++i) {
134 ir_node* pred = get_irn_n(irn, i);
137 if (is_Phi(irn) || get_irn_mode(pred) == mode_M || is_Block(pred)) {
139 } else if (get_nodes_block(pred) != block) {
142 flag_and_cost* pred_fc;
144 cost = normal_tree_cost(pred);
145 if (be_is_Barrier(pred)) cost = 1; // XXX hack: the barrier causes all users to have a reguse of #regs
146 pred_fc = get_irn_link(pred);
147 pred_fc->no_root = 1;
148 #if defined NORMAL_DBG
149 ir_fprintf(stderr, "%+F says that %+F is no root\n", irn, pred);
154 costs[i].cost = cost;
156 if (cost > cost_max) {
159 } else if (cost == cost_max) {
164 qsort(costs, arity, sizeof(*costs), cost_cmp);
165 set_irn_link(irn, fc);
167 irn_cost_pair* costs = fc->costs;
171 cost_max = costs[0].cost;
173 for (i = 0; i < arity; ++i) {
174 if (costs[i].cost < cost_max) break;
181 for (i = 0; i < arity; ++i) {
182 if (get_irn_mode(fc->costs[i].irn) == mode_M) continue;
183 if (arch_irn_is(cur_arch_env, fc->costs[i].irn, ignore)) continue;
184 cost = MAX(fc->costs[i].cost + n_op_res, cost);
187 n_res = count_result(irn);
188 cost = MAX(n_res, cost);
190 #if defined NORMAL_DBG
191 ir_fprintf(stderr, "reguse of %+F is %d\n", irn, cost);
198 static void normal_cost_walker(ir_node* irn, void* env)
202 #if defined NORMAL_DBG
203 ir_fprintf(stderr, "cost walking node %+F\n", irn);
205 if (is_Block(irn)) return;
206 normal_tree_cost(irn);
210 static void collect_roots(ir_node* irn, void* env)
216 if (is_Block(irn)) return;
218 fc = get_irn_link(irn);
220 #if defined NORMAL_DBG
221 ir_fprintf(stderr, "%+F is %sroot\n", irn, fc->no_root ? "no " : "");
225 ir_node* block = get_nodes_block(irn);
226 ir_node** roots = get_irn_link(block);
228 roots = NEW_ARR_F(ir_node*, 0);
230 ARR_APP1(ir_node*, roots, irn);
231 set_irn_link(block, roots);
236 static ir_node** sched_node(ir_node** sched, ir_node* irn)
238 ir_node* block = get_nodes_block(irn);
239 flag_and_cost* fc = get_irn_link(irn);
240 irn_cost_pair* irns = fc->costs;
241 int arity = get_irn_arity(irn);
244 if (irn_visited(irn)) return sched;
246 if (is_End(irn)) return sched;
249 for (i = 0; i < arity; ++i) {
250 ir_node* pred = irns[i].irn;
251 if (get_nodes_block(pred) != block) continue;
252 if (get_irn_mode(pred) == mode_M) continue;
253 sched = sched_node(sched, pred);
257 mark_irn_visited(irn);
258 ARR_APP1(ir_node*, sched, irn);
263 static void normal_sched_block(ir_node* block, void* env)
265 ir_node** roots = get_irn_link(block);
267 irn_cost_pair* root_costs;
273 #if defined NORMAL_DBG
274 ir_fprintf(stderr, "sched walking block %+F\n", block);
278 #if defined NORMAL_DBG
279 fprintf(stderr, "has no roots\n");
284 root_count = ARR_LEN(roots);
285 NEW_ARR_A(irn_cost_pair, root_costs, root_count);
286 for (i = 0; i < root_count; ++i) {
287 root_costs[i].irn = roots[i];
288 root_costs[i].cost = normal_tree_cost(roots[i]);
290 qsort(root_costs, root_count, sizeof(*root_costs), cost_cmp);
292 sched = NEW_ARR_F(ir_node*, 0);
293 for (i = 0; i < root_count; ++i) {
294 ir_node* irn = root_costs[i].irn;
295 sched = sched_node(sched, irn);
297 set_irn_link(block, sched);
300 #if defined NORMAL_DBG
302 int n = ARR_LEN(sched);
305 ir_fprintf(stderr, "Scheduling of %+F:\n", block);
306 for (i = 0; i < n; ++i) {
307 ir_fprintf(stderr, " %+F\n", sched[i]);
309 fprintf(stderr, "\n");
315 static void *normal_init_graph(const list_sched_selector_t *vtab,
316 const be_irg_t *birg)
318 ir_graph* irg = be_get_birg_irg(birg);
322 cur_arch_env = be_get_birg_arch_env(birg);
326 irg_walk_graph(irg, normal_cost_walker, NULL, NULL);
327 irg_walk_graph(irg, collect_roots, NULL, NULL);
328 inc_irg_visited(irg);
329 irg_block_walk_graph(irg, normal_sched_block, NULL, NULL);
335 static void *normal_init_block(void *graph_env, ir_node *block)
344 const list_sched_selector_t normal_selector = {
348 NULL, /* to_appear_in_schedule */
349 NULL, /* node_ready */
350 NULL, /* node_selected */
353 NULL, /* finish_block */
354 NULL /* finish_graph */