2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * @brief Use the strong normal form theorem (though it does not hold)
22 * @author Christoph Mallon
31 #include "besched_t.h"
32 #include "belistsched.h"
40 // XXX there is no one time init for schedulers
/* Architecture environment of the graph currently being scheduled.
 * File-scope mutable state: written in normal_init_graph() and read by the
 * cost/select helpers below (arch_irn_is / arch_irn_class_is calls).
 * NOTE(review): assumes only one graph is scheduled at a time — not
 * thread-safe; confirm against the driver before parallelising. */
44 static const arch_env_t *cur_arch_env;
/*
 * List-scheduler "select" callback: choose the next ready node to emit.
 *
 * Visible behaviour: takes an arbitrary node from ready_set only to find the
 * current block, fetches the per-block schedule array precomputed by
 * normal_sched_block() (stored in the block's link field), and walks it in
 * order, returning the first entry that is both ready and not a branch.
 *
 * NOTE(review): this excerpt omits several original source lines (the
 * embedded numbers are the original file's line numbers) — the declarations
 * of irn/block/sched/sched_count, the loop close, and the fallback path
 * (presumably returning the branch last) are not visible here; confirm
 * against the full file.
 */
47 static ir_node *normal_select(void *block_env, ir_nodeset_t *ready_set,
48 ir_nodeset_t *live_set)
50 ir_nodeset_iterator_t iter;
/* Any ready node will do for discovering which block we are scheduling. */
59 ir_nodeset_iterator_init(&iter, ready_set);
60 irn = ir_nodeset_iterator_next(&iter);
61 block = get_nodes_block(irn);
/* Precomputed schedule order for this block, left in the block's link. */
62 sched = get_irn_link(block);
63 sched_count = ARR_LEN(sched);
64 for (; sched_count-- != 0; ++sched) {
/* Shadows the outer irn on purpose: candidate from the precomputed order. */
65 ir_node* irn = *sched;
/* Branches are deferred so they can close the block last. */
66 if (ir_nodeset_contains(ready_set, irn) &&
67 !arch_irn_class_is(cur_arch_env, irn, branch)) {
68 #if defined NORMAL_DBG
69 ir_fprintf(stderr, "scheduling %+F\n", irn);
/* Associates a node with its computed register-use cost so operand lists can
 * be sorted by cost (see cost_cmp / qsort below). Fields visible from usage:
 * "irn" (the node) and "cost" (int) — the declarations themselves fall on
 * omitted lines; confirm in the full file. */
79 typedef struct irn_cost_pair {
85 static int cost_cmp(const void* a, const void* b)
87 const irn_cost_pair* a1 = a;
88 const irn_cost_pair* b1 = b;
89 return b1->cost - a1->cost;
/* Per-node scheduling record, stored in each node's link field by
 * normal_tree_cost(). Carries a "no_root" flag (set on operands so
 * collect_roots() can tell tree roots from interior nodes — see its use
 * below) plus a flexible array member with one cost entry per operand.
 * Allocated as malloc(sizeof(*fc) + sizeof(*fc->costs) * arity).
 * NOTE(review): the no_root field's declaration falls on an omitted line. */
93 typedef struct flag_and_cost {
95 irn_cost_pair costs[];
/* Returns whether irn produces a result that occupies a register:
 * the visible tail of the expression excludes nodes the backend marks
 * "ignore" (they consume no allocatable register).
 * NOTE(review): the omitted lines presumably also exclude mode_M/mode_T/
 * control modes via the fetched mode — confirm against the full file. */
99 static int count_result(const ir_node* irn)
101 const ir_mode* mode = get_irn_mode(irn);
105 !arch_irn_is(cur_arch_env, irn, ignore);
/*
 * Recursively computes the register-use cost ("reguse") of the expression
 * tree rooted at irn — a Sethi–Ullman-style numbering, per the file's @brief
 * ("strong normal form theorem"). Results are memoised: a flag_and_cost
 * record is allocated once per node and cached in the node's link field.
 * As a side effect every same-block operand is flagged no_root, which
 * collect_roots() later uses to find the tree roots of each block.
 *
 * NOTE(review): many original lines are omitted in this excerpt (the early
 * memoisation return, cost accumulation branches, loop closes and the final
 * return); comments below only describe what is visible.
 */
109 static int normal_tree_cost(ir_node* irn)
111 flag_and_cost* fc = get_irn_link(irn);
112 ir_node* block = get_nodes_block(irn);
113 int arity = get_irn_arity(irn);
122 irn_cost_pair* costs;
/* First visit: allocate record + one cost slot per operand.
 * NOTE(review): malloc result is not checked before use, and no matching
 * free() is visible in this excerpt — possible leak/NULL-deref; verify. */
125 fc = malloc(sizeof(*fc) + sizeof(*fc->costs) * arity);
129 for (i = 0; i < arity; ++i) {
130 ir_node* pred = get_irn_n(irn, i);
/* Phi inputs, memory edges and block inputs do not contribute a
 * register-pressure cost. */
133 if (is_Phi(irn) || get_irn_mode(pred) == mode_M || is_Block(pred)) {
/* Operands living in another block are not part of this tree. */
135 } else if (get_nodes_block(pred) != block) {
138 flag_and_cost* pred_fc;
/* Same-block operand: recurse, then mark it as interior (no_root). */
140 cost = normal_tree_cost(pred);
141 if (be_is_Barrier(pred)) cost = 1; // XXX hack: the barrier causes all users to have a reguse of #regs
142 pred_fc = get_irn_link(pred);
143 pred_fc->no_root = 1;
144 #if defined NORMAL_DBG
145 ir_fprintf(stderr, "%+F says that %+F is no root\n", irn, pred);
150 costs[i].cost = cost;
/* Track the maximum operand cost (tie handling on omitted lines). */
152 if (cost > cost_max) {
155 } else if (cost == cost_max) {
/* Order operands by descending cost for the evaluation order. */
160 qsort(costs, arity, sizeof(*costs), cost_cmp);
161 set_irn_link(irn, fc);
163 irn_cost_pair* costs = fc->costs;
167 cost_max = costs[0].cost;
/* Count how many operands share the maximal cost. */
169 for (i = 0; i < arity; ++i) {
170 if (costs[i].cost < cost_max) break;
/* Combine operand costs with the number of simultaneously live results:
 * ignore register-free operands (memory edges, 'ignore' nodes). */
177 for (i = 0; i < arity; ++i) {
178 if (get_irn_mode(fc->costs[i].irn) == mode_M) continue;
179 if (arch_irn_is(cur_arch_env, fc->costs[i].irn, ignore)) continue;
180 cost = MAX(fc->costs[i].cost + n_op_res, cost);
/* A node needs at least as many registers as it produces results. */
183 n_res = count_result(irn);
184 cost = MAX(n_res, cost);
186 #if defined NORMAL_DBG
187 ir_fprintf(stderr, "reguse of %+F is %d\n", irn, cost);
/* irg_walk_graph() callback: compute (and cache) the tree cost of every
 * non-block node. env is unused here; blocks are skipped since only data
 * nodes carry a reguse cost. */
194 static void normal_cost_walker(ir_node* irn, void* env)
198 #if defined NORMAL_DBG
199 ir_fprintf(stderr, "cost walking node %+F\n", irn);
201 if (is_Block(irn)) return;
202 normal_tree_cost(irn);
/*
 * irg_walk_graph() callback: gather the tree ROOTS of every block.
 * A root is a node whose flag_and_cost record was never flagged no_root by
 * normal_tree_cost() (i.e. no same-block user treats it as an operand).
 * Roots are appended to a per-block growable array kept in the block's link.
 * NOTE(review): the guard that actually tests fc->no_root and the check for
 * a not-yet-allocated roots array fall on omitted lines.
 */
206 static void collect_roots(ir_node* irn, void* env)
212 if (is_Block(irn)) return;
214 fc = get_irn_link(irn);
216 #if defined NORMAL_DBG
217 ir_fprintf(stderr, "%+F is %sroot\n", irn, fc->no_root ? "no " : "");
221 ir_node* block = get_nodes_block(irn);
222 ir_node** roots = get_irn_link(block);
/* Lazily create the per-block roots array on first root found. */
224 roots = NEW_ARR_F(ir_node*, 0);
226 ARR_APP1(ir_node*, roots, irn);
227 set_irn_link(block, roots);
/*
 * Emit irn and (recursively, first) its same-block operands into the linear
 * schedule array, operands in the cost order precomputed by
 * normal_tree_cost() (fc->costs is already sorted by descending cost).
 * Uses the graph's visited mark to emit each node exactly once.
 *
 * @param sched  growable array (ARR_F) accumulating the schedule
 * @param irn    root of the subtree to append
 * @return       the (possibly reallocated) schedule array
 * NOTE(review): loop index declaration, loop close and the final
 * "return sched;" fall on omitted lines.
 */
232 static ir_node** sched_node(ir_node** sched, ir_node* irn)
234 ir_node* block = get_nodes_block(irn);
235 flag_and_cost* fc = get_irn_link(irn);
236 irn_cost_pair* irns = fc->costs;
237 int arity = get_irn_arity(irn);
/* Already placed earlier (shared subtree) — nothing to do. */
240 if (irn_visited(irn)) return sched;
/* The End node is never placed in a block schedule. */
242 if (is_End(irn)) return sched;
245 for (i = 0; i < arity; ++i) {
/* irns[] is cost-sorted, so the most expensive operand goes first. */
246 ir_node* pred = irns[i].irn;
247 if (get_nodes_block(pred) != block) continue;
248 if (get_irn_mode(pred) == mode_M) continue;
249 sched = sched_node(sched, pred);
/* All operands placed — now the node itself. */
253 mark_irn_visited(irn);
254 ARR_APP1(ir_node*, sched, irn);
/*
 * irg_block_walk_graph() callback: build the linear schedule of one block.
 * Reads the roots collected by collect_roots() from the block's link,
 * sorts them by descending tree cost, emits each root's subtree via
 * sched_node(), and stores the finished schedule array back into the
 * block's link (where normal_select() later reads it).
 * NOTE(review): declarations of sched/i/root_count, the empty-roots early
 * return, loop closes and any cleanup of the roots array fall on omitted
 * lines of this excerpt.
 */
259 static void normal_sched_block(ir_node* block, void* env)
261 ir_node** roots = get_irn_link(block);
263 irn_cost_pair* root_costs;
269 #if defined NORMAL_DBG
270 ir_fprintf(stderr, "sched walking block %+F\n", block);
/* Blocks without roots (e.g. only Phis/control) get no schedule array. */
274 #if defined NORMAL_DBG
275 fprintf(stderr, "has no roots\n");
280 root_count = ARR_LEN(roots);
/* Alloca-style temp array: one (root, cost) pair per root. */
281 NEW_ARR_A(irn_cost_pair, root_costs, root_count);
282 for (i = 0; i < root_count; ++i) {
283 root_costs[i].irn = roots[i];
/* normal_tree_cost() is memoised, so this only fetches cached costs. */
284 root_costs[i].cost = normal_tree_cost(roots[i]);
/* Most expensive root first — same comparator as operand ordering. */
286 qsort(root_costs, root_count, sizeof(*root_costs), cost_cmp);
288 sched = NEW_ARR_F(ir_node*, 0);
289 for (i = 0; i < root_count; ++i) {
290 ir_node* irn = root_costs[i].irn;
291 sched = sched_node(sched, irn);
/* Replace the roots array in the block link with the final schedule. */
293 set_irn_link(block, sched);
296 #if defined NORMAL_DBG
298 int n = ARR_LEN(sched);
301 ir_fprintf(stderr, "Scheduling of %+F:\n", block);
302 for (i = 0; i < n; ++i) {
303 ir_fprintf(stderr, " %+F\n", sched[i]);
305 fprintf(stderr, "\n");
/*
 * Scheduler init hook: precompute everything normal_select() needs.
 * Pipeline: (1) cost-walk every node to cache reguse costs and no_root
 * flags, (2) collect per-block root arrays, (3) reset visited marks, then
 * (4) turn each block's roots into a linear schedule stored in the block
 * link. Also latches the backend arch env into the cur_arch_env global.
 * NOTE(review): link-field reservation, visited-flag bookkeeping and the
 * return value of this hook fall on omitted lines; vtab is not used in the
 * visible portion.
 */
311 static void *normal_init_graph(const list_sched_selector_t *vtab,
312 const be_irg_t *birg)
314 ir_graph* irg = be_get_birg_irg(birg);
318 cur_arch_env = be_get_birg_arch_env(birg);
322 irg_walk_graph(irg, normal_cost_walker, NULL, NULL);
323 irg_walk_graph(irg, collect_roots, NULL, NULL);
/* Fresh visited epoch so sched_node()'s marks start clean. */
324 inc_irg_visited(irg);
325 irg_block_walk_graph(irg, normal_sched_block, NULL, NULL);
/* Per-block init hook of the selector vtable.
 * NOTE(review): the entire body falls on omitted lines of this excerpt —
 * presumably a trivial pass-through of graph_env; confirm in the full file. */
331 static void *normal_init_block(void *graph_env, ir_node *block)
/* Public vtable wiring this scheduler into the generic list scheduler
 * (belistsched). Unused callbacks are NULL; the entries binding
 * normal_init_graph / normal_init_block / normal_select fall on lines
 * omitted from this excerpt. */
340 const list_sched_selector_t normal_selector = {
344 NULL, /* to_appear_in_schedule */
345 NULL, /* node_ready */
346 NULL, /* node_selected */
349 NULL, /* finish_block */
350 NULL /* finish_graph */