2 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * @brief Use the strong normal form theorem (though it does not hold)
22 * @author Christoph Mallon
23 * @version $Id: beschedrand.c 14604 2007-06-18 14:07:07Z matze $
31 #include "besched_t.h"
32 #include "belistsched.h"
39 // XXX there is no one time init for schedulers
/* Architecture environment of the graph currently being scheduled.
 * File-scope mutable state: set in normal_init_graph and read by the
 * walker callbacks, because the selector interface offers no one-time
 * init hook (see XXX above). Not safe for concurrent scheduling. */
43 static const arch_env_t *cur_arch_env;
/*
 * Selector callback: choose the next node to emit from ready_set.
 * Walks the per-block schedule array precomputed by normal_sched_block
 * (stored in the block's link field) and returns the first entry that is
 * both ready and not a branch, so branches go last.
 * NOTE(review): this view is missing lines (declarations of irn, block,
 * sched, sched_count, and the function tail / fallback return) — do not
 * assume the visible lines are the whole function.
 */
46 static ir_node *normal_select(void *block_env, ir_nodeset_t *ready_set,
47 ir_nodeset_t *live_set)
49 ir_nodeset_iterator_t iter;
/* Any ready node serves to locate the current block and its schedule. */
58 ir_nodeset_iterator_init(&iter, ready_set);
59 irn = ir_nodeset_iterator_next(&iter);
60 block = get_nodes_block(irn);
61 sched = get_irn_link(block);
62 sched_count = ARR_LEN(sched);
63 for (; sched_count-- != 0; ++sched) {
64 ir_node* irn = *sched;
65 if (ir_nodeset_contains(ready_set, irn) &&
66 !arch_irn_class_is(cur_arch_env, irn, branch)) {
67 #if defined NORMAL_DBG
68 ir_fprintf(stderr, "scheduling %+F\n", irn);
/* Pairs a node with its computed scheduling cost (fields elided from this
 * view; usage below shows members .irn and .cost). */
78 typedef struct irn_cost_pair {
84 static int cost_cmp(const void* a, const void* b)
86 const irn_cost_pair* a1 = a;
87 const irn_cost_pair* b1 = b;
88 return b1->cost - a1->cost;
/* Per-node memo record stored in the node's link field by
 * normal_tree_cost: a no_root flag (member elided from this view; set in
 * normal_tree_cost, read in collect_roots) followed by a flexible array
 * of per-operand cost pairs, sorted descending by cost. */
92 typedef struct flag_and_cost {
94 	irn_cost_pair costs[];
/* Count the register results irn produces: the visible expression tests
 * the node's mode and excludes "ignore" nodes via arch_irn_is.
 * NOTE(review): intermediate lines (the mode checks before this
 * conjunction and the return) are elided from this view. */
98 static int count_result(const ir_node* irn)
100 const ir_mode* mode = get_irn_mode(irn);
104 !arch_irn_is(cur_arch_env, irn, ignore);
/*
 * Recursively compute a register-use cost for the expression tree rooted
 * at irn ("strong normal form" heuristic), memoizing the result in a
 * freshly malloc'd flag_and_cost record hung on the node's link field.
 * Phi inputs and memory (mode_M) operands add no cost; operands in other
 * blocks are handled in a separate branch; in-block operands are costed
 * recursively and marked no_root so collect_roots skips them. Operand
 * costs are sorted descending for the later scheduling walk.
 * NOTE(review): many lines are elided here (memo early-out, declarations
 * of i/cost/cost_max/count_max/n_res, branch bodies, the return), so the
 * comments below describe only what the visible lines establish.
 */
108 static int normal_tree_cost(ir_node* irn)
110 flag_and_cost* fc = get_irn_link(irn);
111 ir_node* block = get_nodes_block(irn);
112 int arity = get_irn_arity(irn);
119 irn_cost_pair* costs;
/* flag_and_cost ends in a flexible array member: allocate header plus
 * one irn_cost_pair per operand. malloc result is not checked here. */
122 fc = malloc(sizeof(*fc) + sizeof(*fc->costs) * arity);
126 for (i = 0; i < arity; ++i) {
127 ir_node* pred = get_irn_n(irn, i);
/* Phi inputs and memory dependencies are free. */
130 if (is_Phi(irn) || get_irn_mode(pred) == mode_M) {
132 } else if (get_nodes_block(pred) != block) {
135 flag_and_cost* pred_fc;
137 cost = normal_tree_cost(pred);
/* pred now has a memoized record; it is an interior tree node. */
138 pred_fc = get_irn_link(pred);
139 pred_fc->no_root = 1;
140 #if defined NORMAL_DBG
141 ir_fprintf(stderr, "%+F says that %+F is no root\n", irn, pred);
146 costs[i].cost = cost;
/* Track the maximum operand cost and (presumably, in elided lines) how
 * many operands tie for it — TODO confirm against full source. */
148 if (cost > cost_max) {
151 } else if (cost == cost_max) {
/* Descending sort by cost (cost_cmp). */
156 qsort(costs, arity, sizeof(*costs), cost_cmp);
157 set_irn_link(irn, fc);
159 irn_cost_pair* costs = fc->costs;
163 cost_max = costs[0].cost;
165 for (i = 0; i < arity; ++i) {
166 if (costs[i].cost < cost_max) break;
172 n_res = count_result(irn);
/* Final cost: at least the number of results produced; count_max is
 * presumably the tie count found above — verify in full source. */
176 cost = MAX(n_res, cost_max + count_max - 1);
179 #if defined NORMAL_DBG
180 ir_fprintf(stderr, "reguse of %+F is %d\n", irn, cost);
/*
 * Emit irn after its operand subtrees into the per-block schedule array
 * (block's link field), then clear irn's own link so the node cannot be
 * scheduled twice (the NULL check at the top makes re-entry a no-op).
 * NOTE(review): the link was stored as flag_and_cost* by
 * normal_tree_cost but is read here as irn_cost_pair* — either a type
 * mismatch or an elided reassignment; verify against the full source.
 * Several lines (loop/if closers, the guard before NEW_ARR_F) are
 * missing from this view.
 */
187 static void normal_tree_sched(ir_node* irn)
189 irn_cost_pair* irns = get_irn_link(irn);
190 int arity = get_irn_arity(irn);
/* Already scheduled (link cleared below on first visit). */
193 if (irns == NULL) return;
195 for (i = 0; i < arity; ++i) {
196 normal_tree_sched(irns[i].irn);
199 if (1) { // TODO check if node needs to be scheduled
200 ir_node* block = get_nodes_block(irn);
201 ir_node** sched = get_irn_link(block);
203 #if defined NORMAL_DBG
204 ir_fprintf(stderr, "scheduling %+F in array %p\n", irn, sched);
/* Lazily create the block's flexible schedule array on first append. */
208 sched = NEW_ARR_F(ir_node*, 0);
210 ARR_APP1(ir_node*, sched, irn);
211 set_irn_link(block, sched);
/* Mark irn as done. */
215 set_irn_link(irn, NULL);
/* irg_walk callback: compute the tree cost of every non-Block node
 * (side effect: populates each node's link with a flag_and_cost). */
219 static void normal_cost_walker(ir_node* irn, void* env)
223 #if defined NORMAL_DBG
224 ir_fprintf(stderr, "cost walking node %+F\n", irn);
226 if (is_Block(irn)) return;
227 normal_tree_cost(irn);
/*
 * irg_walk callback: append every node that normal_tree_cost did NOT
 * mark no_root (i.e. every expression-tree root) to its block's root
 * array, kept in the block's link field.
 * NOTE(review): the no_root test guarding the append and the array
 * lazy-init condition are on elided lines — fragment only.
 */
231 static void collect_roots(ir_node* irn, void* env)
237 if (is_Block(irn)) return;
239 fc = get_irn_link(irn);
241 #if defined NORMAL_DBG
242 ir_fprintf(stderr, "%+F is %sroot\n", irn, fc->no_root ? "no " : "");
246 ir_node* block = get_nodes_block(irn);
247 ir_node** roots = get_irn_link(block);
/* Lazily create the block's root array on first append. */
249 roots = NEW_ARR_F(ir_node*, 0);
251 ARR_APP1(ir_node*, roots, irn);
252 set_irn_link(block, roots);
/*
 * Post-order DFS: append irn's in-block operands to sched first, then
 * irn itself, and return the (possibly reallocated) flexible array.
 * Operands living in other blocks are skipped — they are scheduled with
 * their own block. NOTE(review): a visited/duplicate guard and the
 * return statement are on elided lines — confirm against full source.
 */
257 static ir_node** sched_node(ir_node** sched, ir_node* irn)
259 ir_node* block = get_nodes_block(irn);
260 int arity = get_irn_arity(irn);
264 for (i = 0; i < arity; ++i) {
265 ir_node* pred = get_irn_n(irn, i);
266 if (get_nodes_block(pred) != block) continue;
267 sched = sched_node(sched, pred);
271 ARR_APP1(ir_node*, sched, irn);
/*
 * Block walker: build the block's final schedule array.
 * Takes the roots collected by collect_roots (block link), costs and
 * sorts them descending (cost_cmp), then flattens each root's tree into
 * a single schedule via sched_node, storing the result back in the
 * block's link for normal_select to consume. A debug section dumps the
 * schedule, skipping duplicate entries.
 * NOTE(review): declarations (sched, i, j, root_count), the empty-roots
 * early return, and loop/#endif closers are elided from this view.
 */
276 static void normal_sched_block(ir_node* block, void* env)
278 ir_node** roots = get_irn_link(block);
280 irn_cost_pair* root_costs;
286 #if defined NORMAL_DBG
287 ir_fprintf(stderr, "sched walking block %+F\n", block);
291 #if defined NORMAL_DBG
292 fprintf(stderr, "has no roots\n");
297 root_count = ARR_LEN(roots);
/* NEW_ARR_A: stack (alloca-style) array, no explicit free needed. */
298 NEW_ARR_A(irn_cost_pair, root_costs, root_count);
299 for (i = 0; i < root_count; ++i) {
300 root_costs[i].irn = roots[i];
301 root_costs[i].cost = normal_tree_cost(roots[i]);
/* Most expensive roots first. */
303 qsort(root_costs, root_count, sizeof(*root_costs), cost_cmp);
305 sched = NEW_ARR_F(ir_node*, 0);
306 for (i = 0; i < root_count; ++i) {
307 ir_node* irn = root_costs[i].irn;
308 sched = sched_node(sched, irn);
/* Replace the root array with the finished schedule. */
310 set_irn_link(block, sched);
313 #if defined NORMAL_DBG
315 int n = ARR_LEN(sched);
318 ir_fprintf(stderr, "Scheduling of %+F:\n", block);
319 for (i = 0; i < n; ++i) {
/* Suppress duplicate entries in the debug dump only. */
321 for (j = 0; j < i; ++j) {
322 if (sched[i] == sched[j]) goto skip;
324 ir_fprintf(stderr, " %+F\n", sched[i]);
327 fprintf(stderr, "\n");
/*
 * Graph-level init hook: record the arch env in the file-scope global,
 * then run the three phases — per-node cost computation, root
 * collection, per-block schedule construction — via graph walks.
 * NOTE(review): the return value and any link-field setup/teardown are
 * on elided lines.
 */
333 static void *normal_init_graph(const list_sched_selector_t *vtab,
334 const be_irg_t *birg)
336 ir_graph* irg = be_get_birg_irg(birg);
340 cur_arch_env = be_get_birg_arch_env(birg);
344 irg_walk_graph(irg, normal_cost_walker, NULL, NULL);
345 irg_walk_graph(irg, collect_roots, NULL, NULL);
346 irg_block_walk_graph(irg, normal_sched_block, NULL, NULL);
/* Per-block init hook of the selector vtable; body elided from this view
 * (presumably returns no per-block state — confirm in full source). */
352 static void *normal_init_block(void *graph_env, ir_node *block)
/* Public vtable wiring this scheduler into the generic list scheduler.
 * Only optional callbacks set to NULL are visible here; the entries for
 * init_graph/init_block/select are on elided lines. */
361 const list_sched_selector_t normal_selector = {
365 NULL, /* to_appear_in_schedule */
366 NULL, /* node_ready */
367 NULL, /* node_selected */
370 NULL, /* finish_block */
371 NULL /* finish_graph */