/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Use the strong normal form theorem (though it does not hold)
 * @author  Christoph Mallon
 */
#include <assert.h>
#include <stdlib.h>

#include "belistsched.h"
39 // XXX there is no one time init for schedulers
43 /** An instance of the normal scheduler. */
44 typedef struct instance_t {
45 ir_graph* irg; /**< the IR graph of this instance */
46 struct obstack obst; /**< obstack for temporary data */
47 ir_node* curr_list; /**< current block schedule list */
50 static int must_be_scheduled(const ir_node* const irn)
52 return !is_Proj(irn) && !is_Sync(irn);
56 static ir_node *normal_select(void *block_env, ir_nodeset_t *ready_set)
58 instance_t* inst = (instance_t*)block_env;
62 ir_nodeset_iterator_t iter;
64 for (irn = inst->curr_list; irn != NULL; last = irn, irn = next) {
65 next = (ir_node*)get_irn_link(irn);
66 if (ir_nodeset_contains(ready_set, irn)) {
67 #if defined NORMAL_DBG
68 ir_fprintf(stderr, "scheduling %+F\n", irn);
71 inst->curr_list = next;
73 set_irn_link(last, next);
78 ir_nodeset_iterator_init(&iter, ready_set);
79 irn = ir_nodeset_iterator_next(&iter);
84 typedef struct irn_cost_pair {
89 static int cost_cmp(const void* a, const void* b)
91 const irn_cost_pair* const a1 = (const irn_cost_pair*)a;
92 const irn_cost_pair* const b1 = (const irn_cost_pair*)b;
93 int ret = b1->cost - a1->cost;
95 ret = (int)get_irn_idx(a1->irn) - (int)get_irn_idx(b1->irn);
96 #if defined NORMAL_DBG
97 ir_fprintf(stderr, "cost %+F %s %+F\n", a1->irn, ret < 0 ? "<" : ret > 0 ? ">" : "=", b1->irn);
103 typedef struct flag_and_cost {
105 irn_cost_pair costs[];
108 #define get_irn_fc(irn) ((flag_and_cost*)get_irn_link(irn))
109 #define set_irn_fc(irn, fc) set_irn_link(irn, fc)
112 static int count_result(const ir_node* irn)
114 const ir_mode* mode = get_irn_mode(irn);
116 if (mode == mode_M || mode == mode_X)
122 if (arch_get_irn_register_req(irn)->type & arch_register_req_type_ignore)
129 /* TODO high cost for store trees
132 static int normal_tree_cost(ir_node* irn, instance_t *inst)
146 return normal_tree_cost(get_Proj_pred(irn), inst);
149 arity = get_irn_arity(irn);
150 fc = get_irn_fc(irn);
153 irn_cost_pair* costs;
154 ir_node* block = get_nodes_block(irn);
156 fc = OALLOCF(&inst->obst, flag_and_cost, costs, arity);
160 for (i = 0; i < arity; ++i) {
161 ir_node* pred = get_irn_n(irn, i);
163 if (is_Phi(irn) || get_irn_mode(pred) == mode_M || is_Block(pred)) {
165 } else if (get_nodes_block(pred) != block) {
168 flag_and_cost* pred_fc;
171 cost = normal_tree_cost(pred, inst);
172 if (!arch_irn_is_ignore(pred)) {
173 real_pred = (is_Proj(pred) ? get_Proj_pred(pred) : pred);
174 pred_fc = get_irn_fc(real_pred);
175 pred_fc->no_root = 1;
176 #if defined NORMAL_DBG
177 ir_fprintf(stderr, "%+F says that %+F is no root\n", irn, real_pred);
183 costs[i].cost = cost;
186 qsort(costs, arity, sizeof(*costs), cost_cmp);
187 set_irn_link(irn, fc);
192 for (i = 0; i < arity; ++i) {
193 ir_node* op = fc->costs[i].irn;
197 mode = get_irn_mode(op);
200 if (arch_get_irn_flags(op) & arch_irn_flags_not_scheduled)
202 if (mode != mode_T && arch_irn_is_ignore(op))
204 cost = MAX(fc->costs[i].cost + n_op_res, cost);
208 n_res = count_result(irn);
209 cost = MAX(n_res, cost);
211 #if defined NORMAL_DBG
212 ir_fprintf(stderr, "reguse of %+F is %d\n", irn, cost);
219 static void normal_cost_walker(ir_node* irn, void* env)
221 instance_t *inst = (instance_t*)env;
223 #if defined NORMAL_DBG
224 ir_fprintf(stderr, "cost walking node %+F\n", irn);
226 if (is_Block(irn)) return;
227 if (!must_be_scheduled(irn)) return;
228 normal_tree_cost(irn, inst);
232 static void collect_roots(ir_node* irn, void* env)
238 if (is_Block(irn)) return;
239 if (!must_be_scheduled(irn)) return;
241 is_root = be_is_Keep(irn) || !get_irn_fc(irn)->no_root;
243 #if defined NORMAL_DBG
244 ir_fprintf(stderr, "%+F is %sroot\n", irn, is_root ? "" : "no ");
248 ir_node* block = get_nodes_block(irn);
249 ir_node** roots = (ir_node**)get_irn_link(block);
251 roots = NEW_ARR_F(ir_node*, 0);
253 ARR_APP1(ir_node*, roots, irn);
254 set_irn_link(block, roots);
259 static ir_node** sched_node(ir_node** sched, ir_node* irn)
261 if (irn_visited_else_mark(irn)) return sched;
262 if (is_End(irn)) return sched;
264 if (!is_Phi(irn) && !be_is_Keep(irn)) {
265 ir_node* block = get_nodes_block(irn);
266 int arity = get_irn_arity(irn);
267 flag_and_cost* fc = get_irn_fc(irn);
268 irn_cost_pair* irns = fc->costs;
271 for (i = 0; i < arity; ++i) {
272 ir_node* pred = irns[i].irn;
273 if (get_nodes_block(pred) != block) continue;
274 if (get_irn_mode(pred) == mode_M) continue;
275 if (is_Proj(pred)) pred = get_Proj_pred(pred);
276 sched = sched_node(sched, pred);
280 ARR_APP1(ir_node*, sched, irn);
285 static int root_cmp(const void* a, const void* b)
287 const irn_cost_pair* const a1 = (const irn_cost_pair*)a;
288 const irn_cost_pair* const b1 = (const irn_cost_pair*)b;
290 if (is_irn_forking(a1->irn) && !is_irn_forking(b1->irn)) {
292 } else if (is_irn_forking(b1->irn) && !is_irn_forking(a1->irn)) {
295 ret = b1->cost - a1->cost;
297 /* place live-out nodes later */
298 ret = (count_result(a1->irn) != 0) - (count_result(b1->irn) != 0);
300 /* compare node idx */
301 ret = get_irn_idx(a1->irn) - get_irn_idx(b1->irn);
305 #if defined NORMAL_DBG
306 ir_fprintf(stderr, "root %+F %s %+F\n", a1->irn, ret < 0 ? "<" : ret > 0 ? ">" : "=", b1->irn);
312 static void normal_sched_block(ir_node* block, void* env)
314 ir_node** roots = (ir_node**)get_irn_link(block);
315 ir_heights_t* heights = (ir_heights_t*)env;
317 irn_cost_pair* root_costs;
321 #if defined NORMAL_DBG
322 ir_fprintf(stderr, "sched walking block %+F\n", block);
326 #if defined NORMAL_DBG
327 fprintf(stderr, "has no roots\n");
332 root_count = ARR_LEN(roots);
333 NEW_ARR_A(irn_cost_pair, root_costs, root_count);
334 for (i = 0; i < root_count; ++i) {
335 root_costs[i].irn = roots[i];
336 root_costs[i].cost = get_irn_height(heights, roots[i]);
337 #if defined NORMAL_DBG
338 ir_fprintf(stderr, "height of %+F is %u\n", roots[i], root_costs[i].cost);
341 qsort(root_costs, root_count, sizeof(*root_costs), root_cmp);
342 #if defined NORMAL_DBG
347 ir_fprintf(stderr, "Root Scheduling of %+F:\n", block);
348 for (i = 0; i < n; ++i) {
349 ir_fprintf(stderr, " %+F\n", root_costs[i].irn);
351 fprintf(stderr, "\n");
355 sched = NEW_ARR_F(ir_node*, 0);
356 for (i = 0; i < root_count; ++i) {
357 ir_node* irn = root_costs[i].irn;
358 assert(must_be_scheduled(irn));
359 sched = sched_node(sched, irn);
361 set_irn_link(block, sched);
364 #if defined NORMAL_DBG
366 int n = ARR_LEN(sched);
369 ir_fprintf(stderr, "Scheduling of %+F:\n", block);
370 for (i = 0; i < n; ++i) {
371 ir_fprintf(stderr, " %+F\n", sched[i]);
373 fprintf(stderr, "\n");
379 static void *normal_init_graph(ir_graph *irg)
381 instance_t *inst = XMALLOC(instance_t);
382 ir_heights_t *heights;
386 obstack_init(&inst->obst);
389 heights = heights_new(irg);
391 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
392 irg_walk_graph(irg, normal_cost_walker, NULL, inst);
393 irg_walk_graph(irg, collect_roots, NULL, NULL);
394 inc_irg_visited(irg);
395 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
396 irg_block_walk_graph(irg, normal_sched_block, NULL, heights);
397 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
399 heights_free(heights);
404 static void *normal_init_block(void *graph_env, ir_node *block)
406 instance_t* inst = (instance_t*)graph_env;
407 ir_node** sched = (ir_node**)get_irn_link(block);
408 ir_node* first = NULL;
411 /* turn into a list, so we can easily remove nodes.
412 The link field is used anyway. */
413 for (i = ARR_LEN(sched) - 1; i >= 0; --i) {
414 ir_node* irn = sched[i];
416 set_irn_link(irn, first);
420 /* note: we can free sched here, there should be no attempt to schedule
423 set_irn_link(block, sched);
424 inst->curr_list = first;
428 static void normal_finish_graph(void *env)
430 instance_t *inst = (instance_t*)env;
432 /* block uses the link field to store the schedule */
433 ir_free_resources(inst->irg, IR_RESOURCE_IRN_LINK);
434 obstack_free(&inst->obst, NULL);
438 static void sched_normal(ir_graph *irg)
440 static const list_sched_selector_t normal_selector = {
444 NULL, /* node_ready */
445 NULL, /* node_selected */
446 NULL, /* finish_block */
449 be_list_sched_graph(irg, &normal_selector);
452 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_sched_normal)
453 void be_init_sched_normal(void)
455 be_register_scheduler("normal", sched_normal);