2 * This file is part of libFirm.
3 * Copyright (C) 2012 University of Karlsruhe.
8 * @brief Block-scheduling strategies.
9 * @author Matthias Braun, Christoph Mallon
12 * The greedy (and the ILP) algorithm here work by assuming that we want to
13 * change as many jumps into fallthroughs as possible (more precisely: as many
14 * *executed* jumps, so execution frequencies have to be considered). The algorithms
15 * do this by collecting execution frequencies of all branches (which is easily
16 * possible when all critical edges are split) then removes critical edges where
17 * possible as we don't need and want them anymore now. The algorithms then try
18 * to change as many edges to fallthroughs as possible, this is done by setting
19 * a next and prev pointers on blocks. The greedy algorithm sorts the edges by
20 * execution frequencies and tries to transform them to fallthroughs in this order
24 #include "beblocksched.h"
34 #include "irgraph_t.h"
48 #include "lc_opts_enum.h"
53 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/** The available block-scheduling strategies. */
55 typedef enum blocksched_algos_t {
56 BLOCKSCHED_NAIV, BLOCKSCHED_GREEDY, BLOCKSCHED_ILP
/* Currently selected algorithm; the greedy heuristic is the default. */
59 static int algo = BLOCKSCHED_GREEDY;
/* Maps the command-line option strings to the algorithm ids above. */
61 static const lc_opt_enum_int_items_t blockschedalgo_items[] = {
62 { "naiv", BLOCKSCHED_NAIV },
63 { "greedy", BLOCKSCHED_GREEDY },
64 { "ilp", BLOCKSCHED_ILP },
/* Glue record tying the "algo" variable to the enum items above. */
68 static lc_opt_enum_int_var_t algo_var = {
69 &algo, blockschedalgo_items
/* Option table registered with the backend ("be") option group in
 * be_init_blocksched(). */
72 static const lc_opt_table_entry_t be_blocksched_options[] = {
73 LC_OPT_ENT_ENUM_INT ("blockscheduler", "the block scheduling algorithm", &algo_var),
79 * / ___|_ __ ___ ___ __| |_ _
80 * | | _| '__/ _ \/ _ \/ _` | | | |
81 * | |_| | | | __/ __/ (_| | |_| |
82 * \____|_| \___|\___|\__,_|\__, |
/** Per-block scheduling record; blocks coalesced into a fallthrough chain
 * are linked via next/prev. The record is attached to its ir_node through
 * the node's link field (see set_irn_link in collect_egde_frequency). */
86 typedef struct blocksched_entry_t blocksched_entry_t;
87 struct blocksched_entry_t {
89 blocksched_entry_t *next;
90 blocksched_entry_t *prev;
/** A control-flow edge between two blocks, annotated with the data the
 * greedy coalescing passes sort on. */
93 typedef struct edge_t edge_t;
95 ir_node *block; /**< source block */
96 int pos; /**< number of cfg predecessor (target) */
97 double execfreq; /**< the frequency */
98 double outedge_penalty_freq; /**< for edges leaving the loop this is the
99 penalty when we make them a
101 int highest_execfreq; /**< flag that indicates whether this edge is
102 the edge with the highest execfreq pointing
103 away from this block */
/** Shared state of the block-scheduling passes (obstack, edge array,
 * worklist, ...; the field list is declared below). */
106 typedef struct blocksched_env_t blocksched_env_t;
107 struct blocksched_env_t {
115 static blocksched_entry_t* get_blocksched_entry(const ir_node *block)
117 return (blocksched_entry_t*)get_irn_link(block);
121 * Collect cfg frequencies of all edges between blocks.
122 * Also determines edge with highest frequency.
 *
 * Block-walker callback: allocates the blocksched_entry_t for @p block and
 * appends one edge_t per cfg predecessor to env->edges.
 * NOTE(review): function name carries a typo ("egde"); renaming would touch
 * the walker registration sites, so it is only flagged here.
124 static void collect_egde_frequency(ir_node *block, void *data)
126 blocksched_env_t *env = (blocksched_env_t*)data;
129 blocksched_entry_t *entry;
/* edge is reused for every appended record, so clear all fields first. */
132 memset(&edge, 0, sizeof(edge));
/* Allocate the (zeroed) scheduling entry and attach it to the block. */
134 entry = OALLOCZ(&env->obst, blocksched_entry_t);
135 entry->block = block;
136 set_irn_link(block, entry);
138 loop = get_irn_loop(block);
140 arity = get_Block_n_cfgpreds(block);
/* arity == 0: no incoming control flow. */
143 /* must be the start block (or end-block for endless loops),
144 * everything else is dead code and should be removed by now */
145 assert(block == get_irg_start_block(env->irg)
146 || block == get_irg_end_block(env->irg));
147 /* nothing to do here */
/* arity == 1: single predecessor, record the one edge. */
149 } else if (arity == 1) {
150 ir_node *pred_block = get_Block_cfgpred_block(block, 0);
151 ir_loop *pred_loop = get_irn_loop(pred_block);
/* NOTE(review): execution frequencies are doubles but are truncated to
 * float here (and for pred_freq below) — presumably harmless precision
 * loss, but confirm before relying on exact frequency comparisons. */
152 float freq = (float)get_block_execfreq(block);
154 /* is it an edge leaving a loop */
155 if (get_loop_depth(pred_loop) > get_loop_depth(loop)) {
156 float pred_freq = (float)get_block_execfreq(pred_block);
/* Penalty is negative; more negative = worse to NOT make this a
 * fallthrough (see cmp_edges_outedge_penalty). */
157 edge.outedge_penalty_freq = -(pred_freq - freq);
162 edge.execfreq = freq;
/* A single out-edge of the predecessor is trivially its hottest one. */
163 edge.highest_execfreq = 1;
164 ARR_APP1(edge_t, env->edges, edge);
/* arity > 1: record every incoming edge and remember the hottest one. */
167 double highest_execfreq = -1.0;
168 int highest_edge_num = -1;
171 for (i = 0; i < arity; ++i) {
173 ir_node *pred_block = get_Block_cfgpred_block(block, i);
175 execfreq = get_block_execfreq(pred_block);
178 edge.execfreq = execfreq;
179 edge.highest_execfreq = 0;
180 ARR_APP1(edge_t, env->edges, edge);
182 if (execfreq > highest_execfreq) {
183 highest_execfreq = execfreq;
184 highest_edge_num = ARR_LEN(env->edges) - 1;
/* Flag the hottest incoming edge after the loop (if any edge survived). */
188 if (highest_edge_num >= 0)
189 env->edges[highest_edge_num].highest_execfreq = 1;
/**
 * Deterministic tie-breaker shared by the edge comparators: orders edges by
 * the (stable) node number of the source block, then by predecessor position.
 * This keeps qsort results reproducible across runs when frequencies are
 * equal. NOTE(review): the return values of the branches are not visible in
 * this view — confirm the intended sort direction against the callers.
 */
193 static int cmp_edges_base(const edge_t *e1, const edge_t *e2)
195 long nr1 = get_irn_node_nr(e1->block);
196 long nr2 = get_irn_node_nr(e2->block);
199 } else if (nr1 > nr2) {
/* Same block: fall back to the predecessor index. */
202 if (e1->pos < e2->pos) {
204 } else if (e1->pos > e2->pos) {
/**
 * qsort comparator: orders edges by execution frequency (hottest edges are
 * processed first by the coalescing passes), falling back to
 * cmp_edges_base() for a deterministic order on equal frequencies.
 */
212 static int cmp_edges(const void *d1, const void *d2)
214 const edge_t *e1 = (const edge_t*)d1;
215 const edge_t *e2 = (const edge_t*)d2;
216 double freq1 = e1->execfreq;
217 double freq2 = e2->execfreq;
220 } else if (freq1 > freq2) {
/* Equal frequency: use the stable tie-breaker. */
223 return cmp_edges_base(e1, e2);
/**
 * qsort comparator for run2 of coalesce_blocks(): orders edges by their
 * loop-out-edge penalty (stored as a negative value in
 * collect_egde_frequency) so the costliest loop exits are handled first;
 * ties are broken deterministically via cmp_edges_base().
 */
227 static int cmp_edges_outedge_penalty(const void *d1, const void *d2)
229 const edge_t *e1 = (const edge_t*)d1;
230 const edge_t *e2 = (const edge_t*)d2;
231 double pen1 = e1->outedge_penalty_freq;
232 double pen2 = e2->outedge_penalty_freq;
235 } else if (pen1 < pen2) {
/* Equal penalty: use the stable tie-breaker. */
238 return cmp_edges_base(e1, e2);
/**
 * Recursively reset the link field of @p loop and of every nested loop.
 * Run2 of coalesce_blocks() later uses the loop link as a "this loop already
 * has a fallthrough out-edge" marker, so it must start out NULL everywhere.
 */
242 static void clear_loop_links(ir_loop *loop)
246 set_loop_link(loop, NULL);
247 n = get_loop_n_elements(loop);
248 for (i = 0; i < n; ++i) {
249 loop_element elem = get_loop_element(loop, i);
/* Only recurse into child loops; other element kinds (blocks) are skipped. */
250 if (*elem.kind == k_ir_loop) {
251 clear_loop_links(elem.son);
/**
 * Greedy coalescing: link blocks into fallthrough chains via the next/prev
 * pointers of their blocksched_entry_t. Three passes over the edge list:
 *   run1: unconditional jumps (hottest edges first),
 *   run2: one loop-exit fallthrough per loop (worst penalty first),
 *   run3: all remaining edges (hottest first).
 * An edge is only taken when neither endpoint is already chained.
 */
256 static void coalesce_blocks(blocksched_env_t *env)
259 int edge_count = ARR_LEN(env->edges);
260 edge_t *edges = env->edges;
262 /* sort interblock edges by execution frequency */
263 qsort(edges, ARR_LEN(edges), sizeof(edges[0]), cmp_edges);
265 /* run1: only look at jumps */
266 for (i = 0; i < edge_count; ++i) {
267 const edge_t *edge = &edges[i];
268 ir_node *block = edge->block;
271 blocksched_entry_t *entry, *pred_entry;
273 /* only check edge with highest frequency */
274 if (! edge->highest_execfreq)
277 /* the block might have been removed already... */
278 if (is_Bad(get_Block_cfgpred(block, 0)))
281 pred_block = get_Block_cfgpred_block(block, pos);
282 entry = get_blocksched_entry(block);
283 pred_entry = get_blocksched_entry(pred_block);
/* Skip if either block is already part of a chain at this spot. */
285 if (pred_entry->next != NULL || entry->prev != NULL)
288 /* only coalesce jumps */
/* i.e. the predecessor must have exactly one successor block. */
289 if (get_block_succ_next(pred_block, get_block_succ_first(pred_block)) != NULL)
292 /* schedule the 2 blocks behind each other */
293 DB((dbg, LEVEL_1, "Coalesce (Jump) %+F -> %+F (%.3g)\n",
294 pred_entry->block, entry->block, edge->execfreq));
295 pred_entry->next = entry;
296 entry->prev = pred_entry;
299 /* run2: pick loop fallthroughs */
/* Loop links serve as "loop already has an out-edge fallthrough" markers. */
300 clear_loop_links(get_irg_loop(env->irg));
302 qsort(edges, ARR_LEN(edges), sizeof(edges[0]), cmp_edges_outedge_penalty);
303 for (i = 0; i < edge_count; ++i) {
304 const edge_t *edge = &edges[i];
305 ir_node *block = edge->block;
308 blocksched_entry_t *entry, *pred_entry;
312 /* already seen all loop outedges? */
/* Edges were sorted by penalty, so the first zero-penalty edge ends run2. */
313 if (edge->outedge_penalty_freq == 0)
316 /* the block might have been removed already... */
317 if (is_Bad(get_Block_cfgpred(block, pos)))
320 pred_block = get_Block_cfgpred_block(block, pos);
321 entry = get_blocksched_entry(block);
322 pred_entry = get_blocksched_entry(pred_block);
324 if (pred_entry->next != NULL || entry->prev != NULL)
327 /* we want at most 1 outedge fallthrough per loop */
328 loop = get_irn_loop(pred_block);
329 if (get_loop_link(loop) != NULL)
332 /* schedule the 2 blocks behind each other */
333 DB((dbg, LEVEL_1, "Coalesce (Loop Outedge) %+F -> %+F (%.3g)\n",
334 pred_entry->block, entry->block, edge->execfreq));
335 pred_entry->next = entry;
336 entry->prev = pred_entry;
338 /* all loops left have an outedge now */
/* Mark every loop between the predecessor's loop and the target's loop. */
339 outer_loop = get_irn_loop(block);
341 /* we set loop link to loop to mark it */
342 set_loop_link(loop, loop);
343 loop = get_loop_outer_loop(loop);
344 } while (loop != outer_loop);
347 /* sort interblock edges by execution frequency */
348 qsort(edges, ARR_LEN(edges), sizeof(edges[0]), cmp_edges);
350 /* run3: remaining edges */
351 for (i = 0; i < edge_count; ++i) {
352 const edge_t *edge = &edges[i];
353 ir_node *block = edge->block;
356 blocksched_entry_t *entry, *pred_entry;
358 /* the block might have been removed already... */
359 if (is_Bad(get_Block_cfgpred(block, pos)))
362 pred_block = get_Block_cfgpred_block(block, pos);
363 entry = get_blocksched_entry(block);
364 pred_entry = get_blocksched_entry(pred_block);
366 /* is 1 of the blocks already attached to another block? */
367 if (pred_entry->next != NULL || entry->prev != NULL)
370 /* schedule the 2 blocks behind each other */
371 DB((dbg, LEVEL_1, "Coalesce (CondJump) %+F -> %+F (%.3g)\n",
372 pred_entry->block, entry->block, edge->execfreq));
373 pred_entry->next = entry;
374 entry->prev = pred_entry;
/**
 * Recursively lay out the final block order starting at @p entry.
 * Marks the block visited, enqueues unvisited successors on the worklist,
 * then follows an already-coalesced next pointer if present; otherwise picks
 * the hottest unattached successor, falling back to the worklist when no
 * direct successor is available. The chosen successor is appended to the
 * chain (entry->next / succ_entry->prev) and scheduling continues from it.
 */
378 static void pick_block_successor(blocksched_entry_t *entry, blocksched_env_t *env)
380 ir_node *block = entry->block;
381 ir_node *succ = NULL;
382 blocksched_entry_t *succ_entry;
383 double best_succ_execfreq;
/* Already placed? Then nothing to do (also marks the block as visited). */
385 if (irn_visited_else_mark(block))
390 DB((dbg, LEVEL_1, "Pick succ of %+F\n", block));
392 /* put all successors into the worklist */
393 foreach_block_succ(block, edge) {
394 ir_node *succ_block = get_edge_src_irn(edge);
396 if (irn_visited(succ_block))
399 /* we only need to put the first of a series of already connected
400 * blocks into the worklist */
401 succ_entry = get_blocksched_entry(succ_block);
/* Walk to the head of the chain the successor belongs to. */
402 while (succ_entry->prev != NULL) {
403 /* break cycles... */
404 if (succ_entry->prev->block == succ_block) {
405 succ_entry->prev->next = NULL;
406 succ_entry->prev = NULL;
409 succ_entry = succ_entry->prev;
412 if (irn_visited(succ_entry->block))
415 DB((dbg, LEVEL_1, "Put %+F into worklist\n", succ_entry->block));
416 pdeq_putr(env->worklist, succ_entry->block);
/* If coalescing already fixed our successor, just follow the chain. */
419 if (entry->next != NULL) {
420 pick_block_successor(entry->next, env);
424 DB((dbg, LEVEL_1, "deciding...\n"));
425 best_succ_execfreq = -1;
427 /* no successor yet: pick the successor block with the highest execution
428 * frequency which has no predecessor yet */
430 foreach_block_succ(block, edge) {
431 ir_node *succ_block = get_edge_src_irn(edge);
433 if (irn_visited(succ_block))
436 succ_entry = get_blocksched_entry(succ_block);
/* Successor already glued behind some other block — not eligible. */
437 if (succ_entry->prev != NULL)
440 double execfreq = get_block_execfreq(succ_block);
441 if (execfreq > best_succ_execfreq) {
442 best_succ_execfreq = execfreq;
/* No direct successor available: take the next unvisited worklist block. */
448 DB((dbg, LEVEL_1, "pick from worklist\n"));
451 if (pdeq_empty(env->worklist)) {
452 DB((dbg, LEVEL_1, "worklist empty\n"));
455 succ = (ir_node*)pdeq_getl(env->worklist);
456 } while (irn_visited(succ));
/* Append the chosen successor to the chain and keep scheduling from it. */
459 succ_entry = get_blocksched_entry(succ);
460 entry->next = succ_entry;
461 succ_entry->prev = entry;
463 pick_block_successor(succ_entry, env);
/**
 * Produce the final linked schedule: starts at the start block's entry and
 * runs pick_block_successor() over the whole graph, using the IRN_VISITED
 * resource to track placed blocks and a pdeq as worklist.
 *
 * @return the entry of the start block, i.e. the head of the schedule chain.
 */
466 static blocksched_entry_t *finish_block_schedule(blocksched_env_t *env)
468 ir_graph *irg = env->irg;
469 ir_node *startblock = get_irg_start_block(irg);
470 blocksched_entry_t *entry = get_blocksched_entry(startblock);
472 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
473 inc_irg_visited(irg);
475 env->worklist = new_pdeq();
476 pick_block_successor(entry, env);
/* Every block must have been drained from the worklist by now. */
477 assert(pdeq_empty(env->worklist));
478 del_pdeq(env->worklist);
480 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
/**
 * Flatten the linked schedule starting at @p first into an array of
 * @p count block nodes allocated on @p obst (the caller passes the backend
 * obstack, so the array outlives this pass).
 */
485 static ir_node **create_blocksched_array(blocksched_env_t *env, blocksched_entry_t *first,
486 int count, struct obstack* obst)
489 ir_node **block_list;
490 blocksched_entry_t *entry;
493 block_list = NEW_ARR_D(ir_node *, obst, count);
494 DB((dbg, LEVEL_1, "Blockschedule:\n"));
/* Walk the next-chain and copy the blocks in schedule order. */
496 for (entry = first; entry != NULL; entry = entry->next) {
498 block_list[i++] = entry->block;
499 DB((dbg, LEVEL_1, "\t%+F\n", entry->block));
/**
 * Entry point for the greedy (and naive) strategies: collects edge
 * frequencies, removes empty blocks, optionally coalesces fallthrough
 * chains (skipped for BLOCKSCHED_NAIV) and materializes the schedule array.
 */
506 static ir_node **create_block_schedule_greedy(ir_graph *irg)
508 blocksched_env_t env;
509 blocksched_entry_t *start_entry;
510 ir_node **block_list;
513 env.edges = NEW_ARR_F(edge_t, 0);
516 obstack_init(&env.obst);
/* Edge collection relies on an up-to-date loop tree. */
518 assure_loopinfo(irg);
520 // collect edge execution frequencies
521 irg_block_walk_graph(irg, collect_egde_frequency, NULL, &env);
/* Dropping empty blocks may invalidate some collected edges; the
 * coalescing passes guard against this via the is_Bad checks. */
523 (void)be_remove_empty_blocks(irg);
525 if (algo != BLOCKSCHED_NAIV)
526 coalesce_blocks(&env);
528 start_entry = finish_block_schedule(&env);
529 block_list = create_blocksched_array(&env, start_entry, env.blockcount,
530 be_get_be_obst(irg));
/* Release pass-local memory; block_list lives on the backend obstack. */
532 DEL_ARR_F(env.edges);
533 obstack_free(&env.obst, NULL);
/** A cfg edge as seen by the ILP formulation (one binary variable each). */
547 typedef struct ilp_edge_t {
548 ir_node *block; /**< source block */
549 int pos; /**< number of cfg predecessor (target) */
/** ILP-specific environment: wraps the common env plus the edge list and
 * (below) the lpp problem handle. */
553 typedef struct blocksched_ilp_env_t {
554 blocksched_env_t env;
555 ilp_edge_t *ilpedges;
557 } blocksched_ilp_env_t;
/** Per-block record for the ILP variant. The first fields mirror
 * blocksched_entry_t so the shared scheduling code can use it via
 * get_blocksched_entry() once the ILP solution has been applied. */
559 typedef struct blocksched_ilp_entry_t {
561 struct blocksched_entry_t *next;
562 struct blocksched_entry_t *prev;
565 } blocksched_ilp_entry_t;
/**
 * Append a new ILP edge for cfg predecessor @p pos of @p block and create
 * its binary lpp variable (objective coefficient = @p execfreq, so the
 * solver prefers to keep hot edges as fallthroughs).
 *
 * @return the index of the new edge in env->ilpedges.
 */
567 static int add_ilp_edge(ir_node *block, int pos, double execfreq, blocksched_ilp_env_t *env)
571 int edgeidx = ARR_LEN(env->ilpedges);
/* Variable names only need to be unique within the problem. */
573 snprintf(name, sizeof(name), "edge%d", edgeidx);
577 edge.ilpvar = lpp_add_var_default(env->lpp, name, lpp_binary, execfreq, 1.0);
579 ARR_APP1(ilp_edge_t, env->ilpedges, edge);
/**
 * Block-walker callback for the ILP variant: creates the per-block entry
 * with its out-edge constraint (at most one outgoing fallthrough) and an
 * in-edge constraint (at most one incoming fallthrough) over one binary
 * variable per cfg edge.
 */
583 static void collect_egde_frequency_ilp(ir_node *block, void *data)
585 blocksched_ilp_env_t *env = (blocksched_ilp_env_t*)data;
586 ir_graph *irg = env->env.irg;
587 ir_node *startblock = get_irg_start_block(irg);
591 blocksched_ilp_entry_t *entry;
593 snprintf(name, sizeof(name), "block_out_constr_%ld", get_irn_node_nr(block));
594 out_count = get_irn_n_edges_kind(block, EDGE_KIND_BLOCK);
596 entry = OALLOC(&env->env.obst, blocksched_ilp_entry_t);
597 entry->block = block;
/* "at least out_count-1 of the out-edges are jumps" == at most one
 * out-edge becomes a fallthrough. */
600 entry->out_cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater_equal, out_count - 1);
601 set_irn_link(block, entry);
/* The start block has no incoming cfg edges to constrain. */
603 if (block == startblock)
606 arity = get_irn_arity(block);
/* Single predecessor: just create the edge variable, no in-constraint
 * needed (one edge can always be the fallthrough). */
608 double execfreq = get_block_execfreq(block);
609 add_ilp_edge(block, 0, execfreq, env);
/* Multiple predecessors: at most one of them may fall through. */
615 snprintf(name, sizeof(name), "block_in_constr_%ld", get_irn_node_nr(block));
616 cst_idx = lpp_add_cst_uniq(env->lpp, name, lpp_greater_equal, arity - 1);
618 for (i = 0; i < arity; ++i) {
622 ir_node *pred_block = get_Block_cfgpred_block(block, i);
624 execfreq = get_block_execfreq(pred_block);
625 edgenum = add_ilp_edge(block, i, execfreq, env);
626 edge = &env->ilpedges[edgenum];
627 lpp_set_factor_fast(env->lpp, cst_idx, edge->ilpvar, 1.0);
632 static blocksched_ilp_entry_t *get_blocksched_ilp_entry(const ir_node *block)
634 return (blocksched_ilp_entry_t*)get_irn_link(block);
/**
 * ILP coalescing: completes the out-edge constraints (each edge variable
 * also appears in its source block's constraint), solves the ILP via the
 * configured server/solver, and converts every edge whose variable ended up
 * 0 (i.e. "not a jump") into a next/prev fallthrough link.
 */
637 static void coalesce_blocks_ilp(blocksched_ilp_env_t *env)
639 int edge_count = ARR_LEN(env->ilpedges);
642 /* complete out constraints */
643 for (i = 0; i < edge_count; ++i) {
644 const ilp_edge_t *edge = &env->ilpedges[i];
645 ir_node *block = edge->block;
647 blocksched_ilp_entry_t *entry;
649 /* the block might have been removed already... */
650 if (is_Bad(get_Block_cfgpred(block, 0)))
653 pred = get_Block_cfgpred_block(block, edge->pos);
654 entry = get_blocksched_ilp_entry(pred);
656 DB((dbg, LEVEL_1, "Adding out cst to %+F from %+F,%d\n",
657 pred, block, edge->pos));
658 lpp_set_factor_fast(env->lpp, entry->out_cst, edge->ilpvar, 1.0);
661 lpp_solve_net(env->lpp, be_options.ilp_server, be_options.ilp_solver);
/* NOTE(review): solving can hit the 20s time limit set by the caller;
 * the assert only fires in debug builds — confirm release behavior. */
662 assert(lpp_is_sol_valid(env->lpp));
664 /* Apply results to edges */
665 for (i = 0; i < edge_count; ++i) {
666 const ilp_edge_t *edge = &env->ilpedges[i];
667 ir_node *block = edge->block;
670 blocksched_entry_t *entry;
671 blocksched_entry_t *pred_entry;
673 /* the block might have been removed already... */
674 if (is_Bad(get_Block_cfgpred(block, 0)))
/* Variable value 1 = edge stays a jump; only non-jumps are linked. */
677 is_jump = (int)lpp_get_var_sol(env->lpp, edge->ilpvar);
681 pred = get_Block_cfgpred_block(block, edge->pos);
682 entry = get_blocksched_entry(block);
683 pred_entry = get_blocksched_entry(pred);
/* The in/out constraints guarantee at most one fallthrough per side. */
685 assert(entry->prev == NULL && pred_entry->next == NULL);
686 entry->prev = pred_entry;
687 pred_entry->next = entry;
/**
 * Entry point for the ILP strategy: builds the ILP over all cfg edges,
 * solves it, applies the solution as fallthrough links and materializes the
 * schedule array (same post-processing as the greedy variant).
 */
691 static ir_node **create_block_schedule_ilp(ir_graph *irg)
693 blocksched_ilp_env_t env;
694 blocksched_entry_t *start_entry;
695 ir_node **block_list;
698 env.env.worklist = NULL;
699 env.env.blockcount = 0;
700 env.ilpedges = NEW_ARR_F(ilp_edge_t, 0);
701 obstack_init(&env.env.obst);
703 env.lpp = lpp_new("blockschedule", lpp_minimize);
/* Cap solver runtime at 20 seconds; log solver output to stdout. */
704 lpp_set_time_limit(env.lpp, 20);
705 lpp_set_log(env.lpp, stdout);
707 irg_block_walk_graph(irg, collect_egde_frequency_ilp, NULL, &env);
709 (void)be_remove_empty_blocks(irg);
710 coalesce_blocks_ilp(&env);
712 start_entry = finish_block_schedule(&env.env);
713 block_list = create_blocksched_array(&env.env, start_entry,
715 be_get_be_obst(irg));
/* Release pass-local memory; block_list lives on the backend obstack. */
717 DEL_ARR_F(env.ilpedges);
719 obstack_free(&env.env.obst, NULL);
727 * | |\/| |/ _` | | '_ \
728 * | | | | (_| | | | | |
729 * |_| |_|\__,_|_|_| |_|
/**
 * Module constructor: registers the block-scheduler options with the
 * backend option group and sets up the debug module.
 */
732 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_blocksched)
733 void be_init_blocksched(void)
735 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
737 lc_opt_add_table(be_grp, be_blocksched_options);
739 FIRM_DBG_REGISTER(dbg, "firm.be.blocksched");
/**
 * Public entry point: dispatches on the user-selected algorithm and returns
 * the computed block schedule (array allocated on the backend obstack).
 * The naive variant shares the greedy code path with coalescing disabled.
 */
742 ir_node **be_create_block_schedule(ir_graph *irg)
745 case BLOCKSCHED_GREEDY:
746 case BLOCKSCHED_NAIV:
747 return create_block_schedule_greedy(irg);
749 return create_block_schedule_ilp(irg);
/* Unreachable for valid option values. */
752 panic("unknown blocksched algo");