/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */
/**
 * @file
 * @brief       Block-scheduling strategies.
 * @author      Matthias Braun, Christoph Mallon
 *
 * The goal of the greedy (and ILP) algorithms here is to turn as many jumps
 * into fallthroughs as possible (weighted by execution frequency, since it is
 * the executed jumps that matter). The algorithms collect the execution
 * frequencies of all branches (which is easily possible when all critical
 * edges are split), then remove critical edges where possible, as we neither
 * need nor want them anymore at this point. The algorithms then try to turn
 * as many edges as possible into fallthroughs; this is done by setting
 * next and prev pointers on the blocks. The greedy algorithm sorts the edges
 * by execution frequency and tries to turn them into fallthroughs in that
 * order.
 */
39 #include "beblocksched.h"
49 #include "irgraph_t.h"
62 #include "lc_opts_enum.h"
66 #include <lpp/lpp_net.h>
69 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
71 typedef enum _blocksched_algos_t {
72 BLOCKSCHED_NAIV, BLOCKSCHED_GREEDY, BLOCKSCHED_ILP
75 static int algo = BLOCKSCHED_GREEDY;
77 static const lc_opt_enum_int_items_t blockschedalgo_items[] = {
78 { "naiv", BLOCKSCHED_NAIV },
79 { "greedy", BLOCKSCHED_GREEDY },
81 { "ilp", BLOCKSCHED_ILP },
86 static lc_opt_enum_int_var_t algo_var = {
87 &algo, blockschedalgo_items
90 static const lc_opt_table_entry_t be_blocksched_options[] = {
91 LC_OPT_ENT_ENUM_INT ("algo", "the block scheduling algorithm", &algo_var),
97 * / ___|_ __ ___ ___ __| |_ _
98 * | | _| '__/ _ \/ _ \/ _` | | | |
99 * | |_| | | | __/ __/ (_| | |_| |
100 * \____|_| \___|\___|\__,_|\__, |
104 typedef struct blocksched_entry_t blocksched_entry_t;
105 struct blocksched_entry_t {
107 blocksched_entry_t *next;
108 blocksched_entry_t *prev;
111 typedef struct edge_t edge_t;
113 ir_node *block; /**< source block */
114 int pos; /**< number of cfg predecessor (target) */
115 double execfreq; /**< the frequency */
116 double outedge_penalty_freq; /**< for edges leaving the loop this is the
117 penality when we make them a
119 int highest_execfreq; /**< flag that indicates whether this edge is
120 the edge with the highest execfreq pointing
121 away from this block */
124 typedef struct blocksched_env_t blocksched_env_t;
125 struct blocksched_env_t {
127 struct obstack *obst;
128 ir_exec_freq *execfreqs;
135 * Collect cfg frequencies of all edges between blocks.
136 * Also determines edge with highest frequency.
138 static void collect_egde_frequency(ir_node *block, void *data)
140 blocksched_env_t *env = data;
143 blocksched_entry_t *entry;
146 memset(&edge, 0, sizeof(edge));
148 entry = obstack_alloc(env->obst, sizeof(entry[0]));
149 memset(entry, 0, sizeof(*entry));
150 entry->block = block;
151 set_irn_link(block, entry);
153 loop = get_irn_loop(block);
155 arity = get_Block_n_cfgpreds(block);
158 /* must be the start block (or end-block for endless loops),
159 * everything else is dead code and should be removed by now */
160 assert(block == get_irg_start_block(env->irg)
161 || block == get_irg_end_block(env->irg));
162 /* nothing to do here */
164 } else if (arity == 1) {
165 ir_node *pred_block = get_Block_cfgpred_block(block, 0);
166 ir_loop *pred_loop = get_irn_loop(pred_block);
167 float freq = get_block_execfreq(env->execfreqs, block);
169 /* is it an edge leaving a loop */
170 if (get_loop_depth(pred_loop) > get_loop_depth(loop)) {
171 float pred_freq = get_block_execfreq(env->execfreqs, pred_block);
172 edge.outedge_penalty_freq = -(pred_freq - freq);
177 edge.execfreq = freq;
178 edge.highest_execfreq = 1;
179 ARR_APP1(edge_t, env->edges, edge);
182 double highest_execfreq = -1.0;
183 int highest_edge_num = -1;
186 for (i = 0; i < arity; ++i) {
188 ir_node *pred_block = get_Block_cfgpred_block(block, i);
190 execfreq = get_block_execfreq(env->execfreqs, pred_block);
193 edge.execfreq = execfreq;
194 edge.highest_execfreq = 0;
195 ARR_APP1(edge_t, env->edges, edge);
197 if (execfreq > highest_execfreq) {
198 highest_execfreq = execfreq;
199 highest_edge_num = ARR_LEN(env->edges) - 1;
203 if(highest_edge_num >= 0)
204 env->edges[highest_edge_num].highest_execfreq = 1;
208 static int cmp_edges(const void *d1, const void *d2)
210 const edge_t *e1 = d1;
211 const edge_t *e2 = d2;
213 return QSORT_CMP(e2->execfreq, e1->execfreq);
216 static int cmp_edges_outedge_penalty(const void *d1, const void *d2)
218 const edge_t *e1 = d1;
219 const edge_t *e2 = d2;
220 /* reverse sorting as penalties are negative */
221 return QSORT_CMP(e1->outedge_penalty_freq, e2->outedge_penalty_freq);
224 static void clear_loop_links(ir_loop *loop)
228 set_loop_link(loop, NULL);
229 n = get_loop_n_elements(loop);
230 for (i = 0; i < n; ++i) {
231 loop_element elem = get_loop_element(loop, i);
232 if (*elem.kind == k_ir_loop) {
233 clear_loop_links(elem.son);
238 static void coalesce_blocks(blocksched_env_t *env)
241 int edge_count = ARR_LEN(env->edges);
242 edge_t *edges = env->edges;
244 /* sort interblock edges by execution frequency */
245 qsort(edges, ARR_LEN(edges), sizeof(edges[0]), cmp_edges);
247 /* run1: only look at jumps */
248 for (i = 0; i < edge_count; ++i) {
249 const edge_t *edge = &edges[i];
250 ir_node *block = edge->block;
253 blocksched_entry_t *entry, *pred_entry;
255 /* only check edge with highest frequency */
256 if (! edge->highest_execfreq)
259 /* the block might have been removed already... */
260 if (is_Bad(get_Block_cfgpred(block, 0)))
263 pred_block = get_Block_cfgpred_block(block, pos);
264 entry = get_irn_link(block);
265 pred_entry = get_irn_link(pred_block);
267 if (pred_entry->next != NULL || entry->prev != NULL)
270 /* only coalesce jumps */
271 if (get_block_succ_next(pred_block, get_block_succ_first(pred_block)) != NULL)
274 /* schedule the 2 blocks behind each other */
275 DB((dbg, LEVEL_1, "Coalesce (Jump) %+F -> %+F (%.3g)\n",
276 pred_entry->block, entry->block, edge->execfreq));
277 pred_entry->next = entry;
278 entry->prev = pred_entry;
281 /* run2: pick loop fallthroughs */
282 clear_loop_links(get_irg_loop(env->irg));
284 qsort(edges, ARR_LEN(edges), sizeof(edges[0]), cmp_edges_outedge_penalty);
285 for (i = 0; i < edge_count; ++i) {
286 const edge_t *edge = &edges[i];
287 ir_node *block = edge->block;
290 blocksched_entry_t *entry, *pred_entry;
294 /* already seen all loop outedges? */
295 if (edge->outedge_penalty_freq == 0)
298 /* the block might have been removed already... */
299 if (is_Bad(get_Block_cfgpred(block, pos)))
302 pred_block = get_Block_cfgpred_block(block, pos);
303 entry = get_irn_link(block);
304 pred_entry = get_irn_link(pred_block);
306 if (pred_entry->next != NULL || entry->prev != NULL)
309 /* we want at most 1 outedge fallthrough per loop */
310 loop = get_irn_loop(pred_block);
311 if (get_loop_link(loop) != NULL)
314 /* schedule the 2 blocks behind each other */
315 DB((dbg, LEVEL_1, "Coalesce (Loop Outedge) %+F -> %+F (%.3g)\n",
316 pred_entry->block, entry->block, edge->execfreq));
317 pred_entry->next = entry;
318 entry->prev = pred_entry;
320 /* all loops left have an outedge now */
321 outer_loop = get_irn_loop(block);
323 /* we set loop link to loop to mark it */
324 set_loop_link(loop, loop);
325 loop = get_loop_outer_loop(loop);
326 } while (loop != outer_loop);
329 /* sort interblock edges by execution frequency */
330 qsort(edges, ARR_LEN(edges), sizeof(edges[0]), cmp_edges);
332 /* run3: remaining edges */
333 for (i = 0; i < edge_count; ++i) {
334 const edge_t *edge = &edges[i];
335 ir_node *block = edge->block;
338 blocksched_entry_t *entry, *pred_entry;
340 /* the block might have been removed already... */
341 if (is_Bad(get_Block_cfgpred(block, pos)))
344 pred_block = get_Block_cfgpred_block(block, pos);
345 entry = get_irn_link(block);
346 pred_entry = get_irn_link(pred_block);
348 /* is 1 of the blocks already attached to another block? */
349 if (pred_entry->next != NULL || entry->prev != NULL)
352 /* schedule the 2 blocks behind each other */
353 DB((dbg, LEVEL_1, "Coalesce (CondJump) %+F -> %+F (%.3g)\n",
354 pred_entry->block, entry->block, edge->execfreq));
355 pred_entry->next = entry;
356 entry->prev = pred_entry;
360 static void pick_block_successor(blocksched_entry_t *entry, blocksched_env_t *env)
362 ir_node *block = entry->block;
363 ir_node *succ = NULL;
364 blocksched_entry_t *succ_entry;
365 const ir_edge_t *edge;
366 double best_succ_execfreq;
368 if (irn_visited_else_mark(block))
373 DB((dbg, LEVEL_1, "Pick succ of %+F\n", block));
375 /* put all successors into the worklist */
376 foreach_block_succ(block, edge) {
377 ir_node *succ_block = get_edge_src_irn(edge);
379 if (irn_visited(succ_block))
382 /* we only need to put the first of a series of already connected
383 * blocks into the worklist */
384 succ_entry = get_irn_link(succ_block);
385 while (succ_entry->prev != NULL) {
386 /* break cycles... */
387 if (succ_entry->prev->block == succ_block) {
388 succ_entry->prev->next = NULL;
389 succ_entry->prev = NULL;
392 succ_entry = succ_entry->prev;
395 if (irn_visited(succ_entry->block))
398 DB((dbg, LEVEL_1, "Put %+F into worklist\n", succ_entry->block));
399 pdeq_putr(env->worklist, succ_entry->block);
402 if (entry->next != NULL) {
403 pick_block_successor(entry->next, env);
407 DB((dbg, LEVEL_1, "deciding...\n"));
408 best_succ_execfreq = -1;
410 /* no successor yet: pick the successor block with the highest execution
411 * frequency which has no predecessor yet */
413 foreach_block_succ(block, edge) {
414 ir_node *succ_block = get_edge_src_irn(edge);
417 if (irn_visited(succ_block))
420 succ_entry = get_irn_link(succ_block);
421 if (succ_entry->prev != NULL)
424 execfreq = get_block_execfreq(env->execfreqs, succ_block);
425 if (execfreq > best_succ_execfreq) {
426 best_succ_execfreq = execfreq;
432 DB((dbg, LEVEL_1, "pick from worklist\n"));
435 if (pdeq_empty(env->worklist)) {
436 DB((dbg, LEVEL_1, "worklist empty\n"));
439 succ = pdeq_getl(env->worklist);
440 } while (irn_visited(succ));
443 succ_entry = get_irn_link(succ);
444 entry->next = succ_entry;
445 succ_entry->prev = entry;
447 pick_block_successor(succ_entry, env);
450 static blocksched_entry_t *finish_block_schedule(blocksched_env_t *env)
452 ir_graph *irg = env->irg;
453 ir_node *startblock = get_irg_start_block(irg);
454 blocksched_entry_t *entry = get_irn_link(startblock);
456 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
457 inc_irg_visited(irg);
459 env->worklist = new_pdeq();
460 pick_block_successor(entry, env);
461 assert(pdeq_empty(env->worklist));
462 del_pdeq(env->worklist);
464 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
469 static ir_node **create_blocksched_array(blocksched_env_t *env, blocksched_entry_t *first,
470 int count, struct obstack* obst)
473 ir_node **block_list;
474 blocksched_entry_t *entry;
477 block_list = NEW_ARR_D(ir_node *, obst, count);
478 DB((dbg, LEVEL_1, "Blockschedule:\n"));
480 for (entry = first; entry != NULL; entry = entry->next) {
482 block_list[i++] = entry->block;
483 DB((dbg, LEVEL_1, "\t%+F\n", entry->block));
490 static ir_node **create_block_schedule_greedy(ir_graph *irg, ir_exec_freq *execfreqs)
492 blocksched_env_t env;
494 blocksched_entry_t *start_entry;
495 ir_node **block_list;
501 env.execfreqs = execfreqs;
502 env.edges = NEW_ARR_F(edge_t, 0);
506 /* make sure loopinfo is up-to-date */
507 if (! (get_irg_loopinfo_state(irg) & loopinfo_cf_consistent)) {
508 construct_cf_backedges(irg);
511 // collect edge execution frequencies
512 irg_block_walk_graph(irg, collect_egde_frequency, NULL, &env);
514 (void)be_remove_empty_blocks(irg);
516 if (algo != BLOCKSCHED_NAIV)
517 coalesce_blocks(&env);
519 start_entry = finish_block_schedule(&env);
520 block_list = create_blocksched_array(&env, start_entry, env.blockcount, get_irg_obstack(irg));
522 DEL_ARR_F(env.edges);
523 obstack_free(&obst, NULL);
538 typedef struct _ilp_edge_t {
539 ir_node *block; /**< source block */
540 int pos; /**< number of cfg predecessor (target) */
544 typedef struct _blocksched_ilp_env_t {
545 blocksched_env_t env;
546 ilp_edge_t *ilpedges;
548 } blocksched_ilp_env_t;
550 typedef struct _blocksched_ilp_entry_t {
552 struct _blocksched_entry_t *next;
553 struct _blocksched_entry_t *prev;
556 } blocksched_ilp_entry_t;
558 static int add_ilp_edge(ir_node *block, int pos, double execfreq, blocksched_ilp_env_t *env)
562 int edgeidx = ARR_LEN(env->ilpedges);
564 snprintf(name, sizeof(name), "edge%d", edgeidx);
568 edge.ilpvar = lpp_add_var_default(env->lpp, name, lpp_binary, execfreq, 1.0);
570 ARR_APP1(ilp_edge_t, env->ilpedges, edge);
574 static void collect_egde_frequency_ilp(ir_node *block, void *data)
576 blocksched_ilp_env_t *env = data;
577 ir_graph *irg = env->env.irg;
578 ir_node *startblock = get_irg_start_block(irg);
583 blocksched_ilp_entry_t *entry;
585 snprintf(name, sizeof(name), "block_out_constr_%ld", get_irn_node_nr(block));
586 out_count = get_irn_n_edges_kind(block, EDGE_KIND_BLOCK);
588 entry = obstack_alloc(env->env.obst, sizeof(entry[0]));
589 entry->block = block;
592 entry->out_cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater, out_count - 1);
593 set_irn_link(block, entry);
595 if (block == startblock)
598 arity = get_irn_arity(block);
600 double execfreq = get_block_execfreq(env->env.execfreqs, block);
601 add_ilp_edge(block, 0, execfreq, env);
606 snprintf(name, sizeof(name), "block_in_constr_%ld", get_irn_node_nr(block));
607 cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater, arity - 1);
609 for (i = 0; i < arity; ++i) {
613 ir_node *pred_block = get_Block_cfgpred_block(block, i);
615 execfreq = get_block_execfreq(env->env.execfreqs, pred_block);
616 edgenum = add_ilp_edge(block, i, execfreq, env);
617 edge = &env->ilpedges[edgenum];
618 lpp_set_factor_fast(env->lpp, cst, edge->ilpvar, 1.0);
624 static void coalesce_blocks_ilp(blocksched_ilp_env_t *env)
627 int edge_count = ARR_LEN(env->ilpedges);
629 /* complete out constraints */
630 for(i = 0; i < edge_count; ++i) {
631 const ilp_edge_t *edge = &env->ilpedges[i];
632 ir_node *block = edge->block;
634 blocksched_ilp_entry_t *entry;
636 /* the block might have been removed already... */
637 if (is_Bad(get_Block_cfgpred(block, 0)))
640 pred = get_Block_cfgpred_block(block, edge->pos);
641 entry = get_irn_link(pred);
643 DB((dbg, LEVEL_1, "Adding out cst to %+F from %+F,%d\n",
644 pred, block, edge->pos));
645 lpp_set_factor_fast(env->lpp, entry->out_cst, edge->ilpvar, 1.0);
652 lpp_dump(env->lpp, "lpp.out");
653 snprintf(fname, sizeof(fname), "lpp_%s.plain", get_irg_dump_name(env->env.irg));
654 f = fopen(fname, "w");
655 lpp_dump_plain(env->lpp, f);
660 //lpp_solve_net(env->lpp, main_env->options->ilp_server, main_env->options->ilp_solver);
661 lpp_solve_net(env->lpp, "i44pc52", "cplex");
662 assert(lpp_is_sol_valid(env->lpp));
664 /* Apply results to edges */
665 for (i = 0; i < edge_count; ++i) {
666 const ilp_edge_t *edge = &env->ilpedges[i];
667 ir_node *block = edge->block;
670 blocksched_entry_t *entry;
671 blocksched_entry_t *pred_entry;
673 /* the block might have been removed already... */
674 if (is_Bad(get_Block_cfgpred(block, 0)))
677 is_jump = (int)lpp_get_var_sol(env->lpp, edge->ilpvar);
681 pred = get_Block_cfgpred_block(block, edge->pos);
682 entry = get_irn_link(block);
683 pred_entry = get_irn_link(pred);
685 assert(entry->prev == NULL && pred_entry->next == NULL);
686 entry->prev = pred_entry;
687 pred_entry->next = entry;
691 static ir_node **create_block_schedule_ilp(ir_graph *irg, ir_exec_freq *execfreqs)
693 blocksched_ilp_env_t env;
695 blocksched_entry_t *start_entry;
696 ir_node **block_list;
701 env.env.obst = &obst;
702 env.env.execfreqs = execfreqs;
703 env.env.worklist = NULL;
704 env.env.blockcount = 0;
705 env.ilpedges = NEW_ARR_F(ilp_edge_t, 0);
707 env.lpp = new_lpp("blockschedule", lpp_minimize);
708 lpp_set_time_limit(env.lpp, 20);
709 lpp_set_log(env.lpp, stdout);
711 irg_block_walk_graph(irg, collect_egde_frequency_ilp, NULL, &env);
713 (void)be_remove_empty_blocks(irg);
714 coalesce_blocks_ilp(&env);
716 start_entry = finish_block_schedule(&env.env);
717 block_list = create_blocksched_array(&env.env, start_entry, env.env.blockcount, get_irg_obstack(irg));
719 DEL_ARR_F(env.ilpedges);
721 obstack_free(&obst, NULL);
725 #endif /* WITH_ILP */
730 * | |\/| |/ _` | | '_ \
731 * | | | | (_| | | | | |
732 * |_| |_|\__,_|_|_| |_|
735 void be_init_blocksched(void)
737 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
738 lc_opt_entry_t *blocksched_grp = lc_opt_get_grp(be_grp, "blocksched");
740 lc_opt_add_table(blocksched_grp, be_blocksched_options);
742 FIRM_DBG_REGISTER(dbg, "firm.be.blocksched");
745 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_blocksched);
747 ir_node **be_create_block_schedule(ir_graph *irg, ir_exec_freq *execfreqs)
750 case BLOCKSCHED_GREEDY:
751 case BLOCKSCHED_NAIV:
752 return create_block_schedule_greedy(irg, execfreqs);
755 return create_block_schedule_ilp(irg, execfreqs);
756 #endif /* WITH_ILP */
759 panic("unknown blocksched algo");