2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Block-scheduling strategies.
23 * @author Matthias Braun, Christoph Mallon
27 * The greedy (and ILP) algorithms here work by assuming that
28 * we want to change as many jumps to fallthroughs as possible (executed jumps
29 * actually, we have to look at the execution frequencies). The algorithms
30 * do this by collecting execution frequencies of all branches (which is easily
31 * possible when all critical edges are split) then removes critical edges where
32 * possible as we don't need and want them anymore now. The algorithms then try
33 * to change as many edges to fallthroughs as possible, this is done by setting
34 * a next and prev pointers on blocks. The greedy algorithm sorts the edges by
35 * execution frequencies and tries to transform them to fallthroughs in this order
39 #include "beblocksched.h"
49 #include "irgraph_t.h"
62 #include "lc_opts_enum.h"
66 #include <lpp/lpp_net.h>
69 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/* The selectable block-scheduling strategies; chosen at runtime via the
 * "be.blocksched.algo" command-line option registered below. */
71 typedef enum _blocksched_algos_t {
72 BLOCKSCHED_NAIV, BLOCKSCHED_EXTBB, BLOCKSCHED_GREEDY, BLOCKSCHED_ILP
/* Default strategy: greedy fallthrough maximisation. */
75 static int algo = BLOCKSCHED_GREEDY;
/* Maps option strings to the enum values above. */
77 static const lc_opt_enum_int_items_t blockschedalgo_items[] = {
78 { "naiv", BLOCKSCHED_NAIV },
79 { "extbb", BLOCKSCHED_EXTBB },
80 { "greedy", BLOCKSCHED_GREEDY },
82 { "ilp", BLOCKSCHED_ILP },
/* Binds the "algo" option to the static `algo` variable. */
87 static lc_opt_enum_int_var_t algo_var = {
88 &algo, blockschedalgo_items
/* Option table; registered with the "be.blocksched" group in
 * be_init_blocksched(). */
91 static const lc_opt_table_entry_t be_blocksched_options[] = {
92 LC_OPT_ENT_ENUM_INT ("algo", "the block scheduling algorithm", &algo_var),
98 * / ___|_ __ ___ ___ __| |_ _
99 * | | _| '__/ _ \/ _ \/ _` | | | |
100 * | |_| | | | __/ __/ (_| | |_| |
101 * \____|_| \___|\___|\__,_|\__, |
/* Doubly-linked chain node that records the chosen block ordering; one entry
 * per block, stored in the block's irn link field (see set_irn_link calls). */
105 typedef struct _blocksched_entry_t {
107 struct _blocksched_entry_t *next;
108 struct _blocksched_entry_t *prev;
109 } blocksched_entry_t;
/* A control-flow edge, candidate for being turned into a fallthrough. */
111 typedef struct _edge_t {
112 ir_node *block; /**< source block */
113 int pos; /**< number of cfg predecessor (target) */
114 double execfreq; /**< the frequency */
115 int highest_execfreq; /**< flag that indicates whether this edge is the edge with the highest
116 execfreq pointing away from this block */
/* Shared state of the greedy/naive scheduler passed through the walker. */
119 typedef struct _blocksched_env_t {
121 struct obstack *obst;
122 ir_exec_freq *execfreqs;
129 * Collect cfg frequencies of all edges between blocks.
130 * Also determines edge with highest frequency.
/* NOTE(review): "egde" is a typo for "edge"; kept because the walker
 * registration below refers to this exact name. */
132 static void collect_egde_frequency(ir_node *block, void *data)
134 blocksched_env_t *env = data;
137 blocksched_entry_t *entry;
/* Allocate this block's schedule entry on the env obstack and attach it
 * to the block via its link field for later lookup. */
139 entry = obstack_alloc(env->obst, sizeof(entry[0]));
140 entry->block = block;
143 set_irn_link(block, entry);
145 arity = get_Block_n_cfgpreds(block);
/* arity == 0: only the start block (or the end block of an endless loop)
 * may have no cfg predecessors. */
148 assert(block == get_irg_start_block(env->irg)
149 || block == get_irg_end_block(env->irg));
150 /* must be the start block (or end-block for endless loops), nothing to
153 } else if (arity == 1) {
/* Single predecessor: the edge frequency equals the block's own
 * frequency and it is trivially the hottest outgoing edge. */
156 edge.execfreq = get_block_execfreq(env->execfreqs, block);
157 edge.highest_execfreq = 1;
158 ARR_APP1(edge_t, env->edges, edge);
/* Multiple predecessors: record one edge per predecessor and remember
 * the hottest one (by predecessor-block frequency). */
161 double highest_execfreq = -1.0;
162 int highest_edge_num = -1;
165 for (i = 0; i < arity; ++i) {
167 ir_node *pred_block = get_Block_cfgpred_block(block, i);
169 execfreq = get_block_execfreq(env->execfreqs, pred_block);
172 edge.execfreq = execfreq;
173 edge.highest_execfreq = 0;
174 ARR_APP1(edge_t, env->edges, edge);
176 if (execfreq > highest_execfreq) {
177 highest_execfreq = execfreq;
178 highest_edge_num = ARR_LEN(env->edges) - 1;
/* Mark the hottest incoming edge after the loop (if any edge existed). */
182 if(highest_edge_num >= 0)
183 env->edges[highest_edge_num].highest_execfreq = 1;
/* qsort comparator: sorts edges by DESCENDING execution frequency (note the
 * swapped operands e2/e1 passed to QSORT_CMP). */
187 static int cmp_edges(const void *d1, const void *d2)
189 const edge_t *e1 = d1;
190 const edge_t *e2 = d2;
192 return QSORT_CMP(e2->execfreq, e1->execfreq);
/* Greedy coalescing: walks the frequency-sorted edge list twice and links
 * source/target blocks via next/prev so the edge becomes a fallthrough.
 * Run 1 only considers unconditional jumps, run 2 the remaining edges. */
195 static void coalesce_blocks(blocksched_env_t *env)
198 int edge_count = ARR_LEN(env->edges);
200 /* run1: only look at jumps */
201 for (i = 0; i < edge_count; ++i) {
202 const edge_t *edge = &env->edges[i];
203 ir_node *block = edge->block;
206 blocksched_entry_t *entry, *pred_entry;
208 /* only check edge with highest frequency */
209 if (! edge->highest_execfreq)
212 /* the block might have been removed already... */
213 if (is_Bad(get_Block_cfgpred(block, 0)))
216 pred_block = get_Block_cfgpred_block(block, pos);
217 entry = get_irn_link(block);
218 pred_entry = get_irn_link(pred_block);
/* Skip if either endpoint is already chained to another block. */
220 if (pred_entry->next != NULL || entry->prev != NULL)
223 /* only coalesce jumps */
/* i.e. the predecessor must have exactly one block successor. */
224 if (get_block_succ_next(pred_block, get_block_succ_first(pred_block)) != NULL)
227 /* schedule the 2 blocks behind each other */
228 DBG((dbg, LEVEL_1, "Coalesce (Jump) %+F -> %+F (%.3g)\n",
229 pred_entry->block, entry->block, edge->execfreq));
230 pred_entry->next = entry;
231 entry->prev = pred_entry;
234 /* run2: remaining edges */
235 for (i = 0; i < edge_count; ++i) {
236 const edge_t *edge = &env->edges[i];
237 ir_node *block = edge->block;
240 blocksched_entry_t *entry, *pred_entry;
242 /* the block might have been removed already... */
243 if (is_Bad(get_Block_cfgpred(block, 0)))
246 /* we can't do fallthroughs in backedges */
247 if (is_backedge(block, pos))
250 pred_block = get_Block_cfgpred_block(block, pos);
251 entry = get_irn_link(block);
252 pred_entry = get_irn_link(pred_block);
254 /* is 1 of the blocks already attached to another block? */
255 if (pred_entry->next != NULL || entry->prev != NULL)
258 /* schedule the 2 blocks behind each other */
259 DBG((dbg, LEVEL_1, "Coalesce (CondJump) %+F -> %+F (%.3g)\n",
260 pred_entry->block, entry->block, edge->execfreq));
261 pred_entry->next = entry;
262 entry->prev = pred_entry;
/* Recursively emits the chain starting at `entry` and decides which block
 * follows a chain end: prefer the hottest unattached successor, otherwise
 * fall back to the worklist. Uses the irg visited flag to mark emitted
 * blocks; recursion depth is bounded by the number of blocks. */
266 static void pick_block_successor(blocksched_entry_t *entry, blocksched_env_t *env)
268 ir_node *block = entry->block;
269 ir_node *succ = NULL;
270 blocksched_entry_t *succ_entry;
271 const ir_edge_t *edge;
272 double best_succ_execfreq;
/* Already emitted? Then nothing to do (also marks the block visited). */
274 if (irn_visited_else_mark(block))
279 DBG((dbg, LEVEL_1, "Pick succ of %+F\n", block));
281 /* put all successors into the worklist */
282 foreach_block_succ(block, edge) {
283 ir_node *succ_block = get_edge_src_irn(edge);
285 if (irn_visited(succ_block))
288 /* we only need to put the first of a series of already connected
289 * blocks into the worklist */
290 succ_entry = get_irn_link(succ_block);
291 while (succ_entry->prev != NULL) {
292 /* break cycles... */
/* A coalesced chain may form a cycle; cut the link back to succ_block
 * so the prev-walk below terminates. */
293 if (succ_entry->prev->block == succ_block) {
294 succ_entry->prev->next = NULL;
295 succ_entry->prev = NULL;
298 succ_entry = succ_entry->prev;
301 if (irn_visited(succ_entry->block))
304 DBG((dbg, LEVEL_1, "Put %+F into worklist\n", succ_entry->block));
305 pdeq_putr(env->worklist, succ_entry->block);
/* If a coalesced follower exists, continue emitting along the chain. */
308 if (entry->next != NULL) {
309 pick_block_successor(entry->next, env);
313 DBG((dbg, LEVEL_1, "deciding...\n"));
314 best_succ_execfreq = -1;
316 /* no successor yet: pick the successor block with the highest execution
317 * frequency which has no predecessor yet */
319 foreach_block_succ(block, edge) {
320 ir_node *succ_block = get_edge_src_irn(edge);
323 if (irn_visited(succ_block))
326 succ_entry = get_irn_link(succ_block);
/* Skip blocks already attached behind some other block. */
327 if (succ_entry->prev != NULL)
330 execfreq = get_block_execfreq(env->execfreqs, succ_block);
331 if (execfreq > best_succ_execfreq) {
332 best_succ_execfreq = execfreq;
338 DBG((dbg, LEVEL_1, "pick from worklist\n"));
/* No direct successor available: pop unvisited blocks off the worklist
 * until one is found or the worklist runs dry. */
341 if (pdeq_empty(env->worklist)) {
342 DBG((dbg, LEVEL_1, "worklist empty\n"));
345 succ = pdeq_getl(env->worklist);
346 } while (irn_visited(succ));
/* Append the chosen successor and recurse from there. */
349 succ_entry = get_irn_link(succ);
350 entry->next = succ_entry;
351 succ_entry->prev = entry;
353 pick_block_successor(succ_entry, env);
/* Turns the partial chains produced by coalescing into one complete schedule
 * starting at the start block. Returns the head entry of the final chain. */
356 static blocksched_entry_t *finish_block_schedule(blocksched_env_t *env)
358 ir_graph *irg = env->irg;
359 ir_node *startblock = get_irg_start_block(irg);
360 blocksched_entry_t *entry = get_irn_link(startblock);
/* Reserve the visited flag used by pick_block_successor() to mark
 * already-emitted blocks; released again below. */
362 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
363 inc_irg_visited(irg);
365 env->worklist = new_pdeq();
366 pick_block_successor(entry, env);
/* All blocks must have been consumed, otherwise the schedule is partial. */
367 assert(pdeq_empty(env->worklist));
368 del_pdeq(env->worklist);
370 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
/* Flattens the linked schedule into an ir_node* array of length `count`,
 * allocated on `obst`. `count` must match the chain length (not verified
 * here — NOTE(review): the length check presumably lives on an elided line). */
375 static ir_node **create_blocksched_array(blocksched_env_t *env, blocksched_entry_t *first,
376 int count, struct obstack* obst)
379 ir_node **block_list;
380 blocksched_entry_t *entry;
383 block_list = NEW_ARR_D(ir_node *, obst, count);
384 DBG((dbg, LEVEL_1, "Blockschedule:\n"));
386 for (entry = first; entry != NULL; entry = entry->next) {
388 block_list[i++] = entry->block;
389 DBG((dbg, LEVEL_1, "\t%+F\n", entry->block));
/* Driver for the greedy (and naive) strategy: collect edge frequencies,
 * sort them, optionally coalesce, then linearise into an array allocated
 * on the irg obstack. Also serves BLOCKSCHED_NAIV by skipping coalescing. */
396 static ir_node **create_block_schedule_greedy(ir_graph *irg, ir_exec_freq *execfreqs)
398 blocksched_env_t env;
400 blocksched_entry_t *start_entry;
401 ir_node **block_list;
407 env.execfreqs = execfreqs;
408 env.edges = NEW_ARR_F(edge_t, 0);
412 // collect edge execution frequencies
413 irg_block_walk_graph(irg, collect_egde_frequency, NULL, &env);
415 // sort interblock edges by execution frequency
416 qsort(env.edges, ARR_LEN(env.edges), sizeof(env.edges[0]), cmp_edges);
/* Empty blocks were only needed as critical-edge splitters; drop them. */
418 (void)be_remove_empty_blocks(irg);
420 if (algo != BLOCKSCHED_NAIV)
421 coalesce_blocks(&env);
423 start_entry = finish_block_schedule(&env);
424 block_list = create_blocksched_array(&env, start_entry, env.blockcount, get_irg_obstack(irg));
426 DEL_ARR_F(env.edges);
427 obstack_free(&obst, NULL);
/* A cfg edge as seen by the ILP formulation; each gets a binary ILP
 * variable ("is this edge a fallthrough?"). */
442 typedef struct _ilp_edge_t {
443 ir_node *block; /**< source block */
444 int pos; /**< number of cfg predecessor (target) */
/* ILP state: extends the plain env with the edge list and LP problem. */
448 typedef struct _blocksched_ilp_env_t {
449 blocksched_env_t env;
450 ilp_edge_t *ilpedges;
452 } blocksched_ilp_env_t;
/* Per-block ILP entry. The first fields deliberately mirror
 * blocksched_entry_t (same next/prev prefix) so the generic scheduling
 * code can treat both interchangeably — do not reorder. */
454 typedef struct _blocksched_ilp_entry_t {
456 struct _blocksched_entry_t *next;
457 struct _blocksched_entry_t *prev;
460 } blocksched_ilp_entry_t;
/* Appends a new ILP edge (block, pos) with a fresh binary LP variable whose
 * objective coefficient is `execfreq`; returns the edge's index in
 * env->ilpedges. */
462 static int add_ilp_edge(ir_node *block, int pos, double execfreq, blocksched_ilp_env_t *env)
466 int edgeidx = ARR_LEN(env->ilpedges);
468 snprintf(name, sizeof(name), "edge%d", edgeidx);
472 edge.ilpvar = lpp_add_var_default(env->lpp, name, lpp_binary, execfreq, 1.0);
474 ARR_APP1(ilp_edge_t, env->ilpedges, edge);
/* ILP counterpart of collect_egde_frequency (same "egde" typo, kept):
 * creates per-block entries, ILP edge variables, and the in/out
 * constraints that allow at most one fallthrough per block side. */
478 static void collect_egde_frequency_ilp(ir_node *block, void *data)
480 blocksched_ilp_env_t *env = data;
481 ir_graph *irg = env->env.irg;
482 ir_node *startblock = get_irg_start_block(irg);
487 blocksched_ilp_entry_t *entry;
489 snprintf(name, sizeof(name), "block_out_constr_%ld", get_irn_node_nr(block));
490 out_count = get_irn_n_edges_kind(block, EDGE_KIND_BLOCK);
492 entry = obstack_alloc(env->env.obst, sizeof(entry[0]));
493 entry->block = block;
/* Out constraint: at least out_count-1 outgoing edges must be jumps,
 * i.e. at most one may become a fallthrough. */
496 entry->out_cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater, out_count - 1);
497 set_irn_link(block, entry);
/* The start block has no incoming cfg edges to model. */
499 if (block == startblock)
502 arity = get_irn_arity(block);
504 double execfreq = get_block_execfreq(env->env.execfreqs, block);
505 add_ilp_edge(block, 0, execfreq, env);
/* NOTE(review): alloca'd per-block scratch; fine as long as arity stays
 * small, which holds for cfg predecessor counts in practice. */
509 int *edgenums = alloca(sizeof(edgenums[0]) * arity);
511 snprintf(name, sizeof(name), "block_in_constr_%ld", get_irn_node_nr(block));
/* In constraint: symmetric to the out constraint — at most one incoming
 * edge may become a fallthrough. */
512 cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater, arity - 1);
514 for (i = 0; i < arity; ++i) {
518 ir_node *pred_block = get_Block_cfgpred_block(block, i);
520 execfreq = get_block_execfreq(env->env.execfreqs, pred_block);
521 edgenum = add_ilp_edge(block, i, execfreq, env);
522 edge = &env->ilpedges[edgenum];
523 lpp_set_factor_fast(env->lpp, cst, edge->ilpvar, 1.0);
/* Completes the out constraints, solves the ILP, and converts every edge
 * whose variable was set (i.e. chosen as fallthrough) into a next/prev
 * link between the two blocks. */
529 static void coalesce_blocks_ilp(blocksched_ilp_env_t *env)
532 int edge_count = ARR_LEN(env->ilpedges);
534 /* complete out constraints */
535 for(i = 0; i < edge_count; ++i) {
536 const ilp_edge_t *edge = &env->ilpedges[i];
537 ir_node *block = edge->block;
539 blocksched_ilp_entry_t *entry;
541 /* the block might have been removed already... */
542 if (is_Bad(get_Block_cfgpred(block, 0)))
545 pred = get_Block_cfgpred_block(block, edge->pos);
546 entry = get_irn_link(pred);
548 DBG((dbg, LEVEL_1, "Adding out cst to %+F from %+F,%d\n",
549 pred, block, edge->pos));
550 lpp_set_factor_fast(env->lpp, entry->out_cst, edge->ilpvar, 1.0);
/* Debug dumps of the LP problem before solving. */
557 lpp_dump(env->lpp, "lpp.out");
558 snprintf(fname, sizeof(fname), "lpp_%s.plain", get_irg_dump_name(env->env.irg));
559 f = fopen(fname, "w");
/* NOTE(review): fopen result is passed on without a NULL check here —
 * verify lpp_dump_plain tolerates NULL or add a guard. */
560 lpp_dump_plain(env->lpp, f);
565 //lpp_solve_net(env->lpp, main_env->options->ilp_server, main_env->options->ilp_solver);
/* FIXME(review): hard-coded ILP server host and solver; should come from
 * the backend options as the commented-out line above intends. */
566 lpp_solve_net(env->lpp, "i44pc52", "cplex");
567 assert(lpp_is_sol_valid(env->lpp));
569 /* Apply results to edges */
570 for (i = 0; i < edge_count; ++i) {
571 const ilp_edge_t *edge = &env->ilpedges[i];
572 ir_node *block = edge->block;
575 blocksched_entry_t *entry;
576 blocksched_entry_t *pred_entry;
578 /* the block might have been removed already... */
579 if (is_Bad(get_Block_cfgpred(block, 0)))
/* Variable value 1 means "keep as jump"; 0 means fallthrough — the
 * interpretation check presumably sits on an elided line. */
582 is_jump = (int)lpp_get_var_sol(env->lpp, edge->ilpvar);
586 pred = get_Block_cfgpred_block(block, edge->pos);
587 entry = get_irn_link(block);
588 pred_entry = get_irn_link(pred);
/* The ILP constraints guarantee each block side is used at most once. */
590 assert(entry->prev == NULL && pred_entry->next == NULL);
591 entry->prev = pred_entry;
592 pred_entry->next = entry;
/* Driver for the ILP strategy: builds the LP model via the block walker,
 * solves it in coalesce_blocks_ilp(), then reuses the generic
 * finish/flatten machinery of the greedy path. */
596 static ir_node **create_block_schedule_ilp(ir_graph *irg, ir_exec_freq *execfreqs)
598 blocksched_ilp_env_t env;
600 blocksched_entry_t *start_entry;
601 ir_node **block_list;
606 env.env.obst = &obst;
607 env.env.execfreqs = execfreqs;
608 env.env.worklist = NULL;
609 env.env.blockcount = 0;
610 env.ilpedges = NEW_ARR_F(ilp_edge_t, 0);
612 env.lpp = new_lpp("blockschedule", lpp_minimize);
/* Bound the solver: 20 time units (presumably seconds — confirm against
 * the lpp API) and progress logging to stdout. */
613 lpp_set_time_limit(env.lpp, 20);
614 lpp_set_log(env.lpp, stdout);
616 irg_block_walk_graph(irg, collect_egde_frequency_ilp, NULL, &env);
618 (void)be_remove_empty_blocks(irg);
619 coalesce_blocks_ilp(&env);
/* From here on the ILP entries are used through their blocksched_entry_t
 * prefix (see the struct layout comment). */
621 start_entry = finish_block_schedule(&env.env);
622 block_list = create_blocksched_array(&env.env, start_entry, env.env.blockcount, get_irg_obstack(irg));
624 DEL_ARR_F(env.ilpedges);
626 obstack_free(&obst, NULL);
630 #endif /* WITH_ILP */
634 * | ____|_ _| |_| __ )| __ )
635 * | _| \ \/ / __| _ \| _ \
636 * | |___ > <| |_| |_) | |_) |
637 * |_____/_/\_\\__|____/|____/
641 /** A simple forward single linked list. */
643 ir_node *start; /**< start of the list */
644 ir_node *end; /**< last block in the list */
645 unsigned n_blks; /**< number of blocks in the list */
/* Appends `block` at the end of the list; links blocks through their irn
 * link field. */
648 static void add_block(anchor *list, ir_node *block) {
649 if (list->start == NULL) {
653 set_irn_link(list->end, block);
/* Depth-first walk over extended basic blocks: appends all blocks of the
 * extbb led by `leader_block` to `list`, then recurses into successor
 * extbbs. Uses the extbb visited flag to avoid revisiting. */
660 static void create_block_list(ir_node *leader_block, anchor *list) {
662 const ir_edge_t *edge;
663 ir_node *block = NULL;
664 ir_extblk *extbb = get_Block_extbb(leader_block);
666 if (extbb_visited(extbb))
668 mark_extbb_visited(extbb);
/* Emit every block of this extended basic block in order. */
670 for (i = 0; i < get_extbb_n_blocks(extbb); ++i) {
671 block = get_extbb_block(extbb, i);
672 add_block(list, block);
/* `block` now holds the last block of the extbb. */
675 assert(block != NULL);
677 /* pick successor extbbs */
/* First recurse from the extbb's last block... */
678 foreach_block_succ(block, edge) {
679 ir_node *succ = get_edge_src_irn(edge);
680 create_block_list(succ, list);
/* ...then from all earlier blocks, so side exits are scheduled too. */
683 for (i = 0; i < get_extbb_n_blocks(extbb) - 1; ++i) {
684 block = get_extbb_block(extbb, i);
686 foreach_block_succ(block, edge) {
687 ir_node *succ = get_edge_src_irn(edge);
688 create_block_list(succ, list);
/* NOTE(review): forward declaration of an externally defined function —
 * should ideally come from a header instead. */
693 void compute_extbb_execfreqs(ir_graph *irg, ir_exec_freq *execfreqs);
696 * Calculates a block schedule. The schedule is stored as a linked
697 * list starting at the start_block of the irg.
699 static ir_node **create_extbb_block_schedule(ir_graph *irg, ir_exec_freq *execfreqs)
702 ir_node **blk_list, *b, *n;
705 /* schedule extended basic blocks */
706 compute_extbb_execfreqs(irg, execfreqs);
707 //compute_extbb(irg);
/* Both the visited flag (extbb walk) and the link field (list chaining)
 * are used below; reserve them for the duration. */
713 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED | IR_RESOURCE_IRN_LINK);
714 inc_irg_block_visited(irg);
716 create_block_list(get_irg_start_block(irg), &list);
718 /** create an array, so we can go forward and backward */
719 blk_list = NEW_ARR_D(ir_node *, irg->obst,list.n_blks);
/* Copy the linked list into the array; `n` caches the next link before
 * the link field is (presumably) cleared on an elided line. */
721 for (i = 0, b = list.start; b; b = n, ++i) {
726 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED | IR_RESOURCE_IRN_LINK);
734 * | |\/| |/ _` | | '_ \
735 * | | | | (_| | | | | |
736 * |_| |_|\__,_|_|_| |_|
/* Module constructor: registers the "be.blocksched" option group and the
 * debug module. Invoked via BE_REGISTER_MODULE_CONSTRUCTOR below. */
739 void be_init_blocksched(void)
741 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
742 lc_opt_entry_t *blocksched_grp = lc_opt_get_grp(be_grp, "blocksched");
744 lc_opt_add_table(blocksched_grp, be_blocksched_options);
746 FIRM_DBG_REGISTER(dbg, "firm.be.blocksched");
749 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_blocksched);
/* Public entry point: dispatches to the strategy selected by the "algo"
 * option. Returns the block schedule as an array allocated on the irg
 * obstack. BLOCKSCHED_ILP is only available when built WITH_ILP. */
751 ir_node **be_create_block_schedule(ir_graph *irg, ir_exec_freq *execfreqs)
754 case BLOCKSCHED_GREEDY:
/* NAIV shares the greedy driver; it merely skips coalescing there. */
755 case BLOCKSCHED_NAIV:
756 return create_block_schedule_greedy(irg, execfreqs);
757 case BLOCKSCHED_EXTBB:
758 return create_extbb_block_schedule(irg, execfreqs);
761 return create_block_schedule_ilp(irg, execfreqs);
762 #endif /* WITH_ILP */
/* Unreachable for valid option values. */
765 panic("unknown blocksched algo");