2 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Block-scheduling strategies.
23 * @author Matthias Braun, Christoph Mallon
31 #include "beblocksched.h"
41 #include "irgraph_t.h"
54 #include <lpp/lpp_net.h>
/* Debug module handle; registered in be_init_blocksched(). */
57 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/* The available block-scheduling strategies. */
59 typedef enum _blocksched_algos_t {
60 	BLOCKSCHED_NAIV, BLOCKSCHED_EXTBB, BLOCKSCHED_GREEDY, BLOCKSCHED_ILP
/* Currently selected algorithm; default is the greedy scheduler.
 * Overridable via the "algo" command line option below. */
63 static int algo = BLOCKSCHED_GREEDY;
68 #include <libcore/lc_opts.h>
69 #include <libcore/lc_opts_enum.h>
70 #include <libcore/lc_timing.h>
/* Maps option strings to algorithm ids for the "algo" option. */
72 static const lc_opt_enum_int_items_t blockschedalgo_items[] = {
73 	{ "naiv",	BLOCKSCHED_NAIV },
74 	{ "extbb",	BLOCKSCHED_EXTBB },
75 	{ "greedy", BLOCKSCHED_GREEDY },
77 	{ "ilp",	BLOCKSCHED_ILP },
/* Binds the enum items above to the 'algo' variable. */
82 static lc_opt_enum_int_var_t algo_var = {
83 	&algo, blockschedalgo_items
/* Option table registered under be.blocksched in be_init_blocksched(). */
86 static const lc_opt_table_entry_t be_blocksched_options[] = {
87 	LC_OPT_ENT_ENUM_INT ("algo", "the block scheduling algorithm", &algo_var),
90 #endif /* WITH_LIBCORE */
94 * / ___|_ __ ___ ___ __| |_ _
95 * | | _| '__/ _ \/ _ \/ _` | | | |
96 * | |_| | | | __/ __/ (_| | |_| |
97 * \____|_| \___|\___|\__,_|\__, |
/* One node of the doubly linked block-schedule chain. Each block is mapped
 * to its entry via the irn link field (see set_irn_link() below). */
101 typedef struct _blocksched_entry_t {
103 	struct _blocksched_entry_t *next;
104 	struct _blocksched_entry_t *prev;
105 } blocksched_entry_t;
/* A weighted cfg edge, candidate for coalescing two blocks. */
107 typedef struct _edge_t {
108 	ir_node *block;             /**< source block */
109 	int     pos;                /**< number of cfg predecessor (target) */
110 	double  execfreq;           /**< the frequency */
111 	int     highest_execfreq;   /**< flag that indicates whether this edge is the edge with the highest
112 	                                 execfreq pointing away from this block */
/* Shared state of the greedy/naive scheduler. NOTE(review): this listing
 * elides some fields (irg, edges, worklist, blockcount are referenced by the
 * functions below) — confirm against the full source. */
115 typedef struct _blocksched_env_t {
117 	struct obstack *obst;
118 	ir_exec_freq *execfreqs;
125 * Collect cfg frequencies of all edges between blocks.
126 * Also determines edge with highest frequency.
/* NOTE(review): this numbered listing elides some original lines (gaps in the
 * left-hand numbers); comments describe the visible code only. */
128 static void collect_egde_frequency(ir_node *block, void *data)
130 	blocksched_env_t *env = data;
133 	blocksched_entry_t *entry;
/* Create the schedule-list entry for this block and attach it to the block
 * through the irn link field. */
135 	entry = obstack_alloc(env->obst, sizeof(entry[0]));
136 	entry->block = block;
139 	set_irn_link(block, entry);
/* The start block gets no incoming edge records. */
141 	if (block == get_irg_start_block(env->irg))
144 	arity = get_Block_n_cfgpreds(block);
/* Single-predecessor case (partly elided): record one edge, weighted with the
 * block's own frequency, and mark it as the highest right away. */
149 		edge.execfreq = get_block_execfreq(env->execfreqs, block);
150 		edge.highest_execfreq = 1;
151 		ARR_APP1(edge_t, env->edges, edge);
/* Multiple predecessors: record one edge per predecessor and remember the
 * index of the most frequent one. */
154 		double highest_execfreq = -1.0;
155 		int    highest_edge_num = -1;
158 		for (i = 0; i < arity; ++i) {
160 			ir_node *pred_block = get_Block_cfgpred_block(block, i);
162 			execfreq = get_block_execfreq(env->execfreqs, pred_block);
165 			edge.execfreq = execfreq;
166 			edge.highest_execfreq = 0;
167 			ARR_APP1(edge_t, env->edges, edge);
169 			if (execfreq > highest_execfreq) {
170 				highest_execfreq = execfreq;
171 				highest_edge_num = ARR_LEN(env->edges) - 1;
/* Flag the most frequent incoming edge, if any predecessor existed. */
175 		if(highest_edge_num >= 0)
176 			env->edges[highest_edge_num].highest_execfreq = 1;
/* qsort() comparator ordering edges by DESCENDING execution frequency
 * (note the swapped operands; assumes QSORT_CMP(a, b) yields ascending
 * order — TODO confirm against its definition). */
180 static int cmp_edges(const void *d1, const void *d2)
182 	const edge_t *e1 = d1;
183 	const edge_t *e2 = d2;
185 	return QSORT_CMP(e2->execfreq, e1->execfreq);
/* Greedily chain blocks behind their predecessors. The edge array was
 * sorted by descending frequency (cmp_edges), so hot edges win.
 * Pass 1 only coalesces across unconditional jumps (predecessor has a
 * single cfg successor); pass 2 takes whatever edges remain. */
188 static void coalesce_blocks(blocksched_env_t *env)
191 	int edge_count = ARR_LEN(env->edges);
193 	/* run1: only look at jumps */
194 	for (i = 0; i < edge_count; ++i) {
195 		const edge_t *edge  = &env->edges[i];
196 		ir_node      *block = edge->block;
198 		blocksched_entry_t *entry, *pred_entry;
200 		/* the block might have been removed already... */
201 		if (is_Bad(get_Block_cfgpred(block, 0)))
204 		/* only check edge with highest frequency */
205 		if (! edge->highest_execfreq)
208 		pred_block = get_Block_cfgpred_block(block, edge->pos);
209 		entry      = get_irn_link(block);
210 		pred_entry = get_irn_link(pred_block);
/* Skip if either side is already chained to another block. */
212 		if (pred_entry->next != NULL || entry->prev != NULL)
215 		/* only coalesce jumps */
216 		if (get_block_succ_next(pred_block, get_block_succ_first(pred_block)) != NULL)
219 		/* schedule the 2 blocks behind each other */
220 		DBG((dbg, LEVEL_1, "Coalesce (Jump) %+F -> %+F (%.3g)\n",
221 		     pred_entry->block, entry->block, edge->execfreq));
222 		pred_entry->next = entry;
223 		entry->prev      = pred_entry;
226 	/* run2: remaining edges */
227 	for (i = 0; i < edge_count; ++i) {
228 		const edge_t *edge  = &env->edges[i];
229 		ir_node      *block = edge->block;
231 		blocksched_entry_t *entry, *pred_entry;
233 		/* the block might have been removed already... */
234 		if (is_Bad(get_Block_cfgpred(block, 0)))
237 		pred_block = get_Block_cfgpred_block(block, edge->pos);
238 		entry      = get_irn_link(block);
239 		pred_entry = get_irn_link(pred_block);
241 		/* is 1 of the blocks already attached to another block? */
242 		if (pred_entry->next != NULL || entry->prev != NULL)
245 		/* schedule the 2 blocks behind each other */
246 		DBG((dbg, LEVEL_1, "Coalesce (CondJump) %+F -> %+F (%.3g)\n",
247 		     pred_entry->block, entry->block, edge->execfreq));
248 		pred_entry->next = entry;
249 		entry->prev      = pred_entry;
/* Recursively decide which block follows entry->block in the final order:
 * prefer an already-coalesced ->next entry, else the unplaced cfg successor
 * with the highest execution frequency, else fall back to the worklist.
 * Uses the irg visited flag to mark already-placed blocks. */
253 static void pick_block_successor(blocksched_entry_t *entry, blocksched_env_t *env)
255 	ir_node            *block = entry->block;
256 	ir_node            *succ  = NULL;
257 	blocksched_entry_t *succ_entry;
258 	const ir_edge_t    *edge;
259 	double              best_succ_execfreq;
261 	if (irn_visited(block))
265 	mark_irn_visited(block);
267 	DBG((dbg, LEVEL_1, "Pick succ of %+F\n", block));
269 	/* put all successors into the worklist */
270 	foreach_block_succ(block, edge) {
271 		ir_node *succ_block = get_edge_src_irn(edge);
273 		if (irn_visited(succ_block))
276 		/* we only need to put the first of a series of already connected
277 		 * blocks into the worklist */
278 		succ_entry = get_irn_link(succ_block);
279 		while (succ_entry->prev != NULL) {
280 			/* break cycles... */
281 			if (succ_entry->prev->block == succ_block) {
282 				succ_entry->prev->next = NULL;
283 				succ_entry->prev       = NULL;
286 			succ_entry = succ_entry->prev;
289 		if (irn_visited(succ_entry->block))
292 		DBG((dbg, LEVEL_1, "Put %+F into worklist\n", succ_entry->block));
293 		pdeq_putr(env->worklist, succ_entry->block);
/* Already chained to a successor: just continue along the chain. */
296 	if (entry->next != NULL) {
297 		pick_block_successor(entry->next, env);
301 	DBG((dbg, LEVEL_1, "deciding...\n"));
302 	best_succ_execfreq = -1;
304 	/* no successor yet: pick the successor block with the highest execution
305 	 * frequency which has no predecessor yet */
307 	foreach_block_succ(block, edge) {
308 		ir_node *succ_block = get_edge_src_irn(edge);
311 		if (irn_visited(succ_block))
314 		succ_entry = get_irn_link(succ_block);
315 		if (succ_entry->prev != NULL)
318 		execfreq = get_block_execfreq(env->execfreqs, succ_block);
319 		if (execfreq > best_succ_execfreq) {
320 			best_succ_execfreq = execfreq;
/* No eligible cfg successor found: pop unvisited blocks from the worklist. */
326 		DBG((dbg, LEVEL_1, "pick from worklist\n"));
329 			if (pdeq_empty(env->worklist)) {
330 				DBG((dbg, LEVEL_1, "worklist empty\n"));
333 			succ = pdeq_getl(env->worklist);
334 		} while (irn_visited(succ));
/* Chain the chosen successor behind the current entry and recurse. */
337 	succ_entry       = get_irn_link(succ);
338 	entry->next      = succ_entry;
339 	succ_entry->prev = entry;
341 	pick_block_successor(succ_entry, env);
/* Build the final linear block order: start at the irg's start block and
 * extend/follow the coalesced chains via pick_block_successor().
 * Returns the first entry of the resulting chain. */
344 static blocksched_entry_t *finish_block_schedule(blocksched_env_t *env)
346 	ir_graph           *irg        = env->irg;
347 	ir_node            *startblock = get_irg_start_block(irg);
348 	blocksched_entry_t *entry      = get_irn_link(startblock);
/* Claim and reset the visited flag used to mark already-placed blocks. */
350 	set_using_visited(irg);
351 	inc_irg_visited(irg);
353 	env->worklist = new_pdeq();
354 	pick_block_successor(entry, env);
/* All reachable blocks must have been consumed from the worklist. */
355 	assert(pdeq_empty(env->worklist));
356 	del_pdeq(env->worklist);
358 	clear_using_visited(irg);
/* Flatten the linked entry chain starting at 'first' into an array of
 * 'count' block pointers allocated on 'obst'. */
363 static ir_node **create_blocksched_array(blocksched_env_t *env, blocksched_entry_t *first,
364                                          int count, struct obstack* obst)
367 	ir_node            **block_list;
368 	blocksched_entry_t *entry;
371 	block_list = NEW_ARR_D(ir_node *, obst, count);
372 	DBG((dbg, LEVEL_1, "Blockschedule:\n"));
374 	for (entry = first; entry != NULL; entry = entry->next) {
376 		block_list[i++] = entry->block;
377 		DBG((dbg, LEVEL_1, "\t%+F\n", entry->block));
/* Greedy driver: collect edge frequencies, sort them, coalesce hot chains
 * (unless the naive algorithm was selected) and emit the block array. */
384 static ir_node **create_block_schedule_greedy(ir_graph *irg, ir_exec_freq *execfreqs)
386 	blocksched_env_t   env;
388 	blocksched_entry_t *start_entry;
389 	ir_node            **block_list;
395 	env.execfreqs  = execfreqs;
396 	env.edges      = NEW_ARR_F(edge_t, 0);
400 	// collect edge execution frequencies
401 	irg_block_walk_graph(irg, collect_egde_frequency, NULL, &env);
403 	// sort interblock edges by execution frequency
404 	qsort(env.edges, ARR_LEN(env.edges), sizeof(env.edges[0]), cmp_edges);
406 	(void)be_remove_empty_blocks(irg);
/* BLOCKSCHED_NAIV keeps the plain walk order: skip coalescing. */
408 	if (algo != BLOCKSCHED_NAIV)
409 		coalesce_blocks(&env);
411 	start_entry = finish_block_schedule(&env);
412 	block_list  = create_blocksched_array(&env, start_entry, env.blockcount, get_irg_obstack(irg));
414 	DEL_ARR_F(env.edges);
/* Releases the local obstack (declared at an elided line above). */
415 	obstack_free(&obst, NULL);
/* An edge in the ILP formulation; its binary lpp variable (ilpvar, at an
 * elided line) decides whether the edge stays a jump. */
430 typedef struct _ilp_edge_t {
431 	ir_node *block;   /**< source block */
432 	int     pos;      /**< number of cfg predecessor (target) */
/* ILP scheduler environment: base env plus ILP edges and (elided) the
 * lpp problem handle used below. */
436 typedef struct _blocksched_ilp_env_t {
437 	blocksched_env_t env;
438 	ilp_edge_t       *ilpedges;
440 } blocksched_ilp_env_t;
/* Per-block entry for the ILP scheduler; also carries the block's
 * outgoing-edge constraint (out_cst, referenced below but elided here). */
442 typedef struct _blocksched_ilp_entry_t {
444 	struct _blocksched_entry_t *next;
445 	struct _blocksched_entry_t *prev;
448 } blocksched_ilp_entry_t;
/* Append a new ILP edge for (block, pos) and create its binary lpp variable,
 * weighted with execfreq in the objective. Returns the edge's index in
 * env->ilpedges (return statement elided in this listing; see the caller's
 * use of the result). */
450 static int add_ilp_edge(ir_node *block, int pos, double execfreq, blocksched_ilp_env_t *env)
454 	int           edgeidx = ARR_LEN(env->ilpedges);
456 	snprintf(name, sizeof(name), "edge%d", edgeidx);
460 	edge.ilpvar = lpp_add_var_default(env->lpp, name, lpp_binary, execfreq, 1.0);
462 	ARR_APP1(ilp_edge_t, env->ilpedges, edge);
/* ILP analogue of collect_egde_frequency(): creates one binary variable per
 * cfg edge plus per-block in/out constraints. With lpp_minimize and the
 * "sum >= count-1" constraints, at most one edge variable per block may be 0,
 * i.e. at most one incoming/outgoing edge becomes a fallthrough. */
466 static void collect_egde_frequency_ilp(ir_node *block, void *data)
468 	blocksched_ilp_env_t *env        = data;
469 	ir_graph             *irg        = env->env.irg;
470 	ir_node              *startblock = get_irg_start_block(irg);
475 	blocksched_ilp_entry_t *entry;
477 	snprintf(name, sizeof(name), "block_out_constr_%ld", get_irn_node_nr(block));
478 	out_count = get_irn_n_edges_kind(block, EDGE_KIND_BLOCK);
480 	entry          = obstack_alloc(env->env.obst, sizeof(entry[0]));
481 	entry->block   = block;
/* out constraint: at least out_count-1 of the outgoing edge vars must be 1. */
484 	entry->out_cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater, out_count - 1);
485 	set_irn_link(block, entry);
487 	if (block == startblock)
490 	arity = get_irn_arity(block);
/* Single-predecessor case (partly elided): just one edge, no in constraint. */
492 		double execfreq = get_block_execfreq(env->env.execfreqs, block);
493 		add_ilp_edge(block, 0, execfreq, env);
497 		int *edgenums = alloca(sizeof(edgenums[0]) * arity);
/* in constraint: at least arity-1 incoming edge vars must be 1. */
499 		snprintf(name, sizeof(name), "block_in_constr_%ld", get_irn_node_nr(block));
500 		cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater, arity - 1);
502 		for (i = 0; i < arity; ++i) {
506 			ir_node *pred_block = get_Block_cfgpred_block(block, i);
508 			execfreq = get_block_execfreq(env->env.execfreqs, pred_block);
509 			edgenum  = add_ilp_edge(block, i, execfreq, env);
510 			edge     = &env->ilpedges[edgenum];
511 			lpp_set_factor_fast(env->lpp, cst, edge->ilpvar, 1.0);
/* Complete the out constraints, solve the ILP, and translate the solution
 * into prev/next chains (an edge whose variable is 0 becomes a fallthrough;
 * the check on is_jump sits on an elided line). */
517 static void coalesce_blocks_ilp(blocksched_ilp_env_t *env)
520 	int  edge_count = ARR_LEN(env->ilpedges);
522 	/* complete out constraints */
523 	for(i = 0; i < edge_count; ++i) {
524 		const ilp_edge_t *edge  = &env->ilpedges[i];
525 		ir_node          *block = edge->block;
527 		blocksched_ilp_entry_t *entry;
529 		/* the block might have been removed already... */
530 		if (is_Bad(get_Block_cfgpred(block, 0)))
533 		pred  = get_Block_cfgpred_block(block, edge->pos);
534 		entry = get_irn_link(pred);
536 		DBG((dbg, LEVEL_1, "Adding out cst to %+F from %+F,%d\n",
537 		     pred, block, edge->pos));
538 		lpp_set_factor_fast(env->lpp, entry->out_cst, edge->ilpvar, 1.0);
545 	lpp_dump(env->lpp, "lpp.out");
546 	snprintf(fname, sizeof(fname), "lpp_%s.plain", get_irg_dump_name(env->env.irg));
/* NOTE(review): fopen() result is not checked before use. */
547 	f = fopen(fname, "w");
548 	lpp_dump_plain(env->lpp, f);
/* NOTE(review): ILP server and solver are hard coded; should come from the
 * backend options (see the commented-out line below). */
553 	//lpp_solve_net(env->lpp, main_env->options->ilp_server, main_env->options->ilp_solver);
554 	lpp_solve_net(env->lpp, "i44pc52", "cplex");
555 	assert(lpp_is_sol_valid(env->lpp));
557 	/* Apply results to edges */
558 	for (i = 0; i < edge_count; ++i) {
559 		const ilp_edge_t *edge  = &env->ilpedges[i];
560 		ir_node          *block = edge->block;
563 		blocksched_entry_t *entry;
564 		blocksched_entry_t *pred_entry;
566 		/* the block might have been removed already... */
567 		if (is_Bad(get_Block_cfgpred(block, 0)))
570 		is_jump = (int)lpp_get_var_sol(env->lpp, edge->ilpvar);
574 		pred       = get_Block_cfgpred_block(block, edge->pos);
575 		entry      = get_irn_link(block);
576 		pred_entry = get_irn_link(pred);
/* The constraints guarantee at most one fallthrough per direction. */
578 		assert(entry->prev == NULL && pred_entry->next == NULL);
579 		entry->prev      = pred_entry;
580 		pred_entry->next = entry;
/* ILP driver: build the problem (minimizing jump cost, 20s time limit),
 * solve it and emit the resulting block array. */
584 static ir_node **create_block_schedule_ilp(ir_graph *irg, ir_exec_freq *execfreqs)
586 	blocksched_ilp_env_t env;
588 	blocksched_entry_t   *start_entry;
589 	ir_node              **block_list;
594 	env.env.obst       = &obst;
595 	env.env.execfreqs  = execfreqs;
596 	env.env.worklist   = NULL;
597 	env.env.blockcount = 0;
598 	env.ilpedges       = NEW_ARR_F(ilp_edge_t, 0);
600 	env.lpp = new_lpp("blockschedule", lpp_minimize);
601 	lpp_set_time_limit(env.lpp, 20);
602 	lpp_set_log(env.lpp, stdout);
604 	irg_block_walk_graph(irg, collect_egde_frequency_ilp, NULL, &env);
606 	(void)be_remove_empty_blocks(irg);
607 	coalesce_blocks_ilp(&env);
/* Linearization and array creation are shared with the greedy scheduler. */
609 	start_entry = finish_block_schedule(&env.env);
610 	block_list  = create_blocksched_array(&env.env, start_entry, env.env.blockcount, get_irg_obstack(irg));
612 	DEL_ARR_F(env.ilpedges);
614 	obstack_free(&obst, NULL);
618 #endif /* WITH_ILP */
622 * | ____|_ _| |_| __ )| __ )
623 * | _| \ \/ / __| _ \| _ \
624 * | |___ > <| |_| |_) | |_) |
625 * |_____/_/\_\\__|____/|____/
629 /** A simple forward singly linked list, threaded through the blocks' irn
 *   link fields (see add_block()). */
631 	ir_node  *start;   /**< start of the list */
632 	ir_node  *end;     /**< last block in the list */
633 	unsigned n_blks;  /**< number of blocks in the list */
/* Append 'block' at the tail of the list; non-empty case links the old tail
 * to the new block through the irn link field. */
636 static void add_block(anchor *list, ir_node *block) {
637 	if (list->start == NULL) {
641 		set_irn_link(list->end, block);
/* Depth-first placement of extended basic blocks: emit all member blocks of
 * the leader's extbb, then recurse into successor extbbs — first from the
 * last member block, then from the remaining ones. Uses the extbb visited
 * mark to avoid revisiting. */
648 static void create_block_list(ir_node *leader_block, anchor *list) {
650 	const ir_edge_t *edge;
651 	ir_node *block = NULL;
652 	ir_extblk *extbb = get_Block_extbb(leader_block);
654 	if (extbb_visited(extbb))
656 	mark_extbb_visited(extbb);
/* Emit this extended block's member blocks in order. */
658 	for (i = 0; i < get_extbb_n_blocks(extbb); ++i) {
659 		block = get_extbb_block(extbb, i);
660 		add_block(list, block);
663 	assert(block != NULL);
665 	/* pick successor extbbs */
666 	foreach_block_succ(block, edge) {
667 		ir_node *succ = get_edge_src_irn(edge);
668 		create_block_list(succ, list);
/* Then recurse from all member blocks except the last one. */
671 	for (i = 0; i < get_extbb_n_blocks(extbb) - 1; ++i) {
672 		block = get_extbb_block(extbb, i);
674 		foreach_block_succ(block, edge) {
675 			ir_node *succ = get_edge_src_irn(edge);
676 			create_block_list(succ, list);
/* Forward declaration — defined elsewhere (presumably the execfreq module;
 * TODO confirm). */
681 void compute_extbb_execfreqs(ir_graph *irg, ir_exec_freq *execfreqs);
684  * Calculates a block schedule. The schedule is stored as a linked
685  * list starting at the start_block of the irg.
687 static ir_node **create_extbb_block_schedule(ir_graph *irg, ir_exec_freq *execfreqs)
690 	ir_node  **blk_list, *b, *n;
693 	/* schedule extended basic blocks */
694 	compute_extbb_execfreqs(irg, execfreqs);
695 	//compute_extbb(irg);
/* Claim the irn link and visited flags used by create_block_list(). */
701 	set_using_irn_link(irg);
702 	set_using_visited(irg);
703 	inc_irg_block_visited(irg);
705 	create_block_list(get_irg_start_block(irg), &list);
707 	/** create an array, so we can go forward and backward */
708 	blk_list = NEW_ARR_D(ir_node *, irg->obst,list.n_blks);
/* Copy the linked list (threaded via irn links) into the array. */
710 	for (i = 0, b = list.start; b; b = n, ++i) {
715 	clear_using_irn_link(irg);
716 	clear_using_visited(irg);
724 * | |\/| |/ _` | | '_ \
725 * | | | | (_| | | | | |
726 * |_| |_|\__,_|_|_| |_|
/* Module constructor: registers the be.blocksched option table and the
 * debug module handle. */
729 void be_init_blocksched(void)
732 	lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
733 	lc_opt_entry_t *blocksched_grp = lc_opt_get_grp(be_grp, "blocksched");
735 	lc_opt_add_table(blocksched_grp, be_blocksched_options);
737 	FIRM_DBG_REGISTER(dbg, "firm.be.blocksched");
740 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_blocksched);
/* Public entry point: dispatch to the scheduler selected via the "algo"
 * option. NAIV shares the greedy driver (which then skips coalescing);
 * ILP is only available when built WITH_ILP. */
742 ir_node **be_create_block_schedule(ir_graph *irg, ir_exec_freq *execfreqs)
745 	case BLOCKSCHED_GREEDY:
746 	case BLOCKSCHED_NAIV:
747 		return create_block_schedule_greedy(irg, execfreqs);
748 	case BLOCKSCHED_EXTBB:
749 		return create_extbb_block_schedule(irg, execfreqs);
752 		return create_block_schedule_ilp(irg, execfreqs);
753 #endif /* WITH_ILP */
756 	assert(0 && "unknown blocksched algo");