2 * Author: Matthias Braun, Christoph Mallon
4 * Copyright: (c) Universitaet Karlsruhe
5 * License: This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
10 #endif /* HAVE_CONFIG_H */
12 #include "beblocksched.h"
21 #include "irgraph_t.h"
32 #include <libcore/lc_opts.h>
33 #include <libcore/lc_opts_enum.h>
34 #include <libcore/lc_timing.h>
35 #endif /* WITH_LIBCORE */
39 #include <lpp/lpp_net.h>
/* Selectable block-scheduling algorithms. (The enum's closing line is not
 * visible in this chunk of the file.) */
42 typedef enum _blocksched_algos_t {
43 BLOCKSCHED_NAIV, BLOCKSCHED_EXTBB, BLOCKSCHED_GREEDY, BLOCKSCHED_ILP
/* Currently selected algorithm; overridable via the "algo" option below. */
46 static int algo = BLOCKSCHED_GREEDY;
/* Option-string -> algorithm-id table for the libcore option parser.
 * NOTE(review): "naiv" keeps the historical (misspelled) option name;
 * renaming it would break existing command lines. */
49 static const lc_opt_enum_int_items_t blockschedalgo_items[] = {
50 { "naiv", BLOCKSCHED_NAIV },
51 { "extbb", BLOCKSCHED_EXTBB },
52 { "greedy", BLOCKSCHED_GREEDY },
54 { "ilp", BLOCKSCHED_ILP },
/* Option variable binding the item table to 'algo' above. */
59 static lc_opt_enum_int_var_t algo_var = {
60 &algo, blockschedalgo_items
/* Option table; registered in be_init_blocksched(). */
63 static const lc_opt_table_entry_t be_blocksched_options[] = {
64 LC_OPT_ENT_ENUM_INT ("algo", "the block scheduling algorithm", &algo_var),
71 * / ___|_ __ ___ ___ __| |_ _
72 * | | _| '__/ _ \/ _ \/ _` | | | |
73 * | |_| | | | __/ __/ (_| | |_| |
74 * \____|_| \___|\___|\__,_|\__, |
/* Doubly linked list node chaining blocks into the final schedule order.
 * (The block pointer member and the closing brace are not visible in this
 * chunk of the file.) */
78 typedef struct _blocksched_entry_t {
80 struct _blocksched_entry_t *next;
81 struct _blocksched_entry_t *prev;
/* A control-flow edge between two blocks, annotated with its execution
 * frequency for the greedy coalescing passes. */
84 typedef struct _edge_t {
85 ir_node *block; /**< source block */
86 int pos; /**< number of cfg predecessor (target) */
87 double execfreq; /**< the frequency */
88 int highest_execfreq; /**< flag that indicates whether this edge is the edge with the highest
89 execfreq pointing away from this block */
/* Shared state for the greedy block scheduler; further members (irg, obst,
 * edges, worklist, blockcount) are used below but not visible here. */
92 typedef struct _blocksched_env_t {
95 ir_exec_freq *execfreqs;
99 DEBUG_ONLY(firm_dbg_module_t *dbg;)
103 * Collect cfg frequencies of all edges between blocks.
104 * Also determines edge with highest frequency.
/* NOTE(review): function name has a typo ("egde"); kept because the walker
 * registration below uses the same spelling. */
106 static void collect_egde_frequency(ir_node *block, void *data)
108 blocksched_env_t *env = data;
109 ir_graph *irg = env->irg;
110 ir_node *startblock = get_irg_start_block(irg);
113 blocksched_entry_t *entry;
/* allocate a schedule-list entry for this block and stash it in the
 * block's link field for later lookup via get_irn_link() */
115 entry = obstack_alloc(env->obst, sizeof(entry[0]));
116 entry->block = block;
119 set_irn_link(block, entry);
/* the start block contributes no predecessor edges */
121 if (block == startblock)
124 arity = get_irn_arity(block);
/* presumably the single-predecessor case: record one edge weighted with
 * the block's own frequency (surrounding lines not visible here) */
129 edge.execfreq = get_block_execfreq(env->execfreqs, block);
130 edge.highest_execfreq = 1;
131 ARR_APP1(edge_t, env->edges, edge);
/* multi-predecessor case: record one edge per predecessor and remember
 * which one carries the highest execution frequency */
134 double highest_execfreq = -1;
135 int highest_edge_num = -1;
138 for (i = 0; i < arity; ++i) {
140 ir_node *pred_block = get_Block_cfgpred_block(block, i);
142 execfreq = get_block_execfreq(env->execfreqs, pred_block);
145 edge.execfreq = execfreq;
146 edge.highest_execfreq = 0;
147 ARR_APP1(edge_t, env->edges, edge);
149 if (execfreq > highest_execfreq) {
150 highest_execfreq = execfreq;
151 highest_edge_num = ARR_LEN(env->edges) - 1;
/* mark the hottest incoming edge (if any) so the first coalescing pass
 * can restrict itself to it */
155 if(highest_edge_num >= 0)
156 env->edges[highest_edge_num].highest_execfreq = 1;
/* qsort comparator: orders edges by DESCENDING execution frequency
 * (note e2/e1 are swapped in the QSORT_CMP call). */
160 static int cmp_edges(const void *d1, const void *d2)
162 const edge_t *e1 = d1;
163 const edge_t *e2 = d2;
165 return QSORT_CMP(e2->execfreq, e1->execfreq);
/**
 * Greedily chain blocks together along frequent control-flow edges by
 * linking their blocksched entries. Relies on env->edges being sorted by
 * descending frequency (cmp_edges). Pass 1 considers only each block's
 * hottest edge when the predecessor ends in an unconditional jump; pass 2
 * takes any remaining edge.
 */
168 static void coalesce_blocks(blocksched_env_t *env)
171 int edge_count = ARR_LEN(env->edges);
173 /* run1: only look at jumps */
174 for (i = 0; i < edge_count; ++i) {
175 const edge_t *edge = &env->edges[i];
176 ir_node *block = edge->block;
178 blocksched_entry_t *entry, *pred_entry;
180 /* the block might have been removed already... */
181 if (is_Bad(get_Block_cfgpred(block, 0)))
184 /* only check edge with highest frequency */
185 if (! edge->highest_execfreq)
188 pred_block = get_Block_cfgpred_block(block, edge->pos);
189 entry = get_irn_link(block);
190 pred_entry = get_irn_link(pred_block);
/* skip if either block is already chained on the relevant side */
192 if (pred_entry->next != NULL || entry->prev != NULL)
195 /* only coalesce jumps */
/* i.e. the predecessor must have exactly one block successor */
196 if (get_block_succ_next(pred_block, get_block_succ_first(pred_block)) != NULL)
199 /* schedule the 2 blocks behind each other */
200 DBG((env->dbg, LEVEL_1, "Coalesce (Jump) %+F -> %+F (%.3g)\n",
201 pred_entry->block, entry->block, edge->execfreq));
202 pred_entry->next = entry;
203 entry->prev = pred_entry;
206 /* run2: remaining edges */
207 for (i = 0; i < edge_count; ++i) {
208 const edge_t *edge = &env->edges[i];
209 ir_node *block = edge->block;
211 blocksched_entry_t *entry, *pred_entry;
213 /* the block might have been removed already... */
214 if (is_Bad(get_Block_cfgpred(block, 0)))
217 pred_block = get_Block_cfgpred_block(block, edge->pos);
218 entry = get_irn_link(block);
219 pred_entry = get_irn_link(pred_block);
221 /* is 1 of the blocks already attached to another block? */
222 if (pred_entry->next != NULL || entry->prev != NULL)
225 /* schedule the 2 blocks behind each other */
226 DBG((env->dbg, LEVEL_1, "Coalesce (CondJump) %+F -> %+F (%.3g)\n",
227 pred_entry->block, entry->block, edge->execfreq));
228 pred_entry->next = entry;
229 entry->prev = pred_entry;
/**
 * Emit 'entry' into the final order and recursively pick its successor:
 * follow a coalesced chain if one exists, otherwise take the unattached
 * successor with the highest execution frequency, falling back to the
 * worklist. Uses the irg visited marks to avoid emitting a block twice.
 */
233 static void pick_block_successor(blocksched_entry_t *entry, blocksched_env_t *env)
235 ir_node *block = entry->block;
236 ir_node *succ = NULL;
237 blocksched_entry_t *succ_entry;
238 const ir_edge_t *edge;
239 double best_succ_execfreq;
241 if (irn_visited(block))
245 mark_irn_visited(block);
247 DBG((env->dbg, LEVEL_1, "Pick succ of %+F\n", block));
249 /* put all successors into the worklist */
250 foreach_block_succ(block, edge) {
251 ir_node *succ_block = get_edge_src_irn(edge);
253 if (irn_visited(succ_block))
256 /* we only need to put the first of a series of already connected
257 * blocks into the worklist */
258 succ_entry = get_irn_link(succ_block);
259 while (succ_entry->prev != NULL) {
260 /* break cycles... */
261 if (succ_entry->prev->block == succ_block) {
262 succ_entry->prev->next = NULL;
263 succ_entry->prev = NULL;
266 succ_entry = succ_entry->prev;
269 if (irn_visited(succ_entry->block))
272 DBG((env->dbg, LEVEL_1, "Put %+F into worklist\n", succ_entry->block));
273 pdeq_putr(env->worklist, succ_entry->block);
/* a coalesced successor was already decided: just follow the chain */
276 if (entry->next != NULL) {
277 pick_block_successor(entry->next, env);
281 DBG((env->dbg, LEVEL_1, "deciding...\n"));
282 best_succ_execfreq = -1;
284 /* no successor yet: pick the successor block with the highest execution
285 * frequency which has no predecessor yet */
287 foreach_block_succ(block, edge) {
288 ir_node *succ_block = get_edge_src_irn(edge);
291 if (irn_visited(succ_block))
294 succ_entry = get_irn_link(succ_block);
295 if (succ_entry->prev != NULL)
298 execfreq = get_block_execfreq(env->execfreqs, succ_block);
299 if (execfreq > best_succ_execfreq) {
300 best_succ_execfreq = execfreq;
/* no suitable direct successor: pull from the worklist, skipping blocks
 * that were visited in the meantime */
306 DBG((env->dbg, LEVEL_1, "pick from worklist\n"));
309 if (pdeq_empty(env->worklist)) {
310 DBG((env->dbg, LEVEL_1, "worklist empty\n"));
313 succ = pdeq_getl(env->worklist);
314 } while (irn_visited(succ));
/* append the chosen successor to the chain and continue from there */
317 succ_entry = get_irn_link(succ);
318 entry->next = succ_entry;
319 succ_entry->prev = entry;
321 pick_block_successor(succ_entry, env);
/**
 * Drive pick_block_successor() starting at the start block and return the
 * head of the resulting schedule list (the start block's entry).
 */
324 static blocksched_entry_t *finish_block_schedule(blocksched_env_t *env)
326 ir_graph *irg = env->irg;
327 ir_node *startblock = get_irg_start_block(irg);
328 blocksched_entry_t *entry = get_irn_link(startblock);
/* fresh visited marks for this traversal */
330 inc_irg_visited(irg);
332 env->worklist = new_pdeq();
333 pick_block_successor(entry, env);
/* all worklist entries must have been consumed */
334 assert(pdeq_empty(env->worklist));
335 del_pdeq(env->worklist);
/**
 * Flatten the schedule list starting at 'first' into an array of 'count'
 * block pointers allocated on 'obst'.
 */
340 static ir_node **create_blocksched_array(blocksched_env_t *env, blocksched_entry_t *first,
341 int count, struct obstack* obst)
344 ir_node **block_list;
345 blocksched_entry_t *entry;
347 block_list = NEW_ARR_D(ir_node *, obst, count);
348 DBG((env->dbg, LEVEL_1, "Blockschedule:\n"));
350 for (entry = first; entry != NULL; entry = entry->next) {
352 block_list[i++] = entry->block;
353 DBG((env->dbg, LEVEL_1, "\t%+F\n", entry->block));
/**
 * Greedy scheduler entry point: collect edge frequencies, sort them,
 * coalesce blocks along hot edges (skipped for BLOCKSCHED_NAIV) and emit
 * the final block array on the irg's obstack.
 */
360 static ir_node **create_block_schedule_greedy(ir_graph *irg, ir_exec_freq *execfreqs)
362 blocksched_env_t env;
364 blocksched_entry_t *start_entry;
365 ir_node **block_list;
371 env.execfreqs = execfreqs;
372 env.edges = NEW_ARR_F(edge_t, 0);
375 FIRM_DBG_REGISTER(env.dbg, "firm.be.blocksched");
377 // collect edge execution frequencies
378 irg_block_walk_graph(irg, collect_egde_frequency, NULL, &env);
380 // sort interblock edges by execution frequency
381 qsort(env.edges, ARR_LEN(env.edges), sizeof(env.edges[0]), cmp_edges);
/* drop blocks that became empty before scheduling them */
383 be_remove_empty_blocks(irg);
385 if (algo != BLOCKSCHED_NAIV)
386 coalesce_blocks(&env);
388 start_entry = finish_block_schedule(&env);
389 block_list = create_blocksched_array(&env, start_entry, env.blockcount, get_irg_obstack(irg));
/* release the per-run edge array and the local obstack ('obst' is
 * declared/initialized in lines not visible in this chunk) */
391 DEL_ARR_F(env.edges);
392 obstack_free(&obst, NULL);
/* A control-flow edge plus its associated binary ILP variable (member not
 * visible in this chunk). */
407 typedef struct _ilp_edge_t {
408 ir_node *block; /**< source block */
409 int pos; /**< number of cfg predecessor (target) */
/* ILP scheduler state: embeds the common env plus the edge list and the
 * linear program handle. */
413 typedef struct _blocksched_ilp_env_t {
414 blocksched_env_t env;
415 ilp_edge_t *ilpedges;
417 } blocksched_ilp_env_t;
/* Schedule-list entry variant carrying the per-block out-edge constraint.
 * next/prev mirror blocksched_entry_t: coalesce_blocks_ilp() retrieves the
 * same link pointer as both types, so the leading layout must match. */
419 typedef struct _blocksched_ilp_entry_t {
421 struct _blocksched_entry_t *next;
422 struct _blocksched_entry_t *prev;
425 } blocksched_ilp_entry_t;
/**
 * Create a binary ILP variable for edge (block, pos) with objective
 * coefficient 'execfreq', append the edge to env->ilpedges and return its
 * index in that array.
 */
427 static int add_ilp_edge(ir_node *block, int pos, double execfreq, blocksched_ilp_env_t *env)
431 int edgeidx = ARR_LEN(env->ilpedges);
433 snprintf(name, sizeof(name), "edge%d", edgeidx);
/* binary var, cost 'execfreq' in the minimizing objective; start value 1.0.
 * NOTE(review): polarity (1 == "stays a jump"?) not provable from this
 * chunk -- confirm against the solution-application code. */
437 edge.ilpvar = lpp_add_var_default(env->lpp, name, lpp_binary, execfreq, 1.0);
439 ARR_APP1(ilp_edge_t, env->ilpedges, edge);
/**
 * Block-walker for the ILP formulation: creates a blocksched_ilp_entry_t
 * per block, an ILP variable per cfg edge, and per-block in/out
 * constraints. (Same "egde" name typo as the greedy walker; kept.)
 */
443 static void collect_egde_frequency_ilp(ir_node *block, void *data)
445 blocksched_ilp_env_t *env = data;
446 ir_graph *irg = env->env.irg;
447 ir_node *startblock = get_irg_start_block(irg);
452 blocksched_ilp_entry_t *entry;
454 snprintf(name, sizeof(name), "block_out_constr_%ld", get_irn_node_nr(block));
455 out_count = get_irn_n_edges_kind(block, EDGE_KIND_BLOCK);
457 entry = obstack_alloc(env->env.obst, sizeof(entry[0]));
458 entry->block = block;
/* out-constraint: sum of this block's out-edge vars >= out_count - 1,
 * i.e. at most one outgoing edge may be "used" (value 0) */
461 entry->out_cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater, out_count - 1);
462 set_irn_link(block, entry);
/* the start block contributes no incoming edges */
464 if (block == startblock)
467 arity = get_irn_arity(block);
469 double execfreq = get_block_execfreq(env->env.execfreqs, block);
470 add_ilp_edge(block, 0, execfreq, env);
/* NOTE(review): alloca'd but not referenced in the lines visible here */
474 int *edgenums = alloca(sizeof(edgenums[0]) * arity);
/* analogous in-constraint: at most one incoming edge may be "used" */
476 snprintf(name, sizeof(name), "block_in_constr_%ld", get_irn_node_nr(block));
477 cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater, arity - 1);
479 for (i = 0; i < arity; ++i) {
483 ir_node *pred_block = get_Block_cfgpred_block(block, i);
485 execfreq = get_block_execfreq(env->env.execfreqs, pred_block);
486 edgenum = add_ilp_edge(block, i, execfreq, env);
487 edge = &env->ilpedges[edgenum];
488 lpp_set_factor_fast(env->lpp, cst, edge->ilpvar, 1.0);
/**
 * Complete the per-predecessor out-edge constraints, solve the ILP and
 * chain blocks according to the solution.
 */
494 static void coalesce_blocks_ilp(blocksched_ilp_env_t *env)
497 int edge_count = ARR_LEN(env->ilpedges);
499 /* complete out constraints */
500 for(i = 0; i < edge_count; ++i) {
501 const ilp_edge_t *edge = &env->ilpedges[i];
502 ir_node *block = edge->block;
504 blocksched_ilp_entry_t *entry;
506 /* the block might have been removed already... */
507 if (is_Bad(get_Block_cfgpred(block, 0)))
510 pred = get_Block_cfgpred_block(block, edge->pos);
511 entry = get_irn_link(pred);
513 DBG((env->env.dbg, LEVEL_1, "Adding out cst to %+F from %+F,%d\n",
514 pred, block, edge->pos));
515 lpp_set_factor_fast(env->lpp, entry->out_cst, edge->ilpvar, 1.0);
/* debug dumps. NOTE(review): the fopen() result is not checked and no
 * fclose() is visible in this chunk -- verify against the full file. */
522 lpp_dump(env->lpp, "lpp.out");
523 snprintf(fname, sizeof(fname), "lpp_%s.plain", get_irg_dump_name(env->env.irg));
524 f = fopen(fname, "w");
525 lpp_dump_plain(env->lpp, f);
/* FIXME: hard-coded solver host ("i44pc52") and solver name should come
 * from the backend options, as the commented-out line intends */
530 //lpp_solve_net(env->lpp, main_env->options->ilp_server, main_env->options->ilp_solver);
531 lpp_solve_net(env->lpp, "i44pc52", "cplex");
532 assert(lpp_is_sol_valid(env->lpp));
534 /* Apply results to edges */
535 for (i = 0; i < edge_count; ++i) {
536 const ilp_edge_t *edge = &env->ilpedges[i];
537 ir_node *block = edge->block;
540 blocksched_entry_t *entry;
541 blocksched_entry_t *pred_entry;
543 /* the block might have been removed already... */
544 if (is_Bad(get_Block_cfgpred(block, 0)))
/* read the solved edge variable; the branch that filters on is_jump is
 * not visible in this chunk */
547 is_jump = lpp_get_var_sol(env->lpp, edge->ilpvar);
551 pred = get_Block_cfgpred_block(block, edge->pos);
552 entry = get_irn_link(block);
553 pred_entry = get_irn_link(pred);
/* ILP constraints guarantee each block is chained at most once per side */
555 assert(entry->prev == NULL && pred_entry->next == NULL);
556 entry->prev = pred_entry;
557 pred_entry->next = entry;
/**
 * ILP scheduler entry point: set up the LP (minimizing), collect edge
 * variables and constraints via a block walk, solve, and emit the block
 * array on the irg's obstack.
 */
561 static ir_node **create_block_schedule_ilp(ir_graph *irg, ir_exec_freq *execfreqs)
563 blocksched_ilp_env_t env;
565 blocksched_entry_t *start_entry;
566 ir_node **block_list;
571 env.env.obst = &obst;
572 env.env.execfreqs = execfreqs;
573 env.env.worklist = NULL;
574 env.env.blockcount = 0;
575 env.ilpedges = NEW_ARR_F(ilp_edge_t, 0);
576 FIRM_DBG_REGISTER(env.env.dbg, "firm.be.blocksched");
578 env.lpp = new_lpp("blockschedule", lpp_minimize);
/* cap solver runtime at 20 seconds; solver log goes to stdout */
579 lpp_set_time_limit(env.lpp, 20);
580 lpp_set_log(env.lpp, stdout);
582 irg_block_walk_graph(irg, collect_egde_frequency_ilp, NULL, &env);
584 be_remove_empty_blocks(irg);
585 coalesce_blocks_ilp(&env);
587 start_entry = finish_block_schedule(&env.env);
588 block_list = create_blocksched_array(&env.env, start_entry, env.env.blockcount, get_irg_obstack(irg));
/* release per-run resources ('obst' initialization is not visible here) */
590 DEL_ARR_F(env.ilpedges);
592 obstack_free(&obst, NULL);
596 #endif /* WITH_ILP */
600 * | ____|_ _| |_| __ )| __ )
601 * | _| \ \/ / __| _ \| _ \
602 * | |___ > <| |_| |_) | |_) |
603 * |_____/_/\_\\__|____/|____/
607 /** A simple forward single linked list. */
/* (the "typedef struct _anchor {" line is not visible in this chunk) */
609 ir_node *start; /**< start of the list */
610 ir_node *end; /**< last block in the list */
611 unsigned n_blks; /**< number of blocks in the list */
/** Append 'block' to 'list', chaining blocks through their link fields. */
614 static void add_block(anchor *list, ir_node *block) {
615 if (list->start == NULL) {
/* non-empty list: hook the new block behind the current tail */
620 set_irn_link(list->end, block);
/**
 * Recursively build the block list in extended-basic-block order: emit all
 * blocks of the leader's extbb, then recurse into successor extbbs (first
 * those of the last block, then those of the remaining blocks).
 */
627 static void create_block_list(ir_node *leader_block, anchor *list) {
629 const ir_edge_t *edge;
630 ir_node *block = NULL;
631 ir_extblk *extbb = get_Block_extbb(leader_block);
/* each extended basic block is emitted exactly once */
633 if (extbb_visited(extbb))
635 mark_extbb_visited(extbb);
/* emit all blocks of this extbb in their internal order */
637 for (i = 0; i < get_extbb_n_blocks(extbb); ++i) {
638 block = get_extbb_block(extbb, i);
639 add_block(list, block);
/* 'block' is now the last block of the extbb */
642 assert(block != NULL);
644 /* pick successor extbbs */
645 foreach_block_succ(block, edge) {
646 ir_node *succ = get_edge_src_irn(edge);
647 create_block_list(succ, list);
/* then visit successors of all non-last blocks as well */
650 for (i = 0; i < get_extbb_n_blocks(extbb) - 1; ++i) {
651 block = get_extbb_block(extbb, i);
653 foreach_block_succ(block, edge) {
654 ir_node *succ = get_edge_src_irn(edge);
655 create_block_list(succ, list);
/* defined in the execution-frequency module */
660 void compute_extbb_execfreqs(ir_graph *irg, ir_exec_freq *execfreqs);
663 * Calculates a block schedule. The schedule is stored as a linked
664 * list starting at the start_block of the irg.
666 static ir_node **create_extbb_block_schedule(ir_graph *irg, ir_exec_freq *execfreqs)
669 ir_node **blk_list, *b, *n;
672 /* schedule extended basic blocks */
673 compute_extbb_execfreqs(irg, execfreqs);
674 //compute_extbb(irg);
/* walk the extbb structure into a linked list, then copy it to an array */
679 inc_irg_block_visited(irg);
680 create_block_list(get_irg_start_block(irg), &list);
682 /** create an array, so we can go forward and backward */
683 blk_list = NEW_ARR_D(ir_node *, irg->obst,list.n_blks);
685 for (i = 0, b = list.start; b; b = n, ++i) {
696 * | |\/| |/ _` | | '_ \
697 * | | | | (_| | | | | |
698 * |_| |_|\__,_|_|_| |_|
/** Register the "be.blocksched" option group and its table with libcore. */
701 void be_init_blocksched(void)
703 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
704 lc_opt_entry_t *blocksched_grp = lc_opt_get_grp(be_grp, "blocksched");
706 lc_opt_add_table(blocksched_grp, be_blocksched_options);
709 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_blocksched);
/**
 * Public entry point: dispatch to the scheduler selected via the
 * "be.blocksched.algo" option. NAIV shares the greedy implementation
 * (coalescing is skipped there); ILP is only available with WITH_ILP.
 */
711 ir_node **be_create_block_schedule(ir_graph *irg, ir_exec_freq *execfreqs)
714 case BLOCKSCHED_GREEDY:
715 case BLOCKSCHED_NAIV:
716 return create_block_schedule_greedy(irg, execfreqs);
717 case BLOCKSCHED_EXTBB:
718 return create_extbb_block_schedule(irg, execfreqs);
721 return create_block_schedule_ilp(irg, execfreqs);
722 #endif /* WITH_ILP */
725 assert(0 && "unknown blocksched algo");