X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbeblocksched.c;h=38fc4def5e6fc30fc74cb495a82399d4c4a7dde4;hb=a9eee95c42def4095dad5214b493aa0e3ab5a1f7;hp=c4096777ec0d0e40ac0fa072ad77f6e468f1baa1;hpb=79ed89a676afd08eb739330d46b85b1d1532aec1;p=libfirm

diff --git a/ir/be/beblocksched.c b/ir/be/beblocksched.c
index c4096777e..38fc4def5 100644
--- a/ir/be/beblocksched.c
+++ b/ir/be/beblocksched.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
  *
  * This file is part of libFirm.
  *
@@ -23,6 +23,16 @@
  * @author   Matthias Braun, Christoph Mallon
  * @date     27.09.2006
  * @version  $Id$
+ *
+ * The greedy (and ILP) algorithms here assume that we want to turn as many
+ * jumps into fallthroughs as possible (more precisely, as many executed
+ * jumps, so the execution frequencies have to be taken into account). The
+ * algorithms collect the execution frequencies of all branches (which is
+ * easy once all critical edges are split) and then remove critical edges
+ * where possible, as they are no longer needed. They then try to turn as
+ * many edges as possible into fallthroughs by setting next and prev
+ * pointers on the blocks. The greedy algorithm sorts the edges by execution
+ * frequency and tries to turn them into fallthroughs in that order.
  */
 #ifdef HAVE_CONFIG_H
 #include "config.h"
@@ -49,6 +59,9 @@
 #include "bemodule.h"
 #include "be.h"
 
+#include "lc_opts.h"
+#include "lc_opts_enum.h"
+
 #ifdef WITH_ILP
 #include 
 #include 
@@ -62,13 +75,6 @@ typedef enum _blocksched_algos_t {
 
 static int algo = BLOCKSCHED_GREEDY;
 
-
-#ifdef WITH_LIBCORE
-
-#include 
-#include 
-#include 
-
 static const lc_opt_enum_int_items_t blockschedalgo_items[] = {
   { "naiv",  BLOCKSCHED_NAIV },
   { "extbb", BLOCKSCHED_EXTBB },
@@ -87,7 +93,6 @@ static const lc_opt_table_entry_t be_blocksched_options[] = {
   LC_OPT_ENT_ENUM_INT ("algo", "the block scheduling algorithm", &algo_var),
   LC_OPT_LAST
 };
-#endif /* WITH_LIBCORE */
 
 /*
  * ____ _
@@ -138,12 +143,15 @@ static void collect_egde_frequency(ir_node *block, void *data)
   entry->prev = NULL;
   set_irn_link(block, entry);
 
-  if (block == get_irg_start_block(env->irg))
-    return;
-
   arity = get_Block_n_cfgpreds(block);
 
-  if (arity == 1) {
+  if (arity == 0) {
+    assert(block == get_irg_start_block(env->irg)
+           || block == get_irg_end_block(env->irg));
+    /* must be the start block (or end-block for endless loops), nothing to
+     * do here */
+    return;
+  } else if (arity == 1) {
     edge.block = block;
     edge.pos = 0;
     edge.execfreq = get_block_execfreq(env->execfreqs, block);
@@ -194,18 +202,19 @@ static void coalesce_blocks(blocksched_env_t *env)
   for (i = 0; i < edge_count; ++i) {
     const edge_t *edge = &env->edges[i];
     ir_node *block = edge->block;
+    int pos = edge->pos;
     ir_node *pred_block;
     blocksched_entry_t *entry, *pred_entry;
 
-    /* the block might have been removed already... */
-    if (is_Bad(get_Block_cfgpred(block, 0)))
-      continue;
-
     /* only check edge with highest frequency */
     if (! edge->highest_execfreq)
       continue;
 
-    pred_block = get_Block_cfgpred_block(block, edge->pos);
+    /* the block might have been removed already... */
+    if (is_Bad(get_Block_cfgpred(block, 0)))
+      continue;
+
+    pred_block = get_Block_cfgpred_block(block, pos);
     entry = get_irn_link(block);
     pred_entry = get_irn_link(pred_block);
 
@@ -227,6 +236,7 @@ static void coalesce_blocks(blocksched_env_t *env)
   for (i = 0; i < edge_count; ++i) {
     const edge_t *edge = &env->edges[i];
     ir_node *block = edge->block;
+    int pos = edge->pos;
     ir_node *pred_block;
     blocksched_entry_t *entry, *pred_entry;
 
@@ -234,7 +244,11 @@
     if (is_Bad(get_Block_cfgpred(block, 0)))
       continue;
 
-    pred_block = get_Block_cfgpred_block(block, edge->pos);
+    /* we can't do fallthroughs in backedges */
+    if (is_backedge(block, pos))
+      continue;
+
+    pred_block = get_Block_cfgpred_block(block, pos);
     entry = get_irn_link(block);
     pred_entry = get_irn_link(pred_block);
 
@@ -347,7 +361,7 @@ static blocksched_entry_t *finish_block_schedule(blocksched_env_t *env)
   ir_node *startblock = get_irg_start_block(irg);
   blocksched_entry_t *entry = get_irn_link(startblock);
 
-  set_using_visited(irg);
+  set_using_irn_visited(irg);
   inc_irg_visited(irg);
 
   env->worklist = new_pdeq();
@@ -355,7 +369,7 @@
   assert(pdeq_empty(env->worklist));
   del_pdeq(env->worklist);
 
-  clear_using_visited(irg);
+  clear_using_irn_visited(irg);
 
   return entry;
 }
@@ -699,7 +713,7 @@ static ir_node **create_extbb_block_schedule(ir_graph *irg, ir_exec_freq *execfr
   list.n_blks = 0;
 
   set_using_irn_link(irg);
-  set_using_visited(irg);
+  set_using_irn_visited(irg);
   inc_irg_block_visited(irg);
 
   create_block_list(get_irg_start_block(irg), &list);
@@ -713,7 +727,7 @@ static ir_node **create_extbb_block_schedule(ir_graph *irg, ir_exec_freq *execfr
   }
 
   clear_using_irn_link(irg);
-  clear_using_visited(irg);
+  clear_using_irn_visited(irg);
 
   return blk_list;
 }
@@ -728,12 +742,11 @@ static ir_node **create_extbb_block_schedule(ir_graph *irg, ir_exec_freq *execfr
  */
 void be_init_blocksched(void)
 {
-#ifdef WITH_LIBCORE
   lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
   lc_opt_entry_t *blocksched_grp = lc_opt_get_grp(be_grp, "blocksched");
 
   lc_opt_add_table(blocksched_grp, be_blocksched_options);
-#endif
+
   FIRM_DBG_REGISTER(dbg, "firm.be.blocksched");
 }
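
As a stand-alone illustration of the greedy strategy the new header comment describes (not part of the patch; block_t and edge_t below are simplified stand-ins for libfirm's ir_node, edge_t and blocksched_entry_t): sort all control-flow edges by execution frequency and, in that order, make an edge a fallthrough by chaining its two blocks with next/prev pointers, skipping edges whose blocks are already chained.

/*
 * Sketch only: illustrates the greedy fallthrough selection from the header
 * comment above with made-up types; it does not use the libfirm API.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct block_t {
    const char     *name;
    struct block_t *next;   /* block laid out directly after this one */
    struct block_t *prev;   /* block laid out directly before this one */
} block_t;

typedef struct edge_t {
    block_t *source;    /* block ending in the jump */
    block_t *target;    /* jump target we would like to fall through to */
    double   execfreq;  /* execution frequency of this edge */
} edge_t;

/* sort edges so that the most frequently executed ones come first */
static int cmp_edge(const void *a, const void *b)
{
    const edge_t *ea = (const edge_t*)a;
    const edge_t *eb = (const edge_t*)b;
    return (ea->execfreq < eb->execfreq) - (ea->execfreq > eb->execfreq);
}

static void coalesce(edge_t *edges, size_t n_edges)
{
    size_t i;

    qsort(edges, n_edges, sizeof(edges[0]), cmp_edge);

    for (i = 0; i < n_edges; ++i) {
        block_t *pred  = edges[i].source;
        block_t *block = edges[i].target;

        /* the edge can't become a fallthrough if either block is taken */
        if (pred->next != NULL || block->prev != NULL)
            continue;

        pred->next  = block;
        block->prev = pred;
    }
}

int main(void)
{
    block_t a = { "A", NULL, NULL };
    block_t b = { "B", NULL, NULL };
    block_t c = { "C", NULL, NULL };
    /* A branches to B (hot) and C (cold); B also jumps to C */
    edge_t edges[] = {
        { &a, &b, 10.0 },
        { &a, &c,  1.0 },
        { &b, &c,  9.0 },
    };
    block_t *blk;

    coalesce(edges, sizeof(edges) / sizeof(edges[0]));

    /* print the resulting layout starting from A */
    for (blk = &a; blk != NULL; blk = blk->next)
        printf("%s\n", blk->name);
    return 0;
}

Compiled as C99 this prints A, B, C: the hot edges A->B and B->C become fallthroughs, while the cold edge A->C keeps its jump, which is the same ordering effect the patch aims for on real block schedules.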