X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbechordal_main.c;h=05611be22f63c4a7806b47e9d88a1222bd121654;hb=429d687f06baeeb63d04750f846d39e55fb62343;hp=5e2f38f8587a47e01419ce743033bff25843b3fc;hpb=db82f30bbda008e296e8dfab17f899aa175fc469;p=libfirm

diff --git a/ir/be/bechordal_main.c b/ir/be/bechordal_main.c
index 5e2f38f85..05611be22 100644
--- a/ir/be/bechordal_main.c
+++ b/ir/be/bechordal_main.c
@@ -24,9 +24,7 @@
  * @date 29.11.2005
  * @version $Id$
  */
-#ifdef HAVE_CONFIG_H
 #include "config.h"
-#endif
 
 #include 
 #include 
 
@@ -36,7 +34,6 @@
 #include "list.h"
 #include "bitset.h"
 #include "iterator.h"
-#include "firm_config.h"
 
 #include "lc_opts.h"
 #include "lc_opts_enum.h"
@@ -54,13 +51,11 @@
 #include "irnode.h"
 #include "ircons.h"
 #include "debug.h"
-#include "xmalloc.h"
 #include "execfreq.h"
 #include "iredges_t.h"
 
 #include "bechordal_t.h"
 #include "beabi.h"
-#include "bejavacoal.h"
 #include "beutil.h"
 #include "besched.h"
 #include "besched_t.h"
@@ -80,11 +75,6 @@
 #include "bespilloptions.h"
 #include "belower.h"
 
-#ifdef WITH_ILP
-#include "bespillremat.h"
-#endif /* WITH_ILP */
-
-#include "bejavacoal.h"
 #include "becopystat.h"
 #include "becopyopt.h"
 #include "bessadestr.h"
@@ -106,8 +96,6 @@ typedef struct _post_spill_env_t {
 	double pre_spill_cost;
 } post_spill_env_t;
 
-static be_options_t *main_opts;
-
 static const lc_opt_enum_int_items_t lower_perm_items[] = {
 	{ "copy", BE_CH_LOWER_PERM_COPY },
 	{ "swap", BE_CH_LOWER_PERM_SWAP },
@@ -156,7 +144,7 @@ static lc_opt_enum_int_var_t be_ch_vrfy_var = {
 static const lc_opt_table_entry_t be_chordal_options[] = {
 	LC_OPT_ENT_ENUM_PTR ("perm", "perm lowering options", &lower_perm_var),
 	LC_OPT_ENT_ENUM_MASK("dump", "select dump phases", &dump_var),
-	LC_OPT_ENT_ENUM_PTR ("vrfy", "verify options", &be_ch_vrfy_var),
+	LC_OPT_ENT_ENUM_PTR ("verify", "verify options", &be_ch_vrfy_var),
 	LC_OPT_LAST
 };
 
@@ -177,15 +165,16 @@ static void dump(unsigned mask, ir_graph *irg,
 }
 
 /**
- * Checks for every reload if it's user can perform the load on itself.
+ * Checks for every reload if its user can perform the load on itself.
  */
-static void memory_operand_walker(ir_node *irn, void *env) {
-	be_chordal_env_t *cenv = env;
-	const arch_env_t *aenv = cenv->birg->main_env->arch_env;
+static void memory_operand_walker(ir_node *irn, void *env)
+{
 	const ir_edge_t *edge, *ne;
 	ir_node *block;
 	ir_node *spill;
 
+	(void)env;
+
 	if (! be_is_Reload(irn))
 		return;
 
@@ -202,8 +191,8 @@ static void memory_operand_walker(ir_node *irn, void *env) {
 
 		assert(src && "outedges broken!");
 
-		if (get_nodes_block(src) == block && arch_possible_memory_operand(aenv, src, pos)) {
-			arch_perform_memory_operand(aenv, src, spill, pos);
+		if (get_nodes_block(src) == block && arch_possible_memory_operand(src, pos)) {
+			arch_perform_memory_operand(src, spill, pos);
 		}
 	}
 
@@ -218,101 +207,13 @@ static void memory_operand_walker(ir_node *irn, void *env) {
 /**
  * Starts a walk for memory operands if supported by the backend.
  */
-static INLINE void check_for_memory_operands(be_chordal_env_t *chordal_env) {
-	irg_walk_graph(chordal_env->irg, NULL, memory_operand_walker, chordal_env);
-}
-
-/**
- * Sorry for doing stats again...
- */
-typedef struct _node_stat_t {
-	unsigned int n_phis;      /**< Phis of the current register class. */
-	unsigned int n_mem_phis;  /**< Memory Phis (Phis with spill operands). */
-	unsigned int n_copies;    /**< Copies */
-	unsigned int n_perms;     /**< Perms */
-	unsigned int n_spills;    /**< Spill nodes */
-	unsigned int n_reloads;   /**< Reloads */
-} node_stat_t;
-
-struct node_stat_walker {
-	node_stat_t *stat;
-	const arch_env_t *arch_env;
-};
-
-static void node_stat_walker(ir_node *irn, void *data)
+static inline void check_for_memory_operands(ir_graph *irg)
 {
-	struct node_stat_walker *env = data;
-	const arch_env_t *aenv = env->arch_env;
-
-	/* if the node is a normal phi */
-	if(is_Phi(irn)) {
-		if (get_irn_mode(irn) == mode_M) {
-			env->stat->n_mem_phis++;
-		} else {
-			env->stat->n_phis++;
-		}
-	} else {
-		arch_irn_class_t classify = arch_irn_classify(aenv, irn);
-
-		if(classify & arch_irn_class_spill)
-			++env->stat->n_spills;
-		if(classify & arch_irn_class_reload)
-			++env->stat->n_reloads;
-		if(classify & arch_irn_class_copy)
-			++env->stat->n_copies;
-		if(classify & arch_irn_class_perm)
-			++env->stat->n_perms;
-	}
+	irg_walk_graph(irg, NULL, memory_operand_walker, NULL);
 }
 
-static void node_stats(be_irg_t *birg, node_stat_t *stat)
-{
-	struct node_stat_walker env;
-
-	memset(stat, 0, sizeof(*stat));
-	env.arch_env = birg->main_env->arch_env;
-	env.stat = stat;
-	irg_walk_graph(birg->irg, NULL, node_stat_walker, &env);
-}
-
-static void insn_count_walker(ir_node *irn, void *data)
-{
-	unsigned long *cnt = data;
-
-	switch(get_irn_opcode(irn)) {
-	case iro_Proj:
-	case iro_Phi:
-	case iro_Start:
-	case iro_End:
-		break;
-	default:
-		(*cnt)++;
-	}
-}
-
-static unsigned long count_insns(ir_graph *irg)
-{
-	unsigned long cnt = 0;
-	irg_walk_graph(irg, insn_count_walker, NULL, &cnt);
-	return cnt;
-}
-
-static void block_count_walker(ir_node *node, void *data)
-{
-	unsigned long *cnt = data;
-	if (node == get_irg_end_block(current_ir_graph))
-		return;
-	(*cnt)++;
-}
-
-static unsigned long count_blocks(ir_graph *irg)
-{
-	unsigned long cnt = 0;
-	irg_block_walk_graph(irg, block_count_walker, NULL, &cnt);
-	return cnt;
-}
-
-static node_stat_t last_node_stat;
+static be_node_stats_t last_node_stats;
 
 /**
  * Perform things which need to be done per register class before spilling.
@@ -322,7 +223,6 @@ static void pre_spill(post_spill_env_t *pse, const arch_register_class_t *cls)
 	be_chordal_env_t *chordal_env = &pse->cenv;
 	be_irg_t *birg = pse->birg;
 	ir_graph *irg = be_get_birg_irg(birg);
-	const be_main_env_t *main_env = birg->main_env;
 
 	pse->cls = cls;
 	chordal_env->cls = cls;
@@ -332,7 +232,7 @@ static void pre_spill(post_spill_env_t *pse, const arch_register_class_t *cls)
 	be_assure_liveness(birg);
 	be_liveness_assure_chk(be_get_birg_liveness(birg));
 
-	stat_ev_do(pse->pre_spill_cost = be_estimate_irg_costs(irg, main_env->arch_env, birg->exec_freq));
+	stat_ev_do(pse->pre_spill_cost = be_estimate_irg_costs(irg, birg->exec_freq));
 
 	/* put all ignore registers into the ignore register set. */
 	be_put_ignore_regs(birg, pse->cls, chordal_env->ignore_colors);
@@ -351,29 +251,28 @@ static void post_spill(post_spill_env_t *pse, int iteration) {
 	be_chordal_env_t *chordal_env = &pse->cenv;
 	be_irg_t *birg = pse->birg;
 	ir_graph *irg = birg->irg;
-	const be_main_env_t *main_env = birg->main_env;
-	int colors_n = arch_register_class_n_regs(chordal_env->cls);
+	int colors_n = arch_register_class_n_regs(chordal_env->cls);
 	int allocatable_regs = colors_n - be_put_ignore_regs(birg, chordal_env->cls, NULL);
 
 	/* some special classes contain only ignore regs, no work to be done */
 	if (allocatable_regs > 0) {
-		stat_ev_dbl("bechordal_spillcosts", be_estimate_irg_costs(irg, main_env->arch_env, birg->exec_freq) - pse->pre_spill_cost);
+		stat_ev_dbl("bechordal_spillcosts", be_estimate_irg_costs(irg, birg->exec_freq) - pse->pre_spill_cost);
 
 		/*
			If we have a backend provided spiller, post spill is
			called in a loop after spilling for each register class.
			But we only need to fix stack nodes once in this case.
		*/
-		BE_TIMER_PUSH(t_ra_spill);
-		check_for_memory_operands(chordal_env);
+		BE_TIMER_PUSH(t_ra_spill_apply);
+		check_for_memory_operands(irg);
 		if (iteration == 0) {
 			be_abi_fix_stack_nodes(birg->abi);
 		}
-		BE_TIMER_POP(t_ra_spill);
+		BE_TIMER_POP(t_ra_spill_apply);
 
-		BE_TIMER_PUSH(t_verify);
 		/* verify schedule and register pressure */
+		BE_TIMER_PUSH(t_verify);
 		if (chordal_env->opts->vrfy_option == BE_CH_VRFY_WARN) {
 			be_verify_schedule(birg);
 			be_verify_register_pressure(birg, pse->cls, irg);
@@ -397,19 +296,21 @@ static void post_spill(post_spill_env_t *pse, int iteration) {
 		BE_TIMER_POP(t_ra_ifg);
 
 		stat_ev_if {
-			be_ifg_stat_t stat;
-			node_stat_t node_stat;
+			be_ifg_stat_t stat;
+			be_node_stats_t node_stats;
 
 			be_ifg_stat(birg, chordal_env->ifg, &stat);
 			stat_ev_dbl("bechordal_ifg_nodes", stat.n_nodes);
 			stat_ev_dbl("bechordal_ifg_edges", stat.n_edges);
 			stat_ev_dbl("bechordal_ifg_comps", stat.n_comps);
 
-			node_stats(birg, &node_stat);
+			be_collect_node_stats(&node_stats, birg);
+			be_subtract_node_stats(&node_stats, &last_node_stats);
+
 			stat_ev_dbl("bechordal_perms_before_coal",
-					node_stat.n_perms - last_node_stat.n_perms);
+					node_stats[BE_STAT_PERMS]);
 			stat_ev_dbl("bechordal_copies_before_coal",
-					node_stat.n_copies - last_node_stat.n_copies);
+					node_stats[BE_STAT_COPIES]);
 		}
 
 		/* copy minimization */
@@ -450,14 +351,12 @@ static void post_spill(post_spill_env_t *pse, int iteration) {
  */
 static void be_ra_chordal_main(be_irg_t *birg)
 {
-	const be_main_env_t *main_env = birg->main_env;
-	const arch_env_t *arch_env = main_env->arch_env;
-	ir_graph *irg = birg->irg;
-	int j, m;
-	be_chordal_env_t chordal_env;
-	struct obstack obst;
-
-	main_opts = main_env->options;
+	const arch_env_t *arch_env = birg->main_env->arch_env;
+	ir_graph *irg = birg->irg;
+	int j;
+	int m;
+	be_chordal_env_t chordal_env;
+	struct obstack obst;
 
 	BE_TIMER_PUSH(t_ra_other);
 
@@ -478,9 +377,7 @@ static void be_ra_chordal_main(be_irg_t *birg)
 	BE_TIMER_POP(t_ra_prolog);
 
 	stat_ev_if {
-		be_stat_ev("bechordal_insns_before", count_insns(irg));
-		be_stat_ev("bechordal_blocks_before", count_blocks(irg));
-		node_stats(birg, &last_node_stat);
+		be_collect_node_stats(&last_node_stats, birg);
 	}
 
 	if (! arch_code_generator_has_spiller(birg->cg)) {
@@ -498,6 +395,10 @@
 
 		stat_ev_ctx_push_str("bechordal_cls", cls->name);
 
+		stat_ev_if {
+			be_do_stat_reg_pressure(birg, cls);
+		}
+
 		memcpy(&pse.cenv, &chordal_env, sizeof(chordal_env));
 		pse.birg = birg;
 		pre_spill(&pse, cls);
@@ -512,23 +413,13 @@
 		post_spill(&pse, 0);
 
 		stat_ev_if {
-			node_stat_t node_stat;
-
-			node_stats(birg, &node_stat);
-			stat_ev_dbl("bechordal_phis",
-					node_stat.n_phis - last_node_stat.n_phis);
-			stat_ev_dbl("bechordal_mem_phis",
-					node_stat.n_mem_phis - last_node_stat.n_mem_phis);
-			stat_ev_dbl("bechordal_reloads",
-					node_stat.n_reloads - last_node_stat.n_reloads);
-			stat_ev_dbl("bechordal_spills",
-					node_stat.n_spills - last_node_stat.n_spills);
-			stat_ev_dbl("bechordal_perms_after_coal",
-					node_stat.n_perms - last_node_stat.n_perms);
-			stat_ev_dbl("bechordal_copies_after_coal",
-					node_stat.n_copies - last_node_stat.n_copies);
-
-			last_node_stat = node_stat;
+			be_node_stats_t node_stats;
+
+			be_collect_node_stats(&node_stats, birg);
+			be_subtract_node_stats(&node_stats, &last_node_stats);
+			be_emit_node_stats(&node_stats, "bechordal_");
+
+			be_copy_node_stats(&last_node_stats, &node_stats);
 			stat_ev_ctx_pop("bechordal_cls");
 		}
 	}
@@ -538,7 +429,7 @@
 		/* the backend has its own spiller */
 		m = arch_env_get_n_reg_class(arch_env);
 
-		pse = alloca(m * sizeof(pse[0]));
+		pse = ALLOCAN(post_spill_env_t, m);
 
 		for (j = 0; j < m; ++j) {
 			memcpy(&pse[j].cenv, &chordal_env, sizeof(chordal_env));
@@ -557,7 +448,12 @@
 	}
 
 	BE_TIMER_PUSH(t_verify);
-	be_verify_register_allocation(birg);
+	if (chordal_env.opts->vrfy_option == BE_CH_VRFY_WARN) {
+		be_verify_register_allocation(birg);
+	} else if(chordal_env.opts->vrfy_option == BE_CH_VRFY_ASSERT) {
+		assert(be_verify_register_allocation(birg)
+		       && "Register allocation invalid");
+	}
 	BE_TIMER_POP(t_verify);
 
 	BE_TIMER_PUSH(t_ra_epilog);
@@ -569,10 +465,6 @@
 
 	BE_TIMER_POP(t_ra_epilog);
 
 	BE_TIMER_POP(t_ra_other);
-
-	stat_ev_if {
-		be_stat_ev("bechordal_insns_after", count_insns(irg));
-	}
 }
 
 static be_ra_t be_ra_chordal_allocator = {
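
Notes on this change:

The statistics hunks replace the hand-rolled node_stat_t counters, and their
manual "current minus last" subtractions, with the shared be_node_stats_t
helpers: take a snapshot, subtract the previous snapshot to get the per-phase
delta, emit the delta, then store a new baseline. A minimal standalone sketch
of that snapshot/delta pattern follows; the enum values, names and helpers
below are illustrative stand-ins, not libfirm's actual definitions:

    #include <stdio.h>
    #include <string.h>

    /* Stand-in miniature of the be_node_stats_t idea: one counter per kind,
     * kept in a plain array indexed by an enum. */
    enum { STAT_PHIS, STAT_PERMS, STAT_COPIES, STAT_LAST };
    typedef unsigned long node_stats_t[STAT_LAST];

    /* Subtract a baseline snapshot, leaving only per-phase deltas. */
    static void subtract_stats(node_stats_t stats, const node_stats_t base)
    {
        int i;
        for (i = 0; i < STAT_LAST; ++i)
            stats[i] -= base[i];
    }

    /* Emit every counter under a common event-name prefix. */
    static void emit_stats(const node_stats_t stats, const char *prefix)
    {
        static const char *names[STAT_LAST] = { "phis", "perms", "copies" };
        int i;
        for (i = 0; i < STAT_LAST; ++i)
            printf("%s%s: %lu\n", prefix, names[i], stats[i]);
    }

    int main(void)
    {
        node_stats_t last = { 2, 1, 4 }; /* baseline before the phase */
        node_stats_t now  = { 5, 3, 9 }; /* snapshot after the phase  */
        node_stats_t delta;

        memcpy(delta, now, sizeof(delta));
        subtract_stats(delta, last);     /* only this phase's growth  */
        emit_stats(delta, "bechordal_");

        memcpy(last, now, sizeof(last)); /* baseline for the next phase */
        return 0;
    }

Keeping one array indexed by a BE_STAT_* enum, instead of a struct with one
field per statistic, is what allows be_emit_node_stats() to report every
counter from a single loop rather than one stat_ev_dbl() call per field,
which is exactly the boilerplate the hunk in be_ra_chordal_main() deletes.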
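The change from pse = alloca(m * sizeof(pse[0])) to ALLOCAN(post_spill_env_t, m)
swaps a raw alloca call for libfirm's typed wrapper macro, so the element type
and count are explicit at the call site. A self-contained sketch of such a
wrapper; libfirm's real ALLOCAN lives in one of its utility headers and may
differ in detail:

    #include <alloca.h> /* most Unix systems; MSVC provides _alloca in <malloc.h> */
    #include <stdio.h>

    /* Typed stack allocation: n elements of the given type, released
     * automatically when the calling function returns. */
    #define ALLOCAN(type, n) ((type*)alloca(sizeof(type) * (n)))

    typedef struct { int lo, hi; } range_t;

    static void demo(int m)
    {
        range_t *r = ALLOCAN(range_t, m); /* no free() needed */
        int i;

        for (i = 0; i < m; ++i) {
            r[i].lo = i;
            r[i].hi = i + 1;
        }
        printf("last range: [%d, %d]\n", r[m - 1].lo, r[m - 1].hi);
    }

    int main(void)
    {
        demo(4);
        return 0;
    }

As with any alloca-based allocation, this is only safe for sizes known to be
small; here m is the number of register classes reported by
arch_env_get_n_reg_class(), a small per-backend constant.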
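The final code hunk stops calling be_verify_register_allocation()
unconditionally and instead honours the vrfy_option that the renamed "verify"
command-line entry controls: under BE_CH_VRFY_WARN the checker only reports,
under BE_CH_VRFY_ASSERT a failed check aborts. The shape of that dispatch in
isolation, with a stand-in checker (the real one evidently returns nonzero
for a valid allocation, as the assert in the hunk implies):

    #include <assert.h>
    #include <stdio.h>

    enum { VRFY_OFF, VRFY_WARN, VRFY_ASSERT };

    /* Stand-in for a verifier that prints diagnostics and returns
     * nonzero when the checked invariants hold. */
    static int verify_allocation(void)
    {
        return 1;
    }

    static void run_verifier(int vrfy_option)
    {
        if (vrfy_option == VRFY_WARN) {
            verify_allocation();  /* report problems, but keep going */
        } else if (vrfy_option == VRFY_ASSERT) {
            int ok = verify_allocation();
            assert(ok && "Register allocation invalid");
            (void)ok;             /* avoid an unused warning under NDEBUG */
        }
    }

    int main(void)
    {
        run_verifier(VRFY_ASSERT);
        puts("allocation verified");
        return 0;
    }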