#include "besched.h"
#include "belive_t.h"
#include "bearch.h"
-#include "beifg_t.h"
-#include "beifg_impl.h"
+#include "beifg.h"
#include "benode.h"
#include "bestatevent.h"
#include "bestat.h"
#include "be_t.h"
#include "bera.h"
#include "beirg.h"
+#include "bestack.h"
#include "bespillslots.h"
#include "bespill.h"
""
};
-typedef struct _post_spill_env_t {
+typedef struct post_spill_env_t {
be_chordal_env_t cenv;
- be_irg_t *birg;
+ ir_graph *irg;
const arch_register_class_t *cls;
double pre_spill_cost;
} post_spill_env_t;
{ NULL, 0 }
};
-static const lc_opt_enum_int_items_t lower_perm_stat_items[] = {
- { NULL, 0 }
-};
-
static const lc_opt_enum_int_items_t dump_items[] = {
{ "none", BE_CH_DUMP_NONE },
{ "spill", BE_CH_DUMP_SPILL },
&options.vrfy_option, be_ch_vrfy_items
};
-static char minir_file[256] = "";
-
static const lc_opt_table_entry_t be_chordal_options[] = {
LC_OPT_ENT_ENUM_PTR ("perm", "perm lowering options", &lower_perm_var),
LC_OPT_ENT_ENUM_MASK("dump", "select dump phases", &dump_var),
LC_OPT_ENT_ENUM_PTR ("verify", "verify options", &be_ch_vrfy_var),
- LC_OPT_ENT_STR ("minirout", "dump MinIR to file", minir_file, sizeof(minir_file)),
LC_OPT_LAST
};
static void be_ra_chordal_coloring(be_chordal_env_t *env)
{
- assert(selected_coloring != NULL);
- if (selected_coloring != NULL) {
- selected_coloring->allocate(env);
- }
+ selected_coloring->allocate(env);
}
static void dump(unsigned mask, ir_graph *irg,
const arch_register_class_t *cls,
- const char *suffix,
- void (*dump_func)(ir_graph *, const char *))
+ const char *suffix)
{
if ((options.dump_flags & mask) == mask) {
if (cls) {
char buf[256];
- snprintf(buf, sizeof(buf), "-%s%s", cls->name, suffix);
- be_dump(irg, buf, dump_func);
+ snprintf(buf, sizeof(buf), "%s-%s", cls->name, suffix);
+ dump_ir_graph(irg, buf);
+ } else {
+ dump_ir_graph(irg, suffix);
}
- else
- be_dump(irg, suffix, dump_func);
}
}
/**
- * Checks for every reload if its user can perform the load on itself.
+ * Post-Walker: Checks for the given reload if it has only one user that can
+ * perform the reload as part of its address mode.
+ * Folds the reload into the user if that is possible.
*/
static void memory_operand_walker(ir_node *irn, void *env)
{
}
}
- /* kill the Reload */
+ /* kill the Reload if it was folded */
if (get_irn_n_edges(irn) == 0) {
+ ir_graph *irg = get_irn_irg(irn);
+ ir_mode *frame_mode = get_irn_mode(get_irn_n(irn, n_be_Reload_frame));
sched_remove(irn);
- set_irn_n(irn, be_pos_Reload_mem, new_Bad());
- set_irn_n(irn, be_pos_Reload_frame, new_Bad());
+ set_irn_n(irn, n_be_Reload_mem, new_r_Bad(irg, mode_X));
+ set_irn_n(irn, n_be_Reload_frame, new_r_Bad(irg, frame_mode));
}
}
*/
static void pre_spill(post_spill_env_t *pse, const arch_register_class_t *cls)
{
- be_chordal_env_t *chordal_env = &pse->cenv;
- be_irg_t *birg = pse->birg;
- ir_graph *irg = be_get_birg_irg(birg);
+ be_chordal_env_t *chordal_env = &pse->cenv;
+ ir_graph *irg = pse->irg;
+ ir_exec_freq *exec_freq = be_get_irg_exec_freq(irg);
- pse->cls = cls;
- chordal_env->cls = cls;
- chordal_env->border_heads = pmap_create();
- chordal_env->ignore_colors = bitset_malloc(chordal_env->cls->n_regs);
+ pse->cls = cls;
+ chordal_env->cls = cls;
+ chordal_env->border_heads = pmap_create();
+ chordal_env->allocatable_regs = bitset_malloc(chordal_env->cls->n_regs);
- be_assure_liveness(birg);
- be_liveness_assure_chk(be_get_birg_liveness(birg));
+ be_assure_liveness(irg);
+ be_liveness_assure_chk(be_get_irg_liveness(irg));
- stat_ev_do(pse->pre_spill_cost = be_estimate_irg_costs(irg, birg->exec_freq));
+ if (stat_ev_enabled) {
+ pse->pre_spill_cost = be_estimate_irg_costs(irg, exec_freq);
+ }
/* put all ignore registers into the ignore register set. */
- be_put_ignore_regs(birg, pse->cls, chordal_env->ignore_colors);
+ be_put_allocatable_regs(irg, pse->cls, chordal_env->allocatable_regs);
be_timer_push(T_RA_CONSTR);
- be_pre_spill_prepare_constr(chordal_env->birg, chordal_env->cls);
+ be_pre_spill_prepare_constr(irg, chordal_env->cls);
be_timer_pop(T_RA_CONSTR);
- dump(BE_CH_DUMP_CONSTR, birg->irg, pse->cls, "-constr-pre", dump_ir_block_graph_sched);
+ dump(BE_CH_DUMP_CONSTR, irg, pse->cls, "constr-pre");
}
/**
*/
static void post_spill(post_spill_env_t *pse, int iteration)
{
- be_chordal_env_t *chordal_env = &pse->cenv;
- be_irg_t *birg = pse->birg;
- ir_graph *irg = birg->irg;
- int colors_n = arch_register_class_n_regs(chordal_env->cls);
- int allocatable_regs = colors_n - be_put_ignore_regs(birg, chordal_env->cls, NULL);
+ be_chordal_env_t *chordal_env = &pse->cenv;
+ ir_graph *irg = pse->irg;
+ ir_exec_freq *exec_freq = be_get_irg_exec_freq(irg);
+ int allocatable_regs = be_get_n_allocatable_regs(irg, chordal_env->cls);
/* some special classes contain only ignore regs, no work to be done */
if (allocatable_regs > 0) {
- stat_ev_dbl("bechordal_spillcosts", be_estimate_irg_costs(irg, birg->exec_freq) - pse->pre_spill_cost);
+ stat_ev_dbl("bechordal_spillcosts", be_estimate_irg_costs(irg, exec_freq) - pse->pre_spill_cost);
/*
If we have a backend provided spiller, post spill is
be_timer_push(T_RA_SPILL_APPLY);
check_for_memory_operands(irg);
if (iteration == 0) {
- be_abi_fix_stack_nodes(birg->abi);
+ be_abi_fix_stack_nodes(irg);
}
be_timer_pop(T_RA_SPILL_APPLY);
/* verify schedule and register pressure */
be_timer_push(T_VERIFY);
if (chordal_env->opts->vrfy_option == BE_CH_VRFY_WARN) {
- be_verify_schedule(birg);
- be_verify_register_pressure(birg, pse->cls, irg);
+ be_verify_schedule(irg);
+ be_verify_register_pressure(irg, pse->cls);
} else if (chordal_env->opts->vrfy_option == BE_CH_VRFY_ASSERT) {
- assert(be_verify_schedule(birg) && "Schedule verification failed");
- assert(be_verify_register_pressure(birg, pse->cls, irg)
+ assert(be_verify_schedule(irg) && "Schedule verification failed");
+ assert(be_verify_register_pressure(irg, pse->cls)
&& "Register pressure verification failed");
}
be_timer_pop(T_VERIFY);
be_ra_chordal_coloring(chordal_env);
be_timer_pop(T_RA_COLOR);
- dump(BE_CH_DUMP_CONSTR, irg, pse->cls, "-color", dump_ir_block_graph_sched);
+ dump(BE_CH_DUMP_CONSTR, irg, pse->cls, "color");
/* Create the ifg with the selected flavor */
be_timer_push(T_RA_IFG);
chordal_env->ifg = be_create_ifg(chordal_env);
be_timer_pop(T_RA_IFG);
- stat_ev_if {
+ if (stat_ev_enabled) {
be_ifg_stat_t stat;
be_node_stats_t node_stats;
- be_ifg_stat(birg, chordal_env->ifg, &stat);
+ be_ifg_stat(irg, chordal_env->ifg, &stat);
stat_ev_dbl("bechordal_ifg_nodes", stat.n_nodes);
stat_ev_dbl("bechordal_ifg_edges", stat.n_edges);
stat_ev_dbl("bechordal_ifg_comps", stat.n_comps);
- be_collect_node_stats(&node_stats, birg);
+ be_collect_node_stats(&node_stats, irg);
be_subtract_node_stats(&node_stats, &last_node_stats);
stat_ev_dbl("bechordal_perms_before_coal",
}
be_timer_push(T_RA_COPYMIN);
- if (minir_file[0] != '\0') {
- extern void be_export_minir(const arch_env_t *arch_env, FILE *out,
- ir_graph *irg);
- FILE *out;
-
- if (strcmp(minir_file, "-") == 0) {
- out = stdout;
- } else {
- out = fopen(minir_file, "w");
- if (out == NULL) {
- panic("Cound't open minir output '%s'", minir_file);
- }
- }
-
- be_export_minir(chordal_env->birg->main_env->arch_env, out, irg);
- if (out != stdout)
- fclose(out);
- }
co_driver(chordal_env);
be_timer_pop(T_RA_COPYMIN);
- dump(BE_CH_DUMP_COPYMIN, irg, pse->cls, "-copymin", dump_ir_block_graph_sched);
+ dump(BE_CH_DUMP_COPYMIN, irg, pse->cls, "copymin");
/* ssa destruction */
be_timer_push(T_RA_SSA);
be_ssa_destruction(chordal_env);
be_timer_pop(T_RA_SSA);
- dump(BE_CH_DUMP_SSADESTR, irg, pse->cls, "-ssadestr", dump_ir_block_graph_sched);
+ dump(BE_CH_DUMP_SSADESTR, irg, pse->cls, "ssadestr");
if (chordal_env->opts->vrfy_option != BE_CH_VRFY_OFF) {
be_timer_push(T_VERIFY);
/* free some always allocated data structures */
pmap_destroy(chordal_env->border_heads);
- bitset_free(chordal_env->ignore_colors);
+ bitset_free(chordal_env->allocatable_regs);
}
/**
* Performs chordal register allocation for each register class on given irg.
*
- * @param birg Backend irg object
- * @return Structure containing timer for the single phases or NULL if no timing requested.
+ * @param irg  the graph
*/
-static void be_ra_chordal_main(be_irg_t *birg)
+static void be_ra_chordal_main(ir_graph *irg)
{
- const arch_env_t *arch_env = birg->main_env->arch_env;
- ir_graph *irg = birg->irg;
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
int j;
int m;
be_chordal_env_t chordal_env;
be_timer_push(T_RA_PROLOG);
- be_assure_liveness(birg);
+ be_assure_liveness(irg);
- chordal_env.obst = &obst;
- chordal_env.opts = &options;
- chordal_env.irg = irg;
- chordal_env.birg = birg;
- chordal_env.border_heads = NULL;
- chordal_env.ifg = NULL;
- chordal_env.ignore_colors = NULL;
+ chordal_env.obst = &obst;
+ chordal_env.opts = &options;
+ chordal_env.irg = irg;
+ chordal_env.border_heads = NULL;
+ chordal_env.ifg = NULL;
+ chordal_env.allocatable_regs = NULL;
obstack_init(&obst);
be_timer_pop(T_RA_PROLOG);
- stat_ev_if {
- be_collect_node_stats(&last_node_stats, birg);
+ if (stat_ev_enabled) {
+ be_collect_node_stats(&last_node_stats, irg);
}
- if (! arch_code_generator_has_spiller(birg->cg)) {
- /* use one of the generic spiller */
-
- /* Perform the following for each register class. */
- for (j = 0, m = arch_env_get_n_reg_class(arch_env); j < m; ++j) {
- post_spill_env_t pse;
- const arch_register_class_t *cls
- = arch_env_get_reg_class(arch_env, j);
-
- if (arch_register_class_flags(cls) & arch_register_class_flag_manual_ra)
- continue;
-
-
- stat_ev_ctx_push_str("bechordal_cls", cls->name);
+ /* use one of the generic spiller */
- stat_ev_if {
- be_do_stat_reg_pressure(birg, cls);
- }
+ /* Perform the following for each register class. */
+ for (j = 0, m = arch_env->n_register_classes; j < m; ++j) {
+ post_spill_env_t pse;
+ const arch_register_class_t *cls = &arch_env->register_classes[j];
- memcpy(&pse.cenv, &chordal_env, sizeof(chordal_env));
- pse.birg = birg;
- pre_spill(&pse, cls);
+ if (arch_register_class_flags(cls) & arch_register_class_flag_manual_ra)
+ continue;
- be_timer_push(T_RA_SPILL);
- be_do_spill(birg, cls);
- be_timer_pop(T_RA_SPILL);
- dump(BE_CH_DUMP_SPILL, irg, pse.cls, "-spill",
- dump_ir_block_graph_sched);
+ stat_ev_ctx_push_str("bechordal_cls", cls->name);
- post_spill(&pse, 0);
-
- stat_ev_if {
- be_node_stats_t node_stats;
+ if (stat_ev_enabled) {
+ be_do_stat_reg_pressure(irg, cls);
+ }
- be_collect_node_stats(&node_stats, birg);
- be_subtract_node_stats(&node_stats, &last_node_stats);
- be_emit_node_stats(&node_stats, "bechordal_");
+ pse.cenv = chordal_env;
+ pse.irg = irg;
+ pre_spill(&pse, cls);
- be_copy_node_stats(&last_node_stats, &node_stats);
- stat_ev_ctx_pop("bechordal_cls");
- }
- }
- } else {
- post_spill_env_t *pse;
+ be_timer_push(T_RA_SPILL);
+ be_do_spill(irg, cls);
+ be_timer_pop(T_RA_SPILL);
- /* the backend has its own spiller */
- m = arch_env_get_n_reg_class(arch_env);
+ dump(BE_CH_DUMP_SPILL, irg, pse.cls, "spill");
- pse = ALLOCAN(post_spill_env_t, m);
+ post_spill(&pse, 0);
- for (j = 0; j < m; ++j) {
- memcpy(&pse[j].cenv, &chordal_env, sizeof(chordal_env));
- pse[j].birg = birg;
- pre_spill(&pse[j], pse[j].cls);
- }
+ if (stat_ev_enabled) {
+ be_node_stats_t node_stats;
- be_timer_push(T_RA_SPILL);
- arch_code_generator_spill(birg->cg, birg);
- be_timer_pop(T_RA_SPILL);
- dump(BE_CH_DUMP_SPILL, irg, NULL, "-spill", dump_ir_block_graph_sched);
+ be_collect_node_stats(&node_stats, irg);
+ be_subtract_node_stats(&node_stats, &last_node_stats);
+ be_emit_node_stats(&node_stats, "bechordal_");
- for (j = 0; j < m; ++j) {
- post_spill(&pse[j], j);
+ be_copy_node_stats(&last_node_stats, &node_stats);
+ stat_ev_ctx_pop("bechordal_cls");
}
}
be_timer_push(T_VERIFY);
if (chordal_env.opts->vrfy_option == BE_CH_VRFY_WARN) {
- be_verify_register_allocation(birg);
+ be_verify_register_allocation(irg);
} else if (chordal_env.opts->vrfy_option == BE_CH_VRFY_ASSERT) {
- assert(be_verify_register_allocation(birg)
+ assert(be_verify_register_allocation(irg)
&& "Register allocation invalid");
}
be_timer_pop(T_VERIFY);
be_timer_push(T_RA_EPILOG);
- lower_nodes_after_ra(birg, options.lower_perm_opt & BE_CH_LOWER_PERM_COPY ? 1 : 0);
- dump(BE_CH_DUMP_LOWER, irg, NULL, "-belower-after-ra", dump_ir_block_graph_sched);
+ lower_nodes_after_ra(irg,
+ options.lower_perm_opt&BE_CH_LOWER_PERM_COPY ? 1 : 0);
+ dump(BE_CH_DUMP_LOWER, irg, NULL, "belower-after-ra");
obstack_free(&obst, NULL);
- be_liveness_invalidate(be_get_birg_liveness(birg));
+ be_liveness_invalidate(be_get_irg_liveness(irg));
be_timer_pop(T_RA_EPILOG);
be_timer_pop(T_RA_OTHER);
}
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_chordal_main);
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_chordal_main)
void be_init_chordal_main(void)
{
static be_ra_t be_ra_chordal_allocator = {