#include "be_t.h"
#include "bera.h"
#include "beirg.h"
-#include "bedump_minir.h"
+#include "bestack.h"
#include "bespillslots.h"
#include "bespill.h"
""
};
-typedef struct _post_spill_env_t {
+typedef struct post_spill_env_t {
be_chordal_env_t cenv;
ir_graph *irg;
const arch_register_class_t *cls;
{ NULL, 0 }
};
-static const lc_opt_enum_int_items_t lower_perm_stat_items[] = {
- { NULL, 0 }
-};
-
static const lc_opt_enum_int_items_t dump_items[] = {
{ "none", BE_CH_DUMP_NONE },
{ "spill", BE_CH_DUMP_SPILL },
&options.vrfy_option, be_ch_vrfy_items
};
-static char minir_file[256] = "";
-
static const lc_opt_table_entry_t be_chordal_options[] = {
LC_OPT_ENT_ENUM_PTR ("perm", "perm lowering options", &lower_perm_var),
LC_OPT_ENT_ENUM_MASK("dump", "select dump phases", &dump_var),
LC_OPT_ENT_ENUM_PTR ("verify", "verify options", &be_ch_vrfy_var),
- LC_OPT_ENT_STR ("minirout", "dump MinIR to file", minir_file, sizeof(minir_file)),
LC_OPT_LAST
};
static void be_ra_chordal_coloring(be_chordal_env_t *env)
{
- assert(selected_coloring != NULL);
- if (selected_coloring != NULL) {
- selected_coloring->allocate(env);
- }
+ selected_coloring->allocate(env);
}
static void dump(unsigned mask, ir_graph *irg,
}
/**
- * Checks for every reload if its user can perform the load on itself.
+ * Post-Walker: Checks whether the given reload has only one user that can
+ * perform the reload as part of its address mode.
+ * Folds the reload into that user if possible.
*/
static void memory_operand_walker(ir_node *irn, void *env)
{
}
}
- /* kill the Reload */
+ /* kill the Reload if it was folded */
if (get_irn_n_edges(irn) == 0) {
+ ir_graph *irg = get_irn_irg(irn);
+ ir_mode *frame_mode = get_irn_mode(get_irn_n(irn, n_be_Reload_frame));
sched_remove(irn);
- set_irn_n(irn, be_pos_Reload_mem, new_Bad());
- set_irn_n(irn, be_pos_Reload_frame, new_Bad());
+ set_irn_n(irn, n_be_Reload_mem, new_r_Bad(irg, mode_X));
+ set_irn_n(irn, n_be_Reload_frame, new_r_Bad(irg, frame_mode));
}
}
ir_graph *irg = pse->irg;
ir_exec_freq *exec_freq = be_get_irg_exec_freq(irg);
- pse->cls = cls;
- chordal_env->cls = cls;
- chordal_env->border_heads = pmap_create();
- chordal_env->ignore_colors = bitset_malloc(chordal_env->cls->n_regs);
+ pse->cls = cls;
+ chordal_env->cls = cls;
+ chordal_env->border_heads = pmap_create();
+ chordal_env->allocatable_regs = bitset_malloc(chordal_env->cls->n_regs);
be_assure_liveness(irg);
be_liveness_assure_chk(be_get_irg_liveness(irg));
- stat_ev_do(pse->pre_spill_cost = be_estimate_irg_costs(irg, exec_freq));
+ if (stat_ev_enabled) {
+ pse->pre_spill_cost = be_estimate_irg_costs(irg, exec_freq);
+ }
/* put all ignore registers into the ignore register set. */
- be_put_ignore_regs(irg, pse->cls, chordal_env->ignore_colors);
+ be_put_allocatable_regs(irg, pse->cls, chordal_env->allocatable_regs);
be_timer_push(T_RA_CONSTR);
be_pre_spill_prepare_constr(irg, chordal_env->cls);
*/
static void post_spill(post_spill_env_t *pse, int iteration)
{
- be_chordal_env_t *chordal_env = &pse->cenv;
- ir_graph *irg = pse->irg;
- ir_exec_freq *exec_freq = be_get_irg_exec_freq(irg);
- int colors_n = arch_register_class_n_regs(chordal_env->cls);
- int allocatable_regs
- = colors_n - be_put_ignore_regs(irg, chordal_env->cls, NULL);
+ be_chordal_env_t *chordal_env = &pse->cenv;
+ ir_graph *irg = pse->irg;
+ ir_exec_freq *exec_freq = be_get_irg_exec_freq(irg);
+ int allocatable_regs = be_get_n_allocatable_regs(irg, chordal_env->cls);
/* some special classes contain only ignore regs, no work to be done */
if (allocatable_regs > 0) {
be_timer_push(T_RA_SPILL_APPLY);
check_for_memory_operands(irg);
if (iteration == 0) {
- be_abi_fix_stack_nodes(be_get_irg_abi(irg));
+ be_abi_fix_stack_nodes(irg);
}
be_timer_pop(T_RA_SPILL_APPLY);
chordal_env->ifg = be_create_ifg(chordal_env);
be_timer_pop(T_RA_IFG);
- stat_ev_if {
+ if (stat_ev_enabled) {
be_ifg_stat_t stat;
be_node_stats_t node_stats;
}
be_timer_push(T_RA_COPYMIN);
- if (minir_file[0] != '\0') {
- FILE *out;
-
- if (strcmp(minir_file, "-") == 0) {
- out = stdout;
- } else {
- out = fopen(minir_file, "w");
- if (out == NULL) {
- panic("Cound't open minir output '%s'", minir_file);
- }
- }
-
- be_export_minir(out, irg);
- if (out != stdout)
- fclose(out);
- }
co_driver(chordal_env);
be_timer_pop(T_RA_COPYMIN);
/* free some always allocated data structures */
pmap_destroy(chordal_env->border_heads);
- bitset_free(chordal_env->ignore_colors);
+ bitset_free(chordal_env->allocatable_regs);
}
/**
be_assure_liveness(irg);
- chordal_env.obst = &obst;
- chordal_env.opts = &options;
- chordal_env.irg = irg;
- chordal_env.border_heads = NULL;
- chordal_env.ifg = NULL;
- chordal_env.ignore_colors = NULL;
+ chordal_env.obst = &obst;
+ chordal_env.opts = &options;
+ chordal_env.irg = irg;
+ chordal_env.border_heads = NULL;
+ chordal_env.ifg = NULL;
+ chordal_env.allocatable_regs = NULL;
obstack_init(&obst);
be_timer_pop(T_RA_PROLOG);
- stat_ev_if {
+ if (stat_ev_enabled) {
be_collect_node_stats(&last_node_stats, irg);
}
- if (! arch_code_generator_has_spiller(be_get_irg_cg(irg))) {
- /* use one of the generic spiller */
-
- /* Perform the following for each register class. */
- for (j = 0, m = arch_env_get_n_reg_class(arch_env); j < m; ++j) {
- post_spill_env_t pse;
- const arch_register_class_t *cls
- = arch_env_get_reg_class(arch_env, j);
-
- if (arch_register_class_flags(cls) & arch_register_class_flag_manual_ra)
- continue;
-
+ /* use one of the generic spiller */
- stat_ev_ctx_push_str("bechordal_cls", cls->name);
+ /* Perform the following for each register class. */
+ for (j = 0, m = arch_env->n_register_classes; j < m; ++j) {
+ post_spill_env_t pse;
+ const arch_register_class_t *cls = &arch_env->register_classes[j];
- stat_ev_if {
- be_do_stat_reg_pressure(irg, cls);
- }
+ if (arch_register_class_flags(cls) & arch_register_class_flag_manual_ra)
+ continue;
- memcpy(&pse.cenv, &chordal_env, sizeof(chordal_env));
- pse.irg = irg;
- pre_spill(&pse, cls);
- be_timer_push(T_RA_SPILL);
- be_do_spill(irg, cls);
- be_timer_pop(T_RA_SPILL);
+ stat_ev_ctx_push_str("bechordal_cls", cls->name);
- dump(BE_CH_DUMP_SPILL, irg, pse.cls, "spill");
-
- post_spill(&pse, 0);
-
- stat_ev_if {
- be_node_stats_t node_stats;
+ if (stat_ev_enabled) {
+ be_do_stat_reg_pressure(irg, cls);
+ }
- be_collect_node_stats(&node_stats, irg);
- be_subtract_node_stats(&node_stats, &last_node_stats);
- be_emit_node_stats(&node_stats, "bechordal_");
+ pse.cenv = chordal_env;
+ pse.irg = irg;
+ pre_spill(&pse, cls);
- be_copy_node_stats(&last_node_stats, &node_stats);
- stat_ev_ctx_pop("bechordal_cls");
- }
- }
- } else {
- post_spill_env_t *pse;
+ be_timer_push(T_RA_SPILL);
+ be_do_spill(irg, cls);
+ be_timer_pop(T_RA_SPILL);
- /* the backend has its own spiller */
- m = arch_env_get_n_reg_class(arch_env);
+ dump(BE_CH_DUMP_SPILL, irg, pse.cls, "spill");
- pse = ALLOCAN(post_spill_env_t, m);
+ post_spill(&pse, 0);
- for (j = 0; j < m; ++j) {
- memcpy(&pse[j].cenv, &chordal_env, sizeof(chordal_env));
- pse[j].irg = irg;
- pre_spill(&pse[j], pse[j].cls);
- }
+ if (stat_ev_enabled) {
+ be_node_stats_t node_stats;
- be_timer_push(T_RA_SPILL);
- arch_code_generator_spill(be_get_irg_cg(irg), be_birg_from_irg(irg));
- be_timer_pop(T_RA_SPILL);
- dump(BE_CH_DUMP_SPILL, irg, NULL, "spill");
+ be_collect_node_stats(&node_stats, irg);
+ be_subtract_node_stats(&node_stats, &last_node_stats);
+ be_emit_node_stats(&node_stats, "bechordal_");
- for (j = 0; j < m; ++j) {
- post_spill(&pse[j], j);
+ be_copy_node_stats(&last_node_stats, &node_stats);
+ stat_ev_ctx_pop("bechordal_cls");
}
}
be_timer_pop(T_RA_OTHER);
}
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_chordal_main);
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_chordal_main)
void be_init_chordal_main(void)
{
static be_ra_t be_ra_chordal_allocator = {