for (i = insn->use_start; i < insn->n_ops; ++i) {
int n_total;
const be_operand_t *op = &insn->ops[i];
+ be_lv_t *lv;
if (op->partner != NULL)
continue;
- if (be_values_interfere(env->birg->lv, op->irn, op->carrier))
+ lv = be_get_irg_liveness(env->irg);
+ if (be_values_interfere(lv, op->irn, op->carrier))
continue;
bitset_clear_all(bs);
foreach_out_edge(perm, edge) {
int i;
ir_node *proj = get_edge_src_irn(edge);
+ be_lv_t *lv = be_get_irg_liveness(env->irg);
assert(is_Proj(proj));
- if (!be_values_interfere(env->birg->lv, proj, irn)
- || pmap_contains(partners, proj))
+ if (!be_values_interfere(lv, proj, irn)
+ || pmap_contains(partners, proj))
continue;
/* don't insert a node twice */
bitset_t *colors = alloc_env->colors;
bitset_t *in_colors = alloc_env->in_colors;
struct list_head *head = get_block_border_head(env, block);
- be_lv_t *lv = env->birg->lv;
+ be_lv_t *lv = be_get_irg_liveness(env->irg);
const ir_node *irn;
border_t *b;
be_chordal_env_t *env = env_ptr;
bitset_t *live = bitset_malloc(get_irg_last_idx(env->irg));
ir_node *irn;
- be_lv_t *lv = env->birg->lv;
+ be_lv_t *lv = be_get_irg_liveness(env->irg);
int i, n;
unsigned elm;
* Make the Perm, recompute liveness and re-scan the insn since the
* in operands are now the Projs of the Perm.
*/
- perm = insert_Perm_after(env->birg, env->cls, sched_prev(insn->irn));
+ perm = insert_Perm_after(env->irg, env->cls, sched_prev(insn->irn));
/* Registers are propagated by insert_Perm_after(). Clean them here! */
if (perm == NULL)
typedef struct _post_spill_env_t {
be_chordal_env_t cenv;
- be_irg_t *birg;
+ ir_graph *irg;
const arch_register_class_t *cls;
double pre_spill_cost;
} post_spill_env_t;
static void pre_spill(post_spill_env_t *pse, const arch_register_class_t *cls)
{
be_chordal_env_t *chordal_env = &pse->cenv;
- be_irg_t *birg = pse->birg;
- ir_graph *irg = be_get_birg_irg(birg);
+ ir_graph *irg = pse->irg;
+ ir_exec_freq *exec_freq = be_get_irg_exec_freq(irg);
pse->cls = cls;
chordal_env->cls = cls;
be_assure_liveness(irg);
be_liveness_assure_chk(be_get_irg_liveness(irg));
- stat_ev_do(pse->pre_spill_cost = be_estimate_irg_costs(irg, birg->exec_freq));
+ stat_ev_do(pse->pre_spill_cost = be_estimate_irg_costs(irg, exec_freq));
/* put all ignore registers into the ignore register set. */
be_put_ignore_regs(irg, pse->cls, chordal_env->ignore_colors);
be_pre_spill_prepare_constr(irg, chordal_env->cls);
be_timer_pop(T_RA_CONSTR);
- dump(BE_CH_DUMP_CONSTR, birg->irg, pse->cls, "constr-pre");
+ dump(BE_CH_DUMP_CONSTR, irg, pse->cls, "constr-pre");
}
/**
static void post_spill(post_spill_env_t *pse, int iteration)
{
be_chordal_env_t *chordal_env = &pse->cenv;
- be_irg_t *birg = pse->birg;
- ir_graph *irg = birg->irg;
+ ir_graph *irg = pse->irg;
+ ir_exec_freq *exec_freq = be_get_irg_exec_freq(irg);
int colors_n = arch_register_class_n_regs(chordal_env->cls);
int allocatable_regs
= colors_n - be_put_ignore_regs(irg, chordal_env->cls, NULL);
/* some special classes contain only ignore regs, no work to be done */
if (allocatable_regs > 0) {
- stat_ev_dbl("bechordal_spillcosts", be_estimate_irg_costs(irg, birg->exec_freq) - pse->pre_spill_cost);
+ stat_ev_dbl("bechordal_spillcosts", be_estimate_irg_costs(irg, exec_freq) - pse->pre_spill_cost);
/*
If we have a backend provided spiller, post spill is
be_timer_push(T_RA_SPILL_APPLY);
check_for_memory_operands(irg);
if (iteration == 0) {
- be_abi_fix_stack_nodes(birg->abi);
+ be_abi_fix_stack_nodes(be_get_irg_abi(irg));
}
be_timer_pop(T_RA_SPILL_APPLY);
/* verify schedule and register pressure */
be_timer_push(T_VERIFY);
if (chordal_env->opts->vrfy_option == BE_CH_VRFY_WARN) {
- be_verify_schedule(birg);
+ be_verify_schedule(irg);
be_verify_register_pressure(irg, pse->cls);
} else if (chordal_env->opts->vrfy_option == BE_CH_VRFY_ASSERT) {
- assert(be_verify_schedule(birg) && "Schedule verification failed");
+ assert(be_verify_schedule(irg) && "Schedule verification failed");
assert(be_verify_register_pressure(irg, pse->cls)
&& "Register pressure verification failed");
}
be_ifg_stat_t stat;
be_node_stats_t node_stats;
- be_ifg_stat(birg, chordal_env->ifg, &stat);
+ be_ifg_stat(irg, chordal_env->ifg, &stat);
stat_ev_dbl("bechordal_ifg_nodes", stat.n_nodes);
stat_ev_dbl("bechordal_ifg_edges", stat.n_edges);
stat_ev_dbl("bechordal_ifg_comps", stat.n_comps);
- be_collect_node_stats(&node_stats, birg);
+ be_collect_node_stats(&node_stats, irg);
be_subtract_node_stats(&node_stats, &last_node_stats);
stat_ev_dbl("bechordal_perms_before_coal",
}
}
- be_export_minir(chordal_env->birg->main_env->arch_env, out, irg);
+ be_export_minir(out, irg);
if (out != stdout)
fclose(out);
}
/**
* Performs chordal register allocation for each register class on given irg.
*
- * @param birg Backend irg object
- * @return Structure containing timer for the single phases or NULL if no timing requested.
+ * @param irg the graph
+ * @return Structure containing timer for the single phases or NULL if no
+ * timing requested.
*/
-static void be_ra_chordal_main(be_irg_t *birg)
+static void be_ra_chordal_main(ir_graph *irg)
{
- const arch_env_t *arch_env = birg->main_env->arch_env;
- ir_graph *irg = birg->irg;
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
int j;
int m;
be_chordal_env_t chordal_env;
chordal_env.obst = &obst;
chordal_env.opts = &options;
chordal_env.irg = irg;
- chordal_env.birg = birg;
chordal_env.border_heads = NULL;
chordal_env.ifg = NULL;
chordal_env.ignore_colors = NULL;
be_timer_pop(T_RA_PROLOG);
stat_ev_if {
- be_collect_node_stats(&last_node_stats, birg);
+ be_collect_node_stats(&last_node_stats, irg);
}
- if (! arch_code_generator_has_spiller(birg->cg)) {
+ if (! arch_code_generator_has_spiller(be_get_irg_cg(irg))) {
/* use one of the generic spiller */
/* Perform the following for each register class. */
stat_ev_ctx_push_str("bechordal_cls", cls->name);
stat_ev_if {
- be_do_stat_reg_pressure(birg, cls);
+ be_do_stat_reg_pressure(irg, cls);
}
memcpy(&pse.cenv, &chordal_env, sizeof(chordal_env));
- pse.birg = birg;
+ pse.irg = irg;
pre_spill(&pse, cls);
be_timer_push(T_RA_SPILL);
stat_ev_if {
be_node_stats_t node_stats;
- be_collect_node_stats(&node_stats, birg);
+ be_collect_node_stats(&node_stats, irg);
be_subtract_node_stats(&node_stats, &last_node_stats);
be_emit_node_stats(&node_stats, "bechordal_");
for (j = 0; j < m; ++j) {
memcpy(&pse[j].cenv, &chordal_env, sizeof(chordal_env));
- pse[j].birg = birg;
+ pse[j].irg = irg;
pre_spill(&pse[j], pse[j].cls);
}
be_timer_push(T_RA_SPILL);
- arch_code_generator_spill(birg->cg, birg);
+ arch_code_generator_spill(be_get_irg_cg(irg), be_birg_from_irg(irg));
be_timer_pop(T_RA_SPILL);
dump(BE_CH_DUMP_SPILL, irg, NULL, "spill");
be_timer_push(T_VERIFY);
if (chordal_env.opts->vrfy_option == BE_CH_VRFY_WARN) {
- be_verify_register_allocation(birg);
+ be_verify_register_allocation(irg);
} else if (chordal_env.opts->vrfy_option == BE_CH_VRFY_ASSERT) {
- assert(be_verify_register_allocation(birg)
+ assert(be_verify_register_allocation(irg)
&& "Register allocation invalid");
}
be_timer_pop(T_VERIFY);
be_timer_push(T_RA_EPILOG);
- lower_nodes_after_ra(birg, options.lower_perm_opt & BE_CH_LOWER_PERM_COPY ? 1 : 0);
+ lower_nodes_after_ra(irg,
+ options.lower_perm_opt&BE_CH_LOWER_PERM_COPY ? 1 : 0);
dump(BE_CH_DUMP_LOWER, irg, NULL, "belower-after-ra");
obstack_free(&obst, NULL);
struct be_chordal_env_t {
struct obstack *obst; /**< An obstack for temporary storage. */
be_ra_chordal_opts_t *opts; /**< A pointer to the chordal ra options. */
- be_irg_t *birg; /**< Back-end IRG session. */
ir_graph *irg; /**< The graph under examination. */
const arch_register_class_t *cls; /**< The current register class. */
pmap *border_heads; /**< Maps blocks to border heads. */
{
if (env->ifg)
return be_ifg_connected(env->ifg, a, b);
- else
- return be_values_interfere(env->birg->lv, a, b);
+ else {
+ be_lv_t *lv = be_get_irg_liveness(env->irg);
+ return be_values_interfere(lv, a, b);
+ }
}
static int set_cmp_conflict_t(const void *x, const void *y, size_t size)
co2_t env;
FILE *f;
- phase_init(&env.ph, co->cenv->birg->irg, co2_irn_init);
+ phase_init(&env.ph, co->cenv->irg, co2_irn_init);
env.touched = NULL;
env.visited = 0;
env.co = co;
{
if (env->ifg)
return be_ifg_connected(env->ifg, a, b);
- else
- return be_values_interfere(env->birg->lv, a, b);
+ else {
+ be_lv_t *lv = be_get_irg_liveness(env->irg);
+ return be_values_interfere(lv, a, b);
+ }
}
int res;
ir_node *root_bl = get_nodes_block(root);
ir_node *copy_bl = is_Phi(root) ? get_Block_cfgpred_block(root_bl, pos) : root_bl;
+ ir_exec_freq *exec_freq = be_get_irg_exec_freq(co->cenv->irg);
(void) arg;
- res = get_block_execfreq_ulong(co->cenv->birg->exec_freq, copy_bl);
+ res = get_block_execfreq_ulong(exec_freq, copy_bl);
/* don't allow values smaller than one. */
return res < 1 ? 1 : res;
char buf[1024];
size_t i, n;
char *tu_name;
+ const char *cup_name = be_birg_from_irg(env->irg)->main_env->cup_name;
- n = strlen(env->birg->main_env->cup_name);
+ n = strlen(cup_name);
tu_name = XMALLOCN(char, n + 1);
- strcpy(tu_name, env->birg->main_env->cup_name);
+ strcpy(tu_name, cup_name);
for (i = 0; i < n; ++i)
if (tu_name[i] == '.')
tu_name[i] = '_';
end_block_mapping(NULL);
}
-void be_export_minir(const arch_env_t *new_arch_env, FILE *out, ir_graph *irg)
+void be_export_minir(FILE *out, ir_graph *irg)
{
- arch_env = new_arch_env;
+ arch_env = be_get_irg_arch_env(irg);
init_yaml(out);
print_regclasses();
#ifndef FIRM_BE_BEDUMP_MINIR_H
#define FIRM_BE_BEDUMP_MINIR_H
-void be_export_minir(const arch_env_t *arch_env, FILE *out, ir_graph *irg);
+#include "firm_types.h"
+#include <stdio.h>
+
+void be_export_minir(FILE *out, ir_graph *irg);
#endif
/** Returns whether the values @p a and @p b interfere in the graph of @p ifg. */
int be_ifg_connected(const be_ifg_t *ifg, const ir_node *a, const ir_node *b)
{
	return be_values_interfere(be_get_irg_liveness(ifg->env->irg), a, b);
}
static void nodes_walker(ir_node *bl, void *data)
{
neighbours_iter_t *it = data;
struct list_head *head = get_block_border_head(it->env, block);
+ be_lv_t *lv = be_get_irg_liveness(it->env->irg);
border_t *b;
int has_started = 0;
- if (!be_is_live_in(it->env->birg->lv, block, it->irn) && block != get_nodes_block(it->irn))
+ if (!be_is_live_in(lv, block, it->irn) && block != get_nodes_block(it->irn))
return;
foreach_border_head(head, b) {
}
-static int int_component_stat(be_irg_t *birg, be_ifg_t *ifg)
+static int int_component_stat(ir_graph *irg, be_ifg_t *ifg)
{
int n_comp = 0;
nodes_iter_t nodes_it;
- bitset_t *seen = bitset_irg_malloc(birg->irg);
+ bitset_t *seen = bitset_irg_malloc(irg);
ir_node *n;
return n_comp;
}
-void be_ifg_stat(be_irg_t *birg, be_ifg_t *ifg, be_ifg_stat_t *stat)
+void be_ifg_stat(ir_graph *irg, be_ifg_t *ifg, be_ifg_stat_t *stat)
{
nodes_iter_t nodes_it;
neighbours_iter_t neigh_it;
- bitset_t *nodes = bitset_irg_malloc(birg->irg);
+ bitset_t *nodes = bitset_irg_malloc(irg);
ir_node *n, *m;
memset(stat, 0, sizeof(stat[0]));
}
}
- stat->n_comps = int_component_stat(birg, ifg);
+ stat->n_comps = int_component_stat(irg, ifg);
bitset_free(nodes);
}
int n_comps;
} be_ifg_stat_t;
-void be_ifg_stat(be_irg_t *birg, be_ifg_t *ifg, be_ifg_stat_t *stat);
+void be_ifg_stat(ir_graph *irg, be_ifg_t *ifg, be_ifg_stat_t *stat);
be_ifg_t *be_create_ifg(const be_chordal_env_t *env);
return be_birg_from_irg(irg)->dom_front;
}
+static inline be_abi_irg_t *be_get_irg_abi(const ir_graph *irg)
+{
+ return be_birg_from_irg(irg)->abi;
+}
+
+static inline be_options_t *be_get_irg_options(const ir_graph *irg)
+{
+ return be_birg_from_irg(irg)->main_env->options;
+}
+
+static inline arch_code_generator_t *be_get_irg_cg(const ir_graph *irg)
+{
+ return be_birg_from_irg(irg)->cg;
+}
+
/** deprecated */
static inline ir_graph *be_get_birg_irg(const be_irg_t *birg)
{
*/
-ir_node *insert_Perm_after(be_irg_t *birg,
- const arch_register_class_t *cls,
+ir_node *insert_Perm_after(ir_graph *irg, const arch_register_class_t *cls,
ir_node *pos)
{
- be_lv_t *lv = birg->lv;
+ be_lv_t *lv = be_get_irg_liveness(irg);
ir_node *bl = is_Block(pos) ? pos : get_nodes_block(pos);
ir_nodeset_t live;
ir_nodeset_iterator_t iter;
curr = proj;
- be_ssa_construction_init(&senv, birg->irg);
+ be_ssa_construction_init(&senv, irg);
be_ssa_construction_add_copy(&senv, perm_op);
be_ssa_construction_add_copy(&senv, proj);
be_ssa_construction_fix_users(&senv, perm_op);
* @param irn The node to insert the Perm after.
* @return The Perm or NULL if nothing was live before @p irn.
*/
-ir_node *insert_Perm_after(be_irg_t *birg, const arch_register_class_t *cls,
+ir_node *insert_Perm_after(ir_graph *irg, const arch_register_class_t *cls,
ir_node *irn);
/**
*/
int be_remove_empty_blocks(ir_graph *irg);
-#endif /* FIRM_BE_BEIRGMOD_H */
+#endif
/** Environment for constraints. */
typedef struct {
- be_irg_t *birg;
+ ir_graph *irg;
ir_nodemap_t op_set;
struct obstack obst;
} constraint_env_t;
/** Lowering walker environment. */
typedef struct _lower_env_t {
- be_irg_t *birg;
- unsigned do_copy : 1;
+ ir_graph *irg;
+ unsigned do_copy : 1;
} lower_env_t;
/** Holds a Perm register pair. */
static void gen_assure_different_pattern(ir_node *irn, ir_node *other_different, constraint_env_t *env)
{
- ir_graph *irg;
ir_nodemap_t *op_set;
ir_node *block;
const arch_register_class_t *cls;
return;
}
- irg = be_get_birg_irg(env->birg);
op_set = &env->op_set;
block = get_nodes_block(irn);
cls = arch_get_irn_reg_class_out(other_different);
}
}
-/**
- * Walks over all nodes to assure register constraints.
- *
- * @param birg The birg structure containing the irg
- */
-void assure_constraints(be_irg_t *birg)
+void assure_constraints(ir_graph *irg)
{
- ir_graph *irg = be_get_birg_irg(birg);
constraint_env_t cenv;
ir_nodemap_iterator_t map_iter;
ir_nodemap_entry_t map_entry;
FIRM_DBG_REGISTER(dbg_constr, "firm.be.lower.constr");
- cenv.birg = birg;
+ cenv.irg = irg;
ir_nodemap_init(&cenv.op_set);
obstack_init(&cenv.obst);
* @return 1, if there is something left to perm over.
* 0, if removed the complete perm.
*/
-static int push_through_perm(ir_node *perm, lower_env_t *env)
+static int push_through_perm(ir_node *perm)
{
ir_graph *irg = get_irn_irg(perm);
ir_node *bl = get_nodes_block(perm);
sched_foreach_reverse_from(sched_prev(perm), irn) {
for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
ir_node *op = get_irn_n(irn, i);
+ be_lv_t *lv = be_get_irg_liveness(irg);
if (arch_irn_consider_in_reg_alloc(cls, op) &&
- !be_values_interfere(env->birg->lv, op, one_proj)) {
+ !be_values_interfere(lv, op, one_proj)) {
frontier = irn;
goto found_front;
}
if (!be_is_Perm(irn))
return;
- perm_stayed = push_through_perm(irn, walk_env);
+ perm_stayed = push_through_perm(irn);
if (perm_stayed)
lower_perm_node(irn, walk_env);
}
-/**
- * Walks over all blocks in an irg and performs lowering need to be
- * done after register allocation (e.g. perm lowering).
- *
- * @param birg The birg object
- * @param do_copy 1 == resolve cycles with a free reg if available
- */
-void lower_nodes_after_ra(be_irg_t *birg, int do_copy)
+void lower_nodes_after_ra(ir_graph *irg, int do_copy)
{
lower_env_t env;
- ir_graph *irg;
FIRM_DBG_REGISTER(dbg, "firm.be.lower");
FIRM_DBG_REGISTER(dbg_permmove, "firm.be.lower.permmove");
- env.birg = birg;
+ env.irg = irg;
env.do_copy = do_copy;
/* we will need interference */
- irg = be_get_birg_irg(birg);
be_liveness_assure_chk(be_get_irg_liveness(irg));
irg_walk_graph(irg, NULL, lower_nodes_after_ra_walker, &env);
/**
* @file
- * @brief Performs lowering of perm nodes. Inserts copies to assure register constraints.
+ * @brief Performs lowering of perm nodes. Inserts copies to assure
+ * register constraints.
* @author Christian Wuerdig
* @date 14.12.2005
* @version $Id$
#include "beirg.h"
-void assure_constraints(be_irg_t *birg);
-void lower_nodes_after_ra(be_irg_t *birg, int do_copy);
+/**
+ * Walks over all nodes to assure register constraints.
+ *
+ * @param irg The graph
+ */
+void assure_constraints(ir_graph *irg);
+
+/**
+ * Walks over all blocks in an irg and performs lowering need to be
+ * done after register allocation (e.g. perm lowering).
+ *
+ * @param irg The graph
+ * @param do_copy 1 == resolve cycles with a free reg if available
+ */
+void lower_nodes_after_ra(ir_graph *irg, int do_copy);
-#endif /* FIRM_BE_BELOWER_H */
+#endif
};
/* Perform schedule verification if requested. */
-static void be_sched_vrfy(be_irg_t *birg, int vrfy_opt)
+static void be_sched_vrfy(ir_graph *irg, int vrfy_opt)
{
if (vrfy_opt == BE_VRFY_WARN) {
- be_verify_schedule(birg);
+ be_verify_schedule(irg);
} else if (vrfy_opt == BE_VRFY_ASSERT) {
- assert(be_verify_schedule(birg) && "Schedule verification failed.");
+ assert(be_verify_schedule(irg) && "Schedule verification failed.");
}
}
}
/**
- * Prepare a backend graph for code generation and initialize its birg
+ * Prepare a backend graph for code generation and initialize its irg
*/
static void initialize_birg(be_irg_t *birg, ir_graph *irg, be_main_env_t *env)
{
/* check schedule */
be_timer_push(T_VERIFY);
- be_sched_vrfy(birg, be_options.vrfy_option);
+ be_sched_vrfy(irg, be_options.vrfy_option);
be_timer_pop(T_VERIFY);
/* introduce patterns to assure constraints */
/* add Keeps for should_be_different constrained nodes */
/* beware: needs schedule due to usage of be_ssa_constr */
- assure_constraints(birg);
+ assure_constraints(irg);
be_timer_pop(T_CONSTR);
dump(DUMP_SCHED, irg, "assured");
/* check schedule */
be_timer_push(T_VERIFY);
- be_sched_vrfy(birg, be_options.vrfy_option);
+ be_sched_vrfy(irg, be_options.vrfy_option);
be_timer_pop(T_VERIFY);
stat_ev_if {
}
/* Do register allocation */
- be_allocate_registers(birg);
+ be_allocate_registers(irg);
#ifdef FIRM_STATISTICS
stat_ev_dbl("bemain_costs_before_ra", be_estimate_irg_costs(irg, birg->exec_freq));
if (be_options.vrfy_option == BE_VRFY_WARN) {
irg_verify(irg, VRFY_ENFORCE_SSA);
be_check_dominance(irg);
- be_verify_schedule(birg);
- be_verify_register_allocation(birg);
+ be_verify_schedule(irg);
+ be_verify_register_allocation(irg);
} else if (be_options.vrfy_option == BE_VRFY_ASSERT) {
assert(irg_verify(irg, VRFY_ENFORCE_SSA) && "irg verification failed");
assert(be_check_dominance(irg) && "Dominance verification failed");
- assert(be_verify_schedule(birg) && "Schedule verification failed");
- assert(be_verify_register_allocation(birg)
+ assert(be_verify_schedule(irg) && "Schedule verification failed");
+ assert(be_verify_register_allocation(irg)
&& "register allocation verification failed");
}
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
static struct obstack obst;
-static be_irg_t *birg;
static ir_graph *irg;
static const arch_register_class_t *cls;
static const arch_register_req_t *default_cls_req;
/** Dumps @p irg with @p suffix when dumping is enabled for @p mask. */
static void dump(int mask, ir_graph *irg, const char *suffix)
{
	be_options_t *options = be_get_irg_options(irg);

	if (options->dump_flags & mask)
		dump_ir_graph(irg, suffix);
}
/**
* The pref register allocator for a whole procedure.
*/
-static void be_pref_alloc(be_irg_t *new_birg)
+static void be_pref_alloc(ir_graph *new_irg)
{
- const arch_env_t *arch_env = new_birg->main_env->arch_env;
+ const arch_env_t *arch_env = be_get_irg_arch_env(new_irg);
int n_cls = arch_env_get_n_reg_class(arch_env);
int c;
obstack_init(&obst);
- birg = new_birg;
- irg = be_get_birg_irg(birg);
- execfreqs = birg->exec_freq;
+ irg = new_irg;
+ execfreqs = be_get_irg_exec_freq(irg);
/* determine a good coloring order */
determine_block_order();
n_regs = arch_register_class_n_regs(cls);
normal_regs = rbitset_malloc(n_regs);
- be_abi_set_non_ignore_regs(birg->abi, cls, normal_regs);
+ be_abi_set_non_ignore_regs(be_get_irg_abi(irg), cls, normal_regs);
spill();
/* verify schedule and register pressure */
be_timer_push(T_VERIFY);
- if (birg->main_env->options->vrfy_option == BE_VRFY_WARN) {
- be_verify_schedule(birg);
+ if (be_get_irg_options(irg)->vrfy_option == BE_VRFY_WARN) {
+ be_verify_schedule(irg);
be_verify_register_pressure(irg, cls);
- } else if (birg->main_env->options->vrfy_option == BE_VRFY_ASSERT) {
- assert(be_verify_schedule(birg) && "Schedule verification failed");
+ } else if (be_get_irg_options(irg)->vrfy_option == BE_VRFY_ASSERT) {
+ assert(be_verify_schedule(irg) && "Schedule verification failed");
assert(be_verify_register_pressure(irg, cls)
&& "Register pressure verification failed");
}
}
be_timer_push(T_RA_SPILL_APPLY);
- be_abi_fix_stack_nodes(birg->abi);
+ be_abi_fix_stack_nodes(be_get_irg_abi(irg));
be_timer_pop(T_RA_SPILL_APPLY);
be_timer_push(T_VERIFY);
- if (birg->main_env->options->vrfy_option == BE_VRFY_WARN) {
- be_verify_register_allocation(birg);
- } else if (birg->main_env->options->vrfy_option == BE_VRFY_ASSERT) {
- assert(be_verify_register_allocation(birg)
- && "Register allocation invalid");
+ if (be_get_irg_options(irg)->vrfy_option == BE_VRFY_WARN) {
+ be_verify_register_allocation(irg);
+ } else if (be_get_irg_options(irg)->vrfy_option == BE_VRFY_ASSERT) {
+ assert(be_verify_register_allocation(irg)
+ && "Register allocation invalid");
}
be_timer_pop(T_VERIFY);
be_add_module_to_list(®ister_allocators, name, allocator);
}
-void be_allocate_registers(be_irg_t *birg)
+void be_allocate_registers(ir_graph *irg)
{
assert(selected_allocator != NULL);
if (selected_allocator != NULL) {
- selected_allocator->allocate(birg);
+ selected_allocator->allocate(irg);
}
}
#include "beirg.h"
typedef struct be_ra_t {
- void (*allocate)(be_irg_t *bi); /**< allocate registers on a graph */
+ void (*allocate)(ir_graph *irg); /**< allocate registers on a graph */
} be_ra_t;
void be_register_allocator(const char *name, be_ra_t *allocator);
/**
* Do register allocation with currently selected register allocator
*/
-void be_allocate_registers(be_irg_t *birg);
+void be_allocate_registers(ir_graph *irg);
#endif
insert_all_perms_env_t *env = data;
be_chordal_env_t *chordal_env = env->chordal_env;
pmap *perm_map = env->perm_map;
- be_lv_t *lv = chordal_env->birg->lv;
+ be_lv_t *lv = be_get_irg_liveness(chordal_env->irg);
int i, n;
assert(is_Block(bl));
static void set_regs_or_place_dupls_walker(ir_node *bl, void *data)
{
be_chordal_env_t *chordal_env = data;
- be_lv_t *lv = chordal_env->birg->lv;
+ be_lv_t *lv = be_get_irg_liveness(chordal_env->irg);
ir_node *phi;
/* Consider all phis of this block */
typedef struct pressure_walker_env_t pressure_walker_env_t;
struct pressure_walker_env_t {
- be_irg_t *birg;
+ ir_graph *irg;
be_lv_t *lv;
double insn_count;
double regpressure;
ir_node *block,
const arch_register_class_t *cls)
{
- be_irg_t *birg = env->birg;
- ir_graph *irg = be_get_birg_irg(birg);
+ ir_graph *irg = env->irg;
ir_node *irn;
ir_nodeset_t live_nodes;
int max_live;
check_reg_pressure_class(env, block, env->cls);
}
-void be_do_stat_reg_pressure(be_irg_t *birg, const arch_register_class_t *cls)
+void be_do_stat_reg_pressure(ir_graph *irg, const arch_register_class_t *cls)
{
pressure_walker_env_t env;
- ir_graph *irg = be_get_birg_irg(birg);
double average_pressure;
- env.birg = birg;
+ env.irg = irg;
env.insn_count = 0;
env.max_pressure = 0;
env.regpressure = 0;
}
}
-void be_collect_node_stats(be_node_stats_t *new_stats, be_irg_t *birg)
+void be_collect_node_stats(be_node_stats_t *new_stats, ir_graph *irg)
{
memset(new_stats, 0, sizeof(*new_stats));
- irg_walk_graph(birg->irg, NULL, node_stat_walker, new_stats);
+ irg_walk_graph(irg, NULL, node_stat_walker, new_stats);
}
void be_subtract_node_stats(be_node_stats_t *stats, be_node_stats_t *sub)
/**
* Collect statistics about node types
*/
-void be_collect_node_stats(be_node_stats_t *stats, be_irg_t *birg);
+void be_collect_node_stats(be_node_stats_t *stats, ir_graph *irg);
void be_subtract_node_stats(be_node_stats_t *stats, be_node_stats_t *sub);
/**
* Collects statistics information about register pressure.
- * @param birg The be irg object containing the irg
+ * @param irg The irg
*/
-void be_do_stat_reg_pressure(be_irg_t *birg, const arch_register_class_t *cls);
+void be_do_stat_reg_pressure(ir_graph *irg, const arch_register_class_t *cls);
/**
* Gives a cost estimate for the program (based on execution frequencies)
typedef struct be_verify_schedule_env_t_ {
- int problem_found; /**< flags indicating if there was a problem */
- bitset_t *scheduled; /**< bitset of scheduled nodes */
- ir_graph *irg; /**< the irg to check */
+ int problem_found; /**< flags indicating a problem */
+ bitset_t *scheduled; /**< bitset of scheduled nodes */
+ ir_graph *irg; /**< the irg to check */
} be_verify_schedule_env_t;
/**
/**
* Start a walk over the irg and check schedule.
*/
-int be_verify_schedule(const be_irg_t *birg)
+int be_verify_schedule(ir_graph *irg)
{
be_verify_schedule_env_t env;
env.problem_found = 0;
- env.irg = be_get_birg_irg(birg);
+ env.irg = irg;
env.scheduled = bitset_alloca(get_irg_last_idx(env.irg));
- irg_block_walk_graph(env.irg, verify_schedule_walker, NULL, &env);
+ irg_block_walk_graph(irg, verify_schedule_walker, NULL, &env);
/* check if all nodes are scheduled */
- irg_walk_graph(env.irg, check_schedule, NULL, &env);
+ irg_walk_graph(irg, check_schedule, NULL, &env);
return ! env.problem_found;
}
}
}
-int be_verify_register_allocation(const be_irg_t *birg)
+int be_verify_register_allocation(ir_graph *new_irg)
{
- irg = be_get_birg_irg(birg);
+ irg = new_irg;
arch_env = be_get_irg_arch_env(irg);
lv = be_liveness(irg);
problem_found = 0;
* @param irg The irg to check
* @return 1 if the schedule is valid, 0 otherwise
*/
-int be_verify_schedule(const be_irg_t *birg);
+int be_verify_schedule(ir_graph *irg);
/**
* Verify spillslots
* register assigned, also checks that each scheduled node has a register
* assigned.
*
- * @param birg The birg to check
+ * @param irg The graph to check
* @return 1 if verify succeeded, 0 otherwise
*/
-int be_verify_register_allocation(const be_irg_t *birg);
+int be_verify_register_allocation(ir_graph *irg);
#endif