free(cg);
}
-static void *TEMPLATE_cg_init(be_irg_t *birg);
+static void *TEMPLATE_cg_init(ir_graph *irg);
static const arch_code_generator_if_t TEMPLATE_code_gen_if = {
TEMPLATE_cg_init,
/**
* Initializes the code generator.
*/
-static void *TEMPLATE_cg_init(be_irg_t *birg)
+static void *TEMPLATE_cg_init(ir_graph *irg)
{
- const arch_env_t *arch_env = be_get_irg_arch_env(birg->irg);
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
TEMPLATE_isa_t *isa = (TEMPLATE_isa_t *) arch_env;
TEMPLATE_code_gen_t *cg = XMALLOC(TEMPLATE_code_gen_t);
- cg->impl = &TEMPLATE_code_gen_if;
- cg->irg = be_get_birg_irg(birg);
- cg->isa = isa;
+ cg->impl = &TEMPLATE_code_gen_if;
+ cg->irg = irg;
+ cg->isa = isa;
return (arch_code_generator_t *)cg;
}
blk_sched = be_create_block_schedule(irg);
- be_dbg_method_begin(entity, be_abi_get_stack_layout(cg->birg->abi));
+ be_dbg_method_begin(entity, be_abi_get_stack_layout(be_get_irg_abi(cg->irg)));
be_gas_emit_function_prolog(entity, 4);
irg_block_walk_graph(irg, amd64_gen_labels, NULL, NULL);
{
amd64_code_gen_t *cg = self;
- be_sched_fix_flags(cg->birg, &amd64_reg_classes[CLASS_amd64_flags], 0);
+ be_sched_fix_flags(cg->irg, &amd64_reg_classes[CLASS_amd64_flags], 0);
}
static void amd64_after_ra(void *self)
{
amd64_code_gen_t *cg = self;
- be_coalesce_spillslots(cg->birg);
+ be_coalesce_spillslots(cg->irg);
irg_block_walk_graph(cg->irg, NULL, amd64_after_ra_walker, NULL);
}
free(cg);
}
-static void *amd64_cg_init(be_irg_t *birg);
+static void *amd64_cg_init(ir_graph *irg);
static const arch_code_generator_if_t amd64_code_gen_if = {
amd64_cg_init,
/**
* Initializes the code generator.
*/
-static void *amd64_cg_init(be_irg_t *birg)
+static void *amd64_cg_init(ir_graph *irg)
{
- const arch_env_t *arch_env = be_get_irg_arch_env(birg->irg);
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
amd64_isa_t *isa = (amd64_isa_t *) arch_env;
amd64_code_gen_t *cg = XMALLOC(amd64_code_gen_t);
cg->impl = &amd64_code_gen_if;
- cg->irg = be_get_birg_irg(birg);
+ cg->irg = irg;
cg->isa = isa;
- cg->birg = birg;
- cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
+ cg->dump = (be_get_irg_options(irg)->dump_flags & DUMP_BE) ? 1 : 0;
return (arch_code_generator_t *)cg;
}
const arch_code_generator_if_t *impl; /**< implementation */
ir_graph *irg; /**< current irg */
amd64_isa_t *isa; /**< the isa instance */
- be_irg_t *birg; /**< The be-irg (contains additional information about the irg) */
char dump; /**< set to 1 if graphs should be dumped */
ir_node *noreg_gp; /**< unique NoReg_GP node */
};
int n_cfgpreds;
int need_label;
int i, arity;
- ir_exec_freq *exec_freq = cg->birg->exec_freq;
+ ir_exec_freq *exec_freq = be_get_irg_exec_freq(cg->irg);
need_label = 0;
n_cfgpreds = get_Block_n_cfgpreds(block);
arm_register_emitters();
- be_dbg_method_begin(entity, be_abi_get_stack_layout(cg->birg->abi));
+ be_dbg_method_begin(entity, be_abi_get_stack_layout(be_get_irg_abi(cg->irg)));
/* create the block schedule */
blk_sched = be_create_block_schedule(irg);
register_peephole_optimisation(op_arm_Ldr, peephole_arm_Str_Ldr);
register_peephole_optimisation(op_arm_FrameAddr, peephole_arm_FrameAddr);
- be_peephole_opt(cg->birg);
+ be_peephole_opt(cg->irg);
}
{
arm_code_gen_t *cg = self;
- be_sched_fix_flags(cg->birg, &arm_reg_classes[CLASS_arm_flags],
+ be_sched_fix_flags(cg->irg, &arm_reg_classes[CLASS_arm_flags],
&arm_flags_remat);
}
static void arm_after_ra(void *self)
{
arm_code_gen_t *cg = self;
- be_coalesce_spillslots(cg->birg);
+ be_coalesce_spillslots(cg->irg);
irg_block_walk_graph(cg->irg, NULL, arm_after_ra_walker, NULL);
}
}
/* forward */
-static void *arm_cg_init(be_irg_t *birg);
+static void *arm_cg_init(ir_graph *irg);
static const arch_code_generator_if_t arm_code_gen_if = {
arm_cg_init,
/**
* Initializes the code generator.
*/
-static void *arm_cg_init(be_irg_t *birg)
+static void *arm_cg_init(ir_graph *irg)
{
static ir_type *int_tp = NULL;
- arm_isa_t *isa = (arm_isa_t *)birg->main_env->arch_env;
+ arm_isa_t *isa = (arm_isa_t *) be_get_irg_arch_env(irg);
arm_code_gen_t *cg;
if (! int_tp) {
cg = XMALLOC(arm_code_gen_t);
cg->impl = &arm_code_gen_if;
- cg->irg = birg->irg;
+ cg->irg = irg;
cg->reg_set = new_set(arm_cmp_irn_reg_assoc, 1024);
cg->isa = isa;
- cg->birg = birg;
cg->int_tp = int_tp;
cg->have_fp_insn = 0;
- cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
+ cg->dump = (be_get_irg_options(irg)->dump_flags & DUMP_BE) ? 1 : 0;
FIRM_DBG_REGISTER(cg->mod, "firm.be.arm.cg");
ir_graph *irg; /**< current irg */
set *reg_set; /**< set to memorize registers for FIRM nodes (e.g. phi) */
arm_isa_t *isa; /**< the isa instance */
- be_irg_t *birg; /**< The be-irg (contains additional information about the irg) */
ir_type *int_tp; /**< the int type, needed for Call conversion */
char have_fp_insn; /**< non-zero, if fp hardware instructions are emitted */
char dump; /**< set to 1 if graphs should be dumped */
#define FIRM_BE_TYPES_H
typedef unsigned int sched_timestep_t;
-typedef struct be_irg_t be_irg_t;
typedef struct arch_register_class_t arch_register_class_t;
typedef struct arch_register_req_t arch_register_req_t;
};
/**
- * The ABI information for the current birg.
+ * The ABI information for the current graph.
*/
struct _be_abi_irg_t {
- be_irg_t *birg; /**< The back end IRG. */
ir_graph *irg;
const arch_env_t *arch_env;
survive_dce_t *dce_survivor;
*/
static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
{
- ir_graph *irg = env->birg->irg;
- const arch_env_t *arch_env = env->birg->main_env->arch_env;
+ ir_graph *irg = env->irg;
+ const arch_env_t *arch_env = env->arch_env;
ir_type *call_tp = get_Call_type(irn);
ir_node *call_ptr = get_Call_ptr(irn);
int n_params = get_method_n_params(call_tp);
const arch_register_t *sp = arch_env->sp;
be_abi_call_t *call = be_abi_call_new(sp->reg_class);
ir_mode *mach_mode = sp->reg_class->mode;
- struct obstack *obst = be_get_birg_obst(irg);
+ struct obstack *obst = be_get_be_obst(irg);
int no_alloc = call->flags.bits.frame_is_setup_on_call;
int n_res = get_method_n_ress(call_tp);
int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
*/
static void process_calls(be_abi_irg_t *env)
{
- ir_graph *irg = env->birg->irg;
+ ir_graph *irg = env->irg;
env->call->flags.bits.irg_is_leaf = 1;
irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, env);
- ir_heights = heights_new(env->birg->irg);
+ ir_heights = heights_new(env->irg);
irg_block_walk_graph(irg, NULL, process_ops_in_block, env);
heights_free(ir_heights);
}
ir_entity ***param_map)
{
int dir = env->call->flags.bits.left_to_right ? 1 : -1;
- int inc = env->birg->main_env->arch_env->stack_dir * dir;
+ int inc = env->arch_env->stack_dir * dir;
int n = get_method_n_params(method_type);
int curr = inc > 0 ? 0 : n - 1;
- struct obstack *obst = be_get_birg_obst(env->irg);
+ struct obstack *obst = be_get_be_obst(env->irg);
int ofs = 0;
char buf[128];
ir_type *res;
int i;
- ident *id = get_entity_ident(get_irg_entity(env->birg->irg));
+ ident *id = get_entity_ident(get_irg_entity(env->irg));
ir_entity **map;
*param_map = map = OALLOCN(obst, ir_entity*, n);
ir_node *mem, int n_res)
{
be_abi_call_t *call = env->call;
- const arch_env_t *arch_env = env->birg->main_env->arch_env;
+ const arch_env_t *arch_env = env->arch_env;
dbg_info *dbgi;
pmap *reg_map = pmap_create();
ir_node *keep = pmap_get(env->keep_map, bl);
if (keep) {
stack = get_irn_n(keep, 0);
kill_node(keep);
- remove_End_keepalive(get_irg_end(env->birg->irg), keep);
+ remove_End_keepalive(get_irg_end(env->irg), keep);
}
/* Insert results for Return into the register map. */
}
/* we have to pop the shadow parameter in in case of struct returns */
pop = call->pop;
- ret = be_new_Return(dbgi, env->birg->irg, bl, n_res, pop, n, in);
+ ret = be_new_Return(dbgi, env->irg, bl, n_res, pop, n, in);
/* Set the register classes of the return's parameter accordingly. */
for (i = 0; i < n; ++i) {
static void fix_address_of_parameter_access(be_abi_irg_t *env, ent_pos_pair *value_param_list)
{
be_abi_call_t *call = env->call;
- ir_graph *irg = env->birg->irg;
+ ir_graph *irg = env->irg;
ent_pos_pair *entry, *new_list;
ir_type *frame_tp;
int i, n = ARR_LEN(value_param_list);
static void modify_irg(be_abi_irg_t *env)
{
be_abi_call_t *call = env->call;
- const arch_env_t *arch_env= env->birg->main_env->arch_env;
+ const arch_env_t *arch_env= env->arch_env;
const arch_register_t *sp = arch_env->sp;
- ir_graph *irg = env->birg->irg;
+ ir_graph *irg = env->irg;
ir_node *end;
ir_node *old_mem;
ir_node *new_mem_proj;
ir_node *mem;
ir_type *method_type = get_entity_type(get_irg_entity(irg));
- struct obstack *obst = be_get_birg_obst(irg);
+ struct obstack *obst = be_get_be_obst(irg);
int n_params;
int i, n;
ir_node *load_res;
be_abi_irg_t *env = data;
int arity, i;
- be_main_env_t *be = env->birg->main_env;
+ be_main_env_t *be = be_birg_from_irg(env->irg)->main_env;
arity = get_irn_arity(node);
for (i = 0; i < arity; ++i) {
/* everything else is accessed relative to EIP */
mode = get_irn_mode(pred);
- pic_base = arch_code_generator_get_pic_base(env->birg->cg);
+ pic_base = arch_code_generator_get_pic_base(be_get_irg_cg(env->irg));
/* all ok now for locally constructed stuff */
if (can_address_relative(entity)) {
}
}
-be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
+be_abi_irg_t *be_abi_introduce(ir_graph *irg)
{
- be_abi_irg_t *env = XMALLOC(be_abi_irg_t);
- ir_node *old_frame = get_irg_frame(birg->irg);
- ir_graph *irg = birg->irg;
- struct obstack *obst = be_get_birg_obst(irg);
+ be_abi_irg_t *env = XMALLOC(be_abi_irg_t);
+ ir_node *old_frame = get_irg_frame(irg);
+ struct obstack *obst = be_get_be_obst(irg);
+ be_options_t *options = be_get_irg_options(irg);
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
pmap_entry *ent;
ir_node *dummy;
unsigned *limited_bitset;
arch_register_req_t *sp_req;
- be_omit_fp = birg->main_env->options->omit_fp;
- be_omit_leaf_fp = birg->main_env->options->omit_leaf_fp;
+ be_omit_fp = options->omit_fp;
+ be_omit_leaf_fp = options->omit_leaf_fp;
obstack_init(obst);
- env->arch_env = birg->main_env->arch_env;
+ env->arch_env = arch_env;
env->method_type = get_entity_type(get_irg_entity(irg));
- env->call = be_abi_call_new(env->arch_env->sp->reg_class);
- arch_env_get_call_abi(env->arch_env, env->method_type, env->call);
+ env->call = be_abi_call_new(arch_env->sp->reg_class);
+ arch_env_get_call_abi(arch_env, env->method_type, env->call);
env->ignore_regs = pset_new_ptr_default();
env->keep_map = pmap_create();
env->dce_survivor = new_survive_dce();
- env->birg = birg;
env->irg = irg;
sp_req = OALLOCZ(obst, arch_register_req_t);
sp_req->type = arch_register_req_type_limited
| arch_register_req_type_produces_sp;
- sp_req->cls = arch_register_get_class(env->arch_env->sp);
+ sp_req->cls = arch_register_get_class(arch_env->sp);
limited_bitset = rbitset_obstack_alloc(obst, sp_req->cls->n_regs);
- rbitset_set(limited_bitset, arch_register_get_index(env->arch_env->sp));
+ rbitset_set(limited_bitset, arch_register_get_index(arch_env->sp));
sp_req->limited = limited_bitset;
- if (env->arch_env->sp->type & arch_register_type_ignore) {
+ if (arch_env->sp->type & arch_register_type_ignore) {
sp_req->type |= arch_register_req_type_ignore;
}
- env->init_sp = dummy = new_r_Dummy(irg, env->arch_env->sp->reg_class->mode);
+ env->init_sp = dummy = new_r_Dummy(irg, arch_env->sp->reg_class->mode);
env->calls = NEW_ARR_F(ir_node*, 0);
- if (birg->main_env->options->pic) {
+ if (options->pic) {
irg_walk_graph(irg, fix_pic_symconsts, NULL, env);
}
Beware: init backend abi call object after processing calls,
otherwise some information might be not yet available.
*/
- env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
+ env->cb = env->call->cb->init(env->call, arch_env, irg);
/* Process the IRG */
modify_irg(env);
be_ssa_construction_env_t senv;
int i, len;
ir_node **phis;
- be_irg_t *birg = env->birg;
ir_graph *irg = env->irg;
be_lv_t *lv = be_get_irg_liveness(irg);
fix_stack_walker_env_t walker_env;
walker_env.sp_nodes = NEW_ARR_F(ir_node*, 0);
- irg_walk_graph(birg->irg, collect_stack_nodes_walker, NULL, &walker_env);
+ irg_walk_graph(irg, collect_stack_nodes_walker, NULL, &walker_env);
/* nothing to be done if we didn't find any node, in fact we mustn't
* continue, as for endless loops incsp might have had no users and is bad
if (be_is_IncSP(irn)) {
/* fill in real stack frame size */
if (ofs == BE_STACK_FRAME_SIZE_EXPAND) {
- ir_type *frame_type = get_irg_frame_type(env->birg->irg);
+ ir_type *frame_type = get_irg_frame_type(env->irg);
ofs = (int) get_type_size_bytes(frame_type);
be_set_IncSP_offset(irn, ofs);
} else if (ofs == BE_STACK_FRAME_SIZE_SHRINK) {
- ir_type *frame_type = get_irg_frame_type(env->birg->irg);
+ ir_type *frame_type = get_irg_frame_type(env->irg);
ofs = - (int)get_type_size_bytes(frame_type);
be_set_IncSP_offset(irn, ofs);
} else {
void be_abi_fix_stack_bias(be_abi_irg_t *env)
{
- ir_graph *irg = env->birg->irg;
+ ir_graph *irg = env->irg;
ir_type *frame_tp;
int i;
struct bias_walk bw;
*/
ir_type *be_abi_call_get_method_type(const be_abi_call_t *call);
-be_abi_irg_t *be_abi_introduce(be_irg_t *bi);
+be_abi_irg_t *be_abi_introduce(ir_graph *irg);
/**
* Fix the stack bias for all nodes accessing the stack frame using the
struct arch_code_generator_if_t {
/**
* Initialize the code generator.
- * @param birg A backend IRG session.
+ * @param irg A graph
* @return A newly created code generator.
*/
- void *(*init)(be_irg_t *birg);
+ void *(*init)(ir_graph *irg);
/**
* return node used as base in pic code addresses
* Backend may provide an own spiller.
* This spiller needs to spill all register classes.
*/
- void (*spill)(void *self, be_irg_t *birg);
+ void (*spill)(void *self, ir_graph *irg);
/**
* Called before register allocation.
#define arch_code_generator_after_ra(cg) _arch_cg_call(cg, after_ra)
#define arch_code_generator_finish(cg) _arch_cg_call(cg, finish)
#define arch_code_generator_done(cg) _arch_cg_call(cg, done)
-#define arch_code_generator_spill(cg, birg) _arch_cg_call_env(cg, birg, spill)
+#define arch_code_generator_spill(cg, irg) _arch_cg_call_env(cg, irg, spill)
#define arch_code_generator_has_spiller(cg) ((cg)->impl->spill != NULL)
#define arch_code_generator_get_pic_base(cg) \
((cg)->impl->get_pic_base != NULL ? (cg)->impl->get_pic_base(cg) : NULL)
start_entry = finish_block_schedule(&env);
block_list = create_blocksched_array(&env, start_entry, env.blockcount,
- be_get_birg_obst(irg));
+ be_get_be_obst(irg));
DEL_ARR_F(env.edges);
obstack_free(&obst, NULL);
start_entry = finish_block_schedule(&env.env);
block_list = create_blocksched_array(&env.env, start_entry,
env.env.blockcount,
- be_get_birg_obst(irg));
+ be_get_be_obst(irg));
DEL_ARR_F(env.ilpedges);
free_lpp(env.lpp);
}
be_timer_push(T_RA_SPILL);
- arch_code_generator_spill(be_get_irg_cg(irg), be_birg_from_irg(irg));
+ arch_code_generator_spill(be_get_irg_cg(irg), irg);
be_timer_pop(T_RA_SPILL);
dump(BE_CH_DUMP_SPILL, irg, NULL, "spill");
lpp_sol_state_t ilp_go(ilp_env_t *ienv)
{
- be_main_env_t *main_env = ienv->co->cenv->birg->main_env;
+ be_options_t *options = be_get_irg_options(ienv->co->irg);
sr_remove(ienv->sr);
lpp_set_log(ienv->lp, stdout);
if (solve_net)
- lpp_solve_net(ienv->lp, main_env->options->ilp_server, main_env->options->ilp_solver);
+ lpp_solve_net(ienv->lp, options->ilp_server, options->ilp_solver);
else {
#ifdef LPP_SOLVE_NET
fprintf(stderr, "can only solve ilp over the net\n");
char buf[1024];
size_t i, n;
char *tu_name;
+ const char *cup_name = be_birg_from_irg(irg)->main_env->cup_name;
- n = strlen(env->birg->main_env->cup_name);
+ n = strlen(cup_name);
tu_name = XMALLOCN(char, n + 1);
- strcpy(tu_name, env->birg->main_env->cup_name);
+ strcpy(tu_name, cup_name);
for (i = 0; i < n; ++i)
if (tu_name[i] == '.')
tu_name[i] = '_';
bitset_clear_all(pbqp_co.restricted_nodes);
/* get ignored registers */
- be_put_ignore_regs(co->cenv->birg, co->cls, pbqp_co.ignore_reg);
+ be_put_ignore_regs(co->cenv->irg, co->cls, pbqp_co.ignore_reg);
/* add costs vector to nodes */
be_ifg_foreach_node(co->cenv->ifg, &nodes_it, ifg_node) {
assert(flag_consumers == NULL);
}
-void be_sched_fix_flags(be_irg_t *birg, const arch_register_class_t *flag_cls,
+void be_sched_fix_flags(ir_graph *irg, const arch_register_class_t *flag_cls,
func_rematerialize remat_func)
{
- ir_graph *irg = be_get_birg_irg(birg);
-
flag_class = flag_cls;
flags_reg = & flag_class->regs[0];
remat = remat_func;
remat = &default_remat;
ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
- irg_block_walk_graph(irg, fix_flags_walker, NULL, birg->lv);
+ irg_block_walk_graph(irg, fix_flags_walker, NULL, be_get_irg_liveness(irg));
ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
if (changed) {
* and consumer of flags. It does so by moving down/rematerialising of the
* nodes. This does not work across blocks.
*/
-void be_sched_fix_flags(be_irg_t *birg, const arch_register_class_t *flag_cls,
+void be_sched_fix_flags(ir_graph *irg, const arch_register_class_t *flag_cls,
func_rematerialize remat_func);
#endif
void *irg_env; /**< An environment for the irg scheduling, provided by the backend */
void *block_env; /**< An environment for scheduling a block, provided by the backend */
const arch_env_t *arch_env;
- const be_main_env_t *main_env;
const be_machine_t *cpu; /**< the current abstract machine */
ilpsched_options_t *opts; /**< the ilp options for current irg */
- const be_irg_t *birg; /**< The birg object */
- be_options_t *be_opts; /**< backend options */
const ilp_sched_selector_t *sel; /**< The ILP sched selector provided by the backend */
DEBUG_ONLY(firm_dbg_module_t *dbg);
} be_ilpsched_env_t;
int base_num = ba->n_interesting_nodes * ba->n_interesting_nodes;
int estimated_n_var = (int)((double)base_num * fact_var);
int estimated_n_cst = (int)((double)base_num * fact_cst);
+ be_options_t *options = be_get_irg_options(env->irg);
DBG((env->dbg, LEVEL_1, "Creating LPP with estimated numbers: %d vars, %d cst\n",
estimated_n_var, estimated_n_cst));
}
/* solve the ILP */
- lpp_solve_net(lpp, env->main_env->options->ilp_server, env->main_env->options->ilp_solver);
+ lpp_solve_net(lpp, options->ilp_server, options->ilp_solver);
if (logfile)
fclose(logfile);
if (need_heur) {
be_stat_ev("time", -1);
be_stat_ev_dbl("opt", 0.0);
- list_sched_single_block(env->birg, block, env->be_opts);
- }
- else {
+ list_sched_single_block(env->irg, block);
+ } else {
if (lpp) {
double opt = lpp->sol_state == lpp_optimal ? 100.0 : 100.0 * lpp->best_bound / lpp->objval;
be_stat_ev_dbl("time", lpp->sol_time);
/**
* Perform ILP scheduling on the given irg.
*/
-void be_ilp_sched(const be_irg_t *birg, be_options_t *be_opts)
+void be_ilp_sched(ir_graph *irg)
{
be_ilpsched_env_t env;
- ir_graph *irg = be_get_birg_irg(birg);
const arch_env_t *arch_env = be_get_irg_arch_env(irg);
const ilp_sched_selector_t *sel = arch_env->impl->get_ilp_sched_selector(arch_env);
env.sel = sel;
env.irg = irg;
env.height = heights_new(irg);
- env.main_env = birg->main_env;
env.arch_env = arch_env;
env.cpu = arch_env_get_machine(arch_env);
env.opts = &ilp_opts;
- env.birg = birg;
- env.be_opts = be_opts;
phase_init(&env.ph, env.irg, init_ilpsched_irn);
heights_free(env.height);
/* notify backend */
- be_ilp_sched_finish_irg_ilp_schedule(sel, birg->irg, env.irg_env);
+ be_ilp_sched_finish_irg_ilp_schedule(sel, irg, env.irg_env);
stat_ev_ctx_pop("ilpsched");
}
BE_ILP_SCHED_CALL2(node_scheduled, self, irn, cycle, block_env)
/**
- * Perform ILP scheduling on given birg.
+ * Perform ILP scheduling on given irg.
*/
-void be_ilp_sched(const be_irg_t *birg, be_options_t *be_opts);
+void be_ilp_sched(ir_graph *irg);
-#endif /* FIRM_BE_BEILPSCHED_H */
+#endif
if (is_Proj(node))
return;
- obst = be_get_birg_obst(current_ir_graph);
+ obst = be_get_be_obst(current_ir_graph);
info = OALLOCZ(obst, backend_info_t);
assert(node->backend_info == NULL);
return insn;
}
-be_insn_env_t *be_insn_env_init(be_insn_env_t *ie, const be_irg_t *birg,
+be_insn_env_t *be_insn_env_init(be_insn_env_t *ie, ir_graph *irg,
const arch_register_class_t *cls,
struct obstack *obst)
{
ie->cls = cls;
ie->obst = obst;
ie->ignore_colors = bitset_obstack_alloc(obst, cls->n_regs);
- be_abi_put_ignore_regs(birg->abi, cls, ie->ignore_colors);
+ be_abi_put_ignore_regs(be_get_irg_abi(irg), cls, ie->ignore_colors);
return ie;
}
be_insn_t *be_scan_insn(const be_insn_env_t *env, ir_node *irn);
-be_insn_env_t *be_insn_env_init(be_insn_env_t *ie, const be_irg_t *birg, const arch_register_class_t *cls, struct obstack *obst);
+be_insn_env_t *be_insn_env_init(be_insn_env_t *ie, ir_graph *irg, const arch_register_class_t *cls, struct obstack *obst);
#endif /* FIRM_BE_BEINSN_T_H */
/**
* Check, if two values interfere.
- * @param lv Liveness information (in the future we should use a be_irg_t here).
+ * @param lv Liveness information
* @param a The first value.
* @param b The second value.
* @return 1, if a and b interfere, 0 if not.
const ir_edge_t *edge;
ir_node *bb = get_nodes_block(b);
- //stat_ev_dbl("beintlive_ignore", arch_irn_is(lv->birg->main_env->arch_env, a, ignore));
+ //stat_ev_dbl("beintlive_ignore", arch_irn_is(be_get_irg_arch_env(lv->irg), a, ignore));
/*
* If a is live end in b's block it is
/**
* Check, if a node is live in front of another.
- * @param birg The backend irg.
+ * @param irg The backend irg.
* @param irn The node.
* @param where The location to check for.
* @return 1, if @p irn is live in front of @p where.
*/
-static inline int _be_lv_chk_before_irn(const be_irg_t *birg, const ir_node *irn, const ir_node *where)
+static inline int _be_lv_chk_before_irn(ir_graph *irg, const ir_node *irn,
+ const ir_node *where)
{
- const be_lv_t *lv = be_get_irg_liveness(birg->irg);
+ const be_lv_t *lv = be_get_irg_liveness(irg);
const ir_edge_t *edge;
/* the node must strictly dominate the location, else it cannot be live there. */
/**
* Check, if a node is live after another node.
- * @param birg The backend irg.
+ * @param irg The backend irg.
* @param irn The node.
* @param where The location to check for.
* @return 1, if @p irn is live after @p where.
*/
-static inline int _be_lv_chk_after_irn(const be_irg_t *birg, const ir_node *irn, const ir_node *where)
+static inline int _be_lv_chk_after_irn(ir_graph *irg, const ir_node *irn,
+ const ir_node *where)
{
- const be_lv_t *lv = be_get_irg_liveness(birg->irg);
+ const be_lv_t *lv = be_get_irg_liveness(irg);
const ir_edge_t *edge;
if (!_value_dominates(irn, where))
#define value_dominates(a, b) _value_dominates(a, b)
#define dominates_use(a, e) _dominates_use(a, e)
#define strictly_dominates_use(a, e) _strictly_dominates_use(a, e)
-#define be_lv_chk_before_irn(birg, a, b) _be_lv_chk_before_irn(birg, a, b)
-#define be_lv_chk_after_irn(birg, a, b) _be_lv_chk_after_irn(birg, a, b)
+#define be_lv_chk_before_irn(irg, a, b) _be_lv_chk_before_irn(irg, a, b)
+#define be_lv_chk_after_irn(irg, a, b) _be_lv_chk_after_irn(irg, a, b)
-#endif /* _BELIVECHK_T_H */
+#endif
* An ir_graph with additional analysis data about this irg. Also includes some
* backend structures
*/
-struct be_irg_t {
+typedef struct be_irg_t {
ir_graph *irg;
be_main_env_t *main_env;
be_abi_irg_t *abi;
register constraints which we can't keep
in the irg obst, because it gets replace
during code selection) */
-};
+} be_irg_t;
static inline be_irg_t *be_birg_from_irg(const ir_graph *irg)
{
return be_birg_from_irg(irg)->main_env->arch_env;
}
-static inline struct obstack *be_get_birg_obst(const ir_graph *irg)
+static inline struct obstack *be_get_be_obst(const ir_graph *irg)
{
be_irg_t *birg = be_birg_from_irg(irg);
return &birg->obst;
}
/* List schedule a graph. */
-void list_sched(be_irg_t *birg, be_options_t *be_opts)
+void list_sched(ir_graph *irg)
{
- ir_graph *irg = birg->irg;
-
int num_nodes;
sched_env_t env;
mris_env_t *mris = NULL;
list_sched_selector_t sel;
- (void)be_opts;
-
/* Select a scheduler based on backend options */
switch (list_sched_options.select) {
case BE_SCHED_SELECT_TRIVIAL: sel = trivial_selector; break;
switch (list_sched_options.prep) {
case BE_SCHED_PREP_MRIS:
- mris = be_sched_mris_preprocess(birg);
+ mris = be_sched_mris_preprocess(irg);
break;
case BE_SCHED_PREP_RSS:
- rss_schedule_preparation(birg);
+ rss_schedule_preparation(irg);
break;
default:
break;
/* initialize environment for list scheduler */
memset(&env, 0, sizeof(env));
- env.selector = arch_env_get_list_sched_selector(birg->main_env->arch_env, &sel);
+ env.selector = arch_env_get_list_sched_selector(be_get_irg_arch_env(irg), &sel);
env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);
memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));
if (env.selector->init_graph)
- env.selector_env = env.selector->init_graph(env.selector, birg);
+ env.selector_env = env.selector->init_graph(env.selector, irg);
/* Schedule each single block. */
irg_block_walk_graph(irg, list_sched_block, NULL, &env);
}
/* List schedule a block. */
-void list_sched_single_block(const be_irg_t *birg, ir_node *block,
- be_options_t *be_opts)
+void list_sched_single_block(ir_graph *irg, ir_node *block)
{
- ir_graph *irg = birg->irg;
-
int num_nodes;
sched_env_t env;
list_sched_selector_t sel;
- (void)be_opts;
-
/* Select a scheduler based on backend options */
switch (list_sched_options.select) {
case BE_SCHED_SELECT_TRIVIAL: sel = trivial_selector; break;
/* initialize environment for list scheduler */
memset(&env, 0, sizeof(env));
- env.selector = arch_env_get_list_sched_selector(birg->main_env->arch_env, &sel);
+ env.selector = arch_env_get_list_sched_selector(be_get_irg_arch_env(irg), &sel);
env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);
memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));
if (env.selector->init_graph)
- env.selector_env = env.selector->init_graph(env.selector, birg);
+ env.selector_env = env.selector->init_graph(env.selector, irg);
/* Schedule block. */
list_sched_block(block, &env);
* May be NULL.
*
* @param vtab The selector vtab.
- * @param birg The backend graph.
+ * @param irg The backend graph.
* @return The environment pointer that is passed to all other functions in this struct.
*/
- void *(*init_graph)(const list_sched_selector_t *vtab, const be_irg_t *birg);
+ void *(*init_graph)(const list_sched_selector_t *vtab, ir_graph *irg);
/**
* Called before scheduling starts on a block.
* head of the schedule. You can walk this list using the functions in
* list.h.
*
- * @param birg The backend irg.
- * @param be_opts The backend options
+ * @param irg The backend irg.
*/
-void list_sched(be_irg_t *birg, be_options_t *be_opts);
+void list_sched(ir_graph *irg);
/**
* List schedule a block.
* Same as list_sched but only for a certain block (needed for ILP fallback).
*/
-void list_sched_single_block(const be_irg_t *birg, ir_node *block, be_options_t *be_opts);
+void list_sched_single_block(ir_graph *irg, ir_node *block);
-#endif /* FIRM_BE_BELISTSCHED_H */
+#endif
#include "bearch.h"
#include "irnodeset.h"
-struct be_irg_t;
-
typedef enum {
be_lv_state_in = 1,
be_lv_state_end = 2,
#include "irlivechk.h"
#endif
-struct be_irg_t;
-
struct _be_lv_t {
ir_phase ph;
ir_graph *irg;
#define be_lv_has_info_about(lv, irn) bitset_is_set((lv)->nodes, get_irn_idx(irn))
-#endif /* FIRM_BE_BELIVE_T_H */
+#endif
}
/**
- * Compute the register pressure for all classes of all loops in the birg.
- * @param birg The backend irg object
+ * Compute the register pressure for all classes of all loops in the irg.
+ * @param irg The graph
* @return The loop analysis object.
*/
be_loopana_t *be_new_loop_pressure(ir_graph *irg,
typedef struct _be_loopana_t be_loopana_t;
/**
- * Compute the register pressure for a class of all loops in the birg.
+ * Compute the register pressure for a class of all loops in the irg.
* @param irg The graph
* @param cls The register class to compute the pressure for
* @return The loop analysis object.
const arch_register_class_t *cls);
/**
- * Compute the register pressure of all loops in the birg.
+ * Compute the register pressure of all loops in the irg.
* @param irg The graph
* @param cls register class to compute loop pressure for,
* if NULL computes for all classes
cg_if = arch_env_get_code_generator_if(arch_env);
/* get a code generator for this graph. */
- birg->cg = cg_if->init(birg);
+ birg->cg = cg_if->init(irg);
/* some transformations need to be done before abi introduce */
arch_code_generator_before_abi(birg->cg);
/* implement the ABI conventions. */
be_timer_push(T_ABI);
- birg->abi = be_abi_introduce(birg);
+ birg->abi = be_abi_introduce(irg);
be_timer_pop(T_ABI);
dump(DUMP_ABI, irg, "abi");
/* disabled for now, fails for EmptyFor.c and XXEndless.c */
- /* be_live_chk_compare(birg); */
+ /* be_live_chk_compare(irg); */
/* schedule the irg */
be_timer_push(T_SCHED);
default:
fprintf(stderr, "Warning: invalid scheduler (%d) selected, falling back to list scheduler.\n", be_options.scheduler);
case BE_SCHED_LIST:
- list_sched(birg, &be_options);
+ list_sched(irg);
break;
#ifdef WITH_ILP
case BE_SCHED_ILP:
- be_ilp_sched(birg, &be_options);
+ be_ilp_sched(irg);
break;
#endif /* WITH_ILP */
};
assert(bitset_size(bs) == cls->n_regs);
arch_put_non_ignore_regs(cls, bs);
bitset_flip_all(bs);
- be_abi_put_ignore_regs(be_birg_from_irg(irg)->abi, cls, bs);
+ be_abi_put_ignore_regs(be_get_irg_abi(irg), cls, bs);
return bitset_popcount(bs);
}
static arch_register_req_t *allocate_reg_req(const ir_node *node)
{
ir_graph *irg = get_irn_irg(node);
- struct obstack *obst = be_get_birg_obst(irg);
+ struct obstack *obst = be_get_be_obst(irg);
arch_register_req_t *req = obstack_alloc(obst, sizeof(*req));
memset(req, 0, sizeof(*req));
static void *init_node_attr(ir_node *node, int n_inputs, int n_outputs)
{
ir_graph *irg = get_irn_irg(node);
- struct obstack *obst = be_get_birg_obst(irg);
+ struct obstack *obst = be_get_be_obst(irg);
be_node_attr_t *a = get_irn_attr(node);
backend_info_t *info = be_get_info(node);
req = reg->single_req;
} else {
ir_graph *irg = get_irn_irg(node);
- struct obstack *obst = be_get_birg_obst(irg);
+ struct obstack *obst = be_get_be_obst(irg);
req = get_single_req(obst, reg, additional_types);
}
be_set_constr_in(node, pos, req);
req = reg->single_req;
} else {
ir_graph *irg = get_irn_irg(node);
- struct obstack *obst = be_get_birg_obst(irg);
+ struct obstack *obst = be_get_be_obst(irg);
req = get_single_req(obst, reg, additional_types);
}
ir_node *be_new_Phi(ir_node *block, int n_ins, ir_node **ins, ir_mode *mode,
const arch_register_class_t *cls)
{
- struct obstack *obst = be_get_birg_obst(get_irn_irg(block));
+ struct obstack *obst = be_get_be_obst(get_irn_irg(block));
backend_info_t *info;
ir_node *phi = new_r_Phi(block, n_ins, ins, mode);
{
const be_node_attr_t *old_attr = get_irn_attr_const(old_node);
be_node_attr_t *new_attr = get_irn_attr(new_node);
- struct obstack *obst = be_get_birg_obst(irg);
+ struct obstack *obst = be_get_be_obst(irg);
backend_info_t *old_info = be_get_info(old_node);
backend_info_t *new_info = be_get_info(new_node);
typedef struct _be_pbqp_alloc_env_t {
pbqp *pbqp_inst; /**< PBQP instance for register allocation */
- be_irg_t *birg; /**< Back-end IRG session. */
ir_graph *irg; /**< The graph under examination. */
const arch_register_class_t *cls; /**< Current processed register class */
be_lv_t *lv;
char buf[1024];
size_t i, n;
char *tu_name;
+ const char *cup_name = be_birg_from_irg(env->irg)->main_env->cup_name;
- n = strlen(env->birg->main_env->cup_name);
+ n = strlen(cup_name);
tu_name = XMALLOCN(char, n + 1);
- strcpy(tu_name, env->birg->main_env->cup_name);
+ strcpy(tu_name, cup_name);
for (i = 0; i < n; ++i)
if (tu_name[i] == '.')
tu_name[i] = '_';
/* get exec_freq for copy_block */
ir_node *root_bl = get_nodes_block(src_node);
ir_node *copy_bl = is_Phi(src_node) ? get_Block_cfgpred_block(root_bl, pos) : root_bl;
- unsigned long res = get_block_execfreq_ulong(pbqp_alloc_env->birg->exec_freq, copy_bl);
+ ir_exec_freq *exec_freq = be_get_irg_exec_freq(pbqp_alloc_env->irg);
+ unsigned long res = get_block_execfreq_ulong(exec_freq, copy_bl);
/* create afe-matrix */
unsigned row, col;
static void be_pbqp_coloring(be_chordal_env_t *env)
{
ir_graph *irg = env->irg;
- be_irg_t *birg = env->birg;
const arch_register_class_t *cls = env->cls;
be_lv_t *lv = NULL;
plist_element_t *element = NULL;
/* initialize pbqp allocation data structure */
pbqp_alloc_env.pbqp_inst = alloc_pbqp(get_irg_last_idx(irg)); /* initialize pbqp instance */
- pbqp_alloc_env.birg = birg;
pbqp_alloc_env.cls = cls;
pbqp_alloc_env.irg = irg;
pbqp_alloc_env.lv = lv;
pbqp_alloc_env.restr_nodes = XMALLOCNZ(unsigned, get_irg_last_idx(irg));
pbqp_alloc_env.ife_edge_num = XMALLOCNZ(unsigned, get_irg_last_idx(irg));
pbqp_alloc_env.env = env;
- be_put_ignore_regs(birg, cls, pbqp_alloc_env.ignored_regs); /* get ignored registers */
+ be_put_ignore_regs(irg, cls, pbqp_alloc_env.ignored_regs); /* get ignored registers */
/* create costs matrix template for interference edges */
return pred;
}
-void be_peephole_opt(be_irg_t *birg)
+void be_peephole_opt(ir_graph *irg)
{
- ir_graph *irg = be_get_birg_irg(birg);
unsigned n_classes;
unsigned i;
/* we sometimes find BadE nodes in float apps like optest_float.c or
* kahansum.c for example... */
- be_liveness_invalidate(birg->lv);
+ be_liveness_invalidate(be_get_irg_liveness(irg));
be_liveness_assure_sets(be_assure_liveness(irg));
arch_env = be_get_irg_arch_env(irg);
* backend specific optimisations should be performed based on the
* register-liveness information.
*/
-void be_peephole_opt(be_irg_t *birg);
+void be_peephole_opt(ir_graph *irg);
#endif
set_dump_node_edge_hook(old);
}
-mris_env_t *be_sched_mris_preprocess(const be_irg_t *birg)
+mris_env_t *be_sched_mris_preprocess(ir_graph *irg)
{
mris_env_t *env = XMALLOC(mris_env_t);
- ir_graph *irg = be_get_birg_irg(birg);
phase_init(&env->ph, irg, mris_irn_data_init);
env->irg = irg;
/**
* Preprocess the irg with the MRIS algorithm.
- * @param birg The backend irg.
+ * @param irg The graph
* @return Private data to be kept.
*/
-mris_env_t *be_sched_mris_preprocess(const be_irg_t *birg);
+mris_env_t *be_sched_mris_preprocess(ir_graph *irg);
/**
* Cleanup the MRIS preprocessing.
*/
void dump_ir_block_graph_mris(mris_env_t *env, const char *suffix);
-#endif /* FIRM_BE_BESCHEDMRIS_H */
+#endif
static void *normal_init_graph(const list_sched_selector_t *vtab,
- const be_irg_t *birg)
+ ir_graph *irg)
{
instance_t* inst = XMALLOC(instance_t);
- ir_graph* irg = be_get_birg_irg(birg);
heights_t* heights;
(void)vtab;
return irn;
}
-static void *random_init_graph(const list_sched_selector_t *vtab, const be_irg_t *birg)
+static void *random_init_graph(const list_sched_selector_t *vtab, ir_graph *irg)
{
(void)vtab;
- (void)birg;
+ (void)irg;
/* Using time(NULL) as a seed here gives really random results,
but is NOT deterministic which makes debugging impossible.
Moreover no-one want non-deterministic compilers ... */
return res;
}
-static void *reg_pressure_graph_init(const list_sched_selector_t *vtab, const be_irg_t *birg)
+static void *reg_pressure_graph_init(const list_sched_selector_t *vtab, ir_graph *irg)
{
reg_pressure_main_env_t *main_env = XMALLOC(reg_pressure_main_env_t);
main_env->vtab = vtab;
- irg_walk_graph(be_get_birg_irg(birg), firm_clear_link, NULL, NULL);
+ irg_walk_graph(irg, firm_clear_link, NULL, NULL);
return main_env;
}
/**
* Preprocess the irg for scheduling.
*/
-void rss_schedule_preparation(be_irg_t *birg)
+void rss_schedule_preparation(ir_graph *irg)
{
- ir_graph *irg = be_get_birg_irg(birg);
rss_t rss;
FIRM_DBG_REGISTER(rss.dbg, "firm.be.sched.rss");
rss.irg = irg;
rss.arch_env = be_get_irg_arch_env(irg);
- rss.abi = birg->abi;
+ rss.abi = be_get_irg_abi(irg);
rss.h = heights_new(irg);
rss.nodes = plist_new();
rss.opts = &rss_options;
plist_free(rss.nodes);
be_liveness_free(rss.liveness);
- if (birg->main_env->options->dump_flags & DUMP_SCHED)
- dump_ir_graph(rss.irg, "rss");
+ if (be_get_irg_options(irg)->dump_flags & DUMP_SCHED)
+ dump_ir_graph(irg, "rss");
}
/**
* Perform RSS schedule preprocessing for the given irg.
- * @param birg The backend irg object
+ * @param irg The graph
*/
-void rss_schedule_preparation(be_irg_t *birg);
+void rss_schedule_preparation(ir_graph *irg);
-#endif /* FIRM_BE_BESCHEDRSS_H */
+#endif
/**
* Allocates memory and initializes trace scheduling environment.
- * @param birg The backend irg object
+ * @param irg The graph
* @return The environment
*/
-static trace_env_t *trace_init(const be_irg_t *birg)
+static trace_env_t *trace_init(ir_graph *irg)
{
trace_env_t *env = XMALLOCZ(trace_env_t);
- ir_graph *irg = be_get_birg_irg(birg);
int nn = get_irg_last_idx(irg);
env->curr_time = 0;
return irn;
}
-static void *muchnik_init_graph(const list_sched_selector_t *vtab, const be_irg_t *birg)
+static void *muchnik_init_graph(const list_sched_selector_t *vtab, ir_graph *irg)
{
- trace_env_t *env = trace_init(birg);
+ trace_env_t *env = trace_init(irg);
env->selector = vtab;
- env->selector_env = (void*) be_get_irg_arch_env(birg->irg);
+ env->selector_env = (void*) be_get_irg_arch_env(irg);
return (void *)env;
}
return irn;
}
-static void *trivial_init_graph(const list_sched_selector_t *vtab, const be_irg_t *birg)
+static void *trivial_init_graph(const list_sched_selector_t *vtab, ir_graph *irg)
{
(void)vtab;
- (void)birg;
+ (void)irg;
return NULL;
}
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
typedef struct _spill_t {
- ir_node *spill;
- const ir_mode *mode; /**< mode of the spilled value */
- int alignment; /**< alignment for the spilled value */
- int spillslot; /**< index into spillslot_unionfind structure */
+ ir_node *spill;
+ const ir_mode *mode; /**< mode of the spilled value */
+ int alignment; /**< alignment for the spilled value */
+ int spillslot; /**< index into spillslot_unionfind structure */
} spill_t;
typedef struct _affinity_edge_t {
double affinity;
- int slot1, slot2;
+ int slot1;
+ int slot2;
} affinity_edge_t;
struct _be_fec_env_t {
- struct obstack obst;
+ struct obstack obst;
const arch_env_t *arch_env;
- be_irg_t *birg;
- set *spills;
- ir_node **reloads;
+ ir_graph *irg;
+ set *spills;
+ ir_node **reloads;
affinity_edge_t **affinity_edges;
- set *memperms;
+ set *memperms;
};
/** Compare 2 affinity edges (used in quicksort) */
int i, arity;
spill_t spill, *res;
int hash = hash_irn(node);
- const ir_exec_freq *exec_freq = be_get_irg_exec_freq(env->birg->irg);
+ const ir_exec_freq *exec_freq = be_get_irg_exec_freq(env->irg);
assert(is_Phi(node));
return res;
}
-static int my_values_interfere2(be_irg_t *birg, const ir_node *a,
+static int my_values_interfere2(ir_graph *irg, const ir_node *a,
const ir_node *b)
{
- be_lv_t *lv = be_get_irg_liveness(birg->irg);
+ be_lv_t *lv = be_get_irg_liveness(irg);
int a2b = _value_dominates(a, b);
int b2a = _value_dominates(b, a);
/**
* same as values_interfere but with special handling for Syncs
*/
-static int my_values_interfere(be_irg_t *birg, ir_node *a, ir_node *b)
+static int my_values_interfere(ir_graph *irg, ir_node *a, ir_node *b)
{
if (is_Sync(a)) {
int i, arity = get_irn_arity(a);
for (i = 0; i < arity; ++i) {
ir_node *in = get_irn_n(a, i);
- if (my_values_interfere(birg, in, b))
+ if (my_values_interfere(irg, in, b))
return 1;
}
return 0;
for (i = 0; i < arity; ++i) {
ir_node *in = get_irn_n(b, i);
/* a is not a sync, so no need for my_values_interfere */
- if (my_values_interfere2(birg, a, in))
+ if (my_values_interfere2(irg, a, in))
return 1;
}
return 0;
}
- return my_values_interfere2(birg, a, b);
+ return my_values_interfere2(irg, a, b);
}
/**
if (is_NoMem(spill2))
continue;
- if (my_values_interfere(env->birg, spill1, spill2)) {
+ if (my_values_interfere(env->irg, spill1, spill2)) {
DB((dbg, DBG_INTERFERENCES,
"Slot %d and %d interfere\n", i, i2));
static ir_entity* create_stack_entity(be_fec_env_t *env, spill_slot_t *slot)
{
- ir_graph *irg = be_get_birg_irg(env->birg);
+ ir_graph *irg = env->irg;
ir_type *frame = get_irg_frame_type(irg);
/* TODO: backend should be able to specify wether we want spill slots
* at begin or end of frame */
static void create_memperms(be_fec_env_t *env)
{
const arch_env_t *arch_env = env->arch_env;
- ir_graph *irg = be_get_birg_irg(env->birg);
- memperm_t *memperm;
+ ir_graph *irg = env->irg;
+ memperm_t *memperm;
for (memperm = set_first(env->memperms); memperm != NULL; memperm = set_next(env->memperms)) {
ir_node **nodes = ALLOCAN(ir_node*, memperm->entrycount);
return slotcount;
}
-be_fec_env_t *be_new_frame_entity_coalescer(be_irg_t *birg)
+be_fec_env_t *be_new_frame_entity_coalescer(ir_graph *irg)
{
- const arch_env_t *arch_env = birg->main_env->arch_env;
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
be_fec_env_t *env = XMALLOC(be_fec_env_t);
- be_liveness_assure_chk(be_assure_liveness(birg->irg));
+ be_liveness_assure_chk(be_assure_liveness(irg));
obstack_init(&env->obst);
env->arch_env = arch_env;
- env->birg = birg;
+ env->irg = irg;
env->spills = new_set(cmp_spill, 10);
env->reloads = NEW_ARR_F(ir_node*, 0);
env->affinity_edges = NEW_ARR_F(affinity_edge_t*, 0);
be_node_needs_frame_entity(env, node, mode, align);
}
-void be_coalesce_spillslots(be_irg_t *birg)
+void be_coalesce_spillslots(ir_graph *irg)
{
- be_fec_env_t *env = be_new_frame_entity_coalescer(birg);
+ be_fec_env_t *env = be_new_frame_entity_coalescer(irg);
/* collect reloads */
- irg_walk_graph(birg->irg, NULL, collect_spills_walker, env);
+ irg_walk_graph(irg, NULL, collect_spills_walker, env);
be_assign_entities(env);
/**
* Initializes a new frame entity coalescer environment
*/
-be_fec_env_t *be_new_frame_entity_coalescer(be_irg_t *birg);
+be_fec_env_t *be_new_frame_entity_coalescer(ir_graph *irg);
/**
* Frees a frame entity coalescer environment
* Coalesces spillslots and minimizes the number of memcopies induced by
* memory-phis.
*/
-void be_coalesce_spillslots(be_irg_t *birg);
+void be_coalesce_spillslots(ir_graph *irg);
-#endif /* FIRM_BE_BESPILLSLOTS_H */
+#endif
* Initializes an SSA construction environment.
*
* @param env an SSA empty construction environment
- * @param birg
+ * @param irg the graph
*/
void be_ssa_construction_init(be_ssa_construction_env_t *env, ir_graph *irg);
}
}
-void be_assure_state(be_irg_t *birg, const arch_register_t *reg, void *func_env,
+void be_assure_state(ir_graph *irg, const arch_register_t *reg, void *func_env,
create_spill_func create_spill,
create_reload_func create_reload)
{
minibelady_env_t env;
- ir_graph *irg = be_get_birg_irg(birg);
spill_info_t *info;
be_lv_t *lv = be_assure_liveness(irg);
* This functions asserts that the state is switched to fullfill all state
* requirements of nodes.
*/
-void be_assure_state(be_irg_t *birg, const arch_register_t *reg, void *func_env,
+void be_assure_state(ir_graph *irg, const arch_register_t *reg, void *func_env,
create_spill_func spill_func,
create_reload_func reload_func);
-#endif /* FIRM_BE_BESTATE_H */
+#endif
static ir_node *new_be_Anchor(ir_graph *irg)
{
- struct obstack *obst = be_get_birg_obst(irg);
+ struct obstack *obst = be_get_be_obst(irg);
backend_info_t *info;
ir_node *new_anchor;
ir_graph *old_current_ir_graph = current_ir_graph;
struct obstack *old_obst = NULL;
struct obstack *new_obst = NULL;
- be_irg_t *birg = be_birg_from_irg(irg);
current_ir_graph = irg;
be_liveness_invalidate(be_get_irg_liveness(irg));
/* Hack for now, something is buggy with invalidate liveness... */
- birg->lv = NULL;
+ be_birg_from_irg(irg)->lv = NULL;
be_invalidate_dom_front(irg);
/* recalculate edges */
edges_deactivate(irg);
edges_activate(irg);
-
- if (birg->lv) {
- be_liveness_free(birg->lv);
- birg->lv = be_liveness(irg);
- }
}
if (is_ia32_Pop(irn) || is_ia32_PopMem(irn)) {
ia32_code_gen_t *cg = ia32_current_cg;
- int omit_fp = be_abi_omit_fp(cg->birg->abi);
+ int omit_fp = be_abi_omit_fp(be_get_irg_abi(cg->irg));
if (omit_fp) {
/* Pop nodes modify the stack pointer before calculating the
* destination address, so fix this here
ia32_setup_fpu_mode(cg);
/* fixup flags */
- be_sched_fix_flags(cg->birg, &ia32_reg_classes[CLASS_ia32_flags],
+ be_sched_fix_flags(cg->irg, &ia32_reg_classes[CLASS_ia32_flags],
&flags_remat);
ia32_add_missing_keeps(cg);
static void transform_MemPerm(ia32_code_gen_t *cg, ir_node *node)
{
ir_node *block = get_nodes_block(node);
- ir_node *sp = be_abi_get_ignore_irn(cg->birg->abi, &ia32_gp_regs[REG_ESP]);
+ ir_node *sp = be_abi_get_ignore_irn(be_get_irg_abi(cg->irg), &ia32_gp_regs[REG_ESP]);
int arity = be_get_MemPerm_entity_arity(node);
ir_node **pops = ALLOCAN(ir_node*, arity);
ir_node *in[1];
{
ia32_code_gen_t *cg = self;
ir_graph *irg = cg->irg;
- be_fec_env_t *fec_env = be_new_frame_entity_coalescer(cg->birg);
+ be_fec_env_t *fec_env = be_new_frame_entity_coalescer(cg->irg);
/* create and coalesce frame entities */
irg_walk_graph(irg, NULL, ia32_collect_frame_entity_nodes, fec_env);
/* we might have to rewrite x87 virtual registers */
if (cg->do_x87_sim) {
- x87_simulate_graph(cg->birg);
+ x87_simulate_graph(cg->irg);
}
/* do peephole optimisations */
return get_eip;
}
-static void *ia32_cg_init(be_irg_t *birg);
+static void *ia32_cg_init(ir_graph *irg);
static const arch_code_generator_if_t ia32_code_gen_if = {
ia32_cg_init,
/**
* Initializes a IA32 code generator.
*/
-static void *ia32_cg_init(be_irg_t *birg)
+static void *ia32_cg_init(ir_graph *irg)
{
- ia32_isa_t *isa = (ia32_isa_t *)birg->main_env->arch_env;
+ ia32_isa_t *isa = (ia32_isa_t *)be_get_irg_arch_env(irg);
ia32_code_gen_t *cg = XMALLOCZ(ia32_code_gen_t);
cg->impl = &ia32_code_gen_if;
- cg->irg = birg->irg;
+ cg->irg = irg;
cg->isa = isa;
- cg->birg = birg;
cg->blk_sched = NULL;
- cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
- cg->gprof = (birg->main_env->options->gprof) ? 1 : 0;
+ cg->dump = (be_get_irg_options(irg)->dump_flags & DUMP_BE) ? 1 : 0;
+ cg->gprof = (be_get_irg_options(irg)->gprof) ? 1 : 0;
if (cg->gprof) {
/* Linux gprof implementation needs base pointer */
- birg->main_env->options->omit_fp = 0;
+ be_get_irg_options(irg)->omit_fp = 0;
}
/* enter it */
ir_graph *irg; /**< current irg */
set *reg_set; /**< set to memorize registers for non-ia32 nodes (e.g. phi nodes) */
ia32_isa_t *isa; /**< for fast access to the isa object */
- be_irg_t *birg; /**< The be-irg (contains additional information about the irg) */
ir_node **blk_sched; /**< an array containing the scheduled blocks */
unsigned do_x87_sim:1; /**< set to 1 if x87 simulation should be enforced */
unsigned dump:1; /**< set to 1 if graphs should be dumped */
}
}
-void ia32_calculate_non_address_mode_nodes(be_irg_t *birg)
+void ia32_calculate_non_address_mode_nodes(ir_graph *irg)
{
- ir_graph *irg = be_get_birg_irg(birg);
be_lv_t *lv = be_assure_liveness(irg);
non_address_mode_nodes = bitset_malloc(get_irg_last_idx(irg));
* Mark those nodes of the given graph that cannot be used inside an
* address mode because there values must be materialized in registers.
*/
-void ia32_calculate_non_address_mode_nodes(be_irg_t *birg);
+void ia32_calculate_non_address_mode_nodes(ir_graph *irg);
/**
* Free the non_address_mode information.
if (r_clobber_bits != 0) {
if (parsed_constraint.all_registers_allowed) {
parsed_constraint.all_registers_allowed = 0;
- be_abi_set_non_ignore_regs(env_cg->birg->abi,
+ be_abi_set_non_ignore_regs(be_get_irg_abi(env_cg->irg),
parsed_constraint.cls,
&parsed_constraint.allowed_registers);
}
static int should_align_block(const ir_node *block)
{
static const double DELTA = .0001;
- ir_exec_freq *exec_freq = cg->birg->exec_freq;
+ ir_exec_freq *exec_freq = be_get_irg_exec_freq(cg->irg);
ir_node *prev = get_prev_block_sched(block);
double block_freq;
double prev_freq = 0; /**< execfreq of the fallthrough block */
ir_graph *irg = current_ir_graph;
int need_label = block_needs_label(block);
int i, arity;
- ir_exec_freq *exec_freq = cg->birg->exec_freq;
+ ir_exec_freq *exec_freq = be_get_irg_exec_freq(cg->irg);
if (block == get_irg_end_block(irg))
return;
cg = ia32_cg;
isa = cg->isa;
- do_pic = cg->birg->main_env->options->pic;
+ do_pic = be_get_irg_options(cg->irg)->pic;
be_gas_elf_type_char = '@';
get_unique_label(pic_base_label, sizeof(pic_base_label), "PIC_BASE");
- be_dbg_method_begin(entity, be_abi_get_stack_layout(cg->birg->abi));
+ be_dbg_method_begin(entity, be_abi_get_stack_layout(be_get_irg_abi(cg->irg)));
be_gas_emit_function_prolog(entity, ia32_cg_config.function_alignment);
/* we use links to point to target blocks */
}
}
-static void rewire_fpu_mode_nodes(be_irg_t *birg)
+static void rewire_fpu_mode_nodes(ir_graph *irg)
{
collect_fpu_mode_nodes_env_t env;
be_ssa_construction_env_t senv;
const arch_register_t *reg = &ia32_fp_cw_regs[REG_FPCW];
- ir_graph *irg = be_get_birg_irg(birg);
ir_node *initial_value;
ir_node **phis;
be_lv_t *lv = be_get_irg_liveness(irg);
env.state_nodes = NEW_ARR_F(ir_node*, 0);
irg_walk_graph(irg, collect_fpu_mode_nodes_walker, NULL, &env);
- initial_value = be_abi_get_ignore_irn(birg->abi, reg);
+ initial_value = be_abi_get_ignore_irn(be_get_irg_abi(irg), reg);
/* nothing needs to be done, in fact we must not continue as for endless
* loops noone is using the initial_value and it will point to a bad node
be_liveness_update(lv, env.state_nodes[i]);
}
} else {
- be_liveness_invalidate(birg->lv);
+ be_liveness_invalidate(be_get_irg_liveness(irg));
}
/* set registers for the phis */
void ia32_setup_fpu_mode(ia32_code_gen_t *cg)
{
/* do ssa construction for the fpu modes */
- rewire_fpu_mode_nodes(cg->birg);
+ rewire_fpu_mode_nodes(cg->irg);
/* ensure correct fpu mode for operations */
- be_assure_state(cg->birg, &ia32_fp_cw_regs[REG_FPCW],
+ be_assure_state(cg->irg, &ia32_fp_cw_regs[REG_FPCW],
cg, create_fpu_mode_spill, create_fpu_mode_reload);
}
if (ia32_cg_config.use_short_sex_eax)
register_peephole_optimisation(op_ia32_Conv_I2I, peephole_ia32_Conv_I2I);
- be_peephole_opt(cg->birg);
+ be_peephole_opt(cg->irg);
}
/**
*/
static ir_node *get_symconst_base(void)
{
- if (env_cg->birg->main_env->options->pic) {
+ if (be_get_irg_options(env_cg->irg)->pic) {
return arch_code_generator_get_pic_base(env_cg);
}
if (initial_fpcw != NULL)
return initial_fpcw;
- fpcw = be_abi_get_ignore_irn(env_cg->birg->abi,
+ fpcw = be_abi_get_ignore_irn(be_get_irg_abi(env_cg->irg),
&ia32_fp_cw_regs[REG_FPCW]);
initial_fpcw = be_transform_node(fpcw);
/* special case for PIC trampoline calls */
old_no_pic_adjust = no_pic_adjust;
- no_pic_adjust = env_cg->birg->main_env->options->pic;
+ no_pic_adjust = be_get_irg_options(env_cg->irg)->pic;
match_arguments(&am, src_block, NULL, src_ptr, src_mem,
match_am | match_immediate);
*/
void ia32_add_missing_keeps(ia32_code_gen_t *cg)
{
- ir_graph *irg = be_get_birg_irg(cg->birg);
- irg_walk_graph(irg, add_missing_keep_walker, NULL, NULL);
+ irg_walk_graph(cg->irg, add_missing_keep_walker, NULL, NULL);
}
/**
be_timer_push(T_HEIGHTS);
heights = heights_new(cg->irg);
be_timer_pop(T_HEIGHTS);
- ia32_calculate_non_address_mode_nodes(cg->birg);
+ ia32_calculate_non_address_mode_nodes(cg->irg);
/* the transform phase is not safe for CSE (yet) because several nodes get
* attributes set after their creation */
* Replaces all virtual floating point instructions and registers
* by real ones.
*/
-void x87_simulate_graph(be_irg_t *birg)
+void x87_simulate_graph(ir_graph *irg)
{
/* TODO improve code quality (less executed fxch) by using execfreqs */
ir_node *block, *start_block;
blk_state *bl_state;
x87_simulator sim;
- ir_graph *irg = be_get_birg_irg(birg);
/* create the simulator */
x87_init_simulator(&sim, irg);
* Replaces all virtual floating point instructions and registers
* by real ones.
*
- * @param birg the graph to simulate and patch
+ * @param irg the graph to simulate and patch
*
* Registers must be allocated.
*/
-void x87_simulate_graph(be_irg_t *birg);
+void x87_simulate_graph(ir_graph *irg);
/**
* Initializes the x87 simulator.
*/
void ia32_init_x87(void);
-#endif /* FIRM_BE_IA32_IA32_X87_H */
+#endif
static void mips_after_ra(void* self)
{
mips_code_gen_t *cg = self;
- be_coalesce_spillslots(cg->birg);
+ be_coalesce_spillslots(cg->irg);
irg_walk_blkwise_graph(cg->irg, NULL, mips_after_ra_walker, self);
}
free(cg);
}
-static void *mips_cg_init(be_irg_t *birg);
+static void *mips_cg_init(ir_graph *irg);
static const arch_code_generator_if_t mips_code_gen_if = {
mips_cg_init,
/**
* Initializes the code generator.
*/
-static void *mips_cg_init(be_irg_t *birg)
+static void *mips_cg_init(ir_graph *irg)
{
- const arch_env_t *arch_env = be_get_irg_arch_env(birg->irg);
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
mips_isa_t *isa = (mips_isa_t *) arch_env;
mips_code_gen_t *cg = XMALLOCZ(mips_code_gen_t);
cg->impl = &mips_code_gen_if;
- cg->irg = be_get_birg_irg(birg);
+ cg->irg = irg;
cg->reg_set = new_set(mips_cmp_irn_reg_assoc, 1024);
cg->isa = isa;
- cg->birg = birg;
isa->cg = cg;
ir_graph *irg; /**< current irg */
set *reg_set; /**< set to memorize registers for FIRM nodes (e.g. phi) */
mips_isa_t *isa; /**< the isa instance */
- be_irg_t *birg; /**< The be-irg (contains additional information about the irg) */
ir_node **block_schedule;
};
{
sparc_code_gen_t *cg = self;
/* fixup flags register */
- be_sched_fix_flags(cg->birg, &sparc_reg_classes[CLASS_sparc_flags], &sparc_flags_remat);
+ be_sched_fix_flags(cg->irg, &sparc_reg_classes[CLASS_sparc_flags], &sparc_flags_remat);
}
/**
static void sparc_after_ra(void *self)
{
sparc_code_gen_t *cg = self;
- be_coalesce_spillslots(cg->birg);
+ be_coalesce_spillslots(cg->irg);
irg_block_walk_graph(cg->irg, NULL, sparc_after_ra_walker, NULL);
}
free(cg);
}
-static void *sparc_cg_init(be_irg_t *birg);
+static void *sparc_cg_init(ir_graph *irg);
static const arch_code_generator_if_t sparc_code_gen_if = {
sparc_cg_init,
/**
* Initializes the code generator.
*/
-static void *sparc_cg_init(be_irg_t *birg)
+static void *sparc_cg_init(ir_graph *irg)
{
static ir_type *int_tp = NULL;
- sparc_isa_t *isa = (sparc_isa_t *)birg->main_env->arch_env;
+ sparc_isa_t *isa = (sparc_isa_t *) be_get_irg_arch_env(irg);
sparc_code_gen_t *cg;
if (! int_tp) {
cg = XMALLOC(sparc_code_gen_t);
cg->impl = &sparc_code_gen_if;
- cg->irg = birg->irg;
+ cg->irg = irg;
//cg->reg_set = new_set(arm_cmp_irn_reg_assoc, 1024);
cg->isa = isa;
- cg->birg = birg;
//cg->int_tp = int_tp;
//cg->have_fp_insn = 0;
//cg->unknown_gp = NULL;
//cg->unknown_fpa = NULL;
- cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
+ cg->dump = (be_get_irg_options(irg)->dump_flags & DUMP_BE) ? 1 : 0;
/* enter the current code generator */
isa->cg = cg;
ir_graph *irg; /**< current irg */
set *reg_set; /**< set to memorize registers for FIRM nodes (e.g. phi) */
sparc_isa_t *isa; /**< the isa instance */
- be_irg_t *birg; /**< The be-irg (contains additional information about the irg) */
char dump; /**< set to 1 if graphs should be dumped */
} sparc_code_gen_t;
/* register all emitter functions */
sparc_register_emitters();
- be_dbg_method_begin(entity, be_abi_get_stack_layout(cg->birg->abi));
+ be_dbg_method_begin(entity, be_abi_get_stack_layout(be_get_irg_abi(cg->irg)));
/* create the block schedule. For now, we don't need it earlier. */
blk_sched = be_create_block_schedule(irg);