arch_set_irn_register(proj, reg);
/* if the proj projects an ignore register or a node which is set to ignore, propagate this property. */
- if (arch_register_type_is(reg, ignore) || arch_irn_is(env->birg->main_env->arch_env, in[n], ignore))
+ if (arch_register_type_is(reg, ignore) || arch_irn_is(in[n], ignore))
flags |= arch_irn_flags_ignore;
- if (arch_irn_is(env->birg->main_env->arch_env, in[n], modify_sp))
+ if (arch_irn_is(in[n], modify_sp))
flags |= arch_irn_flags_modify_sp;
be_node_set_flags(irn, pos, flags);
{
fix_stack_walker_env_t *env = data;
- if (arch_irn_is(env->arch_env, node, modify_sp)) {
+ if (arch_irn_is(node, modify_sp)) {
assert(get_irn_mode(node) != mode_M && get_irn_mode(node) != mode_T);
ARR_APP1(ir_node*, env->sp_nodes, node);
}
return ops->classify(irn);
}
+/* Get the architecture flags of @p irn by dispatching to its irn ops.
+ * (The arch_env parameter was unused — see the TODO below — and is dropped.) */
-extern arch_irn_flags_t arch_irn_get_flags(const arch_env_t *env, const ir_node *irn)
+arch_irn_flags_t arch_irn_get_flags(const ir_node *irn)
{
const arch_irn_ops_t *ops = get_irn_ops(irn);
- (void)env; // TODO remove parameter
return ops->get_flags(irn);
}
/**
* Get the flags of a node.
- * @param env The architecture environment.
* @param irn The node.
* @return The flags.
*/
-extern arch_irn_flags_t arch_irn_get_flags(const arch_env_t *env, const ir_node *irn);
+arch_irn_flags_t arch_irn_get_flags(const ir_node *irn);
+/* Test whether node @p irn has the arch_irn_flags_<flag> bit set. */
-#define arch_irn_is(env, irn, flag) ((arch_irn_get_flags(env, irn) & arch_irn_flags_ ## flag) != 0)
+#define arch_irn_is(irn, flag) ((arch_irn_get_flags(irn) & arch_irn_flags_ ## flag) != 0)
+/* Test whether output @p pos of @p irn belongs to register class @p cls. */
#define arch_irn_has_reg_class(irn, pos, cls) \
((cls) == arch_get_irn_reg_class(irn, pos))
+/* A node takes part in register allocation iff its result is in class
+ * @p cls and it is not an ignore node. */
-#define arch_irn_consider_in_reg_alloc(env, cls, irn) \
- (arch_irn_has_reg_class(irn, -1, cls) && !arch_irn_is(env, irn, ignore))
+#define arch_irn_consider_in_reg_alloc(cls, irn) \
+ (arch_irn_has_reg_class(irn, -1, cls) && !arch_irn_is(irn, ignore))
/**
* Get the operations of an irn.
*/
+/* Returns non-zero iff @p irn is allocatable in the chordal env's current
+ * register class (i.e. right class and not an ignore node). */
static INLINE int has_reg_class(const be_chordal_env_t *env, const ir_node *irn)
{
- return arch_irn_consider_in_reg_alloc(env->birg->main_env->arch_env, env->cls, irn);
+ return arch_irn_consider_in_reg_alloc(env->cls, irn);
}
static int get_next_free_reg(const be_chordal_alloc_env_t *alloc_env, bitset_t *colors)
bitset_t *live = alloc_env->live;
bitset_t *colors = alloc_env->colors;
bitset_t *in_colors = alloc_env->in_colors;
- const arch_env_t *arch_env = env->birg->main_env->arch_env;
struct list_head *head = get_block_border_head(env, block);
be_lv_t *lv = env->birg->lv;
list_for_each_entry_reverse(border_t, b, head, list) {
ir_node *irn = b->irn;
int nr = get_irn_idx(irn);
- int ignore = arch_irn_is(arch_env, irn, ignore);
+ int ignore = arch_irn_is(irn, ignore);
/*
* Assign a color, if it is a local def. Global defs already have a
be_lv_foreach(lv, bl, be_lv_state_in, idx) {
ir_node *irn = be_lv_get_irn(lv, bl, idx);
- if (arch_irn_consider_in_reg_alloc(env->arch_env, env->cls, irn)) {
+ if (arch_irn_consider_in_reg_alloc(env->cls, irn)) {
const arch_register_t *reg = arch_get_irn_register(irn);
int col = arch_register_get_index(reg);
int x = (col + 1) * opts->h_inter_gap;
#define border_prev(b) (list_entry((b)->list.prev, border_t, list))
#define chordal_has_class(chordal_env, irn) \
- arch_irn_consider_in_reg_alloc(chordal_env->birg->main_env->arch_env, chordal_env->cls, irn)
+ arch_irn_consider_in_reg_alloc(chordal_env->cls, irn)
void be_ra_chordal_color(be_chordal_env_t *chordal_env);
+/* IFG-dumper callback: dump every node except ignore nodes.
+ * @p self is kept only to satisfy the callback signature. */
static int ifg_is_dump_node(void *self, ir_node *irn)
{
- co2_t *env = self;
- return !arch_irn_is(env->co->aenv, irn, ignore);
+ (void)self;
+ return !arch_irn_is(irn, ignore);
}
static void ifg_dump_node_attr(FILE *f, void *self, ir_node *irn)
pqueue_t *chunks; /**< priority queue for chunks */
pset *chunkset; /**< set holding all chunks */
be_ifg_t *ifg; /**< the interference graph */
- const arch_env_t *aenv; /**< the arch environment */
copy_opt_t *co; /**< the copy opt object */
unsigned chunk_visited;
col_cost_t **single_cols;
/* build list of interfering neighbours */
len = 0;
be_ifg_foreach_neighbour(env->ifg, nodes_it, irn, neigh) {
- if (! arch_irn_is(env->aenv, neigh, ignore)) {
+ if (!arch_irn_is(neigh, ignore)) {
obstack_ptr_grow(phase_obst(ph), neigh);
++len;
}
const ir_node *m = neigh->irn;
/* skip ignore nodes */
- if (arch_irn_is(env->aenv, m, ignore))
+ if (arch_irn_is(m, ignore))
continue;
w += node_contains(c->n, m) ? neigh->costs : 0;
int i;
/* skip ignore nodes */
- if (arch_irn_is(env->aenv, n, ignore))
+ if (arch_irn_is(n, ignore))
continue;
/* check if the affinity neighbour interfere */
affinity_node_t *an;
/* skip ignore nodes */
- if (arch_irn_is(env->aenv, n, ignore))
+ if (arch_irn_is(n, ignore))
continue;
n1 = get_co_mst_irn(env, n);
aff_edge_t edge;
/* skip ignore nodes */
- if (arch_irn_is(env->aenv, m, ignore))
+ if (arch_irn_is(m, ignore))
continue;
edge.src = n;
int w = 0;
neighb_t *neigh;
- if (arch_irn_is(env->aenv, irn, ignore))
+ if (arch_irn_is(irn, ignore))
continue;
if (an) {
affinity_node_t *an = get_affinity_info(env->co, irn);
neighb_t *neigh;
- if (arch_irn_is(env->aenv, irn, ignore))
+ if (arch_irn_is(irn, ignore))
continue;
assert(i <= ARR_LEN(chunk->n));
co_mst_irn_t *n2;
/* skip ignore nodes */
- if (arch_irn_is(env->aenv, m, ignore))
+ if (arch_irn_is(m, ignore))
continue;
n2 = get_co_mst_irn(env, m);
neigh = node->int_neighs[j];
/* skip ignore nodes */
- if (arch_irn_is(env->aenv, neigh, ignore))
+ if (arch_irn_is(neigh, ignore))
continue;
nn = get_co_mst_irn(env, neigh);
mst_env.co = co;
mst_env.ignore_regs = ignore_regs;
mst_env.ifg = co->cenv->ifg;
- mst_env.aenv = co->aenv;
mst_env.chunkset = pset_new_ptr(512);
mst_env.chunk_visited = 0;
mst_env.single_cols = phase_alloc(&mst_env.ph, sizeof(*mst_env.single_cols) * n_regs);
co_mst_irn_t *mirn;
const arch_register_t *reg;
- if (arch_irn_is(mst_env.aenv, irn, ignore))
+ if (arch_irn_is(irn, ignore))
continue;
mirn = get_co_mst_irn(&mst_env, irn);
set *edges;
int i, o, n_nodes, n_edges;
- if (arch_irn_is(ienv->co->aenv, aff->irn, ignore))
+ if (arch_irn_is(aff->irn, ignore))
continue;
obstack_init(&ob);
/* get all affinity neighbours */
n_nodes = 0;
co_gs_foreach_neighb(aff, nbr) {
- if (!arch_irn_is(ienv->co->aenv, nbr->irn, ignore)) {
+ if (!arch_irn_is(nbr->irn, ignore)) {
obstack_ptr_grow(&ob, nbr->irn);
++n_nodes;
}
if (pdeq_contains(path, irn))
return;
- if (arch_irn_is(ienv->co->aenv, irn, ignore))
+ if (arch_irn_is(irn, ignore))
return;
/* insert the new irn */
int co_is_optimizable_root(const copy_opt_t *co, ir_node *irn) {
const arch_register_req_t *req;
const arch_register_t *reg;
+ (void)co; // TODO remove parameter
- if (arch_irn_is(co->aenv, irn, ignore))
+ if (arch_irn_is(irn, ignore))
return 0;
reg = arch_get_irn_register(irn);
/* Else insert the argument of the phi to the members of this ou */
DBG((dbg, LEVEL_1, "\t Member: %+F\n", arg));
- if (! arch_irn_is(co->aenv, arg, ignore)) {
+ if (!arch_irn_is(arg, ignore)) {
/* Check if arg has occurred at a prior position in the arg/list */
arg_pos = 0;
for (o=1; o<unit->node_count; ++o) {
for (i = 0; (1U << i) <= other; ++i) {
if (other & (1U << i)) {
ir_node *o = get_irn_n(skip_Proj(irn), i);
- if (!arch_irn_is(co->aenv, o, ignore) &&
+ if (!arch_irn_is(o, ignore) &&
!nodes_interfere(co->cenv, irn, o)) {
++count;
}
for (i = 0; 1U << i <= other; ++i) {
if (other & (1U << i)) {
ir_node *o = get_irn_n(skip_Proj(irn), i);
- if (!arch_irn_is(co->aenv, o, ignore) &&
+ if (!arch_irn_is(o, ignore) &&
!nodes_interfere(co->cenv, irn, o)) {
unit->nodes[k] = o;
unit->costs[k] = co->get_costs(co, irn, o, -1);
int pos, max;
const arch_register_t *reg;
- if (!is_curr_reg_class(co, irn) || arch_irn_is(co->aenv, irn, ignore))
+ if (!is_curr_reg_class(co, irn) || arch_irn_is(irn, ignore))
return;
reg = arch_get_irn_register(irn);
for (i = 0; 1U << i <= other; ++i) {
if (other & (1U << i)) {
ir_node *other = get_irn_n(skip_Proj(irn), i);
- if (! arch_irn_is(co->aenv, other, ignore))
+ if (!arch_irn_is(other, ignore))
add_edges(co, irn, other, co->get_costs(co, irn, other, 0));
}
}
n = n_regs;
be_ifg_foreach_node(ifg, it, irn) {
- if(!arch_irn_is(co->aenv, irn, ignore))
+ if (!arch_irn_is(irn, ignore))
node_map[get_irn_idx(irn)] = n++;
}
fprintf(f, "%d %d\n", n, n_regs);
be_ifg_foreach_node(ifg, it, irn) {
- if(!arch_irn_is(co->aenv, irn, ignore)) {
+ if (!arch_irn_is(irn, ignore)) {
int idx = node_map[get_irn_idx(irn)];
affinity_node_t *a = get_affinity_info(co, irn);
}
be_ifg_foreach_neighbour(ifg, nit, irn, adj) {
- if(!arch_irn_is(co->aenv, adj, ignore) && !co_dump_appel_disjoint_constraints(co, irn, adj)) {
+ if (!arch_irn_is(adj, ignore) &&
+ !co_dump_appel_disjoint_constraints(co, irn, adj)) {
int adj_idx = node_map[get_irn_idx(adj)];
if(idx < adj_idx)
fprintf(f, "%d %d -1\n", idx, adj_idx);
neighb_t *n;
co_gs_foreach_neighb(a, n) {
- if(!arch_irn_is(co->aenv, n->irn, ignore)) {
+ if (!arch_irn_is(n->irn, ignore)) {
int n_idx = node_map[get_irn_idx(n->irn)];
if(idx < n_idx)
fprintf(f, "%d %d %d\n", idx, n_idx, (int) n->costs);
+/* IFG-dumper callback: dump every node except ignore nodes.
+ * @p self is kept only to satisfy the callback signature. */
static int ifg_is_dump_node(void *self, ir_node *irn)
{
- co_ifg_dump_t *cod = self;
- return !arch_irn_is(cod->co->aenv, irn, ignore);
+ (void)self;
+ return !arch_irn_is(irn, ignore);
}
static void ifg_dump_node_attr(FILE *f, void *self, ir_node *irn)
#include "besched_t.h"
#include "benode_t.h"
-static const arch_env_t *arch_env = NULL;
static const arch_register_class_t *flag_class = NULL;
static const arch_register_t *flags_reg = NULL;
static func_rematerialize remat = NULL;
static int is_modify_flags(ir_node *node) {
int i, arity;
- if(arch_irn_is(arch_env, node, modify_flags))
+ if (arch_irn_is(node, modify_flags))
return 1;
if(!be_is_Keep(node))
return 0;
for(i = 0; i < arity; ++i) {
ir_node *in = get_irn_n(node, i);
in = skip_Proj(in);
- if(arch_irn_is(arch_env, in, modify_flags))
+ if (arch_irn_is(in, modify_flags))
return 1;
}
continue;
/* spiller can't (correctly) remat flag consumers at the moment */
- assert(!arch_irn_is(arch_env, node, rematerializable));
+ assert(!arch_irn_is(node, rematerializable));
if(skip_Proj(new_flags_needed) != flags_needed) {
if(flags_needed != NULL) {
}
flag_consumers = node;
set_irn_link(flag_consumers, NULL);
- assert(arch_irn_is(arch_env, flags_needed, rematerializable));
+ assert(arch_irn_is(flags_needed, rematerializable));
} else {
/* link all consumers in a list */
set_irn_link(node, flag_consumers);
{
ir_graph *irg = be_get_birg_irg(birg);
- arch_env = be_get_birg_arch_env(birg);
flag_class = flag_cls;
flags_reg = & flag_class->regs[0];
remat = remat_func;
bitset_free(nodes);
}
-static void int_comp_rec(be_irg_t *birg, be_ifg_t *ifg, ir_node *n, bitset_t *seen)
+static void int_comp_rec(be_ifg_t *ifg, ir_node *n, bitset_t *seen)
{
void *neigh_it = be_ifg_neighbours_iter_alloca(ifg);
ir_node *m;
be_ifg_foreach_neighbour(ifg, neigh_it, n, m) {
- if(!bitset_contains_irn(seen, m) && !arch_irn_is(birg->main_env->arch_env, m, ignore)) {
+ if (!bitset_contains_irn(seen, m) && !arch_irn_is(m, ignore)) {
bitset_add_irn(seen, m);
- int_comp_rec(birg, ifg, m, seen);
+ int_comp_rec(ifg, m, seen);
}
}
ir_node *n;
be_ifg_foreach_node(ifg, nodes_it, n) {
- if (! bitset_contains_irn(seen, n) && ! arch_irn_is(birg->main_env->arch_env, n, ignore)) {
+ if (!bitset_contains_irn(seen, n) && !arch_irn_is(n, ignore)) {
++n_comp;
bitset_add_irn(seen, n);
- int_comp_rec(birg, ifg, n, seen);
+ int_comp_rec(ifg, n, seen);
}
}
* @param mach_op the machine operand for which uses are added
*/
static void add_machine_operands(const be_insn_env_t *env, be_insn_t *insn, ir_node *mach_op) {
- const arch_env_t *arch_env = env->aenv;
- struct obstack *obst = env->obst;
+ struct obstack *obst = env->obst;
int i, n;
for (i = 0, n = get_irn_arity(mach_op); i < n; ++i) {
if (is_irn_machine_operand(op)) {
add_machine_operands(env, insn, op);
- } else if (arch_irn_consider_in_reg_alloc(arch_env, env->cls, op)) {
+ } else if (arch_irn_consider_in_reg_alloc(env->cls, op)) {
be_operand_t o;
/* found a register use, create an operand */
*/
be_insn_t *be_scan_insn(const be_insn_env_t *env, ir_node *irn)
{
- const arch_env_t *arch_env = env->aenv;
- struct obstack *obst = env->obst;
+ struct obstack *obst = env->obst;
be_operand_t o;
be_insn_t *insn;
int i, n;
in the backend, but check it for now. */
assert(get_irn_mode(p) != mode_T);
- if (arch_irn_consider_in_reg_alloc(arch_env, env->cls, p)) {
+ if (arch_irn_consider_in_reg_alloc(env->cls, p)) {
/* found a def: create a new operand */
o.req = arch_get_register_req(p, -1);
o.carrier = p;
pre_colored += arch_get_irn_register(p) != NULL;
}
}
- } else if (arch_irn_consider_in_reg_alloc(arch_env, env->cls, irn)) {
+ } else if (arch_irn_consider_in_reg_alloc(env->cls, irn)) {
/* only one def, create one operand */
o.req = arch_get_register_req(irn, -1);
o.carrier = irn;
if (is_irn_machine_operand(op)) {
add_machine_operands(env, insn, op);
- } else if (arch_irn_consider_in_reg_alloc(arch_env, env->cls, op)) {
+ } else if (arch_irn_consider_in_reg_alloc(env->cls, op)) {
/* found a register use, create an operand */
o.req = arch_get_register_req(irn, i);
o.carrier = op;
ir_node *node, ir_nodeset_t *nodeset)
{
int i, arity;
+ (void)arch_env; // TODO remove parameter
/* You should better break out of your loop when hitting the first phi
* function. */
foreach_out_edge(node, edge) {
ir_node *proj = get_edge_src_irn(edge);
- if (arch_irn_consider_in_reg_alloc(arch_env, cls, proj)) {
+ if (arch_irn_consider_in_reg_alloc(cls, proj)) {
ir_nodeset_remove(nodeset, proj);
}
}
- } else if (arch_irn_consider_in_reg_alloc(arch_env, cls, node)) {
+ } else if (arch_irn_consider_in_reg_alloc(cls, node)) {
ir_nodeset_remove(nodeset, node);
}
for (i = 0; i < arity; ++i) {
ir_node *op = get_irn_n(node, i);
- if (arch_irn_consider_in_reg_alloc(arch_env, cls, op))
+ if (arch_irn_consider_in_reg_alloc(cls, op))
ir_nodeset_insert(nodeset, op);
}
}
const ir_node *block, ir_nodeset_t *live)
{
int i;
+ (void)arch_env; // TODO remove parameter
assert(lv->nodes && "live sets must be computed");
be_lv_foreach(lv, block, be_lv_state_end, i) {
ir_node *node = be_lv_get_irn(lv, block, i);
- if(!arch_irn_consider_in_reg_alloc(arch_env, cls, node))
+ if (!arch_irn_consider_in_reg_alloc(cls, node))
continue;
ir_nodeset_insert(live, node);
/** Lowering walker environment. */
typedef struct _lower_env_t {
be_irg_t *birg;
- const arch_env_t *arch_env;
unsigned do_copy : 1;
DEBUG_ONLY(firm_dbg_module_t *dbg_module;)
} lower_env_t;
return irn;
}
-static ir_node *find_copy(constraint_env_t *env, ir_node *irn, ir_node *op) {
- const arch_env_t *arch_env = be_get_birg_arch_env(env->birg);
- ir_node *block = get_nodes_block(irn);
- ir_node *cur_node;
+static ir_node *find_copy(ir_node *irn, ir_node *op)
+{
+ ir_node *block = get_nodes_block(irn);
+ ir_node *cur_node;
for (cur_node = sched_prev(irn);
! is_Block(cur_node) && be_is_Copy(cur_node) && get_nodes_block(cur_node) == block;
cur_node = sched_prev(cur_node))
{
- if (be_get_Copy_op(cur_node) == op && arch_irn_is(arch_env, cur_node, dont_spill))
+ if (be_get_Copy_op(cur_node) == op && arch_irn_is(cur_node, dont_spill))
return cur_node;
}
be_irg_t *birg = env->birg;
ir_graph *irg = be_get_birg_irg(birg);
pset *op_set = env->op_set;
- const arch_env_t *arch_env = be_get_birg_arch_env(birg);
ir_node *block = get_nodes_block(irn);
const arch_register_class_t *cls = arch_get_irn_reg_class(other_different, -1);
ir_node *in[2], *keep, *cpy;
op_copy_assoc_t key, *entry;
DEBUG_ONLY(firm_dbg_module_t *mod = env->dbg;)
- if (arch_irn_is(arch_env, other_different, ignore) || ! mode_is_datab(get_irn_mode(other_different))) {
+ if (arch_irn_is(other_different, ignore) ||
+ !mode_is_datab(get_irn_mode(other_different))) {
DBG((mod, LEVEL_1, "ignore constraint for %+F because other_irn is ignore or not a datab node\n", irn));
return;
}
/* The copy is optimized later if not needed */
/* check if already exists such a copy in the schedule immediately before */
- cpy = find_copy(env, belower_skip_proj(irn), other_different);
+ cpy = find_copy(belower_skip_proj(irn), other_different);
if (! cpy) {
cpy = be_new_Copy(cls, irg, block, other_different);
be_node_set_flags(cpy, BE_OUT_POS(0), arch_irn_flags_dont_spill);
*/
static int push_through_perm(ir_node *perm, void *data)
{
- lower_env_t *env = data;
- const arch_env_t *aenv = env->arch_env;
+ lower_env_t *env = data;
ir_graph *irg = get_irn_irg(perm);
ir_node *bl = get_nodes_block(perm);
sched_foreach_reverse_from (sched_prev(perm), irn) {
for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
ir_node *op = get_irn_n(irn, i);
- if (arch_irn_consider_in_reg_alloc(aenv, cls, op) &&
+ if (arch_irn_consider_in_reg_alloc(cls, op) &&
!values_interfere(env->birg, op, one_proj)) {
frontier = irn;
goto found_front;
break;
if(!sched_comes_after(frontier, node))
break;
- if(arch_irn_is(aenv, node, modify_flags))
+ if (arch_irn_is(node, modify_flags))
break;
if(is_Proj(node)) {
req = arch_get_register_req(get_Proj_pred(node),
break;
for(i = get_irn_arity(node) - 1; i >= 0; --i) {
ir_node *opop = get_irn_n(node, i);
- if (arch_irn_consider_in_reg_alloc(aenv, cls, opop)) {
+ if (arch_irn_consider_in_reg_alloc(cls, opop)) {
break;
}
}
lower_env_t env;
ir_graph *irg = be_get_birg_irg(birg);
- env.birg = birg;
- env.arch_env = be_get_birg_arch_env(birg);
- env.do_copy = do_copy;
+ env.birg = birg;
+ env.do_copy = do_copy;
FIRM_DBG_REGISTER(env.dbg_module, "firm.be.lower");
/* we will need interference */
static INLINE int has_reg_class(const regpressure_ana_t *ra, const ir_node *irn)
{
- return arch_irn_consider_in_reg_alloc(ra->arch_env, ra->cls, irn);
+ return arch_irn_consider_in_reg_alloc(ra->cls, irn);
}
static INLINE int regpressure(pset *live) {
}
-static const arch_env_t *cur_arch_env;
-
-
static ir_node *normal_select(void *block_env, ir_nodeset_t *ready_set,
ir_nodeset_t *live_set)
{
return
mode != mode_M &&
mode != mode_X &&
- !arch_irn_is(cur_arch_env, irn, ignore);
+ !arch_irn_is(irn, ignore);
}
cost = normal_tree_cost(pred);
if (be_is_Barrier(pred)) cost = 1; // XXX hack: the barrier causes all users to have a reguse of #regs
- if (!arch_irn_is(cur_arch_env, pred, ignore)) {
+ if (!arch_irn_is(pred, ignore)) {
real_pred = (is_Proj(pred) ? get_Proj_pred(pred) : pred);
pred_fc = get_irn_link(real_pred);
pred_fc->no_root = 1;
last = 0;
for (i = 0; i < arity; ++i) {
ir_node* op = fc->costs[i].irn;
- if (op == last) continue;
- if (get_irn_mode(op) == mode_M) continue;
- if (arch_irn_is(cur_arch_env, op, ignore)) continue;
+ if (op == last) continue;
+ if (get_irn_mode(op) == mode_M) continue;
+ if (arch_irn_is(op, ignore)) continue;
cost = MAX(fc->costs[i].cost + n_op_res, cost);
last = op;
++n_op_res;
(void)vtab;
- cur_arch_env = be_get_birg_arch_env(birg);
-
be_clear_links(irg);
heights = heights_new(irg);
ir_node *user = get_edge_src_irn(edge);
/* skip ignore nodes as they do not really contribute to register pressure */
- if (arch_irn_is(rss->arch_env, user, ignore))
+ if (arch_irn_is(user, ignore))
continue;
/*
assert(! is_Proj(consumer) && "Cannot handle Projs");
if (! is_Phi(consumer) && ! is_Block(consumer) && get_nodes_block(consumer) == block) {
- if (! arch_irn_is(rss->arch_env, consumer, ignore) && ! plist_has_value(rss_irn->consumer_list, consumer)) {
+ if (!arch_irn_is(consumer, ignore) &&
+ !plist_has_value(rss_irn->consumer_list, consumer)) {
plist_insert_back(rss_irn->consumer_list, consumer);
DBG((rss->dbg, LEVEL_2, "\t\tconsumer %+F\n", consumer));
}
if (be_is_Keep(irn))
continue;
- if (!arch_irn_is(rss->arch_env, irn, ignore) && arch_get_irn_reg_class(irn, -1) == cls) {
+ if (!arch_irn_is(irn, ignore) &&
+ arch_get_irn_reg_class(irn, -1) == cls) {
plist_insert_back(rss->nodes, skip_Proj(irn));
}
//}
for (i = get_irn_arity(irn) - 1; i >= 0; i--) {
ir_node *in = get_irn_n(irn, i);
- if (mode_is_datab(get_irn_mode(in)) && /* must be data node */
- ! arch_irn_is(env->arch_env, in, ignore) && /* ignore "ignore" nodes :) */
- ! be_is_live_end(env->liveness, block, in) /* if the value lives outside of block: do not count */
-
- ) {
+ if (mode_is_datab(get_irn_mode(in)) && /* must be data node */
+ !arch_irn_is(in, ignore) && /* ignore "ignore" nodes :) */
+ !be_is_live_end(env->liveness, block, in)) { /* if the value lives outside of block: do not count */
num_in++;
}
}
spill_t *s;
spill_t *last;
- assert(! arch_irn_is(env->arch_env, to_spill, dont_spill));
+ assert(!arch_irn_is(to_spill, dont_spill));
DB((dbg, LEVEL_1, "Add spill of %+F after %+F\n", to_spill, after));
/* Just for safety make sure that we do not insert the spill in front of a phi */
spill_info_t *info;
reloader_t *rel;
- assert(! arch_irn_is(env->arch_env, to_spill, dont_spill));
+ assert(!arch_irn_is(to_spill, dont_spill));
info = get_spillinfo(env, to_spill);
/*
* Ignore registers are always available
*/
- if(arch_irn_is(env->arch_env, arg, ignore)) {
+ if (arch_irn_is(arg, ignore)) {
return 1;
}
/**
 * Checks whether the node can be rematerialized in principle
*/
-static int is_remat_node(spill_env_t *env, const ir_node *node)
+static int is_remat_node(const ir_node *node)
{
- const arch_env_t *arch_env = env->arch_env;
-
assert(!be_is_Spill(node));
- if(arch_irn_is(arch_env, node, rematerializable))
+ if (arch_irn_is(node, rematerializable))
return 1;
return 0;
int argremats;
int costs = 0;
- if(!is_remat_node(env, spilled))
+ if (!is_remat_node(spilled))
return REMAT_COST_INFINITE;
if(be_is_Reload(spilled)) {
if(parentcosts + costs >= env->reload_cost + env->spill_cost) {
return REMAT_COST_INFINITE;
}
- if(arch_irn_is(env->arch_env, spilled, modify_flags)) {
+ if (arch_irn_is(spilled, modify_flags)) {
return REMAT_COST_INFINITE;
}
if(spillinfo->spill_costs >= 0)
return;
- assert(! arch_irn_is(env->arch_env, to_spill, dont_spill));
+ assert(!arch_irn_is(to_spill, dont_spill));
assert(!be_is_Reload(to_spill));
/* some backends have virtual noreg/unknown nodes that are not scheduled
} workset_t;
static struct obstack obst;
-static const arch_env_t *arch_env;
static const arch_register_class_t *cls;
static const be_lv_t *lv;
static be_loopana_t *loop_ana;
loc_t *loc;
int i;
/* check for current regclass */
- assert(arch_irn_consider_in_reg_alloc(arch_env, cls, val));
+ assert(arch_irn_consider_in_reg_alloc(cls, val));
/* check if val is already contained */
for (i = 0; i < workset->len; ++i) {
const ir_node *def, int skip_from_uses)
{
be_next_use_t use;
- int flags = arch_irn_get_flags(arch_env, def);
+ int flags = arch_irn_get_flags(def);
unsigned costs;
unsigned time;
loc.node = node;
loc.spilled = false;
- if (!arch_irn_consider_in_reg_alloc(arch_env, cls, node)) {
+ if (!arch_irn_consider_in_reg_alloc(cls, node)) {
loc.time = USES_INFINITY;
return loc;
}
/* We have to keep nonspillable nodes in the workingset */
- if (arch_irn_get_flags(arch_env, node) & arch_irn_flags_dont_spill) {
+ if (arch_irn_get_flags(node) & arch_irn_flags_dont_spill) {
loc.time = 0;
DB((dbg, DBG_START, " %+F taken (dontspill node)\n", node, loc.time));
return loc;
if (! is_Phi(node))
break;
- if (!arch_irn_consider_in_reg_alloc(arch_env, cls, node))
+ if (!arch_irn_consider_in_reg_alloc(cls, node))
continue;
if (all_preds_known) {
workset_clear(new_vals);
for(i = 0, arity = get_irn_arity(irn); i < arity; ++i) {
ir_node *in = get_irn_n(irn, i);
- if (!arch_irn_consider_in_reg_alloc(arch_env, cls, in))
+ if (!arch_irn_consider_in_reg_alloc(cls, in))
continue;
/* (note that "spilled" is irrelevant here) */
foreach_out_edge(irn, edge) {
ir_node *proj = get_edge_src_irn(edge);
- if (!arch_irn_consider_in_reg_alloc(arch_env, cls, proj))
+ if (!arch_irn_consider_in_reg_alloc(cls, proj))
continue;
workset_insert(new_vals, proj, false);
}
} else {
- if (!arch_irn_consider_in_reg_alloc(arch_env, cls, irn))
+ if (!arch_irn_consider_in_reg_alloc(cls, irn))
continue;
workset_insert(new_vals, irn, false);
}
assert(!l->spilled);
/* we might have unknowns as argument for the phi */
- if (!arch_irn_consider_in_reg_alloc(arch_env, cls, node))
+ if (!arch_irn_consider_in_reg_alloc(cls, node))
continue;
}
/* init belady env */
stat_ev_tim_push();
obstack_init(&obst);
- arch_env = birg->main_env->arch_env;
cls = rcls;
lv = be_get_birg_liveness(birg);
n_regs = cls->n_regs - be_put_ignore_regs(birg, cls, NULL);
static INLINE void workset_insert(belady_env_t *env, workset_t *ws, ir_node *val) {
int i;
/* check for current regclass */
- if (!arch_irn_consider_in_reg_alloc(env->arch, env->cls, val)) {
+ if (!arch_irn_consider_in_reg_alloc(env->cls, val)) {
// DBG((dbg, DBG_WORKSET, "Skipped %+F\n", val));
return;
}
belady_env_t *env = bi->bel;
sched_timestep_t curr_step = sched_get_time_step(env->instr);
next_use_t *use = get_current_use(bi, irn);
- int flags = arch_irn_get_flags(env->arch, irn);
+ int flags = arch_irn_get_flags(irn);
assert(!(flags & arch_irn_flags_ignore));
if (is_op_forking(get_irn_op(env->instr))) {
for (i = get_irn_arity(env->instr) - 1; i >= 0; --i) {
ir_node *op = get_irn_n(env->instr, i);
- block_info->free_at_jump -= arch_irn_consider_in_reg_alloc(env->arch, env->cls, op);
+ block_info->free_at_jump -= arch_irn_consider_in_reg_alloc(env->cls, op);
}
}
 * there might be unknowns as operands of phis; in that case
* we set the costs to zero, since they won't get spilled.
*/
- if (arch_irn_consider_in_reg_alloc(env->arch, env->cls, op))
+ if (arch_irn_consider_in_reg_alloc(env->cls, op))
c = can_make_available_at_end(ges, pr, op, limit - glob_costs, level + 1);
else
c = 0.0;
if (!is_Phi(irn))
break;
- if (arch_irn_consider_in_reg_alloc(env->arch, env->cls, irn)
+ if (arch_irn_consider_in_reg_alloc(env->cls, irn)
&& !bitset_contains_irn(ges.succ_phis, irn))
be_spill_phi(env->senv, irn);
}
worklist_t *end_worklist;
};
-static const arch_env_t *arch_env;
static const arch_register_class_t *cls;
static struct obstack obst;
static spill_env_t *senv;
value = get_Phi_pred(value, succ_pos);
/* can happen for unknown phi preds */
- if (!arch_irn_consider_in_reg_alloc(arch_env, cls, value))
+ if (!arch_irn_consider_in_reg_alloc(cls, value))
continue;
}
/* already in the worklist? move around, otherwise add at back */
worklist_entry_t *entry = get_irn_link(value);
- assert(arch_irn_consider_in_reg_alloc(arch_env, cls, value));
+ assert(arch_irn_consider_in_reg_alloc(cls, value));
if (worklist_contains(value)) {
assert(entry != NULL);
if (worklist_contains(node2))
continue;
- if (!arch_irn_consider_in_reg_alloc(arch_env, cls, node2))
+ if (!arch_irn_consider_in_reg_alloc(cls, node2))
continue;
if (!tentative_mode)
foreach_out_edge(node, edge) {
ir_node *proj = get_edge_src_irn(edge);
- if (!arch_irn_consider_in_reg_alloc(arch_env, cls, proj))
+ if (!arch_irn_consider_in_reg_alloc(cls, proj))
continue;
if (worklist_contains(proj)) {
worklist_remove(worklist, proj);
++n_defs;
}
}
- } else if (arch_irn_consider_in_reg_alloc(arch_env, cls, node)) {
+ } else if (arch_irn_consider_in_reg_alloc(cls, node)) {
if (worklist_contains(node)) {
worklist_remove(worklist, node);
} else {
for(i = 0; i < arity; ++i) {
ir_node *use = get_irn_n(node, i);
- if (!arch_irn_consider_in_reg_alloc(arch_env, cls, use))
+ if (!arch_irn_consider_in_reg_alloc(cls, use))
continue;
val_used(worklist, use, node);
value = get_irn_n(value, i);
/* we might have unknowns as argument for the phi */
- if (!arch_irn_consider_in_reg_alloc(arch_env, cls, value))
+ if (!arch_irn_consider_in_reg_alloc(cls, value))
continue;
}
return;
worklist_visited = 0;
- arch_env = be_get_birg_arch_env(birg);
exec_freq = be_get_birg_exec_freq(birg);
be_clear_links(irg);
foreach_out_edge(node, edge) {
const ir_node *proj = get_edge_src_irn(edge);
- if(arch_irn_consider_in_reg_alloc(arch_env, cls, proj)) {
+ if (arch_irn_consider_in_reg_alloc(cls, proj)) {
++values_defined;
}
}
- } else if(arch_irn_consider_in_reg_alloc(arch_env, cls, node)) {
+ } else if (arch_irn_consider_in_reg_alloc(cls, node)) {
++values_defined;
}
arity = get_irn_arity(node);
for(i = 0; i < arity; ++i) {
ir_node *pred = get_irn_n(node, i);
- if(arch_irn_consider_in_reg_alloc(arch_env, cls, pred)
+ if (arch_irn_consider_in_reg_alloc(cls, pred)
&& !ir_nodeset_contains(live_nodes, pred)) {
++free_regs_needed;
}
cand_node = candidate->node;
++cand_idx;
- if(arch_irn_is(arch_env, cand_node, dont_spill))
+ if (arch_irn_is(cand_node, dont_spill))
continue;
/* make sure the node is not an argument of the instruction */
foreach_out_edge(node, edge) {
const ir_node *proj = get_edge_src_irn(edge);
- if (arch_irn_consider_in_reg_alloc(arch_env, cls, proj)) {
+ if (arch_irn_consider_in_reg_alloc(cls, proj)) {
ir_nodeset_remove(nodeset, proj);
}
}
}
- if(arch_irn_consider_in_reg_alloc(arch_env, cls, node)) {
- ir_nodeset_remove(nodeset, node);
- }
+ if (arch_irn_consider_in_reg_alloc(cls, node)) {
+ ir_nodeset_remove(nodeset, node);
+ }
}
static void add_uses(ir_node *node, ir_nodeset_t *nodeset)
{
int i, arity;
- arity = get_irn_arity(node);
- for(i = 0; i < arity; ++i) {
- ir_node *op = get_irn_n(node, i);
+ arity = get_irn_arity(node);
+ for(i = 0; i < arity; ++i) {
+ ir_node *op = get_irn_n(node, i);
- if(arch_irn_consider_in_reg_alloc(arch_env, cls, op)
- && !bitset_is_set(spilled_nodes, get_irn_idx(op))) {
- ir_nodeset_insert(nodeset, op);
+ if (arch_irn_consider_in_reg_alloc(cls, op) &&
+ !bitset_is_set(spilled_nodes, get_irn_idx(op))) {
+ ir_nodeset_insert(nodeset, op);
}
- }
+ }
}
static __attribute__((unused))
unsigned hash = hash_irn(arg);
perm_proj_t templ;
- if (arch_irn_is(chordal_env->birg->main_env->arch_env, arg, ignore))
+ if (arch_irn_is(arg, ignore))
continue;
templ.arg = arg;
arg_block = get_Block_cfgpred_block(phi_block, i);
arg_reg = get_reg(arg);
- if (arch_irn_is(chordal_env->birg->main_env->arch_env, arg, ignore))
+ if (arch_irn_is(arg, ignore))
continue;
assert(arg_reg && "Register must be set while placing perms");
}
static void ssa_destruction_check_walker(ir_node *bl, void *data) {
- be_chordal_env_t *chordal_env = data;
ir_node *phi;
int i, max;
+ (void)data;
for (phi = get_irn_link(bl); phi; phi = get_irn_link(phi)) {
const arch_register_t *phi_reg, *arg_reg;
for (i = 0, max = get_irn_arity(phi); i < max; ++i) {
ir_node *arg = get_irn_n(phi, i);
- if (arch_irn_is(chordal_env->birg->main_env->arch_env, arg, ignore))
+ if (arch_irn_is(arg, ignore))
continue;
arg_reg = get_reg(arg);
}
void be_ssa_destruction_check(be_chordal_env_t *chordal_env) {
- irg_block_walk_graph(chordal_env->irg, ssa_destruction_check_walker, NULL, chordal_env);
+ irg_block_walk_graph(chordal_env->irg, ssa_destruction_check_walker, NULL, NULL);
}
}
}
-static int should_be_scheduled(be_verify_schedule_env_t *env, ir_node *node) {
+static int should_be_scheduled(ir_node *node)
+{
if(is_Block(node))
return -1;
break;
}
- if(arch_irn_get_flags(env->arch_env, node) & arch_irn_flags_ignore)
+ if (arch_irn_get_flags(node) & arch_irn_flags_ignore)
return -1;
return 1;
int should_be;
int scheduled;
- should_be = should_be_scheduled(env, node);
+ should_be = should_be_scheduled(node);
if(should_be == -1)
return;
schedpoint = sched_prev(schedpoint);
if (schedpoint == left)
break;
- if (arch_irn_is(cg->arch_env, schedpoint, modify_flags))
+ if (arch_irn_is(schedpoint, modify_flags))
return;
if (schedpoint == block)
panic("couldn't find left");
struct _x87_simulator {
struct obstack obst; /**< An obstack for fast allocating. */
pmap *blk_states; /**< Map blocks to states. */
- const arch_env_t *arch_env; /**< The architecture environment. */
be_lv_t *lv; /**< intrablock liveness. */
vfp_liveness *live; /**< Liveness information. */
unsigned n_idx; /**< The cached get_irg_last_idx() result. */
* Updates a live set over a single step from a given node to its predecessor.
* Everything defined at the node is removed from the set, the uses of the node get inserted.
*
- * @param sim The simulator handle.
* @param irn The node at which liveness should be computed.
* @param live The bitset of registers live before @p irn. This set gets modified by updating it to
* the registers live after irn.
*
* @return The live bitset.
*/
-static vfp_liveness vfp_liveness_transfer(x87_simulator *sim, ir_node *irn, vfp_liveness live)
+static vfp_liveness vfp_liveness_transfer(ir_node *irn, vfp_liveness live)
{
int i, n;
const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
- const arch_env_t *arch_env = sim->arch_env;
if (get_irn_mode(irn) == mode_T) {
const ir_edge_t *edge;
foreach_out_edge(irn, edge) {
ir_node *proj = get_edge_src_irn(edge);
- if (arch_irn_consider_in_reg_alloc(arch_env, cls, proj)) {
+ if (arch_irn_consider_in_reg_alloc(cls, proj)) {
const arch_register_t *reg = x87_get_irn_register(proj);
live &= ~(1 << arch_register_get_index(reg));
}
}
}
- if (arch_irn_consider_in_reg_alloc(arch_env, cls, irn)) {
+ if (arch_irn_consider_in_reg_alloc(cls, irn)) {
const arch_register_t *reg = x87_get_irn_register(irn);
live &= ~(1 << arch_register_get_index(reg));
}
for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
ir_node *op = get_irn_n(irn, i);
- if (mode_is_float(get_irn_mode(op)) && arch_irn_consider_in_reg_alloc(arch_env, cls, op)) {
+ if (mode_is_float(get_irn_mode(op)) &&
+ arch_irn_consider_in_reg_alloc(cls, op)) {
const arch_register_t *reg = x87_get_irn_register(op);
live |= 1 << arch_register_get_index(reg);
}
int i;
vfp_liveness live = 0;
const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
- const arch_env_t *arch_env = sim->arch_env;
const be_lv_t *lv = sim->lv;
be_lv_foreach(lv, block, be_lv_state_end, i) {
const arch_register_t *reg;
const ir_node *node = be_lv_get_irn(lv, block, i);
- if (!arch_irn_consider_in_reg_alloc(arch_env, cls, node))
+ if (!arch_irn_consider_in_reg_alloc(cls, node))
continue;
reg = x87_get_irn_register(node);
idx = get_irn_idx(irn);
sim->live[idx] = live;
- live = vfp_liveness_transfer(sim, irn, live);
+ live = vfp_liveness_transfer(irn, live);
}
idx = get_irn_idx(block);
sim->live[idx] = live;
static int sim_Barrier(x87_state *state, ir_node *node)
{
- //const arch_env_t *arch_env = state->sim->arch_env;
int i, arity;
/* materialize unknown if needed */
*
* @param sim a simulator handle, will be initialized
* @param irg the current graph
- * @param arch_env the architecture environment
*/
-static void x87_init_simulator(x87_simulator *sim, ir_graph *irg,
- const arch_env_t *arch_env)
+static void x87_init_simulator(x87_simulator *sim, ir_graph *irg)
{
obstack_init(&sim->obst);
sim->blk_states = pmap_create();
- sim->arch_env = arch_env;
sim->n_idx = get_irg_last_idx(irg);
sim->live = obstack_alloc(&sim->obst, sizeof(*sim->live) * sim->n_idx);
blk_state *bl_state;
x87_simulator sim;
ir_graph *irg = be_get_birg_irg(birg);
+ (void)arch_env;
/* create the simulator */
- x87_init_simulator(&sim, irg, arch_env);
+ x87_init_simulator(&sim, irg);
start_block = get_irg_start_block(irg);
bl_state = x87_get_bl_state(&sim, start_block);