ops->set_irn_reg(irn, reg);
}
-extern arch_irn_class_t arch_irn_classify(const arch_env_t *env, const ir_node *irn)
+arch_irn_class_t arch_irn_classify(const ir_node *irn)
{
const arch_irn_ops_t *ops = get_irn_ops(irn);
- (void)env; // TODO remove parameter
return ops->classify(irn);
}
/**
* Classify a node.
- * @param env The architecture environment.
* @param irn The node.
* @return A classification of the node.
*/
-extern arch_irn_class_t arch_irn_classify(const arch_env_t *env, const ir_node *irn);
+arch_irn_class_t arch_irn_classify(const ir_node *irn);
/** Evaluates to non-zero iff the classification of @p irn contains the bit arch_irn_class_##irn_class. */
#define arch_irn_class_is(irn, irn_class) ((arch_irn_classify(irn) & arch_irn_class_ ## irn_class) != 0)
/**
* Get the flags of a node.
return 0;
req = arch_get_register_req(irn, -1);
- if (is_Reg_Phi(irn) || is_Perm_Proj(co->aenv, irn) || is_2addr_code(req))
+ if (is_Reg_Phi(irn) || is_Perm_Proj(irn) || is_2addr_code(req))
return 1;
return 0;
}
unit->nodes = XREALLOC(unit->nodes, ir_node*, unit->node_count);
unit->costs = XREALLOC(unit->costs, int, unit->node_count);
- } else if (is_Perm_Proj(co->aenv, irn)) {
+ } else if (is_Perm_Proj(irn)) {
/* Proj of a perm with corresponding arg */
assert(!nodes_interfere(co->cenv, irn, get_Perm_src(irn)));
unit->nodes = XMALLOCN(ir_node*, 2);
ir_node *arg = get_irn_n(irn, pos);
add_edges(co, irn, arg, co->get_costs(co, irn, arg, pos));
}
- }
- else if (is_Perm_Proj(co->aenv, irn)) { /* Perms */
+ } else if (is_Perm_Proj(irn)) { /* Perms */
ir_node *arg = get_Perm_src(irn);
add_edges(co, irn, arg, co->get_costs(co, irn, arg, 0));
}
/** A data Phi: a Phi node whose mode carries a data value. */
#define is_Reg_Phi(irn) (is_Phi(irn) && mode_is_data(get_irn_mode(irn)))
/** The Perm operand corresponding to the Proj node @p irn. */
#define get_Perm_src(irn) (get_irn_n(get_Proj_pred(irn), get_Proj_proj(irn)))
/** Non-zero iff @p irn is classified as a Perm. */
#define is_Perm(irn) (arch_irn_classify(irn) == arch_irn_class_perm)
/** Non-zero iff @p irn is a Proj of a Perm node. */
#define is_Perm_Proj(irn) (is_Proj(irn) && is_Perm(get_Proj_pred(irn)))
static INLINE int is_2addr_code(const arch_register_req_t *req)
{
/**
* Collect general data
*/
-static void irg_stat_walker(ir_node *node, void *env) {
- arch_env_t *arch_env = env;
+static void irg_stat_walker(ir_node *node, void *env)
+{
+ (void)env;
+
curr_vals[I_ALL_NODES]++; /* count all nodes */
if (is_Block(node)) /* count all blocks */
if (is_Reg_Phi(node)) /* collect phis */
ir_nodeset_insert(all_phi_nodes, node);
- if (is_Perm_Proj(arch_env, node))
+ if (is_Perm_Proj(node))
ir_nodeset_insert(all_copy_nodes, node);
/* TODO: Add 2-Addr-Code nodes */
}
-static void copystat_collect_irg(ir_graph *irg, arch_env_t *arch_env) {
- irg_walk_graph(irg, irg_stat_walker, NULL, arch_env);
+static void copystat_collect_irg(ir_graph *irg)
+{
+ irg_walk_graph(irg, irg_stat_walker, NULL, NULL);
last_irg = irg;
}
static void copystat_collect_cls(be_chordal_env_t *cenv) {
ir_graph *irg = cenv->irg;
- arch_env_t *aenv = cenv->birg->main_env->arch_env;
ir_node *n, **pc;
phi_classes_t *pc_obj;
pset *all_phi_classes;
ir_nodeset_iterator_t iter;
copystat_reset();
- copystat_collect_irg(irg, aenv);
+ copystat_collect_irg(irg);
/* compute the Phi classes of the collected Phis */
pc_obj = phi_class_new_from_set(cenv->irg, all_phi_nodes, 0);
}
}
-int sched_skip_cf_predicator(const ir_node *irn, void *data) {
- arch_env_t *ae = data;
- return arch_irn_class_is(ae, irn, branch);
+int sched_skip_cf_predicator(const ir_node *irn, void *data)
+{
+ (void)data;
+ return arch_irn_class_is(irn, branch);
}
int sched_skip_phi_predicator(const ir_node *irn, void *data) {
for (; sched_count-- != 0; ++sched) {
ir_node* irn = *sched;
if (ir_nodeset_contains(ready_set, irn) &&
- !arch_irn_class_is(cur_arch_env, irn, branch)) {
+ !arch_irn_class_is(irn, branch)) {
#if defined NORMAL_DBG
ir_fprintf(stderr, "scheduling %+F\n", irn);
#endif
ir_nodeset_t *live_set)
{
ir_nodeset_iterator_t iter;
- const arch_env_t *arch_env = block_env;
ir_node *irn = NULL;
int only_branches_left = 1;
- (void) live_set;
+ (void)block_env;
+ (void)live_set;
/* assure that branches and constants are executed last */
ir_nodeset_iterator_init(&iter, ready_set);
while( (irn = ir_nodeset_iterator_next(&iter)) != NULL) {
- if (! arch_irn_class_is(arch_env, irn, branch)) {
+ if (!arch_irn_class_is(irn, branch)) {
only_branches_left = 0;
break;
}
}
++i;
}
- } while(arch_irn_class_is(arch_env, irn, branch));
+ } while (arch_irn_class_is(irn, branch));
}
return irn;
/**
 * Graph initializer of the random scheduler: seed the PRNG.
 * @param vtab Unused selector vtable.
 * @param birg Unused backend graph.
 * @return NULL -- the random selector keeps no per-graph state.
 */
static void *random_init_graph(const list_sched_selector_t *vtab, const be_irg_t *birg)
{
    (void)vtab;
    (void)birg;

    /* Using time(NULL) as a seed here gives really random results,
       but is NOT deterministic which makes debugging impossible.
       Moreover no one wants non-deterministic compilers ... */
    srand(0x4711);
    return NULL;
}
/**
 * Block initializer of the random scheduler: stateless.
 * @param graph_env Unused graph environment.
 * @param block     Unused block.
 * @return NULL -- no per-block state is needed.
 */
static void *random_init_block(void *graph_env, ir_node *block)
{
    (void)graph_env;
    (void)block;
    return NULL;
}
const list_sched_selector_t random_selector = {
Ignore branch instructions for the time being.
They should only be scheduled if there is nothing else.
*/
- if (! arch_irn_class_is(env->main_env->arch_env, irn, branch)) {
+ if (!arch_irn_class_is(irn, branch)) {
int costs = reg_pr_costs(env, irn);
if (costs <= curr_cost) {
res = irn;
for (cur_pos = 0, curr = root; curr; curr = get_irn_link(curr), cur_pos++) {
sched_timestep_t d;
- if (arch_irn_class_is(env->arch_env, curr, branch)) {
+ if (arch_irn_class_is(curr, branch)) {
/* assure, that branches can be executed last */
d = 0;
}
/**
* Simple selector. Just assure that jumps are scheduled last.
*/
-static ir_node *basic_selection(const arch_env_t *arch_env, ir_nodeset_t *ready_set) {
+static ir_node *basic_selection(ir_nodeset_t *ready_set)
+{
ir_node *irn = NULL;
ir_nodeset_iterator_t iter;
/* assure that branches and constants are executed last */
foreach_ir_nodeset(ready_set, irn, iter) {
- if (! arch_irn_class_is(arch_env, irn, branch)) {
+ if (!arch_irn_class_is(irn, branch)) {
return irn;
}
}
if (cnt == 1) {
irn = get_nodeset_node(&ecands);
- if (arch_irn_class_is(env->arch_env, irn, branch)) {
+ if (arch_irn_class_is(irn, branch)) {
/* BEWARE: don't select a JUMP if others are still possible */
goto force_mcands;
}
}
else if (cnt > 1) {
DB((env->dbg, LEVEL_3, "\tecand = %d, max_delay = %u\n", cnt, max_delay));
- irn = basic_selection(env->arch_env, &ecands);
+ irn = basic_selection(&ecands);
}
else {
force_mcands:
DB((env->dbg, LEVEL_3, "\tmcand = %d\n", ir_nodeset_size(&mcands)));
- irn = basic_selection(env->arch_env, &mcands);
+ irn = basic_selection(&mcands);
}
}
/* priority based selection, heuristic inspired by mueller diss */
foreach_ir_nodeset(ns, irn, iter) {
/* make sure that branches are scheduled last */
- if (! arch_irn_class_is(trace_env->arch_env, irn, branch)) {
+ if (!arch_irn_class_is(irn, branch)) {
int rdiff = get_irn_reg_diff(trace_env, irn);
int sign = rdiff < 0;
int chg = (rdiff < 0 ? -rdiff : rdiff) << PRIO_CHG_PRESS;
DBG((trace_env->dbg, LEVEL_4, "heuristic selected %+F:\n", cand));
}
else {
- cand = basic_selection(trace_env->arch_env, ns);
+ cand = basic_selection(ns);
}
return cand;
static ir_node *trivial_select(void *block_env, ir_nodeset_t *ready_set,
ir_nodeset_t *live_set)
{
- const arch_env_t *arch_env = block_env;
- ir_node *irn = NULL;
- ir_nodeset_iterator_t iter;
- (void) live_set;
+ ir_node *irn;
+ ir_nodeset_iterator_t iter;
+ (void)block_env;
+ (void)live_set;
/* assure that branches and constants are executed last */
foreach_ir_nodeset(ready_set, irn, iter) {
- if (! arch_irn_class_is(arch_env, irn, branch)) {
+ if (!arch_irn_class_is(irn, branch)) {
return irn;
}
}
static void *trivial_init_graph(const list_sched_selector_t *vtab, const be_irg_t *birg)
{
- (void) vtab;
- return (void *) be_get_birg_arch_env(birg);
+ (void)vtab;
+ (void)birg;
+ return NULL;
}
/**
 * Block initializer of the trivial scheduler: stateless.
 * @param graph_env Unused graph environment.
 * @param block     Unused block.
 * @return NULL -- no per-block state is needed.
 */
static void *trivial_init_block(void *graph_env, ir_node *block)
{
    (void)graph_env;
    (void)block;
    return NULL;
}
const list_sched_selector_t trivial_selector = {
static void collect_spills_walker(ir_node *node, void *data)
{
be_fec_env_t *env = data;
- const arch_env_t *arch_env = env->arch_env;
const ir_mode *mode;
const arch_register_class_t *cls;
int align;
if (is_Proj(node))
return;
- if (!arch_irn_class_is(arch_env, node, reload))
+ if (!arch_irn_class_is(node, reload))
return;
mode = get_irn_mode(node);
cls = arch_get_irn_reg_class(node, -1);
- align = arch_env_get_reg_class_alignment(arch_env, cls);
+ align = arch_env_get_reg_class_alignment(env->arch_env, cls);
be_node_needs_frame_entity(env, node, mode, align);
}
-static const arch_env_t *arch_env;
-static be_node_stats_t *stats;
+static be_node_stats_t *stats;
static void node_stat_walker(ir_node *irn, void *data)
{
(*stats)[BE_STAT_PHIS]++;
}
} else {
- arch_irn_class_t classify = arch_irn_classify(arch_env, irn);
+ arch_irn_class_t classify = arch_irn_classify(irn);
if(classify & arch_irn_class_spill)
(*stats)[BE_STAT_SPILLS]++;
void be_collect_node_stats(be_node_stats_t *new_stats, be_irg_t *birg)
{
- arch_env = birg->main_env->arch_env;
- stats = new_stats;
+ stats = new_stats;
memset(stats, 0, sizeof(*stats));
irg_walk_graph(birg->irg, NULL, node_stat_walker, NULL);
*/
static void collect_spills_walker(ir_node *node, void *data) {
be_verify_spillslots_env_t *env = data;
- const arch_env_t *arch_env = env->arch_env;
/* @@@ ia32_classify returns classification of Proj_pred :-/ */
if(is_Proj(node))
return;
- if(arch_irn_class_is(arch_env, node, reload)) {
+ if (arch_irn_class_is(node, reload)) {
ir_node *spill = get_memory_edge(node);
ir_entity *ent;