#include "phiclass.h"
#include "beraextern.h"
+#include "beabi.h"
#include "bearch.h"
#include "benode_t.h"
#include "beirgmod.h"
-#include "besched.h"
+#include "besched_t.h"
#include "beutil.h"
#include "belive_t.h"
arch_get_register_req(raenv->aenv, &req, irn, -1);
if (arch_register_req_is(&req, limited)) {
ir_node *cpy = be_new_Copy(req.cls, raenv->irg, get_nodes_block(irn), irn);
- const ir_edge_t *edge;
/* all users of the irn use the copy instead */
sched_add_after(irn, cpy);
- foreach_out_edge(irn, edge)
- set_irn_n(edge->src, edge->pos, cpy);
+ edges_reroute(irn, cpy, raenv->irg);
}
}
static void handle_constraints(be_raext_env_t *raenv) {
-	irg_block_walk_graph(raenv->irg, NULL, handle_constraints_walker, raenv);
+	/* Walk every node, not just the blocks: the walker examines each node's
+	 * register requirements, which a block-only walk would never visit. */
+	irg_walk_graph(raenv->irg, NULL, handle_constraints_walker, raenv);
}
if (has_been_done(start_phi, pos))
return NULL;
- /* In case this is a 'normal' phi we insert into
- * the schedule before the pred_blk irn */
- last_cpy = pred_blk;
+ /* In case this is a 'normal' phi we insert at the
+ * end of the pred block before cf nodes */
+ last_cpy = sched_skip(pred_blk, 0, sched_skip_cf_predicator, raenv->aenv);
+ last_cpy = sched_next(last_cpy);
/* If we detect a loop stop recursion. */
if (arg == start_phi) {
/* At least 2 phis are involved */
/* Insert a loop breaking copy (an additional variable T) */
loop_breaker = be_new_Copy(raenv->cls, raenv->irg, pred_blk, start_phi);
- sched_add_before(pred_blk, loop_breaker);
+ sched_add_before(last_cpy, loop_breaker);
arg = loop_breaker;
}
if (!is_Phi(phi))
break;
+ if (arch_irn_is(raenv->aenv, phi, ignore))
+ continue;
+
raenv->cls = arch_get_irn_reg_class(raenv->aenv, phi, -1);
insert_copies(raenv, phi, pos, phi);
}
}
/* values <--> var mapping */
- pset_foreach(vals, irn)
+ pset_foreach(vals, irn) {
+ DBG((raenv->dbg, 0, "Var %d contains %+F\n", nr, irn));
var_add_value(raenv, nr, irn);
+ }
}
}
}
-static INLINE unsigned int get_spill_costs(be_raext_env_t *raenv, var_info_t *vi) {
+#define UNSPILLABLE -1
+
+static INLINE int get_spill_costs(be_raext_env_t *raenv, var_info_t *vi) {
ir_node *irn;
int c_spills=0, c_reloads=0;
pset_foreach(vi->values, irn) {
- if (arch_irn_is_ignore(raenv->aenv, irn)) {
+ if (arch_irn_is(raenv->aenv, irn, ignore) || be_is_Reload(irn)) {
pset_break(vi->values);
- return -1;
+ return UNSPILLABLE;
}
if (is_Phi(irn)) {
if (vi->var_nr == SET_REMOVED)
continue;
- fprintf(f, "%d %u", vi->var_nr, get_spill_costs(raenv, vi));
+ fprintf(f, "%d %d", vi->var_nr, get_spill_costs(raenv, vi));
dump_constraint(raenv, get_first_non_phi(vi->values), -1);
fprintf(f, "\n");
}
fprintf(f, "}\n");
+ fflush(f);
}
pset_break(vi1->values);
pset_break(vi2->values);
fprintf(f, "(%d, %d)\n", vi1->var_nr, vi2->var_nr);
+ goto NextVar;
}
+
+NextVar: ;
}
}
fprintf(f, "}\n");
int pos, max;
var_info_t *vi1, *vi2;
- if (arch_get_irn_reg_class(raenv->aenv, irn, -1) == NULL || arch_irn_is_ignore(raenv->aenv, irn))
+ if (arch_get_irn_reg_class(raenv->aenv, irn, -1) != raenv->cls || arch_irn_is(raenv->aenv, irn, ignore))
return;
vi1 = get_var_info(irn);
/* copies have affinities */
if (arch_irn_classify(raenv->aenv, irn) == arch_irn_class_copy) {
- ir_node *other = get_irn_n(irn, 0);
+ ir_node *other = get_irn_n(irn, be_pos_Copy_orig);
- if (! arch_irn_is_ignore(raenv->aenv, other)) {
+ if (! arch_irn_is(raenv->aenv, other, ignore)) {
vi2 = get_var_info(other);
fprintf(raenv->f, "(%d, %d, %d)\n", vi1->var_nr, vi2->var_nr, get_affinity_weight(irn));
for (pos = 0, max = get_irn_arity(irn); pos<max; ++pos) {
arch_get_register_req(raenv->aenv, &req, irn, pos);
- if (arch_register_req_is(&req, should_be_same) && arch_irn_is_ignore(raenv->aenv, req.other_same)) {
+ if (arch_register_req_is(&req, should_be_same) && arch_irn_is(raenv->aenv, req.other_same, ignore)) {
vi2 = get_var_info(req.other_same);
fprintf(raenv->f, "(%d, %d, %d)\n", vi1->var_nr, vi2->var_nr, get_affinity_weight(irn));
ret_status = system(cmd_line);
assert(ret_status != -1 && "Invokation of external register allocator failed");
+ assert(ret_status == 0 && "External register allocator is unhappy with sth.");
}
/******************************************************************************
static INLINE void var_add_spills_and_reloads(be_raext_env_t *raenv, int var_nr) {
var_info_t *vi = var_find(raenv->vars, var_nr);
ir_node *spill=NULL, *ctx, *irn;
+ ir_mode *mode;
const ir_edge_t *edge, *ne;
pset *spills = pset_new_ptr(4); /* the spills of this variable */
pset *reloads = pset_new_ptr(4); /* the reloads of this variable */
assert(spill && "There must be at least one non-phi-node");
+ mode = get_irn_mode(get_irn_n(spill, be_pos_Spill_val));
+
/* insert reloads and wire them arbitrary*/
pset_foreach(vi->values, irn)
foreach_out_edge_safe(irn, edge, ne) {
/* all real uses must be reloaded */
DBG((raenv->dbg, LEVEL_2, " reloading before %+F\n", src));
- reload = be_reload(raenv->aenv, raenv->cls, edge->src, edge->pos, get_irn_mode(get_irn_n(spill, 0)), spill);
+ reload = be_reload(raenv->aenv, raenv->cls, edge->src, mode, spill);
+ set_irn_n(edge->src, edge->pos, reload);
/* remember the reload */
pset_insert_ptr(reloads, reload);
/* ...add new vars for each non-phi-member */
pset_foreach(spills, irn) {
- ir_node *spilled = get_irn_n(irn, 0);
+ ir_node *spilled = get_irn_n(irn, be_pos_Spill_val);
raenv->cls_vars[raenv->n_cls_vars++] = var_add_value(raenv, get_irn_node_nr(spilled), spilled);
}
}
/* add new variables for all reloads */
- pset_foreach(reloads, irn)
+ pset_foreach(reloads, irn) {
+ assert(get_irn_node_nr(irn) != 1089);
raenv->cls_vars[raenv->n_cls_vars++] = var_add_value(raenv, get_irn_node_nr(irn), irn);
+ }
del_pset(spills);
del_pset(reloads);
return is_allocation;
}
+/**
+ * Sanity-check the register allocation produced by the external allocator.
+ *
+ * For every unordered pair of variables of the current class, verify that no
+ * two interfering SSA values were assigned the same register.  On a violation
+ * the scheduled graph is dumped with suffix "ERROR" and we abort via assert
+ * so the broken allocation never reaches code emission.
+ */
+static void check_allocation(be_raext_env_t *raenv) {
+	int i, o;
+
+	for (i=0; i<raenv->n_cls_vars; ++i) {
+		var_info_t *vi1 = raenv->cls_vars[i];
+
+		if (vi1->var_nr == SET_REMOVED)
+			continue;
+
+		/* Interference is symmetric, so checking pairs with o < i suffices. */
+		for (o=0; o<i; ++o) {
+			var_info_t *vi2 = raenv->cls_vars[o];
+			ir_node *irn1, *irn2;
+
+			if (vi2->var_nr == SET_REMOVED)
+				continue;
+
+			pset_foreach(vi1->values, irn1)
+				pset_foreach(vi2->values, irn2)
+					if (values_interfere(irn1, irn2) && arch_get_irn_register(raenv->aenv, irn1) == arch_get_irn_register(raenv->aenv, irn2)) {
+						dump_ir_block_graph_sched(raenv->irg, "ERROR");
+						ir_fprintf(stdout, "SSA values %+F and %+F interfere. They belong to variable %d and %d respectively.\n", irn1, irn2, vi1->var_nr, vi2->var_nr);
+						assert(0 && "ERROR graph dumped");
+					}
+		}
+	}
+}
+
/******************************************************************************
__ __ _
| \/ | (_)
* Default values for options
*/
static void (*ssa_destr)(be_raext_env_t*) = ssa_destr_simple;
-static char callee[128] = "\"E:/user/kimohoff/ipd-registerallocator/win32/register allocator\"";
+static char callee[128] = "\"E:/user/kimohoff/public/register allocator\"";
//static char callee[128] = "/ben/kimohoff/ipd-registerallocator/register_allocator";
be_main_env_t *env = bi->main_env;
ir_graph *irg = bi->irg;
- be_raext_env_t raenv;
+ be_raext_env_t raenv;
int clsnr, clss;
var_info_t *vi;
compute_doms(irg);
+ edges_assure(irg);
raenv.irg = irg;
raenv.aenv = env->arch_env;
/* Insert copies for constraints */
handle_constraints(&raenv);
- dump_ir_block_graph_sched(irg, "-extern-constr");
+ be_dump(irg, "-extern-constr", dump_ir_block_graph_sched);
/* SSA destruction respectively transformation into "Conventional SSA" */
ssa_destr(&raenv);
- dump_ir_block_graph_sched(irg, "-extern-ssadestr");
+ be_dump(irg, "-extern-ssadestr", dump_ir_block_graph_sched);
/* Mapping of SSA-Values <--> Variables */
phi_class_compute(irg);
dump_to_file(&raenv, out);
execute(callee, out, in);
done = read_and_apply_results(&raenv, in);
+ be_abi_fix_stack_nodes(bi->abi);
ir_snprintf(in, sizeof(in), "-extern-%s-round-%d", raenv.cls->name, round);
- dump_ir_block_graph_sched(irg, in);
+ be_dump(irg, in, dump_ir_block_graph_sched);
round++;
} while (!done);
+ check_allocation(&raenv);
+
free(raenv.cls_vars);
}
- dump_ir_block_graph_sched(irg, "-extern-alloc");
+ be_dump(irg, "-extern-alloc", dump_ir_block_graph_sched);
/* Clean up */
set_foreach(raenv.vars, vi)