#include "iredges_t.h"
#include "irgwalk.h"
-#ifdef _WIN32
-#include <malloc.h>
-#else
-#include <alloca.h>
+#ifdef HAVE_MALLOC_H
+ #include <malloc.h>
+#endif
+#ifdef HAVE_ALLOCA_H
+ #include <alloca.h>
#endif
#undef is_Perm
#define is_Perm(arch_env, irn) (arch_irn_classify(arch_env, irn) == arch_irn_class_perm)
-#undef is_Call
-#define is_Call(arch_env, irn) (arch_irn_classify(arch_env, irn) == arch_irn_class_call)
/* collects statistic data about Perm nodes of one register class */
typedef struct _perm_stat_t {
	const arch_register_class_t *cls; /**< the register class these statistics belong to */
	int *perm_size_ar;                /**< per-size counters for all perms (index = size - 1) */
	int *real_perm_size_ar;           /**< per-size counters for real perms, i.e. perms that actually
	                                       need lowering because not all pairs are trivially checked */
	int *chain_len_ar;                /**< per-length counters for all chains of all perms */
	int *cycle_len_ar;                /**< per-length counters for all cycles of all perms */
	int num_perms;                    /**< total number of perms */
	int num_real_perms;               /**< total number of real (lowered) perms */
	int num_chains;                   /**< total number of chains */
	int num_cycles;                   /**< total number of cycles */
} perm_stat_t;
/* lowering walker environment */
typedef struct _lower_env_t {
	be_chordal_env_t *chord_env;  /**< the chordal environment (irg, birg, ...) */
	unsigned do_copy:1;           /**< 1 == resolve cycles with a free reg if available */
	unsigned do_stat:1;           /**< 1 == collect and print perm statistics */
	unsigned pstat_n:30;          /**< number of entries in pstat (one per register class);
	                                   only valid when do_stat is set */
	perm_stat_t **pstat;          /**< perm statistics, one entry per register class;
	                                   only valid when do_stat is set */
	DEBUG_ONLY(firm_dbg_module_t *dbg_module;)  /**< debug module, only in debug builds */
} lower_env_t;
/* holds a perm register pair */
}
/* assume worst case: all remaining pairs build a cycle or chain */
- cycle->elems = calloc((n - n_pairs_done) * 2, sizeof(cycle->elems[0]));
+ cycle->elems = xcalloc((n - n_pairs_done) * 2, sizeof(cycle->elems[0]));
cycle->n_elems = 2; /* initial number of elements is 2 */
cycle->elems[0] = pairs[start].in_reg;
cycle->elems[1] = pairs[start].out_reg;
static void lower_perm_node(ir_node *irn, void *walk_env) {
const arch_register_class_t *reg_class;
const arch_env_t *arch_env;
- firm_dbg_module_t *mod;
- lower_env_t *env = walk_env;
+ lower_env_t *env = walk_env;
+ perm_stat_t **pstat = env->pstat;
reg_pair_t *pairs;
const ir_edge_t *edge;
perm_cycle_t *cycle;
- int n, i, pn, do_copy, j;
+ int n, i, pn, do_copy, j, pstat_idx = -1;
ir_node *sched_point, *block, *in[2];
ir_node *arg1, *arg2, *res1, *res2;
ir_node *cpyxchg = NULL;
+ DEBUG_ONLY(firm_dbg_module_t *mod;)
- arch_env = env->chord_env->main_env->arch_env;
+ arch_env = env->chord_env->birg->main_env->arch_env;
do_copy = env->do_copy;
- mod = env->dbg_module;
+ DEBUG_ONLY(mod = env->dbg_module;)
block = get_nodes_block(irn);
/*
reg_class = arch_get_irn_register(arch_env, get_irn_n(irn, 0))->reg_class;
pairs = alloca(n * sizeof(pairs[0]));
+ if (env->do_stat) {
+ /* determine index in statistics */
+ for (i = 0; i < env->pstat_n; i++) {
+ if (strcmp(pstat[i]->cls->name, reg_class->name) == 0) {
+ pstat_idx = i;
+ break;
+ }
+ }
+ assert(pstat_idx >= 0 && "could not determine class index for statistics");
+
+ pstat[pstat_idx]->num_perms++;
+ pstat[pstat_idx]->perm_size_ar[n - 1]++;
+ }
+
/* build the list of register pairs (in, out) */
i = 0;
foreach_out_edge(irn, edge) {
do_copy = 0;
}
+ if (env->do_stat && get_n_checked_pairs(pairs, n) < n) {
+ pstat[pstat_idx]->num_real_perms++;
+ pstat[pstat_idx]->real_perm_size_ar[n - 1]++;
+ }
+
/* check for cycles and chains */
while (get_n_checked_pairs(pairs, n) < n) {
i = 0;
/* go to the first not-checked pair */
while (pairs[i].checked) i++;
- cycle = calloc(1, sizeof(*cycle));
+ cycle = xcalloc(1, sizeof(*cycle));
cycle = get_perm_cycle(cycle, pairs, n, i);
DB((mod, LEVEL_1, "%+F: following %s created:\n ", irn, cycle->type == PERM_CHAIN ? "chain" : "cycle"));
}
DB((mod, LEVEL_1, "\n"));
+ /* statistics */
+ if (env->do_stat) {
+ int n_idx = cycle->n_elems - 1;
+ if (cycle->type == PERM_CHAIN) {
+ pstat[pstat_idx]->num_chains++;
+ pstat[pstat_idx]->chain_len_ar[n_idx]++;
+ }
+ else {
+ pstat[pstat_idx]->num_cycles++;
+ pstat[pstat_idx]->cycle_len_ar[n_idx]++;
+ }
+ }
+
/* We don't need to do anything if we have a Perm with two
elements which represents a cycle, because those nodes
already represent exchange nodes */
//TODO: - iff PERM_CYCLE && do_copy -> determine free temp reg and insert copy to/from it before/after
// the copy cascade (this reduces the cycle into a chain)
- /* build copy/swap nodes */
- for (i = 0; i < cycle->n_elems - 1; i++) {
+ /* build copy/swap nodes from back to front */
+ for (i = cycle->n_elems - 2; i >= 0; i--) {
arg1 = get_node_for_register(pairs, n, cycle->elems[i], 0);
arg2 = get_node_for_register(pairs, n, cycle->elems[i + 1], 0);
in[0] = arg1;
in[1] = arg2;
+ /* At this point we have to handle the following problem: */
+ /* */
+ /* If we have a cycle with more than two elements, then */
+ /* this could correspond to the following Perm node: */
+ /* */
+ /* +----+ +----+ +----+ */
+ /* | r1 | | r2 | | r3 | */
+ /* +-+--+ +-+--+ +--+-+ */
+ /* | | | */
+ /* | | | */
+ /* +-+--------+---------+-+ */
+ /* | Perm | */
+ /* +-+--------+---------+-+ */
+ /* | | | */
+ /* | | | */
+ /* +-+--+ +-+--+ +--+-+ */
+ /* |Proj| |Proj| |Proj| */
+ /* | r2 | | r3 | | r1 | */
+ /* +----+ +----+ +----+ */
+ /* */
+ /* This node is about to be split up into two 2x Perm's */
+ /* for which we need 4 Proj's and the one additional Proj */
+ /* of the first Perm has to be one IN of the second. So in */
+ /* general we need to create one additional Proj for each */
+ /* "middle" Perm and set this to one in node of the successor */
+ /* Perm. */
+
DBG((mod, LEVEL_1, "%+F creating exchange node (%+F, %s) and (%+F, %s) with\n",
irn, arg1, cycle->elems[i]->name, arg2, cycle->elems[i + 1]->name));
DBG((mod, LEVEL_1, "%+F (%+F, %s) and (%+F, %s)\n",
cpyxchg = be_new_Perm(reg_class, env->chord_env->irg, block, 2, in);
- sched_remove(res1);
+ if (i > 0) {
+ /* cycle is not done yet */
+ int pidx = get_pairidx_for_regidx(pairs, n, cycle->elems[i]->index, 0);
+
+ /* create intermediate proj */
+ res1 = new_r_Proj(get_irn_irg(irn), block, cpyxchg, get_irn_mode(res1), 0);
+
+ /* set as in for next Perm */
+ pairs[pidx].in_node = res1;
+ }
+ else {
+ sched_remove(res1);
+ }
+
sched_remove(res2);
set_Proj_pred(res2, cpyxchg);
arch_set_irn_register(arch_env, res2, cycle->elems[i + 1]);
arch_set_irn_register(arch_env, res1, cycle->elems[i]);
+
+ /* insert the copy/exchange node in schedule after the magic schedule node (see above) */
+ sched_add_after(sched_point, cpyxchg);
+
+ DBG((mod, LEVEL_1, "replacing %+F with %+F, placed new node after %+F\n", irn, cpyxchg, sched_point));
+
+ /* set the new scheduling point */
+ sched_point = res1;
}
else {
DBG((mod, LEVEL_1, "%+F creating copy node (%+F, %s) -> (%+F, %s)\n",
/* exchange copy node and proj */
exchange(res2, cpyxchg);
- }
- /* insert the copy/exchange node in schedule after the magic schedule node (see above) */
- sched_add_after(sched_point, cpyxchg);
+ /* insert the copy/exchange node in schedule after the magic schedule node (see above) */
+ sched_add_after(sched_point, cpyxchg);
- DBG((mod, LEVEL_1, "replacing %+F with %+F, placed new node after %+F\n", irn, cpyxchg, sched_point));
+ /* set the new scheduling point */
+ sched_point = cpyxchg;
+ }
}
- free(cycle->elems);
+ free((void *) cycle->elems);
free(cycle);
}
-/**
- * Adds Projs to keep nodes for each register class, which eats the
- * caller saved registers.
- * Note: The caller has to make sure, that call is a Call
- *
- * @param call The Call node
- * @param walk_env The walker environment
- */
-static void lower_call_node(ir_node *call, const void *walk_env) {
- const arch_env_t *arch_env = walk_env;
- int bitset_idx = 0;
- int set_size = 0;
- arch_isa_t *isa = arch_env_get_isa(arch_env);
- const ir_node *proj_T = NULL;
- ir_node *block = get_nodes_block(call);
- const arch_register_class_t *reg_class;
- int i, j, pn, keep_arity;
- ir_node **in_keep;
- bitset_t *proj_set;
- const ir_edge_t *edge;
- const arch_register_t *reg;
-
- /* Prepare the bitset where we store the projnums which are already in use*/
- for (i = 0; i < arch_isa_get_n_reg_class(isa); i++) {
- reg_class = arch_isa_get_reg_class(isa, i);
- set_size += arch_register_class_n_regs(reg_class);
+static int get_n_out_edges(const ir_node *irn) {
+ const ir_edge_t *edge;
+ int cnt = 0;
+
+ foreach_out_edge(irn, edge) {
+ cnt++;
}
- in_keep = malloc(set_size * sizeof(ir_node *));
+ return cnt;
+}
- proj_set = bitset_malloc(set_size);
- bitset_clear_all(proj_set);
+static ir_node *belower_skip_proj(ir_node *irn) {
+ while(is_Proj(irn))
+ irn = get_Proj_pred(irn);
+ return irn;
+}
- /* check if there is a ProjT node and which arguments are used */
- foreach_out_edge(call, edge) {
- if (get_irn_mode(get_edge_src_irn(edge)) == mode_T)
- proj_T = get_edge_src_irn(edge);
- }
/**
 * Rewires an input of irn (Projs skipped first) from old to nw.
 *
 * NOTE(review): only the FIRST input matching old is replaced (the loop
 * breaks after the first hit); if old occurs several times in the
 * in-array the remaining occurrences stay untouched — presumably callers
 * only ever have a single such edge, verify against call sites.
 *
 * @param irn  the node whose input is rewired (Projs are skipped)
 * @param old  the input node to be replaced
 * @param nw   the new input node
 */
static void fix_in(ir_node *irn, ir_node *old, ir_node *nw) {
	int i, n;

	/* operate on the real node, not on a Proj of it */
	irn = belower_skip_proj(irn);
	n = get_irn_arity(irn);

	for (i = 0; i < n; i++) {
		if (get_irn_n(irn, i) == old) {
			set_irn_n(irn, i, nw);
			break;
		}
	}
}
+
/**
 * Inserts a Copy (and a Keep resp. CopyKeep) to enforce a
 * should_be_different constraint between irn and other_different.
 *
 * @param irn              the constrained node
 * @param birg             the be irg (provides arch_env and irg)
 * @param other_different  the node irn's result must differ from
 */
static void gen_assure_different_pattern(ir_node *irn, be_irg_t *birg, ir_node *other_different) {
	const arch_env_t *arch_env = birg->main_env->arch_env;
	ir_node *in[2], *keep, *cpy, *temp;
	ir_node *block = get_nodes_block(irn);
	const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, other_different, -1);
	FIRM_DBG_REGISTER(firm_dbg_module_t *mod, "firm.be.lower");

	/* nothing to enforce against ignore nodes or non-data nodes */
	if (arch_irn_is(arch_env, other_different, ignore) || ! mode_is_datab(get_irn_mode(other_different))) {
		DBG((mod, LEVEL_1, "ignore constraint for %+F because other_irn is ignore or not a datab node\n", irn));
		return;
	}

	/* Make a not spillable copy of the different node */
	/* this is needed because the different irn could be */
	/* in block far far away */
	/* The copy is optimized later if not needed */

	/* the copy initially points to an Unknown placeholder; the real
	   operand is set after the edges have been rerouted (see below) */
	temp = new_rd_Unknown(birg->irg, get_irn_mode(other_different));
	cpy = be_new_Copy(cls, birg->irg, block, temp);
	be_node_set_flags(cpy, BE_OUT_POS(0), arch_irn_flags_dont_spill);

	in[0] = irn;
	in[1] = cpy;

	/* Let the irn use the copy instead of the old other_different */
	fix_in(irn, other_different, cpy);

	/* Add the Keep resp. CopyKeep and reroute the users */
	/* of the other_different irn in case of CopyKeep. */
	if (get_n_out_edges(other_different) == 0) {
		keep = be_new_Keep(cls, birg->irg, block, 2, in);
	}
	else {
		keep = be_new_CopyKeep_single(cls, birg->irg, block, cpy, irn, get_irn_mode(other_different));
		be_node_set_reg_class(keep, 1, cls);
		edges_reroute(other_different, keep, birg->irg);
	}

	/* after rerouting: let the copy point to the other_different irn */
	/* (done after edges_reroute so the copy's own edge is not rerouted) */
	set_irn_n(cpy, 0, other_different);

	DBG((mod, LEVEL_1, "created %+F for %+F to assure should_be_different\n", keep, irn));
}
+
+/**
+ * Checks if node has a should_be_different constraint in output
+ * and adds a Keep then to assure the constraint.
+ */
+static void assure_different_constraints(ir_node *irn, be_irg_t *birg) {
+ const arch_env_t *arch_env = birg->main_env->arch_env;
+ const arch_register_req_t *req;
+ arch_register_req_t req_temp;
+ int i, n;
+
+ req = arch_get_register_req(arch_env, &req_temp, irn, -1);
- /* ok, we found some caller save register which are not in use but must be saved */
- if (keep_arity) {
- be_new_Keep(reg_class, current_ir_graph, block, keep_arity, in_keep);
+ if (req) {
+ if (arch_register_req_is(req, should_be_different)) {
+ gen_assure_different_pattern(irn, birg, req->other_different);
+ }
+ else if (arch_register_req_is(req, should_be_different_from_all)) {
+ n = get_irn_arity(belower_skip_proj(irn));
+ for (i = 0; i < n; i++) {
+ gen_assure_different_pattern(irn, birg, get_irn_n(belower_skip_proj(irn), i));
+ }
}
}
-
- bitset_free(proj_set);
- return;
}
/**
- * Calls the backend code generator functions to lower Spill and
- * Reload nodes into Store and Load. The backend is fully responsible
- * for creating the new nodes and setting their input correct.
- * Note: The caller of this has to make sure that irn is a Spill
- * or Reload!
+ * Calls the functions to assure register constraints.
*
- * @param irn The Spill/Reload node
+ * @param irn The node to be checked for lowering
* @param walk_env The walker environment
*/
-static void lower_spill_reload(ir_node *irn, void *walk_env) {
- lower_env_t *env = walk_env;
- arch_code_generator_t *cg = env->chord_env->main_env->cg;
- const arch_env_t *aenv = env->chord_env->main_env->arch_env;
- ir_node *res = NULL;
- ir_node *sched_point;
-
- if (be_is_Spill(irn) && cg->impl->lower_spill) {
- res = cg->impl->lower_spill(cg, irn);
- }
- else if (be_is_Reload(irn) && cg->impl->lower_reload) {
- res = cg->impl->lower_reload(cg, irn);
- if (res && res != irn) {
- /* copy the result register from the reload to the load */
- arch_set_irn_register(aenv, res, arch_get_irn_register(aenv, irn));
- }
- }
+static void assure_constraints_walker(ir_node *irn, void *walk_env) {
+ if (is_Block(irn))
+ return;
- if (res && res != irn) {
- sched_point = sched_prev(irn);
- sched_remove(irn);
- exchange(irn, res);
- sched_add_after(sched_point, res);
- }
- else {
- DBG((env->dbg_module, LEVEL_1, "node %+F not lowered\n", irn));
- }
+ if (mode_is_datab(get_irn_mode(irn)))
+ assure_different_constraints(irn, walk_env);
return;
}
+
/**
 * Walks blockwise over all nodes of the irg and assures register
 * constraints (should_be_different) via assure_constraints_walker.
 *
 * @param birg The birg structure containing the irg
 */
void assure_constraints(be_irg_t *birg) {
	irg_walk_blkwise_graph(birg->irg, NULL, assure_constraints_walker, birg);
}
+
/**
 * Walker: calls the corresponding lowering function for the node.
 *
 * @param irn       the node to be checked for lowering
 * @param walk_env  the walker environment (a lower_env_t *)
 */
static void lower_nodes_after_ra_walker(ir_node *irn, void *walk_env) {
	lower_env_t *env = walk_env;
	const arch_env_t *arch_env = env->chord_env->birg->main_env->arch_env;

	/* only Perm nodes (neither Blocks nor Projs) are lowered here */
	if (! is_Block(irn) && ! is_Proj(irn) && is_Perm(arch_env, irn)) {
		lower_perm_node(irn, walk_env);
	}
}
+static void lower_print_perm_stat(lower_env_t *env) {
+ int i, j, total_len_chain, total_len_cycle, total_size_perm, total_size_real_perm;
+ printf("=== IRG: %s ===\n", get_entity_name(get_irg_entity(env->chord_env->irg)));
+ for (i = 0; i < env->pstat_n; i++) {
+ if (env->pstat[i]->num_perms == 0)
+ continue;
-/**
- * Walks over all blocks in an irg and performs lowering need
- * to be done before scheduling (e.g. call lowering).
- *
- * @param chord_env The chordal environment containing the irg
- * @param do_copy 1 == resolve cycles with a free reg if available
- */
-void lower_nodes_before_sched(ir_graph *irg, const void *env) {
- irg_walk_blkwise_graph(irg, NULL, lower_nodes_before_sched_walker, (void *)env);
-}
+ printf("CLASS: %s\n", env->pstat[i]->cls->name);
+ printf("# total perms: %d (size:num -> 1:%d", env->pstat[i]->num_perms, env->pstat[i]->perm_size_ar[0]);
+
+ total_size_perm = env->pstat[i]->perm_size_ar[0];
+ for (j = 1; j < env->pstat[i]->cls->n_regs; j++) {
+ total_size_perm += (j + 1) * env->pstat[i]->perm_size_ar[j];
+ printf(", %d:%d", j + 1, env->pstat[i]->perm_size_ar[j]);
+ }
+ printf(")\n");
+ printf("avg perm size: %.2f\n", env->pstat[i]->num_perms ? (float)total_size_perm / (float)env->pstat[i]->num_perms : 0);
+
+ printf("# real perms: %d (size:num -> 1:%d", env->pstat[i]->num_real_perms, env->pstat[i]->real_perm_size_ar[0]);
+
+ total_size_real_perm = env->pstat[i]->real_perm_size_ar[0];
+ for (j = 1; j < env->pstat[i]->cls->n_regs; j++) {
+ total_size_real_perm += (j + 1) * env->pstat[i]->real_perm_size_ar[j];
+ printf(", %d:%d", j + 1, env->pstat[i]->real_perm_size_ar[j]);
+ }
+ printf(")\n");
+ printf("avg real perm size: %.2f\n", env->pstat[i]->num_real_perms ? (float)total_size_real_perm / (float)env->pstat[i]->num_real_perms : 0);
+ printf("# total chains: %d (lenght:num -> 1:%d", env->pstat[i]->num_chains, env->pstat[i]->chain_len_ar[0]);
+ total_len_chain = env->pstat[i]->chain_len_ar[0];
+
+ for (j = 1; j < env->pstat[i]->cls->n_regs; j++) {
+ total_len_chain += (j + 1) * env->pstat[i]->chain_len_ar[j];
+ printf(", %d:%d", j + 1, env->pstat[i]->chain_len_ar[j]);
+ }
+ printf(")\n");
+ printf("avg chain length: %.2f\n", env->pstat[i]->num_chains ? (float)total_len_chain / (float)env->pstat[i]->num_chains : 0);
+ printf("avg chains/perm: %.2f\n", env->pstat[i]->num_real_perms ? (float)env->pstat[i]->num_chains / (float)env->pstat[i]->num_real_perms : 0);
+
+ printf("# total cycles: %d (length:num -> 1:%d", env->pstat[i]->num_cycles, env->pstat[i]->cycle_len_ar[0]);
+
+ total_len_cycle = env->pstat[i]->cycle_len_ar[0];
+ for (j = 1; j < env->pstat[i]->cls->n_regs; j++) {
+ total_len_cycle += (j + 1) * env->pstat[i]->cycle_len_ar[j];
+ printf(", %d:%d", j + 1, env->pstat[i]->cycle_len_ar[j]);
+ }
+ printf(")\n");
+ printf("avg cycle length: %.2f\n", env->pstat[i]->num_cycles ? (float)total_len_cycle / (float)env->pstat[i]->num_cycles : 0);
+ printf("avg cycles/perm: %.2f\n", env->pstat[i]->num_real_perms ? (float)env->pstat[i]->num_cycles / (float)env->pstat[i]->num_real_perms : 0);
+ }
+}
/**
* Walks over all blocks in an irg and performs lowering need to be
- * done after register allocation (e.g. perm and spill/reload lowering).
+ * done after register allocation (e.g. perm lowering).
*
* @param chord_env The chordal environment containing the irg
* @param do_copy 1 == resolve cycles with a free reg if available
*/
-void lower_nodes_after_ra(be_chordal_env_t *chord_env, int do_copy) {
+void lower_nodes_after_ra(be_chordal_env_t *chord_env, int do_copy, int do_stat) {
lower_env_t env;
env.chord_env = chord_env;
env.do_copy = do_copy;
- env.dbg_module = firm_dbg_register("firm.be.lower");
+ env.do_stat = do_stat;
+ FIRM_DBG_REGISTER(env.dbg_module, "firm.be.lower");
+
+ /* if we want statistics: allocate memory for the data and initialize with 0 */
+ if (do_stat) {
+ const arch_isa_t *isa = chord_env->birg->main_env->arch_env->isa;
+ int i, n = arch_isa_get_n_reg_class(isa);
+
+ env.pstat = alloca(n * sizeof(env.pstat[0]));
+ env.pstat_n = n;
+
+ for (i = 0; i < n; i++) {
+ const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
+ int n_regs = cls->n_regs;
+
+ env.pstat[i] = alloca(sizeof(*(env.pstat[0])));
+ memset(env.pstat[i], 0, sizeof(*(env.pstat[0])));
+
+ env.pstat[i]->perm_size_ar = alloca(n_regs * sizeof(env.pstat[i]->perm_size_ar[0]));
+ env.pstat[i]->real_perm_size_ar = alloca(n_regs * sizeof(env.pstat[i]->real_perm_size_ar[0]));
+ env.pstat[i]->chain_len_ar = alloca(n_regs * sizeof(env.pstat[i]->chain_len_ar[0]));
+ env.pstat[i]->cycle_len_ar = alloca(n_regs * sizeof(env.pstat[i]->cycle_len_ar[0]));
+
+ memset(env.pstat[i]->perm_size_ar, 0, n_regs * sizeof(env.pstat[i]->perm_size_ar[0]));
+ memset(env.pstat[i]->real_perm_size_ar, 0, n_regs * sizeof(env.pstat[i]->real_perm_size_ar[0]));
+ memset(env.pstat[i]->chain_len_ar, 0, n_regs * sizeof(env.pstat[i]->chain_len_ar[0]));
+ memset(env.pstat[i]->cycle_len_ar, 0, n_regs * sizeof(env.pstat[i]->cycle_len_ar[0]));
+
+ env.pstat[i]->cls = cls;
+ }
+ }
irg_walk_blkwise_graph(chord_env->irg, NULL, lower_nodes_after_ra_walker, &env);
+
+ if (do_stat) {
+ lower_print_perm_stat(&env);
+ }
}
#undef is_Perm
-#undef is_Call