#include "ircons.h"
#include "debug.h"
#include "irhooks.h"
+#include "xmalloc.h"
#include "bearch.h"
#include "belower.h"
#include "benode_t.h"
-#include "bechordal_t.h"
#include "besched_t.h"
#include "bestat.h"
+#include "bessaconstr.h"
+#include "irnodeset.h"
#include "irgmod.h"
#include "iredges_t.h"
#include "irgwalk.h"
-#ifdef HAVE_MALLOC_H
- #include <malloc.h>
-#endif
-#ifdef HAVE_ALLOCA_H
- #include <alloca.h>
-#endif
-
-#undef is_Perm
-#define is_Perm(arch_env, irn) (arch_irn_classify(arch_env, irn) == arch_irn_class_perm)
+#undef KEEP_ALIVE_COPYKEEP_HACK
/* associates op with its copies and CopyKeeps */
typedef struct {
	ir_node *op; /* an irn which must be different */
-	pset *copies; /* all non-spillable copies of this irn */
+	ir_nodeset_t copies; /* all non-spillable copies of this irn */
+	const arch_register_class_t *cls; /* register class of op (and hence of all its copies) */
} op_copy_assoc_t;
/* environment for constraints */
be_irg_t *birg;
pset *op_set;
struct obstack obst;
+ DEBUG_ONLY(firm_dbg_module_t *dbg;)
} constraint_env_t;
/* lowering walker environment */
typedef struct _lower_env_t {
-	be_chordal_env_t *chord_env;
-	unsigned do_copy:1;
+	be_irg_t *birg;             /* backend irg (provides irg and main_env) */
+	const arch_env_t *arch_env; /* architecture environment, cached from birg */
+	unsigned do_copy : 1;       /* 1 == resolve cycles with a free reg if available */
	DEBUG_ONLY(firm_dbg_module_t *dbg_module;)
} lower_env_t;
const arch_register_class_t *reg_class;
const arch_env_t *arch_env;
lower_env_t *env = walk_env;
- int real_size = 0;
- int n, i, pn, do_copy, j, n_ops;
+ int real_size = 0;
+ int keep_perm = 0;
+ int n, i, pn, do_copy, j, n_ops;
reg_pair_t *pairs;
const ir_edge_t *edge;
perm_cycle_t *cycle;
ir_node *cpyxchg = NULL;
DEBUG_ONLY(firm_dbg_module_t *mod;)
- arch_env = env->chord_env->birg->main_env->arch_env;
+ arch_env = env->arch_env;
do_copy = env->do_copy;
DEBUG_ONLY(mod = env->dbg_module;)
block = get_nodes_block(irn);
/* We have to check for a special case:
The in-node could be a Proj from a Perm. In this case,
we need to correct the projnum */
- if (is_Perm(arch_env, pairs[i].in_node) && is_Proj(pairs[i].in_node)) {
+ if (be_is_Perm(pairs[i].in_node) && is_Proj(pairs[i].in_node)) {
set_Proj_proj(pairs[i].out_node, get_Proj_proj(pairs[i].in_node));
}
sched_remove(pairs[i].out_node);
/* reroute the edges from the proj to the argument */
- edges_reroute(pairs[i].out_node, pairs[i].in_node, env->chord_env->irg);
+ exchange(pairs[i].out_node, pairs[i].in_node);
+ //edges_reroute(pairs[i].out_node, pairs[i].in_node, env->birg->irg);
+ //set_irn_n(pairs[i].out_node, 0, new_Bad());
pairs[i].checked = 1;
}
}
DB((mod, LEVEL_1, "\n"));
- /* We don't need to do anything if we have a Perm with two
+ /*
+ We don't need to do anything if we have a Perm with two
elements which represents a cycle, because those nodes
- already represent exchange nodes */
+ already represent exchange nodes
+ */
if (n == 2 && cycle->type == PERM_CYCLE) {
free(cycle);
+ keep_perm = 1;
continue;
}
DBG((mod, LEVEL_1, "%+F (%+F, %s) and (%+F, %s)\n",
irn, res1, cycle->elems[i]->name, res2, cycle->elems[i + 1]->name));
- cpyxchg = be_new_Perm(reg_class, env->chord_env->irg, block, 2, in);
+ cpyxchg = be_new_Perm(reg_class, env->birg->irg, block, 2, in);
n_ops++;
if (i > 0) {
DBG((mod, LEVEL_1, "%+F creating copy node (%+F, %s) -> (%+F, %s)\n",
irn, arg1, cycle->elems[i]->name, res2, cycle->elems[i + 1]->name));
- cpyxchg = be_new_Copy(reg_class, env->chord_env->irg, block, arg1);
+ cpyxchg = be_new_Copy(reg_class, env->birg->irg, block, arg1);
arch_set_irn_register(arch_env, cpyxchg, cycle->elems[i + 1]);
n_ops++;
free(cycle);
}
-
-
/* remove the perm from schedule */
- sched_remove(irn);
+ if (! keep_perm) {
+ sched_remove(irn);
+ be_kill_node(irn);
+ }
}
return irn;
}
+/**
+ * Searches the schedule backwards, starting just before @p irn, for an
+ * already existing non-spillable Copy of @p op inside the same block.
+ * Only the run of be_Copy nodes scheduled immediately before @p irn is
+ * inspected; the walk stops at the first non-Copy node or block border.
+ *
+ * @param env  the constraint environment (provides the arch_env)
+ * @param irn  the node before which the copy must already be scheduled
+ * @param op   the operand the copy has to duplicate
+ * @return the matching Copy node or NULL if there is none
+ */
+static ir_node *find_copy(constraint_env_t *env, ir_node *irn, ir_node *op) {
+	const arch_env_t *arch_env = env->birg->main_env->arch_env;
+	ir_node *block = get_nodes_block(irn);
+	ir_node *cur_node;
+
+	for (cur_node = sched_prev(irn);
+		! is_Block(cur_node) && be_is_Copy(cur_node) && get_nodes_block(cur_node) == block;
+		cur_node = sched_prev(cur_node))
+	{
+		/* the copy must duplicate op and must itself be marked non-spillable */
+		if (be_get_Copy_op(cur_node) == op && arch_irn_is(arch_env, cur_node, dont_spill))
+			return cur_node;
+	}
+
+	return NULL;
+}
+
/**
 * Keeps the value other_different live across irn by giving it a
 * non-spillable Copy (an existing one scheduled directly before irn is
 * reused, see find_copy()) plus a Keep/CopyKeep scheduled after irn.
 * The copies are recorded in env->op_set, keyed by other_different,
 * for the later SSA reconstruction in assure_constraints().
 */
static void gen_assure_different_pattern(ir_node *irn, ir_node *other_different, constraint_env_t *env) {
	be_irg_t *birg = env->birg;
	pset *op_set = env->op_set;
	const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, other_different, -1);
	ir_node *in[2], *keep, *cpy;
	op_copy_assoc_t key, *entry;
-	FIRM_DBG_REGISTER(firm_dbg_module_t *mod, "firm.be.lower");
+	DEBUG_ONLY(firm_dbg_module_t *mod = env->dbg;)
	/* ignore-registers and non-data values need no copy */
	if (arch_irn_is(arch_env, other_different, ignore) || ! mode_is_datab(get_irn_mode(other_different))) {
		DBG((mod, LEVEL_1, "ignore constraint for %+F because other_irn is ignore or not a datab node\n", irn));
	/* in block far far away */
	/* The copy is optimized later if not needed */
-	cpy = be_new_Copy(cls, birg->irg, block, other_different);
-	be_node_set_flags(cpy, BE_OUT_POS(0), arch_irn_flags_dont_spill);
+	/* check whether such a copy already exists in the schedule immediately before */
+	cpy = find_copy(env, belower_skip_proj(irn), other_different);
+	if (! cpy) {
+		cpy = be_new_Copy(cls, birg->irg, block, other_different);
+		be_node_set_flags(cpy, BE_OUT_POS(0), arch_irn_flags_dont_spill);
+		DBG((mod, LEVEL_1, "created non-spillable %+F for value %+F\n", cpy, other_different));
+	}
+	else {
+		DBG((mod, LEVEL_1, "using already existing %+F for value %+F\n", cpy, other_different));
+	}
	in[0] = irn;
	in[1] = cpy;
	be_node_set_reg_class(keep, 1, cls);
	}
-	/* let the copy point to the other_different irn */
-	set_irn_n(cpy, 0, other_different);
+	DBG((mod, LEVEL_1, "created %+F(%+F, %+F)\n\n", keep, irn, cpy));
	/* insert copy and keep into schedule */
	assert(sched_is_scheduled(irn) && "need schedule to assure constraints");
-	sched_add_before(belower_skip_proj(irn), cpy);
+	/* a reused copy (found via find_copy) is already scheduled */
+	if (! sched_is_scheduled(cpy))
+		sched_add_before(belower_skip_proj(irn), cpy);
	sched_add_after(irn, keep);
	/* insert the other different and its copies into the set */
	key.op = other_different;
-	key.copies = NULL;
-	entry = pset_find(op_set, &key, HASH_PTR(other_different));
+	entry = pset_find(op_set, &key, nodeset_hash(other_different));
	if (! entry) {
		entry = obstack_alloc(&env->obst, sizeof(*entry));
-		entry->copies = pset_new_ptr_default();
+		ir_nodeset_init(&entry->copies);
		entry->op = other_different;
+		entry->cls = cls;
	}
	/* insert copy */
-	pset_insert_ptr(entry->copies, cpy);
+	ir_nodeset_insert(&entry->copies, cpy);
	/* insert keep in case of CopyKeep */
-	if (be_is_CopyKeep(keep))
-		pset_insert_ptr(entry->copies, keep);
-
-	pset_insert(op_set, entry, HASH_PTR(other_different));
+	if (be_is_CopyKeep(keep)) {
+		ir_nodeset_insert(&entry->copies, keep);
+	}
-	DBG((mod, LEVEL_1, "created %+F for %+F to assure should_be_different\n", keep, irn));
+	pset_insert(op_set, entry, nodeset_hash(other_different));
}
/**
 * Checks a node for should_be_different / should_be_different_from_all
 * register requirements on its output and calls
 * gen_assure_different_pattern() for every affected operand.
 */
static void assure_different_constraints(ir_node *irn, constraint_env_t *env) {
	const arch_register_req_t *req;
-	arch_register_req_t req_temp;
-	req = arch_get_register_req(env->birg->main_env->arch_env, &req_temp, irn, -1);
+	req = arch_get_register_req(env->birg->main_env->arch_env, irn, -1);
-	if (req) {
-		if (arch_register_req_is(req, should_be_different)) {
-			gen_assure_different_pattern(irn, req->other_different, env);
-		}
-		else if (arch_register_req_is(req, should_be_different_from_all)) {
-			int i, n = get_irn_arity(belower_skip_proj(irn));
-			for (i = 0; i < n; i++) {
-				gen_assure_different_pattern(irn, get_irn_n(belower_skip_proj(irn), i), env);
-			}
+	if (arch_register_req_is(req, should_be_different)) {
+		/* other_different is now an input position, not a node reference */
+		ir_node *different_from = get_irn_n(irn, req->other_different);
+		gen_assure_different_pattern(irn, different_from, env);
+	} else if (arch_register_req_is(req, should_be_different_from_all)) {
+		int i, n = get_irn_arity(belower_skip_proj(irn));
+		for (i = 0; i < n; i++) {
+			gen_assure_different_pattern(irn, get_irn_n(belower_skip_proj(irn), i), env);
		}
	}
}
if (is_Block(irn))
return;
- if (mode_is_datab(get_irn_mode(irn)))
+ if (sched_is_scheduled(irn) && mode_is_datab(get_irn_mode(irn)))
assure_different_constraints(irn, walk_env);
return;
}
+/**
+ * Melt all copykeeps pointing to the same node
+ * (or Projs of the same node), copying the same operand.
+ *
+ * For every operand recorded in cenv->op_set, CopyKeeps hanging off the
+ * same mode_T node are merged into a single CopyKeep carrying all their
+ * keep inputs; the superseded CopyKeeps are unscheduled and killed.
+ */
+static void melt_copykeeps(constraint_env_t *cenv) {
+	op_copy_assoc_t *entry;
+
+	/* for all recorded operands */
+	foreach_pset(cenv->op_set, entry) {
+		int idx, num_ck;
+		ir_node *cp;
+		struct obstack obst;
+		ir_nodeset_iterator_t iter;
+		ir_node **ck_arr, **melt_arr;
+
+		obstack_init(&obst);
+
+		/* collect all copykeeps */
+		num_ck = idx = 0;
+		foreach_ir_nodeset(&entry->copies, cp, iter) {
+			if (be_is_CopyKeep(cp)) {
+				obstack_grow(&obst, &cp, sizeof(cp));
+				++num_ck;
+			}
+#ifdef KEEP_ALIVE_COPYKEEP_HACK
+			else {
+				set_irn_mode(cp, mode_ANY);
+				keep_alive(cp);
+			}
+#endif /* KEEP_ALIVE_COPYKEEP_HACK */
+		}
+
+		/* compare each copykeep with all other copykeeps */
+		ck_arr = (ir_node **)obstack_finish(&obst);
+		for (idx = 0; idx < num_ck; ++idx) {
+			ir_node *ref, *ref_mode_T;
+
+			if (ck_arr[idx]) {
+				int j, n_melt;
+				ir_node **new_ck_in;
+				ir_node *new_ck;
+				ir_node *sched_pt = NULL;
+
+				n_melt = 1;
+				ref = ck_arr[idx];
+				ref_mode_T = skip_Proj(get_irn_n(ref, 1));
+				obstack_grow(&obst, &ref, sizeof(ref));
+
+				DBG((cenv->dbg, LEVEL_1, "Trying to melt %+F:\n", ref));
+
+				/* check for copykeeps pointing to the same mode_T node as the reference copykeep */
+				for (j = 0; j < num_ck; ++j) {
+					ir_node *cur_ck = ck_arr[j];
+
+					if (j != idx && cur_ck && skip_Proj(get_irn_n(cur_ck, 1)) == ref_mode_T) {
+						obstack_grow(&obst, &cur_ck, sizeof(cur_ck));
+						ir_nodeset_remove(&entry->copies, cur_ck);
+						DBG((cenv->dbg, LEVEL_1, "\t%+F\n", cur_ck));
+						ck_arr[j] = NULL;
+						++n_melt;
+						sched_remove(cur_ck);
+					}
+				}
+				ck_arr[idx] = NULL;
+
+				/* check, if we found some candidates for melting */
+				if (n_melt == 1) {
+					DBG((cenv->dbg, LEVEL_1, "\tno candidate found\n"));
+					/* BUGFIX: discard the pending obstack object (it already
+					 * holds ref); otherwise it stays open and is prepended to
+					 * the melt_arr a later iteration gets from obstack_finish,
+					 * so melt_arr[0..n_melt-1] would reference stale nodes and
+					 * kill CopyKeeps that were never melting candidates. */
+					obstack_free(&obst, obstack_finish(&obst));
+					continue;
+				}
+
+				ir_nodeset_remove(&entry->copies, ref);
+				sched_remove(ref);
+
+				melt_arr = (ir_node **)obstack_finish(&obst);
+				/* melt all found copykeeps */
+				NEW_ARR_A(ir_node *, new_ck_in, n_melt);
+				for (j = 0; j < n_melt; ++j) {
+					new_ck_in[j] = get_irn_n(melt_arr[j], 1);
+
+					/* now, we can kill the melted keep, except the */
+					/* ref one, we still need some information      */
+					if (melt_arr[j] != ref)
+						be_kill_node(melt_arr[j]);
+				}
+
+#ifdef KEEP_ALIVE_COPYKEEP_HACK
+				new_ck = be_new_CopyKeep(entry->cls, cenv->birg->irg, get_nodes_block(ref), be_get_CopyKeep_op(ref), n_melt, new_ck_in, mode_ANY);
+				keep_alive(new_ck);
+#else
+				new_ck = be_new_CopyKeep(entry->cls, cenv->birg->irg, get_nodes_block(ref), be_get_CopyKeep_op(ref), n_melt, new_ck_in, get_irn_mode(ref));
+#endif /* KEEP_ALIVE_COPYKEEP_HACK */
+
+				/* set register class for all kept inputs (input 0 is the copied op) */
+				for (j = 1; j <= n_melt; ++j)
+					be_node_set_reg_class(new_ck, j, entry->cls);
+
+				ir_nodeset_insert(&entry->copies, new_ck);
+
+				/* find scheduling point */
+				if (get_irn_mode(ref_mode_T) == mode_T) {
+					/* walk along the Projs */
+					for (sched_pt = sched_next(ref_mode_T); is_Proj(sched_pt) || be_is_Keep(sched_pt) || be_is_CopyKeep(sched_pt); sched_pt = sched_next(sched_pt))
+						/* just walk along the schedule until a non-Proj/Keep/CopyKeep node is found */ ;
+				}
+				else {
+					sched_pt = ref_mode_T;
+				}
+
+				sched_add_before(sched_pt, new_ck);
+				DBG((cenv->dbg, LEVEL_1, "created %+F, scheduled before %+F\n", new_ck, sched_pt));
+				/* finally: kill the reference copykeep */
+				be_kill_node(ref);
+			}
+		}
+
+		obstack_free(&obst, NULL);
+	}
+}
/**
* Walks over all nodes to assure register constraints.
void assure_constraints(be_irg_t *birg) {
	constraint_env_t cenv;
	op_copy_assoc_t *entry;
-	dom_front_info_t *dom;
	ir_node **nodes;
-	FIRM_DBG_REGISTER(firm_dbg_module_t *mod, "firm.be.lower");
+	FIRM_DBG_REGISTER(firm_dbg_module_t *mod, "firm.be.lower.constr");
+	/* the SSA reconstruction below needs dominance information */
+	be_assure_dom_front(birg);
+
+	DEBUG_ONLY(cenv.dbg = mod;)
	cenv.birg = birg;
	cenv.op_set = new_pset(cmp_op_copy_assoc, 16);
	obstack_init(&cenv.obst);
	irg_walk_blkwise_graph(birg->irg, NULL, assure_constraints_walker, &cenv);
-	/* introduce copies needs dominance information */
-	dom = be_compute_dominance_frontiers(birg->irg);
+	/* melt copykeeps, pointing to projs of */
+	/* the same mode_T node and keeping the */
+	/* same operand */
+	melt_copykeeps(&cenv);
	/* for all operands recorded by the walker */
	foreach_pset(cenv.op_set, entry) {
		int n;
		ir_node *cp;
+		ir_nodeset_iterator_t iter;
+		be_ssa_construction_env_t senv;
-		n = pset_count(entry->copies);
-		nodes = alloca((n + 1) * sizeof(nodes[0]));
+		n = ir_nodeset_size(&entry->copies);
+		nodes = alloca(n * sizeof(nodes[0]));
	/* put the node in an array */
-		n = 0;
-		nodes[n++] = entry->op;
		DBG((mod, LEVEL_1, "introduce copies for %+F ", entry->op));
	/* collect all copies */
-		foreach_pset(entry->copies, cp) {
+		n = 0;
+		foreach_ir_nodeset(&entry->copies, cp, iter) {
			nodes[n++] = cp;
			DB((mod, LEVEL_1, ", %+F ", cp));
		}
		DB((mod, LEVEL_1, "\n"));
	/* introduce the copies for the operand and its copies */
-		be_ssa_constr(dom, NULL, n, nodes);
-
+		be_ssa_construction_init(&senv, birg);
+		be_ssa_construction_add_copy(&senv, entry->op);
+		be_ssa_construction_add_copies(&senv, nodes, n);
+		be_ssa_construction_fix_users(&senv, entry->op);
+		be_ssa_construction_destroy(&senv);
	/* Could be that not all CopyKeeps are really needed, */
	/* so we transform unnecessary ones into Keeps.       */
-		foreach_pset(entry->copies, cp) {
+		foreach_ir_nodeset(&entry->copies, cp, iter) {
			if (be_is_CopyKeep(cp) && get_irn_n_edges(cp) < 1) {
				ir_node *keep;
				int n = get_irn_arity(cp);
				keep = be_new_Keep(arch_get_irn_reg_class(birg->main_env->arch_env, cp, -1),
					birg->irg, get_nodes_block(cp), n, (ir_node **)&get_irn_in(cp)[1]);
				sched_add_before(cp, keep);
-				sched_remove(cp);
	/* Set all ins (including the block) of the CopyKeep BAD to keep the verifier happy. */
-				while (--n >= -1)
-					set_irn_n(cp, n, get_irg_bad(birg->irg));
+				sched_remove(cp);
+				be_kill_node(cp);
			}
		}
-		del_pset(entry->copies);
+		ir_nodeset_destroy(&entry->copies);
	}
-	be_free_dominance_frontiers(dom);
-
	del_pset(cenv.op_set);
	obstack_free(&cenv.obst, NULL);
+	be_invalidate_liveness(birg);
}
* @param walk_env The walker environment
*/
static void lower_nodes_after_ra_walker(ir_node *irn, void *walk_env) {
-	lower_env_t *env = walk_env;
-	const arch_env_t *arch_env = env->chord_env->birg->main_env->arch_env;
-
-	if (!is_Block(irn) && !is_Proj(irn)) {
-		if (is_Perm(arch_env, irn)) {
+	if (! is_Block(irn) && ! is_Proj(irn)) {
+		/* only be_Perm nodes are lowered after register allocation */
+		if (be_is_Perm(irn)) {
			lower_perm_node(irn, walk_env);
		}
	}
* Walks over all blocks in an irg and performs lowering need to be
* done after register allocation (e.g. perm lowering).
*
- * @param chord_env The chordal environment containing the irg
+ * @param birg The birg object
* @param do_copy 1 == resolve cycles with a free reg if available
*/
-void lower_nodes_after_ra(be_chordal_env_t *chord_env, int do_copy) {
+void lower_nodes_after_ra(be_irg_t *birg, int do_copy) {
	lower_env_t env;
-	env.chord_env = chord_env;
-	env.do_copy = do_copy;
+	env.birg     = birg;
+	env.arch_env = birg->main_env->arch_env;
+	env.do_copy  = do_copy;
	FIRM_DBG_REGISTER(env.dbg_module, "firm.be.lower");
+	/* blockwise walk: lowers every Perm node of the graph */
	irg_walk_blkwise_graph(birg->irg, NULL, lower_nodes_after_ra_walker, &env);
}
-
-#undef is_Perm