/*
 * This file is part of libFirm.
 * Copyright (C) 2012 University of Karlsruhe.
 */
/**
 * @file
 * @brief  Performs lowering of perm nodes. Inserts copies to assure
 *         register constraints.
 * @author Christian Wuerdig
 * @date   14.12.2005
 */
#include "config.h"
#include "debug.h"
#include "xmalloc.h"
#include "irnodeset.h"
-#include "irnodemap.h"
+#include "irnodehashmap.h"
#include "irgmod.h"
#include "iredges_t.h"
#include "irgwalk.h"
#include "array_t.h"
#include "bearch.h"
+#include "beirg.h"
#include "belower.h"
#include "benode.h"
#include "besched.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg_constr;)
DEBUG_ONLY(static firm_dbg_module_t *dbg_permmove;)
/** Associates an ir_node with its copy and CopyKeep. */
typedef struct {
ir_nodeset_t copies; /**< all non-spillable copies of this irn */
const arch_register_class_t *cls;
/** Environment for constraints. */
typedef struct {
- ir_graph *irg;
- ir_nodemap_t op_set;
- struct obstack obst;
+ ir_graph *irg;
+ ir_nodehashmap_t op_set;
+ struct obstack obst;
} constraint_env_t;
/** Lowering walker environment. */
typedef enum perm_type_t {
PERM_CYCLE,
PERM_CHAIN,
- PERM_SWAP,
- PERM_COPY
} perm_type_t;
/** Structure to represent cycles or chains in a Perm. */
* NOTE: This works with auto-magic. If we insert the new copy/exchange
* nodes after this node, everything should be ok. */
ir_node * sched_point = sched_prev(irn);
- const ir_edge_t * edge;
- const ir_edge_t * next;
int n;
int i;
/* build the list of register pairs (in, out) */
n = 0;
- foreach_out_edge_safe(irn, edge, next) {
+ foreach_out_edge_safe(irn, edge) {
ir_node *const out = get_edge_src_irn(edge);
long const pn = get_Proj_proj(out);
ir_node *const in = get_irn_n(irn, pn);
* IN_2 = in node with register i + 1
* OUT_1 = out node with register i + 1
* OUT_2 = out node with register i */
+ ir_node *cpyxchg;
if (cycle.type == PERM_CYCLE && !do_copy) {
ir_node *in[2];
- ir_node *cpyxchg;
in[0] = arg1;
in[1] = arg2;
arch_set_irn_register(res2, cycle.elems[i + 1]);
arch_set_irn_register(res1, cycle.elems[i]);
- /* insert the copy/exchange node in schedule after the magic schedule node (see above) */
- sched_add_after(skip_Proj(sched_point), cpyxchg);
-
DB((dbg, LEVEL_1, "replacing %+F with %+F, placed new node after %+F\n", irn, cpyxchg, sched_point));
-
- /* set the new scheduling point */
- sched_point = res1;
} else {
- ir_node *cpyxchg;
-
DB((dbg, LEVEL_1, "%+F creating copy node (%+F, %s) -> (%+F, %s)\n",
irn, arg1, cycle.elems[i]->name, res2, cycle.elems[i + 1]->name));
- cpyxchg = be_new_Copy(reg_class, block, arg1);
+ cpyxchg = be_new_Copy(block, arg1);
arch_set_irn_register(cpyxchg, cycle.elems[i + 1]);
/* exchange copy node and proj */
exchange(res2, cpyxchg);
+ }
- /* insert the copy/exchange node in schedule after the magic schedule node (see above) */
- sched_add_after(skip_Proj(sched_point), cpyxchg);
+ /* insert the copy/exchange node in schedule after the magic schedule node (see above) */
+ sched_add_after(sched_point, cpyxchg);
- /* set the new scheduling point */
- sched_point = cpyxchg;
- }
+ /* set the new scheduling point */
+ sched_point = cpyxchg;
}
}
static void gen_assure_different_pattern(ir_node *irn, ir_node *other_different, constraint_env_t *env)
{
- ir_nodemap_t *op_set;
+ ir_nodehashmap_t *op_set;
ir_node *block;
const arch_register_class_t *cls;
ir_node *keep, *cpy;
op_copy_assoc_t *entry;
- if (arch_irn_is_ignore(other_different) ||
+ arch_register_req_t const *const req = arch_get_irn_register_req(other_different);
+ if (arch_register_req_is(req, ignore) ||
!mode_is_datab(get_irn_mode(other_different))) {
DB((dbg_constr, LEVEL_1, "ignore constraint for %+F because other_irn is ignore or not a datab node\n", irn));
return;
op_set = &env->op_set;
block = get_nodes_block(irn);
- cls = arch_get_irn_reg_class_out(other_different);
+ cls = req->cls;
/* Make a not spillable copy of the different node */
/* this is needed because the different irn could be */
/* check if already exists such a copy in the schedule immediately before */
cpy = find_copy(skip_Proj(irn), other_different);
if (! cpy) {
- cpy = be_new_Copy(cls, block, other_different);
- arch_irn_set_flags(cpy, arch_irn_flags_dont_spill);
+ cpy = be_new_Copy(block, other_different);
+ arch_set_irn_flags(cpy, arch_irn_flags_dont_spill);
DB((dbg_constr, LEVEL_1, "created non-spillable %+F for value %+F\n", cpy, other_different));
} else {
DB((dbg_constr, LEVEL_1, "using already existing %+F for value %+F\n", cpy, other_different));
/* Add the Keep resp. CopyKeep and reroute the users */
/* of the other_different irn in case of CopyKeep. */
if (has_irn_users(other_different)) {
- keep = be_new_CopyKeep_single(cls, block, cpy, irn, get_irn_mode(other_different));
+ keep = be_new_CopyKeep_single(block, cpy, irn);
be_node_set_reg_class_in(keep, 1, cls);
} else {
ir_node *in[2];
sched_add_before(skip_Proj(irn), cpy);
sched_add_after(skip_Proj(irn), keep);
- /* insert the other different and it's copies into the map */
- entry = ir_nodemap_get(op_set, other_different);
+ /* insert the other different and its copies into the map */
+ entry = ir_nodehashmap_get(op_copy_assoc_t, op_set, other_different);
if (! entry) {
entry = OALLOC(&env->obst, op_copy_assoc_t);
entry->cls = cls;
ir_nodeset_init(&entry->copies);
- ir_nodemap_insert(op_set, other_different, entry);
+ ir_nodehashmap_insert(op_set, other_different, entry);
}
/* insert copy */
*/
static void assure_different_constraints(ir_node *irn, ir_node *skipped_irn, constraint_env_t *env)
{
- const arch_register_req_t *req = arch_get_register_req_out(irn);
+ const arch_register_req_t *req = arch_get_irn_register_req(irn);
if (arch_register_req_is(req, must_be_different)) {
const unsigned other = req->other_different;
*/
static void assure_constraints_walker(ir_node *block, void *walk_env)
{
- ir_node *irn;
+ constraint_env_t *env = (constraint_env_t*)walk_env;
sched_foreach_reverse(block, irn) {
- ir_mode *mode = get_irn_mode(irn);
-
- if (mode == mode_T) {
- const ir_edge_t *edge;
-
- foreach_out_edge(irn, edge) {
- ir_node *proj = get_edge_src_irn(edge);
-
- mode = get_irn_mode(proj);
- if (mode_is_datab(mode))
- assure_different_constraints(proj, irn, walk_env);
- }
- } else if (mode_is_datab(mode)) {
- assure_different_constraints(irn, irn, walk_env);
- }
+ be_foreach_value(irn, value,
+ if (mode_is_datab(get_irn_mode(value)))
+ assure_different_constraints(value, irn, env);
+ );
}
}
*/
static void melt_copykeeps(constraint_env_t *cenv)
{
- ir_nodemap_iterator_t map_iter;
- ir_nodemap_entry_t map_entry;
+ ir_nodehashmap_iterator_t map_iter;
+ ir_nodehashmap_entry_t map_entry;
/* for all */
- foreach_ir_nodemap(&cenv->op_set, map_entry, map_iter) {
- op_copy_assoc_t *entry = map_entry.data;
+ foreach_ir_nodehashmap(&cenv->op_set, map_entry, map_iter) {
+ op_copy_assoc_t *entry = (op_copy_assoc_t*)map_entry.data;
int idx, num_ck;
- ir_node *cp;
struct obstack obst;
- ir_nodeset_iterator_t iter;
ir_node **ck_arr, **melt_arr;
obstack_init(&obst);
if (ck_arr[idx]) {
int j, n_melt;
ir_node **new_ck_in;
- ir_node *new_ck;
ir_node *sched_pt = NULL;
n_melt = 1;
kill_node(melt_arr[j]);
}
+ ir_node *const new_ck = be_new_CopyKeep(get_nodes_block(ref), be_get_CopyKeep_op(ref), n_melt, new_ck_in);
#ifdef KEEP_ALIVE_COPYKEEP_HACK
- new_ck = be_new_CopyKeep(entry->cls, get_nodes_block(ref), be_get_CopyKeep_op(ref), n_melt, new_ck_in, mode_ANY);
keep_alive(new_ck);
-#else
- new_ck = be_new_CopyKeep(entry->cls, get_nodes_block(ref), be_get_CopyKeep_op(ref), n_melt, new_ck_in, get_irn_mode(ref));
#endif /* KEEP_ALIVE_COPYKEEP_HACK */
/* set register class for all kept inputs */
void assure_constraints(ir_graph *irg)
{
- constraint_env_t cenv;
- ir_nodemap_iterator_t map_iter;
- ir_nodemap_entry_t map_entry;
+ constraint_env_t cenv;
+ ir_nodehashmap_iterator_t map_iter;
+ ir_nodehashmap_entry_t map_entry;
FIRM_DBG_REGISTER(dbg_constr, "firm.be.lower.constr");
cenv.irg = irg;
- ir_nodemap_init(&cenv.op_set);
+ ir_nodehashmap_init(&cenv.op_set);
obstack_init(&cenv.obst);
irg_block_walk_graph(irg, NULL, assure_constraints_walker, &cenv);
melt_copykeeps(&cenv);
/* for all */
- foreach_ir_nodemap(&cenv.op_set, map_entry, map_iter) {
- op_copy_assoc_t *entry = map_entry.data;
- int n = ir_nodeset_size(&entry->copies);
+ foreach_ir_nodehashmap(&cenv.op_set, map_entry, map_iter) {
+ op_copy_assoc_t *entry = (op_copy_assoc_t*)map_entry.data;
+ size_t n = ir_nodeset_size(&entry->copies);
ir_node **nodes = ALLOCAN(ir_node*, n);
- ir_node *cp;
- ir_nodeset_iterator_t iter;
be_ssa_construction_env_t senv;
/* put the node in an array */
DB((dbg_constr, LEVEL_1, "\n"));
	/* introduce the copies for the operand and its copies */
be_ssa_construction_init(&senv, irg);
be_ssa_construction_add_copy(&senv, map_entry.node);
be_ssa_construction_add_copies(&senv, nodes, n);
ir_node *keep;
keep = be_new_Keep(get_nodes_block(cp), n, get_irn_in(cp) + 1);
- sched_add_before(cp, keep);
+ sched_replace(cp, keep);
/* Set all ins (including the block) of the CopyKeep BAD to keep the verifier happy. */
- sched_remove(cp);
kill_node(cp);
}
}
ir_nodeset_destroy(&entry->copies);
}
- ir_nodemap_destroy(&cenv.op_set);
+ ir_nodehashmap_destroy(&cenv.op_set);
obstack_free(&cenv.obst, NULL);
- be_liveness_invalidate(be_get_irg_liveness(irg));
+ be_invalidate_live_sets(irg);
}
-
/**
* Push nodes that do not need to be permed through the Perm.
* This is commonly a reload cascade at block ends.
{
ir_graph *irg = get_irn_irg(perm);
ir_node *bl = get_nodes_block(perm);
- ir_node *node;
int arity = get_irn_arity(perm);
int *map;
int *proj_map;
int n_moved;
int new_size;
ir_node *frontier = bl;
- ir_node *irn;
int i, n;
+ be_lv_t *lv = be_get_irg_liveness(irg);
/* get some Proj and find out the register class of that Proj. */
- const ir_edge_t *edge = get_irn_out_edge_first_kind(perm, EDGE_KIND_NORMAL);
- ir_node *one_proj = get_edge_src_irn(edge);
- const arch_register_class_t *cls = arch_get_irn_reg_class_out(one_proj);
+ ir_node *one_proj = get_edge_src_irn(get_irn_out_edge_first_kind(perm, EDGE_KIND_NORMAL));
+ const arch_register_class_t *cls = arch_get_irn_reg_class(one_proj);
assert(is_Proj(one_proj));
DB((dbg_permmove, LEVEL_1, "perm move %+F irg %+F\n", perm, irg));
* the former dead operand would be live now at the point of
* the Perm, increasing the register pressure by one.
*/
- sched_foreach_reverse_from(sched_prev(perm), irn) {
- for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
- ir_node *op = get_irn_n(irn, i);
- be_lv_t *lv = be_get_irg_liveness(irg);
- if (arch_irn_consider_in_reg_alloc(cls, op) &&
- !be_values_interfere(lv, op, one_proj)) {
+ sched_foreach_reverse_before(perm, irn) {
+ be_foreach_use(irn, cls, in_req_, op, op_req_,
+ if (!be_values_interfere(lv, op, one_proj)) {
frontier = irn;
goto found_front;
}
- }
+ );
}
found_front:
DB((dbg_permmove, LEVEL_2, "\tfrontier: %+F\n", frontier));
- node = sched_prev(perm);
n_moved = 0;
- while (!sched_is_begin(node)) {
+ for (;;) {
+ ir_node *const node = sched_prev(perm);
+ if (node == frontier)
+ break;
+
const arch_register_req_t *req;
int input = -1;
- ir_node *proj;
+ ir_node *proj = NULL;
/* search if node is a INPUT of Perm */
foreach_out_edge(perm, edge) {
/* it wasn't an input to the perm, we can't do anything more */
if (input < 0)
break;
- if (!sched_comes_after(frontier, node))
- break;
if (arch_irn_is(node, modify_flags))
break;
- req = arch_get_register_req_out(node);
+ req = arch_get_irn_register_req(node);
if (req->type != arch_register_req_type_normal)
break;
for (i = get_irn_arity(node) - 1; i >= 0; --i) {
arch_set_irn_register(node, arch_get_irn_register(proj));
/* reroute all users of the proj to the moved node. */
- edges_reroute(proj, node, irg);
-
- /* and kill it */
- set_Proj_pred(proj, new_Bad());
- kill_node(proj);
+ exchange(proj, node);
bitset_set(moved, input);
n_moved++;
-
- node = sched_prev(node);
}
/* well, we could not push anything through the perm */
perm_stayed = push_through_perm(irn);
if (perm_stayed)
- lower_perm_node(irn, walk_env);
+ lower_perm_node(irn, (lower_env_t*)walk_env);
}
void lower_nodes_after_ra(ir_graph *irg, int do_copy)
env.do_copy = do_copy;
/* we will need interference */
- be_liveness_assure_chk(be_get_irg_liveness(irg));
+ be_assure_live_chk(irg);
irg_walk_graph(irg, NULL, lower_nodes_after_ra_walker, &env);
}