X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbelower.c;h=3cbeec92af61a3bac1a52aaf6375d69e70b2f7f4;hb=7a6efcac0fd46869f31d992d022dd909db4b4c29;hp=1e544cfe08cdf50d0a201cb343a85ea65c86bf03;hpb=c0acb5cc9a2967e31e2b2961a98831d674cea3b8;p=libfirm diff --git a/ir/be/belower.c b/ir/be/belower.c index 1e544cfe0..3cbeec92a 100644 --- a/ir/be/belower.c +++ b/ir/be/belower.c @@ -1,11 +1,28 @@ -/** - * Author: Christian Wuerdig - * Date: 2005/12/14 - * Copyright: (c) Universitaet Karlsruhe - * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE. - * CVS-Id: $Id$ +/* + * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved. + * + * This file is part of libFirm. + * + * This file may be distributed and/or modified under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation and appearing in the file LICENSE.GPL included in the + * packaging of this file. * - * Performs lowering of perm nodes and spill/reload optimization. + * Licensees holding valid libFirm Professional Edition licenses may use + * this file in accordance with the libFirm Commercial License. + * Agreement provided with the Software. + * + * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE + * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE. + */ + +/** + * @file + * @brief Performs lowering of perm nodes. Inserts copies to assure register constraints. + * @author Christian Wuerdig + * @date 14.12.2005 + * @version $Id$ */ #ifdef HAVE_CONFIG_H #include "config.h" @@ -16,25 +33,20 @@ #include "ircons.h" #include "debug.h" #include "irhooks.h" +#include "xmalloc.h" +#include "irnodeset.h" +#include "irgmod.h" +#include "iredges_t.h" +#include "irgwalk.h" -#include "bearch.h" +#include "bearch_t.h" #include "belower.h" #include "benode_t.h" #include "besched_t.h" #include "bestat.h" #include "bessaconstr.h" -#include "irnodeset.h" - -#include "irgmod.h" -#include "iredges_t.h" -#include "irgwalk.h" - -#ifdef HAVE_MALLOC_H - #include -#endif -#ifdef HAVE_ALLOCA_H - #include -#endif +#include "benodesets.h" +#include "beintlive_t.h" #undef KEEP_ALIVE_COPYKEEP_HACK @@ -86,6 +98,7 @@ typedef struct _perm_cycle_t { perm_type_t type; /**< type (CHAIN or CYCLE) */ } perm_cycle_t; +// /* Compare the two operands */ static int cmp_op_copy_assoc(const void *a, const void *b) { const op_copy_assoc_t *op1 = a; @@ -269,6 +282,7 @@ static perm_cycle_t *get_perm_cycle(perm_cycle_t *cycle, reg_pair_t *pairs, int * @param walk_env The environment */ static void lower_perm_node(ir_node *irn, void *walk_env) { + ir_graph *irg = get_irn_irg(irn); const arch_register_class_t *reg_class; const arch_env_t *arch_env; lower_env_t *env = walk_env; @@ -295,6 +309,7 @@ static void lower_perm_node(ir_node *irn, void *walk_env) { should be ok. 
*/ sched_point = sched_prev(irn); + DBG((mod, LEVEL_1, "perm: %+F\n", irn)); DBG((mod, LEVEL_1, "sched point is %+F\n", sched_point)); assert(sched_point && "Perm is not scheduled or has no predecessor"); @@ -335,9 +350,6 @@ static void lower_perm_node(ir_node *irn, void *walk_env) { set_Proj_proj(pairs[i].out_node, get_Proj_proj(pairs[i].in_node)); } - /* remove the proj from the schedule */ - sched_remove(pairs[i].out_node); - /* reroute the edges from the proj to the argument */ exchange(pairs[i].out_node, pairs[i].in_node); //edges_reroute(pairs[i].out_node, pairs[i].in_node, env->birg->irg); @@ -436,7 +448,7 @@ static void lower_perm_node(ir_node *irn, void *walk_env) { DBG((mod, LEVEL_1, "%+F (%+F, %s) and (%+F, %s)\n", irn, res1, cycle->elems[i]->name, res2, cycle->elems[i + 1]->name)); - cpyxchg = be_new_Perm(reg_class, env->birg->irg, block, 2, in); + cpyxchg = be_new_Perm(reg_class, irg, block, 2, in); n_ops++; if (i > 0) { @@ -444,25 +456,17 @@ static void lower_perm_node(ir_node *irn, void *walk_env) { int pidx = get_pairidx_for_regidx(pairs, n, cycle->elems[i]->index, 0); /* create intermediate proj */ - res1 = new_r_Proj(get_irn_irg(irn), block, cpyxchg, get_irn_mode(res1), 0); + res1 = new_r_Proj(irg, block, cpyxchg, get_irn_mode(res1), 0); /* set as in for next Perm */ pairs[pidx].in_node = res1; } - else { - sched_remove(res1); - } - - sched_remove(res2); set_Proj_pred(res2, cpyxchg); set_Proj_proj(res2, 0); set_Proj_pred(res1, cpyxchg); set_Proj_proj(res1, 1); - sched_add_after(sched_point, res1); - sched_add_after(sched_point, res2); - arch_set_irn_register(arch_env, res2, cycle->elems[i + 1]); arch_set_irn_register(arch_env, res1, cycle->elems[i]); @@ -478,13 +482,10 @@ static void lower_perm_node(ir_node *irn, void *walk_env) { DBG((mod, LEVEL_1, "%+F creating copy node (%+F, %s) -> (%+F, %s)\n", irn, arg1, cycle->elems[i]->name, res2, cycle->elems[i + 1]->name)); - cpyxchg = be_new_Copy(reg_class, env->birg->irg, block, arg1); + cpyxchg = be_new_Copy(reg_class, irg, block, arg1); arch_set_irn_register(arch_env, cpyxchg, cycle->elems[i + 1]); n_ops++; - /* remove the proj from the schedule */ - sched_remove(res2); - /* exchange copy node and proj */ exchange(res2, cpyxchg); @@ -522,14 +523,14 @@ static int get_n_out_edges(const ir_node *irn) { return cnt; } -static ir_node *belower_skip_proj(ir_node *irn) { +static INLINE ir_node *belower_skip_proj(ir_node *irn) { while(is_Proj(irn)) irn = get_Proj_pred(irn); return irn; } static ir_node *find_copy(constraint_env_t *env, ir_node *irn, ir_node *op) { - const arch_env_t *arch_env = env->birg->main_env->arch_env; + const arch_env_t *arch_env = be_get_birg_arch_env(env->birg); ir_node *block = get_nodes_block(irn); ir_node *cur_node; @@ -546,8 +547,9 @@ static ir_node *find_copy(constraint_env_t *env, ir_node *irn, ir_node *op) { static void gen_assure_different_pattern(ir_node *irn, ir_node *other_different, constraint_env_t *env) { be_irg_t *birg = env->birg; + ir_graph *irg = be_get_birg_irg(birg); pset *op_set = env->op_set; - const arch_env_t *arch_env = birg->main_env->arch_env; + const arch_env_t *arch_env = be_get_birg_arch_env(birg); ir_node *block = get_nodes_block(irn); const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, other_different, -1); ir_node *in[2], *keep, *cpy; @@ -567,7 +569,7 @@ static void gen_assure_different_pattern(ir_node *irn, ir_node *other_different, /* check if already exists such a copy in the schedule immediatly before */ cpy = find_copy(env, belower_skip_proj(irn), 
other_different); if (! cpy) { - cpy = be_new_Copy(cls, birg->irg, block, other_different); + cpy = be_new_Copy(cls, irg, block, other_different); be_node_set_flags(cpy, BE_OUT_POS(0), arch_irn_flags_dont_spill); DBG((mod, LEVEL_1, "created non-spillable %+F for value %+F\n", cpy, other_different)); } @@ -581,10 +583,10 @@ static void gen_assure_different_pattern(ir_node *irn, ir_node *other_different, /* Add the Keep resp. CopyKeep and reroute the users */ /* of the other_different irn in case of CopyKeep. */ if (get_n_out_edges(other_different) == 0) { - keep = be_new_Keep(cls, birg->irg, block, 2, in); + keep = be_new_Keep(cls, irg, block, 2, in); } else { - keep = be_new_CopyKeep_single(cls, birg->irg, block, cpy, irn, get_irn_mode(other_different)); + keep = be_new_CopyKeep_single(cls, irg, block, cpy, irn, get_irn_mode(other_different)); be_node_set_reg_class(keep, 1, cls); } @@ -624,11 +626,12 @@ static void gen_assure_different_pattern(ir_node *irn, ir_node *other_different, */ static void assure_different_constraints(ir_node *irn, constraint_env_t *env) { const arch_register_req_t *req; + const arch_env_t *arch_env = be_get_birg_arch_env(env->birg); - req = arch_get_register_req(env->birg->main_env->arch_env, irn, -1); + req = arch_get_register_req(arch_env, irn, -1); if (arch_register_req_is(req, should_be_different)) { - ir_node *different_from = get_irn_n(irn, req->other_different); + ir_node *different_from = get_irn_n(belower_skip_proj(irn), req->other_different); gen_assure_different_pattern(irn, different_from, env); } else if (arch_register_req_is(req, should_be_different_from_all)) { int i, n = get_irn_arity(belower_skip_proj(irn)); @@ -661,6 +664,8 @@ static void assure_constraints_walker(ir_node *irn, void *walk_env) { * (or Projs of the same node), copying the same operand. 
*/ static void melt_copykeeps(constraint_env_t *cenv) { + be_irg_t *birg = cenv->birg; + ir_graph *irg = be_get_birg_irg(birg); op_copy_assoc_t *entry; /* for all */ @@ -743,10 +748,10 @@ static void melt_copykeeps(constraint_env_t *cenv) { } #ifdef KEEP_ALIVE_COPYKEEP_HACK - new_ck = be_new_CopyKeep(entry->cls, cenv->birg->irg, get_nodes_block(ref), be_get_CopyKeep_op(ref), n_melt, new_ck_in, mode_ANY); + new_ck = be_new_CopyKeep(entry->cls, irg, get_nodes_block(ref), be_get_CopyKeep_op(ref), n_melt, new_ck_in, mode_ANY); keep_alive(new_ck); #else - new_ck = be_new_CopyKeep(entry->cls, cenv->birg->irg, get_nodes_block(ref), be_get_CopyKeep_op(ref), n_melt, new_ck_in, get_irn_mode(ref)); + new_ck = be_new_CopyKeep(entry->cls, irg, get_nodes_block(ref), be_get_CopyKeep_op(ref), n_melt, new_ck_in, get_irn_mode(ref)); #endif /* KEEP_ALIVE_COPYKEEP_HACK */ /* set register class for all keeped inputs */ @@ -783,6 +788,8 @@ static void melt_copykeeps(constraint_env_t *cenv) { * @param birg The birg structure containing the irg */ void assure_constraints(be_irg_t *birg) { + ir_graph *irg = be_get_birg_irg(birg); + const arch_env_t *arch_env = be_get_birg_arch_env(birg); constraint_env_t cenv; op_copy_assoc_t *entry; ir_node **nodes; @@ -795,7 +802,7 @@ void assure_constraints(be_irg_t *birg) { cenv.op_set = new_pset(cmp_op_copy_assoc, 16); obstack_init(&cenv.obst); - irg_walk_blkwise_graph(birg->irg, NULL, assure_constraints_walker, &cenv); + irg_walk_blkwise_graph(irg, NULL, assure_constraints_walker, &cenv); /* melt copykeeps, pointing to projs of */ /* the same mode_T node and keeping the */ @@ -838,8 +845,8 @@ void assure_constraints(be_irg_t *birg) { ir_node *keep; int n = get_irn_arity(cp); - keep = be_new_Keep(arch_get_irn_reg_class(birg->main_env->arch_env, cp, -1), - birg->irg, get_nodes_block(cp), n, (ir_node **)&get_irn_in(cp)[1]); + keep = be_new_Keep(arch_get_irn_reg_class(arch_env, cp, -1), + irg, get_nodes_block(cp), n, (ir_node **)&get_irn_in(cp)[1]); sched_add_before(cp, keep); /* Set all ins (including the block) of the CopyKeep BAD to keep the verifier happy. */ @@ -853,10 +860,143 @@ void assure_constraints(be_irg_t *birg) { del_pset(cenv.op_set); obstack_free(&cenv.obst, NULL); - be_invalidate_liveness(birg); + be_liveness_invalidate(be_get_birg_liveness(birg)); } +/** + * Push nodes that do not need to be permed through the Perm. + * This is commonly a reload cascade at block ends. + * @note This routine needs interference. + * @note Probably, we can implement it a little more efficient. + * Especially searching the frontier lazily might be better. + * @param perm The perm. + * @param data The walker data (lower_env_t). + * @return 1, if there is something left to perm over. + * 0, if removed the complete perm. + */ +static int push_through_perm(ir_node *perm, void *data) +{ + lower_env_t *env = data; + const arch_env_t *aenv = env->arch_env; + + ir_graph *irg = get_irn_irg(perm); + ir_node *bl = get_nodes_block(perm); + int n = get_irn_arity(perm); + int *map = alloca(n * sizeof(map[0])); + ir_node **projs = alloca(n * sizeof(projs[0])); + bitset_t *keep = bitset_alloca(n); + ir_node *frontier = sched_first(bl); + FIRM_DBG_REGISTER(firm_dbg_module_t *mod, "firm.be.lower.permmove"); + + int i, new_size, n_keep; + const ir_edge_t *edge; + ir_node *last_proj, *irn; + const arch_register_class_t *cls; + + DBG((mod, LEVEL_1, "perm move %+F irg %+F\n", perm, irg)); + + /* get some proj and find out the register class of the proj. 
*/ + foreach_out_edge (perm, edge) { + last_proj = get_edge_src_irn(edge); + cls = arch_get_irn_reg_class(aenv, last_proj, -1); + assert(is_Proj(last_proj)); + break; + } + + /* find the point in the schedule after which the + * potentially movable nodes must be defined. + * A perm will only be pushed up to first instruction + * which lets an operand of itself die. */ + + sched_foreach_reverse_from (sched_prev(perm), irn) { + for(i = get_irn_arity(irn) - 1; i >= 0; --i) { + ir_node *op = get_irn_n(irn, i); + if(arch_irn_consider_in_reg_alloc(aenv, cls, op) + && !values_interfere(env->birg, op, last_proj)) { + frontier = sched_next(irn); + goto found_front; + } + } + } +found_front: + + DBG((mod, LEVEL_2, "\tfrontier: %+F\n", frontier)); + + foreach_out_edge (perm, edge) { + ir_node *proj = get_edge_src_irn(edge); + int nr = get_Proj_proj(proj); + ir_node *op = get_irn_n(perm, nr); + + assert(nr < n); + + /* we will need the last Proj as an insertion point + * for the instruction(s) pushed through the Perm */ + if (sched_comes_after(last_proj, proj)) + last_proj = proj; + + projs[nr] = proj; + + bitset_set(keep, nr); + if (!is_Proj(op) && get_nodes_block(op) == bl + && (op == frontier || sched_comes_after(frontier, op))) { + for (i = get_irn_arity(op) - 1; i >= 0; --i) { + ir_node *opop = get_irn_n(op, i); + if (!arch_irn_consider_in_reg_alloc(aenv, cls, opop)) { + bitset_clear(keep, nr); + break; + } + } + } + } + + n_keep = bitset_popcnt(keep); + + /* well, we could not push enything through the perm */ + if (n_keep == n) + return 1; + + assert(is_Proj(last_proj)); + + DBG((mod, LEVEL_2, "\tkeep: %d, total: %d, mask: %b\n", n_keep, n, keep)); + last_proj = sched_next(last_proj); + for (new_size = 0, i = 0; i < n; ++i) { + ir_node *proj = projs[i]; + + if (bitset_is_set(keep, i)) { + map[i] = new_size++; + set_Proj_proj(proj, map[i]); + DBG((mod, LEVEL_1, "\targ %d remap to %d\n", i, map[i])); + } + + else { + ir_node *move = get_irn_n(perm, i); + + DBG((mod, LEVEL_2, "\tmoving %+F before %+F, killing %+F\n", move, last_proj, proj)); + + /* move the movable node in front of the Perm */ + sched_remove(move); + sched_add_before(last_proj, move); + + /* give it the proj's register */ + arch_set_irn_register(aenv, move, arch_get_irn_register(aenv, proj)); + + /* reroute all users of the proj to the moved node. */ + edges_reroute(proj, move, irg); + + /* and like it to bad so it is no more in the use array of the perm */ + set_Proj_pred(proj, get_irg_bad(irg)); + + map[i] = -1; + } + + } + + if (n_keep > 0) + be_Perm_reduce(perm, new_size, map); + + return n_keep > 0; +} /** * Calls the corresponding lowering function for the node. @@ -865,13 +1005,18 @@ void assure_constraints(be_irg_t *birg) { * @param walk_env The walker environment */ static void lower_nodes_after_ra_walker(ir_node *irn, void *walk_env) { - if (! is_Block(irn) && ! 
is_Proj(irn)) { - if (be_is_Perm(irn)) { - lower_perm_node(irn, walk_env); - } - } + int perm_stayed; - return; + if (is_Block(irn) || is_Proj(irn)) + return; + if (!be_is_Perm(irn)) + return; + + perm_stayed = push_through_perm(irn, walk_env); + if (!perm_stayed) + return; + + lower_perm_node(irn, walk_env); } /** @@ -883,11 +1028,15 @@ static void lower_nodes_after_ra_walker(ir_node *irn, void *walk_env) { */ void lower_nodes_after_ra(be_irg_t *birg, int do_copy) { lower_env_t env; + ir_graph *irg = be_get_birg_irg(birg); env.birg = birg; - env.arch_env = birg->main_env->arch_env; + env.arch_env = be_get_birg_arch_env(birg); env.do_copy = do_copy; FIRM_DBG_REGISTER(env.dbg_module, "firm.be.lower"); - irg_walk_blkwise_graph(birg->irg, NULL, lower_nodes_after_ra_walker, &env); + /* we will need interference */ + be_liveness_assure_chk(be_get_birg_liveness(birg)); + + irg_walk_blkwise_graph(irg, NULL, lower_nodes_after_ra_walker, &env); }
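
For readers new to this code: lower_perm_node, which this diff reworks, splits a Perm into chains and cycles (see perm_cycle_t and get_perm_cycle above) and materialises chains as be_new_Copy nodes and cycles as 2-element be_new_Perm nodes. The stand-alone sketch below is an illustration only, not part of the patch; the names lower_permutation, dst and N_REGS are made up and unrelated to the libFirm API. It shows the chain/cycle decomposition on plain register indices.

/*
 * Minimal sketch of Perm lowering on register indices.
 * dst[i] >= 0: the value currently in register i must end up in register dst[i].
 * dst[i] <  0: register i is not an input of the permutation.
 * Chains end in a register whose old value is dead and are resolved with copies;
 * what remains are cycles, each resolved with pairwise swaps.
 */
#include <stdio.h>

#define N_REGS 8

static void lower_permutation(int dst[N_REGS])
{
	int is_src[N_REGS];
	int i, j, changed;

	for (i = 0; i < N_REGS; ++i)
		is_src[i] = dst[i] >= 0;

	/* Chains: emit a copy into a register that is a destination but
	 * (no longer) a source, i.e. whose old value is not needed anymore. */
	do {
		changed = 0;
		for (i = 0; i < N_REGS; ++i) {
			int d = dst[i];
			if (d < 0 || d == i || is_src[d])
				continue;
			printf("copy r%d -> r%d\n", i, d);
			is_src[i] = 0;   /* the value has left register i */
			dst[i]    = -1;  /* this pair is handled */
			changed   = 1;
		}
	} while (changed);

	/* Remaining pairs form cycles; a cycle of length k needs k-1 swaps. */
	for (i = 0; i < N_REGS; ++i) {
		int cycle[N_REGS];
		int len = 0, cur = i;

		if (dst[i] < 0 || dst[i] == i)
			continue;

		do {                          /* collect i -> dst[i] -> ... -> i */
			int next = dst[cur];
			cycle[len++] = cur;
			dst[cur] = -1;
			cur = next;
		} while (cur != i);

		for (j = 1; j < len; ++j)     /* swap the cycle head with each element */
			printf("swap r%d <-> r%d\n", cycle[0], cycle[j]);
	}
}

int main(void)
{
	/* chain r0 -> r1 -> r2 (r2's old value is dead) and cycle r3 <-> r4 */
	int dst[N_REGS] = { 1, 2, -1, 4, 3, -1, -1, -1 };
	lower_permutation(dst);
	return 0;
}

In belower.c the emitted copy corresponds to a be_new_Copy node and the swap to a 2-element be_new_Perm; the push_through_perm pass added by this patch runs first and moves nodes that do not need to be permed (commonly a reload cascade at block ends) past the Perm, shrinking it with be_Perm_reduce before the decomposition runs.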