X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbeuses.c;h=7da6b4ffd7d22c94d1509f0b37e146de4cff29a1;hb=bb9f2e36362333c6635b89f5258171b06c786608;hp=34091d86c94d614135e57469f4c508bbd3c50320;hpb=ed4893e63e31728555b34f4fc3f92b6dce0f693a;p=libfirm diff --git a/ir/be/beuses.c b/ir/be/beuses.c index 34091d86c..7da6b4ffd 100644 --- a/ir/be/beuses.c +++ b/ir/be/beuses.c @@ -1,16 +1,30 @@ -/** - * @file beuse.c - * @date 27.06.2005 - * @author Sebastian Hack, Matthias Braun +/* + * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved. + * + * This file is part of libFirm. + * + * This file may be distributed and/or modified under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation and appearing in the file LICENSE.GPL included in the + * packaging of this file. * - * Methods to compute when a value will be used again. + * Licensees holding valid libFirm Professional Edition licenses may use + * this file in accordance with the libFirm Commercial License. + * Agreement provided with the Software. * - * Copyright (C) 2005 Universitaet Karlsruhe - * Released under the GPL + * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE + * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE. + */ + +/** + * @file + * @brief Methods to compute when a value will be used again. + * @author Sebastian Hack, Matthias Braun + * @date 27.06.2005 + * @version $Id$ */ -#ifdef HAVE_CONFIG_H #include "config.h" -#endif #include #include @@ -33,23 +47,24 @@ #include "benode_t.h" #include "besched_t.h" #include "beirgmod.h" -#include "bearch.h" -#include "beuses_t.h" -#include "benodesets.h" +#include "bearch_t.h" +#include "beuses.h" #define SCAN_INTERBLOCK_USES typedef struct _be_use_t { const ir_node *block; const ir_node *node; + int outermost_loop; unsigned next_use; + unsigned visited; } be_use_t; struct _be_uses_t { set *uses; ir_graph *irg; - const exec_freq_t *execfreqs; const be_lv_t *lv; + unsigned visited_counter; DEBUG_ONLY(firm_dbg_module_t *dbg;) }; @@ -57,125 +72,329 @@ static int cmp_use(const void *a, const void *b, size_t n) { const be_use_t *p = a; const be_use_t *q = b; + (void) n; + return !(p->block == q->block && p->node == q->node); } -static const be_use_t *get_or_set_use_block(be_uses_t *uses, +static be_next_use_t get_next_use(be_uses_t *env, ir_node *from, + unsigned from_step, const ir_node *def, + int skip_from_uses); + +static const be_use_t *get_or_set_use_block(be_uses_t *env, const ir_node *block, const ir_node *def) { - unsigned hash = HASH_COMBINE(nodeset_hash(block), nodeset_hash(def)); + unsigned hash = HASH_COMBINE(hash_irn(block), hash_irn(def)); be_use_t temp; be_use_t* result; temp.block = block; temp.node = def; - result = set_find(uses->uses, &temp, sizeof(temp), hash); + result = set_find(env->uses, &temp, sizeof(temp), hash); if(result == NULL) { // insert templ first as we might end in a loop in the get_next_use // call otherwise temp.next_use = USES_INFINITY; - result = set_insert(uses->uses, &temp, sizeof(temp), hash); + temp.outermost_loop = -1; + temp.visited = 0; + result = set_insert(env->uses, &temp, sizeof(temp), hash); + } + + if(result->outermost_loop < 0 && result->visited < env->visited_counter) { + be_next_use_t next_use; - result->next_use = be_get_next_use(uses, sched_first(block), 0, def, 0); + result->visited = env->visited_counter; + next_use = get_next_use(env, sched_first(block), 0, def, 0); + if(next_use.outermost_loop >= 0) { + 
result->next_use = next_use.time; + result->outermost_loop = next_use.outermost_loop; + DBG((env->dbg, LEVEL_5, "Setting nextuse of %+F in block %+F to %u (outermostloop %d)\n", def, block, result->next_use, result->outermost_loop)); + } } return result; } -unsigned be_get_next_use(be_uses_t *uses, const ir_node *from, - unsigned from_step, const ir_node *def, - int skip_from_uses) +static int be_is_phi_argument(const ir_node *block, const ir_node *def) { - unsigned step = from_step; - ir_node *block = get_nodes_block(from); - const ir_node *node; + ir_node *node; + ir_node *succ_block = NULL; + const ir_edge_t *edge; + int arity, i; + +#if 1 + if (get_irn_n_edges_kind(block, EDGE_KIND_BLOCK) < 1) +#else + if (get_irn_n_edges_kind(block, EDGE_KIND_BLOCK) != 1) +#endif + return 0; + + foreach_block_succ(block, edge) { + succ_block = get_edge_src_irn(edge); + break; + } + + arity = get_Block_n_cfgpreds(succ_block); + if(arity <= 1) + return 0; + + for(i = 0; i < arity; ++i) { + if(get_Block_cfgpred_block(succ_block, i) == block) + break; + } + assert(i < arity); + + sched_foreach(succ_block, node) { + ir_node *arg; + + if(!is_Phi(node)) + break; + + arg = get_irn_n(node, i); + if(arg == def) + return 1; + } + + return 0; +} + +static inline +unsigned get_step(const ir_node *node) +{ + return PTR_TO_INT(get_irn_link(node)); +} + +static be_next_use_t get_next_use(be_uses_t *env, ir_node *from, + unsigned from_step, const ir_node *def, + int skip_from_uses) +{ + unsigned step = from_step; + ir_node *block = get_nodes_block(from); + ir_node *next_use; + ir_node *node; + unsigned timestep; + unsigned next_use_step; const ir_edge_t *edge; +#if 1 + assert(skip_from_uses == 0 || skip_from_uses == 1); + if(skip_from_uses) { + from = sched_next(from); + } + + next_use = NULL; + next_use_step = INT_MAX; + timestep = get_step(from); + foreach_out_edge(def, edge) { + ir_node *node = get_edge_src_irn(edge); + unsigned node_step; + + if(is_Anchor(node)) + continue; + if(get_nodes_block(node) != block) + continue; + if(is_Phi(node)) + continue; + + node_step = get_step(node); + if(node_step < timestep) + continue; + if(node_step < next_use_step) { + next_use = node; + next_use_step = node_step; + } + } + + if(next_use != NULL) { + be_next_use_t result; + result.time = next_use_step - timestep + skip_from_uses; + result.outermost_loop = get_loop_depth(get_irn_loop(block)); + result.before = next_use; + return result; + } + + node = sched_last(block); + step = get_step(node) + 1 + timestep + skip_from_uses; + +#else if(skip_from_uses) { - step++; from = sched_next(from); + ++step; } sched_foreach_from(from, node) { int i, arity; + if(is_Phi(node)) { + step++; + continue; + } + arity = get_irn_arity(node); for (i = 0; i < arity; ++i) { const ir_node *operand = get_irn_n(node, i); if (operand == def) { - DBG((uses->dbg, LEVEL_3, "found use of %+F at %+F\n", operand, node)); - return step; + be_next_use_t result; + + DBG((env->dbg, LEVEL_3, "found use of %+F at %+F\n", operand, node)); + + /** + * Spills/Reloads are a special case, they're not really a + * usage of a value, continue searching + */ + if (be_is_Spill(node) || be_is_Reload(node)) { + return be_get_next_use(env, node, step, node, 1); + } + + result.time = step; + result.outermost_loop = get_loop_depth(get_irn_loop(block)); + result.before = node; + return result; } } step++; } +#endif - if(be_is_live_end(uses->lv, block, def)) - return step; + if(be_is_phi_argument(block, def)) { + // TODO we really should continue searching the uses of the phi, + 
+		// as a phi isn't a real use that implies a reload (because we could
+		// easily spill the whole phi)
+
+		be_next_use_t result;
+		result.time = step;
+		result.outermost_loop = get_loop_depth(get_irn_loop(block));
+		result.before = block;
+		return result;
+	}
 
 #ifdef SCAN_INTERBLOCK_USES
 	{
-		double best_execfreq = -1;
-		unsigned next_use = USES_INFINITY;
-
+		unsigned next_use = USES_INFINITY;
+		int outermost_loop;
+		be_next_use_t result;
+		ir_loop *loop = get_irn_loop(block);
+		int loopdepth = get_loop_depth(loop);
+		int found_visited = 0;
+		int found_use = 0;
+		ir_graph *irg = get_irn_irg(block);
+		ir_node *startblock = get_irg_start_block(irg);
+
+		result.before = NULL;
+		outermost_loop = loopdepth;
 		foreach_block_succ(block, edge) {
 			const be_use_t *use;
 			const ir_node *succ_block = get_edge_src_irn(edge);
-			double execfreq = get_block_execfreq(uses->execfreqs, succ_block);
+			ir_loop *succ_loop;
+			unsigned use_dist;
 
-			//execfreq_sum += execfreq;
+			if(succ_block == startblock)
+				continue;
 
-			if(execfreq > best_execfreq) {
-				best_execfreq = execfreq;
+			DBG((env->dbg, LEVEL_5, "Checking succ of block %+F: %+F (for use of %+F)\n", block, succ_block, def));
+			if(!be_is_live_in(env->lv, succ_block, def)) {
+				//next_use = USES_INFINITY;
+				DBG((env->dbg, LEVEL_5, "  not live in\n"));
+				continue;
+			}
 
-				if(!be_is_live_in(uses->lv, succ_block, def)) {
-					next_use = USES_INFINITY;
-					continue;
+			use = get_or_set_use_block(env, succ_block, def);
+			DBG((env->dbg, LEVEL_5, "Found %u (loopdepth %d) (we're in block %+F)\n", use->next_use,
+						use->outermost_loop, block));
+			if(USES_IS_INFINITE(use->next_use)) {
+				if(use->outermost_loop < 0) {
+					found_visited = 1;
 				}
+				continue;
+			}
 
-				use = get_or_set_use_block(uses, succ_block, def);
-				//if(USES_IS_INFINITE(use->next_use))
-				//	continue;
+			found_use = 1;
+			use_dist = use->next_use;
 
-				next_use = use->next_use;
+			succ_loop = get_irn_loop(succ_block);
+			if(get_loop_depth(succ_loop) < loopdepth) {
+				unsigned factor = (loopdepth - get_loop_depth(succ_loop)) * 5000;
+				DBG((env->dbg, LEVEL_5, "Increase usestep because of loop out edge %d -> %d (%u)\n", loopdepth, get_loop_depth(succ_loop), factor));
+				// TODO we should use the number of nodes in the loop or so...
+ use_dist += factor; } - //next_use += use->next_use / execfreq; + if(use_dist < next_use) { + next_use = use_dist; + outermost_loop = use->outermost_loop; + result.before = use->node; + } } - /*if(next_use == 0) - return USES_INFINITY;*/ + if(loopdepth < outermost_loop) + outermost_loop = loopdepth; - //next_use /= execfreq_sum; + result.time = next_use + step; + result.outermost_loop = outermost_loop; - return ((unsigned) next_use) + step; + if(!found_use && found_visited) { + // the current result is correct for the current search, but isn't + // generally correct, so mark it + result.outermost_loop = -1; + } + DBG((env->dbg, LEVEL_5, "Result: %d (outerloop: %d)\n", result.time, result.outermost_loop)); + return result; } #else return USES_INFINITY; #endif } -be_uses_t *be_begin_uses(ir_graph *irg, const exec_freq_t *execfreqs, const be_lv_t *lv) +be_next_use_t be_get_next_use(be_uses_t *env, ir_node *from, + unsigned from_step, const ir_node *def, + int skip_from_uses) { - be_uses_t *uses = xmalloc(sizeof(uses[0])); + env->visited_counter++; + return get_next_use(env, from, from_step, def, skip_from_uses); +} + +static +void set_sched_step_walker(ir_node *block, void *data) +{ + ir_node *node; + unsigned step = 0; + (void) data; + + sched_foreach(block, node) { + set_irn_link(node, INT_TO_PTR(step)); + if(is_Phi(node)) + continue; + ++step; + } +} + +be_uses_t *be_begin_uses(ir_graph *irg, const be_lv_t *lv) +{ + be_uses_t *env = XMALLOC(be_uses_t); edges_assure(irg); - uses->uses = new_set(cmp_use, 512); - uses->irg = irg; - uses->execfreqs = execfreqs; - uses->lv = lv; - FIRM_DBG_REGISTER(uses->dbg, "firm.be.uses"); + //set_using_irn_link(irg); + + /* precalculate sched steps */ + irg_block_walk_graph(irg, set_sched_step_walker, NULL, NULL); + + env->uses = new_set(cmp_use, 512); + env->irg = irg; + env->lv = lv; + env->visited_counter = 0; + FIRM_DBG_REGISTER(env->dbg, "firm.be.uses"); - return uses; + return env; } -void be_end_uses(be_uses_t *uses) +void be_end_uses(be_uses_t *env) { - del_set(uses->uses); - free(uses); + //clear_using_irn_link(env->irg); + del_set(env->uses); + free(env); }
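
A minimal usage sketch of the interface after this change (not part of the patch itself): the next-use cache is built once per graph with be_begin_uses(), queried with be_get_next_use(), and torn down with be_end_uses(). Only those three calls, the be_next_use_t fields and the USES_* macros mirror what the diff declares; the helper name, its parameters and the assumption that "beuses.h" pulls in the required backend types are illustrative.

#include "beuses.h"

/* Hypothetical helper, not part of libFirm: number of schedule steps from
 * before_node to the next use of value, or USES_INFINITY if it is not used
 * again from that point on. */
static unsigned example_next_use_distance(ir_graph *irg, const be_lv_t *lv,
                                          ir_node *before_node,
                                          const ir_node *value)
{
	/* build the per-graph cache; schedule steps are precomputed here */
	be_uses_t *uses = be_begin_uses(irg, lv);

	/* query at before_node; skip_from_uses = 0 means uses by before_node
	 * itself still count, from_step is passed as 0 */
	be_next_use_t use = be_get_next_use(uses, before_node, 0, value, 0);
	unsigned distance;

	if (USES_IS_INFINITE(use.time)) {
		/* value is dead from here on; a spiller would not plan a reload */
		distance = USES_INFINITY;
	} else {
		/* use.time           : schedule steps until the next use
		 * use.outermost_loop : outermost loop depth crossed on the way there */
		distance = use.time;
	}

	be_end_uses(uses);
	return distance;
}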