X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbecopyheur.c;h=be203051dcd66c03e0704aa840bc513f53e25a09;hb=34e3b8d50bce639e760da7233524a4db85c80290;hp=47c9495831ddae44eb0d586638a46cbcdcfbbd42;hpb=6fdee05f3b291a108be1dc76209cbebed5d2d06e;p=libfirm diff --git a/ir/be/becopyheur.c b/ir/be/becopyheur.c index 47c949583..be203051d 100644 --- a/ir/be/becopyheur.c +++ b/ir/be/becopyheur.c @@ -1,20 +1,6 @@ /* - * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved. - * * This file is part of libFirm. - * - * This file may be distributed and/or modified under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation and appearing in the file LICENSE.GPL included in the - * packaging of this file. - * - * Licensees holding valid libFirm Professional Edition licenses may use - * this file in accordance with the libFirm Commercial License. - * Agreement provided with the Software. - * - * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE - * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE. + * Copyright (C) 2012 University of Karlsruhe. */ /** @@ -22,7 +8,6 @@ * @brief First simple copy minimization heuristics. * @author Daniel Grund * @date 12.04.2005 - * @version $Id$ * * Heuristic for minimizing copies using a queue which holds 'qnodes' not yet * examined. A qnode has a 'target color', nodes out of the opt unit and @@ -32,9 +17,7 @@ * and the qnode is reinserted in the queue. The first qnode colored without * conflicts is the best one. */ -#ifdef HAVE_CONFIG_H #include "config.h" -#endif #include "debug.h" #include "bitset.h" @@ -44,10 +27,14 @@ #include "becopyopt_t.h" #include "becopystat.h" #include "beintlive_t.h" -#include "beirg_t.h" +#include "beirg.h" +#include "bemodule.h" DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;) +/** Defines an invalid register index. */ +#define NO_COLOR (-1) + #define SEARCH_FREE_COLORS #define SLOTS_PINNED_GLOBAL 64 @@ -60,7 +47,7 @@ DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;) /** * Modeling additional conflicts between nodes. NOT live range interference */ -typedef struct _conflict_t { +typedef struct conflict_t { const ir_node *n1, *n2; } conflict_t; @@ -68,18 +55,17 @@ typedef struct _conflict_t { * If an irn is changed, the changes first get stored in a node_stat_t, * to allow undo of changes (=drop new data) in case of conflicts. */ -typedef struct _node_stat_t { +typedef struct node_stat_t { ir_node *irn; - int new_color; - int pinned_local :1; + int new_color; + unsigned pinned_local :1; } node_stat_t; /** * Represents a node in the optimization queue. */ -typedef struct _qnode_t { +typedef struct qnode_t { struct list_head queue; /**< chaining of unit_t->queue */ - const unit_t *ou; /**< the opt unit this node belongs to */ int color; /**< target color */ set *conflicts; /**< contains conflict_t's. All internal conflicts */ int mis_costs; /**< costs of nodes/copies in the mis. */ @@ -88,19 +74,12 @@ typedef struct _qnode_t { set *changed_nodes; /**< contains node_stat_t's. 
*/ } qnode_t; -static pset *pinned_global; /**< optimized nodes should not be altered any more */ +static pset *pinned_global; /**< optimized nodes should not be altered any more */ -static INLINE int nodes_interfere(const be_chordal_env_t *env, const ir_node *a, const ir_node *b) +static int set_cmp_conflict_t(const void *x, const void *y, size_t size) { - if (env->ifg) - return be_ifg_connected(env->ifg, a, b); - else - return values_interfere(env->birg, a, b); -} - -static int set_cmp_conflict_t(const void *x, const void *y, size_t size) { - const conflict_t *xx = x; - const conflict_t *yy = y; + const conflict_t *xx = (const conflict_t*)x; + const conflict_t *yy = (const conflict_t*)y; (void) size; return xx->n1 != yy->n1 || xx->n2 != yy->n2; @@ -110,7 +89,8 @@ static int set_cmp_conflict_t(const void *x, const void *y, size_t size) { * If a local pinned conflict occurs, a new edge in the conflict graph is added. * The next maximum independent set build, will regard it. */ -static INLINE void qnode_add_conflict(const qnode_t *qn, const ir_node *n1, const ir_node *n2) { +static inline void qnode_add_conflict(const qnode_t *qn, const ir_node *n1, const ir_node *n2) +{ conflict_t c; DBG((dbg, LEVEL_4, "\t %+F -- %+F\n", n1, n2)); @@ -121,17 +101,21 @@ static INLINE void qnode_add_conflict(const qnode_t *qn, const ir_node *n1, cons c.n1 = n2; c.n2 = n1; } - set_insert(qn->conflicts, &c, sizeof(c), HASH_CONFLICT(c)); + (void)set_insert(conflict_t, qn->conflicts, &c, sizeof(c), HASH_CONFLICT(c)); } /** * Checks if two nodes are in a conflict. */ -static INLINE int qnode_are_conflicting(const qnode_t *qn, const ir_node *n1, const ir_node *n2) { +static inline int qnode_are_conflicting(const qnode_t *qn, const ir_node *n1, const ir_node *n2) +{ conflict_t c; /* search for live range interference */ - if (n1!=n2 && nodes_interfere(qn->ou->co->cenv, n1, n2)) - return 1; + if (n1 != n2) { + be_lv_t *const lv = be_get_irg_liveness(get_irn_irg(n1)); + if (be_values_interfere(lv, n1, n2)) + return 1; + } /* search for recoloring conflicts */ if (get_irn_idx(n1) < get_irn_idx(n2)) { c.n1 = n1; @@ -140,10 +124,11 @@ static INLINE int qnode_are_conflicting(const qnode_t *qn, const ir_node *n1, co c.n1 = n2; c.n2 = n1; } - return set_find(qn->conflicts, &c, sizeof(c), HASH_CONFLICT(c)) != 0; + return set_find(conflict_t, qn->conflicts, &c, sizeof(c), HASH_CONFLICT(c)) != 0; } -static int set_cmp_node_stat_t(const void *x, const void *y, size_t size) { +static int set_cmp_node_stat_t(const void *x, const void *y, size_t size) +{ (void) size; return ((const node_stat_t*)x)->irn != ((const node_stat_t*)y)->irn; } @@ -151,39 +136,43 @@ static int set_cmp_node_stat_t(const void *x, const void *y, size_t size) { /** * Finds a node status entry of a node if existent. Otherwise return NULL */ -static INLINE const node_stat_t *qnode_find_node(const qnode_t *qn, ir_node *irn) { +static inline const node_stat_t *qnode_find_node(const qnode_t *qn, ir_node *irn) +{ node_stat_t find; find.irn = irn; - return set_find(qn->changed_nodes, &find, sizeof(find), hash_irn(irn)); + return set_find(node_stat_t, qn->changed_nodes, &find, sizeof(find), hash_irn(irn)); } /** * Finds a node status entry of a node if existent. Otherwise it will return * an initialized new entry for this node. 
*/ -static INLINE node_stat_t *qnode_find_or_insert_node(const qnode_t *qn, ir_node *irn) { +static inline node_stat_t *qnode_find_or_insert_node(const qnode_t *qn, ir_node *irn) +{ node_stat_t find; find.irn = irn; find.new_color = NO_COLOR; find.pinned_local = 0; - return set_insert(qn->changed_nodes, &find, sizeof(find), hash_irn(irn)); + return set_insert(node_stat_t, qn->changed_nodes, &find, sizeof(find), hash_irn(irn)); } /** * Returns the virtual color of a node if set before, else returns the real color. */ -static INLINE int qnode_get_new_color(const qnode_t *qn, ir_node *irn) { +static inline int qnode_get_new_color(const qnode_t *qn, ir_node *irn) +{ const node_stat_t *found = qnode_find_node(qn, irn); if (found) return found->new_color; else - return get_irn_col(qn->ou->co, irn); + return get_irn_col(irn); } /** * Sets the virtual color of a node. */ -static INLINE void qnode_set_new_color(const qnode_t *qn, ir_node *irn, int color) { +static inline void qnode_set_new_color(const qnode_t *qn, ir_node *irn, int color) +{ node_stat_t *found = qnode_find_or_insert_node(qn, irn); found->new_color = color; DBG((dbg, LEVEL_3, "\t col(%+F) := %d\n", irn, color)); @@ -194,7 +183,8 @@ static INLINE void qnode_set_new_color(const qnode_t *qn, ir_node *irn, int colo * to the same optimization unit and has been optimized before the current * processed node. */ -static INLINE int qnode_is_pinned_local(const qnode_t *qn, ir_node *irn) { +static inline int qnode_is_pinned_local(const qnode_t *qn, ir_node *irn) +{ const node_stat_t *found = qnode_find_node(qn, irn); if (found) return found->pinned_local; @@ -206,11 +196,12 @@ static INLINE int qnode_is_pinned_local(const qnode_t *qn, ir_node *irn) { * Local-pins a node, so optimizations of further nodes of the same opt unit * can handle situations in which a color change would undo prior optimizations. */ -static INLINE void qnode_pin_local(const qnode_t *qn, ir_node *irn) { +static inline void qnode_pin_local(const qnode_t *qn, ir_node *irn) +{ node_stat_t *found = qnode_find_or_insert_node(qn, irn); found->pinned_local = 1; if (found->new_color == NO_COLOR) - found->new_color = get_irn_col(qn->ou->co, irn); + found->new_color = get_irn_col(irn); } @@ -238,15 +229,10 @@ static INLINE void qnode_pin_local(const qnode_t *qn, ir_node *irn) { * Else the first conflicting ir_node encountered is returned. * */ -static ir_node *qnode_color_irn(const qnode_t *qn, ir_node *irn, int col, const ir_node *trigger) { - copy_opt_t *co = qn->ou->co; - const be_chordal_env_t *chordal_env = co->cenv; - const arch_register_class_t *cls = co->cls; +static ir_node *qnode_color_irn(qnode_t const *const qn, ir_node *const irn, int const col, ir_node const *const trigger, bitset_t const *const allocatable_regs, be_ifg_t *const ifg) +{ int irn_col = qnode_get_new_color(qn, irn); - ir_node *sub_res, *curr; - be_ifg_t *ifg = chordal_env->ifg; - void *iter = be_ifg_neighbours_iter_alloca(ifg); - + neighbours_iter_t iter; DBG((dbg, LEVEL_3, "\t %+F \tcaused col(%+F) \t%2d --> %2d\n", trigger, irn, irn_col, col)); @@ -262,33 +248,28 @@ static ir_node *qnode_color_irn(const qnode_t *qn, ir_node *irn, int col, const return irn; } + arch_register_req_t const *const req = arch_get_irn_register_req(irn); + arch_register_class_t const *const cls = req->cls; #ifdef SEARCH_FREE_COLORS /* If we resolve conflicts (recursive calls) we can use any unused color. * In case of the first call @p col must be used. 
*/ if (irn != trigger) { bitset_t *free_cols = bitset_alloca(cls->n_regs); - const arch_register_req_t *req; - ir_node *curr; int free_col; /* Get all possible colors */ - bitset_copy(free_cols, co->cenv->ignore_colors); - bitset_flip_all(free_cols); + bitset_copy(free_cols, allocatable_regs); /* Exclude colors not assignable to the irn */ - req = arch_get_register_req(irn, -1); - if (arch_register_req_is(req, limited)) { - bitset_t *limited = bitset_alloca(cls->n_regs); - rbitset_copy_to_bitset(req->limited, limited); - bitset_and(free_cols, limited); - } + if (arch_register_req_is(req, limited)) + rbitset_and(free_cols->data, req->limited, free_cols->size); /* Exclude the color of the irn, because it must _change_ its color */ bitset_clear(free_cols, irn_col); /* Exclude all colors used by adjacent nodes */ - be_ifg_foreach_neighbour(ifg, iter, irn, curr) + be_ifg_foreach_neighbour(ifg, &iter, irn, curr) bitset_clear(free_cols, qnode_get_new_color(qn, curr)); free_col = bitset_next_set(free_cols, 0); @@ -301,7 +282,7 @@ static ir_node *qnode_color_irn(const qnode_t *qn, ir_node *irn, int col, const #endif /* SEARCH_FREE_COLORS */ /* If target color is not allocatable changing color is impossible */ - if (!arch_reg_is_allocatable(co->aenv, irn, -1, arch_register_for_index(cls, col))) { + if (!arch_reg_is_allocatable(req, arch_register_for_index(cls, col))) { DBG((dbg, LEVEL_3, "\t %+F impossible\n", irn)); return CHANGE_IMPOSSIBLE; } @@ -310,12 +291,12 @@ static ir_node *qnode_color_irn(const qnode_t *qn, ir_node *irn, int col, const * If we arrive here changing color may be possible, but there may be conflicts. * Try to color all conflicting nodes 'curr' with the color of the irn itself. */ - be_ifg_foreach_neighbour(ifg, iter, irn, curr) { + be_ifg_foreach_neighbour(ifg, &iter, irn, curr) { DBG((dbg, LEVEL_3, "\t Confl %+F(%d)\n", curr, qnode_get_new_color(qn, curr))); if (qnode_get_new_color(qn, curr) == col && curr != trigger) { - sub_res = qnode_color_irn(qn, curr, irn_col, irn); + ir_node *const sub_res = qnode_color_irn(qn, curr, irn_col, irn, allocatable_regs, ifg); if (sub_res != CHANGE_SAVE) { - be_ifg_neighbours_break(ifg, iter); + be_ifg_neighbours_break(&iter); return sub_res; } } @@ -336,14 +317,15 @@ static ir_node *qnode_color_irn(const qnode_t *qn, ir_node *irn, int col, const * @returns 1 iff all members colors could be set * 0 else */ -static int qnode_try_color(const qnode_t *qn) { +static int qnode_try_color(qnode_t const *const qn, bitset_t const *const allocatable_regs, be_ifg_t *const ifg) +{ int i; for (i=0; imis_size; ++i) { ir_node *test_node, *confl_node; test_node = qn->mis[i]; DBG((dbg, LEVEL_3, "\t Testing %+F\n", test_node)); - confl_node = qnode_color_irn(qn, test_node, qn->color, test_node); + confl_node = qnode_color_irn(qn, test_node, qn->color, test_node, allocatable_regs, ifg); if (confl_node == CHANGE_SAVE) { DBG((dbg, LEVEL_3, "\t Save --> pin local\n")); @@ -355,7 +337,7 @@ static int qnode_try_color(const qnode_t *qn) { } else { if (qnode_is_pinned_local(qn, confl_node)) { /* changing test_node would change back a node of current ou */ - if (confl_node == qn->ou->nodes[0]) { + if (confl_node == qn->mis[0]) { /* Adding a conflict edge between testnode and conflnode * would introduce a root -- arg interference. * So remove the arg of the qn */ @@ -381,26 +363,26 @@ static int qnode_try_color(const qnode_t *qn) { * Determines a maximum weighted independent set with respect to * the interference and conflict edges of all nodes in a qnode. 
*/ -static INLINE void qnode_max_ind_set(qnode_t *qn, const unit_t *ou) { +static inline void qnode_max_ind_set(qnode_t *qn, const unit_t *ou) +{ ir_node **safe, **unsafe; int i, o, safe_count, safe_costs, unsafe_count, *unsafe_costs; bitset_t *curr, *best; - bitset_pos_t pos; - int max, next, curr_weight, best_weight = 0; + int next, curr_weight, best_weight = 0; /* assign the nodes into two groups. * safe: node has no interference, hence it is in every max stable set. * unsafe: node has an interference */ - safe = alloca((ou->node_count-1) * sizeof(*safe)); - safe_costs = 0; - safe_count = 0; - unsafe = alloca((ou->node_count-1) * sizeof(*unsafe)); - unsafe_costs = alloca((ou->node_count-1) * sizeof(*unsafe_costs)); + safe = ALLOCAN(ir_node*, ou->node_count - 1); + safe_costs = 0; + safe_count = 0; + unsafe = ALLOCAN(ir_node*, ou->node_count - 1); + unsafe_costs = ALLOCAN(int, ou->node_count - 1); unsafe_count = 0; - for(i=1; inode_count; ++i) { + for (i=1; inode_count; ++i) { int is_safe = 1; - for(o=1; onode_count; ++o) { + for (o=1; onode_count; ++o) { if (qnode_are_conflicting(qn, ou->nodes[i], ou->nodes[o])) { if (i!=o) { unsafe_costs[unsafe_count] = ou->costs[i]; @@ -440,7 +422,7 @@ static INLINE void qnode_max_ind_set(qnode_t *qn, const unit_t *ou) { /* Exact Algorithm: Brute force */ curr = bitset_alloca(unsafe_count); bitset_set_all(curr); - while ((max = bitset_popcnt(curr)) != 0) { + while (!bitset_is_empty(curr)) { /* check if curr is a stable set */ for (i=bitset_next_set(curr, 0); i!=-1; i=bitset_next_set(curr, i+1)) for (o=bitset_next_set(curr, i); o!=-1; o=bitset_next_set(curr, o+1)) /* !!!!! difference to ou_max_ind_set_costs(): NOT (curr, i+1) */ @@ -448,7 +430,7 @@ static INLINE void qnode_max_ind_set(qnode_t *qn, const unit_t *ou) { goto no_stable_set; /* if we arrive here, we have a stable set */ - /* compute the weigth of the stable set*/ + /* compute the weight of the stable set*/ curr_weight = 0; bitset_foreach(curr, pos) curr_weight += unsafe_costs[pos]; @@ -465,7 +447,7 @@ no_stable_set: } /* transfer the best set into the qn */ - qn->mis_size = 1+safe_count+bitset_popcnt(best); + qn->mis_size = 1+safe_count+bitset_popcount(best); qn->mis_costs = safe_costs+best_weight; qn->mis[0] = ou->nodes[0]; /* the root is always in a max stable set */ next = 1; @@ -478,9 +460,9 @@ no_stable_set: /** * Creates a new qnode */ -static INLINE qnode_t *new_qnode(const unit_t *ou, int color) { +static inline qnode_t *new_qnode(const unit_t *ou, int color) +{ qnode_t *qn = XMALLOC(qnode_t); - qn->ou = ou; qn->color = color; qn->mis = XMALLOCN(ir_node*, ou->node_count); qn->conflicts = new_set(set_cmp_conflict_t, SLOTS_CONFLICTS); @@ -491,7 +473,8 @@ static INLINE qnode_t *new_qnode(const unit_t *ou, int color) { /** * Frees space used by a queue node */ -static INLINE void free_qnode(qnode_t *qn) { +static inline void free_qnode(qnode_t *qn) +{ del_set(qn->conflicts); del_set(qn->changed_nodes); xfree(qn->mis); @@ -502,7 +485,8 @@ static INLINE void free_qnode(qnode_t *qn) { * Inserts a qnode in the sorted queue of the optimization unit. Queue is * ordered by field 'size' (the size of the mis) in decreasing order. 
*/ -static INLINE void ou_insert_qnode(unit_t *ou, qnode_t *qn) { +static inline void ou_insert_qnode(unit_t *ou, qnode_t *qn) +{ struct list_head *lh; if (qnode_are_conflicting(qn, ou->nodes[0], ou->nodes[0])) { @@ -531,32 +515,39 @@ static INLINE void ou_insert_qnode(unit_t *ou, qnode_t *qn) { * case for approximately 80% of all phi classes and 100% of register constrained * nodes. (All other phi classes are reduced to this case.) */ -static void ou_optimize(unit_t *ou) { - int i; - qnode_t *curr = NULL, *tmp; - const arch_register_class_t *cls = ou->co->cls; - bitset_pos_t idx; - bitset_t *pos_regs = bitset_alloca(cls->n_regs); - +static void ou_optimize(unit_t *ou, bitset_t const *const allocatable_regs, be_ifg_t *const ifg) +{ DBG((dbg, LEVEL_1, "\tOptimizing unit:\n")); - for (i=0; inode_count; ++i) + for (int i = 0; i < ou->node_count; ++i) DBG((dbg, LEVEL_1, "\t %+F\n", ou->nodes[i])); /* init queue */ INIT_LIST_HEAD(&ou->queue); - arch_get_allocatable_regs(ou->nodes[0], -1, pos_regs); + arch_register_req_t const *const req = arch_get_irn_register_req(ou->nodes[0]); + unsigned const n_regs = req->cls->n_regs; + if (arch_register_req_is(req, limited)) { + unsigned const* limited = req->limited; - /* exclude ignore colors */ - bitset_andnot(pos_regs, ou->co->cenv->ignore_colors); + for (unsigned idx = 0; idx != n_regs; ++idx) { + if (!bitset_is_set(allocatable_regs, idx)) + continue; + if (!rbitset_is_set(limited, idx)) + continue; - assert(bitset_popcnt(pos_regs) != 0 && "No register is allowed for this node !!?"); + ou_insert_qnode(ou, new_qnode(ou, idx)); + } + } else { + for (unsigned idx = 0; idx != n_regs; ++idx) { + if (!bitset_is_set(allocatable_regs, idx)) + continue; - /* create new qnode */ - bitset_foreach(pos_regs, idx) - ou_insert_qnode(ou, new_qnode(ou, idx)); + ou_insert_qnode(ou, new_qnode(ou, idx)); + } + } /* search best */ + qnode_t *curr; for (;;) { assert(!list_empty(&ou->queue)); /* get head of queue */ @@ -565,7 +556,7 @@ static void ou_optimize(unit_t *ou) { DBG((dbg, LEVEL_2, "\t Examine qnode color %d with cost %d\n", curr->color, curr->mis_costs)); /* try */ - if (qnode_try_color(curr)) + if (qnode_try_color(curr, allocatable_regs, ifg)) break; /* no success, so re-insert */ @@ -576,12 +567,11 @@ static void ou_optimize(unit_t *ou) { /* apply the best found qnode */ if (curr->mis_size >= 2) { - node_stat_t *ns; int root_col = qnode_get_new_color(curr, ou->nodes[0]); DBG((dbg, LEVEL_1, "\t Best color: %d Costs: %d << %d << %d\n", curr->color, ou->min_nodes_costs, ou->all_nodes_costs - curr->mis_costs, ou->all_nodes_costs)); /* globally pin root and all args which have the same color */ pset_insert_ptr(pinned_global, ou->nodes[0]); - for (i=1; inode_count; ++i) { + for (int i = 1; i < ou->node_count; ++i) { ir_node *irn = ou->nodes[i]; int nc = qnode_get_new_color(curr, irn); if (nc != NO_COLOR && nc == root_col) @@ -589,11 +579,11 @@ static void ou_optimize(unit_t *ou) { } /* set color of all changed nodes */ - for (ns = set_first(curr->changed_nodes); ns; ns = set_next(curr->changed_nodes)) { + foreach_set(curr->changed_nodes, node_stat_t, ns) { /* NO_COLOR is possible, if we had an undo */ if (ns->new_color != NO_COLOR) { DBG((dbg, LEVEL_1, "\t color(%+F) := %d\n", ns->irn, ns->new_color)); - set_irn_col(ou->co, ns->irn, ns->new_color); + set_irn_col(req->cls, ns->irn, ns->new_color); } } } @@ -604,17 +594,33 @@ static void ou_optimize(unit_t *ou) { free_qnode(curr); } -int co_solve_heuristic(copy_opt_t *co) { - unit_t *curr; - FIRM_DBG_REGISTER(dbg, 
"ir.be.copyoptheur"); - +/** + * Solves the problem using a heuristic approach + * Uses the OU data structure + */ +int co_solve_heuristic(copy_opt_t *co) +{ ASSERT_OU_AVAIL(co); pinned_global = pset_new_ptr(SLOTS_PINNED_GLOBAL); - list_for_each_entry(unit_t, curr, &co->units, units) + bitset_t const *const allocatable_regs = co->cenv->allocatable_regs; + be_ifg_t *const ifg = co->cenv->ifg; + list_for_each_entry(unit_t, curr, &co->units, units) { if (curr->node_count > 1) - ou_optimize(curr); + ou_optimize(curr, allocatable_regs, ifg); + } del_pset(pinned_global); return 0; } + +BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyheur) +void be_init_copyheur(void) +{ + static co_algo_info copyheur = { + co_solve_heuristic, 0 + }; + + be_register_copyopt("heur1", ©heur); + FIRM_DBG_REGISTER(dbg, "ir.be.copyoptheur"); +}