X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbecopyheur.c;h=47f6d18cf975781ab9f88ddc53648157bcd07bec;hb=905edd3db0f7417a4ea8da45eb0cbf001bc68a67;hp=79afaba9a9c6d968447fc9d58ab06ce8322fe32e;hpb=026ec52ac914eee06e7afb961fb754735fb4ad9f;p=libfirm

diff --git a/ir/be/becopyheur.c b/ir/be/becopyheur.c
index 79afaba9a..47f6d18cf 100644
--- a/ir/be/becopyheur.c
+++ b/ir/be/becopyheur.c
@@ -42,7 +42,8 @@
 #include "becopyopt_t.h"
 #include "becopystat.h"
 #include "beintlive_t.h"
-#include "beirg_t.h"
+#include "beirg.h"
+#include "bemodule.h"
 
 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
 
@@ -92,11 +93,14 @@ static inline int nodes_interfere(const be_chordal_env_t *env, const ir_node *a,
 {
 	if (env->ifg)
 		return be_ifg_connected(env->ifg, a, b);
-	else
-		return values_interfere(env->birg, a, b);
+	else {
+		be_lv_t *lv = be_get_irg_liveness(env->irg);
+		return be_values_interfere(lv, a, b);
+	}
 }
 
-static int set_cmp_conflict_t(const void *x, const void *y, size_t size) {
+static int set_cmp_conflict_t(const void *x, const void *y, size_t size)
+{
 	const conflict_t *xx = x;
 	const conflict_t *yy = y;
 	(void) size;
@@ -108,7 +112,8 @@ static int set_cmp_conflict_t(const void *x, const void *y, size_t size) {
  * If a local pinned conflict occurs, a new edge in the conflict graph is added.
  * The next maximum independent set build, will regard it.
  */
-static inline void qnode_add_conflict(const qnode_t *qn, const ir_node *n1, const ir_node *n2) {
+static inline void qnode_add_conflict(const qnode_t *qn, const ir_node *n1, const ir_node *n2)
+{
 	conflict_t c;
 	DBG((dbg, LEVEL_4, "\t %+F -- %+F\n", n1, n2));
 
@@ -125,7 +130,8 @@ static inline void qnode_add_conflict(const qnode_t *qn, const ir_node *n1, cons
 /**
  * Checks if two nodes are in a conflict.
  */
-static inline int qnode_are_conflicting(const qnode_t *qn, const ir_node *n1, const ir_node *n2) {
+static inline int qnode_are_conflicting(const qnode_t *qn, const ir_node *n1, const ir_node *n2)
+{
 	conflict_t c;
 	/* search for live range interference */
 	if (n1!=n2 && nodes_interfere(qn->ou->co->cenv, n1, n2))
@@ -141,7 +147,8 @@ static inline int qnode_are_conflicting(const qnode_t *qn, const ir_node *n1, co
 	return set_find(qn->conflicts, &c, sizeof(c), HASH_CONFLICT(c)) != 0;
 }
 
-static int set_cmp_node_stat_t(const void *x, const void *y, size_t size) {
+static int set_cmp_node_stat_t(const void *x, const void *y, size_t size)
+{
 	(void) size;
 	return ((const node_stat_t*)x)->irn != ((const node_stat_t*)y)->irn;
 }
@@ -149,7 +156,8 @@ static int set_cmp_node_stat_t(const void *x, const void *y, size_t size) {
 /**
  * Finds a node status entry of a node if existent. Otherwise return NULL
  */
-static inline const node_stat_t *qnode_find_node(const qnode_t *qn, ir_node *irn) {
+static inline const node_stat_t *qnode_find_node(const qnode_t *qn, ir_node *irn)
+{
 	node_stat_t find;
 	find.irn = irn;
 	return set_find(qn->changed_nodes, &find, sizeof(find), hash_irn(irn));
@@ -159,7 +167,8 @@ static inline const node_stat_t *qnode_find_node(const qnode_t *qn, ir_node *irn
  * Finds a node status entry of a node if existent. Otherwise it will return
  * an initialized new entry for this node.
  */
-static inline node_stat_t *qnode_find_or_insert_node(const qnode_t *qn, ir_node *irn) {
+static inline node_stat_t *qnode_find_or_insert_node(const qnode_t *qn, ir_node *irn)
+{
 	node_stat_t find;
 	find.irn = irn;
 	find.new_color = NO_COLOR;
@@ -170,7 +179,8 @@ static inline node_stat_t *qnode_find_or_insert_node(const qnode_t *qn, ir_node
 /**
  * Returns the virtual color of a node if set before, else returns the real color.
  */
-static inline int qnode_get_new_color(const qnode_t *qn, ir_node *irn) {
+static inline int qnode_get_new_color(const qnode_t *qn, ir_node *irn)
+{
 	const node_stat_t *found = qnode_find_node(qn, irn);
 	if (found)
 		return found->new_color;
@@ -181,7 +191,8 @@ static inline int qnode_get_new_color(const qnode_t *qn, ir_node *irn) {
 /**
  * Sets the virtual color of a node.
  */
-static inline void qnode_set_new_color(const qnode_t *qn, ir_node *irn, int color) {
+static inline void qnode_set_new_color(const qnode_t *qn, ir_node *irn, int color)
+{
 	node_stat_t *found = qnode_find_or_insert_node(qn, irn);
 	found->new_color = color;
 	DBG((dbg, LEVEL_3, "\t col(%+F) := %d\n", irn, color));
@@ -192,7 +203,8 @@ static inline void qnode_set_new_color(const qnode_t *qn, ir_node *irn, int colo
  * to the same optimization unit and has been optimized before the current
  * processed node.
  */
-static inline int qnode_is_pinned_local(const qnode_t *qn, ir_node *irn) {
+static inline int qnode_is_pinned_local(const qnode_t *qn, ir_node *irn)
+{
 	const node_stat_t *found = qnode_find_node(qn, irn);
 	if (found)
 		return found->pinned_local;
@@ -204,7 +216,8 @@ static inline int qnode_is_pinned_local(const qnode_t *qn, ir_node *irn) {
 * Local-pins a node, so optimizations of further nodes of the same opt unit
 * can handle situations in which a color change would undo prior optimizations.
 */
-static inline void qnode_pin_local(const qnode_t *qn, ir_node *irn) {
+static inline void qnode_pin_local(const qnode_t *qn, ir_node *irn)
+{
 	node_stat_t *found = qnode_find_or_insert_node(qn, irn);
 	found->pinned_local = 1;
 	if (found->new_color == NO_COLOR)
@@ -236,14 +249,15 @@ static inline void qnode_pin_local(const qnode_t *qn, ir_node *irn) {
 *  Else the first conflicting ir_node encountered is returned.
 *
 */
-static ir_node *qnode_color_irn(const qnode_t *qn, ir_node *irn, int col, const ir_node *trigger) {
+static ir_node *qnode_color_irn(const qnode_t *qn, ir_node *irn, int col, const ir_node *trigger)
+{
 	copy_opt_t *co = qn->ou->co;
 	const be_chordal_env_t *chordal_env = co->cenv;
 	const arch_register_class_t *cls = co->cls;
 	int irn_col = qnode_get_new_color(qn, irn);
 	ir_node *sub_res, *curr;
 	be_ifg_t *ifg = chordal_env->ifg;
-	void *iter = be_ifg_neighbours_iter_alloca(ifg);
+	neighbours_iter_t iter;
 
 	DBG((dbg, LEVEL_3, "\t %+F \tcaused col(%+F) \t%2d --> %2d\n", trigger, irn, irn_col, col));
 
@@ -286,7 +300,7 @@ static ir_node *qnode_color_irn(const qnode_t *qn, ir_node *irn, int col, const
 		bitset_clear(free_cols, irn_col);
 
 		/* Exclude all colors used by adjacent nodes */
-		be_ifg_foreach_neighbour(ifg, iter, irn, curr)
+		be_ifg_foreach_neighbour(ifg, &iter, irn, curr)
 			bitset_clear(free_cols, qnode_get_new_color(qn, curr));
 
 		free_col = bitset_next_set(free_cols, 0);
@@ -308,12 +322,12 @@ static ir_node *qnode_color_irn(const qnode_t *qn, ir_node *irn, int col, const
 	 * If we arrive here changing color may be possible, but there may be conflicts.
 	 * Try to color all conflicting nodes 'curr' with the color of the irn itself.
 	 */
-	be_ifg_foreach_neighbour(ifg, iter, irn, curr) {
+	be_ifg_foreach_neighbour(ifg, &iter, irn, curr) {
 		DBG((dbg, LEVEL_3, "\t Confl %+F(%d)\n", curr, qnode_get_new_color(qn, curr)));
 		if (qnode_get_new_color(qn, curr) == col && curr != trigger) {
 			sub_res = qnode_color_irn(qn, curr, irn_col, irn);
 			if (sub_res != CHANGE_SAVE) {
-				be_ifg_neighbours_break(ifg, iter);
+				be_ifg_neighbours_break(&iter);
 				return sub_res;
 			}
 		}
@@ -334,7 +348,8 @@ static ir_node *qnode_color_irn(const qnode_t *qn, ir_node *irn, int col, const
 * @returns 1 iff all members colors could be set
 *          0 else
 */
-static int qnode_try_color(const qnode_t *qn) {
+static int qnode_try_color(const qnode_t *qn)
+{
 	int i;
 	for (i=0; i<qn->mis_size; ++i) {
 		ir_node *test_node, *confl_node;
@@ -379,26 +394,27 @@ static int qnode_try_color(const qnode_t *qn) {
 * Determines a maximum weighted independent set with respect to
 * the interference and conflict edges of all nodes in a qnode.
 */
-static inline void qnode_max_ind_set(qnode_t *qn, const unit_t *ou) {
+static inline void qnode_max_ind_set(qnode_t *qn, const unit_t *ou)
+{
 	ir_node **safe, **unsafe;
 	int i, o, safe_count, safe_costs, unsafe_count, *unsafe_costs;
 	bitset_t *curr, *best;
-	bitset_pos_t pos;
-	int max, next, curr_weight, best_weight = 0;
+	unsigned pos;
+	int next, curr_weight, best_weight = 0;
 
 	/* assign the nodes into two groups.
 	 * safe: node has no interference, hence it is in every max stable set.
 	 * unsafe: node has an interference
 	 */
-	safe = alloca((ou->node_count-1) * sizeof(*safe));
-	safe_costs = 0;
-	safe_count = 0;
-	unsafe = alloca((ou->node_count-1) * sizeof(*unsafe));
-	unsafe_costs = alloca((ou->node_count-1) * sizeof(*unsafe_costs));
+	safe = ALLOCAN(ir_node*, ou->node_count - 1);
+	safe_costs = 0;
+	safe_count = 0;
+	unsafe = ALLOCAN(ir_node*, ou->node_count - 1);
+	unsafe_costs = ALLOCAN(int, ou->node_count - 1);
 	unsafe_count = 0;
-	for(i=1; i<ou->node_count; ++i) {
+	for (i=1; i<ou->node_count; ++i) {
 		int is_safe = 1;
-		for(o=1; o<ou->node_count; ++o) {
+		for (o=1; o<ou->node_count; ++o) {
 			if (qnode_are_conflicting(qn, ou->nodes[i], ou->nodes[o])) {
 				if (i!=o) {
 					unsafe_costs[unsafe_count] = ou->costs[i];
@@ -438,7 +454,7 @@ static inline void qnode_max_ind_set(qnode_t *qn, const unit_t *ou) {
 		/* Exact Algorithm: Brute force */
 		curr = bitset_alloca(unsafe_count);
 		bitset_set_all(curr);
-		while ((max = bitset_popcnt(curr)) != 0) {
+		while (!bitset_is_empty(curr)) {
 			/* check if curr is a stable set */
 			for (i=bitset_next_set(curr, 0); i!=-1; i=bitset_next_set(curr, i+1))
 				for (o=bitset_next_set(curr, i); o!=-1; o=bitset_next_set(curr, o+1)) /* !!!!! difference to ou_max_ind_set_costs(): NOT (curr, i+1) */
@@ -463,7 +479,7 @@ no_stable_set:
 	}
 
 	/* transfer the best set into the qn */
-	qn->mis_size = 1+safe_count+bitset_popcnt(best);
+	qn->mis_size = 1+safe_count+bitset_popcount(best);
 	qn->mis_costs = safe_costs+best_weight;
 	qn->mis[0] = ou->nodes[0]; /* the root is always in a max stable set */
 	next = 1;
@@ -476,7 +492,8 @@ no_stable_set:
 /**
 * Creates a new qnode
 */
-static inline qnode_t *new_qnode(const unit_t *ou, int color) {
+static inline qnode_t *new_qnode(const unit_t *ou, int color)
+{
 	qnode_t *qn = XMALLOC(qnode_t);
 	qn->ou = ou;
 	qn->color = color;
@@ -489,7 +506,8 @@ static inline qnode_t *new_qnode(const unit_t *ou, int color) {
 /**
 * Frees space used by a queue node
 */
-static inline void free_qnode(qnode_t *qn) {
+static inline void free_qnode(qnode_t *qn)
+{
 	del_set(qn->conflicts);
 	del_set(qn->changed_nodes);
 	xfree(qn->mis);
@@ -500,7 +518,8 @@ static inline void free_qnode(qnode_t *qn) {
 * Inserts a qnode in the sorted queue of the optimization unit. Queue is
 * ordered by field 'size' (the size of the mis) in decreasing order.
 */
-static inline void ou_insert_qnode(unit_t *ou, qnode_t *qn) {
+static inline void ou_insert_qnode(unit_t *ou, qnode_t *qn)
+{
 	struct list_head *lh;
 
 	if (qnode_are_conflicting(qn, ou->nodes[0], ou->nodes[0])) {
@@ -529,13 +548,14 @@ static inline void ou_insert_qnode(unit_t *ou, qnode_t *qn) {
 * case for approximately 80% of all phi classes and 100% of register constrained
 * nodes. (All other phi classes are reduced to this case.)
 */
-static void ou_optimize(unit_t *ou) {
+static void ou_optimize(unit_t *ou)
+{
 	qnode_t *curr = NULL;
 	qnode_t *tmp;
 	const arch_register_req_t *req;
 	bitset_t const* ignore;
-	bitset_pos_t n_regs;
-	bitset_pos_t idx;
+	unsigned n_regs;
+	unsigned idx;
 	int i;
 
 	DBG((dbg, LEVEL_1, "\tOptimizing unit:\n"));
@@ -549,7 +569,7 @@ static void ou_optimize(unit_t *ou) {
 	ignore = ou->co->cenv->ignore_colors;
 	n_regs = req->cls->n_regs;
 	if (arch_register_req_is(req, limited)) {
-		rawbs_base_t const* limited = req->limited;
+		unsigned const* limited = req->limited;
 
 		for (idx = 0; idx != n_regs; ++idx) {
 			if (bitset_is_set(ignore, idx))
@@ -616,9 +636,13 @@ static void ou_optimize(unit_t *ou) {
 		free_qnode(curr);
 }
 
-int co_solve_heuristic(copy_opt_t *co) {
+/**
+ * Solves the problem using a heuristic approach
+ * Uses the OU data structure
+ */
+int co_solve_heuristic(copy_opt_t *co)
+{
 	unit_t *curr;
-	FIRM_DBG_REGISTER(dbg, "ir.be.copyoptheur");
 
 	ASSERT_OU_AVAIL(co);
 
@@ -630,3 +654,14 @@ int co_solve_heuristic(copy_opt_t *co) {
 	del_pset(pinned_global);
 	return 0;
 }
+
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyheur);
+void be_init_copyheur(void)
+{
+	static co_algo_info copyheur = {
+		co_solve_heuristic, 0
+	};
+
+	be_register_copyopt("heur1", &copyheur);
+	FIRM_DBG_REGISTER(dbg, "ir.be.copyoptheur");
+}
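
The two hunks in qnode_color_irn() switch from the old be_ifg_neighbours_iter_alloca() scheme to a stack-allocated neighbours_iter_t that is passed by address, with be_ifg_neighbours_break() now taking only the iterator. A minimal sketch of that usage pattern, assuming an already constructed be_ifg_t; the helper name and the beifg.h include are illustrative, not part of this patch:

#include "beifg.h"

/* Sketch: return 1 as soon as irn has any neighbour in the interference
 * graph. be_ifg_neighbours_break() releases the iterator when the loop is
 * left early, just as qnode_color_irn() does after this patch. */
static int has_any_neighbour(be_ifg_t *ifg, ir_node *irn)
{
	neighbours_iter_t iter;
	ir_node          *curr;

	be_ifg_foreach_neighbour(ifg, &iter, irn, curr) {
		(void)curr;
		be_ifg_neighbours_break(&iter);
		return 1;
	}
	return 0;
}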
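The block appended at the end of the file follows the usual libfirm backend-module pattern: a constructor declared with BE_REGISTER_MODULE_CONSTRUCTOR() fills a co_algo_info with the solver entry point and hands it to be_register_copyopt() under the key "heur1", and the debug module registration moves there from co_solve_heuristic(). A minimal sketch of how a further copy-minimization heuristic could hook in the same way; the solver function, constructor name and "heur_example" key are hypothetical, only the registration calls and the co_algo_info initializer shape are taken from this patch:

#include "becopyopt_t.h"
#include "bemodule.h"

/* Hypothetical extra heuristic, shown only to illustrate the registration
 * pattern introduced at the end of this patch. */
static int co_solve_heuristic_example(copy_opt_t *co)
{
	(void)co;   /* a real solver would process the optimization units here */
	return 0;
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyheur_example);
void be_init_copyheur_example(void)
{
	static co_algo_info example = {
		co_solve_heuristic_example, 0   /* second field left at 0, mirroring the patch */
	};

	be_register_copyopt("heur_example", &example);
}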