4 * Copyright: (c) Universitaet Karlsruhe
5 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
22 #include "bechordal_t.h"
23 #include "becopyopt.h"
24 #include "becopystat.h"
/* Debug module handle for this file; registered in new_copy_opt(). */
26 static firm_dbg_module_t *dbg = NULL;
/* Evaluates non-zero iff @p irn belongs to the register class this
 * copy-opt works on.  NOTE: expands a variable named `co` that must be
 * in scope at the expansion site. */
28 #define is_curr_reg_class(irn) \
29 (arch_get_irn_reg_class(get_arch_env(co), \
30 irn, -1) == co->chordal_env->cls)
/*
 * Minimum / maximum of two scalar values.
 *
 * NOTE(review): the original expansions read (a<b) without parenthesizing
 * the individual parameters, so an argument containing a lower-precedence
 * operator mis-parsed: MIN(1|4, 2) expanded to (1|4<2) == (1|(4<2)) and
 * selected the wrong operand.  Each parameter is now fully parenthesized
 * (CERT C PRE01-C).  The arguments are still evaluated twice — do not
 * pass expressions with side effects.
 */
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#define MAX(a,b) (((a) < (b)) ? (b) : (a))
/* NOTE(review): sampled excerpt — several original source lines are
 * missing between the numbered lines below. */
37 * Determines a maximum weighted independent set with respect to
38 * the interference and conflict edges of all nodes in a qnode.
40 static int ou_max_ind_set_costs(unit_t *ou) {
41 be_chordal_env_t *chordal_env = ou->co->chordal_env;
42 ir_node **safe, **unsafe;
43 int i, o, safe_count, safe_costs, unsafe_count, *unsafe_costs;
45 int max, pos, curr_weight, best_weight = 0;
47 /* assign the nodes into two groups.
48 * safe: node has no interference, hence it is in every max stable set.
49 * unsafe: node has an interference
/* Node 0 is the root; only the arguments (indices 1..node_count-1) are
 * partitioned, hence the node_count-1 sized scratch arrays. */
51 safe = alloca((ou->node_count-1) * sizeof(*safe));
54 unsafe = alloca((ou->node_count-1) * sizeof(*unsafe));
55 unsafe_costs = alloca((ou->node_count-1) * sizeof(*unsafe_costs));
/* Pairwise interference test over all argument pairs. */
57 for(i=1; i<ou->node_count; ++i) {
59 for(o=1; o<ou->node_count; ++o) {
62 if (nodes_interfere(chordal_env, ou->nodes[i], ou->nodes[o])) {
63 unsafe_costs[unsafe_count] = ou->costs[i];
64 unsafe[unsafe_count] = ou->nodes[i];
/* Non-interfering nodes are "safe": they are in every maximum stable
 * set, so their costs are accumulated unconditionally. */
71 safe_costs += ou->costs[i];
72 safe[safe_count++] = ou->nodes[i];
77 /* now compute the best set out of the unsafe nodes */
/* Large problems: greedy heuristic instead of exhaustive enumeration. */
78 if (unsafe_count > MIS_HEUR_TRIGGER) {
79 bitset_t *best = bitset_alloca(unsafe_count);
80 /* Heuristic: greedy trial and error from index 0 to unsafe_count-1 */
81 for (i=0; i<unsafe_count; ++i) {
83 /* check if it is a stable set */
84 for (o=bitset_next_set(best, 0); o!=-1 && o<i; o=bitset_next_set(best, o+1))
85 if (nodes_interfere(chordal_env, unsafe[i], unsafe[o])) {
86 bitset_clear(best, i); /* clear the bit and try next one */
90 /* compute the weight */
91 bitset_foreach(best, pos)
92 best_weight += unsafe_costs[pos];
94 /* Exact Algorithm: Brute force */
95 curr = bitset_alloca(unsafe_count);
/* Enumerate candidate subsets until none is left. */
97 while ((max = bitset_popcnt(curr)) != 0) {
98 /* check if curr is a stable set */
99 for (i=bitset_next_set(curr, 0); i!=-1; i=bitset_next_set(curr, i+1))
100 for (o=bitset_next_set(curr, i+1); o!=-1; o=bitset_next_set(curr, o+1)) /* !!!!! difference to qnode_max_ind_set(): NOT (curr, i) */
101 if (nodes_interfere(chordal_env, unsafe[i], unsafe[o]))
104 /* if we arrive here, we have a stable set */
105 /* compute the weight of the stable set */
107 bitset_foreach(curr, pos)
108 curr_weight += unsafe_costs[pos];
/* Keep the heaviest stable set seen so far. */
111 if (curr_weight > best_weight) {
112 best_weight = curr_weight;
/* Result: unconditional safe costs plus the best unsafe subset weight. */
120 return safe_costs+best_weight;
/* NOTE(review): sampled excerpt — several original source lines are
 * missing between the numbered lines below. */
124 * Builds an optimization unit for a given optimizable irn (root).
125 * This opt-unit is inserted in the main structure co.
126 * If an arg of root itself is optimizable process this arg before with a
127 * recursive call. For handling this situation and loops co->root is used
128 * to remember all roots.
130 static void co_append_unit(copy_opt_t *co, ir_node *root) {
133 struct list_head *tmp;
135 DBG((dbg, LEVEL_1, "\t Root: %n %N\n", root, root));
136 /* check if we encountered this root earlier */
137 if (pset_find_ptr(co->roots, root))
139 pset_insert_ptr(co->roots, root);
141 assert(is_curr_reg_class(root) && "node is in wrong register class!");
/* One slot for the root plus at most one per argument; trimmed via
 * xrealloc once the actual member count is known. */
144 arity = get_irn_arity(root);
145 unit = xcalloc(1, sizeof(*unit));
147 unit->nodes = xmalloc((arity+1) * sizeof(*unit->nodes));
148 unit->costs = xmalloc((arity+1) * sizeof(*unit->costs));
149 unit->node_count = 1;
150 unit->nodes[0] = root;
151 INIT_LIST_HEAD(&unit->queue);
/* Case 1: a Phi of a back-end mode — its arguments become members. */
154 if (is_Phi(root) && is_firm_be_mode(get_irn_mode(root))) {
155 for (i=0; i<arity; ++i) {
157 ir_node *arg = get_irn_n(root, i);
159 assert(is_curr_reg_class(arg) && "Argument not in same register class.");
/* Interfering args can never share the root's register: their copy
 * costs are inevitable, and they are not made members. */
162 if (nodes_interfere(co->chordal_env, root, arg)) {
163 unit->inevitable_costs += co->get_costs(root, arg, i);
167 /* Else insert the argument of the phi to the members of this ou */
168 DBG((dbg, LEVEL_1, "\t Member: %n %N\n", arg, arg));
170 /* Check if arg has occurred at a prior position in the arg/list */
171 for (o=0; o<unit->node_count; ++o)
172 if (unit->nodes[o] == arg) {
177 if (!arg_pos) { /* a new argument */
178 /* insert node, set costs */
179 unit->nodes[unit->node_count] = arg;
180 unit->costs[unit->node_count] = co->get_costs(root, arg, i);
182 } else { /* arg has occurred before in same phi */
183 /* increase costs for existing arg */
184 unit->costs[arg_pos] += co->get_costs(root, arg, i);
/* Shrink the arrays to the members actually inserted. */
187 unit->nodes = xrealloc(unit->nodes, unit->node_count * sizeof(*unit->nodes));
188 unit->costs = xrealloc(unit->costs, unit->node_count * sizeof(*unit->costs));
/* Case 2: a Copy node — the unit is exactly root and its source. */
189 } else if (is_Copy(get_arch_env(co), root)) {
190 assert(!nodes_interfere(co->chordal_env, root, get_Copy_src(root)));
191 unit->nodes[1] = get_Copy_src(root);
192 unit->costs[1] = co->get_costs(root, unit->nodes[1], -1);
193 unit->node_count = 2;
194 unit->nodes = xrealloc(unit->nodes, 2 * sizeof(*unit->nodes));
195 unit->costs = xrealloc(unit->costs, 2 * sizeof(*unit->costs));
197 assert(0 && "This is not an optimizable node!");
198 /* TODO add ou's for 2-addr-code instructions */
201 /* Determine the maximum costs this unit can cause: all_nodes_cost */
202 for(i=1; i<unit->node_count; ++i) {
203 unit->sort_key = MAX(unit->sort_key, unit->costs[i]);
204 unit->all_nodes_costs += unit->costs[i];
207 /* Determine the minimal costs this unit will cause: min_nodes_costs */
208 unit->min_nodes_costs += unit->all_nodes_costs - ou_max_ind_set_costs(unit);
210 /* Insert the new ou according to its sort_key */
/* Walk to the insertion point so co->units stays sorted by descending
 * sort_key. */
212 while (tmp->next != &co->units && list_entry_units(tmp->next)->sort_key > unit->sort_key)
214 list_add(&unit->units, tmp);
/**
 * Block walker callback: for every border entry in this block that is a
 * real definition of an optimizable node, build an optimization unit.
 */
217 static void co_collect_in_block(ir_node *block, void *env) {
218 copy_opt_t *co = env;
219 struct list_head *head = get_block_border_head(co->chordal_env, block);
/* Reverse order: borders are visited from block entry towards its end
 * — presumably to process defs in program order; TODO confirm. */
222 list_for_each_entry_reverse(border_t, curr, head, list)
223 if (curr->is_def && curr->is_real && is_optimizable(get_arch_env(co), curr->irn))
224 co_append_unit(co, curr->irn);
/**
 * Collects all optimization units of the irg by walking its dominator
 * tree; co->roots records roots already handled (see co_append_unit).
 */
227 static void co_collect_units(copy_opt_t *co) {
228 DBG((dbg, LEVEL_1, "\tCollecting optimization units\n"));
229 co->roots = pset_new_ptr(64);
230 dom_tree_walk_irg(get_irg(co), co_collect_in_block, NULL, co);
/**
 * Allocates and initializes a copy_opt instance for the given chordal
 * environment, using @p get_costs to weight individual copies, and
 * collects its optimization units.  Caller frees with free_copy_opt().
 * NOTE(review): sampled excerpt — some original lines are missing below.
 */
234 copy_opt_t *new_copy_opt(be_chordal_env_t *chordal_env, int (*get_costs)(ir_node*, ir_node*, int)) {
235 const char *s1, *s2, *s3;
239 dbg = firm_dbg_register("ir.be.copyopt");
240 firm_dbg_set_mask(dbg, DEBUG_LVL_CO);
242 co = xcalloc(1, sizeof(*co));
243 co->chordal_env = chordal_env;
244 co->get_costs = get_costs;
/* Build the name "<prog>__<irg-entity>__<reg-class>". */
246 s1 = get_irp_prog_name();
247 s2 = get_entity_name(get_irg_entity(get_irg(co)));
248 s3 = chordal_env->cls->name;
/* +5: two "__" separators (4 chars) plus the terminating '\0'. */
249 len = strlen(s1) + strlen(s2) + strlen(s3) + 5;
250 co->name = xmalloc(len);
251 snprintf(co->name, len, "%s__%s__%s", s1, s2, s3);
/* Raise the debug level only for the irg singled out via DEBUG_IRG. */
252 if (!strcmp(co->name, DEBUG_IRG))
253 firm_dbg_set_mask(dbg, DEBUG_IRG_LVL_CO);
255 firm_dbg_set_mask(dbg, DEBUG_LVL_CO);
257 INIT_LIST_HEAD(&co->units);
258 co_collect_units(co);
/**
 * Frees a copy_opt instance.  Iterates the unit list with the _safe
 * variant, which suggests entries are unlinked/freed during the walk —
 * the loop body is outside this excerpt, TODO confirm.
 */
262 void free_copy_opt(copy_opt_t *co) {
265 list_for_each_entry_safe(unit_t, curr, tmp, &co->units, units) {
/**
 * Checks whether @p irn has a user that is a Phi (of a back-end mode) or
 * a Perm which irn does not interfere with — i.e. whether irn is worth
 * considering as a copy-optimization argument.
 */
271 int is_optimizable_arg(const copy_opt_t *co, ir_node *irn) {
273 for(i=0, max=get_irn_n_outs(irn); i<max; ++i) {
274 ir_node *n = get_irn_out(irn, i);
275 if (((is_Phi(n) && is_firm_be_mode(get_irn_mode(n))) ||
276 is_Perm(get_arch_env(co), n)) && (irn == n || !nodes_interfere(co->chordal_env, irn, n)))
/**
 * Cost function: weights a copy by the loop depth of the block where the
 * copy would be placed.  @p pos is the Phi predecessor index, or -1 for
 * the non-Phi (Perm/Copy) case.
 */
282 int get_costs_loop_depth(ir_node *root, ir_node* arg, int pos) {
285 ir_node *root_block = get_nodes_block(root);
287 assert(pos==-1 || is_Phi(root));
289 /* a perm places the copy in the same block as it resides */
290 loop = get_irn_loop(root_block);
292 /* for phis the copies are placed in the corresponding pred-block */
293 loop = get_irn_loop(get_Block_cfgpred_block(root_block, pos));
/* NOTE(review): the returned weight is derived from d; the exact formula
 * lies outside this excerpt — TODO confirm. */
296 int d = get_loop_depth(loop);
/* Trivial cost function; the name suggests every copy costs 1 — the body
 * is not visible in this excerpt, TODO confirm. */
302 int get_costs_all_one(ir_node *root, ir_node* arg, int pos) {
/**
 * Upper bound: the total cost if no copy at all could be avoided —
 * inevitable costs plus the cost of every member of every unit.
 */
306 int co_get_max_copy_costs(const copy_opt_t *co) {
310 list_for_each_entry(unit_t, curr, &co->units, units) {
311 res += curr->inevitable_costs;
/* Index 0 is the root itself; only members 1..node_count-1 carry costs. */
312 for (i=1; i<curr->node_count; ++i)
313 res += curr->costs[i];
/** Sums only the inevitable (interference-forced) costs of all units. */
318 int co_get_inevit_copy_costs(const copy_opt_t *co) {
322 list_for_each_entry(unit_t, curr, &co->units, units)
323 res += curr->inevitable_costs;
/**
 * Costs of the *current* coloring: inevitable costs plus the costs of
 * every member whose assigned color differs from its root's color.
 */
327 int co_get_copy_costs(const copy_opt_t *co) {
331 list_for_each_entry(unit_t, curr, &co->units, units) {
332 int root_col = get_irn_col(co, curr->nodes[0]);
333 DBG((dbg, LEVEL_1, " %3d costs for root %+F color %d\n", curr->inevitable_costs, curr->nodes[0], root_col));
334 res += curr->inevitable_costs;
335 for (i=1; i<curr->node_count; ++i) {
336 int arg_col = get_irn_col(co, curr->nodes[i]);
/* Only a color mismatch forces an actual copy instruction. */
337 if (root_col != arg_col) {
338 DBG((dbg, LEVEL_1, " %3d for arg %+F color %d\n", curr->costs[i], curr->nodes[i], arg_col));
339 res += curr->costs[i];
/**
 * Lower bound on the achievable copy costs: inevitable costs plus each
 * unit's provably unavoidable member costs (min_nodes_costs).
 */
346 int co_get_lower_bound(const copy_opt_t *co) {
349 list_for_each_entry(unit_t, curr, &co->units, units)
350 res += curr->inevitable_costs + curr->min_nodes_costs;