+
+
+ /* now compute the best set out of the unsafe nodes*/
+ if (unsafe_count > MIS_HEUR_TRIGGER) {
+ bitset_t *best = bitset_alloca(unsafe_count);
/* Heuristic: greedy trial and error from index 0 to unsafe_count-1 */
+ for (i=0; i<unsafe_count; ++i) {
+ bitset_set(best, i);
+ /* check if it is a stable set */
+ for (o=bitset_next_set(best, 0); o!=-1 && o<i; o=bitset_next_set(best, o+1))
+ if (nodes_interfere(chordal_env, unsafe[i], unsafe[o])) {
+ bitset_clear(best, i); /* clear the bit and try next one */
+ break;
+ }
+ }
+ /* compute the weight */
+ bitset_foreach(best, pos)
+ best_weight += unsafe_costs[pos];
+ } else {
+ /* Exact Algorithm: Brute force */
+ curr = bitset_alloca(unsafe_count);
+ bitset_set_all(curr);
+ while ((max = bitset_popcnt(curr)) != 0) {
+ /* check if curr is a stable set */
+ for (i=bitset_next_set(curr, 0); i!=-1; i=bitset_next_set(curr, i+1))
+ for (o=bitset_next_set(curr, i+1); o!=-1; o=bitset_next_set(curr, o+1)) /* !!!!! difference to qnode_max_ind_set(): NOT (curr, i) */
+ if (nodes_interfere(chordal_env, unsafe[i], unsafe[o]))
+ goto no_stable_set;
+
+ /* if we arrive here, we have a stable set */
/* compute the weight of the stable set */
+ curr_weight = 0;
+ bitset_foreach(curr, pos)
+ curr_weight += unsafe_costs[pos];
+
+ /* any better ? */
+ if (curr_weight > best_weight) {
+ best_weight = curr_weight;
+ }
+
+ no_stable_set:
+ bitset_minus1(curr);
+ }
+ }
+
+ return safe_costs+best_weight;
+}
+
/**
 * irg_walk_graph() callback: if @p irn is the root of an optimizable copy
 * situation, build an optimization unit (ou) for it and insert the unit
 * into the cost-sorted unit list of the copy_opt_t passed as @p env.
 *
 * Three kinds of roots are handled:
 *  - a register Phi: the unit contains the Phi plus all arguments that do
 *    not interfere with it,
 *  - a Proj of a Perm: the unit contains the Proj plus the corresponding
 *    Perm operand,
 *  - a 2-address-code instruction: the unit contains the result plus all
 *    non-interfering "should be same" operands.
 *
 * Units that end up with fewer than two nodes are discarded.
 */
static void co_collect_units(ir_node *irn, void *env)
{
	const arch_register_req_t *req = arch_get_register_req_out(irn);
	copy_opt_t *co = env;
	unit_t *unit;

	/* only nodes of the register class being optimized are of interest */
	if (req->cls != co->cls)
		return;
	if (!co_is_optimizable_root(irn))
		return;

	/* Init a new unit (zero-initialized: all cost fields start at 0) */
	unit = XMALLOCZ(unit_t);
	unit->co = co;
	unit->node_count = 1; /* slot 0 is reserved for the root node */
	INIT_LIST_HEAD(&unit->queue);

	/* Phi with some/all of its arguments */
	if (is_Reg_Phi(irn)) {
		int i, arity;

		/* init: worst case, every argument joins the unit */
		arity = get_irn_arity(irn);
		unit->nodes = XMALLOCN(ir_node*, arity + 1);
		unit->costs = XMALLOCN(int, arity + 1);
		unit->nodes[0] = irn;

		/* fill */
		for (i=0; i<arity; ++i) {
			int o, arg_pos;
			ir_node *arg = get_irn_n(irn, i);

			assert(arch_get_irn_reg_class_out(arg) == co->cls && "Argument not in same register class.");
			if (arg == irn)
				continue;
			if (nodes_interfere(co->cenv, irn, arg)) {
				/* interfering args can never be coalesced with the Phi:
				 * their copy costs are inevitable */
				unit->inevitable_costs += co->get_costs(co, irn, arg, i);
				continue;
			}

			/* Else insert the argument of the phi to the members of this ou */
			DBG((dbg, LEVEL_1, "\t Member: %+F\n", arg));

			if (arch_irn_is_ignore(arg))
				continue;

			/* Check if arg has occurred at a prior position in the arg list */
			arg_pos = 0;
			for (o=1; o<unit->node_count; ++o) {
				if (unit->nodes[o] == arg) {
					arg_pos = o;
					break;
				}
			}

			if (!arg_pos) { /* a new argument */
				/* insert node, set costs */
				unit->nodes[unit->node_count] = arg;
				unit->costs[unit->node_count] = co->get_costs(co, irn, arg, i);
				unit->node_count++;
			} else { /* arg has occurred before in same phi */
				/* increase costs for existing arg */
				unit->costs[arg_pos] += co->get_costs(co, irn, arg, i);
			}
		}
		/* shrink arrays to the number of members actually collected */
		unit->nodes = XREALLOC(unit->nodes, ir_node*, unit->node_count);
		unit->costs = XREALLOC(unit->costs, int, unit->node_count);
	} else if (is_Perm_Proj(irn)) {
		/* Proj of a perm with corresponding arg */
		assert(!nodes_interfere(co->cenv, irn, get_Perm_src(irn)));
		unit->nodes = XMALLOCN(ir_node*, 2);
		unit->costs = XMALLOCN(int, 2);
		unit->node_count = 2;
		unit->nodes[0] = irn;
		unit->nodes[1] = get_Perm_src(irn);
		/* costs[0] is never read; only slots >= 1 carry costs */
		unit->costs[1] = co->get_costs(co, irn, unit->nodes[1], -1);
	} else {
		/* Src == Tgt of a 2-addr-code instruction */
		if (is_2addr_code(req)) {
			/* bitmask of the input positions that should get the same register */
			const unsigned other = req->other_same;
			int count = 0;
			int i;

			/* first pass: count usable (non-ignore, non-interfering) operands */
			for (i = 0; (1U << i) <= other; ++i) {
				if (other & (1U << i)) {
					ir_node *o = get_irn_n(skip_Proj(irn), i);
					if (arch_irn_is_ignore(o))
						continue;
					if (nodes_interfere(co->cenv, irn, o))
						continue;
					++count;
				}
			}

			if (count != 0) {
				int k = 0;
				++count; /* one extra slot for the root node itself */
				unit->nodes = XMALLOCN(ir_node*, count);
				unit->costs = XMALLOCN(int, count);
				unit->node_count = count;
				unit->nodes[k++] = irn;

				/* second pass: collect exactly the operands counted above */
				for (i = 0; 1U << i <= other; ++i) {
					if (other & (1U << i)) {
						ir_node *o = get_irn_n(skip_Proj(irn), i);
						if (!arch_irn_is_ignore(o) &&
								!nodes_interfere(co->cenv, irn, o)) {
							unit->nodes[k] = o;
							unit->costs[k] = co->get_costs(co, irn, o, -1);
							++k;
						}
					}
				}
			}
		} else {
			assert(0 && "This is not an optimizable node!");
		}
	}

	/* Insert the new unit at a position according to its costs */
	if (unit->node_count > 1) {
		int i;
		struct list_head *tmp;

		/* Determine the maximum costs this unit can cause: all_nodes_cost */
		for (i=1; i<unit->node_count; ++i) {
			unit->sort_key = MAX(unit->sort_key, unit->costs[i]);
			unit->all_nodes_costs += unit->costs[i];
		}

		/* Determine the minimal costs this unit will cause: min_nodes_costs */
		unit->min_nodes_costs += unit->all_nodes_costs - ou_max_ind_set_costs(unit);
		/* Insert the new ou according to its sort_key: list is kept sorted,
		 * greater keys first */
		tmp = &co->units;
		while (tmp->next != &co->units && list_entry_units(tmp->next)->sort_key > unit->sort_key)
			tmp = tmp->next;
		list_add(&unit->units, tmp);
	} else {
		/* only the root node: nothing to coalesce, drop the unit */
		free(unit);
	}
}
+
+#ifdef QUICK_AND_DIRTY_HACK
+
+static int compare_ous(const void *k1, const void *k2)
+{
+ const unit_t *u1 = *((const unit_t **) k1);
+ const unit_t *u2 = *((const unit_t **) k2);
+ int i, o, u1_has_constr, u2_has_constr;
+ arch_register_req_t req;
+
+ /* Units with constraints come first */
+ u1_has_constr = 0;
+ for (i=0; i<u1->node_count; ++i) {
+ arch_get_register_req_out(&req, u1->nodes[i]);
+ if (arch_register_req_is(&req, limited)) {
+ u1_has_constr = 1;
+ break;
+ }
+ }
+
+ u2_has_constr = 0;
+ for (i=0; i<u2->node_count; ++i) {
+ arch_get_register_req_out(&req, u2->nodes[i]);
+ if (arch_register_req_is(&req, limited)) {
+ u2_has_constr = 1;
+ break;
+ }
+ }
+
+ if (u1_has_constr != u2_has_constr)
+ return u2_has_constr - u1_has_constr;
+
+ /* Now check, whether the two units are connected */
+#if 0
+ for (i=0; i<u1->node_count; ++i)
+ for (o=0; o<u2->node_count; ++o)
+ if (u1->nodes[i] == u2->nodes[o])
+ return 0;
+#endif
+
+ /* After all, the sort key decides. Greater keys come first. */
+ return u2->sort_key - u1->sort_key;
+
+}
+
+/**
+ * Sort the ou's according to constraints and their sort_key
+ */
+static void co_sort_units(copy_opt_t *co)
+{
+ int i, count = 0, costs;
+ unit_t *ou, **ous;
+
+ /* get the number of ous, remove them form the list and fill the array */
+ list_for_each_entry(unit_t, ou, &co->units, units)
+ count++;
+ ous = ALLOCAN(unit_t, count);
+
+ costs = co_get_max_copy_costs(co);
+
+ i = 0;
+ list_for_each_entry(unit_t, ou, &co->units, units)
+ ous[i++] = ou;
+
+ INIT_LIST_HEAD(&co->units);
+
+ assert(count == i && list_empty(&co->units));
+
+ for (i=0; i<count; ++i)
+ ir_printf("%+F\n", ous[i]->nodes[0]);
+
+ qsort(ous, count, sizeof(*ous), compare_ous);
+
+ ir_printf("\n\n");
+ for (i=0; i<count; ++i)
+ ir_printf("%+F\n", ous[i]->nodes[0]);
+
+ /* reinsert into list in correct order */
+ for (i=0; i<count; ++i)
+ list_add_tail(&ous[i]->units, &co->units);
+
+ assert(costs == co_get_max_copy_costs(co));
+}
+#endif
+
/**
 * Collects all optimization units of the irg of @p co into co->units.
 * Must run before any heuristic that consumes the unit list.
 */
void co_build_ou_structure(copy_opt_t *co)
{
	DBG((dbg, LEVEL_1, "\tCollecting optimization units\n"));
	/* start with an empty intrusive list; the walker appends units */
	INIT_LIST_HEAD(&co->units);
	irg_walk_graph(co->irg, co_collect_units, NULL, co);
#ifdef QUICK_AND_DIRTY_HACK
	/* experimental: reorder units by constraints and sort_key */
	co_sort_units(co);
#endif
}
+
+void co_free_ou_structure(copy_opt_t *co)
+{
+ unit_t *curr, *tmp;
+ ASSERT_OU_AVAIL(co);
+ list_for_each_entry_safe(unit_t, curr, tmp, &co->units, units) {
+ xfree(curr->nodes);
+ xfree(curr->costs);
+ xfree(curr);
+ }
+ co->units.next = NULL;