Bugfixes, Unknown-stuff, Heuristic for maximum independent set.
author: Daniel Grund <grund@cs.uni-saarland.de>
Mon, 10 Oct 2005 16:26:27 +0000 (16:26 +0000)
committer: Daniel Grund <grund@cs.uni-saarland.de>
Mon, 10 Oct 2005 16:26:27 +0000 (16:26 +0000)
ir/be/bearch_firm.c
ir/be/becopyheur.c
ir/be/becopyopt.c
ir/be/becopyopt.h
ir/be/belistsched.c
ir/be/bespill.c
ir/be/bespillbelady.c
ir/be/beutil.h

index 23d70e7..94bbe18 100644 (file)
@@ -283,14 +283,18 @@ static ir_node *new_Imm(ir_graph *irg, ir_node *bl, ir_node *cnst)
        res = new_ir_node(NULL, irg, bl, op_imm, get_irn_mode(cnst), 0, ins);
        attr = (imm_attr_t *) &res->attr;
 
-       if(get_irn_opcode(cnst) == iro_SymConst) {
-               attr->tp = imm_SymConst;
-               //attr->data.ent = get_SymConst_entity(cnst);
-       }
-
-       else {
-               attr->tp = imm_Const;
-               attr->data.tv = get_Const_tarval(cnst);
+       switch (get_irn_opcode(cnst)) {
+               case iro_Const:
+                       attr->tp = imm_Const;
+                       attr->data.tv = get_Const_tarval(cnst);
+                       break;
+               case iro_SymConst:
+                       attr->tp = imm_SymConst;
+                       //attr->data.ent = get_SymConst_entity(cnst);
+                       break;
+               case iro_Unknown:
+                       break;
+               default:                assert(0 && "Cannot create Imm for this opcode");
        }
 
        return res;
@@ -356,6 +360,7 @@ static void localize_const_walker(ir_node *irn, void *data)
                        opcode opc     = get_irn_opcode(op);
 
                        if(opc == iro_Const
+                       || opc == iro_Unknown
                        || (opc == iro_SymConst /*&& get_SymConst_kind(op) == symconst_addr_ent*/)) {
                                ir_graph *irg   = get_irn_irg(bl);
                                ir_node *imm_bl = is_Phi(irn) ? get_Block_cfgpred_block(bl, i) : bl;
index 4db77c5..b8e573a 100644 (file)
@@ -377,10 +377,24 @@ static int qnode_try_color(const qnode_t *qn) {
        return 1;
 }
 
+typedef int(*confl_f)(const ir_node *a, const ir_node *b, void *data);
+
+/**
+ * @param result       Gets filled with the computed maximum independent set.
+ * @param count                The size of input arrays / the number of nodes
+ * @param nodes                A set of nodes to compute the max. ind. set for
+ * @param weights      Weights associated to the nodes in @p nodes
+ * @param confl                Callback function to decide if two values interfere
+ * @param data         Passed into all callbacks
+ * @return The size of the computed set
+ */
+//int max_ind_set(ir_node **result, int count, ir_node **nodes, int *weights, confl_f confl, void *data) {
+//
+//}
+
 /**
  * Determines a maximum weighted independent set with respect to
  * the interference and conflict edges of all nodes in a qnode.
- * TODO: This runs in n! in worst case. Use a heuristic iff n>???
  */
 static INLINE void qnode_max_ind_set(qnode_t *qn, const unit_t *ou) {
        ir_node **safe, **unsafe;
@@ -419,38 +433,55 @@ static INLINE void qnode_max_ind_set(qnode_t *qn, const unit_t *ou) {
 
 
 
-       /* now brute force the best set out of the unsafe nodes*/
+       /* now compute the best set out of the unsafe nodes*/
        best = bitset_alloca(unsafe_count);
-       curr = bitset_alloca(unsafe_count);
-
-       bitset_set_all(curr);
-       while ((max = bitset_popcnt(curr)) != 0) {
-               /* check if curr is a stable set */
-               for (i=bitset_next_set(curr, 0); i!=-1; i=bitset_next_set(curr, i+1))
-                       for (o=bitset_next_set(curr, i); o!=-1; o=bitset_next_set(curr, o+1)) /* !!!!! difference to ou_max_ind_set_costs(): NOT (curr, i+1) */
-                                       if (qnode_are_conflicting(qn, unsafe[i], unsafe[o]))
-                                               goto no_stable_set;
-
-               /* if we arrive here, we have a stable set */
-               /* compute the weigth of the stable set*/
-               curr_weight = 0;
-               bitset_foreach(curr, pos)
-                       curr_weight += unsafe_costs[pos];
-
-               /* any better ? */
-               if (curr_weight > best_weight) {
-                       best_weight = curr_weight;
-                       bitset_copy(best, curr);
+
+       if (unsafe_count > MIS_HEUR_TRIGGER) {
+               /* Heuristic: Greedy trial and error from index 0 to unsafe_count-1 */
+               for (i=0; i<unsafe_count; ++i) {
+                       bitset_set(best, i);
+                       /* check if it is a stable set */
+                       for (o=bitset_next_set(best, 0); o!=-1 && o<i; o=bitset_next_set(best, o+1))
+                               if (qnode_are_conflicting(qn, unsafe[i], unsafe[o])) {
+                                       bitset_clear(best, i); /* clear the bit and try next one */
+                                       break;
+                               }
                }
+               /* compute the weight */
+               bitset_foreach(best, pos)
+                       best_weight += unsafe_costs[pos];
+       } else {
+               /* Exact Algorithm: Brute force */
+               curr = bitset_alloca(unsafe_count);
+               bitset_set_all(curr);
+               while ((max = bitset_popcnt(curr)) != 0) {
+                       /* check if curr is a stable set */
+                       for (i=bitset_next_set(curr, 0); i!=-1; i=bitset_next_set(curr, i+1))
+                               for (o=bitset_next_set(curr, i); o!=-1; o=bitset_next_set(curr, o+1)) /* !!!!! difference to ou_max_ind_set_costs(): NOT (curr, i+1) */
+                                               if (qnode_are_conflicting(qn, unsafe[i], unsafe[o]))
+                                                       goto no_stable_set;
+
+                       /* if we arrive here, we have a stable set */
+                       /* compute the weight of the stable set */
+                       curr_weight = 0;
+                       bitset_foreach(curr, pos)
+                               curr_weight += unsafe_costs[pos];
+
+                       /* any better ? */
+                       if (curr_weight > best_weight) {
+                               best_weight = curr_weight;
+                               bitset_copy(best, curr);
+                       }
 
 no_stable_set:
-               bitset_minus1(curr);
+                       bitset_minus1(curr);
+               }
        }
 
        /* transfer the best set into the qn */
        qn->mis_size = 1+safe_count+bitset_popcnt(best);
        qn->mis_costs = safe_costs+best_weight;
-       qn->mis[0] = ou->nodes[0]; /* the root is alwazs in a max stable set */
+       qn->mis[0] = ou->nodes[0]; /* the root is always in a max stable set */
        next = 1;
        for (i=0; i<safe_count; ++i)
                qn->mis[next++] = safe[i];
index 93ef6e0..c9ec52c 100644 (file)
@@ -36,7 +36,6 @@ static firm_dbg_module_t *dbg = NULL;
 /**
  * Determines a maximum weighted independent set with respect to
  * the interference and conflict edges of all nodes in a qnode.
- * TODO: This runs in n! in worst case. Use a heuristic iff n>???
  */
 static int ou_max_ind_set_costs(unit_t *ou) {
        be_chordal_env_t *chordal_env = ou->co->chordal_env;
@@ -75,31 +74,47 @@ static int ou_max_ind_set_costs(unit_t *ou) {
        }
 
 
-
-       /* now brute force the best set out of the unsafe nodes*/
-       curr = bitset_alloca(unsafe_count);
-
-       bitset_set_all(curr);
-       while ((max = bitset_popcnt(curr)) != 0) {
-               /* check if curr is a stable set */
-               for (i=bitset_next_set(curr, 0); i!=-1; i=bitset_next_set(curr, i+1))
-                       for (o=bitset_next_set(curr, i+1); o!=-1; o=bitset_next_set(curr, o+1)) /* !!!!! difference to qnode_max_ind_set(): NOT (curr, i) */
-                                       if (nodes_interfere(chordal_env, unsafe[i], unsafe[o]))
-                                               goto no_stable_set;
-
-               /* if we arrive here, we have a stable set */
-               /* compute the weigth of the stable set*/
-               curr_weight = 0;
-               bitset_foreach(curr, pos)
-                       curr_weight += unsafe_costs[pos];
-
-               /* any better ? */
-               if (curr_weight > best_weight) {
-                       best_weight = curr_weight;
+       /* now compute the best set out of the unsafe nodes*/
+       if (unsafe_count > MIS_HEUR_TRIGGER) {
+               bitset_t *best = bitset_alloca(unsafe_count);
+               /* Heuristic: Greedy trial and error from index 0 to unsafe_count-1 */
+               for (i=0; i<unsafe_count; ++i) {
+                       bitset_set(best, i);
+                       /* check if it is a stable set */
+                       for (o=bitset_next_set(best, 0); o!=-1 && o<i; o=bitset_next_set(best, o+1))
+                               if (nodes_interfere(chordal_env, unsafe[i], unsafe[o])) {
+                                       bitset_clear(best, i); /* clear the bit and try next one */
+                                       break;
+                               }
                }
+               /* compute the weight */
+               bitset_foreach(best, pos)
+                       best_weight += unsafe_costs[pos];
+       } else {
+               /* Exact Algorithm: Brute force */
+               curr = bitset_alloca(unsafe_count);
+               bitset_set_all(curr);
+               while ((max = bitset_popcnt(curr)) != 0) {
+                       /* check if curr is a stable set */
+                       for (i=bitset_next_set(curr, 0); i!=-1; i=bitset_next_set(curr, i+1))
+                               for (o=bitset_next_set(curr, i+1); o!=-1; o=bitset_next_set(curr, o+1)) /* !!!!! difference to qnode_max_ind_set(): NOT (curr, i) */
+                                               if (nodes_interfere(chordal_env, unsafe[i], unsafe[o]))
+                                                       goto no_stable_set;
+
+                       /* if we arrive here, we have a stable set */
+                       /* compute the weight of the stable set */
+                       curr_weight = 0;
+                       bitset_foreach(curr, pos)
+                               curr_weight += unsafe_costs[pos];
+
+                       /* any better ? */
+                       if (curr_weight > best_weight) {
+                               best_weight = curr_weight;
+                       }
 
-no_stable_set:
-               bitset_minus1(curr);
+       no_stable_set:
+                       bitset_minus1(curr);
+               }
        }
 
        return safe_costs+best_weight;
index 6dadcb3..4518cc1 100644 (file)
@@ -37,6 +37,9 @@
 #define DEBUG_LVL_HEUR SET_LEVEL_0
 #define DEBUG_LVL_ILP  SET_LEVEL_0
 
+#define MIS_HEUR_TRIGGER 8
+
+
 typedef int(*cost_fct_t)(ir_node*, ir_node*, int);
 
 /**
index c202ed0..880d442 100644 (file)
@@ -233,7 +233,6 @@ static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
         sched_info_t *info = get_irn_sched_info(irn);
         INIT_LIST_HEAD(&info->list);
         info->scheduled = 1;
-        assert(get_irn_opcode(irn) != iro_Unknown && "'Unknown' in schedule!");
         sched_add_before(env->block, irn);
 
         DBG((env->dbg, LEVEL_2, "\tadding %+F\n", irn));
@@ -407,7 +406,10 @@ static void imm_scheduler(ir_node *irn, void *env) {
                const ir_edge_t *e;
                ir_node *user, *user_block, *before, *tgt_block;
 
-               // assert(1 == get_irn_n_edges(irn)); why is this wrong ?
+               if (1 != get_irn_n_edges(irn)) {
+                       printf("Out edges: %d\n", get_irn_n_edges(irn));
+                       assert(1 == get_irn_n_edges(irn));
+               }
 
                e = get_irn_out_edge_first(irn);
                user = e->src;
index 139b488..1627763 100644 (file)
@@ -91,7 +91,7 @@ static spill_ctx_t *be_get_spill_ctx(set *sc, ir_node *to_spill, ir_node *ctx_ir
 
 static ir_node *be_spill_irn(spill_env_t *senv, ir_node *irn, ir_node *ctx_irn) {
        spill_ctx_t *ctx;
-       DBG((senv->dbg, LEVEL_1, "%+F\n", irn));
+       DBG((senv->dbg, LEVEL_1, "%+F in ctx %+F\n", irn, ctx_irn));
 
        ctx = be_get_spill_ctx(senv->spill_ctxs, irn, ctx_irn);
        if(!ctx->spill) {
@@ -115,7 +115,7 @@ static ir_node *be_spill_phi(spill_env_t *senv, ir_node *phi, ir_node *ctx_irn)
        spill_ctx_t *ctx;
 
        assert(is_Phi(phi));
-       DBG((senv->dbg, LEVEL_1, "%+F\n", phi));
+       DBG((senv->dbg, LEVEL_1, "%+F in ctx %+F\n", phi, ctx_irn));
 
        /* search an existing spill for this context */
        ctx = be_get_spill_ctx(senv->spill_ctxs, phi, ctx_irn);
@@ -180,6 +180,7 @@ void be_insert_spills_reloads(spill_env_t *senv, pset *reload_set) {
        irg_walk_graph(senv->session->irg, phi_walker, NULL, senv);
 
        /* Add reloads for mem_phis */
+       /* BETTER: These reloads (1) should only be inserted, if they are really needed */
        DBG((senv->dbg, LEVEL_1, "Reloads for mem-phis:\n"));
        for(irn = pset_first(senv->mem_phis); irn; irn = pset_next(senv->mem_phis)) {
                const ir_edge_t *e;
@@ -189,7 +190,7 @@ void be_insert_spills_reloads(spill_env_t *senv, pset *reload_set) {
                        if (is_Phi(user) && !pset_find_ptr(senv->mem_phis, user)) {
                                        DBG((senv->dbg, LEVEL_1, " non-mem-phi user %+F\n", user));
                                        ir_node *use_bl = get_nodes_block(user);
-                                       be_add_reload_on_edge(senv, irn, use_bl, e->pos);
+                                       be_add_reload_on_edge(senv, irn, use_bl, e->pos); /* (1) */
                        }
                }
        }
@@ -245,7 +246,7 @@ void be_add_reload(spill_env_t *senv, ir_node *to_spill, ir_node *before) {
        spill_info_t templ, *res;
        reloader_t *rel;
 
-       assert(get_irn_opcode(to_spill) != iro_Unknown);
+//     assert(get_irn_opcode(to_spill) != iro_Unknown);
 
        templ.spilled_node = to_spill;
        templ.reloaders    = NULL;
index 2a578e9..2eaffdc 100644 (file)
@@ -12,9 +12,9 @@
 #include "irgraph.h"
 #include "irnode.h"
 #include "irmode.h"
-#include "ircons.h"
 #include "irgwalk.h"
 #include "iredges_t.h"
+#include "ircons_t.h"
 
 #include "beutil.h"
 #include "bearch.h"
@@ -31,7 +31,7 @@
 #define DBG_DECIDE  8
 #define DBG_START  16
 #define DBG_TRACE  64
-#define DEBUG_LVL SET_LEVEL_0 // (DBG_START | DBG_DECIDE | DBG_WSETS | DBG_FIX | DBG_SPILL)
+#define DEBUG_LVL SET_LEVEL_0 //(DBG_START | DBG_DECIDE | DBG_WSETS | DBG_FIX | DBG_SPILL)
 static firm_dbg_module_t *dbg = NULL;
 
 #define MIN(a,b) (((a)<(b))?(a):(b))
@@ -450,14 +450,38 @@ next_value:
  * The remaining nodes in bel->reloads will be removed from the graph.
  */
 static void rescue_used_reloads(ir_node *irn, void *env) {
-       pset *rlds = ((belady_env_t *)env)->reloads;
+       pset *rlds = (pset *)env;
        if (pset_find_ptr(rlds, irn))
                pset_remove_ptr(rlds, irn);
 }
 
-void be_spill_belady(const be_main_session_env_t *session, const arch_register_class_t *cls) {
+/**
+ * Finds all unused reloads and removes them from the schedule.
+ * Also removes spills if they are not used anymore after removing reloads
+ */
+static void remove_unused_reloads(ir_graph *irg, belady_env_t *bel) {
        ir_node *irn;
 
+       irg_walk_graph(irg, rescue_used_reloads, NULL, bel->reloads);
+       for(irn = pset_first(bel->reloads); irn; irn = pset_next(bel->reloads)) {
+               DBG((dbg, DBG_SPILL, "Removing %+F before %+F in %+F\n", irn, sched_next(irn), get_nodes_block(irn)));
+
+               ir_node *spill = get_irn_n(irn, 0);
+
+               /* remove reload */
+               set_irn_n(irn, 0, new_Bad());
+               sched_remove(irn);
+
+               /* if spill not used anymore, remove it too
+                * test of regclass is necessary since spill may be a phi-M */
+               if (get_irn_n_edges(spill) == 0 && bel->cls == arch_get_irn_reg_class(bel->arch, spill, 0)) {
+                       set_irn_n(spill, 0, new_Bad());
+                       sched_remove(spill);
+               }
+       }
+}
+
+void be_spill_belady(const be_main_session_env_t *session, const arch_register_class_t *cls) {
        dbg = firm_dbg_register("ir.be.spillbelady");
        firm_dbg_set_mask(dbg, DEBUG_LVL);
 
@@ -477,13 +501,7 @@ void be_spill_belady(const be_main_session_env_t *session, const arch_register_c
        irg_block_walk_graph(session->irg, decide, NULL, bel);
        irg_block_walk_graph(session->irg, fix_block_borders, NULL, bel);
        be_insert_spills_reloads(bel->senv, bel->reloads);
-
-       /* find all unused reloads and remove them from the schedule */
-       irg_walk_graph(session->irg, rescue_used_reloads, NULL, bel);
-       for(irn = pset_first(bel->reloads); irn; irn = pset_next(bel->reloads)) {
-               DBG((dbg, DBG_SPILL, "Removing %+F before %+F in %+F\n", irn, sched_next(irn), get_nodes_block(irn)));
-               sched_remove(irn);
-       }
+       remove_unused_reloads(session->irg, bel);
 
        /* clean up */
        del_pset(bel->reloads);
index 386d86a..585d2a6 100644 (file)
@@ -41,10 +41,6 @@ static INLINE int is_data_node(const ir_node *irn)
 {
        int i, n;
 
-       /* Unknowns do not exist, so they produce nothing */
-       if(get_irn_opcode(irn) == iro_Unknown)
-               return 0;
-
        /* If the node produces a data value, return immediately. */
        if(is_firm_be_mode(get_irn_mode(irn)))
                return 1;