rework ASM node, it always has a memory input now
diff --git a/ir/be/beblocksched.c b/ir/be/beblocksched.c
index 470ad90..e920d2c 100644
--- a/ir/be/beblocksched.c
+++ b/ir/be/beblocksched.c
@@ -22,7 +22,6 @@
  * @brief       Block-scheduling strategies.
  * @author      Matthias Braun, Christoph Mallon
  * @date        27.09.2006
- * @version     $Id$
  *
  * The greedy (and ILP) algorithm here works by assuming that
  * we want to change as many jumps to fallthroughs as possible (executed jumps
@@ -128,6 +127,11 @@ struct blocksched_env_t {
        int            blockcount;
 };
 
+static blocksched_entry_t* get_blocksched_entry(const ir_node *block)
+{
+       return (blocksched_entry_t*)get_irn_link(block);
+}
+
 /**
  * Collect cfg frequencies of all edges between blocks.
  * Also determines edge with highest frequency.
@@ -201,20 +205,53 @@ static void collect_egde_frequency(ir_node *block, void *data)
        }
 }
 
+static int cmp_edges_base(const edge_t *e1, const edge_t *e2)
+{
+       long nr1 = get_irn_node_nr(e1->block);
+       long nr2 = get_irn_node_nr(e2->block);
+       if (nr1 < nr2) {
+               return 1;
+       } else if (nr1 > nr2) {
+               return -1;
+       } else {
+               if (e1->pos < e2->pos) {
+                       return 1;
+               } else if (e1->pos > e2->pos) {
+                       return -1;
+               } else {
+                       return 0;
+               }
+       }
+}
+
 static int cmp_edges(const void *d1, const void *d2)
 {
        const edge_t *e1 = (const edge_t*)d1;
        const edge_t *e2 = (const edge_t*)d2;
-
-       return QSORT_CMP(e2->execfreq, e1->execfreq);
+       double        freq1 = e1->execfreq;
+       double        freq2 = e2->execfreq;
+       if (freq1 < freq2) {
+               return 1;
+       } else if (freq1 > freq2) {
+               return -1;
+       } else {
+               return cmp_edges_base(e1, e2);
+       }
 }
 
 static int cmp_edges_outedge_penalty(const void *d1, const void *d2)
 {
-       const edge_t *e1 = (const edge_t*)d1;
-       const edge_t *e2 = (const edge_t*)d2;
-       /* reverse sorting as penalties are negative */
-       return QSORT_CMP(e1->outedge_penalty_freq, e2->outedge_penalty_freq);
+       const edge_t *e1   = (const edge_t*)d1;
+       const edge_t *e2   = (const edge_t*)d2;
+       double        pen1 = e1->outedge_penalty_freq;
+       double        pen2 = e2->outedge_penalty_freq;
+       if (pen1 > pen2) {
+               return 1;
+       } else if (pen1 < pen2) {
+               return -1;
+       } else {
+               return cmp_edges_base(e1, e2);
+       }
 }
 
 static void clear_loop_links(ir_loop *loop)
@@ -257,8 +294,8 @@ static void coalesce_blocks(blocksched_env_t *env)
                        continue;
 
                pred_block = get_Block_cfgpred_block(block, pos);
-               entry      = (blocksched_entry_t*)get_irn_link(block);
-               pred_entry = (blocksched_entry_t*)get_irn_link(pred_block);
+               entry      = get_blocksched_entry(block);
+               pred_entry = get_blocksched_entry(pred_block);
 
                if (pred_entry->next != NULL || entry->prev != NULL)
                        continue;
@@ -296,8 +333,8 @@ static void coalesce_blocks(blocksched_env_t *env)
                        continue;
 
                pred_block = get_Block_cfgpred_block(block, pos);
-               entry      = (blocksched_entry_t*)get_irn_link(block);
-               pred_entry = (blocksched_entry_t*)get_irn_link(pred_block);
+               entry      = get_blocksched_entry(block);
+               pred_entry = get_blocksched_entry(pred_block);
 
                if (pred_entry->next != NULL || entry->prev != NULL)
                        continue;
@@ -338,8 +375,8 @@ static void coalesce_blocks(blocksched_env_t *env)
                        continue;
 
                pred_block = get_Block_cfgpred_block(block, pos);
-               entry      = (blocksched_entry_t*)get_irn_link(block);
-               pred_entry = (blocksched_entry_t*)get_irn_link(pred_block);
+               entry      = get_blocksched_entry(block);
+               pred_entry = get_blocksched_entry(pred_block);
 
                /* is 1 of the blocks already attached to another block? */
                if (pred_entry->next != NULL || entry->prev != NULL)
@@ -358,8 +395,7 @@ static void pick_block_successor(blocksched_entry_t *entry, blocksched_env_t *en
        ir_node            *block = entry->block;
        ir_node            *succ  = NULL;
        blocksched_entry_t *succ_entry;
-       const ir_edge_t    *edge;
-       double             best_succ_execfreq;
+       double              best_succ_execfreq;
 
        if (irn_visited_else_mark(block))
                return;
@@ -377,7 +413,7 @@ static void pick_block_successor(blocksched_entry_t *entry, blocksched_env_t *en
 
                /* we only need to put the first of a series of already connected
                 * blocks into the worklist */
-               succ_entry = (blocksched_entry_t*)get_irn_link(succ_block);
+               succ_entry = get_blocksched_entry(succ_block);
                while (succ_entry->prev != NULL) {
                        /* break cycles... */
                        if (succ_entry->prev->block == succ_block) {
@@ -413,7 +449,7 @@ static void pick_block_successor(blocksched_entry_t *entry, blocksched_env_t *en
                if (irn_visited(succ_block))
                        continue;
 
-               succ_entry = (blocksched_entry_t*)get_irn_link(succ_block);
+               succ_entry = get_blocksched_entry(succ_block);
                if (succ_entry->prev != NULL)
                        continue;
 
@@ -436,7 +472,7 @@ static void pick_block_successor(blocksched_entry_t *entry, blocksched_env_t *en
                } while (irn_visited(succ));
        }
 
-       succ_entry       = (blocksched_entry_t*)get_irn_link(succ);
+       succ_entry       = get_blocksched_entry(succ);
        entry->next      = succ_entry;
        succ_entry->prev = entry;
 
@@ -447,7 +483,7 @@ static blocksched_entry_t *finish_block_schedule(blocksched_env_t *env)
 {
        ir_graph           *irg        = env->irg;
        ir_node            *startblock = get_irg_start_block(irg);
-       blocksched_entry_t *entry      = (blocksched_entry_t*)get_irn_link(startblock);
+       blocksched_entry_t *entry      = get_blocksched_entry(startblock);
 
        ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
        inc_irg_visited(irg);
@@ -566,11 +602,10 @@ static int add_ilp_edge(ir_node *block, int pos, double execfreq, blocksched_ilp
 
 static void collect_egde_frequency_ilp(ir_node *block, void *data)
 {
-       blocksched_ilp_env_t *env        = data;
+       blocksched_ilp_env_t *env        = (blocksched_ilp_env_t*)data;
        ir_graph             *irg        = env->env.irg;
        ir_node              *startblock = get_irg_start_block(irg);
        int                  arity;
-       lpp_cst_t            cst;
        char                 name[64];
        int                  out_count;
        blocksched_ilp_entry_t *entry;
@@ -595,9 +630,10 @@ static void collect_egde_frequency_ilp(ir_node *block, void *data)
        }
        else {
                int i;
+               int cst_idx;
 
                snprintf(name, sizeof(name), "block_in_constr_%ld", get_irn_node_nr(block));
-               cst = lpp_add_cst_uniq(env->lpp, name, lpp_greater_equal, arity - 1);
+               cst_idx = lpp_add_cst_uniq(env->lpp, name, lpp_greater_equal, arity - 1);
 
                for (i = 0; i < arity; ++i) {
                        double     execfreq;
@@ -608,16 +644,19 @@ static void collect_egde_frequency_ilp(ir_node *block, void *data)
                        execfreq = get_block_execfreq(env->env.execfreqs, pred_block);
                        edgenum  = add_ilp_edge(block, i, execfreq, env);
                        edge     = &env->ilpedges[edgenum];
-                       lpp_set_factor_fast(env->lpp, cst, edge->ilpvar, 1.0);
+                       lpp_set_factor_fast(env->lpp, cst_idx, edge->ilpvar, 1.0);
                }
        }
 }
 
+static blocksched_ilp_entry_t *get_blocksched_ilp_entry(const ir_node *block)
+{
+       return (blocksched_ilp_entry_t*)get_irn_link(block);
+}
 
 static void coalesce_blocks_ilp(blocksched_ilp_env_t *env)
 {
        int           edge_count = ARR_LEN(env->ilpedges);
-       be_options_t *options    = be_get_irg_options(env->env.irg);
        int           i;
 
        /* complete out constraints */
@@ -632,14 +671,14 @@ static void coalesce_blocks_ilp(blocksched_ilp_env_t *env)
                        continue;
 
                pred  = get_Block_cfgpred_block(block, edge->pos);
-               entry = get_irn_link(pred);
+               entry = get_blocksched_ilp_entry(pred);
 
                DB((dbg, LEVEL_1, "Adding out cst to %+F from %+F,%d\n",
                                  pred, block, edge->pos));
                lpp_set_factor_fast(env->lpp, entry->out_cst, edge->ilpvar, 1.0);
        }
 
-       lpp_solve_net(env->lpp, options->ilp_server, options->ilp_solver);
+       lpp_solve_net(env->lpp, be_options.ilp_server, be_options.ilp_solver);
        assert(lpp_is_sol_valid(env->lpp));
 
        /* Apply results to edges */
@@ -660,8 +699,8 @@ static void coalesce_blocks_ilp(blocksched_ilp_env_t *env)
                        continue;
 
                pred       = get_Block_cfgpred_block(block, edge->pos);
-               entry      = get_irn_link(block);
-               pred_entry = get_irn_link(pred);
+               entry      = get_blocksched_entry(block);
+               pred_entry = get_blocksched_entry(pred);
 
                assert(entry->prev == NULL && pred_entry->next == NULL);
                entry->prev      = pred_entry;