- Added 2 new blockschedulers, a greedy algorithm and an "optimal" ILP that
[libfirm] / ir / be / beuses.c
index 0db133a..9be130e 100644 (file)
@@ -1,13 +1,16 @@
 /**
  * @file   beuse.c
  * @date   27.06.2005
- * @author Sebastian Hack
+ * @author Sebastian Hack, Matthias Braun
  *
  * Methods to compute when a value will be used again.
  *
  * Copyright (C) 2005 Universitaet Karlsruhe
  * Released under the GPL
  */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
 
 #include <limits.h>
 #include <stdlib.h>
 #include "beirgmod.h"
 #include "bearch.h"
 #include "beuses_t.h"
+#include "benodesets.h"
 
-#define DBG_LEVEL SET_LEVEL_0
+#define SCAN_INTERBLOCK_USES
 
 typedef struct _be_use_t {
-       const ir_node *bl;
-       const ir_node *irn;
+       const ir_node *block;
+       const ir_node *node;
        unsigned next_use;
-       int is_set;
 } be_use_t;
 
 struct _be_uses_t {
-  set *uses;
-  ir_graph *irg;
-  firm_dbg_module_t *dbg;
-  const arch_env_t *arch_env;
+       set *uses;
+       ir_graph *irg;
+       const ir_exec_freq *execfreqs;
+       const be_lv_t *lv;
+       DEBUG_ONLY(firm_dbg_module_t *dbg;)
 };
 
-
-#define MIN(a, b)                ((a) < (b) ? (a) : (b))
-
-static INLINE unsigned sadd(unsigned a, unsigned b)
+static int cmp_use(const void *a, const void *b, size_t n)
 {
-  return a + b;
+       const be_use_t *p = a;
+       const be_use_t *q = b;
+       return !(p->block == q->block && p->node == q->node);
 }
 
-static INLINE unsigned sdiv(unsigned a, unsigned b)
+static const be_use_t *get_or_set_use_block(be_uses_t *uses,
+                                            const ir_node *block,
+                                            const ir_node *def)
 {
-  return a / b;
-}
+       unsigned hash = HASH_COMBINE(nodeset_hash(block), nodeset_hash(def));
+       be_use_t temp;
+       be_use_t* result;
 
-static int cmp_use(const void *a, const void *b, size_t n)
-{
-  const be_use_t *p = a;
-  const be_use_t *q = b;
-  return !(p->bl == q->bl && p->irn == q->irn);
-}
+       temp.block = block;
+       temp.node = def;
+       result = set_find(uses->uses, &temp, sizeof(temp), hash);
 
-static INLINE be_use_t *get_or_set_use(be_uses_t *uses,
-    const ir_node *bl, const ir_node *irn, unsigned next_use)
-{
-  unsigned hash = HASH_COMBINE(HASH_PTR(bl), HASH_PTR(irn));
-  be_use_t templ;
-
-  templ.bl = bl;
-  templ.irn = irn;
-  templ.next_use = next_use;
-  templ.is_set = 0;
-  return set_insert(uses->uses, &templ, sizeof(templ), hash);
-}
+       if(result == NULL) {
+               // insert temp first as we might end up in a loop in the get_next_use
+               // call otherwise
+               temp.next_use = USES_INFINITY;
+               result = set_insert(uses->uses, &temp, sizeof(temp), hash);
 
-unsigned be_get_next_use(be_uses_t *uses, const ir_node *from,
-    unsigned from_step, const ir_node *def, int skip_from_uses);
+               result->next_use = be_get_next_use(uses, sched_first(block), 0, def, 0);
+       }
 
-static unsigned get_next_use_bl(be_uses_t *uses, const ir_node *bl,
-    const ir_node *def)
-{
-  be_use_t *u;
-
-  u = get_or_set_use(uses, bl, def, 0);
-  if(!u->is_set) {
-       u->is_set = 1;
-       u->next_use = USES_INFINITY;
-       u->next_use = be_get_next_use(uses, sched_first(bl), 0, def, 0);
-  }
-  return u->next_use;
+       return result;
 }
 
-unsigned be_get_next_use(be_uses_t *uses,
-    const ir_node *from, unsigned from_step, const ir_node *def,
-    int skip_from_uses)
+unsigned be_get_next_use(be_uses_t *uses, const ir_node *from,
+                         unsigned from_step, const ir_node *def,
+                         int skip_from_uses)
 {
-  unsigned next_use = USES_INFINITY;
-  unsigned step = from_step;
-  unsigned n = 0;
-  const ir_node *irn;
-  const ir_node *bl = get_block(from);
-  const ir_edge_t *succ_edge;
-
-  sched_foreach_from(from, irn) {
-    int i, n;
-
-       if(!skip_from_uses) {
-           for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
-             ir_node *operand = get_irn_n(irn, i);
-
-             if(operand == def) {
-               DBG((uses->dbg, LEVEL_3, "found use of %+F at %+F\n", operand, irn));
-               return step;
-             }
-           }
+       unsigned step = from_step;
+       ir_node *block = get_nodes_block(from);
+       const ir_node *node;
+       const ir_edge_t *edge;
+
+       if(skip_from_uses) {
+               step++;
+               from = sched_next(from);
        }
 
-       skip_from_uses = 0;
-    step++;
-  }
+       sched_foreach_from(from, node) {
+               int i, arity;
 
-       next_use = USES_INFINITY;
-       foreach_block_succ(bl, succ_edge) {
-               const ir_node *succ_bl = succ_edge->src;
-               if(is_live_in(succ_bl, def)) {
-                       unsigned next = get_next_use_bl(uses, succ_bl, def);
+               arity = get_irn_arity(node);
+               for (i = 0; i < arity; ++i) {
+                       const ir_node *operand = get_irn_n(node, i);
 
-                       DBG((uses->dbg, LEVEL_2, "\t\tnext use in succ %+F: %d\n", succ_bl, next));
-                       next_use = MIN(next_use, next);
-                       n++;
-           }
+                       if (operand == def) {
+                               DBG((uses->dbg, LEVEL_3, "found use of %+F at %+F\n", operand, node));
+                               return step;
+                       }
+               }
+
+               step++;
        }
 
-       return next_use + step;
-}
+       if(be_is_live_end(uses->lv, block, def))
+               return step;
 
-be_uses_t *be_begin_uses(
-    ir_graph *irg,
-    const arch_env_t *arch_env,
-    const arch_register_class_t *cls)
-{
-  be_uses_t *uses = malloc(sizeof(uses[0]));
+#ifdef SCAN_INTERBLOCK_USES
+       {
+       double best_execfreq = -1;
+       unsigned next_use = USES_INFINITY;
+
+       foreach_block_succ(block, edge) {
+               const be_use_t *use;
+               const ir_node *succ_block = get_edge_src_irn(edge);
+               double execfreq = get_block_execfreq(uses->execfreqs, succ_block);
+
+               //execfreq_sum += execfreq;
+
+               if(execfreq > best_execfreq) {
+                       best_execfreq = execfreq;
 
-  edges_assure(irg);
+                       if(!be_is_live_in(uses->lv, succ_block, def)) {
+                               next_use = USES_INFINITY;
+                               continue;
+                       }
+
+                       use = get_or_set_use_block(uses, succ_block, def);
+                       //if(USES_IS_INFINITE(use->next_use))
+                       //      continue;
+
+                       next_use = use->next_use;
+               }
+
+               //next_use += use->next_use / execfreq;
+       }
 
-  uses->arch_env = arch_env;
-  uses->uses     = new_set(cmp_use, 512);
-  uses->dbg      = firm_dbg_register("be.uses");
-  firm_dbg_set_mask(uses->dbg, DBG_LEVEL);
+       /*if(next_use == 0)
+               return USES_INFINITY;*/
 
-  return uses;
+       //next_use /= execfreq_sum;
+
+       return ((unsigned) next_use) + step;
+       }
+#else
+       return USES_INFINITY;
+#endif
 }
 
-void be_end_uses(be_uses_t *uses)
+be_uses_t *be_begin_uses(ir_graph *irg, const ir_exec_freq *execfreqs, const be_lv_t *lv)
 {
-  del_set(uses->uses);
-  free(uses);
+       be_uses_t *uses = xmalloc(sizeof(uses[0]));
+
+       edges_assure(irg);
+
+       uses->uses = new_set(cmp_use, 512);
+       uses->irg = irg;
+       uses->execfreqs = execfreqs;
+       uses->lv = lv;
+       FIRM_DBG_REGISTER(uses->dbg, "firm.be.uses");
+
+       return uses;
 }
 
-int loc_compare(const void *a, const void *b)
+void be_end_uses(be_uses_t *uses)
 {
-  const loc_t *p = a;
-  const loc_t *q = b;
-  return p->time - q->time;
+       del_set(uses->uses);
+       free(uses);
 }