BugFix: check mode_T nodes for different_constraints

diff --git a/ir/be/bespillremat.c b/ir/be/bespillremat.c
index 882e951..99caaf4 100644
@@ -1,21 +1,36 @@
-/** vim: set sw=4 ts=4:
- * @file   bespillremat.c
- * @date   2006-04-06
- * @author Adam M. Szalkowski & Sebastian Hack
+/*
+ * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
  *
- * ILP based spilling & rematerialization
+ * This file is part of libFirm.
  *
- * Copyright (C) 2006 Universitaet Karlsruhe
- * Released under the GPL
+ * This file may be distributed and/or modified under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation and appearing in the file LICENSE.GPL included in the
+ * packaging of this file.
+ *
+ * Licensees holding valid libFirm Professional Edition licenses may use
+ * this file in accordance with the libFirm Commercial License.
+ * Agreement provided with the Software.
+ *
+ * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/**
+ * @file
+ * @brief       ILP based spilling & rematerialization
+ * @author      Adam M. Szalkowski
+ * @date        06.04.2006
+ * @version     $Id$
  */
-#ifdef HAVE_CONFIG_H
 #include "config.h"
-#endif
 
 #ifdef WITH_ILP
 
 #include <math.h>
 
+#include "array_t.h"
 #include "hashptr.h"
 #include "debug.h"
 #include "obst.h"
 #include "irnode_t.h"
 #include "ircons_t.h"
 #include "irloop_t.h"
+#include "irnodeset.h"
 #include "phiclass.h"
-#include "iredges.h"
+#include "iredges_t.h"
 #include "execfreq.h"
 #include "irvrfy.h"
 #include "irbackedge_t.h"
+#include "irprofile.h"
 
 #include <lpp/lpp.h>
 #include <lpp/mps.h>
 #include <lpp/lpp_cplex.h>
 
 #include "be_t.h"
+#include "beirg_t.h"
 #include "belive_t.h"
 #include "besched_t.h"
-#include "beirgmod.h"
-#include "bearch.h"
+#include "bessaconstr.h"
+#include "bearch_t.h"
+#include "beintlive_t.h"
 #include "beabi.h"
 #include "benode_t.h"
 #include "beutil.h"
 #include "bespillremat.h"
 #include "bespill.h"
 #include "bepressurestat.h"
-#include "beprofile.h"
 #include "bespilloptions.h"
-
 #include "bechordal_t.h"
+#include "bemodule.h"
 
-#include <libcore/lc_opts.h>
-#include <libcore/lc_opts_enum.h>
+#include "lc_opts.h"
+#include "lc_opts_enum.h"
 
 #define DUMP_PROBLEM       1
 #define DUMP_MPS           2
@@ -150,7 +168,7 @@ static const lc_opt_table_entry_t options[] = {
        LC_OPT_ENT_DBL      ("cost_memoperand",  "cost of a memory operand",                        &opt_cost_memoperand),
        LC_OPT_ENT_DBL      ("cost_spill",  "cost of a spill instruction",                          &opt_cost_spill),
        LC_OPT_ENT_DBL      ("cost_remat",  "cost of a rematerialization",                          &opt_cost_remat),
-       { NULL }
+       LC_OPT_LAST
 };
 
 //#define EXECFREQ_LOOPDEPH   /* compute execution frequency from loop depth only */
@@ -263,8 +281,7 @@ typedef struct _memoperand_t {
 static INLINE int
 has_reg_class(const spill_ilp_t * si, const ir_node * irn)
 {
-       return arch_irn_consider_in_reg_alloc(si->birg->main_env->arch_env,
-                                             si->cls, irn);
+       return arch_irn_consider_in_reg_alloc(si->cls, irn);
 }
 
 #if 0
@@ -285,7 +302,7 @@ static int
 cmp_remat(const void *a, const void *b)
 {
        const remat_t  *r = a;
-       const remat_t  *s = a;
+       const remat_t  *s = b;
 
        return !(r == s || r->op == s->op);
 }
@@ -295,6 +312,7 @@ cmp_spill(const void *a, const void *b, size_t size)
 {
        const spill_t *p = a;
        const spill_t *q = b;
+       (void) size;
 
 //     return !(p->irn == q->irn && p->bb == q->bb);
        return !(p->irn == q->irn);
@@ -305,6 +323,7 @@ cmp_memoperands(const void *a, const void *b, size_t size)
 {
        const memoperand_t *p = a;
        const memoperand_t *q = b;
+       (void) size;
 
        return !(p->irn == q->irn && p->pos == q->pos);
 }
@@ -382,7 +401,7 @@ set_find_spill(set * set, const ir_node * value)
 #define pset_foreach(s,i) for((i)=pset_first((s)); (i); (i)=pset_next((s)))
 #define set_foreach(s,i) for((i)=set_first((s)); (i); (i)=set_next((s)))
 #define foreach_post_remat(s,i) for((i)=next_post_remat((s)); (i); (i)=next_post_remat((i)))
-#define foreach_pre_remat(si,s,i) for((i)=next_pre_remat((si),(s)); (i); (i)=next_pre_remat((si),(i)))
+#define foreach_pre_remat(s,i) for((i)=next_pre_remat((s)); (i); (i)=next_pre_remat((i)))
 #define        sched_foreach_op(s,i) for((i)=sched_next_op((s));!sched_is_end((i));(i)=sched_next_op((i)))
 
 static int
@@ -390,6 +409,7 @@ cmp_remat_info(const void *a, const void *b, size_t size)
 {
        const remat_info_t *p = a;
        const remat_info_t *q = b;
+       (void) size;
 
        return !(p->irn == q->irn);
 }
@@ -399,6 +419,7 @@ cmp_defs(const void *a, const void *b, size_t size)
 {
        const defs_t *p = a;
        const defs_t *q = b;
+       (void) size;
 
        return !(p->value == q->value);
 }
@@ -408,6 +429,7 @@ cmp_keyval(const void *a, const void *b, size_t size)
 {
        const keyval_t *p = a;
        const keyval_t *q = b;
+       (void) size;
 
        return !(p->key == q->key);
 }
@@ -416,11 +438,11 @@ static double
 execution_frequency(const spill_ilp_t *si, const ir_node * irn)
 {
 #define FUDGE 0.001
-       if(be_profile_has_data())
-               return ((double)be_profile_get_block_execcount(get_block(irn))) + FUDGE;
+       if(ir_profile_has_data())
+               return ((double)ir_profile_get_block_execcount(get_block_const(irn))) + FUDGE;
 
 #ifndef EXECFREQ_LOOPDEPH
-       return get_block_execfreq(si->birg->exec_freq, get_block(irn)) + FUDGE;
+       return get_block_execfreq(si->birg->exec_freq, get_block_const(irn)) + FUDGE;
 #else
        if(is_Block(irn))
                return exp(get_loop_depth(get_irn_loop(irn)) * log(10)) + FUDGE;
@@ -429,15 +451,14 @@ execution_frequency(const spill_ilp_t *si, const ir_node * irn)
 #endif
 }
 
-static double
-get_cost(const spill_ilp_t * si, const ir_node * irn)
+static double get_cost(const ir_node *irn)
 {
        if(be_is_Spill(irn)) {
                return opt_cost_spill;
        } else if(be_is_Reload(irn)){
                return opt_cost_reload;
        } else {
-               return arch_get_op_estimated_cost(si->birg->main_env->arch_env, irn);
+               return arch_get_op_estimated_cost(irn);
        }
 }
 
@@ -447,9 +468,8 @@ get_cost(const spill_ilp_t * si, const ir_node * irn)
 static INLINE int
 is_rematerializable(const spill_ilp_t * si, const ir_node * irn)
 {
-       int               n;
-       const arch_env_t *arch_env = si->birg->main_env->arch_env;
-       int               remat = (arch_irn_get_flags(arch_env, irn) & arch_irn_flags_rematerializable) != 0;
+       int n;
+       int remat = (arch_irn_get_flags(irn) & arch_irn_flags_rematerializable) != 0;
 
 #if 0
        if(!remat)
@@ -458,7 +478,7 @@ is_rematerializable(const spill_ilp_t * si, const ir_node * irn)
 
        for (n = get_irn_arity(irn)-1; n>=0 && remat; --n) {
                ir_node        *op = get_irn_n(irn, n);
-               remat &= has_reg_class(si, op) || arch_irn_get_flags(arch_env, op) & arch_irn_flags_ignore || (get_irn_op(op) == op_NoMem);
+               remat &= has_reg_class(si, op) || arch_irn_get_flags(op) & arch_irn_flags_ignore || is_NoMem(op);
 
 //             if(!remat)
 //                     ir_fprintf(stderr, "  Argument %d (%+F) of Node %+F has wrong regclass\n", i, op, irn);
@@ -491,7 +511,7 @@ get_remat_from_op(spill_ilp_t * si, const ir_node * dest_value, const ir_node *
 
                remat          = obstack_alloc(si->obst, sizeof(*remat));
                remat->op      = op;
-               remat->cost    = (int)get_cost(si, op);
+               remat->cost    = (int)get_cost(op);
                remat->value   = dest_value;
                remat->proj    = proj;
                remat->inverse = 0;
@@ -510,7 +530,7 @@ get_remat_from_op(spill_ilp_t * si, const ir_node * dest_value, const ir_node *
                DBG((si->dbg, LEVEL_5, "\t  requesting inverse op for argument %d of op %+F\n", n, op));
 
                /* else ask the backend to give an inverse op */
-               if(arch_get_inverse(si->birg->main_env->arch_env, op, n, &inverse, si->obst)) {
+               if(arch_get_inverse(op, n, &inverse, si->obst)) {
                        int   i;
 
                        DBG((si->dbg, LEVEL_4, "\t  backend gave us an inverse op with %d nodes and cost %d\n", inverse.n, inverse.costs));
@@ -658,6 +678,7 @@ value_is_defined_before(const spill_ilp_t * si, const ir_node * pos, const ir_no
        ir_node *block;
        ir_node *def_block = get_nodes_block(val);
        int      ret;
+       (void) si;
 
        if(val == pos)
                return 0;
@@ -684,10 +705,9 @@ value_is_defined_before(const spill_ilp_t * si, const ir_node * pos, const ir_no
        return ret;
 }
 
-static INLINE ir_node *
-sched_block_last_noncf(const spill_ilp_t * si, const ir_node * bb)
+static INLINE ir_node *sched_block_last_noncf(const ir_node * bb)
 {
-    return sched_skip((ir_node*)bb, 0, sched_skip_cf_predicator, (void *) si->birg->main_env->arch_env);
+       return sched_skip((ir_node*)bb, 0, sched_skip_cf_predicator, NULL);
 }
 
 /**
@@ -702,6 +722,7 @@ sched_block_first_nonphi(const ir_node * bb)
 static int
 sched_skip_proj_predicator(const ir_node * irn, void * data)
 {
+       (void) data;
        return (is_Proj(irn));
 }
 
@@ -753,11 +774,10 @@ sched_put_after(ir_node * insert, ir_node * irn)
        sched_add_before(insert, irn);
 }
 
-static void
-sched_put_before(const spill_ilp_t * si, ir_node * insert, ir_node * irn)
+static void sched_put_before(ir_node * insert, ir_node * irn)
 {
   if(is_Block(insert)) {
-         insert = sched_block_last_noncf(si, insert);
+         insert = sched_block_last_noncf(insert);
   } else {
          insert = sched_next_nonproj(insert, 0);
          insert = sched_prev(insert);
@@ -790,14 +810,13 @@ next_post_remat(const ir_node * irn)
 }
 
 
-static ir_node *
-next_pre_remat(const spill_ilp_t * si, const ir_node * irn)
+static ir_node *next_pre_remat(const ir_node * irn)
 {
        op_t      *op;
        ir_node   *ret;
 
        if(is_Block(irn)) {
-               ret = sched_block_last_noncf(si, irn);
+               ret = sched_block_last_noncf(irn);
                ret = sched_next(ret);
                ret = sched_prev_op(ret);
        } else {
@@ -827,7 +846,7 @@ can_remat_before(const spill_ilp_t * si, const remat_t * remat, const ir_node *
                           res = 1;
 
        if(is_Block(pos)) {
-               prev = sched_block_last_noncf(si, pos);
+               prev = sched_block_last_noncf(pos);
                prev = sched_next_nonproj(prev, 0);
        } else {
                prev = sched_prev_op(pos);
@@ -912,7 +931,7 @@ insert_copy_before(const spill_ilp_t * si, const ir_node * irn, ir_node * pos)
 
        set_phi_class(si->pc, copy, NULL);
        set_nodes_block(copy, bb);
-       sched_put_before(si, pos, copy);
+       sched_put_before(pos, copy);
 
        return copy;
 }
@@ -1076,7 +1095,7 @@ get_live_end(spill_ilp_t * si, ir_node * bb, pset * live)
        sched_foreach_reverse(bb, irn) {
                int  i;
 
-               if(!sched_skip_cf_predicator(irn, si->birg->main_env->arch_env)) break;
+               if (!sched_skip_cf_predicator(irn, NULL)) break;
 
                for(i=get_irn_arity(irn)-1; i>=0; --i) {
                        ir_node *arg = get_irn_n(irn,i);
@@ -1090,7 +1109,7 @@ get_live_end(spill_ilp_t * si, ir_node * bb, pset * live)
         * find values that are used by remats at end of block
         * and insert them into live set
         */
-       foreach_pre_remat(si, bb, irn) {
+       foreach_pre_remat(bb, irn) {
                int       n;
 
                for (n=get_irn_arity(irn)-1; n>=0; --n) {
@@ -1122,7 +1141,7 @@ walker_regclass_copy_insertor(ir_node * irn, void * data)
 
                        if(!has_reg_class(si, phi_arg)) {
                                ir_node   *copy = be_new_Copy(si->cls, si->birg->irg, bb, phi_arg);
-                               ir_node   *pos = sched_block_last_noncf(si, bb);
+                               ir_node   *pos = sched_block_last_noncf(bb);
                                op_t      *op = obstack_alloc(si->obst, sizeof(*op));
 
                                DBG((si->dbg, LEVEL_2, "\t copy to my regclass for arg %+F of %+F\n", phi_arg, irn));
@@ -1240,7 +1259,7 @@ walker_remat_insertor(ir_node * bb, void * data)
                }
 
                /* do not place post remats after jumps */
-               if(sched_skip_cf_predicator(irn, si->birg->main_env->arch_env)) {
+               if (sched_skip_cf_predicator(irn, si->birg->main_env->arch_env)) {
                        del_pset(used);
                        del_pset(args);
                        break;
@@ -1454,7 +1473,7 @@ luke_endwalker(ir_node * bb, void * data)
         * find values that are used by remats at end of block
         * and insert them into live set
         */
-       foreach_pre_remat(si, bb, irn) {
+       foreach_pre_remat(bb, irn) {
                int       n;
 
                for (n=get_irn_arity(irn)-1; n>=0; --n) {
@@ -1471,7 +1490,7 @@ luke_endwalker(ir_node * bb, void * data)
        sched_foreach_reverse(bb, irn) {
                int   n;
 
-               if(!sched_skip_cf_predicator(irn, si->birg->main_env->arch_env)) break;
+               if (!sched_skip_cf_predicator(irn, si->birg->main_env->arch_env)) break;
 
                for (n=get_irn_arity(irn)-1; n>=0; --n) {
                        ir_node        *irn_arg = get_irn_n(irn, n);
@@ -1730,7 +1749,7 @@ insert_mem_copy_position(spill_ilp_t * si, pset * live, const ir_node * block)
                lpp_set_factor_fast(si->lpp, cst, to_copy_spill->reg_out, -1.0);
                if(reload != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
                lpp_set_factor_fast(si->lpp, cst, to_copy_op->attr.live_range.ilp, -1.0);
-               foreach_pre_remat(si, block, tmp) {
+               foreach_pre_remat(block, tmp) {
                        op_t     *remat_op = get_irn_link(tmp);
                        if(remat_op->attr.remat.remat->value == to_copy) {
                                lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
@@ -1754,16 +1773,15 @@ insert_mem_copy_position(spill_ilp_t * si, pset * live, const ir_node * block)
 static void
 luke_blockwalker(ir_node * bb, void * data)
 {
-       spill_ilp_t    *si = (spill_ilp_t*)data;
-       ir_node        *irn;
-       pset           *live;
-       char            buf[256];
-       ilp_cst_t       cst;
-       spill_bb_t     *spill_bb = get_irn_link(bb);
-       ir_node        *tmp;
-       spill_t        *spill;
-       pset           *defs = pset_new_ptr_default();
-       const arch_env_t *arch_env = si->birg->main_env->arch_env;
+       spill_ilp_t *si = (spill_ilp_t*)data;
+       ir_node     *irn;
+       pset        *live;
+       char         buf[256];
+       ilp_cst_t    cst;
+       spill_bb_t  *spill_bb = get_irn_link(bb);
+       ir_node     *tmp;
+       spill_t     *spill;
+       pset        *defs = pset_new_ptr_default();
 
        live = pset_new_ptr_default();
 
@@ -1804,7 +1822,7 @@ luke_blockwalker(ir_node * bb, void * data)
                lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
                if(reload != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
                lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, -1.0);
-               foreach_pre_remat(si, bb, tmp) {
+               foreach_pre_remat(bb, tmp) {
                        op_t     *remat_op = get_irn_link(tmp);
                        if(remat_op->attr.remat.remat->value == irn) {
                                lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
@@ -1818,7 +1836,7 @@ luke_blockwalker(ir_node * bb, void * data)
                lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
                if(reload != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
                lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, -1.0);
-               foreach_pre_remat(si, bb, tmp) {
+               foreach_pre_remat(bb, tmp) {
                        op_t     *remat_op = get_irn_link(tmp);
                        if(remat_op->attr.remat.remat->value == irn) {
                                lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
@@ -1853,7 +1871,7 @@ luke_blockwalker(ir_node * bb, void * data)
        /*
         * assure the remat args are available
         */
-       foreach_pre_remat(si, bb, tmp) {
+       foreach_pre_remat(bb, tmp) {
                op_t     *remat_op = get_irn_link(tmp);
                int       n;
 
@@ -1899,7 +1917,7 @@ luke_blockwalker(ir_node * bb, void * data)
         *    B A S I C  B L O C K  B O D Y
         **************************************/
 
-       sched_foreach_reverse_from(sched_block_last_noncf(si, bb), irn) {
+       sched_foreach_reverse_from(sched_block_last_noncf(bb), irn) {
                op_t       *op;
                op_t       *tmp_op;
                int         n,
@@ -1960,7 +1978,7 @@ luke_blockwalker(ir_node * bb, void * data)
                                        }
                                }
                        }
-                       foreach_pre_remat(si, irn, tmp) {
+                       foreach_pre_remat(irn, tmp) {
                                for (n=get_irn_arity(tmp)-1; n>=0; --n) {
                                        ir_node        *remat_arg = get_irn_n(tmp, n);
                                        if(has_reg_class(si, remat_arg)) {
@@ -2216,7 +2234,8 @@ skip_one_must_die:
 
                        if(opt_memoperands && (!is_start_block(bb) || be_is_Barrier(irn))) {
                                for(n = get_irn_arity(irn)-1; n>=0; --n) {
-                                       if(get_irn_n(irn, n) == arg && arch_possible_memory_operand(arch_env, irn, n)) {
+                                       if (get_irn_n(irn, n) == arg &&
+                                                       arch_possible_memory_operand(irn, n)) {
                                                ilp_var_t       memoperand;
 
                                                ir_snprintf(buf, sizeof(buf), "memoperand_%N_%d", irn, n);
@@ -2252,7 +2271,7 @@ skip_one_must_die:
                                assert(spill);
 
                                ir_snprintf(buf, sizeof(buf), "delete_%N", tmp);
-                               delete = lpp_add_var_default(si->lpp, buf, lpp_binary, -1.0*get_cost(si, irn)*execution_frequency(si, bb), 0.0);
+                               delete = lpp_add_var_default(si->lpp, buf, lpp_binary, -1.0 * get_cost(irn) * execution_frequency(si, bb), 0.0);
 
                                /* op may not be killed if its first live_range is 1 */
                                ir_snprintf(buf, sizeof(buf), "killorig-lr_%N", tmp);
@@ -2270,7 +2289,7 @@ skip_one_must_die:
                                assert(spill);
 
                                ir_snprintf(buf, sizeof(buf), "keep_%N", tmp);
-                               keep = lpp_add_var_default(si->lpp, buf, lpp_binary, get_cost(si, irn)*execution_frequency(si, bb), 1.0);
+                               keep = lpp_add_var_default(si->lpp, buf, lpp_binary, get_cost(irn) * execution_frequency(si, bb), 1.0);
 
                                /* op may not be killed if its first live_range is 1 */
                                ir_snprintf(buf, sizeof(buf), "killorig-lr_%N", tmp);
@@ -2354,7 +2373,7 @@ skip_one_must_die:
 
                        lpp_set_factor_fast(si->lpp, requirements, arg_op->attr.live_range.ilp, 1.0);
                        lpp_set_factor_fast(si->lpp, requirements, op->attr.live_range.args.reloads[i], 1.0);
-                       foreach_pre_remat(si, irn, tmp) {
+                       foreach_pre_remat(irn, tmp) {
                                op_t     *remat_op = get_irn_link(tmp);
                                if(remat_op->attr.remat.remat->value == arg) {
                                        lpp_set_factor_fast(si->lpp, requirements, remat_op->attr.remat.ilp, 1.0);
@@ -2369,7 +2388,8 @@ skip_one_must_die:
                                        }
                                }
                                for(n = get_irn_arity(irn)-1; n>=0; --n) {
-                                       if(get_irn_n(irn, n) == arg && arch_possible_memory_operand(arch_env, irn, n)) {
+                                       if (get_irn_n(irn, n) == arg &&
+                                                       arch_possible_memory_operand(irn, n)) {
                                                memoperand_t  *memoperand;
                                                memoperand = set_find_memoperand(si->memoperands, irn, n);
 
@@ -2402,7 +2422,7 @@ skip_one_must_die:
                }
 
                /* requirements for remats */
-               foreach_pre_remat(si, irn, tmp) {
+               foreach_pre_remat(irn, tmp) {
                        op_t        *remat_op = get_irn_link(tmp);
                        int          n;
 
@@ -2851,6 +2871,7 @@ cmp_interference(const void *a, const void *b, size_t size)
 {
        const interference_t *p = a;
        const interference_t *q = b;
+       (void) size;
 
        return !(p->a == q->a && p->b == q->b);
 }
@@ -2893,19 +2914,19 @@ set_insert_interference(spill_ilp_t * si, set * set, ir_node * a, ir_node * b, i
        return result;
 }
 
-static int
-values_interfere_in_block(const spill_ilp_t * si, const ir_node * bb, const ir_node * a, const ir_node * b)
+static
+int values_interfere_in_block(const spill_ilp_t *si, const ir_node *bb, const ir_node *a, const ir_node *b)
 {
        const ir_edge_t *edge;
 
-       if(get_nodes_block(a) != bb && get_nodes_block(b) != bb) {
+       if (get_nodes_block(a) != bb && get_nodes_block(b) != bb) {
                /* both values are live in, so they interfere */
                return 1;
        }
 
        /* ensure a dominates b */
-       if(value_dominates(b,a)) {
-               const ir_node * t;
+       if (value_dominates(b, a)) {
+               const ir_node *t;
                t = b;
                b = a;
                a = t;
@@ -2914,15 +2935,15 @@ values_interfere_in_block(const spill_ilp_t * si, const ir_node * bb, const ir_n
 
 
        /* the following code is stolen from bera.c */
-       if(be_is_live_end(si->lv, bb, a))
+       if (be_is_live_end(si->lv, bb, a))
                return 1;
 
        foreach_out_edge(a, edge) {
                const ir_node *user = edge->src;
-               if(get_nodes_block(user) == bb
-                               && !is_Phi(user)
+               if (get_nodes_block(user) == bb
+                               && ! is_Phi(user)
                                && b != user
-                               && !pset_find_ptr(si->inverse_ops, user)
+                               && ! pset_find_ptr(si->inverse_ops, user)
                                && value_dominates(b, user))
                        return 1;
        }
@@ -3220,9 +3241,13 @@ is_zero(double x)
        return fabs(x) < 0.00001;
 }
 
+/**
+ * node attribute hook for changing colors
+ */
 static int mark_remat_nodes_hook(FILE *F, ir_node *n, ir_node *l)
 {
        spill_ilp_t *si = get_irg_link(current_ir_graph);
+       (void) l;
 
        if(pset_find_ptr(si->all_possible_remats, n)) {
                op_t   *op = (op_t*)get_irn_link(n);
@@ -3391,15 +3416,15 @@ connect_all_spills_with_keep(spill_ilp_t * si)
 }
 
 /** insert a spill at an arbitrary position */
-ir_node *be_spill2(const arch_env_t *arch_env, ir_node *irn, ir_node *insert)
+static ir_node *be_spill2(ir_node *irn, ir_node *insert)
 {
        ir_node  *bl    = is_Block(insert) ? insert : get_nodes_block(insert);
        ir_graph *irg   = get_irn_irg(bl);
        ir_node  *frame = get_irg_frame(irg);
        ir_node  *spill;
        ir_node  *next;
-       const arch_register_class_t *cls       = arch_get_irn_reg_class(arch_env, irn, -1);
-       const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
+       const arch_register_class_t *cls       = arch_get_irn_reg_class(irn, -1);
+       const arch_register_class_t *cls_frame = arch_get_irn_reg_class(frame, -1);
 
        spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn);
 
@@ -3573,7 +3598,7 @@ new_r_PhiM_nokeep(ir_graph * irg, ir_node *block, int arity, ir_node **in)
        assert( get_irn_arity(block) == arity );
 
        res = new_ir_node(NULL, irg, block, op_Phi, mode_M, arity, in);
-       res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
+       res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
 
        return res;
 }
@@ -3586,11 +3611,10 @@ insert_spill(spill_ilp_t * si, ir_node * irn, const ir_node * value, ir_node * b
 {
        defs_t   *defs;
        ir_node  *spill;
-       const arch_env_t *arch_env = si->birg->main_env->arch_env;
 
        DBG((si->dbg, LEVEL_3, "\t  inserting spill for value %+F after %+F\n", irn, before));
 
-       spill = be_spill2(arch_env, irn, before);
+       spill = be_spill2(irn, before);
 
        defs = set_insert_def(si->values, value);
        assert(defs);
@@ -3673,7 +3697,6 @@ insert_reload(spill_ilp_t * si, const ir_node * value, ir_node * after)
        defs_t   *defs;
        ir_node  *reload,
                         *spill;
-       const arch_env_t *arch_env = si->birg->main_env->arch_env;
 
        DBG((si->dbg, LEVEL_3, "\t  inserting reload for value %+F before %+F\n", value, after));
 
@@ -3682,7 +3705,7 @@ insert_reload(spill_ilp_t * si, const ir_node * value, ir_node * after)
        spill = defs->spills;
        assert(spill && "no spill placed before reload");
 
-       reload = be_reload(arch_env, si->cls, after, get_irn_mode(value), spill);
+       reload = be_reload(si->cls, after, get_irn_mode(value), spill);
 
        /* enter into the linked list */
        set_irn_link(reload, defs->remats);
@@ -3693,10 +3716,9 @@ insert_reload(spill_ilp_t * si, const ir_node * value, ir_node * after)
 
 void perform_memory_operand(spill_ilp_t * si, memoperand_t * memoperand)
 {
-       defs_t           *defs;
-       ir_node          *value = get_irn_n(memoperand->irn, memoperand->pos);
-       ir_node          *spill;
-       const arch_env_t *arch_env = si->birg->main_env->arch_env;
+       defs_t  *defs;
+       ir_node *value = get_irn_n(memoperand->irn, memoperand->pos);
+       ir_node *spill;
 
        DBG((si->dbg, LEVEL_2, "\t  inserting memory operand for value %+F at %+F\n", value, memoperand->irn));
 
@@ -3705,7 +3727,7 @@ void perform_memory_operand(spill_ilp_t * si, memoperand_t * memoperand)
        spill = defs->spills;
        assert(spill && "no spill placed before reload");
 
-       arch_perform_memory_operand(arch_env, memoperand->irn, spill, memoperand->pos);
+       arch_perform_memory_operand(memoperand->irn, spill, memoperand->pos);
 }
 
 void insert_memoperands(spill_ilp_t * si)
@@ -3799,7 +3821,6 @@ insert_mem_copy(spill_ilp_t * si, ir_node * bb, ir_node * value)
 {
        ir_node          *insert_pos = bb;
        ir_node          *spill;
-       const arch_env_t *arch_env = si->birg->main_env->arch_env;
 
        /* find last definition of arg value in block */
        ir_node  *next;
@@ -3824,7 +3845,7 @@ insert_mem_copy(spill_ilp_t * si, ir_node * bb, ir_node * value)
 
        DBG((si->dbg, LEVEL_2, "\t  inserting mem copy for value %+F after %+F\n", value, insert_pos));
 
-       spill = be_spill2(arch_env, is_Block(insert_pos)?value:insert_pos, insert_pos);
+       spill = be_spill2(is_Block(insert_pos)?value:insert_pos, insert_pos);
 
        return spill;
 }
@@ -3898,7 +3919,7 @@ walker_reload_placer(ir_node * bb, void * data) {
                        if(!is_zero(name->value)) {
                                ir_node    *reload;
                                ir_node    *insert_pos = bb;
-                               ir_node    *prev = sched_block_last_noncf(si, bb);
+                               ir_node    *prev = sched_block_last_noncf(bb);
                                op_t       *prev_op = get_irn_link(prev);
 
                                while(be_is_Spill(prev)) {
@@ -4018,7 +4039,7 @@ walker_kill_unused(ir_node * bb, void * data)
 
                if(!bitset_is_set(kh->used, get_irn_idx(irn))) {
                        if(be_is_Spill(irn) || be_is_Reload(irn)) {
-                               DBG((kh->si->dbg, LEVEL_1, "\t SUBOPTIMAL! %+F IS UNUSED (cost: %g)\n", irn, get_cost(kh->si, irn)*execution_frequency(kh->si, bb)));
+                               DBG((kh->si->dbg, LEVEL_1, "\t SUBOPTIMAL! %+F IS UNUSED (cost: %g)\n", irn, get_cost(irn) * execution_frequency(kh->si, bb)));
 #if 0
                                assert(lpp_get_sol_state(kh->si->lpp) != lpp_optimal && "optimal solution is suboptimal?");
 #endif
@@ -4130,10 +4151,10 @@ static void
 rewire_uses(spill_ilp_t * si)
 {
        defs_t               *defs;
-       pset                 *ignore = pset_new_ptr(1);
-       be_dom_front_info_t *dom_front = si->birg->dom_front;
+       ir_nodeset_t         ignore;
 
-       pset_insert_ptr(ignore, get_irg_end(si->birg->irg));
+       ir_nodeset_init(&ignore);
+       ir_nodeset_insert(&ignore, get_irg_end(si->birg->irg));
 
        /* then fix uses of spills */
        set_foreach(si->values, defs) {
@@ -4156,11 +4177,25 @@ rewire_uses(spill_ilp_t * si)
                spills = get_spills_for_value(si, defs->value);
                DBG((si->dbg, LEVEL_2, "\t  %d remats, %d reloads, and %d spills for value %+F\n", remats, pset_count(reloads), pset_count(spills), defs->value));
                if(pset_count(spills) > 1) {
+                       be_ssa_construction_env_t senv;
+                       ir_node *node;
                        //assert(pset_count(reloads) > 0);
                        //                              print_irn_pset(spills);
                        //                              print_irn_pset(reloads);
 
-                       be_ssa_constr_set_ignore(dom_front, si->lv, spills, ignore);
+                       be_ssa_construction_init(&senv, si->birg);
+                       be_ssa_construction_set_ignore_uses(&senv, &ignore);
+                       pset_foreach(spills, node) {
+                               be_ssa_construction_add_copy(&senv, node);
+                       }
+                       pset_foreach(spills, node) {
+                               be_ssa_construction_fix_users(&senv, node);
+                       }
+                       be_ssa_construction_update_liveness_phis(&senv, si->lv);
+                       pset_foreach(spills, node) {
+                               be_liveness_update(si->lv, node);
+                       }
+                       be_ssa_construction_destroy(&senv);
                }
 
                del_pset(reloads);
@@ -4169,29 +4204,51 @@ rewire_uses(spill_ilp_t * si)
 
        /* first fix uses of remats and reloads */
        set_foreach(si->values, defs) {
-               pset           *nodes;
                const ir_node  *next = defs->remats;
                int             orig_kept = 0;
 
                if(next) {
-                       nodes = pset_new_ptr_default();
+                       be_ssa_construction_env_t senv;
+
+                       be_ssa_construction_init(&senv, si->birg);
+
                        if(sched_is_scheduled(defs->value)) {
-                               pset_insert_ptr(nodes, defs->value);
+                               be_ssa_construction_add_copy(&senv, (ir_node*) defs->value);
                                orig_kept = 1;
                        }
 
+                       next = defs->remats;
+                       while(next) {
+                               be_ssa_construction_add_copy(&senv, (ir_node*) next);
+                               next = get_irn_link(next);
+                       }
+
+                       if(sched_is_scheduled(defs->value)) {
+                               be_ssa_construction_fix_users(&senv, (ir_node*) defs->value);
+                       }
+
+                       next = defs->remats;
                        while(next) {
-                               pset_insert_ptr(nodes, next);
+                               be_ssa_construction_fix_users(&senv, (ir_node*) next);
                                next = get_irn_link(next);
                        }
 
-                       DBG((si->dbg, LEVEL_4, "\t    %d new definitions for value %+F\n", pset_count(nodes)-orig_kept, defs->value));
-                       be_ssa_constr_set_ignore(dom_front, si->lv, nodes, NULL);
+                       be_ssa_construction_update_liveness_phis(&senv, si->lv);
+                       if(sched_is_scheduled(defs->value)) {
+                               be_liveness_update(si->lv, (ir_node*) defs->value);
+                       }
+
+                       next = defs->remats;
+                       while(next) {
+                               be_liveness_update(si->lv, (ir_node*) next);
+                               next = get_irn_link(next);
+                       }
 
-                       del_pset(nodes);
+                       be_ssa_construction_destroy(&senv);
                }
        }
 
+       ir_nodeset_destroy(&ignore);
 //     remove_unused_defs(si);
 }
 
@@ -4230,8 +4287,8 @@ get_n_regs(spill_ilp_t * si)
        bitset_t *arch_regs = bitset_malloc(arch_n_regs);
        bitset_t *abi_regs = bitset_malloc(arch_n_regs);
 
-       arch_put_non_ignore_regs(si->birg->main_env->arch_env, si->cls, arch_regs);
-    be_abi_put_ignore_regs(si->birg->abi, si->cls, abi_regs);
+       arch_put_non_ignore_regs(si->cls, arch_regs);
+       be_abi_put_ignore_regs(si->birg->abi, si->cls, abi_regs);
 
        bitset_andnot(arch_regs, abi_regs);
        arch_n_regs = bitset_popcnt(arch_regs);
@@ -4358,7 +4415,6 @@ be_spill_remat(be_irg_t *birg, const arch_register_class_t *cls)
        if(opt_verify & VERIFY_DOMINANCE)
                be_check_dominance(irg);
 
-       be_assure_dom_front(birg);
        be_assure_liveness(birg);
 
        obstack_init(&obst);
@@ -4553,8 +4609,8 @@ BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spillremat);
 
 #else                          /* WITH_ILP */
 
-static void INLINE
-only_that_you_can_compile_without_WITH_ILP_defined(void)
+static __attribute__((unused))
+void only_that_you_can_compile_without_WITH_ILP_defined(void)
 {
 }
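
For orientation: the largest recurring change above (in rewire_uses() and the surrounding walkers) replaces the removed pset-based be_ssa_constr_set_ignore() interface with the incremental SSA-construction API from bessaconstr.h. Below is a minimal sketch of that calling sequence, assembled only from the calls visible in the hunks above; the helper name, its parameters, and the array-based loops are illustrative and not part of the commit.

	/* Sketch only (assumes bessaconstr.h and belive_t.h, both included above).
	 * Rewires users of the given copies to the dominating definition and keeps
	 * liveness information current, mirroring the new code in rewire_uses(). */
	static void rewire_copies_sketch(be_irg_t *birg, be_lv_t *lv,
	                                 ir_node **copies, int n_copies)
	{
		be_ssa_construction_env_t senv;
		int i;

		be_ssa_construction_init(&senv, birg);
		/* optionally: be_ssa_construction_set_ignore_uses(&senv, &ignore),
		 * as done above to skip the End keep-alive edges */
		for (i = 0; i < n_copies; ++i)
			be_ssa_construction_add_copy(&senv, copies[i]);
		for (i = 0; i < n_copies; ++i)
			be_ssa_construction_fix_users(&senv, copies[i]);
		be_ssa_construction_update_liveness_phis(&senv, lv);
		for (i = 0; i < n_copies; ++i)
			be_liveness_update(lv, copies[i]);
		be_ssa_construction_destroy(&senv);
	}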