ia32: we cannot fold ia32_mode_E reloads
diff --git a/ir/be/benode.c b/ir/be/benode.c
index 49e64e1..3916616 100644
--- a/ir/be/benode.c
+++ b/ir/be/benode.c
@@ -1,20 +1,6 @@
 /*
- * Copyright (C) 1995-2011 University of Karlsruhe.  All right reserved.
- *
  * This file is part of libFirm.
- *
- * This file may be distributed and/or modified under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation and appearing in the file LICENSE.GPL included in the
- * packaging of this file.
- *
- * Licensees holding valid libFirm Professional Edition licenses may use
- * this file in accordance with the libFirm Commercial License.
- * Agreement provided with the Software.
- *
- * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
- * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE.
+ * Copyright (C) 2012 University of Karlsruhe.
  */
 
 /**
@@ -22,7 +8,6 @@
  * @brief       Backend node support for generic backend nodes.
  * @author      Sebastian Hack
  * @date        17.05.2005
- * @version     $Id$
  *
  * Backend node support for generic backend nodes.
  * This file provides Perm, Copy, Spill and Reload nodes.
@@ -31,6 +16,7 @@
 
 #include <stdlib.h>
 
+#include "beirg.h"
 #include "obst.h"
 #include "set.h"
 #include "pmap.h"
@@ -49,6 +35,8 @@
 #include "irprintf.h"
 #include "irgwalk.h"
 #include "iropt_t.h"
+#include "irbackedge_t.h"
+#include "irverify_t.h"
 
 #include "be_t.h"
 #include "belive_t.h"
@@ -184,9 +172,8 @@ static int Call_cmp_attr(const ir_node *a, const ir_node *b)
        return be_nodes_equal(a, b);
 }
 
-static arch_register_req_t *allocate_reg_req(const ir_node *node)
+static arch_register_req_t *allocate_reg_req(ir_graph *const irg)
 {
-       ir_graph       *irg  = get_irn_irg(node);
        struct obstack *obst = be_get_be_obst(irg);
 
        arch_register_req_t *req = OALLOCZ(obst, arch_register_req_t);
@@ -211,6 +198,8 @@ void be_set_constr_out(ir_node *node, int pos, const arch_register_req_t *req)
  */
 static void init_node_attr(ir_node *node, int n_inputs, int n_outputs)
 {
+       assert(n_outputs >= 0);
+
        ir_graph       *irg  = get_irn_irg(node);
        struct obstack *obst = be_get_be_obst(irg);
        backend_info_t *info = be_get_info(node);
@@ -228,31 +217,16 @@ static void init_node_attr(ir_node *node, int n_inputs, int n_outputs)
        }
        info->in_reqs = in_reqs;
 
-       if (n_outputs >= 0) {
-               int i;
-               info->out_infos = NEW_ARR_D(reg_out_info_t, obst, n_outputs);
-               memset(info->out_infos, 0, n_outputs * sizeof(info->out_infos[0]));
-               for (i = 0; i < n_outputs; ++i) {
-                       info->out_infos[i].req = arch_no_register_req;
-               }
-       } else {
-               info->out_infos = NEW_ARR_F(reg_out_info_t, 0);
+       info->out_infos = NEW_ARR_DZ(reg_out_info_t, obst, n_outputs);
+       for (int i = 0; i < n_outputs; ++i) {
+               info->out_infos[i].req = arch_no_register_req;
        }
 }
 
-static void add_register_req_out(ir_node *node)
-{
-       backend_info_t *info = be_get_info(node);
-       reg_out_info_t  out_info;
-       memset(&out_info, 0, sizeof(out_info));
-       out_info.req = arch_no_register_req;
-       ARR_APP1(reg_out_info_t, info->out_infos, out_info);
-}
-
-static void add_register_req_in(ir_node *node)
+static void add_register_req_in(ir_node *node, const arch_register_req_t *req)
 {
        backend_info_t *info = be_get_info(node);
-       ARR_APP1(const arch_register_req_t*, info->in_reqs, arch_no_register_req);
+       ARR_APP1(const arch_register_req_t*, info->in_reqs, req);
 }
 
 ir_node *be_new_Spill(const arch_register_class_t *cls,
@@ -275,15 +249,8 @@ ir_node *be_new_Spill(const arch_register_class_t *cls,
 
        be_node_set_reg_class_in(res, n_be_Spill_frame, cls_frame);
        be_node_set_reg_class_in(res, n_be_Spill_val, cls);
-       /*
-        * For spills and reloads, we return "none" as requirement for frame
-        * pointer, so every input is ok. Some backends need this (STA).
-        * Matze: we should investigate if this is really needed, this solution
-        *        looks very hacky to me
-        */
-       be_set_constr_in(res, n_be_Spill_frame, arch_no_register_req);
-
        arch_set_irn_register_req_out(res, 0, arch_no_register_req);
+       arch_add_irn_flags(res, arch_irn_flags_spill);
 
        return res;
 }
@@ -312,14 +279,6 @@ ir_node *be_new_Reload(const arch_register_class_t *cls,
        a->offset = 0;
        a->base.exc.pin_state = op_pin_state_pinned;
 
-       /*
-        * For spills and reloads, we return "none" as requirement for frame
-        * pointer, so every input is ok. Some backends need this (e.g. STA).
-        * Matze: we should investigate if this is really needed, this solution
-        *        looks very hacky to me
-        */
-       be_set_constr_in(res, n_be_Reload_frame, arch_no_register_req);
-
        return res;
 }
 
@@ -359,8 +318,19 @@ ir_node *be_new_Perm(const arch_register_class_t *cls, ir_node *block,
        attr                = (be_node_attr_t*) get_irn_generic_attr(irn);
        attr->exc.pin_state = op_pin_state_pinned;
        for (i = 0; i < n; ++i) {
-               be_node_set_reg_class_in(irn, i, cls);
-               be_node_set_reg_class_out(irn, i, cls);
+               const ir_node             *input = in[i];
+               const arch_register_req_t *req   = arch_get_irn_register_req(input);
+               if (req->width == 1) {
+                       be_set_constr_in(irn, i, cls->class_req);
+                       be_set_constr_out(irn, i, cls->class_req);
+               } else {
+                       arch_register_req_t *const new_req = allocate_reg_req(irg);
+                       new_req->cls   = cls;
+                       new_req->type  = (req->type & arch_register_req_type_aligned);
+                       new_req->width = req->width;
+                       be_set_constr_in(irn, i, new_req);
+                       be_set_constr_out(irn, i, new_req);
+               }
        }
 
        return irn;
@@ -416,8 +386,8 @@ ir_node *be_new_MemPerm(ir_node *block, int n, ir_node *in[])
        be_node_set_reg_class_in(irn, 0, sp->reg_class);
 
        attr               = (be_memperm_attr_t*)get_irn_generic_attr(irn);
-       attr->in_entities  = OALLOCNZ(irg->obst, ir_entity*, n);
-       attr->out_entities = OALLOCNZ(irg->obst, ir_entity*, n);
+       attr->in_entities  = OALLOCNZ(get_irg_obstack(irg), ir_entity*, n);
+       attr->out_entities = OALLOCNZ(get_irg_obstack(irg), ir_entity*, n);
 
        return irn;
 }
@@ -426,7 +396,6 @@ ir_node *be_new_Copy(ir_node *bl, ir_node *op)
 {
        ir_node *in[1];
        ir_node *res;
-       arch_register_req_t *req;
        be_node_attr_t *attr;
        ir_graph *irg = get_Block_irg(bl);
        const arch_register_req_t   *in_req = arch_get_irn_register_req(op);
@@ -440,7 +409,7 @@ ir_node *be_new_Copy(ir_node *bl, ir_node *op)
        be_node_set_reg_class_in(res, 0, cls);
        be_node_set_reg_class_out(res, 0, cls);
 
-       req = allocate_reg_req(res);
+       arch_register_req_t *const req = allocate_reg_req(irg);
        req->cls        = cls;
        req->type       = arch_register_req_type_should_be_same
                | (in_req->type & arch_register_req_type_aligned);
@@ -456,11 +425,6 @@ ir_node *be_get_Copy_op(const ir_node *cpy)
        return get_irn_n(cpy, n_be_Copy_op);
 }
 
-void be_set_Copy_op(ir_node *cpy, ir_node *op)
-{
-       set_irn_n(cpy, n_be_Copy_op, op);
-}
-
 ir_node *be_new_Keep(ir_node *block, int n, ir_node *in[])
 {
        int i;
@@ -474,8 +438,11 @@ ir_node *be_new_Keep(ir_node *block, int n, ir_node *in[])
        attr->exc.pin_state = op_pin_state_pinned;
 
        for (i = 0; i < n; ++i) {
-               add_irn_n(res, in[i]);
-               add_register_req_in(res);
+               ir_node *pred = in[i];
+               add_irn_n(res, pred);
+               const arch_register_req_t *req = arch_get_irn_register_req(pred);
+               req = req->cls != NULL ? req->cls->class_req : arch_no_register_req;
+               add_register_req_in(res, req);
        }
        keep_alive(res);
 
@@ -484,21 +451,18 @@ ir_node *be_new_Keep(ir_node *block, int n, ir_node *in[])
 
 void be_Keep_add_node(ir_node *keep, const arch_register_class_t *cls, ir_node *node)
 {
-       int n;
-
        assert(be_is_Keep(keep));
-       n = add_irn_n(keep, node);
-       add_register_req_in(keep);
-       be_node_set_reg_class_in(keep, n, cls);
+       add_irn_n(keep, node);
+       add_register_req_in(keep, cls->class_req);
 }
 
-ir_node *be_new_Call(dbg_info *dbg, ir_graph *irg, ir_node *bl, ir_node *mem,
-               ir_node *sp, ir_node *ptr, int n_outs, int n, ir_node *in[],
-               ir_type *call_tp)
+ir_node *be_new_Call(dbg_info *dbg, ir_node *bl, ir_node *mem,
+               const arch_register_req_t *sp_req, ir_node *sp,
+               const arch_register_req_t *ptr_req, ir_node *ptr,
+               int n_outs, int n, ir_node *in[], ir_type *call_tp)
 {
        be_call_attr_t *a;
        int real_n = n_be_Call_first_arg + n;
-       ir_node *irn;
        ir_node **real_in;
 
        NEW_ARR_A(ir_node *, real_in, real_n);
@@ -507,13 +471,16 @@ ir_node *be_new_Call(dbg_info *dbg, ir_graph *irg, ir_node *bl, ir_node *mem,
        real_in[n_be_Call_ptr] = ptr;
        memcpy(&real_in[n_be_Call_first_arg], in, n * sizeof(in[0]));
 
-       irn = new_ir_node(dbg, irg, bl, op_be_Call, mode_T, real_n, real_in);
+       ir_graph *const irg = get_Block_irg(bl);
+       ir_node  *const irn = new_ir_node(dbg, irg, bl, op_be_Call, mode_T, real_n, real_in);
        init_node_attr(irn, real_n, n_outs);
        a                     = (be_call_attr_t*)get_irn_generic_attr(irn);
        a->ent                = NULL;
        a->call_tp            = call_tp;
        a->pop                = 0;
        a->base.exc.pin_state = op_pin_state_pinned;
+       be_set_constr_in(irn, n_be_Call_sp, sp_req);
+       be_set_constr_in(irn, n_be_Call_ptr, ptr_req);
        return irn;
 }
 
@@ -557,22 +524,14 @@ unsigned be_Call_get_pop(const ir_node *call)
        return a->pop;
 }
 
-ir_node *be_new_Return(dbg_info *dbg, ir_graph *irg, ir_node *block, int n_res,
-                       unsigned pop, int n, ir_node *in[])
+ir_node *be_new_Return(dbg_info *const dbg, ir_node *const block, int const n_res, unsigned const pop, int const n, ir_node **const in)
 {
-       be_return_attr_t *a;
-       ir_node *res;
-       int i;
-
-       res = new_ir_node(dbg, irg, block, op_be_Return, mode_X, -1, NULL);
-       init_node_attr(res, -1, 1);
-       for (i = 0; i < n; ++i) {
-               add_irn_n(res, in[i]);
-               add_register_req_in(res);
-       }
+       ir_graph *const irg = get_Block_irg(block);
+       ir_node  *const res = new_ir_node(dbg, irg, block, op_be_Return, mode_X, n, in);
+       init_node_attr(res, n, 1);
        be_set_constr_out(res, 0, arch_no_register_req);
 
-       a = (be_return_attr_t*)get_irn_generic_attr(res);
+       be_return_attr_t *const a = (be_return_attr_t*)get_irn_generic_attr(res);
        a->num_ret_vals       = n_res;
        a->pop                = pop;
        a->emit_pop           = 0;
@@ -605,16 +564,6 @@ void be_Return_set_emit_pop(ir_node *ret, int emit_pop)
        a->emit_pop = emit_pop;
 }
 
-int be_Return_append_node(ir_node *ret, ir_node *node)
-{
-       int pos;
-
-       pos = add_irn_n(ret, node);
-       add_register_req_in(ret);
-
-       return pos;
-}
-
 ir_node *be_new_IncSP(const arch_register_t *sp, ir_node *bl,
                       ir_node *old_sp, int offset, int align)
 {
@@ -625,7 +574,7 @@ ir_node *be_new_IncSP(const arch_register_t *sp, ir_node *bl,
 
        in[0]     = old_sp;
        irn       = new_ir_node(NULL, irg, bl, op_be_IncSP, sp->reg_class->mode,
-                               sizeof(in) / sizeof(in[0]), in);
+                               ARRAY_SIZE(in), in);
        init_node_attr(irn, 1, 1);
        a                     = (be_incsp_attr_t*)get_irn_generic_attr(irn);
        a->offset             = offset;
@@ -694,17 +643,13 @@ ir_node *be_new_SubSP(const arch_register_t *sp, ir_node *bl, ir_node *old_sp, i
 ir_node *be_new_Start(dbg_info *dbgi, ir_node *bl, int n_outs)
 {
        ir_node *res;
-       int i;
        ir_graph *irg = get_Block_irg(bl);
        be_node_attr_t *attr;
 
        res = new_ir_node(dbgi, irg, bl, op_be_Start, mode_T, 0, NULL);
-       init_node_attr(res, 0, -1);
+       init_node_attr(res, 0, n_outs);
        attr = (be_node_attr_t*) get_irn_generic_attr(res);
        attr->exc.pin_state = op_pin_state_pinned;
-       for (i = 0; i < n_outs; ++i) {
-               add_register_req_out(res);
-       }
 
        return res;
 }
@@ -759,6 +704,12 @@ ir_node *be_new_CopyKeep(ir_node *bl, ir_node *src, int n, ir_node *in_keep[])
        attr->exc.pin_state = op_pin_state_floats;
        be_node_set_reg_class_in(irn, 0, cls);
        be_node_set_reg_class_out(irn, 0, cls);
+       for (int i = 0; i < n; ++i) {
+               ir_node *pred = in_keep[i];
+               const arch_register_req_t *req = arch_get_irn_register_req(pred);
+               req = req->cls != NULL ? req->cls->class_req : arch_no_register_req;
+               be_set_constr_in(irn, i+1, req);
+       }
 
        return irn;
 }
@@ -858,11 +809,11 @@ const arch_register_req_t *be_create_reg_req(struct obstack *obst,
                const arch_register_t *reg, arch_register_req_type_t additional_types)
 {
        arch_register_req_t         *req = OALLOC(obst, arch_register_req_t);
-       const arch_register_class_t *cls = arch_register_get_class(reg);
+       const arch_register_class_t *cls = reg->reg_class;
        unsigned                    *limited_bitset;
 
        limited_bitset = rbitset_obstack_alloc(obst, arch_register_class_n_regs(cls));
-       rbitset_set(limited_bitset, arch_register_get_index(reg));
+       rbitset_set(limited_bitset, reg->index);
 
        req->type    = arch_register_req_type_limited | additional_types;
        req->cls     = cls;
@@ -954,17 +905,6 @@ int be_get_IncSP_align(const ir_node *irn)
        return a->align;
 }
 
-static arch_irn_class_t be_node_classify(const ir_node *irn)
-{
-       switch (get_irn_opcode(irn)) {
-               case beo_Spill:  return arch_irn_class_spill;
-               case beo_Reload: return arch_irn_class_reload;
-               case beo_Perm:   return arch_irn_class_perm;
-               case beo_Copy:   return arch_irn_class_copy;
-               default:         return arch_irn_class_none;
-       }
-}
-
 static ir_entity *be_node_get_frame_entity(const ir_node *irn)
 {
        return be_get_frame_entity(irn);
@@ -1005,11 +945,9 @@ static int be_node_get_sp_bias(const ir_node *irn)
 
 /* for be nodes */
 static const arch_irn_ops_t be_node_irn_ops = {
-       be_node_classify,
        be_node_get_frame_entity,
        be_node_set_frame_offset,
        be_node_get_sp_bias,
-       NULL,    /* get_inverse             */
        NULL,    /* get_op_estimated_cost   */
        NULL,    /* possible_memory_operand */
        NULL,    /* perform_memory_operand  */
@@ -1018,16 +956,13 @@ static const arch_irn_ops_t be_node_irn_ops = {
 static int get_start_reg_index(ir_graph *irg, const arch_register_t *reg)
 {
        ir_node *start  = get_irg_start(irg);
-       unsigned n_outs = arch_get_irn_n_outs(start);
-       int      i;
 
        /* do a naive linear search... */
-       for (i = 0; i < (int)n_outs; ++i) {
-               const arch_register_req_t *out_req
-                       = arch_get_irn_register_req_out(start, i);
-               if (! (out_req->type & arch_register_req_type_limited))
+       be_foreach_out(start, i) {
+               arch_register_req_t const *const out_req = arch_get_irn_register_req_out(start, i);
+               if (!arch_register_req_is(out_req, limited))
                        continue;
-               if (out_req->cls != arch_register_get_class(reg))
+               if (out_req->cls != reg->reg_class)
                        continue;
                if (!rbitset_is_set(out_req->limited, reg->index))
                        continue;
@@ -1040,8 +975,7 @@ ir_node *be_get_initial_reg_value(ir_graph *irg, const arch_register_t *reg)
 {
        int      i     = get_start_reg_index(irg, reg);
        ir_node *start = get_irg_start(irg);
-       ir_mode *mode  = arch_register_class_mode(arch_register_get_class(reg));
-       const ir_edge_t *edge;
+       ir_mode *mode  = arch_register_class_mode(reg->reg_class);
 
        foreach_out_edge(start, edge) {
                ir_node *proj = get_edge_src_irn(edge);
@@ -1054,30 +988,6 @@ ir_node *be_get_initial_reg_value(ir_graph *irg, const arch_register_t *reg)
        return new_r_Proj(start, mode, i);
 }
 
-int be_find_return_reg_input(ir_node *ret, const arch_register_t *reg)
-{
-       int arity = get_irn_arity(ret);
-       int i;
-       /* do a naive linear search... */
-       for (i = 0; i < arity; ++i) {
-               const arch_register_req_t *req = arch_get_irn_register_req_in(ret, i);
-               if (! (req->type & arch_register_req_type_limited))
-                       continue;
-               if (req->cls != arch_register_get_class(reg))
-                       continue;
-               if (!rbitset_is_set(req->limited, reg->index))
-                       continue;
-               return i;
-       }
-       panic("Tried querying undefined register '%s' at Return", reg->name);
-}
-
-static arch_irn_class_t dummy_classify(const ir_node *node)
-{
-       (void) node;
-       return arch_irn_class_none;
-}
-
 static ir_entity* dummy_get_frame_entity(const ir_node *node)
 {
        (void) node;
@@ -1088,7 +998,7 @@ static void dummy_set_frame_offset(ir_node *node, int bias)
 {
        (void) node;
        (void) bias;
-       panic("dummy_set_frame_offset() should not be called");
+       panic("should not be called");
 }
 
 static int dummy_get_sp_bias(const ir_node *node)
@@ -1099,11 +1009,9 @@ static int dummy_get_sp_bias(const ir_node *node)
 
 /* for "middleend" nodes */
 static const arch_irn_ops_t dummy_be_irn_ops = {
-       dummy_classify,
        dummy_get_frame_entity,
        dummy_set_frame_offset,
        dummy_get_sp_bias,
-       NULL,      /* get_inverse           */
        NULL,      /* get_op_estimated_cost */
        NULL,      /* possible_memory_operand */
        NULL,      /* perform_memory_operand */
@@ -1112,30 +1020,25 @@ static const arch_irn_ops_t dummy_be_irn_ops = {
 
 
 ir_node *be_new_Phi(ir_node *block, int n_ins, ir_node **ins, ir_mode *mode,
-                    const arch_register_class_t *cls)
+                    const arch_register_req_t *req)
 {
-       const arch_register_req_t *req;
        ir_graph       *irg  = get_irn_irg(block);
        struct obstack *obst = be_get_be_obst(irg);
        backend_info_t *info;
        int             i;
 
-       ir_node *phi = new_r_Phi(block, n_ins, ins, mode);
+       ir_node *phi = new_ir_node(NULL, irg, block, op_Phi, mode, n_ins, ins);
+       phi->attr.phi.u.backedge = new_backedge_arr(get_irg_obstack(irg), n_ins);
        info = be_get_info(phi);
-       info->out_infos = NEW_ARR_D(reg_out_info_t, obst, 1);
-       memset(info->out_infos, 0, 1 * sizeof(info->out_infos[0]));
-       info->in_reqs = OALLOCN(obst, const arch_register_req_t*, n_ins);
+       info->out_infos = NEW_ARR_DZ(reg_out_info_t, obst, 1);
+       info->in_reqs   = OALLOCN(obst, const arch_register_req_t*, n_ins);
 
-       if (cls == NULL) {
-               req = arch_no_register_req;
-       } else {
-               req = cls->class_req;
-       }
        info->out_infos[0].req = req;
        for (i = 0; i < n_ins; ++i) {
                info->in_reqs[i] = req;
        }
-
+       irn_verify_irg(phi, irg);
+       phi = optimize_node(phi);
        return phi;
 }
 
@@ -1153,8 +1056,12 @@ void be_set_phi_reg_req(ir_node *node, const arch_register_req_t *req)
        assert(mode_is_datab(get_irn_mode(node)));
 }
 
-void be_dump_phi_reg_reqs(FILE *F, ir_node *node, dump_reason_t reason)
+void be_dump_phi_reg_reqs(FILE *F, const ir_node *node, dump_reason_t reason)
 {
+       ir_graph *irg = get_irn_irg(node);
+       if (!irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_BACKEND))
+               return;
+
        switch (reason) {
        case dump_node_opcode_txt:
                fputs(get_op_name(get_irn_op(node)), F);
@@ -1165,13 +1072,8 @@ void be_dump_phi_reg_reqs(FILE *F, ir_node *node, dump_reason_t reason)
        case dump_node_nodeattr_txt:
                break;
        case dump_node_info_txt:
-       {
-               backend_info_t *info = be_get_info(node);
-               if (info != NULL && info->out_infos[0].req != NULL) {
-                       arch_dump_reqs_and_registers(F, node);
-               }
+               arch_dump_reqs_and_registers(F, node);
                break;
-       }
 
        default:
                break;
@@ -1179,11 +1081,9 @@ void be_dump_phi_reg_reqs(FILE *F, ir_node *node, dump_reason_t reason)
 }
 
 static const arch_irn_ops_t phi_irn_ops = {
-       dummy_classify,
        dummy_get_frame_entity,
        dummy_set_frame_offset,
        dummy_get_sp_bias,
-       NULL,    /* get_inverse             */
        NULL,    /* get_op_estimated_cost   */
        NULL,    /* possible_memory_operand */
        NULL,    /* perform_memory_operand  */
@@ -1194,7 +1094,7 @@ static const arch_irn_ops_t phi_irn_ops = {
 /**
  * ir_op-Operation: dump a be node to file
  */
-static void dump_node(FILE *f, ir_node *irn, dump_reason_t reason)
+static void dump_node(FILE *f, const ir_node *irn, dump_reason_t reason)
 {
        assert(is_be_node(irn));
 
@@ -1284,20 +1184,8 @@ static void copy_attr(ir_graph *irg, const ir_node *old_node, ir_node *new_node)
 
        memcpy(new_attr, old_attr, get_op_attr_size(get_irn_op(old_node)));
 
-       new_info->flags = old_info->flags;
-       if (old_info->out_infos != NULL) {
-               size_t n_outs = ARR_LEN(old_info->out_infos);
-               /* need dyanmic out infos? */
-               if (be_is_Perm(new_node)) {
-                       new_info->out_infos = NEW_ARR_F(reg_out_info_t, n_outs);
-               } else {
-                       new_info->out_infos = NEW_ARR_D(reg_out_info_t, obst, n_outs);
-               }
-               memcpy(new_info->out_infos, old_info->out_infos,
-                          n_outs * sizeof(new_info->out_infos[0]));
-       } else {
-               new_info->out_infos = NULL;
-       }
+       new_info->flags     = old_info->flags;
+       new_info->out_infos = old_info->out_infos ? DUP_ARR_D(reg_out_info_t, obst, old_info->out_infos) : NULL;
 
        /* input infos */
        if (old_info->in_reqs != NULL) {
@@ -1315,51 +1203,44 @@ static void copy_attr(ir_graph *irg, const ir_node *old_node, ir_node *new_node)
        }
 }
 
-static const ir_op_ops be_node_op_ops = {
-       firm_default_hash,
-       NULL,
-       NULL,
-       NULL,
-       NULL,
-       NULL,
-       NULL,
-       NULL,
-       NULL,
-       copy_attr,
-       NULL,
-       NULL,
-       NULL,
-       NULL,
-       dump_node,
-       NULL,
-       &be_node_irn_ops
-};
-
 int is_be_node(const ir_node *irn)
 {
        return get_op_ops(get_irn_op(irn))->be_ops == &be_node_irn_ops;
 }
 
+static ir_op *new_be_op(unsigned code, const char *name, op_pin_state p,
+                        irop_flags flags, op_arity opar, size_t attr_size)
+{
+       ir_op *res = new_ir_op(code, name, p, flags, opar, 0, attr_size);
+       res->ops.dump_node = dump_node;
+       res->ops.copy_attr = copy_attr;
+       res->ops.be_ops    = &be_node_irn_ops;
+       return res;
+}
+
 void be_init_op(void)
 {
        unsigned opc;
 
+       assert(op_be_Spill == NULL);
+
        /* Acquire all needed opcodes. */
-       op_be_Spill     = new_ir_op(beo_Spill,     "be_Spill",     op_pin_state_exc_pinned, irop_flag_none,                          oparity_unary,    0, sizeof(be_frame_attr_t),   &be_node_op_ops);
-       op_be_Reload    = new_ir_op(beo_Reload,    "be_Reload",    op_pin_state_exc_pinned, irop_flag_none,                          oparity_zero,     0, sizeof(be_frame_attr_t),   &be_node_op_ops);
-       op_be_Perm      = new_ir_op(beo_Perm,      "be_Perm",      op_pin_state_exc_pinned, irop_flag_none,                          oparity_variable, 0, sizeof(be_node_attr_t),    &be_node_op_ops);
-       op_be_MemPerm   = new_ir_op(beo_MemPerm,   "be_MemPerm",   op_pin_state_exc_pinned, irop_flag_none,                          oparity_variable, 0, sizeof(be_memperm_attr_t), &be_node_op_ops);
-       op_be_Copy      = new_ir_op(beo_Copy,      "be_Copy",      op_pin_state_exc_pinned, irop_flag_none,                          oparity_unary,    0, sizeof(be_node_attr_t),    &be_node_op_ops);
-       op_be_Keep      = new_ir_op(beo_Keep,      "be_Keep",      op_pin_state_exc_pinned, irop_flag_keep,                          oparity_dynamic,  0, sizeof(be_node_attr_t),    &be_node_op_ops);
-       op_be_CopyKeep  = new_ir_op(beo_CopyKeep,  "be_CopyKeep",  op_pin_state_exc_pinned, irop_flag_keep,                          oparity_variable, 0, sizeof(be_node_attr_t),    &be_node_op_ops);
-       op_be_Call      = new_ir_op(beo_Call,      "be_Call",      op_pin_state_exc_pinned, irop_flag_fragile|irop_flag_uses_memory, oparity_variable, 0, sizeof(be_call_attr_t),    &be_node_op_ops);
-       ir_op_set_fragile_indices(op_be_Call, n_be_Call_mem, pn_be_Call_X_regular, pn_be_Call_X_except);
-       op_be_Return    = new_ir_op(beo_Return,    "be_Return",    op_pin_state_exc_pinned, irop_flag_cfopcode,                      oparity_dynamic,  0, sizeof(be_return_attr_t),  &be_node_op_ops);
-       op_be_AddSP     = new_ir_op(beo_AddSP,     "be_AddSP",     op_pin_state_exc_pinned, irop_flag_none,                          oparity_unary,    0, sizeof(be_node_attr_t),    &be_node_op_ops);
-       op_be_SubSP     = new_ir_op(beo_SubSP,     "be_SubSP",     op_pin_state_exc_pinned, irop_flag_none,                          oparity_unary,    0, sizeof(be_node_attr_t),    &be_node_op_ops);
-       op_be_IncSP     = new_ir_op(beo_IncSP,     "be_IncSP",     op_pin_state_exc_pinned, irop_flag_none,                          oparity_unary,    0, sizeof(be_incsp_attr_t),   &be_node_op_ops);
-       op_be_Start     = new_ir_op(beo_Start,     "be_Start",     op_pin_state_exc_pinned, irop_flag_none,                          oparity_zero,     0, sizeof(be_node_attr_t),    &be_node_op_ops);
-       op_be_FrameAddr = new_ir_op(beo_FrameAddr, "be_FrameAddr", op_pin_state_exc_pinned, irop_flag_none,                          oparity_unary,    0, sizeof(be_frame_attr_t),   &be_node_op_ops);
+       op_be_Spill     = new_be_op(beo_Spill,     "be_Spill",     op_pin_state_exc_pinned, irop_flag_none,                          oparity_unary,    sizeof(be_frame_attr_t));
+       op_be_Reload    = new_be_op(beo_Reload,    "be_Reload",    op_pin_state_exc_pinned, irop_flag_none,                          oparity_zero,     sizeof(be_frame_attr_t));
+       op_be_Perm      = new_be_op(beo_Perm,      "be_Perm",      op_pin_state_exc_pinned, irop_flag_none,                          oparity_variable, sizeof(be_node_attr_t));
+       op_be_MemPerm   = new_be_op(beo_MemPerm,   "be_MemPerm",   op_pin_state_exc_pinned, irop_flag_none,                          oparity_variable, sizeof(be_memperm_attr_t));
+       op_be_Copy      = new_be_op(beo_Copy,      "be_Copy",      op_pin_state_exc_pinned, irop_flag_none,                          oparity_unary,    sizeof(be_node_attr_t));
+       op_be_Keep      = new_be_op(beo_Keep,      "be_Keep",      op_pin_state_exc_pinned, irop_flag_keep,                          oparity_dynamic,  sizeof(be_node_attr_t));
+       op_be_CopyKeep  = new_be_op(beo_CopyKeep,  "be_CopyKeep",  op_pin_state_exc_pinned, irop_flag_keep,                          oparity_variable, sizeof(be_node_attr_t));
+       op_be_Call      = new_be_op(beo_Call,      "be_Call",      op_pin_state_exc_pinned, irop_flag_fragile|irop_flag_uses_memory, oparity_variable, sizeof(be_call_attr_t));
+       ir_op_set_memory_index(op_be_Call, n_be_Call_mem);
+       ir_op_set_fragile_indices(op_be_Call, pn_be_Call_X_regular, pn_be_Call_X_except);
+       op_be_Return    = new_be_op(beo_Return,    "be_Return",    op_pin_state_exc_pinned, irop_flag_cfopcode,                      oparity_variable, sizeof(be_return_attr_t));
+       op_be_AddSP     = new_be_op(beo_AddSP,     "be_AddSP",     op_pin_state_exc_pinned, irop_flag_none,                          oparity_unary,    sizeof(be_node_attr_t));
+       op_be_SubSP     = new_be_op(beo_SubSP,     "be_SubSP",     op_pin_state_exc_pinned, irop_flag_none,                          oparity_unary,    sizeof(be_node_attr_t));
+       op_be_IncSP     = new_be_op(beo_IncSP,     "be_IncSP",     op_pin_state_exc_pinned, irop_flag_none,                          oparity_unary,    sizeof(be_incsp_attr_t));
+       op_be_Start     = new_be_op(beo_Start,     "be_Start",     op_pin_state_exc_pinned, irop_flag_none,                          oparity_zero,     sizeof(be_node_attr_t));
+       op_be_FrameAddr = new_be_op(beo_FrameAddr, "be_FrameAddr", op_pin_state_exc_pinned, irop_flag_none,                          oparity_unary,    sizeof(be_frame_attr_t));
 
        op_be_Spill->ops.node_cmp_attr     = FrameAddr_cmp_attr;
        op_be_Reload->ops.node_cmp_attr    = FrameAddr_cmp_attr;
@@ -1378,10 +1259,28 @@ void be_init_op(void)
 
        /* attach out dummy_ops to middle end nodes */
        for (opc = iro_First; opc <= iro_Last; ++opc) {
-               ir_op *op = get_irp_opcode(opc);
+               ir_op *op = ir_get_opcode(opc);
                assert(op->ops.be_ops == NULL);
                op->ops.be_ops = &dummy_be_irn_ops;
        }
 
        op_Phi->ops.be_ops = &phi_irn_ops;
 }
+
+void be_finish_op(void)
+{
+       free_ir_op(op_be_Spill);     op_be_Spill     = NULL;
+       free_ir_op(op_be_Reload);    op_be_Reload    = NULL;
+       free_ir_op(op_be_Perm);      op_be_Perm      = NULL;
+       free_ir_op(op_be_MemPerm);   op_be_MemPerm   = NULL;
+       free_ir_op(op_be_Copy);      op_be_Copy      = NULL;
+       free_ir_op(op_be_Keep);      op_be_Keep      = NULL;
+       free_ir_op(op_be_CopyKeep);  op_be_CopyKeep  = NULL;
+       free_ir_op(op_be_Call);      op_be_Call      = NULL;
+       free_ir_op(op_be_Return);    op_be_Return    = NULL;
+       free_ir_op(op_be_IncSP);     op_be_IncSP     = NULL;
+       free_ir_op(op_be_AddSP);     op_be_AddSP     = NULL;
+       free_ir_op(op_be_SubSP);     op_be_SubSP     = NULL;
+       free_ir_op(op_be_Start);     op_be_Start     = NULL;
+       free_ir_op(op_be_FrameAddr); op_be_FrameAddr = NULL;
+}