beprefalloc: Fix a copy-and-paste error in the change to use arch_register_req_is().
[libfirm] / ir / be / beprefalloc.c
index 7d19683..d0080e7 100644 (file)
@@ -60,7 +60,7 @@
 #include "unionfind.h"
 #include "pdeq.h"
 #include "hungarian.h"
-
+#include "statev.h"
 #include "beabi.h"
 #include "bechordal_t.h"
 #include "be.h"
@@ -253,17 +253,15 @@ static void give_penalties_for_limits(const ir_nodeset_t *live_nodes,
  * @param weight      the weight
  * @param node        the current node
  */
-static void check_defs(const ir_nodeset_t *live_nodes, float weight,
-                       ir_node *node)
+static void check_defs(ir_nodeset_t const *const live_nodes, float const weight, ir_node *const node, arch_register_req_t const *const req)
 {
-       const arch_register_req_t *req = arch_get_irn_register_req(node);
-       if (req->type & arch_register_req_type_limited) {
+       if (arch_register_req_is(req, limited)) {
                const unsigned *limited = req->limited;
                float           penalty = weight * DEF_FACTOR;
                give_penalties_for_limits(live_nodes, penalty, limited, node);
        }
 
-       if (req->type & arch_register_req_type_should_be_same) {
+       if (arch_register_req_is(req, should_be_same)) {
                ir_node           *insn  = skip_Proj(node);
                allocation_info_t *info  = get_allocation_info(node);
                int                arity = get_irn_arity(insn);
@@ -306,9 +304,8 @@ static void analyze_block(ir_node *block, void *data)
                if (is_Phi(node))
                        break;
 
-               ir_node *value;
-               be_foreach_definition(node, cls, value,
-                       check_defs(&live_nodes, weight, value);
+               be_foreach_definition(node, cls, value, req,
+                       check_defs(&live_nodes, weight, value, req);
                );
 
                /* mark last uses */
@@ -338,31 +335,21 @@ static void analyze_block(ir_node *block, void *data)
                be_liveness_transfer(cls, node, &live_nodes);
 
                /* update weights based on usage constraints */
-               for (int i = 0; i < arity; ++i) {
-                       ir_node *op = get_irn_n(node, i);
-                       if (!arch_irn_consider_in_reg_alloc(cls, op))
-                               continue;
-
-                       const arch_register_req_t *req
-                               = arch_get_irn_register_req_in(node, i);
-                       if (!(req->type & arch_register_req_type_limited))
+               be_foreach_use(node, cls, req, op, op_req,
+                       if (!arch_register_req_is(req, limited))
                                continue;
 
-                       const unsigned *limited = req->limited;
-                       give_penalties_for_limits(&live_nodes, weight * USE_FACTOR,
-                                                                         limited, op);
-               }
+                       give_penalties_for_limits(&live_nodes, weight * USE_FACTOR, req->limited, op);
+               );
        }
 
        ir_nodeset_destroy(&live_nodes);
 }
 
-static void congruence_def(ir_nodeset_t *live_nodes, const ir_node *node)
+static void congruence_def(ir_nodeset_t *const live_nodes, ir_node const *const node, arch_register_req_t const *const req)
 {
-       const arch_register_req_t *req = arch_get_irn_register_req(node);
-
        /* should be same constraint? */
-       if (req->type & arch_register_req_type_should_be_same) {
+       if (arch_register_req_is(req, should_be_same)) {
                const ir_node *insn     = skip_Proj_const(node);
                int            arity    = get_irn_arity(insn);
                unsigned       node_idx = get_irn_idx(node);
@@ -410,14 +397,13 @@ static void create_congruence_class(ir_node *block, void *data)
        /* check should be same constraints */
        ir_node *last_phi = NULL;
        sched_foreach_reverse(block, node) {
-               ir_node *value;
                if (is_Phi(node)) {
                        last_phi = node;
                        break;
                }
 
-               be_foreach_definition(node, cls, value,
-                       congruence_def(&live_nodes, value);
+               be_foreach_definition(node, cls, value, req,
+                       congruence_def(&live_nodes, value, req);
                );
                be_liveness_transfer(cls, node, &live_nodes);
        }
@@ -538,7 +524,7 @@ static void combine_congruence_classes(void)
  */
 static void use_reg(ir_node *node, const arch_register_t *reg, unsigned width)
 {
-       unsigned r = arch_register_get_index(reg);
+       unsigned r = reg->index;
        for (unsigned r0 = r; r0 < r + width; ++r0)
                assignments[r0] = node;
        arch_set_irn_register(node, reg);
@@ -551,7 +537,7 @@ static void free_reg_of_value(ir_node *node)
 
        const arch_register_t     *reg = arch_get_irn_register(node);
        const arch_register_req_t *req = arch_get_irn_register_req(node);
-       unsigned                   r   = arch_register_get_index(reg);
+       unsigned                   r   = reg->index;
        /* assignment->value may be NULL if a value is used at 2 inputs
         * so it gets freed twice. */
        for (unsigned r0 = r; r0 < r + req->width; ++r0) {
@@ -595,7 +581,7 @@ static bool try_optimistic_split(ir_node *to_split, ir_node *before,
        allocation_info_t *info = get_allocation_info(to_split);
        float              delta = 0;
 
-       /* stupid hack: don't optimisticallt split don't spill nodes...
+       /* stupid hack: don't optimistically split don't spill nodes...
         * (so we don't split away the values produced because of
         *  must_be_different constraints) */
        ir_node *original_insn = skip_Proj(info->original_value);
@@ -603,7 +589,7 @@ static bool try_optimistic_split(ir_node *to_split, ir_node *before,
                return false;
 
        const arch_register_t *from_reg        = arch_get_irn_register(to_split);
-       unsigned               from_r          = arch_register_get_index(from_reg);
+       unsigned               from_r          = from_reg->index;
        ir_node               *block           = get_nodes_block(before);
        float split_threshold = (float)get_block_execfreq(block) * SPLIT_DELTA;
 
@@ -670,7 +656,7 @@ static bool try_optimistic_split(ir_node *to_split, ir_node *before,
        unsigned               width = 1;
        mark_as_copy_of(copy, to_split);
        /* hacky, but correct here */
-       if (assignments[arch_register_get_index(from_reg)] == to_split)
+       if (assignments[from_reg->index] == to_split)
                free_reg_of_value(to_split);
        use_reg(copy, reg, width);
        sched_add_before(before, copy);
@@ -684,14 +670,12 @@ static bool try_optimistic_split(ir_node *to_split, ir_node *before,
 /**
  * Determine and assign a register for node @p node
  */
-static void assign_reg(const ir_node *block, ir_node *node,
-                       unsigned *forbidden_regs)
+static void assign_reg(ir_node const *const block, ir_node *const node, arch_register_req_t const *const req, unsigned *const forbidden_regs)
 {
        assert(!is_Phi(node));
        /* preassigned register? */
-       const arch_register_t     *final_reg = arch_get_irn_register(node);
-       const arch_register_req_t *req       = arch_get_irn_register_req(node);
-       unsigned                   width     = req->width;
+       arch_register_t const *final_reg = arch_get_irn_register(node);
+       unsigned         const width     = req->width;
        if (final_reg != NULL) {
                DB((dbg, LEVEL_2, "Preassignment %+F -> %s\n", node, final_reg->name));
                use_reg(node, final_reg, width);
@@ -699,12 +683,12 @@ static void assign_reg(const ir_node *block, ir_node *node,
        }
 
        /* ignore reqs must be preassigned */
-       assert (! (req->type & arch_register_req_type_ignore));
+       assert(!arch_register_req_is(req, ignore));
 
        /* give should_be_same boni */
        allocation_info_t *info    = get_allocation_info(node);
        ir_node           *in_node = skip_Proj(node);
-       if (req->type & arch_register_req_type_should_be_same) {
+       if (arch_register_req_is(req, should_be_same)) {
                float weight = (float)get_block_execfreq(block);
                int   arity  = get_irn_arity(in_node);
 
@@ -715,7 +699,7 @@ static void assign_reg(const ir_node *block, ir_node *node,
 
                        ir_node               *in        = get_irn_n(in_node, i);
                        const arch_register_t *reg       = arch_get_irn_register(in);
-                       unsigned               reg_index = arch_register_get_index(reg);
+                       unsigned               reg_index = reg->index;
 
                        /* if the value didn't die here then we should not propagate the
                         * should_be_same info */
@@ -740,7 +724,7 @@ static void assign_reg(const ir_node *block, ir_node *node,
        DB((dbg, LEVEL_2, "\n"));
 
        const unsigned *allowed_regs = normal_regs;
-       if (req->type & arch_register_req_type_limited) {
+       if (arch_register_req_is(req, limited)) {
                allowed_regs = req->limited;
        }
 
@@ -752,8 +736,7 @@ static void assign_reg(const ir_node *block, ir_node *node,
                        continue;
                /* alignment constraint? */
                if (width > 1) {
-                       if ((req->type & arch_register_req_type_aligned)
-                               && (final_reg_index % width) != 0)
+                       if (arch_register_req_is(req, aligned) && (final_reg_index % width) != 0)
                                continue;
                        bool fine = true;
                        for (unsigned r0 = r+1; r0 < r+width; ++r0) {
@@ -871,7 +854,7 @@ static void permute_values(ir_nodeset_t *live_nodes, ir_node *before,
                }
 
                /* old register has 1 user less, permutation is resolved */
-               assert(arch_register_get_index(arch_get_irn_register(src)) == old_r);
+               assert(arch_get_irn_register(src)->index == old_r);
                permutation[r] = r;
 
                assert(n_used[old_r] > 0);
@@ -1022,7 +1005,7 @@ static void determine_live_through_regs(unsigned *bitset, ir_node *node)
 
                ir_node               *op  = get_irn_n(node, i);
                const arch_register_t *reg = arch_get_irn_register(op);
-               rbitset_clear(bitset, arch_register_get_index(reg));
+               rbitset_clear(bitset, reg->index);
        }
 }
 
@@ -1037,26 +1020,20 @@ static void solve_lpp(ir_nodeset_t *live_nodes, ir_node *node,
        lpp_set_log(lpp, stdout);
 
        /** mark some edges as forbidden */
-       int arity = get_irn_arity(node);
-       for (int i = 0; i < arity; ++i) {
-               ir_node *op = get_irn_n(node, i);
-               if (!arch_irn_consider_in_reg_alloc(cls, op))
-                       continue;
-
-               const arch_register_req_t *req = arch_get_irn_register_req_in(node, i);
-               if (!(req->type & arch_register_req_type_limited))
+       be_foreach_use(node, cls, req, op, op_req,
+               if (!arch_register_req_is(req, limited))
                        continue;
 
                const unsigned        *limited     = req->limited;
                const arch_register_t *reg         = arch_get_irn_register(op);
-               unsigned               current_reg = arch_register_get_index(reg);
+               unsigned               current_reg = reg->index;
                for (unsigned r = 0; r < n_regs; ++r) {
                        if (rbitset_is_set(limited, r))
                                continue;
 
                        rbitset_set(forbidden_edges, current_reg*n_regs + r);
                }
-       }
+       );
 
        /* add all combinations, except for not allowed ones */
        for (unsigned l = 0; l < n_regs; ++l) {
@@ -1170,25 +1147,19 @@ static void enforce_constraints(ir_nodeset_t *live_nodes, ir_node *node,
         * values are involved */
        bool double_width = false;
        bool good = true;
-       int  arity = get_irn_arity(node);
-       for (int i = 0; i < arity; ++i) {
-               ir_node *op = get_irn_n(node, i);
-               if (!arch_irn_consider_in_reg_alloc(cls, op))
-                       continue;
-
+       be_foreach_use(node, cls, req, op, op_req,
                /* are there any limitations for the i'th operand? */
-               const arch_register_req_t *req = arch_get_irn_register_req_in(node, i);
                if (req->width > 1)
                        double_width = true;
                const arch_register_t *reg       = arch_get_irn_register(op);
-               unsigned               reg_index = arch_register_get_index(reg);
-               if (req->type & arch_register_req_type_aligned) {
+               unsigned               reg_index = reg->index;
+               if (arch_register_req_is(req, aligned)) {
                        if (!is_aligned(reg_index, req->width)) {
                                good = false;
                                continue;
                        }
                }
-               if (!(req->type & arch_register_req_type_limited))
+               if (!arch_register_req_is(req, limited))
                        continue;
 
                const unsigned *limited = req->limited;
@@ -1197,22 +1168,22 @@ static void enforce_constraints(ir_nodeset_t *live_nodes, ir_node *node,
                        good = false;
                        continue;
                }
-       }
+       );
 
        /* is any of the live-throughs using a constrained output register? */
-       ir_node  *value;
        unsigned *live_through_regs = NULL;
-       be_foreach_definition(node, cls, value,
-               if (req_->width > 1)
+       be_foreach_definition(node, cls, value, req,
+               (void)value;
+               if (req->width > 1)
                        double_width = true;
-               if (! (req_->type & arch_register_req_type_limited))
+               if (!arch_register_req_is(req, limited))
                        continue;
                if (live_through_regs == NULL) {
-                       rbitset_alloca(live_through_regs, n_regs);
+                       live_through_regs = rbitset_alloca(n_regs);
                        determine_live_through_regs(live_through_regs, node);
                }
-               rbitset_or(forbidden_regs, req_->limited, n_regs);
-               if (rbitsets_have_common(req_->limited, live_through_regs, n_regs))
+               rbitset_or(forbidden_regs, req->limited, n_regs);
+               if (rbitsets_have_common(req->limited, live_through_regs, n_regs))
                        good = false;
        );
 
@@ -1221,7 +1192,7 @@ static void enforce_constraints(ir_nodeset_t *live_nodes, ir_node *node,
 
        /* create these arrays if we haven't yet */
        if (live_through_regs == NULL) {
-               rbitset_alloca(live_through_regs, n_regs);
+               live_through_regs = rbitset_alloca(n_regs);
        }
 
        if (double_width) {
@@ -1258,24 +1229,19 @@ static void enforce_constraints(ir_nodeset_t *live_nodes, ir_node *node,
                }
        }
 
-       for (int i = 0; i < arity; ++i) {
-               ir_node *op = get_irn_n(node, i);
-               if (!arch_irn_consider_in_reg_alloc(cls, op))
-                       continue;
-
-               const arch_register_req_t *req = arch_get_irn_register_req_in(node, i);
-               if (!(req->type & arch_register_req_type_limited))
+       be_foreach_use(node, cls, req, op, op_req,
+               if (!arch_register_req_is(req, limited))
                        continue;
 
                const unsigned        *limited     = req->limited;
                const arch_register_t *reg         = arch_get_irn_register(op);
-               unsigned               current_reg = arch_register_get_index(reg);
+               unsigned               current_reg = reg->index;
                for (unsigned r = 0; r < n_regs; ++r) {
                        if (rbitset_is_set(limited, r))
                                continue;
                        hungarian_remove(bp, r, current_reg);
                }
-       }
+       );
 
        //hungarian_print_cost_matrix(bp, 1);
        hungarian_prepare_cost_matrix(bp, HUNGARIAN_MODE_MAXIMIZE_UTIL);
@@ -1358,16 +1324,15 @@ static void add_phi_permutations(ir_node *block, int p)
                assert(a >= 0);
 
                const arch_register_t *reg  = arch_get_irn_register(phi);
-               int                    regn = arch_register_get_index(reg);
+               int                    regn = reg->index;
                /* same register? nothing to do */
                if (regn == a)
                        continue;
 
                ir_node               *op     = pred_info->assignments[a];
                const arch_register_t *op_reg = arch_get_irn_register(op);
-               /* virtual or joker registers are ok too */
-               if ((op_reg->type & arch_register_type_joker)
-                               || (op_reg->type & arch_register_type_virtual))
+               /* Virtual registers are ok, too. */
+               if (op_reg->type & arch_register_type_virtual)
                        continue;
 
                permutation[regn] = a;
@@ -1392,7 +1357,7 @@ static void add_phi_permutations(ir_node *block, int p)
                /* we have permuted all values into the correct registers so we can
                   simply query which value occupies the phis register in the
                   predecessor */
-               int      a  = arch_register_get_index(arch_get_irn_register(phi));
+               int      a  = arch_get_irn_register(phi)->index;
                ir_node *op = pred_info->assignments[a];
                set_Phi_pred(phi, p, op);
        }
@@ -1423,9 +1388,8 @@ static void adapt_phi_prefs(ir_node *phi)
                        continue;
 
                /* give bonus for already assigned register */
-               float    weight = (float)get_block_execfreq(pred_block);
-               unsigned r      = arch_register_get_index(reg);
-               info->prefs[r] += weight * AFF_PHI;
+               float weight = (float)get_block_execfreq(pred_block);
+               info->prefs[reg->index] += weight * AFF_PHI;
        }
 }
 
@@ -1575,7 +1539,7 @@ static void allocate_coalesce_block(ir_node *block, void *data)
                if (req->cls != cls)
                        continue;
 
-               if (req->type & arch_register_req_type_ignore) {
+               if (arch_register_req_is(req, ignore)) {
                        allocation_info_t *info = get_allocation_info(node);
                        info->current_value = node;
 
@@ -1653,9 +1617,8 @@ static void allocate_coalesce_block(ir_node *block, void *data)
                ir_nodeset_insert(&live_nodes, node);
        }
 
-       unsigned *forbidden_regs; /**< collects registers which must
-                                      not be used for optimistic splits */
-       rbitset_alloca(forbidden_regs, n_regs);
+       /** Collects registers which must not be used for optimistic splits. */
+       unsigned *const forbidden_regs = rbitset_alloca(n_regs);
 
        /* handle phis... */
        assign_phi_registers(block);
@@ -1683,23 +1646,17 @@ static void allocate_coalesce_block(ir_node *block, void *data)
                rewire_inputs(node);
 
                /* we may not use registers used for inputs for optimistic splits */
-               int arity = get_irn_arity(node);
-               for (int i = 0; i < arity; ++i) {
-                       ir_node *op = get_irn_n(node, i);
-                       if (!arch_irn_consider_in_reg_alloc(cls, op))
-                               continue;
-
+               be_foreach_use(node, cls, in_req, op, op_req,
                        const arch_register_t *reg = arch_get_irn_register(op);
-                       rbitset_set(forbidden_regs, arch_register_get_index(reg));
-               }
+                       rbitset_set(forbidden_regs, reg->index);
+               );
 
                /* free registers of values last used at this instruction */
                free_last_uses(&live_nodes, node);
 
                /* assign output registers */
-               ir_node *value;
-               be_foreach_definition_(node, cls, value,
-                       assign_reg(block, value, forbidden_regs);
+               be_foreach_definition_(node, cls, value, req,
+                       assign_reg(block, value, req, forbidden_regs);
                );
        }