normalize some bittest constructs
diff --git a/ir/be/bepeephole.c b/ir/be/bepeephole.c
index 8410e86..aba47c7 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
+ * Copyright (C) 1995-2011 University of Karlsruhe.  All right reserved.
  *
  * This file is part of libFirm.
  *
@@ -62,14 +62,14 @@ static void clear_reg_value(ir_node *node)
        if (reg == NULL) {
                panic("No register assigned at %+F", node);
        }
-       if (arch_register_type_is(reg, virtual))
+       if (reg->type & arch_register_type_virtual)
                return;
        cls     = arch_register_get_class(reg);
        reg_idx = arch_register_get_index(reg);
        cls_idx = arch_register_class_index(cls);
 
        //assert(register_values[cls_idx][reg_idx] != NULL);
-       DBG((dbg, LEVEL_1, "Clear Register %s\n", reg->name));
+       DB((dbg, LEVEL_1, "Clear Register %s\n", reg->name));
        register_values[cls_idx][reg_idx] = NULL;
 }
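
The removed arch_register_type_is(reg, virtual) macro and the new open-coded test are equivalent: register types are bit flags, so membership is a plain mask test. A minimal self-contained sketch of the idiom; the enum values and the struct layout are illustrative stand-ins, not libfirm's actual definitions:

    #include <stdio.h>

    /* illustrative stand-ins for libfirm's register descriptor; only the
     * flag-test idiom matters, the concrete values are made up */
    typedef enum arch_register_type_t {
        arch_register_type_none    = 0,
        arch_register_type_ignore  = 1u << 0,
        arch_register_type_virtual = 1u << 1,
    } arch_register_type_t;

    typedef struct arch_register_t {
        const char          *name;
        arch_register_type_t type;
    } arch_register_t;

    int main(void)
    {
        arch_register_t reg = { "vreg0", arch_register_type_virtual };
        /* same shape as the test in the diff: a plain bitmask check */
        if (reg.type & arch_register_type_virtual)
            printf("%s is virtual, value tracking skips it\n", reg.name);
        return 0;
    }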
 
@@ -87,13 +87,13 @@ static void set_reg_value(ir_node *node)
        if (reg == NULL) {
                panic("No register assigned at %+F", node);
        }
-       if (arch_register_type_is(reg, virtual))
+       if (reg->type & arch_register_type_virtual)
                return;
        cls     = arch_register_get_class(reg);
        reg_idx = arch_register_get_index(reg);
        cls_idx = arch_register_class_index(cls);
 
-       DBG((dbg, LEVEL_1, "Set Register %s: %+F\n", reg->name, node));
+       DB((dbg, LEVEL_1, "Set Register %s: %+F\n", reg->name, node));
        register_values[cls_idx][reg_idx] = node;
 }
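
The DBG((...)) to DB((...)) switch trades one debug macro for another; both take a doubly parenthesised argument list so that a single macro parameter can carry a whole variadic call. A sketch of that double-parenthesis trick with a stand-in printer (the real macros differ in detail, e.g. DBG additionally reports a source location; treat the specifics here as assumptions):

    #include <stdarg.h>
    #include <stdio.h>

    /* stand-ins for libfirm's debug module handle and levels */
    typedef struct { int mask; } firm_dbg_module_t;
    enum { LEVEL_1 = 1, LEVEL_2 = 2 };

    static void db_print(const firm_dbg_module_t *mod, int level,
                         const char *fmt, ...)
    {
        va_list ap;
        if (!(mod->mask & level))
            return;                 /* message filtered out */
        va_start(ap, fmt);
        vprintf(fmt, ap);
        va_end(ap);
    }

    /* the inner parentheses at the call site become the argument list */
    #define DB(args) db_print args

    int main(void)
    {
        firm_dbg_module_t dbg = { LEVEL_1 };
        DB((&dbg, LEVEL_1, "Set Register %s\n", "eax")); /* printed  */
        DB((&dbg, LEVEL_2, "optimize %s\n", "IncSP"));   /* filtered */
        return 0;
    }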
 
@@ -136,14 +136,15 @@ void be_peephole_new_node(ir_node * nw)
  * Note: killing a node and rewiring is only allowed if new_node produces
  * the same registers as old_node.
  */
-void be_peephole_before_exchange(const ir_node *old_node, ir_node *new_node)
+static void be_peephole_before_exchange(const ir_node *old_node,
+                                        ir_node *new_node)
 {
        const arch_register_t       *reg;
        const arch_register_class_t *cls;
        unsigned                     reg_idx;
        unsigned                     cls_idx;
 
-       DBG((dbg, LEVEL_1, "About to exchange and kill %+F with %+F\n", old_node, new_node));
+       DB((dbg, LEVEL_1, "About to exchange and kill %+F with %+F\n", old_node, new_node));
 
        if (current_node == old_node) {
                /* next node to be processed will be killed. Its scheduling predecessor
@@ -192,20 +193,20 @@ static void process_block(ir_node *block, void *data)
        (void) data;
 
        /* construct initial register assignment */
-       n_classes = arch_env_get_n_reg_class(arch_env);
+       n_classes = arch_env->n_register_classes;
        for (i = 0; i < n_classes; ++i) {
-               const arch_register_class_t *cls    = arch_env_get_reg_class(arch_env, i);
+               const arch_register_class_t *cls    = &arch_env->register_classes[i];
                unsigned                     n_regs = arch_register_class_n_regs(cls);
                memset(register_values[i], 0, sizeof(ir_node*) * n_regs);
        }
 
        assert(lv->nodes && "live sets must be computed");
-       DBG((dbg, LEVEL_1, "\nProcessing block %+F (from end)\n", block));
+       DB((dbg, LEVEL_1, "\nProcessing block %+F (from end)\n", block));
        be_lv_foreach(lv, block, be_lv_state_end, l) {
                ir_node *node = be_lv_get_irn(lv, block, l);
                set_reg_value(node);
        }
-       DBG((dbg, LEVEL_1, "\nstart processing\n"));
+       DB((dbg, LEVEL_1, "\nstart processing\n"));
 
        /* walk the block from last insn to the first */
        current_node = sched_last(block);
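
For context around this hunk: process_block seeds the table from the block's live-end set and then visits instructions from last to first, since a value is only known to sit in its register from its definition downwards. One subtlety of such a walk is that a visit may delete the node the walk is standing on; libfirm handles this by letting the kill/exchange hooks adjust the global current_node. A generic stand-in sketch of the hazard, not libfirm's sched API:

    /* sketch: reverse walk over a doubly linked schedule where visit()
     * may unlink the entry it is given; stand-in list, not libfirm's */
    typedef struct sched_entry sched_entry;
    struct sched_entry {
        sched_entry *prev;
        sched_entry *next;
    };

    static void walk_backwards(sched_entry *last, void (*visit)(sched_entry *))
    {
        sched_entry *cur = last;
        while (cur != NULL) {
            sched_entry *prev = cur->prev; /* save before visit() mutates */
            visit(cur);
            cur = prev;
        }
    }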
@@ -226,6 +227,7 @@ static void process_block(ir_node *block, void *data)
                if (peephole_node == NULL)
                        continue;
 
+               DB((dbg, LEVEL_2, "optimize %+F\n", current_node));
                peephole_node(current_node);
                assert(!is_Bad(current_node));
        }
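
peephole_node is looked up per opcode (libfirm keeps such callbacks in the opcode's generic-function slot; backends register them before the walk). A minimal dispatch-table sketch under that assumption; the flat table, its size, and the registration helper are invented for illustration:

    typedef struct ir_node ir_node;
    typedef void (*peephole_opt_func)(ir_node *node);

    /* hypothetical flat table; libfirm instead hangs the pointer off the
     * opcode's ops structure */
    #define N_OPCODES 256
    static peephole_opt_func peephole_table[N_OPCODES];

    static void register_peephole(unsigned opcode, peephole_opt_func func)
    {
        peephole_table[opcode] = func;
    }

    static void run_peephole(unsigned opcode, ir_node *node)
    {
        peephole_opt_func f = peephole_table[opcode];
        if (f != NULL)
            f(node); /* may rewrite, replace, or kill the node */
    }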
@@ -233,13 +235,14 @@ static void process_block(ir_node *block, void *data)
 
 static void kill_node_and_preds(ir_node *node)
 {
+       ir_graph *irg = get_irn_irg(node);
        int arity, i;
 
        arity = get_irn_arity(node);
        for (i = 0; i < arity; ++i) {
                ir_node *pred = get_irn_n(node, i);
 
-               set_irn_n(node, i, new_Bad());
+               set_irn_n(node, i, new_r_Bad(irg));
                if (get_irn_n_edges(pred) != 0)
                        continue;
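
The only change in this hunk is new_Bad() to new_r_Bad(irg): the r-prefixed constructors take the graph explicitly instead of relying on the global current_ir_graph. The surrounding routine implements a disconnect-and-reap pattern; a self-contained sketch with a stand-in node type (libfirm's edge counting and kill primitives are not reproduced):

    /* sketch: cut each input edge, then recursively reap predecessors
     * that just lost their last user; stand-in types, not libfirm's */
    typedef struct node node;
    struct node {
        int    n_in;     /* number of inputs */
        node **in;       /* input array */
        int    n_users;  /* how many nodes use this one */
    };

    static node BAD;     /* placeholder sink, like libfirm's Bad node */

    static void kill_node_and_preds(node *n)
    {
        int i;
        for (i = 0; i < n->n_in; ++i) {
            node *pred = n->in[i];
            n->in[i] = &BAD;            /* drop the edge */
            if (--pred->n_users != 0)
                continue;               /* pred still has users */
            kill_node_and_preds(pred);  /* pred became dead: reap it */
        }
        n->n_in = 0;
    }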
 
@@ -254,16 +257,24 @@ static void kill_node_and_preds(ir_node *node)
 /**
  * Walk through the block schedule and skip all barrier nodes.
  */
-static void skip_barrier(ir_node *ret_blk, ir_graph *irg)
+static void skip_barrier(ir_node *block, ir_graph *irg)
 {
        ir_node *irn;
 
-       sched_foreach_reverse(ret_blk, irn) {
+       sched_foreach_reverse(block, irn) {
+               int       arity;
+               unsigned *used;
+               size_t    n_used;
                const ir_edge_t *edge, *next;
 
                if (!be_is_Barrier(irn))
                        continue;
 
+               /* track which outputs are actually used, as we have to create
+                * keep nodes for unused outputs */
+               arity = get_irn_arity(irn);
+               rbitset_alloca(used, arity);
+
                foreach_out_edge_safe(irn, edge, next) {
                        ir_node *proj = get_edge_src_irn(edge);
                        int      pn;
@@ -275,10 +286,33 @@ static void skip_barrier(ir_node *ret_blk, ir_graph *irg)
                        pn   = (int) get_Proj_proj(proj);
                        pred = get_irn_n(irn, pn);
 
+                       rbitset_set(used, pn);
+
                        edges_reroute_kind(proj, pred, EDGE_KIND_NORMAL, irg);
                        edges_reroute_kind(proj, pred, EDGE_KIND_DEP, irg);
                }
 
+               /* the barrier also had the effect of a Keep for unused inputs.
+                * we now have to create an explicit Keep for them */
+               n_used = rbitset_popcount(used, arity);
+               if (n_used < (size_t) arity) {
+                       int       n_in = arity - (int) n_used;
+                       ir_node **in   = ALLOCAN(ir_node*, n_in);
+                       int       i    = 0;
+                       int       n    = 0;
+                       ir_node  *keep;
+
+                       for (i = 0; i < arity; ++i) {
+                               if (rbitset_is_set(used, i))
+                                       continue;
+                               assert(n < n_in);
+                               in[n++] = get_irn_n(irn, i);
+                       }
+                       keep = be_new_Keep(get_nodes_block(irn), n_in, in);
+                       keep_alive(keep);
+                       sched_add_before(irn, keep);
+               }
+
                kill_node_and_preds(irn);
                break;
        }
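
The bookkeeping above relies on libfirm's raw bitsets: rbitset_alloca reserves arity bits on the stack, rbitset_set marks each barrier output that still has a Proj, and rbitset_popcount tells how many were marked, so arity minus the popcount is the number of operands that need the explicit Keep. A plain-C sketch of equivalent bitset helpers (the word layout here is an assumption, not libfirm's implementation):

    /* word-array bitset helpers, roughly what the rbitset calls do */
    #define WORD_BITS (8 * (int)sizeof(unsigned))

    static void bitset_set(unsigned *bs, int i)
    {
        bs[i / WORD_BITS] |= 1u << (i % WORD_BITS);
    }

    static int bitset_is_set(const unsigned *bs, int i)
    {
        return (bs[i / WORD_BITS] >> (i % WORD_BITS)) & 1u;
    }

    static int bitset_popcount(const unsigned *bs, int n_bits)
    {
        int i, count = 0;
        for (i = 0; i < n_bits; ++i)
            count += bitset_is_set(bs, i);
        return count;
    }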
@@ -287,10 +321,10 @@ static void skip_barrier(ir_node *ret_blk, ir_graph *irg)
 /**
  * Kill the Barrier nodes for better peephole optimization.
  */
-static void    kill_barriers(ir_graph *irg)
+static void kill_barriers(ir_graph *irg)
 {
        ir_node *end_blk = get_irg_end_block(irg);
-       ir_node *start_blk;
+       ir_node *start_blk = get_irg_start_block(irg);
        int i;
 
        /* skip the barrier on all return blocks */
@@ -298,6 +332,9 @@ static void kill_barriers(ir_graph *irg)
                ir_node *be_ret = get_Block_cfgpred(end_blk, i);
                ir_node *ret_blk = get_nodes_block(be_ret);
 
+               if (ret_blk == start_blk)
+                       continue;
+
                skip_barrier(ret_blk, irg);
        }
 
@@ -373,11 +410,10 @@ ir_node *be_peephole_IncSP_IncSP(ir_node *node)
        return pred;
 }
 
-void be_peephole_opt(be_irg_t *birg)
+void be_peephole_opt(ir_graph *irg)
 {
-       ir_graph   *irg = be_get_birg_irg(birg);
-       unsigned n_classes;
-       unsigned i;
+       unsigned  n_classes;
+       unsigned  i;
 
        /* barrier nodes are used for register allocation. They hinder
         * peephole optimizations, so remove them here. */
@@ -385,31 +421,30 @@ void be_peephole_opt(be_irg_t *birg)
 
        /* we sometimes find BadE nodes in float apps like optest_float.c or
         * kahansum.c for example... */
-       be_liveness_invalidate(birg->lv);
-       be_liveness_assure_sets(be_assure_liveness(birg));
+       be_liveness_invalidate(be_get_irg_liveness(irg));
+       be_liveness_assure_sets(be_assure_liveness(irg));
 
-       arch_env = be_get_birg_arch_env(birg);
-       lv       = be_get_birg_liveness(birg);
+       arch_env = be_get_irg_arch_env(irg);
+       lv       = be_get_irg_liveness(irg);
 
-       n_classes = arch_env_get_n_reg_class(arch_env);
-       register_values = ALLOCAN(ir_node**, n_classes);
+       n_classes = arch_env->n_register_classes;
+       register_values = XMALLOCN(ir_node**, n_classes);
        for (i = 0; i < n_classes; ++i) {
-               const arch_register_class_t *cls    = arch_env_get_reg_class(arch_env, i);
+               const arch_register_class_t *cls    = &arch_env->register_classes[i];
                unsigned                     n_regs = arch_register_class_n_regs(cls);
-               register_values[i] = ALLOCAN(ir_node*, n_regs);
+               register_values[i] = XMALLOCN(ir_node*, n_regs);
        }
 
        irg_block_walk_graph(irg, process_block, NULL, NULL);
-}
 
-void be_peephole_init(void)
-{
-       clear_irp_opcodes_generic_func();
+       for (i = 0; i < n_classes; ++i) {
+               xfree(register_values[i]);
+       }
+       xfree(register_values);
 }
 
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_peephole);
 void be_init_peephole(void)
 {
        FIRM_DBG_REGISTER(dbg, "firm.be.peephole");
 }
-
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_peephole);
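
Finally, register_values moves from ALLOCAN (alloca, stack lifetime) to XMALLOCN with matching xfree calls, presumably to avoid unbounded stack growth for targets with many register classes. A sketch of the allocate/use/free shape with stand-in helpers (XMALLOCN is modelled after libfirm's abort-on-OOM malloc wrapper; the sizes are illustrative):

    #include <stdlib.h>

    /* stand-ins for libfirm's xmalloc helpers */
    static void *xmalloc(size_t size)
    {
        void *res = malloc(size);
        if (res == NULL)
            abort();               /* the wrappers abort on OOM */
        return res;
    }
    #define XMALLOCN(type, n) ((type*)xmalloc(sizeof(type) * (n)))
    #define xfree(ptr)        free(ptr)

    typedef struct ir_node ir_node;

    int main(void)
    {
        unsigned   n_classes = 3;               /* illustrative */
        unsigned   n_regs[]  = { 8, 8, 16 };    /* illustrative */
        ir_node ***register_values = XMALLOCN(ir_node**, n_classes);
        unsigned   i;

        for (i = 0; i < n_classes; ++i)
            register_values[i] = XMALLOCN(ir_node*, n_regs[i]);

        /* ... the block walk would read and write the table here ... */

        for (i = 0; i < n_classes; ++i)
            xfree(register_values[i]);
        xfree(register_values);
        return 0;
    }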