fix warning
diff --git a/ir/be/bepeephole.c b/ir/be/bepeephole.c
index d237790..7a349c3 100644
@@ -124,6 +124,19 @@ static void set_uses(ir_node *node)
        }
 }
 
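+/**
+ * Must be called from peephole optimisations for every newly created node
+ * (the replacement), so that its liveness information is introduced.
+ */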
+void be_peephole_new_node(ir_node *nw)
+{
+       be_liveness_introduce(lv, nw);
+}
+
+/**
+ * Must be called from peephole optimisations before a node is killed
+ * and its users are redirected to new_node, so that bepeephole can
+ * update its internal state.
+ *
+ * Note: killing a node and rewiring its users is only allowed if new_node
+ * produces the same registers as old_node.
+ */
 void be_peephole_before_exchange(const ir_node *old_node, ir_node *new_node)
 {
        const arch_register_t       *reg;
@@ -131,39 +144,47 @@ void be_peephole_before_exchange(const ir_node *old_node, ir_node *new_node)
        unsigned                     reg_idx;
        unsigned                     cls_idx;
 
-       DBG((dbg, LEVEL_1, "About to exchange %+F with %+F\n", old_node, new_node));
+       DBG((dbg, LEVEL_1, "About to kill %+F and replace it by %+F\n", old_node, new_node));
 
-       if(old_node == current_node) {
-               if(is_Proj(new_node)) {
-                       current_node = get_Proj_pred(new_node);
-               } else {
-                       current_node = new_node;
-               }
+       if (current_node == old_node) {
+               /* The node about to be processed is going to be killed: advance to
+                * its scheduling successor, so that the loop's sched_prev() step
+                * continues the walk at the killed node's predecessor. */
+               current_node = sched_next(current_node);
+               assert(!is_Bad(current_node));
        }
 
-       if(!mode_is_data(get_irn_mode(old_node)))
+       if (!mode_is_data(get_irn_mode(old_node)))
                return;
 
        reg = arch_get_irn_register(arch_env, old_node);
-       if(reg == NULL) {
+       if (reg == NULL) {
                panic("No register assigned at %+F\n", old_node);
        }
+       assert(reg == arch_get_irn_register(arch_env, new_node) &&
+              "killing a node and replacing it by one with a different register is not allowed");
+
        cls     = arch_register_get_class(reg);
        reg_idx = arch_register_get_index(reg);
        cls_idx = arch_register_class_index(cls);
 
-       if(register_values[cls_idx][reg_idx] == old_node) {
+       if (register_values[cls_idx][reg_idx] == old_node) {
                register_values[cls_idx][reg_idx] = new_node;
        }
 
        be_liveness_remove(lv, old_node);
 }
 
-void be_peephole_after_exchange(ir_node *new_node)
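+/**
+ * Kills old and replaces it by nw: updates the internal peephole state,
+ * removes old from the schedule, redirects all users of old to nw and
+ * registers nw with the liveness information.
+ */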
+void be_peephole_exchange(ir_node *old, ir_node *nw)
 {
-       be_liveness_introduce(lv, new_node);
+       be_peephole_before_exchange(old, nw);
+       sched_remove(old);
+       exchange(old, nw);
+       be_peephole_new_node(nw);
 }
 
+/**
+ * Block walker: runs the peephole optimisation on the given block.
+ */
 static void process_block(ir_node *block, void *data)
 {
        unsigned n_classes;
@@ -192,26 +213,22 @@ static void process_block(ir_node *block, void *data)
        for( ; !sched_is_begin(current_node);
                        current_node = sched_prev(current_node)) {
                ir_op             *op;
-               ir_node           *last;
-               peephole_opt_func  func;
+               peephole_opt_func  peephole_node;
 
-               if(is_Phi(current_node))
+               assert(!is_Bad(current_node));
+               if (is_Phi(current_node))
                        break;
 
                clear_defs(current_node);
                set_uses(current_node);
 
-               op   = get_irn_op(current_node);
-               func = (peephole_opt_func) op->ops.generic;
-               if(func == NULL)
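+               /* the backend stores its per-opcode peephole handler in the
+                * generic function pointer of the op's operations */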
+               op            = get_irn_op(current_node);
+               peephole_node = (peephole_opt_func)op->ops.generic;
+               if (peephole_node == NULL)
                        continue;
 
-               last = current_node;
-               func(current_node);
-               /* was the current node replaced? */
-               if(current_node != last) {
-                       set_uses(current_node);
-               }
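+               /* note: the handler may kill current_node; in that case
+                * be_peephole_before_exchange() has already advanced it */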
+               peephole_node(current_node);
+               assert(!is_Bad(current_node));
        }
 }
 
@@ -261,6 +278,49 @@ static void        kill_barriers(ir_graph *irg) {
        skip_barrier(start_blk, irg);
 }
 
+/**
+ * Tries to fold a be_IncSP node into its predecessor IncSP node.
+ * Must be run from a be_peephole_opt() context.
+ *
+ * Returns the merged node (the predecessor) on success, the unchanged
+ * node otherwise.
+ */
+ir_node *be_peephole_IncSP_IncSP(ir_node *node)
+{
+       int      pred_offs;
+       int      curr_offs;
+       int      offs;
+       ir_node *pred = be_get_IncSP_pred(node);
+
+       if (!be_is_IncSP(pred))
+               return node;
+
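+       /* only fold if node is the sole user of pred */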
+       if (get_irn_n_edges(pred) > 1)
+               return node;
+
+       pred_offs = be_get_IncSP_offset(pred);
+       curr_offs = be_get_IncSP_offset(node);
+
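+       /* BE_STACK_FRAME_SIZE_EXPAND/SHRINK are symbolic offsets that are only
+        * resolved once the final frame size is known; an expand can only be
+        * folded with the matching shrink (and vice versa), cancelling to 0 */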
+       if (pred_offs == BE_STACK_FRAME_SIZE_EXPAND) {
+               if (curr_offs != BE_STACK_FRAME_SIZE_SHRINK) {
+                       return node;
+               }
+               offs = 0;
+       } else if (pred_offs == BE_STACK_FRAME_SIZE_SHRINK) {
+               if (curr_offs != BE_STACK_FRAME_SIZE_EXPAND) {
+                       return node;
+               }
+               offs = 0;
+       } else if (curr_offs == BE_STACK_FRAME_SIZE_EXPAND ||
+                  curr_offs == BE_STACK_FRAME_SIZE_SHRINK) {
+               return node;
+       } else {
+               offs = curr_offs + pred_offs;
+       }
+
+       /* write the combined offset to pred and remove our IncSP */
+       be_set_IncSP_offset(pred, offs);
+
+       be_peephole_exchange(node, pred);
+       return pred;
+}
 
 void be_peephole_opt(be_irg_t *birg)
 {