- implemented apply phase
diff --git a/ir/opt/cfopt.c b/ir/opt/cfopt.c
index e0cd1ac..e9497a5 100644
--- a/ir/opt/cfopt.c
+++ b/ir/opt/cfopt.c
@@ -23,9 +23,7 @@
  * @author  Goetz Lindenmaier, Michael Beck, Sebastian Hack
  * @version $Id$
  */
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
+#include "config.h"
 
 #include "iroptimize.h"
 
@@ -45,7 +43,7 @@
 #include "irvrfy.h"
 #include "iredges.h"
 
-#include "array.h"
+#include "array_t.h"
 
 #include "irouts.h"
 #include "irbackedge_t.h"
@@ -165,7 +163,7 @@ static void merge_blocks(ir_node *node, void *ctx) {
                if (!is_Block_dead(b)) {
                        new_block = equivalent_node(b);
 
-                       while (irn_not_visited(b) && (!is_Block_dead(new_block)) && (new_block != b)) {
+                       while (!irn_visited(b) && !is_Block_dead(new_block) && new_block != b) {
                                /* We would have to run gigo() if new is bad, so we
                                   promote it directly below. Nevertheless, we sometimes reach a block
                                   the first time through a dataflow node.  In this case we optimized the
@@ -421,7 +419,7 @@ static void optimize_blocks(ir_node *b, void *ctx) {
        for (i = 0, k = get_Block_n_cfgpreds(b); i < k; ++i) {
                max_preds += test_whether_dispensable(b, i);
        }
-       in = xmalloc(max_preds * sizeof(*in));
+       in = XMALLOCN(ir_node*, max_preds);
 
        /*- Fix the Phi nodes of the current block -*/
        for (phi = get_irn_link(b); phi; ) {
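
Note on the hunk above: the hand-written xmalloc(max_preds * sizeof(*in)) is replaced by libfirm's typed
allocation macro XMALLOCN(type, n), which allocates n elements of the named type, so the element type is
stated once at the call site. Roughly, the macro from xmalloc.h boils down to the sketch below; the exact
definition is paraphrased from memory, not copied from the tree:

	/* paraphrased sketch of libfirm's typed allocation macro */
	#define XMALLOCN(type, n)  ((type*)xmalloc(sizeof(type) * (n)))

	/* usage as in the hunk above */
	in = XMALLOCN(ir_node*, max_preds);
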
@@ -787,20 +785,20 @@ restart:
        for (i = j = 0; i < n; i++) {
                ir_node *ka = get_End_keepalive(end, i);
 
-               if (irn_not_visited(ka)) {
-                       ir_op *op = get_irn_op(ka);
-
-                       if ((op == op_Block) && !Block_block_visited(ka)) {
-                               /* irg_block_walk() will increase the block visited flag, but we must visit only
-                                  these blocks that are not visited yet, so decrease it first. */
-                               set_irg_block_visited(irg, get_irg_block_visited(irg) - 1);
-                               irg_block_walk(ka, optimize_blocks, remove_simple_blocks, &env.changed);
-                               mark_irn_visited(ka);
-                               in[j++] = ka;
+               if (!irn_visited(ka)) {
+                       if (is_Block(ka)) {
+                               if (!Block_block_visited(ka)) {
+                                       /* irg_block_walk() will increase the block visited flag, but we must visit only
+                                          these blocks that are not visited yet, so decrease it first. */
+                                       set_irg_block_visited(irg, get_irg_block_visited(irg) - 1);
+                                       irg_block_walk(ka, optimize_blocks, remove_simple_blocks, &env.changed);
+                                       mark_irn_visited(ka);
+                                       in[j++] = ka;
+                               }
                        } else {
                                mark_irn_visited(ka);
                                /* don't keep alive dead blocks */
-                               if (! is_Block_dead(get_nodes_block(ka)))
+                               if (!is_Bad(ka) && !is_Block_dead(get_nodes_block(ka)))
                                        in[j++] = ka;
                        }
                }
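
Note on the keep-alive hunk above: the old get_irn_op(ka) == op_Block test becomes the is_Block()
predicate, the block case is split so that blocks whose block-visited flag is already set simply fall
through, irn_not_visited(ka) becomes !irn_visited(ka) as in the merge_blocks hunk, and the new
!is_Bad(ka) guard keeps get_nodes_block() from being asked for the block of a Bad keep-alive. After the
loop the survivors sit in in[0..j-1]; a minimal sketch of how such a compacted list is typically written
back through the public set_End_keepalives() API follows. The if (j != n) wrapper is an assumption for
illustration, not code taken from this patch:

	/* hypothetical follow-up: install the j surviving keep-alives on the End node */
	if (j != n)
		set_End_keepalives(end, j, in);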