#ifdef HAVE_CONFIG_H
-# include <config.h>
+# include "config.h"
#endif
#include <assert.h>
-#include <stdbool.h>
#include "irnode_t.h"
#include "irgraph_t.h"
#include "pset.h"
#include "eset.h"
#include "pdeq.h" /* Fuer code placement */
+#include "xmalloc.h"
#include "irouts.h"
#include "irloop_t.h"
/* Copy the attributes. These might point to additional data. If that
   data was allocated on the old obstack, the pointers are now dangling.
   This frees e.g. the memory of the graph_arr allocated in new_immBlock. */
- copy_attrs(n, nn);
+ copy_node_attr(n, nn);
new_backedge_info(nn);
set_new_node(n, nn);
-1,
NULL);
/* Copy the attributes. Well, there might be some in the future... */
- copy_attrs(oe, ne);
+ copy_node_attr(oe, ne);
set_new_node(oe, ne);
/* copy the Bad node */
/* arity changing: set new predecessors without bad nodes */
if (new_irn_arity < old_irn_arity) {
-    /* get new predecessor array without Block predecessor */
+    /* Get a new predecessor array. We do not resize the old one in
+       place, as we must keep it to update the Phis. */
new_in = NEW_ARR_D (ir_node *, current_ir_graph->obst, (new_irn_arity+1));
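+      /* NEW_ARR_D allocates the array on the graph's obstack; the +1
+         reserves slot 0 of the raw in array, which holds no predecessor. */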
    /* set new predecessors in array */
new_in[0] = NULL;
new_irn_n = 1;
- for (i = 1; i < old_irn_arity; i++) {
- irn = get_irn_n(n, i);
- if (!is_Bad(irn)) new_in[new_irn_n++] = irn;
+      for (i = 0; i < old_irn_arity; i++) {
+        irn = get_irn_n(n, i);
+        if (!is_Bad(irn)) {
+          new_in[new_irn_n] = irn;
+          /* new_in[new_irn_n] is predecessor new_irn_n-1: keep its
+             backedge flag at the new position */
+          if (is_backedge(n, i))
+            set_backedge(n, new_irn_n - 1);
+          else
+            set_not_backedge(n, new_irn_n - 1);
+          new_irn_n++;
+        }
}
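+      /* ARR_SHRINKLEN merely lowers the recorded length without
+         reallocating, so the backedge flags set above keep their slots. */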
+ ARR_SHRINKLEN(n->attr.block.backedge, new_irn_arity);
n->in = new_in;
} /* ir node has bad predecessors */
} /* Block is not relinked */
  /* Relink Phi predecessors if the number of predecessors changed */
if (old_irn_arity != ARR_LEN(get_irn_in(block))) {
    /* set new predecessors in array
- n->in[0] remains the same block */
+ n->in[0] remains the same block */
new_irn_arity = 1;
for(i = 1; i < old_irn_arity; i++)
- if (!is_Bad((ir_node *)old_in[i])) n->in[new_irn_arity++] = n->in[i];
+      if (!is_Bad((ir_node *)old_in[i])) {
+        n->in[new_irn_arity] = n->in[i];
+        /* n->in[i] is predecessor i-1: carry its backedge flag over
+           to its new position new_irn_arity-1 */
+        if (is_backedge(n, i - 1))
+          set_backedge(n, new_irn_arity - 1);
+        else
+          set_not_backedge(n, new_irn_arity - 1);
+        new_irn_arity++;
+      }
ARR_SETLEN(ir_node *, n->in, new_irn_arity);
+    /* one backedge flag per data predecessor; in[0] holds the block */
+    ARR_SETLEN(int, n->attr.phi_backedge, new_irn_arity - 1);
}
} /* n is a Phi node */
}
-/**
+/*
 * Removes Bad predecessors from Blocks and the corresponding
* inputs to Phi nodes as in dead_node_elimination but without
* copying the graph.
if (get_irg_outs_state(current_ir_graph) == outs_consistent)
set_irg_outs_inconsistent(current_ir_graph);
set_irg_loopinfo_inconsistent(current_ir_graph);
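+  /* Inlining duplicates Call nodes into the caller, so any callee
+     information computed before is stale. */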
+ set_irg_callee_info_state(current_ir_graph, irg_callee_info_inconsistent);
/* -- Check preconditions -- */
assert(get_irn_op(call) == op_Call);
arity = get_irn_arity(end_bl); /* arity = n_exc + n_ret */
n_res = get_method_n_ress(get_Call_type(call));
- res_pred = (ir_node **) malloc (n_res * sizeof (ir_node *));
- cf_pred = (ir_node **) malloc (arity * sizeof (ir_node *));
+ res_pred = (ir_node **) xmalloc (n_res * sizeof (ir_node *));
+ cf_pred = (ir_node **) xmalloc (arity * sizeof (ir_node *));
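+  /* Assumption: xmalloc() never returns NULL; it aborts on allocation
+     failure, which is why no error handling follows. */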
set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */
}
main_end_bl = get_irg_end_block(current_ir_graph);
main_end_bl_arity = get_irn_arity(main_end_bl);
- end_preds = (ir_node **) malloc ((n_exc + main_end_bl_arity) * sizeof (ir_node *));
+ end_preds = (ir_node **) xmalloc ((n_exc + main_end_bl_arity) * sizeof (ir_node *));
for (i = 0; i < main_end_bl_arity; ++i)
end_preds[i] = get_irn_n(main_end_bl, i);
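+  /* The n_exc exception control flow predecessors of the inlined graph are
+     appended behind these copied ones; hence the n_exc + main_end_bl_arity
+     allocation above. */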
if (i < get_Block_n_cfgpreds(end_bl)) {
bl = get_nodes_block(cf_op);
arity = get_Block_n_cfgpreds(end_bl) + get_Block_n_cfgpreds(bl) - 1;
- cf_pred = (ir_node **) malloc (arity * sizeof (ir_node *));
+ cf_pred = (ir_node **) xmalloc (arity * sizeof (ir_node *));
for (j = 0; j < i; j++)
cf_pred[j] = get_Block_cfgpred(end_bl, j);
  for (; j < i + get_Block_n_cfgpreds(bl); j++)
} inline_irg_env;
static inline_irg_env *new_inline_irg_env(void) {
- inline_irg_env *env = malloc(sizeof(inline_irg_env));
+ inline_irg_env *env = xmalloc(sizeof(inline_irg_env));
env->n_nodes = -2; /* uncount Start, End */
env->n_nodes_orig = -2; /* uncount Start, End */
env->call_nodes = eset_create();
current_ir_graph->op_pin_state_pinned = op_pin_state_pinned;
}
+/** Computes the deepest common dominance ancestor of block and dca. */
+static ir_node *calc_dca(ir_node *dca, ir_node *block)
+{
+  assert(block);
+
+  /* No ancestor seen yet: block itself is the answer. */
+  if (!dca) return block;
+
+  /* Walk up the dominator tree until both blocks are at the same depth. */
+  while (get_Block_dom_depth(block) > get_Block_dom_depth(dca))
+    block = get_Block_idom(block);
+  while (get_Block_dom_depth(dca) > get_Block_dom_depth(block))
+    dca = get_Block_idom(dca);
+
+  /* Ascend in lockstep until both paths meet. */
+  while (block != dca) {
+    block = get_Block_idom(block);
+    dca = get_Block_idom(dca);
+  }
+  return dca;
+}
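+/* Example: for the two arms of an if diamond (hypothetical blocks then_bl
+   and else_bl), calc_dca(then_bl, else_bl) yields the block containing the
+   Cond, i.e. their immediate common dominator. */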
/** Deepest common dominance ancestor of DCA and CONSUMER of PRODUCER.
* I.e., DCA is the block where we might place PRODUCER.
for (i = 0; i < irn_arity; i++) {
if (get_irn_n(consumer, i) == producer) {
-      block = get_nodes_block(get_Block_cfgpred(phi_block, i));
+      /* A Phi uses the producer at the end of the corresponding control
+         flow predecessor, not in its own block. */
+      ir_node *new_block = get_nodes_block(get_Block_cfgpred(phi_block, i));
+
+      block = calc_dca(block, new_block);
}
}
} else {
}
/* Compute the deepest common ancestor of block and dca. */
- assert(block);
- if (!dca) return block;
- while (get_Block_dom_depth(block) > get_Block_dom_depth(dca))
- block = get_Block_idom(block);
- while (get_Block_dom_depth(dca) > get_Block_dom_depth(block)) {
- dca = get_Block_idom(dca);
- }
- while (block != dca)
- { block = get_Block_idom(block); dca = get_Block_idom(dca); }
-
- return dca;
+ return calc_dca(dca, block);
}
static INLINE int get_irn_loop_depth(ir_node *n) {
  /* Add successors of all non-floating nodes to the worklist. (Those of
     floating nodes are placed already and therefore are marked.) */
for (i = 0; i < get_irn_n_outs(n); i++) {
-    if (irn_not_visited(get_irn_out(n, i))) {
-      pdeq_putr (worklist, get_irn_out(n, i));
+    ir_node *succ = get_irn_out(n, i);
+
+    if (irn_not_visited(succ)) {
+      pdeq_putr (worklist, succ);
}
}
}