#include "irprog_t.h"
#include "iroptimize.h"
-#include "ircons.h"
+#include "ircons_t.h"
#include "iropt_t.h"
#include "irgopt.h"
#include "irgmod.h"
static void opt_walker(ir_node *n, void *env) {
pdeq *waitq = env;
ir_node *optimized;
- ir_node *oldn = n;
-
- for (;;) {
- optimized = optimize_in_place_2(n);
- set_irn_link(optimized, NULL);
- if (optimized == n)
- break;
- n = optimized;
- }
- if (optimized != oldn) {
- enqueue_users(oldn, waitq);
- exchange(oldn, optimized);
+ optimized = optimize_in_place_2(n);
+ set_irn_link(optimized, NULL);
+
+ if (optimized != n) {
+ enqueue_users(n, waitq);
+ exchange(n, optimized);
}
}
* Copies new predecessors of old node to new node remembered in link.
* Spare the Bad predecessors of Phi and Block nodes.
*/
-void
-copy_preds(ir_node *n, void *env) {
+static void copy_preds(ir_node *n, void *env) {
ir_node *nn, *block;
int i, j, irn_arity;
(void) env;
nn = get_new_node(n);
if (is_Block(n)) {
+ /* copy the macro block header */
+ ir_node *mbh = get_Block_MacroBlock(n);
+
+ if (mbh == n) {
+ /* this block is a macroblock header */
+ set_irn_n(nn, -1, nn);
+ } else {
+ /* get the macro block header */
+ set_irn_n(nn, -1, get_new_node(mbh));
+ }
+
/* Don't copy Bad nodes. */
j = 0;
irn_arity = get_irn_arity(n);
for (i = 0; i < irn_arity; i++) {
if (! is_Bad(get_irn_n(n, i))) {
- set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
+ set_irn_n(nn, j, get_new_node(get_irn_n(n, i)));
/*if (is_backedge(n, i)) set_backedge(nn, j);*/
j++;
}
exchange(nn, old);
}
}
- } else if (get_irn_op(n) == op_Phi) {
+ } else if (is_Phi(n)) {
/* Don't copy node if corresponding predecessor in block is Bad.
The Block itself should not be Bad. */
block = get_nodes_block(n);
/* Note: from yet, the visited flag of the graph is equal to vfl + 1 */
/* visit the anchors as well */
- for (i = anchor_max - 1; i >= 0; --i) {
- ir_node *n = irg->anchors[i];
+ for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
+ ir_node *n = get_irg_anchor(irg, i);
if (n && (get_irn_visited(n) <= vfl)) {
set_irg_visited(irg, vfl);
static void
copy_graph_env(int copy_node_nr) {
ir_graph *irg = current_ir_graph;
- ir_node *old_end, *n;
+ ir_node *old_end, *new_anchor;
int i;
/* remove end_except and end_reg nodes */
/* Not all nodes remembered in irg might be reachable
from the end node. Assure their link is set to NULL, so that
we can test whether new nodes have been computed. */
- for (i = anchor_max - 1; i >= 0; --i) {
- if (irg->anchors[i])
- set_new_node(irg->anchors[i], NULL);
+ for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
+ ir_node *n = get_irg_anchor(irg, i);
+ if (n != NULL)
+ set_new_node(n, NULL);
}
/* we use the block walk flag for removing Bads from Blocks ins. */
inc_irg_block_visited(irg);
/* copy the graph */
copy_graph(irg, copy_node_nr);
- /* fix the fields in irg */
- old_end = get_irg_end(irg);
- for (i = anchor_max - 1; i >= 0; --i) {
- n = irg->anchors[i];
+ /* fix the anchor */
+ old_end = get_irg_end(irg);
+ new_anchor = new_Anchor(irg);
+
+ for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
+ ir_node *n = get_irg_anchor(irg, i);
if (n)
- irg->anchors[i] = get_new_node(n);
+ set_irn_n(new_anchor, i, get_new_node(n));
}
free_End(old_end);
+ irg->anchor = new_anchor;
+
+ /* ensure the new anchor is placed in the endblock */
+ set_irn_n(new_anchor, -1, get_irg_end_block(irg));
}
/**
/* Free memory from old unoptimized obstack */
obstack_free(graveyard_obst, 0); /* First empty the obstack ... */
- xfree (graveyard_obst); /* ... then free it. */
+ xfree(graveyard_obst); /* ... then free it. */
/* inform statistics that the run is over */
hook_dead_node_elim(irg, 0);
} else {
set_Tuple_pred(call, pn_Call_T_result, new_Bad());
}
+ /* handle the regular call */
+ set_Tuple_pred(call, pn_Call_X_regular, new_Jmp());
/* For now, we cannot inline calls with value_base */
set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
ir_node *ret, *irn;
ret = get_irn_n(end_bl, i);
irn = skip_Proj(ret);
- if (is_fragile_op(irn) || (get_irn_op(irn) == op_Raise)) {
+ if (is_fragile_op(irn) || is_Raise(irn)) {
cf_pred[n_exc] = ret;
++n_exc;
}
set_Tuple_pred(call, pn_Call_X_except, new_Bad());
set_Tuple_pred(call, pn_Call_M_except, new_Bad());
}
- set_Tuple_pred(call, pn_Call_X_regular, new_Bad());
} else {
ir_node *main_end_bl;
int main_end_bl_arity;
for (i = 0; i < n_exc; ++i)
end_preds[main_end_bl_arity + i] = cf_pred[i];
set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
- set_Tuple_pred(call, pn_Call_X_regular, new_Bad());
set_Tuple_pred(call, pn_Call_X_except, new_Bad());
set_Tuple_pred(call, pn_Call_M_except, new_Bad());
free(end_preds);
current_ir_graph = rem;
}
+typedef struct cf_env {
+ char changed; /**< flag indicating that the control flow graph has changed. */
+} cf_env;
+
/**
* Called by walker of remove_critical_cf_edges().
*
* predecessors and a block of multiple successors.
*
* @param n IR node
- * @param env Environment of walker. The changed field.
+ * @param env Environment of walker, points to a cf_env.
*/
static void walk_critical_cf_edges(ir_node *n, void *env) {
int arity, i;
ir_node *pre, *block, *jmp;
- int *changed = env;
+ cf_env *cenv = env;
ir_graph *irg = get_irn_irg(n);
/* Block has multiple predecessors */
pre = get_irn_n(n, i);
cfop = get_irn_op(skip_Proj(pre));
- /* Predecessor has multiple successors. Insert new control flow edge but
- ignore exception edges. */
- if (! is_op_fragile(cfop) && is_op_forking(cfop)) {
+
+ if (is_op_fragile(cfop)) {
+ if (cfop != op_Raise)
+ goto insert;
+ continue;
+ }
+ if (is_op_forking(cfop)) {
+ /* Predecessor has multiple successors. Insert new control flow edges. */
+insert:
/* set predecessor of new block */
block = new_r_Block(irg, 1, &pre);
/* insert new jmp node to new block */
jmp = new_r_Jmp(irg, block);
/* set successor of new block */
set_irn_n(n, i, jmp);
- *changed = 1;
+ cenv->changed = 1;
} /* predecessor has multiple successors */
} /* for all predecessors */
} /* n is a multi-entry block */
}
void remove_critical_cf_edges(ir_graph *irg) {
- int changed = 0;
+ cf_env env;
+
+ env.changed = 0;
- irg_block_walk_graph(irg, NULL, walk_critical_cf_edges, &changed);
- if (changed) {
+ irg_block_walk_graph(irg, NULL, walk_critical_cf_edges, &env);
+ if (env.changed) {
/* control flow changed */
set_irg_outs_inconsistent(irg);
set_irg_extblk_inconsistent(irg);