/*
* Normalize the Returns of a graph by creating a new End block
* with One Return(Phi).
- * This is the prefered input for the if-conversion.
+ * This is the preferred input for the if-conversion.
*
* In pseudocode, it means:
*
ir_node *endbl = get_irg_end_block(irg);
int i, j, k, n, last_idx, n_rets, n_ret_vals = -1;
unsigned char *returns;
- ir_node **in, **retvals;
+ ir_node **in, **retvals, **endbl_in;
- ir_node *block, *new_ret;
+ ir_node *block;
/* look, if we have more than one return */
- n = get_Block_n_cfgpreds(endbl);
+ n = get_Block_n_cfgpreds(endbl);
+ if (n <= 0) {
+ /* The end block has no predecessors, we have an endless
+ loop. In that case, no Returns exist. */
+ return;
+ }
+
returns = alloca((n + 7) >> 3);
memset(returns, 0, (n + 7) >> 3);
for (n_rets = i = 0; i < n; ++i) {
ir_node *node = get_Block_cfgpred(endbl, i);
- if (get_irn_op(node) == op_Return) {
+ if (is_Return(node)) {
++n_rets;
set_bit(i);
if (n_rets <= 1)
return;
- in = alloca(sizeof(*in) * IMAX(n_rets, n_ret_vals));
- retvals = alloca(sizeof(*in) * n_rets * n_ret_vals);
+ in = alloca(sizeof(*in) * IMAX(n_rets, n_ret_vals));
+ retvals = alloca(sizeof(*retvals) * n_rets * n_ret_vals);
+ endbl_in = alloca(sizeof(*endbl_in) * n);
+ last_idx = 0;
for (j = i = 0; i < n; ++i) {
+ ir_node *ret = get_Block_cfgpred(endbl, i);
+
if (get_bit(i)) {
- ir_node *ret = get_Block_cfgpred(endbl, i);
ir_node *block = get_nodes_block(ret);
/* create a new Jmp for every Ret and place the in in */
for (k = 0; k < n_ret_vals; ++k)
retvals[j + k*n_rets] = get_irn_n(ret, k);
- set_Block_cfgpred(endbl, i, new_r_Bad(irg));
- last_idx = i;
-
++j;
}
+ else
+ endbl_in[last_idx++] = ret;
}
/* ok, create a new block with all created in's */
/* now create the Phi nodes */
for (j = i = 0; i < n_ret_vals; ++i, j += n_rets) {
+ int k;
+ ir_node *first;
/* the return values are already shuffled */
- in[i] = new_r_Phi(irg, block, n_rets, &retvals[j], get_irn_mode(retvals[j]));
+
+ /* Beware: normally the Phi constructor automatically replaces a Phi(a,...,a) by a,
+ but NOT if a is Unknown. Here, we know that this case can be optimized as well,
+ so do it here */
+ first = retvals[j + 0];
+ for (k = 1; k < n_rets; ++k) {
+ if (retvals[j + k] != first) {
+ first = NULL;
+ break;
+ }
+ }
+ if (first)
+ in[i] = first;
+ else
+ in[i] = new_r_Phi(irg, block, n_rets, &retvals[j], get_irn_mode(retvals[j]));
}
- new_ret = new_r_Return(irg, block, in[0], n_ret_vals-1, &in[1]);
+ endbl_in[last_idx++] = new_r_Return(irg, block, in[0], n_ret_vals-1, &in[1]);
- set_Block_cfgpred(endbl, last_idx, new_ret);
+ set_irn_in(endbl, last_idx, endbl_in);
/* invalidate analysis information:
* a new Block was added, so dominator, outs and loop are inconsistent,
* trouts and callee-state should be still valid
*/
- set_irg_dom_inconsistent(irg);
+ set_irg_doms_inconsistent(irg);
set_irg_outs_inconsistent(irg);
- set_irg_loopinfo_state(current_ir_graph, loopinfo_cf_inconsistent);
+ set_irg_extblk_inconsistent(irg);
+ set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);
}
/**
/*
* Normalize the Returns of a graph by moving
* the Returns upwards as much as possible.
- * This might be prefered for code generation.
+ * This might be preferred for code generation.
*
* In pseudocode, it means:
*
ir_node *endbl = get_irg_end_block(irg);
ir_node *end;
- /* first, link all returns */
+ /*
+ * First, link all returns:
+ * These must be predecessors of the endblock.
+ * Place Returns that can be moved on list, all others
+ * on final.
+ */
n = get_Block_n_cfgpreds(endbl);
for (n_finals = n_rets = i = 0; i < n; ++i) {
ir_node *ret = get_Block_cfgpred(endbl, i);
- if (get_irn_op(ret) == op_Return && can_move_ret(ret)) {
+ if (is_Return(ret) && can_move_ret(ret)) {
/*
* Ok, all conditions met, we can move this Return, put it
* on our work list.
n = get_Block_n_cfgpreds(block);
for (i = 0; i < n; ++i) {
- ir_node *jmp = get_Block_cfgpred(block, i);
- ir_node *new_bl = get_nodes_block(jmp);
- ir_node *new_ret;
+ ir_node *jmp = get_Block_cfgpred(block, i);
+ ir_node *new_bl, *new_ret;
+
+ if (get_irn_op(jmp) != op_Jmp)
+ continue;
+
+ new_bl = get_nodes_block(jmp);
/* create the in-array for the new Ret */
for (j = 0; j < n_ret_vals; ++j) {
ir_node *pred = get_irn_n(ret, j);
- in[j] = is_Phi(pred) ? get_Phi_pred(pred, i) : pred;
+ in[j] = (is_Phi(pred) && get_nodes_block(pred) == block) ? get_Phi_pred(pred, i) : pred;
}
new_ret = new_r_Return(irg, new_bl, in[0], n_ret_vals - 1, &in[1]);
++n_finals;
}
}
+
+ /* remove the Jmp, we have placed a Return here */
+ exchange(jmp, new_r_Bad(irg));
}
/*
exchange(endbl, new_r_Block(irg, n_finals, in));
- /* the end block is not automatically skiped, so do it here */
+ /* the end block is not automatically skipped, so do it here */
set_irg_end_block(irg, skip_Id(get_irg_end_block(irg)));
/* Invalidate analysis information:
- * Blocks become dead and new Eeturns were deleted, so dominator, outs and loop are inconsistent,
+ * Blocks become dead and new Returns were deleted, so dominator, outs and loop are inconsistent,
* trouts and callee-state should be still valid
*/
- set_irg_dom_inconsistent(irg);
+ set_irg_doms_inconsistent(irg);
set_irg_outs_inconsistent(irg);
set_irg_loopinfo_state(current_ir_graph, loopinfo_cf_inconsistent);
}