ir_node *endbl = get_irg_end_block(irg);
int i, j, k, n, last_idx, n_rets, n_ret_vals = -1;
unsigned char *returns;
- ir_node **in, **retvals;
+ ir_node **in, **retvals, **endbl_in;
- ir_node *block, *new_ret;
+ ir_node *block;
/* look, if we have more than one return */
- n = get_Block_n_cfgpreds(endbl);
- assert(n > 0);
+ n = get_Block_n_cfgpreds(endbl);
+ if (n <= 0) {
+ /* The end block has no predecessors, so we have an endless
+ loop. In that case, no Return node exists. */
+ return;
+ }
+
returns = alloca((n + 7) >> 3);
memset(returns, 0, (n + 7) >> 3);
for (n_rets = i = 0; i < n; ++i) {
ir_node *node = get_Block_cfgpred(endbl, i);
- if (get_irn_op(node) == op_Return) {
+ if (is_Return(node)) {
++n_rets;
set_bit(i);
if (n_rets <= 1)
return;
- in = alloca(sizeof(*in) * IMAX(n_rets, n_ret_vals));
- retvals = alloca(sizeof(*in) * n_rets * n_ret_vals);
+ in = alloca(sizeof(*in) * IMAX(n_rets, n_ret_vals));
+ retvals = alloca(sizeof(*retvals) * n_rets * n_ret_vals);
+ endbl_in = alloca(sizeof(*endbl_in) * n);
+ last_idx = 0;
for (j = i = 0; i < n; ++i) {
+ ir_node *ret = get_Block_cfgpred(endbl, i);
+
if (get_bit(i)) {
- ir_node *ret = get_Block_cfgpred(endbl, i);
ir_node *block = get_nodes_block(ret);
/* create a new Jmp for every Ret and place the in in */
for (k = 0; k < n_ret_vals; ++k)
retvals[j + k*n_rets] = get_irn_n(ret, k);
- set_Block_cfgpred(endbl, i, new_r_Bad(irg));
- last_idx = i;
-
++j;
}
+ else
+ endbl_in[last_idx++] = ret;
}
/* ok, create a new block with all created in's */
/* now create the Phi nodes */
for (j = i = 0; i < n_ret_vals; ++i, j += n_rets) {
+ int k;
+ ir_node *first;
/* the return values are already shuffled */
- in[i] = new_r_Phi(irg, block, n_rets, &retvals[j], get_irn_mode(retvals[j]));
+
+ /* Beware: normally the Phi constructor automatically replaces a Phi(a,...,a) by a,
+ but NOT if a is Unknown. Here we know that this case can be optimized as well,
+ so do it here */
+ first = retvals[j + 0];
+ for (k = 1; k < n_rets; ++k) {
+ if (retvals[j + k] != first) {
+ first = NULL;
+ break;
+ }
+ }
+ if (first)
+ in[i] = first;
+ else
+ in[i] = new_r_Phi(irg, block, n_rets, &retvals[j], get_irn_mode(retvals[j]));
}
- new_ret = new_r_Return(irg, block, in[0], n_ret_vals-1, &in[1]);
+ endbl_in[last_idx++] = new_r_Return(irg, block, in[0], n_ret_vals-1, &in[1]);
- set_Block_cfgpred(endbl, last_idx, new_ret);
+ set_irn_in(endbl, last_idx, endbl_in);
/* invalidate analysis information:
* a new Block was added, so dominator, outs and loop are inconsistent,
* trouts and callee-state should be still valid
*/
- set_irg_dom_inconsistent(irg);
+ set_irg_doms_inconsistent(irg);
set_irg_outs_inconsistent(irg);
- set_irg_loopinfo_state(current_ir_graph, loopinfo_cf_inconsistent);
+ set_irg_extblk_inconsistent(irg);
+ set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);
}
/**
for (n_finals = n_rets = i = 0; i < n; ++i) {
ir_node *ret = get_Block_cfgpred(endbl, i);
- if (get_irn_op(ret) == op_Return && can_move_ret(ret)) {
+ if (is_Return(ret) && can_move_ret(ret)) {
/*
* Ok, all conditions met, we can move this Return, put it
* on our work list.
* Blocks become dead and new Returns were deleted, so dominator, outs and loop are inconsistent,
* trouts and callee-state should be still valid
*/
- set_irg_dom_inconsistent(irg);
+ set_irg_doms_inconsistent(irg);
set_irg_outs_inconsistent(irg);
set_irg_loopinfo_state(current_ir_graph, loopinfo_cf_inconsistent);
}