* @author Michael Beck
* @version $Id$
*/
-#ifdef HAVE_CONFIG_H
#include "config.h"
-#endif
#include "iroptimize.h"
#include "irgraph_t.h"
#include "ircons_t.h"
#include "irnode_t.h"
#include "irgmod.h"
-#include "xmalloc.h"
+#include "irpass.h"
#define set_bit(n) (returns[(n) >> 3] |= 1 << ((n) & 7))
#define get_bit(n) (returns[(n) >> 3] & (1 << ((n) & 7)))
* res = c;
* return res;
*/
-void normalize_one_return(ir_graph *irg) {
+void normalize_one_return(ir_graph *irg)
+{
ir_node *endbl = get_irg_end_block(irg);
int i, j, k, n, last_idx, n_rets, n_ret_vals = -1;
unsigned char *returns;
ir_node **in, **retvals, **endbl_in;
-
ir_node *block;
+ int filter_dbgi = 0;
+ dbg_info *combined_dbgi = NULL;
/* look, if we have more than one return */
n = get_Block_n_cfgpreds(endbl);
return;
}
- returns = alloca((n + 7) >> 3);
- memset(returns, 0, (n + 7) >> 3);
+ returns = ALLOCANZ(unsigned char, (n + 7) >> 3);
for (n_rets = i = 0; i < n; ++i) {
ir_node *node = get_Block_cfgpred(endbl, i);
if (is_Return(node)) {
+ dbg_info *dbgi = get_irn_dbg_info(node);
+
+ if (dbgi != NULL && dbgi != combined_dbgi) {
+ if (filter_dbgi) {
+ combined_dbgi = NULL;
+ } else {
+ combined_dbgi = dbgi;
+ filter_dbgi = 1;
+ }
+ }
+
++n_rets;
set_bit(i);
if (n_rets <= 1)
return;
- in = alloca(sizeof(*in) * IMAX(n_rets, n_ret_vals));
- retvals = alloca(sizeof(*retvals) * n_rets * n_ret_vals);
- endbl_in = alloca(sizeof(*endbl_in) * n);
+ in = ALLOCAN(ir_node*, IMAX(n_rets, n_ret_vals));
+ retvals = ALLOCAN(ir_node*, n_rets * n_ret_vals);
+ endbl_in = ALLOCAN(ir_node*, n);
last_idx = 0;
for (j = i = 0; i < n; ++i) {
ir_node *block = get_nodes_block(ret);
/* create a new Jmp for every Ret and place the in in */
- in[j] = new_r_Jmp(irg, block);
+ in[j] = new_r_Jmp(block);
/* save the return values and shuffle them */
for (k = 0; k < n_ret_vals; ++k)
if (first)
in[i] = first;
else
- in[i] = new_r_Phi(irg, block, n_rets, &retvals[j], get_irn_mode(retvals[j]));
+ in[i] = new_r_Phi(block, n_rets, &retvals[j], get_irn_mode(retvals[j]));
}
- endbl_in[last_idx++] = new_r_Return(irg, block, in[0], n_ret_vals-1, &in[1]);
+ endbl_in[last_idx++] = new_rd_Return(combined_dbgi, block, in[0], n_ret_vals-1, &in[1]);
set_irn_in(endbl, last_idx, endbl_in);
set_irg_loopinfo_inconsistent(irg);
}
+/* Create an ir_graph pass wrapping normalize_one_return(); uses the
+ * given name, or "one_ret" when name is NULL. */
+ir_graph_pass_t *normalize_one_return_pass(const char *name)
+{
+	return def_graph_pass(name ? name : "one_ret", normalize_one_return);
+}
+
/**
* Check, whether a Return can be moved on block upwards.
*
* with the Return, otherwise they are dead (because the Return leaves
 * the graph, so no more users of the other nodes can exist.
*
- * We can move a Return, if it's predecessors are Phi nodes or
+ * We can move a Return, if its predecessors are Phi nodes or
 * comes from another block. In the latter case, it is always possible
* to move the Return one block up, because the predecessor block must
* dominate the Return block (SSA) and then it dominates the predecessor
* block of the Return block as well.
*
* All predecessors of the Return block must be Jmp's of course, or we
- * cannot move it up, so we check this either.
+ * cannot move it up, so we add blocks if needed.
*/
-static int can_move_ret(ir_node *ret) {
+static int can_move_ret(ir_node *ret)
+{
ir_node *retbl = get_nodes_block(ret);
int i, n = get_irn_arity(ret);
/* check, that predecessors are Jmps */
n = get_Block_n_cfgpreds(retbl);
- for (i = 0; i < n; ++i)
- if (! is_Jmp(get_Block_cfgpred(retbl, i)))
- return 0;
-
- /* if we have 0 control flow predecessors, we cannot move :-) */
- return n > 0;
+ /* we cannot move above a labeled block, as this might kill the block */
+ if (n <= 1 || has_Block_entity(retbl))
+ return 0;
+ for (i = 0; i < n; ++i) {
+ ir_node *pred = get_Block_cfgpred(retbl, i);
+
+ pred = skip_Tuple(pred);
+ if (! is_Jmp(pred) && !is_Bad(pred)) {
+ /* simply place a new block here */
+ ir_graph *irg = get_irn_irg(retbl);
+ ir_node *block = new_r_Block(irg, 1, &pred);
+ ir_node *jmp = new_r_Jmp(block);
+ set_Block_cfgpred(retbl, i, jmp);
+ }
+ }
+ return 1;
}
/*
* else
* return c;
*/
-void normalize_n_returns(ir_graph *irg) {
+void normalize_n_returns(ir_graph *irg)
+{
int i, j, n, n_rets, n_finals, n_ret_vals;
ir_node *list = NULL;
ir_node *final = NULL;
*/
end = get_irg_end(irg);
n_ret_vals = get_irn_arity(list);
- in = alloca(sizeof(*in) * n_ret_vals);
+ in = ALLOCAN(ir_node*, n_ret_vals);
while (list) {
- ir_node *ret = list;
- ir_node *block = get_nodes_block(ret);
- ir_node *phiM;
+ ir_node *ret = list;
+ ir_node *block = get_nodes_block(ret);
+ dbg_info *dbgi = get_irn_dbg_info(ret);
+ ir_node *phiM;
- list = get_irn_link(ret);
+ list = (ir_node*)get_irn_link(ret);
--n_rets;
n = get_Block_n_cfgpreds(block);
ir_node *jmp = get_Block_cfgpred(block, i);
ir_node *new_bl, *new_ret;
- if (! is_Jmp(jmp))
+ if (is_Bad(jmp))
continue;
+ assert(is_Jmp(jmp));
new_bl = get_nodes_block(jmp);
in[j] = (is_Phi(pred) && get_nodes_block(pred) == block) ? get_Phi_pred(pred, i) : pred;
}
- new_ret = new_r_Return(irg, new_bl, in[0], n_ret_vals - 1, &in[1]);
+ new_ret = new_rd_Return(dbgi, new_bl, in[0], n_ret_vals - 1, &in[1]);
if (! is_Bad(new_ret)) {
/*
* Last step: Create a new endblock, with all nodes on the final
* list as predecessors.
*/
- in = alloca(sizeof(*in) * n_finals);
+ in = ALLOCAN(ir_node*, n_finals);
- for (i = 0; final; ++i, final = get_irn_link(final))
+ for (i = 0; final != NULL; ++i, final = (ir_node*)get_irn_link(final))
in[i] = final;
exchange(endbl, new_r_Block(irg, n_finals, in));
set_irg_doms_inconsistent(irg);
set_irg_extblk_inconsistent(irg); /* may not be needed */
set_irg_outs_inconsistent(irg);
- set_irg_loopinfo_inconsistent(current_ir_graph);
+ set_irg_loopinfo_inconsistent(irg);
+}
+
+/* Create an ir_graph pass wrapping normalize_n_returns(); uses the
+ * given name, or "n_rets" when name is NULL. */
+ir_graph_pass_t *normalize_n_returns_pass(const char *name)
+{
+	return def_graph_pass(name ? name : "n_rets", normalize_n_returns);
+}