X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fopt%2Freturn.c;h=7fac730cf48baad976a1bc29430cea77f5a15a48;hb=eb7ac83b870791219e7a50cc262f2f1ee9c01b39;hp=9cffb1a1866de1b1b96e846bd552d492ecc2489e;hpb=fa9649a9766ace19d23acf80c0ef6791390b0dea;p=libfirm diff --git a/ir/opt/return.c b/ir/opt/return.c index 9cffb1a18..7fac730cf 100644 --- a/ir/opt/return.c +++ b/ir/opt/return.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved. + * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved. * * This file is part of libFirm. * @@ -23,15 +23,14 @@ * @author Michael Beck * @version $Id$ */ -#ifdef HAVE_CONFIG_H #include "config.h" -#endif +#include "iroptimize.h" #include "irgraph_t.h" #include "ircons_t.h" #include "irnode_t.h" #include "irgmod.h" -#include "xmalloc.h" +#include "irpass.h" #define set_bit(n) (returns[(n) >> 3] |= 1 << ((n) & 7)) #define get_bit(n) (returns[(n) >> 3] & (1 << ((n) & 7))) @@ -59,13 +58,15 @@ * res = c; * return res; */ -void normalize_one_return(ir_graph *irg) { +void normalize_one_return(ir_graph *irg) +{ ir_node *endbl = get_irg_end_block(irg); int i, j, k, n, last_idx, n_rets, n_ret_vals = -1; unsigned char *returns; ir_node **in, **retvals, **endbl_in; - ir_node *block; + int filter_dbgi = 0; + dbg_info *combined_dbgi = NULL; /* look, if we have more than one return */ n = get_Block_n_cfgpreds(endbl); @@ -75,13 +76,23 @@ void normalize_one_return(ir_graph *irg) { return; } - returns = alloca((n + 7) >> 3); - memset(returns, 0, (n + 7) >> 3); + returns = ALLOCANZ(unsigned char, (n + 7) >> 3); for (n_rets = i = 0; i < n; ++i) { ir_node *node = get_Block_cfgpred(endbl, i); if (is_Return(node)) { + dbg_info *dbgi = get_irn_dbg_info(node); + + if (dbgi != NULL && dbgi != combined_dbgi) { + if (filter_dbgi) { + combined_dbgi = NULL; + } else { + combined_dbgi = dbgi; + filter_dbgi = 1; + } + } + ++n_rets; set_bit(i); @@ -95,9 +106,9 @@ void normalize_one_return(ir_graph *irg) { if (n_rets <= 1) return; - in = alloca(sizeof(*in) * IMAX(n_rets, n_ret_vals)); - retvals = alloca(sizeof(*retvals) * n_rets * n_ret_vals); - endbl_in = alloca(sizeof(*endbl_in) * n); + in = ALLOCAN(ir_node*, IMAX(n_rets, n_ret_vals)); + retvals = ALLOCAN(ir_node*, n_rets * n_ret_vals); + endbl_in = ALLOCAN(ir_node*, n); last_idx = 0; for (j = i = 0; i < n; ++i) { @@ -107,7 +118,7 @@ void normalize_one_return(ir_graph *irg) { ir_node *block = get_nodes_block(ret); /* create a new Jmp for every Ret and place the in in */ - in[j] = new_r_Jmp(irg, block); + in[j] = new_r_Jmp(block); /* save the return values and shuffle them */ for (k = 0; k < n_ret_vals; ++k) @@ -140,10 +151,10 @@ void normalize_one_return(ir_graph *irg) { if (first) in[i] = first; else - in[i] = new_r_Phi(irg, block, n_rets, &retvals[j], get_irn_mode(retvals[j])); + in[i] = new_r_Phi(block, n_rets, &retvals[j], get_irn_mode(retvals[j])); } - endbl_in[last_idx++] = new_r_Return(irg, block, in[0], n_ret_vals-1, &in[1]); + endbl_in[last_idx++] = new_rd_Return(combined_dbgi, block, in[0], n_ret_vals-1, &in[1]); set_irn_in(endbl, last_idx, endbl_in); @@ -152,28 +163,33 @@ void normalize_one_return(ir_graph *irg) { * trouts and callee-state should be still valid */ set_irg_doms_inconsistent(irg); - set_irg_outs_inconsistent(irg); set_irg_extblk_inconsistent(irg); - set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent); +} + +/* Create a graph pass. */ +ir_graph_pass_t *normalize_one_return_pass(const char *name) +{ + return def_graph_pass(name ? 
name : "one_ret", normalize_one_return); } /** - * check, whether a Ret can be moved on block upwards. + * Check, whether a Return can be moved on block upwards. * * In a block with a Return, all live nodes must be linked * with the Return, otherwise they are dead (because the Return leaves * the graph, so no more users of the other nodes can exists. * - * We can move a Return, if it's predecessors are Phi nodes or + * We can move a Return, if its predecessors are Phi nodes or * comes from another block. In the later case, it is always possible * to move the Return one block up, because the predecessor block must * dominate the Return block (SSA) and then it dominates the predecessor * block of the Return block as well. * * All predecessors of the Return block must be Jmp's of course, or we - * cannot move it up, so we check this either. + * cannot move it up, so we add blocks if needed. */ -static int can_move_ret(ir_node *ret) { +static int can_move_ret(ir_node *ret) +{ ir_node *retbl = get_nodes_block(ret); int i, n = get_irn_arity(ret); @@ -189,12 +205,22 @@ static int can_move_ret(ir_node *ret) { /* check, that predecessors are Jmps */ n = get_Block_n_cfgpreds(retbl); - for (i = 0; i < n; ++i) - if (get_irn_op(get_Block_cfgpred(retbl, i)) != op_Jmp) - return 0; - - /* if we have 0 control flow predecessors, we cannot move :-) */ - return n > 0; + /* we cannot move above a labeled block, as this might kill the block */ + if (n <= 1 || has_Block_entity(retbl)) + return 0; + for (i = 0; i < n; ++i) { + ir_node *pred = get_Block_cfgpred(retbl, i); + + pred = skip_Tuple(pred); + if (! is_Jmp(pred) && !is_Bad(pred)) { + /* simply place a new block here */ + ir_graph *irg = get_irn_irg(retbl); + ir_node *block = new_r_Block(irg, 1, &pred); + ir_node *jmp = new_r_Jmp(block); + set_Block_cfgpred(retbl, i, jmp); + } + } + return 1; } /* @@ -217,7 +243,8 @@ static int can_move_ret(ir_node *ret) { * else * return c; */ -void normalize_n_returns(ir_graph *irg) { +void normalize_n_returns(ir_graph *irg) +{ int i, j, n, n_rets, n_finals, n_ret_vals; ir_node *list = NULL; ir_node *final = NULL; @@ -237,9 +264,9 @@ void normalize_n_returns(ir_graph *irg) { if (is_Return(ret) && can_move_ret(ret)) { /* - * Ok, all conditions met, we can move this Return, put it - * on our work list. - */ + * Ok, all conditions met, we can move this Return, put it + * on our work list. + */ set_irn_link(ret, list); list = ret; ++n_rets; @@ -261,13 +288,14 @@ void normalize_n_returns(ir_graph *irg) { */ end = get_irg_end(irg); n_ret_vals = get_irn_arity(list); - in = alloca(sizeof(*in) * n_ret_vals); + in = ALLOCAN(ir_node*, n_ret_vals); while (list) { - ir_node *ret = list; - ir_node *block = get_nodes_block(ret); - ir_node *phiM; + ir_node *ret = list; + ir_node *block = get_nodes_block(ret); + dbg_info *dbgi = get_irn_dbg_info(ret); + ir_node *phiM; - list = get_irn_link(ret); + list = (ir_node*)get_irn_link(ret); --n_rets; n = get_Block_n_cfgpreds(block); @@ -275,19 +303,20 @@ void normalize_n_returns(ir_graph *irg) { ir_node *jmp = get_Block_cfgpred(block, i); ir_node *new_bl, *new_ret; - if (get_irn_op(jmp) != op_Jmp) + if (is_Bad(jmp)) continue; + assert(is_Jmp(jmp)); new_bl = get_nodes_block(jmp); - /* create the in-array for the new Ret */ + /* create the in-array for the new Return */ for (j = 0; j < n_ret_vals; ++j) { ir_node *pred = get_irn_n(ret, j); in[j] = (is_Phi(pred) && get_nodes_block(pred) == block) ? 
get_Phi_pred(pred, i) : pred; } - new_ret = new_r_Return(irg, new_bl, in[0], n_ret_vals - 1, &in[1]); + new_ret = new_rd_Return(dbgi, new_bl, in[0], n_ret_vals - 1, &in[1]); if (! is_Bad(new_ret)) { /* @@ -301,8 +330,7 @@ void normalize_n_returns(ir_graph *irg) { set_irn_link(new_ret, list); list = new_ret; ++n_rets; - } - else { + } else { set_irn_link(new_ret, final); final = new_ret; ++n_finals; @@ -310,7 +338,7 @@ void normalize_n_returns(ir_graph *irg) { } /* remove the Jmp, we have placed a Return here */ - exchange(jmp, new_r_Bad(irg)); + exchange(jmp, new_r_Bad(irg, mode_X)); } /* @@ -323,7 +351,7 @@ void normalize_n_returns(ir_graph *irg) { n = get_End_n_keepalives(end); for (i = 0; i < n; ++i) { if (get_End_keepalive(end, i) == phiM) { - set_End_keepalive(end, i, new_r_Bad(irg)); + set_End_keepalive(end, i, new_r_Bad(irg, mode_M)); break; } } @@ -334,9 +362,9 @@ void normalize_n_returns(ir_graph *irg) { * Last step: Create a new endblock, with all nodes on the final * list as predecessors. */ - in = alloca(sizeof(*in) * n_finals); + in = ALLOCAN(ir_node*, n_finals); - for (i = 0; final; ++i, final = get_irn_link(final)) + for (i = 0; final != NULL; ++i, final = (ir_node*)get_irn_link(final)) in[i] = final; exchange(endbl, new_r_Block(irg, n_finals, in)); @@ -350,6 +378,10 @@ void normalize_n_returns(ir_graph *irg) { */ set_irg_doms_inconsistent(irg); set_irg_extblk_inconsistent(irg); /* may not be needed */ - set_irg_outs_inconsistent(irg); - set_irg_loopinfo_state(current_ir_graph, loopinfo_cf_inconsistent); +} + +/* Create a graph pass. */ +ir_graph_pass_t *normalize_n_returns_pass(const char *name) +{ + return def_graph_pass(name ? name : "n_rets", normalize_n_returns); }
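
For readers unfamiliar with these entry points, below is a minimal usage sketch, not part of the patch. Only normalize_one_return() and normalize_n_returns() come from this file; the driver function is hypothetical, and the irprog accessors get_irp_n_irgs()/get_irp_irg() and the "irprog.h" header are assumed standard libfirm helpers used here purely for illustration.

	#include "iroptimize.h"  /* declares normalize_one_return()/normalize_n_returns() */
	#include "irprog.h"      /* assumed: get_irp_n_irgs(), get_irp_irg() */

	/* Hypothetical driver (illustration only): normalize the Return nodes
	 * of every graph in the current program.  normalize_one_return()
	 * merges all Returns of a graph into a single one behind Phi nodes;
	 * normalize_n_returns() does the inverse and pushes Returns upwards. */
	static void normalize_all_returns(void)
	{
		int i, n = get_irp_n_irgs();

		for (i = 0; i < n; ++i) {
			ir_graph *irg = get_irp_irg(i);

			normalize_one_return(irg);
			/* alternatively: normalize_n_returns(irg); */
		}
	}

The normalize_one_return_pass()/normalize_n_returns_pass() constructors added by this patch wrap the same functions via def_graph_pass(), presumably so they can be scheduled from a pass manager instead of being called directly as above.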