/*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
*
* This file is part of libFirm.
*
* @file
* @brief Normalize returns.
* @author Michael Beck
- * @version $Id$
*/
-#ifdef HAVE_CONFIG_H
#include "config.h"
-#endif
+
+#include <stdbool.h>
#include "iroptimize.h"
#include "irgraph_t.h"
#include "ircons_t.h"
#include "irnode_t.h"
#include "irgmod.h"
-#include "xmalloc.h"
-
-#define set_bit(n) (returns[(n) >> 3] |= 1 << ((n) & 7))
-#define get_bit(n) (returns[(n) >> 3] & (1 << ((n) & 7)))
-
-#undef IMAX
-#define IMAX(a, b) ((a) > (b) ? (a) : (b))
+#include "irpass.h"
+#include "util.h"
+#include "raw_bitset.h"
/*
* Normalize the Returns of a graph by creating a new End block
* reachable by all Returns and mapping all Returns to it.
*
* In pseudocode:
*
*   if (a)
*     return b;
*   else
*     return c;
*
* is transformed into
*
*   if (a)
*     res = b;
*   else
*     res = c;
*   return res;
*/
-void normalize_one_return(ir_graph *irg) {
- ir_node *endbl = get_irg_end_block(irg);
- int i, j, k, n, last_idx, n_rets, n_ret_vals = -1;
- unsigned char *returns;
+void normalize_one_return(ir_graph *irg)
+{
+ ir_node *endbl = get_irg_end_block(irg);
+ ir_entity *entity = get_irg_entity(irg);
+ ir_type *type = get_entity_type(entity);
+ int n_ret_vals = get_method_n_ress(type) + 1;
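+ /* a Return takes the memory in slot 0 plus one slot per result,
+  * hence get_method_n_ress() + 1 values */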
+ int n_rets = 0;
+ bool filter_dbgi = false;
+ dbg_info *combined_dbgi = NULL;
+ unsigned *returns;
+ int i, j, k, n, last_idx;
ir_node **in, **retvals, **endbl_in;
-
ir_node *block;
/* check whether we have more than one Return */
n = get_Block_n_cfgpreds(endbl);
if (n <= 0) {
/* The end block has no predecessors, we have an endless
loop. In that case, no Return exists. */
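+ /* nothing was changed, so all analysis info stays valid; record that
+  * the one-return property now (trivially) holds */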
+ confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
+ add_irg_properties(irg, IR_GRAPH_PROPERTY_ONE_RETURN);
return;
}
- returns = alloca((n + 7) >> 3);
- memset(returns, 0, (n + 7) >> 3);
+ rbitset_alloca(returns, n);
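+ /* mark in the bitset which end-block predecessors are Returns */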
- for (n_rets = i = 0; i < n; ++i) {
+ for (i = 0; i < n; ++i) {
ir_node *node = get_Block_cfgpred(endbl, i);
if (is_Return(node)) {
- ++n_rets;
-
- set_bit(i);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+
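+ /* merge the debug info of all Returns: keep a common dbgi as long
+  * as every Return agrees, drop to NULL once two of them differ */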
+ if (dbgi != NULL && dbgi != combined_dbgi) {
+ if (filter_dbgi) {
+ combined_dbgi = NULL;
+ } else {
+ combined_dbgi = dbgi;
+ filter_dbgi = true;
+ }
+ }
- if (n_ret_vals < 0)
- n_ret_vals = get_irn_arity(node);
+ ++n_rets;
+ rbitset_set(returns, i);
}
}
- /* there should be at least one Return node in Firm */
- if (n_rets <= 1)
+ if (n_rets <= 1) {
+ confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
+ add_irg_properties(irg, IR_GRAPH_PROPERTY_ONE_RETURN);
return;
+ }
- in = alloca(sizeof(*in) * IMAX(n_rets, n_ret_vals));
- retvals = alloca(sizeof(*retvals) * n_rets * n_ret_vals);
- endbl_in = alloca(sizeof(*endbl_in) * n);
+ in = ALLOCAN(ir_node*, MAX(n_rets, n_ret_vals));
+ retvals = ALLOCAN(ir_node*, n_rets * n_ret_vals);
+ endbl_in = ALLOCAN(ir_node*, n);
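+ /* retvals is stored "transposed": the k-th value of the j-th Return
+  * goes to retvals[j + k*n_rets], so the n_rets operands of each Phi
+  * end up contiguous at &retvals[k*n_rets] */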
last_idx = 0;
for (j = i = 0; i < n; ++i) {
ir_node *ret = get_Block_cfgpred(endbl, i);
- if (get_bit(i)) {
+ if (rbitset_is_set(returns, i)) {
ir_node *block = get_nodes_block(ret);
/* create a new Jmp for every Return and store it in the in[] array */
- in[j] = new_r_Jmp(irg, block);
+ in[j] = new_r_Jmp(block);
/* save the return values and shuffle them */
for (k = 0; k < n_ret_vals; ++k)
retvals[j + k*n_rets] = get_irn_n(ret, k);
++j;
- } else
+ } else {
endbl_in[last_idx++] = ret;
+ }
}
/* ok, create a new block with all created in's */
block = new_r_Block(irg, n_rets, in);

/* now create the Phi nodes */
for (j = i = 0; i < n_ret_vals; ++i, j += n_rets) {
- int k;
- ir_node *first;
- /* the return values are already shuffled */
-
- /* Beware: normally the Phi constructor automatically replaces a Phi(a,...a) into a
- but NOT, if a is Unknown. Here, we known that this case can be optimize also,
- so do it here */
- first = retvals[j + 0];
- for (k = 1; k < n_rets; ++k) {
- if (retvals[j + k] != first) {
- first = NULL;
- break;
- }
- }
- if (first)
- in[i] = first;
- else
- in[i] = new_r_Phi(irg, block, n_rets, &retvals[j], get_irn_mode(retvals[j]));
+ ir_mode *mode = get_irn_mode(retvals[j]);
+ in[i] = new_r_Phi(block, n_rets, &retvals[j], mode);
}
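+ /* create the single unified Return, carrying the merged debug info */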
- endbl_in[last_idx++] = new_r_Return(irg, block, in[0], n_ret_vals-1, &in[1]);
+ endbl_in[last_idx++] = new_rd_Return(combined_dbgi, block, in[0], n_ret_vals-1, &in[1]);
set_irn_in(endbl, last_idx, endbl_in);
/* invalidate analysis information:
* a new Block was added, so dominator, outs and loop are inconsistent,
- * trouts and callee-state should be still valid
- */
- set_irg_doms_inconsistent(irg);
- set_irg_outs_inconsistent(irg);
- set_irg_extblk_inconsistent(irg);
- set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);
+ * trouts and callee-state should still be valid */
+ confirm_irg_properties(irg,
+ IR_GRAPH_PROPERTY_NO_BADS
+ | IR_GRAPH_PROPERTY_NO_TUPLES
+ | IR_GRAPH_PROPERTY_NO_CRITICAL_EDGES
+ | IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE
+ | IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
+ add_irg_properties(irg, IR_GRAPH_PROPERTY_ONE_RETURN);
+}
+
+/* Create a graph pass. */
+ir_graph_pass_t *normalize_one_return_pass(const char *name)
+{
+ return def_graph_pass(name ? name : "one_ret", normalize_one_return);
}
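+
+ /* usage sketch: call normalize_one_return(irg) directly, or register the
+  * pass constructed here (default name "one_ret") with libFirm's pass
+  * manager (irpass.h) */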
/**
- * check, whether a Ret can be moved on block upwards.
+ * Check whether a Return can be moved one block upwards.
*
* In a block with a Return, all live nodes must be linked
* with the Return, otherwise they are dead (because the Return leaves
* the graph, so no more users of the other nodes can exist).
*
- * We can move a Return, if it's predecessors are Phi nodes or
+ * We can move a Return, if its predecessors are Phi nodes or
* come from another block. In the latter case, it is always possible
* to move the Return one block up, because the predecessor block must
* dominate the Return block (SSA) and then it dominates the predecessor
* block of the Return block as well.
*
* All predecessors of the Return block must be Jmps, of course, or we
- * cannot move it up, so we check this either.
+ * cannot move it up, so we add blocks if needed.
*/
-static int can_move_ret(ir_node *ret) {
+static bool can_move_ret(ir_node *ret)
+{
ir_node *retbl = get_nodes_block(ret);
int i, n = get_irn_arity(ret);
if (! is_Phi(pred) && retbl == get_nodes_block(pred)) {
/* first condition failed, found a non-Phi predecessor
* that is in the Return block */
- return 0;
+ return false;
}
}
/* check, that predecessors are Jmps */
n = get_Block_n_cfgpreds(retbl);
- for (i = 0; i < n; ++i)
- if (get_irn_op(get_Block_cfgpred(retbl, i)) != op_Jmp)
- return 0;
-
- /* if we have 0 control flow predecessors, we cannot move :-) */
- return n > 0;
+ /* we cannot move above a labeled block, as this might kill the block */
+ if (n <= 1 || get_Block_entity(retbl) != NULL)
+ return false;
+ for (i = 0; i < n; ++i) {
+ ir_node *pred = get_Block_cfgpred(retbl, i);
+
+ pred = skip_Tuple(pred);
+ if (! is_Jmp(pred) && !is_Bad(pred)) {
+ /* not a Jmp: interpose a fresh block ending in a Jmp,
+  * so the Return can be moved across this edge */
+ ir_graph *irg = get_irn_irg(retbl);
+ ir_node *block = new_r_Block(irg, 1, &pred);
+ ir_node *jmp = new_r_Jmp(block);
+ set_Block_cfgpred(retbl, i, jmp);
+ }
+ }
+ return true;
}
/*
* Normalize the Returns of a graph by moving the Returns upwards
* as much as possible. This might be preferred for code generation.
*
* In pseudocode:
*
*   if (a)
*     res = b;
*   else
*     res = c;
*   return res;
*
* is transformed into
*
*   if (a)
*     return b;
*   else
*     return c;
*/
-void normalize_n_returns(ir_graph *irg) {
- int i, j, n, n_rets, n_finals, n_ret_vals;
- ir_node *list = NULL;
- ir_node *final = NULL;
+void normalize_n_returns(ir_graph *irg)
+{
+ int i, j, n;
+ ir_node *list = NULL;
+ ir_node *final = NULL;
+ unsigned n_rets = 0;
+ unsigned n_finals = 0;
+ ir_node *endbl = get_irg_end_block(irg);
+ int n_ret_vals;
ir_node **in;
- ir_node *endbl = get_irg_end_block(irg);
- ir_node *end;
+ ir_node *end;
/*
* First, link all Returns:
* these must be predecessors of the end block. Place Returns that
* can be moved on the work list, all others on final.
*/
n = get_Block_n_cfgpreds(endbl);
- for (n_finals = n_rets = i = 0; i < n; ++i) {
+ for (i = 0; i < n; ++i) {
ir_node *ret = get_Block_cfgpred(endbl, i);
- if (is_Return(ret) && can_move_ret(ret)) {
+ if (is_Bad(ret)) {
+ continue;
+ } else if (is_Return(ret) && can_move_ret(ret)) {
/*
- * Ok, all conditions met, we can move this Return, put it
- * on our work list.
- */
+ * Ok, all conditions met, we can move this Return, put it
+ * on our work list.
+ */
set_irn_link(ret, list);
list = ret;
++n_rets;
}
}
- if (n_rets <= 0)
+ if (n_rets == 0) {
+ confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
+ add_irg_properties(irg, IR_GRAPH_PROPERTY_MANY_RETURNS);
return;
+ }
/*
* Now move the Returns upwards. We always move one block up (and create n
- * new Returns), than we check if a newly created Return can be moved even further.
- * If yes, we simply add it to our work list, else to the final list.
+ * new Returns), then we check if a newly created Return can be moved even
+ * further. If yes, we simply add it to our work list, else to the final
+ * list.
*/
end = get_irg_end(irg);
n_ret_vals = get_irn_arity(list);
- in = alloca(sizeof(*in) * n_ret_vals);
- while (list) {
- ir_node *ret = list;
- ir_node *block = get_nodes_block(ret);
- ir_node *phiM;
-
- list = get_irn_link(ret);
+ in = ALLOCAN(ir_node*, n_ret_vals);
+ while (list != NULL) {
+ ir_node *ret = list;
+ ir_node *block = get_nodes_block(ret);
+ dbg_info *dbgi = get_irn_dbg_info(ret);
+ ir_node *phiM;
+
+ list = (ir_node*)get_irn_link(ret);
--n_rets;
/* create a new Return for every predecessor of its block */
n = get_Block_n_cfgpreds(block);
for (i = 0; i < n; ++i) {
ir_node *jmp = get_Block_cfgpred(block, i);
ir_node *new_bl, *new_ret;
- if (get_irn_op(jmp) != op_Jmp)
+ if (is_Bad(jmp))
continue;
+ assert(is_Jmp(jmp));
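+ /* can_move_ret() already rewrote every non-Jmp predecessor into a
+  * fresh block ending in a Jmp, so only Jmp and Bad can occur here */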
new_bl = get_nodes_block(jmp);
- /* create the in-array for the new Ret */
+ /* create the in-array for the new Return */
for (j = 0; j < n_ret_vals; ++j) {
ir_node *pred = get_irn_n(ret, j);
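+ /* a Phi in the Return's block selects one value per control-flow
+  * predecessor; bypass it by taking its i-th operand directly */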
in[j] = (is_Phi(pred) && get_nodes_block(pred) == block) ? get_Phi_pred(pred, i) : pred;
}
- new_ret = new_r_Return(irg, new_bl, in[0], n_ret_vals - 1, &in[1]);
+ new_ret = new_rd_Return(dbgi, new_bl, in[0], n_ret_vals-1, &in[1]);
if (! is_Bad(new_ret)) {
/*
* The new Return might be Bad if its block has only Bad
* predecessors; in that case ignore it. Otherwise check whether
* it can be moved even further and sort it onto the work list
* or the final list accordingly.
*/
if (can_move_ret(new_ret)) {
set_irn_link(new_ret, list);
list = new_ret;
++n_rets;
- }
- else {
+ } else {
set_irn_link(new_ret, final);
final = new_ret;
++n_finals;
}
}

/* remove the Jmp, we have placed a Return here */
- exchange(jmp, new_r_Bad(irg));
+ exchange(jmp, new_r_Bad(irg, mode_X));
}
/*
* if the memory of the old Return is a PhiM, remove it from the
* keep-alives, else it would keep alive the block that should be
* removed
*/
phiM = get_Return_mem(ret);
if (is_Phi(phiM)) {
n = get_End_n_keepalives(end);
for (i = 0; i < n; ++i) {
if (get_End_keepalive(end, i) == phiM) {
- set_End_keepalive(end, i, new_r_Bad(irg));
+ set_End_keepalive(end, i, new_r_Bad(irg, mode_M));
break;
}
}
}
}
/*
- * Last step: Create a new endblock, with all nodes on the final
- * list as predecessors.
+ * Last step: Create a new endblock, with all nodes on the final list as
+ * predecessors.
*/
- in = alloca(sizeof(*in) * n_finals);
+ in = ALLOCAN(ir_node*, n_finals);
- for (i = 0; final; ++i, final = get_irn_link(final))
+ for (i = 0; final != NULL; ++i, final = (ir_node*)get_irn_link(final)) {
in[i] = final;
+ }
exchange(endbl, new_r_Block(irg, n_finals, in));
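+ /* the blocks of the moved Returns are now unreachable and can be
+  * cleaned up by a later unreachable-code removal */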
- /* the end block is not automatically skipped, so do it here */
- set_irg_end_block(irg, skip_Id(get_irg_end_block(irg)));
-
/* Invalidate analysis information:
- * Blocks become dead and new Returns were deleted, so dominator, outs and loop are inconsistent,
- * trouts and callee-state should be still valid
- */
- set_irg_doms_inconsistent(irg);
- set_irg_extblk_inconsistent(irg); /* may not be needed */
- set_irg_outs_inconsistent(irg);
- set_irg_loopinfo_state(current_ir_graph, loopinfo_cf_inconsistent);
+ * Blocks become dead and new Returns were deleted, so dominator, outs and
+ * loop are inconsistent; trouts and callee-state should still be valid */
+ confirm_irg_properties(irg,
+ IR_GRAPH_PROPERTY_NO_TUPLES
+ | IR_GRAPH_PROPERTY_NO_CRITICAL_EDGES
+ | IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
+ add_irg_properties(irg, IR_GRAPH_PROPERTY_MANY_RETURNS);
+}
+
+/* Create a graph pass. */
+ir_graph_pass_t *normalize_n_returns_pass(const char *name)
+{
+ return def_graph_pass(name ? name : "n_rets", normalize_n_returns);
}