* Make Mux nodes from Conds where it is possible.
* @author Sebastian Hack
* @date 4.2.2005
+ * $Id$
*/
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+#ifdef HAVE_STDLIB_H
#include <stdlib.h>
+#endif
+#ifdef HAVE_STRING_H
#include <string.h>
+#endif
+#ifdef HAVE_ALLOCA_H
#include <alloca.h>
+#endif
+#ifdef HAVE_MALLOC_H
+#include <malloc.h>
+#endif
#include "irgraph_t.h"
#include "irnode_t.h"
#include "ifconv.h"
#include "irflag_t.h"
+#include "irprintf.h"
#include "debug.h"
#include "obst.h"
#include "set.h"
#include "bitset.h"
#include "bitfiddle.h"
+#include "irhooks.h"
+#include "return.h"
-#define MAX_DEPTH 4
-
-/*
- * Mux optimization routines.
- */
-
-#if 0
-static ir_node *local_optimize_mux(ir_node *mux)
-{
- int i, n;
- ir_node *res = mux;
- ir_node *sel = get_Mux_sel(mux);
- ir_node *cmp = skip_Proj(sel);
-
- /* Optimize the children */
- for(i = 1, n = get_irn_arity(mux); i < n; ++i) {
- ir_node *operand = get_irn_n(mux, i);
- if(get_irn_op(operand) == op_Mux)
- optimize_mux(operand);
- }
-
- /* If we have no cmp above the mux, get out. */
- if(is_Proj(sel) && get_irn_mode(sel) == mode_b && get_irn_opcode(cmp) == iro_Cmp) {
-
- pn_Cmp cc = get_Proj_proj(sel);
- ir_mode *mode = get_irn_mode(mux);
- ir_node *block = get_nodes_block(n);
- ir_node *cmp_left = get_Cmp_left(cmp);
- ir_node *cmp_right = get_Cmp_right(cmp);
- ir_node *mux_true = get_Mux_true(mux);
- ir_node *mux_false = get_Mux_false(mux);
-
- /*
- * Check for comparisons with signed integers.
- */
- if(mode_is_int(mode) /* We need an integral mode */
- && mode_is_signed(mode) /* which is signed */
- && cc == Lt) { /* and have to compare for < */
-
- /*
- * Mux(x:T < 0, -1, 0) -> Shrs(x, sizeof_bits(T) - 1)
- * Conditions:
- * T must be signed.
- */
- if(classify_Const(cmp_right) == CNST_NULL
- && classify_Const(mux_true) == CNST_ALL_ONE
- && classify_Const(mux_false) == CNST_NULL) {
-
- ir_mode *u_mode = find_unsigned_mode(mode);
-
- res = new_r_Shrs(current_ir_graph, block, cmp_left,
- new_r_Const_long(current_ir_graph, block, u_mode,
- get_mode_size_bits(mode) - 1),
- mode);
- }
-
- /*
- * Mux(0 < x:T, 1, 0) -> Shr(-x, sizeof_bits(T) - 1)
- * Conditions:
- * T must be signed.
- */
- else if(classify_Const(cmp_left) == CNST_NULL
- && classify_Const(mux_true) == CNST_ONE
- && classify_Const(mux_false) == CNST_NULL) {
-
- ir_mode *u_mode = find_unsigned_mode(mode);
-
- res = new_r_Shr(current_ir_graph, block,
-
- /* -x goes to 0 - x in Firm (cmp_left is 0, see the if) */
- new_r_Sub(current_ir_graph, block, cmp_left, cmp_right, mode),
-
- /* This is sizeof_bits(T) - 1 */
- new_r_Const_long(current_ir_graph, block, u_mode,
- get_mode_size_bits(mode) - 1),
- mode);
- }
- }
- }
-
- return res;
-}
-#endif
+#define MAX_DEPTH 4
/**
* check, if a node is const and return its tarval or
* Default options.
*/
static opt_if_conv_info_t default_info = {
- 4
+ MAX_DEPTH
};
/** The debugging module. */
static firm_dbg_module_t *dbg;
/**
- * A simple check for sde effects upton an opcode of a ir node.
+ * A simple check for side effects up to an opcode of an ir node.
* @param irn The ir node to check,
* @return 1 if the opcode itself may produce side effects, 0 if not.
*/
static INLINE int has_side_effects(const ir_node *irn)
{
- opcode opc = get_irn_opcode(irn);
+ ir_op *op = get_irn_op(irn);
- if(opc == iro_Cmp)
+ if (op == op_Cmp)
return 0;
return !mode_is_datab(get_irn_mode(irn));
}
+enum failure_reason_t {
+ SUCCESS = IF_RESULT_SUCCESS,
+ TO_DEEP = IF_RESULT_TOO_DEEP,
+ SIDE_EFFECTS = IF_RESULT_SIDE_EFFECT,
+ PHI_FOUND = IF_RESULT_SIDE_EFFECT_PHI
+};
+
/**
- * Decdies, if a given expression and its subexpressions
+ * Decides, if a given expression and its subexpressions
* (to a certain, also given, extent) can be moved to a block.
- * @param expr The expression to examine.
- * @param block The block where the expression should go.
- * @param depth The current depth, passed recursively. Use 0 for
- * non-recursive calls.
+ *
+ * @param expr The expression to examine.
+ * @param block The block where the expression should go.
+ * @param depth The current depth, passed recursively. Use 0 for
+ * non-recursive calls.
* @param max_depth The maximum depth to which the expression should be
* examined.
+ *
+ * @return a failure reason
*/
static int _can_move_to(ir_node *expr, ir_node *dest_block, int depth, int max_depth)
{
int i, n;
- int res = 1;
+ int res = SUCCESS;
ir_node *expr_block = get_nodes_block(expr);
-
/*
* If we are forced to look too deep into the expression,
* treat it like it could not be moved.
*/
if(depth >= max_depth) {
- res = 0;
+ res = TO_DEEP;
goto end;
}
* has side effects or anything else. It is executed on each
* path the destination block is reached.
*/
- if(block_dominates(expr_block, dest_block))
+ if (block_dominates(expr_block, dest_block))
goto end;
/*
* We cannot move phis!
*/
- if(is_Phi(expr)) {
- res = 0;
+ if (is_Phi(expr)) {
+ res = PHI_FOUND;
goto end;
}
/*
- * This should be superflous and could be converted into a assertion.
+ * This should be superfluous and could be converted into an assertion.
* The destination block _must_ dominate the block of the expression,
* else the expression could be used without its definition.
*/
- if(!block_dominates(dest_block, expr_block)) {
- res = 0;
+ if (! block_dominates(dest_block, expr_block)) {
+ res = IF_RESULT_SIDE_EFFECT;
goto end;
}
/*
* Surely, if the expression does not have a data mode, it is not
- * movable. Perhaps onw should also test the floating property of
+ * movable. Perhaps one should also test the floating property of
* the opcode/node.
*/
- if(has_side_effects(expr)) {
- res = 0;
+ if (has_side_effects(expr)) {
+ res = IF_RESULT_SIDE_EFFECT;
goto end;
}
* check them out. If one of them cannot be moved, this one
* cannot be moved either.
*/
- for(i = 0, n = get_irn_arity(expr); i < n; ++i) {
+ for (i = 0, n = get_irn_arity(expr); i < n; ++i) {
ir_node *op = get_irn_n(expr, i);
int new_depth = is_Proj(op) ? depth : depth + 1;
- if(!_can_move_to(op, dest_block, new_depth, max_depth)) {
- res = 0;
+
+ res = _can_move_to(op, dest_block, new_depth, max_depth);
+
+ if (res != SUCCESS)
goto end;
- }
}
end:
- DBG((dbg, LEVEL_5, "\t\t\t%Dcan move to %n: %d\n", depth, expr, res));
+ DBG((dbg, LEVEL_3, "\t\t\t%Dcan move to %n: %d\n", depth, expr, res));
return res;
}
* deeper into an expression than a given threshold to examine if
* it can be moved, the expression is rejected and the test returns
* false.
- * @param expr The expression to check for.
+ *
+ * @param expr The expression to check for.
* @param dest_block The destination block you want @p expr to be.
- * @param max_depth The maximum depth @p expr should be investigated.
- * @return 1, if the expression can be moved to the destination block,
- * 0 if not.
+ * @param max_depth The maximum depth @p expr should be investigated.
+ *
+ * @return return a failure reason
*/
static INLINE int can_move_to(ir_node *expr, ir_node *dest_block, int max_depth)
{
return _can_move_to(expr, dest_block, 0, max_depth);
}
+/**
+ * move a DAG given by a root node expr into a new block
+ *
+ * @param expr the root of a dag
+ * @param dest_block the destination block
+ */
static void move_to(ir_node *expr, ir_node *dest_block)
{
int i, n;
* If we reached the dominator, we are done.
* We will never put code through the dominator
*/
- if(block_dominates(expr_block, dest_block))
+ if (block_dominates(expr_block, dest_block))
return;
- for(i = 0, n = get_irn_arity(expr); i < n; ++i)
+ for (i = 0, n = get_irn_arity(expr); i < n; ++i)
move_to(get_irn_n(expr, i), dest_block);
set_nodes_block(expr, dest_block);
}
-static ir_node *common_idom(ir_node *b1, ir_node *b2)
+/**
+ * return the common dominator of two blocks
+ */
+static INLINE ir_node *common_idom(ir_node *b1, ir_node *b2)
{
if(block_dominates(b1, b2))
return b1;
else {
ir_node *p;
- for(p = b1; !block_dominates(p, b2); p = get_Block_idom(p));
+ for (p = get_Block_idom(b1); !block_dominates(p, b2); p = get_Block_idom(p));
return p;
}
}
*/
typedef struct _cond_t {
ir_node *cond; /**< The cond node. */
- ir_node *mux; /**< The mux node, that will be generated for this cond. */
- struct list_head list; /**< List head which is used for queueing this cond
+ struct list_head list; /**< List head which is used for queuing this cond
into the cond bunch it belongs to. */
- unsigned in_list : 1;
+ unsigned is_new : 1;
+ unsigned totally_covers : 1;
struct _cond_t *link;
long visited_nr;
reached. It is -1, if this branch is
only reached through another cond. */
- ir_node *masked_by; /**< If this cond's branch is only reached
- through another cond, we store this
- cond ir_node here. */
+ struct _cond_t *masked_by; /**< If this cond's branch is only reached
+ through another cond, we store this
+ cond ir_node here. */
} cases[2];
} cond_t;
+/**
+ * retrieve the conditional information from a Cond node
+ */
static INLINE cond_t *get_cond(ir_node *irn, set *cond_set)
{
cond_t templ;
typedef void (cond_walker_t)(cond_t *cond, void *env);
static void _walk_conds(cond_t *cond, cond_walker_t *pre, cond_walker_t *post,
- long visited_nr, set *cond_set, void *env)
+ long visited_nr, void *env)
{
int i;
pre(cond, env);
for(i = 0; i < 2; ++i) {
- cond_t *c = get_cond(cond->cases[i].masked_by, cond_set);
+ cond_t *c = cond->cases[i].masked_by;
if(c)
- _walk_conds(c, pre, post, visited_nr, cond_set, env);
+ _walk_conds(c, pre, post, visited_nr, env);
}
if(post)
post(cond, env);
}
-static void walk_conds(cond_t *cond, cond_walker_t *pre, cond_walker_t *post,
- set *cond_set, void *env)
-{
- static long visited_nr = 0;
+static long cond_visited_nr = 0;
- _walk_conds(cond, pre, post, ++visited_nr, cond_set, env);
+static void walk_conds(cond_t *cond, cond_walker_t *pre, cond_walker_t *post, void *env)
+{
+ _walk_conds(cond, pre, post, ++cond_visited_nr, env);
}
static void link_conds(cond_t *cond, void *env)
typedef struct _cond_info_t {
struct list_head list; /**< Used to list all of these structs per class. */
- struct list_head roots; /**< A list of non-depending conds. Two conds are
- independent, if yot can not reach the one from the
- other (all conds in this list have to dominate the
- block this struct is attached to. */
+ struct list_head roots; /**< A list of non-depending Conds. Two Conds are
+ independent, if it's not possible to reach one from the
+ other (all Conds in this list have to dominate the
+ block this struct is attached to). */
- set *cond_set; /**< A set of all dominating reachable conds. */
+ ir_node *first_phi; /**< The first phi node this cond info was made for. */
+ set *cond_set; /**< A set of all dominating reachable Conds. */
} cond_info_t;
/**
* @see find_conds.
*/
-static void _find_conds(ir_node *irn, ir_node *base_block, long visited_nr,
- ir_node *dominator, ir_node *masked_by, int pos, int depth, cond_info_t *ci)
+static void _find_conds(ir_node *irn, unsigned long visited_nr,
+ ir_node *dominator, cond_t *masked_by, int pos, int depth, cond_info_t *ci)
{
ir_node *block;
+ int saw_select_cond = 0;
block = get_nodes_block(irn);
- if(block_dominates(dominator, block)) {
- ir_node *cond = NULL;
+ /*
+ * Only check this block if it is dominated by the specified
+ * dominator or it has not been visited yet.
+ */
+ if (block_dominates(dominator, block) && get_Block_block_visited(block) < visited_nr) {
+ cond_t *res = masked_by;
int i, n;
/* check, if we're on a ProjX
if(is_Proj(irn) && get_irn_mode(irn) == mode_X) {
int proj = get_Proj_proj(irn);
- cond = get_Proj_pred(irn);
+ ir_node *cond = get_Proj_pred(irn);
+
+ /* true, if the mode is a mode_b cond _NO_ switch cond */
+ int is_modeb_cond = get_irn_opcode(cond) == iro_Cond
+ && get_irn_mode(get_Cond_selector(cond)) == mode_b;
+
+ saw_select_cond = !is_modeb_cond;
/* Check, if the pred of the proj is a Cond
* with a Projb as selector.
*/
- if(get_irn_opcode(cond) == iro_Cond
- && get_irn_mode(get_Cond_selector(cond)) == mode_b) {
-
- cond_t *res, c;
+ if(is_modeb_cond) {
+ cond_t c;
memset(&c, 0, sizeof(c));
c.cond = cond;
- INIT_LIST_HEAD(&c.list);
+ c.is_new = 1;
c.cases[0].pos = -1;
c.cases[1].pos = -1;
/* get or insert the cond info into the set. */
res = set_insert(ci->cond_set, &c, sizeof(c), HASH_PTR(cond));
- if(!res->in_list) {
- res->in_list = 1;
- list_add(&res->list, &ci->roots);
- }
-
/*
- * Link it to the cond ir_node. We need that later, since
- * one cond masks the other we want to retreive the cond_t
- * data from the masking cond ir_node.
+ * If this cond is already masked by the masked_by cond
+ * return immediately, since we don't have anything to add.
*/
- set_irn_link(cond, res);
+ if(masked_by && res->cases[proj].masked_by == masked_by)
+ return;
+
+ if(res->is_new) {
+ res->is_new = 0;
+ list_add(&res->list, &ci->roots);
+ }
/*
* Set masked by (either NULL or another cond node).
* to this branch, so we choose -1.
*/
res->cases[proj].masked_by = masked_by;
+
if(!masked_by)
res->cases[proj].pos = pos;
* root list of the conf trees.
*/
else {
- cond_t *m = get_cond(masked_by, ci->cond_set);
- struct list_head *list = &m->list;
-
- /*
- * If this cond was not removed before,
- * remove it now from the list.
- */
- if(!list_empty(list))
- list_del_init(list);
+ assert(res->cases[proj].pos < 0);
+ list_del_init(&masked_by->list);
}
- DBG((dbg, LEVEL_5, "%{firm:indent}found cond %n (%s branch) "
+ DBG((dbg, LEVEL_2, "%D%n (%s branch) "
"for pos %d in block %n reached by %n\n",
- depth, cond, get_Proj_proj(irn) ? "true" : "false", pos, block, masked_by));
+ depth, cond, proj ? "true" : "false", pos,
+ block, masked_by ? masked_by->cond : NULL));
}
-
- /*
- * We set cond to NULL if the cond was an switch cond, since it is
- * passed (as the masked_by argument) to recursive calls to this
- * function. We do not consider switch conds as masking conds for
- * other conds.
- */
- else
- cond = NULL;
}
- if(get_Block_block_visited(block) < visited_nr) {
+ if(get_Block_block_visited(block) < visited_nr && !saw_select_cond) {
set_Block_block_visited(block, visited_nr);
* as given by the caller. We also increase the depth for the
* recursively called functions.
*/
- _find_conds(pred, base_block, visited_nr, dominator, cond, pos, depth + 1, ci);
+ _find_conds(pred, visited_nr, dominator, res, pos, depth + (res != masked_by), ci);
}
}
}
* A convenience function for _find_conds.
* It sets some parameters needed for recursion to appropriate start
* values. Always use this function.
- * @param irn The node to start looking for conds from. This might
- * be the phi node we are investigating.
- * @param conds The set to record the found conds in.
+ *
+ * @param irn The node to start looking for Conds from. This might
+ * be the phi node we are investigating.
+ * @param conds The set to record the found Conds in.
*/
static INLINE void find_conds(ir_node *irn, cond_info_t *ci)
{
int i, n;
- long visited_nr;
+ unsigned long visited_nr;
ir_node *block = get_nodes_block(irn);
-
- inc_irg_block_visited(current_ir_graph);
- visited_nr = get_irg_block_visited(current_ir_graph);
+ ir_node *dom = get_Block_idom(block);
for(i = 0, n = get_irn_arity(block); i < n; ++i) {
ir_node *pred = get_irn_n(block, i);
- ir_node *pred_block = get_nodes_block(pred);
- ir_node *dom = get_Block_idom(pred_block);
- /*
- * If the pred_block is the start block, its idom is NULL
- * so we treat the block itself as its immediate dominator.
- */
- if(dom == NULL)
- dom = pred_block;
+ inc_irg_block_visited(current_ir_graph);
+ visited_nr = get_irg_block_visited(current_ir_graph);
+ set_Block_block_visited(block, visited_nr);
- _find_conds(pred, pred_block, visited_nr, dom, NULL, i, 0, ci);
+ DBG((dbg, LEVEL_2, "find conds at pred %d (%n) and idom %n\n", i, pred, dom));
+ _find_conds(pred, visited_nr, dom, NULL, i, 0, ci);
}
}
-
/**
* Make the mux for a given cond.
* @param phi The phi node which shall be replaced by a mux.
* @param cond The cond information.
* @return The mux node made for this cond.
*/
-static ir_node *make_mux_on_demand(ir_node *phi, ir_node *dom, cond_t *cond, set *cond_set)
+static ir_node *make_mux_on_demand(ir_node *phi, ir_node *dom, cond_t *cond,
+ int max_depth, ir_node **mux, bitset_t *positions, int *muxes_made, long visited_nr)
{
- int i;
+ int i, can_move[2];
ir_node *projb = get_Cond_selector(cond->cond);
+ ir_node *bl = get_nodes_block(cond->cond);
ir_node *operands[2];
+ int set[2];
- operands[0] = NULL;
- operands[1] = NULL;
- cond->mux = NULL;
-
+ cond->visited_nr = visited_nr;
+ DBG((dbg, LEVEL_2, "%n\n", cond->cond));
for(i = 0; i < 2; ++i) {
+ cond_t *masked_by = cond->cases[i].masked_by;
+ int pos = cond->cases[i].pos;
+
+ operands[i] = NULL;
+ set[i] = -1;
/*
- * If this cond branch is masked by another cond, make the mux
- * for that cond first, since the mux for this cond takes
+ * If this Cond branch is masked by another cond, make the mux
+ * for that Cond first, since the Mux for this cond takes
* it as an operand.
*/
- if(cond->cases[i].masked_by) {
- cond_t templ;
- cond_t *masking_cond;
-
- templ.cond = cond->cases[i].masked_by;
- masking_cond = set_find(cond_set, &templ, sizeof(templ), HASH_PTR(templ.cond));
-
- operands[i] = make_mux_on_demand(phi, dom, masking_cond, cond_set);
+ if(masked_by) {
+ assert(pos < 0);
+ DBG((dbg, LEVEL_2, "\tmasked by: %n\n", masked_by->cond));
+ if(masked_by->visited_nr < visited_nr)
+ operands[i] = make_mux_on_demand(phi, dom, masked_by, max_depth, mux, positions, muxes_made, visited_nr);
}
/*
* If this cond branch is not masked by another cond, take
* the corresponding phi operand as an operand to the mux.
*/
- else {
- if(cond->cases[i].pos >= 0)
- operands[i] = get_irn_n(phi, cond->cases[i].pos);
+ else if(pos >= 0) {
+ operands[i] = get_irn_n(phi, pos);
+ set[i] = pos;
}
}
/*
* Move the operands to the dominator block if the cond
- * made sense. Some conds found are not suitable for making a mux
+ * made sense. Some Conds found are not suitable for making a mux
* out of them, since one of their branches cannot be reached from
* the phi block. In that case we do not make a mux and return NULL.
*/
- if(operands[0] && operands[1]) {
- move_to(operands[0], dom);
- move_to(operands[1], dom);
- move_to(projb, dom);
-
- /* Make the mux. */
- cond->mux = new_r_Mux(current_ir_graph, dom, projb,
- operands[0], operands[1], get_irn_mode(operands[0]));
+ if(operands[0] && operands[1]) {
+ if (operands[0] == operands[1]) {
+ /* there is no gain in using mux in this case, as
+ it will be optimized away. We will NOT move the
+ content of the blocks either
+ */
+ for (i = 0; i < 2; ++i)
+ if(set[i] >= 0)
+ bitset_set(positions, set[i]);
+
+ *mux = operands[0];
+ return *mux;
+ }
+
+ can_move[0] = can_move_to(operands[0], bl, max_depth);
+ can_move[1] = can_move_to(operands[1], bl, max_depth);
+
+ if (can_move[0] == SUCCESS && can_move[1] == SUCCESS) {
+ move_to(operands[0], bl);
+ move_to(operands[1], bl);
+
+ /* Make the mux. */
+ *mux = new_r_Mux(current_ir_graph, bl, projb,
+ operands[0], operands[1], get_irn_mode(operands[0]));
+
+ *muxes_made += 1;
+
+ DBG((dbg, LEVEL_2, "\t%n(%n, %n, %n)[%d, %d]\n",
+ *mux, projb, operands[0], operands[1], set[0], set[1]));
+
+ for(i = 0; i < 2; ++i)
+ if(set[i] >= 0) {
+ bitset_set(positions, set[i]);
+
+ /* we have done one */
+ hook_if_conversion(current_ir_graph, phi, set[i], *mux, IF_RESULT_SUCCESS);
+ }
+ }
+ else {
+ if(can_move[0] != SUCCESS)
+ hook_if_conversion(current_ir_graph, phi, set[0], NULL, can_move[0]);
+ if(can_move[1] != SUCCESS)
+ hook_if_conversion(current_ir_graph, phi, set[1], NULL, can_move[1]);
+ }
}
-
- return cond->mux;
+ else {
+ if(operands[0])
+ hook_if_conversion(current_ir_graph, phi, set[0], NULL, IF_RESULT_BAD_CF);
+ if(operands[1])
+ hook_if_conversion(current_ir_graph, phi, set[1], NULL, IF_RESULT_BAD_CF);
+ }
+
+ return *mux;
}
typedef struct _phi_info_t {
* @param irn A phi node.
* @param info Parameters for the if conversion algorithm.
*/
-static void check_out_phi(phi_info_t *phi_info, opt_if_conv_info_t *info)
+static int check_out_phi(phi_info_t *phi_info, opt_if_conv_info_t *info)
{
int max_depth = info->max_depth;
- int i, n;
ir_node *irn = phi_info->irn;
ir_node *block, *nw;
cond_info_t *cond_info = phi_info->cond_info;
cond_t *cond;
- int arity;
-
- set *cond_set = cond_info->cond_set;
+ int i, arity;
+ int muxes_made = 0;
bitset_t *positions;
block = get_nodes_block(irn);
arity = get_irn_arity(irn);
+ positions = bitset_alloca(arity);
assert(is_Phi(irn));
assert(get_irn_arity(irn) == get_irn_arity(block));
assert(arity > 0);
- positions = bitset_alloca(arity);
-
- DBG((dbg, LEVEL_5, "phi candidate: %n\n", irn));
+ DBG((dbg, LEVEL_2, "phi candidate: %n\n", irn));
list_for_each_entry(cond_t, cond, &cond_info->roots, list) {
- int cannot_move = 0;
- ir_node *cidom = get_nodes_block(cond->cond);
-
+ ir_node *cidom = block;
+ ir_node *mux = NULL;
cond_t *p, *head = NULL;
+ long pos;
- DBG((dbg, LEVEL_5, "\tcond root: %n\n", cond->cond));
-
- /* clear the position array. */
bitset_clear_all(positions);
+ DBG((dbg, LEVEL_2, "\tcond root: %n\n", cond->cond));
/*
* Link all conds which are in the subtree of
* the current cond in the list together.
*/
- walk_conds(cond, link_conds, NULL, cond_set, &head);
-
- for(p = head, n = 0; p; p = p->link)
- cidom = common_idom(cidom, get_nodes_block(p->cond));
-
- DBG((dbg, LEVEL_5, "\tcommon idom: %n\n", cidom));
-
- for(p = head, n = 0; p && !cannot_move; p = p->link) {
-
- if(!can_move_to(get_Cond_selector(p->cond), cidom, max_depth)) {
- DBG((dbg, LEVEL_5, "\tcannot move selector of %n\n", p->cond));
- cannot_move = 1;
- break;
- }
+ walk_conds(cond, link_conds, NULL, &head);
+ cidom = block;
+ for(p = head; p; p = p->link) {
for(i = 0; i < 2; ++i) {
int pos = p->cases[i].pos;
-
- if(pos != -1) {
- bitset_set(positions, pos);
-
- if(!can_move_to(get_irn_n(irn, pos), cidom, max_depth)) {
- cannot_move = 1;
- DBG((dbg, LEVEL_5, "\tcannot move phi operand %d\n", pos));
- break;
- }
-
- DBG((dbg, LEVEL_5, "\tcan move phi operand %d\n", pos));
- }
+ if(pos != -1)
+ cidom = common_idom(cidom, get_nodes_block(get_irn_n(block, pos)));
}
}
- /*
- * If all operands and the cond condition can be moved to
- * the common immediate dominator, move them there, make a
- * mux and associate the corresponding phi operands with
- * the mux.
- */
- if(!cannot_move) {
- ir_node *mux = make_mux_on_demand(irn, cidom, cond, cond_info->cond_set);
-
- /* If a mux could be made, associate the phi operands with it. */
- DBG((dbg, LEVEL_5, "\tassociating:\n"));
- if(mux) {
- unsigned long elm;
- bitset_foreach(positions, elm) {
- DBG((dbg, LEVEL_5, "\t\t%d\n", positions[i]));
- set_irn_n(irn, (int) elm, mux);
- }
- }
+ DBG((dbg, LEVEL_2, "\tcommon idom: %n\n", cidom));
+ make_mux_on_demand(irn, cidom, cond, max_depth, &mux, positions, &muxes_made, ++cond_visited_nr);
+
+ if(mux) {
+ bitset_foreach(positions, pos)
+ set_irn_n(irn, (int) pos, mux);
}
}
nw = optimize_in_place_2(irn);
if(nw != irn)
exchange(irn, nw);
+
+ return muxes_made;
}
typedef struct _cond_walk_info_t {
if(!ci) {
ci = obstack_alloc(cwi->obst, sizeof(*ci));
ci->cond_set = new_set(cond_cmp, log2_ceil(get_irn_arity(block)));
+ ci->first_phi = irn;
+
INIT_LIST_HEAD(&ci->roots);
INIT_LIST_HEAD(&ci->list);
*/
list_add(&cwi->cond_info_head, &ci->list);
- DBG((dbg, LEVEL_5, "searching conds at %n\n", irn));
+ DBG((dbg, LEVEL_2, "searching conds at %n\n", irn));
/*
* Fill the set with conds we find on the way from
*/
if(set_count(ci->cond_set) == 0) {
del_set(ci->cond_set);
- ci->cond_set = NULL;
+ list_del(&ci->list);
obstack_free(cwi->obst, ci);
+ ci = NULL;
}
}
else
- DBG((dbg, LEVEL_5, "conds already computed for %n\n", irn));
+ DBG((dbg, LEVEL_2, "conds already computed for %n (look at %n)\n", irn, ci->first_phi));
set_irn_link(block, ci);
}
}
-#if 0
-/**
- * Free the sets which are put at some blocks.
- */
-static void free_sets(ir_node *irn, void *data)
+static void dump_conds(cond_t *cond, void *env)
+{
+ int i;
+ FILE *f = env;
+
+ ir_fprintf(f, "node:{title:\"n%p\" label:\"%n(%d, %d)\n%n\"}\n",
+ cond, cond->cond, cond->cases[0].pos, cond->cases[1].pos,
+ get_nodes_block(cond->cond));
+
+ for(i = 0; i < 2; ++i)
+ if(cond->cases[i].masked_by)
+ ir_fprintf(f, "edge:{sourcename:\"n%p\" targetname:\"n%p\" label:\"%d\"}\n",
+ cond, cond->cases[i].masked_by, i);
+}
+
+static void vcg_dump_conds(ir_graph *irg, cond_walk_info_t *cwi)
{
- if(is_Block(irn) && get_irn_link(irn)) {
- set *conds = get_irn_link(irn);
- del_set(conds);
+ char buf[512];
+ FILE *f;
+
+ snprintf(buf, sizeof(buf), "%s-conds.vcg", get_entity_name(get_irg_entity(irg)));
+
+ if((f = fopen(buf, "wt")) != NULL) {
+ cond_info_t *ci;
+ phi_info_t *phi;
+ cond_t *cond;
+
+ ir_fprintf(f, "graph:{\ndisplay_edge_labels:yes\n");
+ list_for_each_entry(cond_info_t, ci, &cwi->cond_info_head, list) {
+ ir_fprintf(f, "node:{title:\"n%p\" label:\"cond info\"}\n", ci);
+ list_for_each_entry(cond_t, cond, &ci->roots, list) {
+ walk_conds(cond, NULL, dump_conds, f);
+ ir_fprintf(f, "edge:{sourcename:\"n%p\" targetname:\"n%p\"}\n", ci, cond);
+ }
+ }
+
+ list_for_each_entry(phi_info_t, phi, &cwi->phi_head, list) {
+ ir_fprintf(f, "node:{title:\"n%p\" label:\"%n\n%n\"}\n",
+ phi->irn, phi->irn, get_nodes_block(phi->irn));
+ ir_fprintf(f, "edge:{sourcename:\"n%p\" targetname:\"n%p\"}\n", phi->irn, phi->cond_info);
+ }
+ fprintf(f, "}\n");
}
}
-#endif
void opt_if_conv(ir_graph *irg, opt_if_conv_info_t *params)
{
+ int muxes_made = 0;
struct obstack obst;
phi_info_t *phi_info;
cond_info_t *cond_info;
/* Init the debug stuff. */
dbg = firm_dbg_register("firm.opt.ifconv");
- firm_dbg_set_mask(dbg, 0);
+#if 0
+ firm_dbg_set_mask(dbg, LEVEL_1);
+#endif
+
+ /* if-conversion works better with normalized returns */
+ normalize_one_return(irg);
/* Ensure, that the dominators are computed. */
compute_doms(irg);
- DBG((dbg, LEVEL_4, "if conversion for irg %s(%p)\n",
+ DBG((dbg, LEVEL_1, "if conversion for irg %s(%p)\n",
get_entity_name(get_irg_entity(irg)), irg));
/*
*/
irg_walk_graph(irg, annotate_cond_info_pre, annotate_cond_info_post, &cwi);
+#if 0
+ vcg_dump_conds(irg, &cwi);
+#endif
+
/* Process each suitable phi found. */
list_for_each_entry(phi_info_t, phi_info, &cwi.phi_head, list) {
- DBG((dbg, LEVEL_4, "phi node %n\n", phi_info->irn));
- check_out_phi(phi_info, p);
+ DBG((dbg, LEVEL_2, "phi node %n\n", phi_info->irn));
+ muxes_made += check_out_phi(phi_info, p);
}
list_for_each_entry(cond_info_t, cond_info, &cwi.cond_info_head, list) {
del_set(cond_info->cond_set);
}
+ DBG((dbg, LEVEL_1, "muxes made: %d\n", muxes_made));
+
obstack_free(&obst, NULL);
}