* @brief Path-Sensitive Jump Threading
* @date 10. Sep. 2006
* @author Christoph Mallon, Matthias Braun
- * @version $Id$
*/
#include "config.h"
#include "iroptimize.h"
#include <assert.h>
+#include <stdbool.h>
#include "array_t.h"
#include "debug.h"
#include "ircons.h"
#undef AVOID_PHIB
-DEBUG_ONLY(static firm_dbg_module_t *dbg);
+DEBUG_ONLY(static firm_dbg_module_t *dbg;)
/**
* Add the new predecessor x to node node, which is either a Block or a Phi
int n;
int i;
- assert(is_Block(node) || is_Phi(node));
+ assert(is_Block(node));
n = get_irn_arity(node);
NEW_ARR_A(ir_node*, ins, n + 1);
ir_node **in;
ir_node *dummy;
- /* This is needed because we create bads sometimes */
- if (is_Bad(block) || is_Block_dead(block)) {
+ /* In case of a Bad input to a block we must return a Bad value of the requested mode */
+ if (is_Bad(block)) {
ir_graph *irg = get_irn_irg(block);
- return new_r_Bad(irg);
+ return new_r_Bad(irg, mode);
}
/* the other defs can't be marked for cases where a user of the original
{
ir_graph *irg;
ir_mode *mode;
- const ir_edge_t *edge;
- const ir_edge_t *next;
/* no need to do anything */
if (orig_val == second_val)
ssa_second_def = second_val;
/* Only fix the users of the first, i.e. the original node */
- foreach_out_edge_safe(orig_val, edge, next) {
+ foreach_out_edge_safe(orig_val, edge) {
ir_node *user = get_edge_src_irn(edge);
int j = get_edge_src_pos(edge);
ir_node *user_block = get_nodes_block(user);
}
}
+/**
+ * jumpthreading produces critical edges, e.g. B-C:
+ * A A
+ * \ / \ |
+ * B => B |
+ * / \ / \|
+ * C C
+ *
+ * By splitting this critical edge more threadings might be possible.
+ */
static void split_critical_edge(ir_node *block, int pos)
{
ir_graph *irg = get_irn_irg(block);
static void copy_and_fix(const jumpthreading_env_t *env, ir_node *block,
ir_node *copy_block, int j)
{
- const ir_edge_t *edge;
-
/* Look at all nodes in the cond_block and copy them into pred */
foreach_out_edge(block, edge) {
ir_node *node = get_edge_src_irn(edge);
ir_node *copy;
ir_mode *mode;
+ if (is_End(node)) {
+ /* edge is a keep-alive edge to End. If the End block is unreachable
+ * via normal control flow, End's reachability must be maintained
+ * through Keep edges. */
+ keep_alive(copy_block);
+ continue;
+ }
/* ignore control flow */
mode = get_irn_mode(node);
- if (mode == mode_X || is_Cond(node))
+ if (mode == mode_X || is_Cond(node) || is_Switch(node))
continue;
#ifdef AVOID_PHIB
/* we may not copy mode_b nodes, because this could produce Phi with
* mode_bs which can't be handled in all backends. Instead we duplicate
* the node and move it to its users */
if (mode == mode_b) {
- const ir_edge_t *edge, *next;
ir_node *pred;
int pn;
pred = get_Proj_pred(node);
pn = get_Proj_proj(node);
- foreach_out_edge_safe(node, edge, next) {
+ foreach_out_edge_safe(node, edge) {
ir_node *cmp_copy;
ir_node *user = get_edge_src_irn(edge);
int pos = get_edge_src_pos(edge);
ir_mode *mode;
mode = get_irn_mode(node);
- if (mode == mode_X || is_Cond(node))
+ if (mode == mode_X || is_Cond(node) || is_Switch(node))
continue;
#ifdef AVOID_PHIB
if (mode == mode_b)
copy_node = (ir_node*)get_irn_link(node);
construct_ssa(block, node, copy_block, copy_node);
}
+
+ /* make sure new nodes are kept alive if old nodes were */
+ ir_graph *irg = get_irn_irg(block);
+ ir_node *end = get_irg_end(irg);
+ for (int i = 0, arity = get_End_n_keepalives(end); i < arity; ++i) {
+ ir_node *keep = get_End_keepalive(end, i);
+ if (get_irn_visited(keep) < env->visited_nr || is_Block(keep))
+ continue;
+ ir_node *copy = get_irn_link(keep);
+ add_End_keepalive(end, copy);
+ }
}
/**
DB((
dbg, LEVEL_1,
"> Found jump threading candidate %+F->%+F\n",
- env->true_block, block
+ block, env->true_block
));
/* adjust true_block to point directly towards our jump */
DB((
dbg, LEVEL_1,
"> Found jump threading candidate %+F->%+F\n",
- env->true_block, block
+ block, env->true_block
));
/* adjust true_block to point directly towards our jump */
static void thread_jumps(ir_node* block, void* data)
{
jumpthreading_env_t env;
- int *changed = (int*)data;
+ bool *changed = (bool*)data;
ir_node *selector;
ir_node *projx;
ir_node *cond;
ir_node *copy_block;
int selector_evaluated;
- const ir_edge_t *edge, *next;
ir_graph *irg;
- ir_node *bad;
+ ir_node *badX;
int cnst_pos;
+ /* we do not deal with Phis, so restrict this to exactly one cfgpred */
if (get_Block_n_cfgpreds(block) != 1)
return;
assert(get_irn_mode(projx) == mode_X);
cond = get_Proj_pred(projx);
- if (!is_Cond(cond))
- return;
-
- selector = get_Cond_selector(cond);
/* TODO handle switch Conds */
- if (get_irn_mode(selector) != mode_b)
+ if (!is_Cond(cond))
return;
/* handle cases that can be immediately evaluated */
+ selector = get_Cond_selector(cond);
selector_evaluated = -1;
if (is_Cmp(selector)) {
ir_node *left = get_Cmp_left(selector);
if (selector_evaluated == 0) {
ir_graph *irg = get_irn_irg(block);
- bad = new_r_Bad(irg);
+ ir_node *bad = new_r_Bad(irg, mode_X);
exchange(projx, bad);
- *changed = 1;
+ *changed = true;
return;
} else if (selector_evaluated == 1) {
dbg_info *dbgi = get_irn_dbg_info(selector);
ir_node *jmp = new_rd_Jmp(dbgi, get_nodes_block(projx));
DBG_OPT_JUMPTHREADING(projx, jmp);
exchange(projx, jmp);
- *changed = 1;
+ *changed = true;
return;
}
if (copy_block == NULL)
return;
+ /* We might thread the condition block of an infinite loop,
+ * such that there is no path to End anymore. */
+ keep_alive(block);
+
/* we have to remove the edge towards the pred as the pred now
* jumps into the true_block. We also have to shorten Phis
* in our block because of this */
- bad = new_r_Bad(irg);
+ badX = new_r_Bad(irg, mode_X);
cnst_pos = env.cnst_pos;
/* shorten Phis */
- foreach_out_edge_safe(env.cnst_pred, edge, next) {
+ foreach_out_edge_safe(env.cnst_pred, edge) {
ir_node *node = get_edge_src_irn(edge);
- if (is_Phi(node))
+ if (is_Phi(node)) {
+ ir_node *bad = new_r_Bad(irg, get_irn_mode(node));
set_Phi_pred(node, cnst_pos, bad);
+ }
}
- set_Block_cfgpred(env.cnst_pred, cnst_pos, bad);
+ set_Block_cfgpred(env.cnst_pred, cnst_pos, badX);
/* the graph is changed now */
- *changed = 1;
+ *changed = true;
}
void opt_jumpthreading(ir_graph* irg)
{
- int changed, rerun;
+ bool changed;
+ bool rerun;
+
+ assure_irg_properties(irg,
+ IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE
+ | IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES
+ | IR_GRAPH_PROPERTY_NO_CRITICAL_EDGES);
FIRM_DBG_REGISTER(dbg, "firm.opt.jumpthreading");
DB((dbg, LEVEL_1, "===> Performing jumpthreading on %+F\n", irg));
- remove_critical_cf_edges(irg);
-
- edges_assure(irg);
ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);
- changed = 0;
+ changed = false;
do {
- rerun = 0;
+ rerun = false;
irg_block_walk_graph(irg, thread_jumps, NULL, &rerun);
changed |= rerun;
} while (rerun);
ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);
- if (changed) {
- /* control flow changed, some blocks may become dead */
- set_irg_outs_inconsistent(irg);
- set_irg_doms_inconsistent(irg);
- set_irg_extblk_inconsistent(irg);
- set_irg_loopinfo_inconsistent(irg);
- set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
-
- /* Dead code might be created. Optimize it away as it is dangerous
- * to call optimize_df() an dead code. */
- optimize_cf(irg);
- }
+ confirm_irg_properties(irg,
+ changed ? IR_GRAPH_PROPERTIES_NONE : IR_GRAPH_PROPERTIES_ALL);
}
/* Creates an ir_graph pass for opt_jumpthreading. */