* SPECIAL OPERATIONS
* ------------------
*
- * ir_node *new_Bad (void)
- * -----------------------
- *
- * Returns the unique Bad node current_ir_graph->bad.
- * This node is used to express results of dead code elimination.
- *
 * ir_node *new_NoMem (void)
 * --------------------------
 *
 * Returns the unique NoMem node of the current graph. It is used as the
 * memory input of operations that do not access or change the memory
 * state.
 *
* - proj_args The proj nodes of the args node.
*
- * - bad The Bad node is an auxiliary node. It is needed only once,
- * so there is this globally reachable node.
- *
* - no_mem The NoMem node is an auxiliary node. It is needed only once,
* so there is this globally reachable node.
*
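 * Bad nodes are no longer a per-graph singleton: they are created on
 * demand and carry the mode of the value they replace. A minimal
 * sketch, assuming an existing graph irg:
 *
 *   ir_node *nomem = get_irg_no_mem(irg);
 *   ir_node *bad_x = new_r_Bad(irg, mode_X);
 *   ir_node *bad_m = new_r_Bad(irg, mode_M);
 *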
/** Sets the node that represents the argument pointer of the given IR graph. */
FIRM_API void set_irg_args(ir_graph *irg, ir_node *node);
-/** Returns the Bad node of the given IR graph. Use new_Bad() instead!! */
-FIRM_API ir_node *get_irg_bad(const ir_graph *irg);
-FIRM_API void set_irg_bad(ir_graph *irg, ir_node *node);
-
-/** Returns the NoMem node of the given IR graph. Use new_NoMem() instead!! */
+/** Returns the NoMem node of the given IR graph. */
FIRM_API ir_node *get_irg_no_mem(const ir_graph *irg);
FIRM_API void set_irg_no_mem(ir_graph *irg, ir_node *node);
if (get_Block_dom_depth(bl) == -1) {
/* This block is not reachable from Start */
ir_graph *irg = get_irn_irg(bl);
- return new_r_Bad(irg);
+ return new_r_Bad(irg, mode_BB);
}
return get_dom_info(bl)->idom;
}
if (get_Block_postdom_depth(bl) == -1) {
/* This block is not reachable from Start */
ir_graph *irg = get_irn_irg(bl);
- return new_r_Bad(irg);
+ return new_r_Bad(irg, mode_BB);
}
return get_pdom_info(bl)->idom;
}
#endif /* defined DEBUG_libfirm */
for (i = 1; i <= bl->out[0].pos; ++i) {
ir_node *succ = bl->out[i].use;
- if (get_irn_mode(succ) == mode_X && !is_End(succ))
+ if (get_irn_mode(succ) == mode_X && !is_End(succ) && !is_Bad(succ))
n_cfg_outs += succ->out[0].pos;
}
return n_cfg_outs;
for (i = 1; i <= bl->out[0].pos; ++i) {
ir_node *succ = bl->out[i].use;
if (get_irn_mode(succ) == mode_X) {
-
+ if (is_Bad(succ))
+ continue;
if (is_End(succ)) {
/* ignore End if we are in the Endblock */
if (get_nodes_block(succ) == bl)
#endif /* defined DEBUG_libfirm */
for (i = 1; i <= bl->out[0].pos; ++i) {
ir_node *succ = bl->out[i].use;
- if (get_irn_mode(succ) == mode_X && !is_End(succ)) {
+ if (get_irn_mode(succ) == mode_X && !is_End(succ) && !is_Bad(succ)) {
int n_outs = succ->out[0].pos;
if (pos < n_outs)
return succ->out[pos + 1].use;
for (i = 1; i <= bl->out[0].pos; ++i) {
ir_node *succ = bl->out[i].use;
if (get_irn_mode(succ) == mode_X) {
+ if (is_Bad(succ))
+ continue;
if (is_End(succ)) {
ir_node *end_bl = get_nodes_block(succ);
if (end_bl == bl) {
    /* It's a completely bad loop: there is no Phi or Block node that
       could serve as a loop head, i.e. the code is dying. We break the
       loop by setting all inputs, including the block, to Bad. */
- int arity = get_irn_arity(n);
- ir_node *bad = get_irg_bad(get_irn_irg(n));
+ ir_graph *irg = get_irn_irg(n);
+ ir_mode *mode = get_irn_mode(n);
+ ir_node *bad = new_r_Bad(irg, mode);
+ int arity = get_irn_arity(n);
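+	/* position -1 addresses the block input, so the block and all data inputs become Bad */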
for (i = -1; i < arity; ++i) {
set_irn_n(n, i, bad);
}
case pn_Start_T_args:
	/* we should never need this explicitly */
- return new_r_Bad(get_irn_irg(node));
+ break;
case pn_Start_P_frame_base:
return be_prolog_get_reg_value(abihelper, sp_reg);
assert(is_Proj(initial_X));
exchange(initial_X, jmp);
- set_irg_initial_exec(irg, new_r_Bad(irg));
+ set_irg_initial_exec(irg, new_r_Bad(irg, mode_X));
}
/**
/* the arg proj is not needed anymore now and should be only used by the anchor */
assert(get_irn_n_edges(arg_tuple) == 1);
kill_node(arg_tuple);
- set_irg_args(irg, new_r_Bad(irg));
+ set_irg_args(irg, new_r_Bad(irg, mode_T));
/* All Return nodes hang on the End node, so look for them there. */
end = get_irg_end_block(irg);
/* kill the Reload */
if (get_irn_n_edges(irn) == 0) {
ir_graph *irg = get_irn_irg(irn);
+ ir_mode *frame_mode = get_irn_mode(get_irn_n(irn, n_be_Reload_frame));
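+	/* remember the frame input's mode so its Bad replacement is well-typed */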
sched_remove(irn);
- set_irn_n(irn, n_be_Reload_mem, new_r_Bad(irg));
- set_irn_n(irn, n_be_Reload_frame, new_r_Bad(irg));
+		set_irn_n(irn, n_be_Reload_mem, new_r_Bad(irg, mode_M));
+ set_irn_n(irn, n_be_Reload_frame, new_r_Bad(irg, frame_mode));
}
}
panic("Unexpected node %+F in block %+F with empty schedule", node, block);
}
- set_Block_cfgpred(block, 0, new_r_Bad(irg));
+ set_Block_cfgpred(block, 0, new_r_Bad(irg, mode_X));
kill_node(jump);
blocks_removed = 1;
/* pre transform some anchors (so they are available in the other transform
* functions) */
- pre_transform_anchor(irg, anchor_bad);
pre_transform_anchor(irg, anchor_no_mem);
pre_transform_anchor(irg, anchor_end_block);
pre_transform_anchor(irg, anchor_end);
sp = create_spproj(node, push, pn_ia32_Push_stack);
}
- set_irn_n(node, i, new_r_Bad(irg));
+ set_irn_n(node, i, new_r_Bad(irg, mode_X));
}
/* create pops */
}
/* remove memperm */
- arity = get_irn_arity(node);
- for (i = 0; i < arity; ++i) {
- set_irn_n(node, i, new_r_Bad(irg));
- }
sched_remove(node);
+ kill_node(node);
}
/**
static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph *irg, ir_node *block)
{
ir_node *jmp, *res, *in[2];
- ir_node *bad = get_irg_bad(irg);
ir_node *nomem = get_irg_no_mem(irg);
int old_cse;
case pn_Call_X_except:
/* should not happen here */
- edges_reroute(proj, bad);
+ edges_reroute(proj, new_r_Bad(irg, mode_X));
break;
case pn_Call_M:
/* should not happen here */
jmp = new_r_Jmp(block);
set_opt_cse(old_cse);
- set_Tuple_pred(call, pn_Call_M, nomem);
- set_Tuple_pred(call, pn_Call_X_regular, jmp);
- set_Tuple_pred(call, pn_Call_X_except, bad);
- set_Tuple_pred(call, pn_Call_T_result, res);
+ set_Tuple_pred(call, pn_Call_M, nomem);
+ set_Tuple_pred(call, pn_Call_X_regular, jmp);
+ set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
+ set_Tuple_pred(call, pn_Call_T_result, res);
}
}
case pn_Start_M:
return be_prolog_get_memory(abihelper);
case pn_Start_T_args:
- /* we should never need this explicitely */
- return new_r_Bad(get_irn_irg(block));
+ break;
case pn_Start_P_frame_base:
return get_frame_base();
case pn_Start_max:
ir_node *nn = NULL;
/* do not copy standard nodes */
- if (op == op_Bad)
- nn = get_irg_bad(irg);
- else if (op == op_NoMem)
+ if (op == op_NoMem)
n = get_irg_no_mem(irg);
else if (op == op_Block) {
old_irg = get_irn_irg(n);
ir_node *cfgpred = get_Block_cfgpred_block(block, i);
ir_node *value;
if (is_Bad(cfgpred)) {
- value = new_r_Bad(irg);
+ value = new_r_Bad(irg, mode);
} else {
inc_irg_visited(irg);
/* We ran into a cycle. This may happen in unreachable loops. */
if (irn_visited_else_mark(block)) {
/* Since the loop is unreachable, return a Bad. */
- return new_r_Bad(irg);
+ return new_r_Bad(irg, mode);
}
/* in a matured block we can immediately determine the phi arguments */
}
/* one predecessor just use its value */
} else if (arity == 1) {
- ir_node *cfgpred = get_Block_cfgpred_block(block, 0);
+ ir_node *cfgpred = get_Block_cfgpred(block, 0);
if (is_Bad(cfgpred)) {
- res = cfgpred;
+ res = new_r_Bad(irg, mode);
} else {
- res = get_r_value_internal(cfgpred, pos, mode);
+ ir_node *cfgpred_block = get_nodes_block(cfgpred);
+ res = get_r_value_internal(cfgpred_block, pos, mode);
}
/* multiple predecessors construct Phi */
} else {
/* walk over the blocks in the graph */
irg_block_walk(get_irg_end(irg), dump_block_to_cfg, NULL, F);
- dump_node(F, get_irg_bad(irg));
dump_vcg_footer(F);
}
{
ir_graph *irg = get_irn_irg(node);
ir_node **in = ALLOCAN(ir_node*, arity);
+ ir_node *bad = new_r_Bad(irg, mode_ANY);
int i;
/* construct a new in array, with every input being bad */
for (i = 0; i < arity; ++i) {
- in[i] = new_r_Bad(irg);
+ in[i] = bad;
}
set_irn_in(node, arity, in);
set_irn_op(node, op_Tuple);
void kill_node(ir_node *node)
{
ir_graph *irg = get_irn_irg(node);
- ir_node *bad = get_irg_bad(irg);
- int i;
- for (i = get_irn_arity(node) - 1; i >= -1; --i) {
- set_irn_n(node, i, bad);
+ if (edges_activated(irg)) {
+ edges_node_deleted(node);
}
- exchange(node, bad);
+	/* no one is allowed to reference this node anymore */
+ set_irn_op(node, op_Deleted);
}
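+
+/* A defensive sketch of what callers may rely on after kill_node():
+ * the node stays allocated, but its op becomes op_Deleted, so any
+ * lingering reference can be caught with e.g.
+ *
+ *   assert(get_irn_op(n) != op_Deleted);
+ */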
*/
ir_graph *irg = get_irn_irg(block);
enqueue_users(block, waitq);
- exchange(block, get_irg_bad(irg));
+ exchange(block, new_r_Bad(irg, mode_BB));
}
}
start_block = new_r_Block_noopt(res, 0, NULL);
set_irg_start_block(res, start_block);
- set_irg_bad (res, new_r_Bad(res));
set_irg_no_mem (res, new_r_NoMem(res));
start = new_r_Start(res);
set_irg_start (res, start);
ir_graph *new_const_code_irg(void)
{
ir_graph *res = alloc_graph();
- ir_node *bad;
ir_node *body_block;
ir_node *end;
ir_node *end_block;
/* -- The start block -- */
start_block = new_r_Block_noopt(res, 0, NULL);
set_irg_start_block(res, start_block);
- bad = new_r_Bad(res);
- set_irg_bad(res, bad);
no_mem = new_r_NoMem(res);
set_irg_no_mem(res, no_mem);
start = new_r_Start(res);
set_Block_block_visited(body_block, -1);
set_Block_block_visited(start_block, -1);
set_irn_visited(start_block, -1);
- set_irn_visited(bad, -1);
set_irn_visited(no_mem, -1);
return res;
/* -- The start block -- */
set_irg_start_block(res, get_new_node(get_irg_start_block(irg)));
- set_irg_bad (res, get_new_node(get_irg_bad(irg)));
set_irg_no_mem (res, get_new_node(get_irg_no_mem(irg)));
set_irg_start (res, get_new_node(get_irg_start(irg)));
_set_irg_args(irg, node);
}
-ir_node *(get_irg_bad)(const ir_graph *irg)
-{
- return _get_irg_bad(irg);
-}
-
-void (set_irg_bad)(ir_graph *irg, ir_node *node)
-{
- _set_irg_bad(irg, node);
-}
-
ir_node *(get_irg_no_mem)(const ir_graph *irg)
{
return _get_irg_no_mem(irg);
set_irn_n(irg->anchor, anchor_args, node);
}
-static inline ir_node *_get_irg_bad(const ir_graph *irg)
-{
- return get_irn_n(irg->anchor, anchor_bad);
-}
-
-static inline void _set_irg_bad(ir_graph *irg, ir_node *node)
-{
- set_irn_n(irg->anchor, anchor_bad, node);
-}
-
static inline ir_node * _get_irg_no_mem(const ir_graph *irg)
{
return get_irn_n(irg->anchor, anchor_no_mem);
* @param irn The node.
* @return The index allocated for the node.
*/
-static inline unsigned irg_register_node_idx(ir_graph *irg, ir_node *irn) {
+static inline unsigned irg_register_node_idx(ir_graph *irg, ir_node *irn)
+{
unsigned idx = irg->last_node_idx++;
if (idx >= (unsigned)ARR_LEN(irg->idx_irn_map))
ARR_RESIZE(ir_node *, irg->idx_irn_map, idx + 1);
#define set_irg_initial_mem(irg, node) _set_irg_initial_mem(irg, node)
#define get_irg_args(irg) _get_irg_args(irg)
#define set_irg_args(irg, node) _set_irg_args(irg, node)
-#define get_irg_bad(irg) _get_irg_bad(irg)
-#define set_irg_bad(irg, node) _set_irg_bad(irg, node)
#define get_irg_no_mem(irg) _get_irg_no_mem(irg)
#define set_irn_no_mem(irg, node) _set_irn_no_mem(irg, node)
#define get_irg_entity(irg) _get_irg_entity(irg)
int n_preds;
ir_graph *irg;
- /* don't optimize dead or labeled blocks */
+ /* don't optimize labeled blocks */
if (has_Block_entity(n))
return n;
+ if (!get_Block_matured(n))
+ return n;
n_preds = get_Block_n_cfgpreds(n);
- /* The Block constructor does not call optimize, but mature_immBlock()
- calls the optimization. */
- assert(get_Block_matured(n));
-
irg = get_irn_irg(n);
- /* if all predecessors of a block are unreachable, then the block is
- * unreachable */
- if (is_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK)) {
- int i;
- int n_cfgpreds = get_Block_n_cfgpreds(n);
-
- for (i = 0; i < n_cfgpreds; ++i) {
- ir_node *pred = get_Block_cfgpred(n, i);
- if (!is_Bad(pred))
- break;
- }
- /* only bad inputs? It's unreachable code (unless it is the start or
- * end block) */
- if (i >= n_cfgpreds && n != get_irg_start_block(irg)
- && n != get_irg_end_block(irg)) {
- return get_irg_bad(irg);
- }
- }
-
/* Straightening: a single entry Block following a single exit Block
* can be merged. */
if (n_preds == 1) {
}
}
- if (i >= n_preds) {
- ir_graph *irg = get_irn_irg(n);
- /* A totally Bad or self-referencing Phi (we didn't break the above loop) */
- return get_irg_bad(irg);
- }
-
/* search for rest of inputs, determine if any of these
are non-self-referencing */
while (++i < n_preds) {
return n;
} /* equivalent_node_Phi */
-/**
- * Several optimizations:
- * - fold Sync-nodes, iff they have only one predecessor except
- * themselves.
- */
-static ir_node *equivalent_node_Sync(ir_node *n)
-{
- int arity = get_Sync_n_preds(n);
- int i;
-
- for (i = 0; i < arity;) {
- ir_node *pred = get_Sync_pred(n, i);
- int j;
-
- /* Remove Bad predecessors */
- if (is_Bad(pred)) {
- del_Sync_n(n, i);
- --arity;
- continue;
- }
-
- /* Remove duplicate predecessors */
- for (j = 0;; ++j) {
- if (j >= i) {
- ++i;
- break;
- }
- if (get_Sync_pred(n, j) == pred) {
- del_Sync_n(n, i);
- --arity;
- break;
- }
- }
- }
-
- if (arity == 0) {
- ir_graph *irg = get_irn_irg(n);
- return get_irg_bad(irg);
- }
- if (arity == 1) return get_Sync_pred(n, 0);
- return n;
-} /* equivalent_node_Sync */
-
/**
* Optimize Proj(Tuple).
*/
proj = get_CopyB_mem(copyb);
DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP);
break;
-
- case pn_CopyB_X_except: {
- ir_graph *irg = get_irn_irg(proj);
- DBG_OPT_EXC_REM(proj);
- proj = get_irg_bad(irg);
- break;
- }
}
}
return proj;
DBG_OPT_EXC_REM(proj);
proj = get_Bound_mem(bound);
break;
- case pn_Bound_X_except: {
- ir_graph *irg = get_irn_irg(proj);
- DBG_OPT_EXC_REM(proj);
- proj = get_irg_bad(irg);
- break;
- }
case pn_Bound_res:
proj = idx;
DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP);
return proj;
} /* equivalent_node_Proj_Bound */
-/**
- * Optimize an Exception Proj(Load) with a non-null address.
- */
-static ir_node *equivalent_node_Proj_Load(ir_node *proj)
-{
- if (get_opt_ldst_only_null_ptr_exceptions()) {
- if (get_irn_mode(proj) == mode_X) {
- ir_node *load = get_Proj_pred(proj);
-
- /* get the Load address */
- const ir_node *addr = get_Load_ptr(load);
- const ir_node *confirm;
-
- if (value_not_null(addr, &confirm)) {
- if (get_Proj_proj(proj) == pn_Load_X_except) {
- ir_graph *irg = get_irn_irg(proj);
- DBG_OPT_EXC_REM(proj);
- return get_irg_bad(irg);
- }
- }
- }
- }
- return proj;
-} /* equivalent_node_Proj_Load */
-
-/**
- * Optimize an Exception Proj(Store) with a non-null address.
- */
-static ir_node *equivalent_node_Proj_Store(ir_node *proj)
-{
- if (get_opt_ldst_only_null_ptr_exceptions()) {
- if (get_irn_mode(proj) == mode_X) {
- ir_node *store = get_Proj_pred(proj);
-
- /* get the load/store address */
- const ir_node *addr = get_Store_ptr(store);
- const ir_node *confirm;
-
- if (value_not_null(addr, &confirm)) {
- if (get_Proj_proj(proj) == pn_Store_X_except) {
- ir_graph *irg = get_irn_irg(proj);
- DBG_OPT_EXC_REM(proj);
- return get_irg_bad(irg);
- }
- }
- }
- }
- return proj;
-} /* equivalent_node_Proj_Store */
-
/**
* Does all optimizations on nodes that must be done on its Projs
* because of creating new nodes.
CASE(And);
CASE(Conv);
CASE(Phi);
- CASE(Sync);
CASE_PROJ(Tuple);
CASE_PROJ(Div);
CASE_PROJ(CopyB);
CASE_PROJ(Bound);
- CASE_PROJ(Load);
- CASE_PROJ(Store);
CASE(Proj);
CASE(Id);
CASE(Mux);
turn_into_tuple(n, pn_Div_max);
set_Tuple_pred(n, pn_Div_M, mem);
set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(n, pn_Div_X_except, get_irg_bad(irg));
+ set_Tuple_pred(n, pn_Div_X_except, new_r_Bad(irg, mode_X));
set_Tuple_pred(n, pn_Div_res, value);
}
return n;
turn_into_tuple(n, pn_Mod_max);
set_Tuple_pred(n, pn_Mod_M, mem);
set_Tuple_pred(n, pn_Mod_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(n, pn_Mod_X_except, get_irg_bad(irg));
+ set_Tuple_pred(n, pn_Mod_X_except, new_r_Bad(irg, mode_X));
set_Tuple_pred(n, pn_Mod_res, value);
}
return n;
jmp = new_r_Jmp(blk);
turn_into_tuple(n, pn_Cond_max);
if (ta == tarval_b_true) {
- set_Tuple_pred(n, pn_Cond_false, get_irg_bad(irg));
+ set_Tuple_pred(n, pn_Cond_false, new_r_Bad(irg, mode_X));
set_Tuple_pred(n, pn_Cond_true, jmp);
} else {
set_Tuple_pred(n, pn_Cond_false, jmp);
- set_Tuple_pred(n, pn_Cond_true, get_irg_bad(irg));
+ set_Tuple_pred(n, pn_Cond_true, new_r_Bad(irg, mode_X));
}
/* We might generate an endless loop, so keep it alive. */
add_End_keepalive(get_irg_end(irg), blk);
if (get_Proj_proj(proj) == pn_Load_X_except) {
ir_graph *irg = get_irn_irg(proj);
DBG_OPT_EXC_REM(proj);
- return get_irg_bad(irg);
+ return new_r_Bad(irg, mode_X);
} else {
ir_node *blk = get_nodes_block(load);
return new_r_Jmp(blk);
if (get_Proj_proj(proj) == pn_Store_X_except) {
ir_graph *irg = get_irn_irg(proj);
DBG_OPT_EXC_REM(proj);
- return get_irg_bad(irg);
+ return new_r_Bad(irg, mode_X);
} else {
ir_node *blk = get_nodes_block(store);
return new_r_Jmp(blk);
ir_graph *irg = get_irn_irg(proj);
/* we found an exception handler, remove it */
DBG_OPT_EXC_REM(proj);
- return get_irg_bad(irg);
+ return new_r_Bad(irg, mode_X);
}
case pn_Div_M: {
ir_graph *irg = get_irn_irg(proj);
/* we found an exception handler, remove it */
DBG_OPT_EXC_REM(proj);
- return get_irg_bad(irg);
+ return new_r_Bad(irg, mode_X);
}
case pn_Mod_M: {
} else {
ir_graph *irg = get_irn_irg(proj);
/* this case will NEVER be taken, kill it */
- return get_irg_bad(irg);
+ return new_r_Bad(irg, mode_X);
}
}
} else {
ir_relation cmp_result = tarval_cmp(b_vrp->range_bottom, tp);
ir_relation cmp_result2 = tarval_cmp(b_vrp->range_top, tp);
- if ((cmp_result & ir_relation_greater) == cmp_result && (cmp_result2
- & ir_relation_less) == cmp_result2) {
+ if ((cmp_result & ir_relation_greater) == cmp_result
+ && (cmp_result2 & ir_relation_less) == cmp_result2) {
ir_graph *irg = get_irn_irg(proj);
- return get_irg_bad(irg);
+ return new_r_Bad(irg, mode_X);
}
} else if (b_vrp->range_type == VRP_ANTIRANGE) {
ir_relation cmp_result = tarval_cmp(b_vrp->range_bottom, tp);
ir_relation cmp_result2 = tarval_cmp(b_vrp->range_top, tp);
- if ((cmp_result & ir_relation_less_equal) == cmp_result && (cmp_result2
- & ir_relation_greater_equal) == cmp_result2) {
+ if ((cmp_result & ir_relation_less_equal) == cmp_result
+ && (cmp_result2 & ir_relation_greater_equal) == cmp_result2) {
ir_graph *irg = get_irn_irg(proj);
- return get_irg_bad(irg);
+ return new_r_Bad(irg, mode_X);
}
}
b_vrp->bits_set
) == ir_relation_equal)) {
ir_graph *irg = get_irn_irg(proj);
- return get_irg_bad(irg);
+ return new_r_Bad(irg, mode_X);
}
if (!(tarval_cmp(
tarval_not(b_vrp->bits_not_set))
== ir_relation_equal)) {
ir_graph *irg = get_irn_irg(proj);
- return get_irg_bad(irg);
+ return new_r_Bad(irg, mode_X);
}
-
-
}
}
}
DBG_OPT_EXC_REM(proj);
proj = new_r_Jmp(get_nodes_block(copyb));
break;
- case pn_CopyB_X_except:
+ case pn_CopyB_X_except: {
+ ir_graph *irg = get_irn_irg(proj);
DBG_OPT_EXC_REM(proj);
- proj = get_irg_bad(get_irn_irg(proj));
+ proj = new_r_Bad(irg, mode_X);
break;
+ }
default:
break;
}
break;
case pn_Bound_X_except:
DBG_OPT_EXC_REM(proj);
- proj = get_irg_bad(get_irn_irg(proj));
+ proj = new_r_Bad(get_irn_irg(proj), mode_X);
break;
case pn_Bound_res:
proj = idx;
return proj;
} /* transform_node_Proj */
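+/**
+ * Transform a Block node: a block whose control flow predecessors are all
+ * Bad is unreachable and gets replaced by a Bad block (start and end
+ * block excepted).
+ */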
+static ir_node *transform_node_Block(ir_node *block)
+{
+ ir_graph *irg = get_irn_irg(block);
+
+ if (!is_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK))
+ return block;
+ /* don't optimize labeled blocks */
+ if (has_Block_entity(block))
+ return block;
+ if (!get_Block_matured(block))
+ return block;
+
+ /* remove blocks with only Bad inputs (or no inputs) */
+ {
+ int i;
+ int n_cfgpreds = get_Block_n_cfgpreds(block);
+
+ for (i = 0; i < n_cfgpreds; ++i) {
+ ir_node *pred = get_Block_cfgpred(block, i);
+ if (!is_Bad(pred))
+ break;
+ }
+		/* only Bad inputs? Then the block is unreachable code (unless it is
+		 * the start or end block) */
+ if (i >= n_cfgpreds && block != get_irg_start_block(irg)
+ && block != get_irg_end_block(irg)) {
+ return new_r_Bad(irg, mode_BB);
+ }
+ }
+ return block;
+}
+
static ir_node *transform_node_Phi(ir_node *phi)
{
int n = get_irn_arity(phi);
ir_mode *mode = get_irn_mode(phi);
ir_node *block = get_nodes_block(phi);
ir_graph *irg = get_irn_irg(phi);
- ir_node *bad = get_irg_bad(irg);
+ ir_node *bad = NULL;
int i;
/* Set phi-operands for bad-block inputs to bad */
ir_node *pred = get_Block_cfgpred(block, i);
if (!is_Bad(pred))
continue;
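+		/* create the Bad lazily; most Phis never need one */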
+ if (bad == NULL)
+ bad = new_r_Bad(irg, mode);
set_irn_n(phi, i, bad);
}
int pred_arity;
int j;
+ /* Remove Bad predecessors */
+ if (is_Bad(pred)) {
+ del_Sync_n(n, i);
+ --arity;
+ continue;
+ }
+
+ /* Remove duplicate predecessors */
+ for (j = 0; j < i; ++j) {
+ if (get_Sync_pred(n, j) == pred) {
+ del_Sync_n(n, i);
+ --arity;
+ break;
+ }
+ }
+ if (j < i)
+ continue;
+
if (!is_Sync(pred)) {
++i;
continue;
}
}
+ if (arity == 0) {
+ ir_graph *irg = get_irn_irg(n);
+ return new_r_Bad(irg, mode_M);
+ }
+ if (arity == 1) {
+ return get_Sync_pred(n, 0);
+ }
+
/* rehash the sync node */
add_identities(n);
-
return n;
-} /* transform_node_Sync */
+}
static ir_node *transform_node_Load(ir_node *n)
{
ir_node *block = get_nodes_block(n);
ir_node *jmp = new_r_Jmp(block);
ir_graph *irg = get_irn_irg(n);
- ir_node *bad = get_irg_bad(irg);
+ ir_node *bad = new_r_Bad(irg, mode_X);
ir_mode *mode = get_Load_mode(n);
ir_node *res = new_r_Proj(pred_load, mode, pn_Load_res);
ir_node *in[pn_Load_max] = { mem, jmp, bad, res };
ir_node *block = get_nodes_block(n);
ir_node *jmp = new_r_Jmp(block);
ir_graph *irg = get_irn_irg(n);
- ir_node *bad = get_irg_bad(irg);
+ ir_node *bad = new_r_Bad(irg, mode_X);
ir_node *res = value;
ir_node *in[pn_Load_max] = { mem, jmp, bad, res };
ir_node *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in);
switch (code) {
CASE(Add);
CASE(And);
+ CASE(Block);
CASE(Call);
CASE(Cmp);
CASE(Conv);
current_ir_graph = rem;
} /* visit_all_identities */
-/**
- * Garbage in, garbage out. If a node has a dead input, i.e., the
- * Bad node is input to the node, return the Bad node.
- */
-static ir_node *gigo(ir_node *node)
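+/**
+ * Returns true if the node is placed inside a Bad block, i.e. it is
+ * unreachable code.
+ */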
+static bool is_unreachable(ir_node *node)
{
ir_op *op = get_irn_op(node);
/* Code in "Bad" blocks is unreachable and can be replaced by Bad */
if (op != op_Block && is_Bad(get_nodes_block(node))) {
- ir_graph *irg = get_irn_irg(node);
- return get_irg_bad(irg);
+ return true;
}
return false;
/* Remove nodes with dead (Bad) input.
Run always for transformation induced Bads. */
- n = gigo(n);
- if (n != oldn) {
- edges_node_deleted(oldn);
-
- /* We found an existing, better node, so we can deallocate the old node. */
- irg_kill_node(irg, oldn);
- return n;
+ if (is_unreachable(n)) {
+ ir_mode *mode = get_irn_mode(n);
+ edges_node_deleted(n);
+ irg_kill_node(irg, n);
+ return new_r_Bad(irg, mode);
}
/* constant expression evaluation / constant folding */
/* Remove nodes with dead (Bad) input.
Run always for transformation induced Bads. */
- n = gigo(n);
- if (is_Bad(n))
- return n;
+ if (is_unreachable(n)) {
+ ir_graph *irg = get_irn_irg(n);
+ ir_mode *mode = get_irn_mode(n);
+ return new_r_Bad(irg, mode);
+ }
/* constant expression evaluation / constant folding */
if (get_opt_constant_folding()) {
anchor_frame, /**< methods frame */
anchor_initial_mem, /**< initial memory of this graph */
anchor_args, /**< methods arguments */
- anchor_bad, /**< bad node of this ir_graph, the one and
- only in this graph */
anchor_no_mem, /**< NoMem node of this ir_graph, the one and only in this graph */
anchor_last
};
ASSERT_AND_RET_DBG(
(
(proj >= 0 && mode == mode_X && get_irn_mode(get_Cond_selector(pred)) == mode_b) || /* compare */
- (mode == mode_X && mode_is_int(get_irn_mode(get_Cond_selector(pred)))) || /* switch */
- is_Bad(get_Cond_selector(pred)) /* rare */
+ (mode == mode_X && mode_is_int(get_irn_mode(get_Cond_selector(pred)))) /* switch */
),
"wrong Proj from Cond", 0,
show_proj_failure(p);
/* We don't test */
break;
- case iro_Bad:
- /* hmm, optimization did not remove it */
- break;
-
default:
/* ASSERT_AND_RET(0, "Unknown opcode", 0); */
break;
ir_mode *mode = get_irn_mode(p);
long proj = get_Proj_proj(p);
- /* ignore Bound checks of Bad */
- if (is_Bad(get_Bound_index(n)))
- return 1;
ASSERT_AND_RET_DBG(
(
(proj == pn_Bound_M && mode == mode_M) ||
for (i = get_Block_n_cfgpreds(n) - 1; i >= 0; --i) {
ir_node *pred = get_Block_cfgpred(n, i);
- ASSERT_AND_RET(
- is_Bad(pred) || (get_irn_mode(pred) == mode_X),
+ ASSERT_AND_RET(get_irn_mode(pred) == mode_X,
"Block node must have a mode_X predecessor", 0);
ASSERT_AND_RET(is_cfop(skip_Proj(skip_Tuple(pred))), "Block predecessor must be a cfop", 0);
}
/* Phi: BB x dataM^n --> dataM */
for (i = get_Phi_n_preds(n) - 1; i >= 0; --i) {
ir_node *pred = get_Phi_pred(n, i);
- if (!is_Bad(pred)) {
- ASSERT_AND_RET_DBG(
- get_irn_mode(pred) == mymode,
- "Phi node", 0,
- show_phi_failure(n, pred, i);
- );
- }
+ ASSERT_AND_RET_DBG(get_irn_mode(pred) == mymode,
+ "Phi node", 0, show_phi_failure(n, pred, i);
+ );
}
ASSERT_AND_RET(mode_is_dataM(mymode) || mymode == mode_b, "Phi node", 0 );
+#if 0
if (mymode == mode_M) {
for (i = get_Phi_n_preds(n) - 1; i >= 0; --i) {
int j;
ir_node *pred_i = get_Phi_pred(n, i);
- if (is_Bad(pred_i))
- continue;
for (j = i - 1; j >= 0; --j) {
ir_node *pred_j = get_Phi_pred(n, j);
- if (is_Bad(pred_j))
- continue;
-#if 0
/* currently this checks fails for blocks with exception
outputs (and these are NOT basic blocks). So it is disabled yet. */
ASSERT_AND_RET_DBG(
0,
ir_printf("%+F and %+F of %+F are in %+F\n", pred_i, pred_j, n, get_irn_n(pred_i, -1))
);
-#endif
}
}
}
+#endif
return 1;
}
ir_printf("node %+F", n));
}
- /* We don't want to test nodes whose predecessors are Bad,
- as we would have to special case that for each operation. */
- if (op != op_Phi && op != op_Block) {
- int i;
- for (i = get_irn_arity(n) - 1; i >= 0; --i) {
- if (is_Bad(get_irn_n(n, i)))
- return 1;
- }
- }
-
if (op->ops.verify_node)
return op->ops.verify_node(n, irg);
turn_into_tuple(p, pn_CopyB_max);
set_Tuple_pred(p, pn_CopyB_M, mem);
set_Tuple_pred(p, pn_CopyB_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(p, pn_CopyB_X_except, get_irg_bad(irg));
+ set_Tuple_pred(p, pn_CopyB_X_except, new_r_Bad(irg, mode_X));
++n_args;
}
turn_into_tuple(irn, pn_CopyB_max);
set_Tuple_pred(irn, pn_CopyB_M, mem);
- set_Tuple_pred(irn, pn_CopyB_X_regular, get_irg_bad(irg));
- set_Tuple_pred(irn, pn_CopyB_X_except, get_irg_bad(irg));
+ set_Tuple_pred(irn, pn_CopyB_X_regular, new_r_Bad(irg, mode_X));
+ set_Tuple_pred(irn, pn_CopyB_X_except, new_r_Bad(irg, mode_X));
}
/**
set_opt_cse(0);
reg_jmp = new_r_Jmp(block);
set_opt_cse(old_cse);
- exc_jmp = new_r_Bad(irg);
+ exc_jmp = new_r_Bad(irg, mode_X);
}
irn = new_r_Tuple(block, 1, &irn);
/* we are ready */
turn_into_tuple(node, n_proj);
- for (i = 0; i < n_proj; ++i)
- set_Tuple_pred(node, i, new_r_Bad(irg));
if (rt->mem_proj_nr >= 0)
set_Tuple_pred(node, rt->mem_proj_nr, new_r_Proj(call, mode_M, pn_Call_M));
if (!is_NoMem(mem)) {
*/
static void maybe_kill_node(ir_node *node)
{
- ir_graph *irg;
- int i, arity;
-
if (get_irn_n_edges(node) != 0)
return;
- irg = get_irn_irg(node);
-
- assert(!is_Bad(node));
-
- arity = get_irn_arity(node);
- for (i = 0; i < arity; ++i) {
- set_irn_n(node, i, new_r_Bad(irg));
- }
- set_nodes_block(node, new_r_Bad(irg));
-
- edges_node_deleted(node);
+ kill_node(node);
}
static ir_node *create_not(dbg_info *dbgi, ir_node *node)
if (get_Block_idom(b) != predb) {
		/* predb is not the dominator. There can't be uses of pred's Phi nodes, kill them. */
- ir_graph *irg = get_irn_irg(b);
- exchange(phi, get_irg_bad(irg));
+ ir_graph *irg = get_irn_irg(b);
+ ir_mode *mode = get_irn_mode(phi);
+ exchange(phi, new_r_Bad(irg, mode));
} else {
/* predb is the direct dominator of b. There might be uses of the Phi nodes from
predb in further block, so move this phi from the predecessor into the block b */
in[n_preds++] = predpred;
}
/* Remove block+jump as it might be kept alive. */
- exchange(pred, get_irg_bad(get_irn_irg(b)));
- exchange(predb, get_irg_bad(get_irn_irg(b)));
+ exchange(pred, new_r_Bad(get_irn_irg(b), mode_X));
+ exchange(predb, new_r_Bad(get_irn_irg(b), mode_BB));
} else {
/* case 3: */
in[n_preds++] = pred;
long num = get_tarval_long(tv);
long def_num = get_Cond_default_proj(cond);
ir_graph *irg = get_irn_irg(cond);
- ir_node *bad = get_irg_bad(irg);
+ ir_node *bad = new_r_Bad(irg, mode_X);
if (def_num == get_Proj_proj(proj1)) {
/* first one is the defProj */
node_t *block = get_irn_node(get_nodes_block(irn));
if (block->type.tv == tarval_unreachable) {
- ir_node *bad = get_irg_bad(current_ir_graph);
+ ir_graph *irg = get_irn_irg(irn);
+ ir_mode *mode = get_irn_mode(node->node);
+ ir_node *bad = new_r_Bad(irg, mode);
/* here, bad might already have a node, but this can be safely ignored
as long as bad has at least ONE valid node */
turn_into_tuple(alloc, pn_Alloc_max);
set_Tuple_pred(alloc, pn_Alloc_M, mem);
set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg));
+ set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg, mode_X));
++env->nr_deads;
}
turn_into_tuple(alloc, pn_Alloc_max);
set_Tuple_pred(alloc, pn_Alloc_M, mem);
set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg));
+ set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg, mode_X));
set_Tuple_pred(alloc, pn_Alloc_res, sel);
++env->nr_removed;
mem = get_Call_mem(call);
blk = get_nodes_block(call);
turn_into_tuple(call, pn_Call_max);
- set_Tuple_pred(call, pn_Call_M, mem);
- set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
- set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg));
+ set_Tuple_pred(call, pn_Call_M, mem);
+ set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(blk));
+ set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
+ set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg, mode_T));
++env->nr_deads;
}
}
}
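+
+/**
+ * Create a Bad block and seed it with "undefined" bit information, so the
+ * bit analysis has consistent data for the replacement node.
+ */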
+static ir_node *make_bad_block(ir_graph *irg)
+{
+ ir_node *bad = new_r_Bad(irg, mode_BB);
+ bitinfo *bb = get_bitinfo(bad);
+ if (bb == NULL) {
+ ir_tarval* const f = get_tarval_b_false();
+ ir_tarval* const t = get_tarval_b_true();
+ set_bitinfo(bad, f, t); /* Undefined. */
+ }
+ return bad;
+}
+
static void apply_result(ir_node* const irn, void* ctx)
{
environment_t* env = (environment_t*)ctx;
block_b = get_bitinfo(irn);
/* Trivially unreachable blocks have no info. */
if (block_b == NULL || block_b->z == get_tarval_b_false()) {
- exchange(irn, get_irg_bad(get_Block_irg(irn)));
+ ir_node *bad = make_bad_block(get_irn_irg(irn));
+ exchange(irn, bad);
env->modified = 1;
}
return;
/* Trivially unreachable blocks have no info. */
if (block_b == NULL || block_b->z == get_tarval_b_false()) {
/* Unreachable blocks might be replaced before the nodes in them. */
- exchange(irn, is_Bad(block) ? block : get_irg_bad(get_Block_irg(block)));
+ ir_mode *mode = get_irn_mode(irn);
+ ir_graph *irg = get_irn_irg(irn);
+ ir_node *bad = new_r_Bad(irg, mode);
+ exchange(irn, bad);
env->modified = 1;
return;
}
add_End_keepalive(get_irg_end(irg), block);
n = new_r_Jmp(block);
} else {
- n = new_r_Bad(irg);
+ n = new_r_Bad(irg, mode_X);
/* Transferring analysis information to the bad node makes it a
* candidate for replacement. */
goto exchange_only;
{
pdeq* const q = new_pdeq();
- /* We need this extra step because the dom tree does not contain unreachable
- blocks in Firm. Moreover build phi list. */
+ /* We need this extra step because the dom tree does not contain
+ * unreachable blocks in Firm. Moreover build phi list. */
irg_walk_anchors(irg, clear_links, build_phi_lists, NULL);
- { ir_tarval* const f = get_tarval_b_false();
+ {
+ ir_tarval* const f = get_tarval_b_false();
ir_tarval* const t = get_tarval_b_true();
- set_bitinfo(get_irg_bad(irg), f, t); /* Undefined. */
set_bitinfo(get_irg_end_block(irg), t, f); /* Reachable. */
}
}
case pn_Call_X_except:
exc_changed = 1;
- exchange(proj, get_irg_bad(irg));
+ exchange(proj, new_r_Bad(irg, mode_X));
break;
case pn_Call_X_regular: {
ir_node *block = get_nodes_block(call);
switch (get_Proj_proj(proj)) {
case pn_Call_X_except:
exc_changed = 1;
- exchange(proj, get_irg_bad(irg));
+ exchange(proj, new_r_Bad(irg, mode_X));
break;
case pn_Call_X_regular: {
ir_node *block = get_nodes_block(call);
/* ignore bad blocks. */
if (is_Bad(pred_blk)) {
ir_graph *irg = get_irn_irg(pred_blk);
- in[pos] = new_r_Bad(irg);
+ in[pos] = new_r_Bad(irg, mode_X);
continue;
}
/* In case of a bad input to a block we need to return the bad value */
if (is_Bad(block)) {
ir_graph *irg = get_irn_irg(block);
- return new_r_Bad(irg);
+ return new_r_Bad(irg, mode);
}
/* the other defs can't be marked for cases where a user of the original
int selector_evaluated;
const ir_edge_t *edge, *next;
ir_graph *irg;
- ir_node *bad;
+ ir_node *badX;
int cnst_pos;
if (get_Block_n_cfgpreds(block) != 1)
if (selector_evaluated == 0) {
ir_graph *irg = get_irn_irg(block);
- bad = new_r_Bad(irg);
+ ir_node *bad = new_r_Bad(irg, mode_X);
exchange(projx, bad);
*changed = 1;
return;
/* we have to remove the edge towards the pred as the pred now
* jumps into the true_block. We also have to shorten Phis
* in our block because of this */
- bad = new_r_Bad(irg);
+ badX = new_r_Bad(irg, mode_X);
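+	/* the block predecessor gets a mode_X Bad; each Phi gets a Bad of its own mode below */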
cnst_pos = env.cnst_pos;
/* shorten Phis */
foreach_out_edge_safe(env.cnst_pred, edge, next) {
ir_node *node = get_edge_src_irn(edge);
- if (is_Phi(node))
+ if (is_Phi(node)) {
+ ir_node *bad = new_r_Bad(irg, get_irn_mode(node));
set_Phi_pred(node, cnst_pos, bad);
+ }
}
- set_Block_cfgpred(env.cnst_pred, cnst_pos, bad);
+ set_Block_cfgpred(env.cnst_pred, cnst_pos, badX);
/* the graph is changed now */
*changed = 1;
/* no exception */
if (info->projs[pn_Load_X_except]) {
ir_graph *irg = get_irn_irg(load);
- exchange( info->projs[pn_Load_X_except], new_r_Bad(irg));
+			exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
res |= CF_CHANGED;
}
if (info->projs[pn_Load_X_regular]) {
/* no exception */
if (info->projs[pn_Load_X_except]) {
ir_graph *irg = get_irn_irg(load);
- exchange(info->projs[pn_Load_X_except], new_r_Bad(irg));
+ exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
res |= CF_CHANGED;
}
if (info->projs[pn_Load_X_regular]) {
/* no exception, clear the info field as it might be checked later again */
if (info->projs[pn_Load_X_except]) {
ir_graph *irg = get_irn_irg(load);
- exchange(info->projs[pn_Load_X_except], new_r_Bad(irg));
+ exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
info->projs[pn_Load_X_except] = NULL;
res |= CF_CHANGED;
}
/* we completely replace the load by this value */
if (info->projs[pn_Load_X_except]) {
ir_graph *irg = get_irn_irg(load);
- exchange(info->projs[pn_Load_X_except], new_r_Bad(irg));
+ exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
info->projs[pn_Load_X_except] = NULL;
res |= CF_CHANGED;
}
{
int i;
int n_cfgpreds;
- ir_graph *irg;
+ ir_graph *irg = get_irn_irg(block);
ir_node *phi;
ir_node **in;
* Dead and bad blocks. */
if (get_irn_arity(block) < 1 || is_Bad(block)) {
DB((dbg, LEVEL_5, "ssa bad %N\n", block));
- return new_Bad();
+ return new_r_Bad(irg, mode);
}
if (block == ssa_second_def_block && !first) {
return value;
}
- irg = get_irn_irg(block);
assert(block != get_irg_start_block(irg));
/* a Block with only 1 predecessor needs no Phi */
{
ir_node *start_block;
ir_node *start;
- ir_node *bad;
ir_node *nomem;
start_block = get_irg_start_block(called_graph);
set_new_node(start, pre_call);
mark_irn_visited(start);
- bad = get_irg_bad(called_graph);
- set_new_node(bad, get_irg_bad(irg));
- mark_irn_visited(bad);
-
nomem = get_irg_no_mem(called_graph);
set_new_node(nomem, get_irg_no_mem(irg));
mark_irn_visited(nomem);
ir_mode *mode = get_irn_mode(cf_pred[0]);
phi = new_r_Phi(post_bl, n_ret, cf_pred, mode);
} else {
- phi = new_r_Bad(irg);
+ ir_mode *mode = get_irn_mode(cf_pred[0]);
+ phi = new_r_Bad(irg, mode);
}
res_pred[j] = phi;
/* Conserve Phi-list for further inlinings -- but might be optimized */
result_tuple = new_r_Tuple(post_bl, n_res, res_pred);
set_Tuple_pred(call, pn_Call_T_result, result_tuple);
} else {
- set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg));
+ set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg, mode_T));
}
/* handle the regular call */
set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(post_bl));
set_Tuple_pred(call, pn_Call_X_except, new_r_Jmp(block));
}
} else {
- set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
+ set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
}
} else {
ir_node *main_end_bl;
for (i = 0; i < n_exc; ++i)
end_preds[main_end_bl_arity + i] = cf_pred[i];
set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
- set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
+ set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
free(end_preds);
}
free(res_pred);
/* no exception, clear the m fields as it might be checked later again */
if (m->projs[pn_Load_X_except]) {
ir_graph *irg = get_irn_irg(ptr);
- exchange(m->projs[pn_Load_X_except], new_r_Bad(irg));
+ exchange(m->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
m->projs[pn_Load_X_except] = NULL;
m->flags &= ~FLAG_EXCEPTION;
env.changed = 1;
proj = op->projs[pn_Load_X_except];
if (proj != NULL) {
ir_graph *irg = get_irn_irg(load);
- exchange(proj, new_r_Bad(irg));
+ exchange(proj, new_r_Bad(irg, mode_X));
}
proj = op->projs[pn_Load_X_regular];
if (proj != NULL) {
proj = op->projs[pn_Store_X_except];
if (proj != NULL) {
ir_graph *irg = get_irn_irg(store);
- exchange(proj, new_r_Bad(irg));
+ exchange(proj, new_r_Bad(irg, mode_X));
}
proj = op->projs[pn_Store_X_regular];
if (proj != NULL) {
}
/* remove the Jmp, we have placed a Return here */
- exchange(jmp, new_r_Bad(irg));
+ exchange(jmp, new_r_Bad(irg, mode_X));
}
/*
n = get_End_n_keepalives(end);
for (i = 0; i < n; ++i) {
if (get_End_keepalive(end, i) == phiM) {
- set_End_keepalive(end, i, new_r_Bad(irg));
+ set_End_keepalive(end, i, new_r_Bad(irg, mode_M));
break;
}
}
set_Tuple_pred(node, pn_Load_M, mem);
set_Tuple_pred(node, pn_Load_res, val);
set_Tuple_pred(node, pn_Load_X_regular, new_r_Jmp(block));
- set_Tuple_pred(node, pn_Load_X_except, new_r_Bad(irg));
+ set_Tuple_pred(node, pn_Load_X_except, new_r_Bad(irg, mode_X));
} else if (is_Store(node)) {
DB((dbg, SET_LEVEL_3, " checking %+F for replacement ", node));
turn_into_tuple(node, pn_Store_max);
set_Tuple_pred(node, pn_Store_M, mem);
set_Tuple_pred(node, pn_Store_X_regular, new_r_Jmp(block));
- set_Tuple_pred(node, pn_Store_X_except, new_r_Bad(irg));
+ set_Tuple_pred(node, pn_Store_X_except, new_r_Bad(irg, mode_X));
}
}
}
if (n_locs > 0) {
- ir_node *bad, *start_block;
+ ir_node *start_block;
ir_node **in;
ir_mode **modes;
mature_immBlock(start_block);
/* no: we can kill all returns */
- bad = get_irg_bad(irg);
-
for (p = env->rets; p; p = n) {
ir_node *block = get_nodes_block(p);
ir_node *call, *mem, *jmp, *tuple;
set_optimize(rem);
for (i = 0; i < env->n_ress; ++i) {
+ ir_mode *mode = modes[i];
if (env->variants[i] != TR_DIRECT) {
- in[i] = get_r_value(irg, i, modes[i]);
+ in[i] = get_r_value(irg, i, mode);
} else {
- in[i] = bad;
+ in[i] = new_r_Bad(irg, mode);
}
}
/* create a new tuple for the return values */
tuple = new_r_Tuple(block, env->n_ress, in);
turn_into_tuple(call, pn_Call_max);
- set_Tuple_pred(call, pn_Call_M, mem);
- set_Tuple_pred(call, pn_Call_X_regular, jmp);
- set_Tuple_pred(call, pn_Call_X_except, bad);
- set_Tuple_pred(call, pn_Call_T_result, tuple);
+ set_Tuple_pred(call, pn_Call_M, mem);
+ set_Tuple_pred(call, pn_Call_X_regular, jmp);
+ set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
+ set_Tuple_pred(call, pn_Call_T_result, tuple);
for (i = 0; i < env->n_ress; ++i) {
ir_node *res = get_Return_res(p, i);
}
}
- exchange(p, bad);
+ exchange(p, new_r_Bad(irg, mode_X));
}
/* finally fix all other returns */
}
ssa_cons_finish(irg);
} else {
- ir_node *bad = get_irg_bad(irg);
+ ir_node *bad = new_r_Bad(irg, mode_X);
/* no: we can kill all returns */
for (p = env->rets; p; p = n) {
they are set to Bad, and the actual removal is left to the control flow
	optimisation phase. Block, Phi and Tuple nodes with only Bad inputs,
	however, are replaced by Bad right away."""
- mode = "mode_T"
flags = [ "cfopcode", "start_block", "dump_noblock" ]
pinned = "yes"
knownBlock = True
block = "get_irg_start_block(irg)"
- singleton = True
attr_struct = "bad_attr"
init = '''
res->attr.bad.irg.irg = irg;