From: Matthias Braun
Date: Wed, 4 May 2011 11:48:18 +0000 (+0200)
Subject: give Bad nodes a mode
X-Git-Url: http://nsz.repo.hu/git/?a=commitdiff_plain;h=6f068af98daa4725d60e5d23a8f98ec2841cfa44;p=libfirm

give Bad nodes a mode
---
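In short: a Bad node used to be a per-graph singleton of mode_T, reached
through the anchor get_irg_bad(irg); from this commit on, Bad nodes are
created on demand via new_r_Bad(irg, mode) and carry the mode of the edge
they stand in for. A minimal sketch of the call-site change, assuming the
libfirm headers of this revision (the function name is illustrative):

    #include <libfirm/firm.h>

    /* Replace a dead control-flow input of a block (sketch). */
    static void sever_cf_input(ir_node *block, int pos)
    {
        ir_graph *irg = get_irn_irg(block);

        /* before: one shared mode_T Bad node per graph */
        /* set_Block_cfgpred(block, pos, get_irg_bad(irg)); */

        /* after: a fresh Bad carrying the mode of the replaced edge */
        set_Block_cfgpred(block, pos, new_r_Bad(irg, mode_X));
    }
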
diff --git a/include/libfirm/ircons.h b/include/libfirm/ircons.h
index 340efdbbd..71eb3979c 100644
--- a/include/libfirm/ircons.h
+++ b/include/libfirm/ircons.h
@@ -924,12 +924,6 @@
 *    SPECIAL OPERATIONS
 *    ------------------
 *
- *    ir_node *new_Bad (void)
- *    -----------------------
- *
- *    Returns the unique Bad node current_ir_graph->bad.
- *    This node is used to express results of dead code elimination.
- *
 *    ir_node *new_NoMem (void)
 *    -----------------------------------------------------------------------------------
 *
diff --git a/include/libfirm/irgraph.h b/include/libfirm/irgraph.h
index c261d42af..63324ad83 100644
--- a/include/libfirm/irgraph.h
+++ b/include/libfirm/irgraph.h
@@ -81,9 +81,6 @@
 *
 * - proj_args        The proj nodes of the args node.
 *
- * - bad              The Bad node is an auxiliary node. It is needed only once,
- *                    so there is this globally reachable node.
- *
 * - no_mem           The NoMem node is an auxiliary node. It is needed only once,
 *                    so there is this globally reachable node.
 *
@@ -237,11 +234,7 @@ FIRM_API ir_node *get_irg_args(const ir_graph *irg);
 /** Sets the node that represents the argument pointer of the given IR graph. */
 FIRM_API void set_irg_args(ir_graph *irg, ir_node *node);
 
-/** Returns the Bad node of the given IR graph. Use new_Bad() instead!! */
-FIRM_API ir_node *get_irg_bad(const ir_graph *irg);
-FIRM_API void set_irg_bad(ir_graph *irg, ir_node *node);
-
-/** Returns the NoMem node of the given IR graph. Use new_NoMem() instead!! */
+/** Returns the NoMem node of the given IR graph. */
 FIRM_API ir_node *get_irg_no_mem(const ir_graph *irg);
 FIRM_API void set_irg_no_mem(ir_graph *irg, ir_node *node);
 
diff --git a/ir/ana/irdom.c b/ir/ana/irdom.c
index 7db7234ac..555262a79 100644
--- a/ir/ana/irdom.c
+++ b/ir/ana/irdom.c
@@ -53,7 +53,7 @@ ir_node *get_Block_idom(const ir_node *bl)
 	if (get_Block_dom_depth(bl) == -1) {
 		/* This block is not reachable from Start */
 		ir_graph *irg = get_irn_irg(bl);
-		return new_r_Bad(irg);
+		return new_r_Bad(irg, mode_BB);
 	}
 	return get_dom_info(bl)->idom;
 }
@@ -85,7 +85,7 @@ ir_node *get_Block_ipostdom(const ir_node *bl)
 	if (get_Block_postdom_depth(bl) == -1) {
 		/* This block is not reachable from Start */
 		ir_graph *irg = get_irn_irg(bl);
-		return new_r_Bad(irg);
+		return new_r_Bad(irg, mode_BB);
 	}
 	return get_pdom_info(bl)->idom;
 }
diff --git a/ir/ana/irouts.c b/ir/ana/irouts.c
index cb32aa994..3cc6f30e8 100644
--- a/ir/ana/irouts.c
+++ b/ir/ana/irouts.c
@@ -116,7 +116,7 @@ int get_Block_n_cfg_outs(const ir_node *bl)
 #endif /* defined DEBUG_libfirm */
 	for (i = 1; i <= bl->out[0].pos; ++i) {
 		ir_node *succ = bl->out[i].use;
-		if (get_irn_mode(succ) == mode_X && !is_End(succ))
+		if (get_irn_mode(succ) == mode_X && !is_End(succ) && !is_Bad(succ))
 			n_cfg_outs += succ->out[0].pos;
 	}
 	return n_cfg_outs;
@@ -133,7 +133,8 @@ int get_Block_n_cfg_outs_ka(const ir_node *bl)
 	for (i = 1; i <= bl->out[0].pos; ++i) {
 		ir_node *succ = bl->out[i].use;
 		if (get_irn_mode(succ) == mode_X) {
-
+			if (is_Bad(succ))
+				continue;
 			if (is_End(succ)) {
 				/* ignore End if we are in the Endblock */
 				if (get_nodes_block(succ) == bl)
@@ -157,7 +158,7 @@ ir_node *get_Block_cfg_out(const ir_node *bl, int pos)
 #endif /* defined DEBUG_libfirm */
 	for (i = 1; i <= bl->out[0].pos; ++i) {
 		ir_node *succ = bl->out[i].use;
-		if (get_irn_mode(succ) == mode_X && !is_End(succ)) {
+		if (get_irn_mode(succ) == mode_X && !is_End(succ) && !is_Bad(succ)) {
 			int n_outs = succ->out[0].pos;
 			if (pos < n_outs)
 				return succ->out[pos + 1].use;
@@ -179,6 +180,8 @@ ir_node *get_Block_cfg_out_ka(const ir_node *bl, int pos)
 	for (i = 1; i <= bl->out[0].pos; ++i) {
 		ir_node *succ = bl->out[i].use;
 		if (get_irn_mode(succ) == mode_X) {
+			if (is_Bad(succ))
+				continue;
 			if (is_End(succ)) {
 				ir_node *end_bl = get_nodes_block(succ);
 				if (end_bl == bl) {
diff --git a/ir/ana/irscc.c b/ir/ana/irscc.c
index 13c1d7c21..d858a871d 100644
--- a/ir/ana/irscc.c
+++ b/ir/ana/irscc.c
@@ -619,8 +619,10 @@ static ir_node *find_tail(ir_node *n)
 		/* It's a completely bad loop: without Phi/Block nodes that can
 		   be a head. I.e., the code is "dying".  We break the loop by
 		   setting Bad nodes. */
-		int arity = get_irn_arity(n);
-		ir_node *bad = get_irg_bad(get_irn_irg(n));
+		ir_graph *irg   = get_irn_irg(n);
+		ir_mode  *mode  = get_irn_mode(n);
+		ir_node  *bad   = new_r_Bad(irg, mode);
+		int       arity = get_irn_arity(n);
 		for (i = -1; i < arity; ++i) {
 			set_irn_n(n, i, bad);
 		}
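A consequence visible in the irouts.c hunks above: a Bad that replaces a
control-flow edge now has mode_X itself, so it shows up among a block's
mode_X out-edges and every walker over control-flow successors must filter
it explicitly. A sketch under that assumption, using the public out-edge
API (requires compute_irg_outs() to have run):

    #include <libfirm/firm.h>

    /* Count the real control-flow successors of a block, skipping
     * Bad out-edges, which carry mode_X after this commit. */
    static int count_cfg_succs(const ir_node *block)
    {
        int i, n = 0;
        for (i = get_irn_n_outs(block) - 1; i >= 0; --i) {
            ir_node *succ = get_irn_out(block, i);
            if (get_irn_mode(succ) == mode_X && !is_End(succ) && !is_Bad(succ))
                ++n;
        }
        return n;
    }
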
diff --git a/ir/be/arm/arm_transform.c b/ir/be/arm/arm_transform.c
index 91bb8f143..c2a79b2f4 100644
--- a/ir/be/arm/arm_transform.c
+++ b/ir/be/arm/arm_transform.c
@@ -1429,7 +1429,7 @@ static ir_node *gen_Proj_Start(ir_node *node)
 
 	case pn_Start_T_args:
 		/* we should never need this explicitely */
-		return new_r_Bad(get_irn_irg(node));
+		break;
 	case pn_Start_P_frame_base:
 		return be_prolog_get_reg_value(abihelper, sp_reg);
diff --git a/ir/be/beabi.c b/ir/be/beabi.c
index d46121a04..689b53d02 100644
--- a/ir/be/beabi.c
+++ b/ir/be/beabi.c
@@ -1556,7 +1556,7 @@ static void fix_start_block(ir_graph *irg)
 	assert(is_Proj(initial_X));
 	exchange(initial_X, jmp);
-	set_irg_initial_exec(irg, new_r_Bad(irg));
+	set_irg_initial_exec(irg, new_r_Bad(irg, mode_X));
 }
 
 /**
@@ -1893,7 +1893,7 @@ static void modify_irg(ir_graph *irg)
 	/* the arg proj is not needed anymore now and should be only used by the anchor */
 	assert(get_irn_n_edges(arg_tuple) == 1);
 	kill_node(arg_tuple);
-	set_irg_args(irg, new_r_Bad(irg));
+	set_irg_args(irg, new_r_Bad(irg, mode_T));
 
 	/* All Return nodes hang on the End node, so look for them there. */
 	end = get_irg_end_block(irg);
diff --git a/ir/be/bechordal_main.c b/ir/be/bechordal_main.c
index 1ee54e922..b539d8591 100644
--- a/ir/be/bechordal_main.c
+++ b/ir/be/bechordal_main.c
@@ -217,9 +217,10 @@ static void memory_operand_walker(ir_node *irn, void *env)
 	/* kill the Reload */
 	if (get_irn_n_edges(irn) == 0) {
 		ir_graph *irg = get_irn_irg(irn);
+		ir_mode  *frame_mode = get_irn_mode(get_irn_n(irn, n_be_Reload_frame));
 		sched_remove(irn);
-		set_irn_n(irn, n_be_Reload_mem, new_r_Bad(irg));
-		set_irn_n(irn, n_be_Reload_frame, new_r_Bad(irg));
+		set_irn_n(irn, n_be_Reload_mem, new_r_Bad(irg, mode_X));
+		set_irn_n(irn, n_be_Reload_frame, new_r_Bad(irg, frame_mode));
 	}
 }
diff --git a/ir/be/beirgmod.c b/ir/be/beirgmod.c
index fb1114268..6b44f6457 100644
--- a/ir/be/beirgmod.c
+++ b/ir/be/beirgmod.c
@@ -227,7 +227,7 @@ static void remove_empty_block(ir_node *block)
 		panic("Unexpected node %+F in block %+F with empty schedule", node, block);
 	}
 
-	set_Block_cfgpred(block, 0, new_r_Bad(irg));
+	set_Block_cfgpred(block, 0, new_r_Bad(irg, mode_X));
 	kill_node(jump);
 	blocks_removed = 1;
diff --git a/ir/be/betranshlp.c b/ir/be/betranshlp.c
index 660ec7aac..085ab7420 100644
--- a/ir/be/betranshlp.c
+++ b/ir/be/betranshlp.c
@@ -350,7 +350,6 @@ static void transform_nodes(ir_graph *irg, arch_pretrans_nodes *pre_transform)
 
 	/* pre transform some anchors (so they are available in the other transform
 	 * functions) */
-	pre_transform_anchor(irg, anchor_bad);
 	pre_transform_anchor(irg, anchor_no_mem);
 	pre_transform_anchor(irg, anchor_end_block);
 	pre_transform_anchor(irg, anchor_end);
diff --git a/ir/be/ia32/bearch_ia32.c b/ir/be/ia32/bearch_ia32.c
index f9a8d1a90..c362a8659 100644
--- a/ir/be/ia32/bearch_ia32.c
+++ b/ir/be/ia32/bearch_ia32.c
@@ -1022,7 +1022,7 @@ static void transform_MemPerm(ir_node *node)
 			sp = create_spproj(node, push, pn_ia32_Push_stack);
 		}
 
-		set_irn_n(node, i, new_r_Bad(irg));
+		set_irn_n(node, i, new_r_Bad(irg, mode_X));
 	}
 
 	/* create pops */
@@ -1068,11 +1068,8 @@ static void transform_MemPerm(ir_node *node)
 	}
 
 	/* remove memperm */
-	arity = get_irn_arity(node);
-	for (i = 0; i < arity; ++i) {
-		set_irn_n(node, i, new_r_Bad(irg));
-	}
 	sched_remove(node);
+	kill_node(node);
 }
 
 /**
diff --git a/ir/be/ia32/ia32_intrinsics.c b/ir/be/ia32/ia32_intrinsics.c
index f2c1a1715..192a20241 100644
--- a/ir/be/ia32/ia32_intrinsics.c
+++ b/ir/be/ia32/ia32_intrinsics.c
@@ -101,7 +101,6 @@ static void reroute_result(ir_node *proj, ir_node *l_res, ir_node *h_res)
 static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph *irg, ir_node *block)
 {
 	ir_node *jmp, *res, *in[2];
-	ir_node *bad   = get_irg_bad(irg);
 	ir_node *nomem = get_irg_no_mem(irg);
 	int     old_cse;
 
@@ -130,7 +129,7 @@ static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph
 			case pn_Call_X_except:
 				/* should not happen here */
-				edges_reroute(proj, bad);
+				edges_reroute(proj, new_r_Bad(irg, mode_X));
 				break;
 			case pn_Call_M:
 				/* should not happen here */
@@ -168,10 +167,10 @@ static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph
 		jmp = new_r_Jmp(block);
 		set_opt_cse(old_cse);
 
-		set_Tuple_pred(call, pn_Call_M,         nomem);
-		set_Tuple_pred(call, pn_Call_X_regular, jmp);
-		set_Tuple_pred(call, pn_Call_X_except,  bad);
-		set_Tuple_pred(call, pn_Call_T_result,  res);
+		set_Tuple_pred(call, pn_Call_M,         nomem);
+		set_Tuple_pred(call, pn_Call_X_regular, jmp);
+		set_Tuple_pred(call, pn_Call_X_except,  new_r_Bad(irg, mode_X));
+		set_Tuple_pred(call, pn_Call_T_result,  res);
 	}
 }
diff --git a/ir/be/sparc/sparc_transform.c b/ir/be/sparc/sparc_transform.c
index e7b3dc58e..d44651bb8 100644
--- a/ir/be/sparc/sparc_transform.c
+++ b/ir/be/sparc/sparc_transform.c
@@ -1848,8 +1848,7 @@ static ir_node *gen_Proj_Start(ir_node *node)
 	case pn_Start_M:
 		return be_prolog_get_memory(abihelper);
 	case pn_Start_T_args:
-		/* we should never need this explicitely */
-		return new_r_Bad(get_irn_irg(block));
+		break;
 	case pn_Start_P_frame_base:
 		return get_frame_base();
 	case pn_Start_max:
diff --git a/ir/common/irtools.c b/ir/common/irtools.c
index ed498cfe8..6065951cf 100644
--- a/ir/common/irtools.c
+++ b/ir/common/irtools.c
@@ -58,9 +58,7 @@ void copy_irn_to_irg(ir_node *n, ir_graph *irg)
 	ir_node *nn = NULL;
 
 	/* do not copy standard nodes */
-	if (op == op_Bad)
-		nn = get_irg_bad(irg);
-	else if (op == op_NoMem)
+	if (op == op_NoMem)
 		nn = get_irg_no_mem(irg);
 	else if (op == op_Block) {
 		old_irg = get_irn_irg(n);
diff --git a/ir/ir/ircons.c b/ir/ir/ircons.c
index f38994b28..5a8c0257c 100644
--- a/ir/ir/ircons.c
+++ b/ir/ir/ircons.c
@@ -243,7 +243,7 @@ static ir_node *set_phi_arguments(ir_node *phi, int pos)
 		ir_node *cfgpred = get_Block_cfgpred_block(block, i);
 		ir_node *value;
 		if (is_Bad(cfgpred)) {
-			value = new_r_Bad(irg);
+			value = new_r_Bad(irg, mode);
 		} else {
 			inc_irg_visited(irg);
@@ -287,7 +287,7 @@ static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
 	/* We ran into a cycle. This may happen in unreachable loops. */
 	if (irn_visited_else_mark(block)) {
 		/* Since the loop is unreachable, return a Bad. */
-		return new_r_Bad(irg);
+		return new_r_Bad(irg, mode);
 	}
 
 	/* in a matured block we can immediately determine the phi arguments */
@@ -306,11 +306,12 @@ static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
 		}
 	/* one predecessor just use its value */
 	} else if (arity == 1) {
-		ir_node *cfgpred = get_Block_cfgpred_block(block, 0);
+		ir_node *cfgpred = get_Block_cfgpred(block, 0);
 		if (is_Bad(cfgpred)) {
-			res = cfgpred;
+			res = new_r_Bad(irg, mode);
 		} else {
-			res = get_r_value_internal(cfgpred, pos, mode);
+			ir_node *cfgpred_block = get_nodes_block(cfgpred);
+			res = get_r_value_internal(cfgpred_block, pos, mode);
 		}
 	/* multiple predecessors construct Phi */
 	} else {
diff --git a/ir/ir/irdump.c b/ir/ir/irdump.c
index f27bb1d7b..249b01ef8 100644
--- a/ir/ir/irdump.c
+++ b/ir/ir/irdump.c
@@ -2402,7 +2402,7 @@ void dump_cfg(FILE *F, ir_graph *irg)
 	/* walk over the blocks in the graph */
 	irg_block_walk(get_irg_end(irg), dump_block_to_cfg, NULL, F);
-	dump_node(F, get_irg_bad(irg));
+	/* dump_node(F, get_irg_bad(irg)); */
 
 	dump_vcg_footer(F);
 }
diff --git a/ir/ir/irgmod.c b/ir/ir/irgmod.c
index a19f30d60..51391c78c 100644
--- a/ir/ir/irgmod.c
+++ b/ir/ir/irgmod.c
@@ -47,11 +47,12 @@ void turn_into_tuple(ir_node *node, int arity)
 {
 	ir_graph *irg = get_irn_irg(node);
 	ir_node **in  = ALLOCAN(ir_node*, arity);
+	ir_node  *bad = new_r_Bad(irg, mode_ANY);
 	int       i;
 
 	/* construct a new in array, with every input being bad */
 	for (i = 0; i < arity; ++i) {
-		in[i] = new_r_Bad(irg);
+		in[i] = bad;
 	}
 	set_irn_in(node, arity, in);
 	set_irn_op(node, op_Tuple);
@@ -284,11 +285,10 @@ ir_node *part_block_edges(ir_node *node)
 void kill_node(ir_node *node)
 {
 	ir_graph *irg = get_irn_irg(node);
-	ir_node *bad = get_irg_bad(irg);
-	int i;
 
-	for (i = get_irn_arity(node) - 1; i >= -1; --i) {
-		set_irn_n(node, i, bad);
+	if (edges_activated(irg)) {
+		edges_node_deleted(node);
 	}
-	exchange(node, bad);
+	/* noone is allowed to reference this node anymore */
+	set_irn_op(node, op_Deleted);
 }
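Note the new kill_node() contract above: instead of rewiring every input to
the shared Bad and exchanging the node, it detaches the edges and stamps the
node op_Deleted. Callers that used to wire inputs to Bad by hand can now
simply call kill_node(), as lower_mode_b.c does further down. A minimal
sketch, mirroring that caller:

    #include <libfirm/firm.h>

    /* Drop a node once nothing references it anymore; kill_node()
     * handles edge removal and marks the node op_Deleted. */
    static void drop_if_unused(ir_node *node)
    {
        if (get_irn_n_edges(node) != 0)
            return;        /* still referenced */
        kill_node(node);   /* nobody may reference it afterwards */
    }
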
diff --git a/ir/ir/irgopt.c b/ir/ir/irgopt.c
index 7a312e6ed..a4c454bda 100644
--- a/ir/ir/irgopt.c
+++ b/ir/ir/irgopt.c
@@ -137,7 +137,7 @@ static void kill_dead_blocks(ir_node *block, void *env)
 		 */
 		ir_graph *irg = get_irn_irg(block);
 		enqueue_users(block, waitq);
-		exchange(block, get_irg_bad(irg));
+		exchange(block, new_r_Bad(irg, mode_BB));
 	}
 }
diff --git a/ir/ir/irgraph.c b/ir/ir/irgraph.c
index c6d7c4140..d46969436 100644
--- a/ir/ir/irgraph.c
+++ b/ir/ir/irgraph.c
@@ -216,7 +216,6 @@ ir_graph *new_r_ir_graph(ir_entity *ent, int n_loc)
 	start_block = new_r_Block_noopt(res, 0, NULL);
 	set_irg_start_block(res, start_block);
-	set_irg_bad        (res, new_r_Bad(res));
 	set_irg_no_mem     (res, new_r_NoMem(res));
 	start = new_r_Start(res);
 	set_irg_start      (res, start);
@@ -260,7 +259,6 @@ ir_graph *new_ir_graph(ir_entity *ent, int n_loc)
 ir_graph *new_const_code_irg(void)
 {
 	ir_graph *res = alloc_graph();
-	ir_node  *bad;
 	ir_node  *body_block;
 	ir_node  *end;
 	ir_node  *end_block;
@@ -303,8 +301,6 @@ ir_graph *new_const_code_irg(void)
 	/* -- The start block -- */
 	start_block = new_r_Block_noopt(res, 0, NULL);
 	set_irg_start_block(res, start_block);
-	bad = new_r_Bad(res);
-	set_irg_bad(res, bad);
 	no_mem = new_r_NoMem(res);
 	set_irg_no_mem(res, no_mem);
 	start = new_r_Start(res);
@@ -323,7 +319,6 @@ ir_graph *new_const_code_irg(void)
 	set_Block_block_visited(body_block, -1);
 	set_Block_block_visited(start_block, -1);
 	set_irn_visited(start_block, -1);
-	set_irn_visited(bad, -1);
 	set_irn_visited(no_mem, -1);
 
 	return res;
@@ -420,7 +415,6 @@ ir_graph *create_irg_copy(ir_graph *irg)
 	/* -- The start block -- */
 	set_irg_start_block(res, get_new_node(get_irg_start_block(irg)));
-	set_irg_bad        (res, get_new_node(get_irg_bad(irg)));
 	set_irg_no_mem     (res, get_new_node(get_irg_no_mem(irg)));
 	set_irg_start      (res, get_new_node(get_irg_start(irg)));
@@ -582,16 +576,6 @@ void (set_irg_args)(ir_graph *irg, ir_node *node)
 	_set_irg_args(irg, node);
 }
 
-ir_node *(get_irg_bad)(const ir_graph *irg)
-{
-	return _get_irg_bad(irg);
-}
-
-void (set_irg_bad)(ir_graph *irg, ir_node *node)
-{
-	_set_irg_bad(irg, node);
-}
-
 ir_node *(get_irg_no_mem)(const ir_graph *irg)
 {
 	return _get_irg_no_mem(irg);
diff --git a/ir/ir/irgraph_t.h b/ir/ir/irgraph_t.h
index 6b0e0ad97..6e393ef6f 100644
--- a/ir/ir/irgraph_t.h
+++ b/ir/ir/irgraph_t.h
@@ -186,16 +186,6 @@ static inline void _set_irg_args(ir_graph *irg, ir_node *node)
 	set_irn_n(irg->anchor, anchor_args, node);
 }
 
-static inline ir_node *_get_irg_bad(const ir_graph *irg)
-{
-	return get_irn_n(irg->anchor, anchor_bad);
-}
-
-static inline void _set_irg_bad(ir_graph *irg, ir_node *node)
-{
-	set_irn_n(irg->anchor, anchor_bad, node);
-}
-
 static inline ir_node *_get_irg_no_mem(const ir_graph *irg)
 {
 	return get_irn_n(irg->anchor, anchor_no_mem);
@@ -425,7 +415,8 @@ static inline int _is_irg_state(const ir_graph *irg, ir_graph_state_t state)
 * @param irn The node.
 * @return The index allocated for the node.
 */
-static inline unsigned irg_register_node_idx(ir_graph *irg, ir_node *irn) {
+static inline unsigned irg_register_node_idx(ir_graph *irg, ir_node *irn)
+{
 	unsigned idx = irg->last_node_idx++;
 	if (idx >= (unsigned)ARR_LEN(irg->idx_irn_map))
 		ARR_RESIZE(ir_node *, irg->idx_irn_map, idx + 1);
@@ -530,8 +521,6 @@ static inline ir_phase *irg_get_phase(const ir_graph *irg, ir_phase_id id)
 #define set_irg_initial_mem(irg, node)   _set_irg_initial_mem(irg, node)
 #define get_irg_args(irg)                _get_irg_args(irg)
 #define set_irg_args(irg, node)          _set_irg_args(irg, node)
-#define get_irg_bad(irg)                 _get_irg_bad(irg)
-#define set_irg_bad(irg, node)           _set_irg_bad(irg, node)
 #define get_irg_no_mem(irg)              _get_irg_no_mem(irg)
 #define set_irn_no_mem(irg, node)        _set_irn_no_mem(irg, node)
 #define get_irg_entity(irg)              _get_irg_entity(irg)
diff --git a/ir/ir/iropt.c b/ir/ir/iropt.c
index 6a6dfbae8..3fa187a72 100644
--- a/ir/ir/iropt.c
+++ b/ir/ir/iropt.c
@@ -679,37 +679,16 @@ static ir_node *equivalent_node_Block(ir_node *n)
 	int       n_preds;
 	ir_graph *irg;
 
-	/* don't optimize dead or labeled blocks */
+	/* don't optimize labeled blocks */
 	if (has_Block_entity(n))
 		return n;
+	if (!get_Block_matured(n))
+		return n;
 
 	n_preds = get_Block_n_cfgpreds(n);
 
-	/* The Block constructor does not call optimize, but mature_immBlock()
-	   calls the optimization. */
-	assert(get_Block_matured(n));
-
 	irg = get_irn_irg(n);
 
-	/* if all predecessors of a block are unreachable, then the block is
-	 * unreachable */
-	if (is_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK)) {
-		int i;
-		int n_cfgpreds = get_Block_n_cfgpreds(n);
-
-		for (i = 0; i < n_cfgpreds; ++i) {
-			ir_node *pred = get_Block_cfgpred(n, i);
-			if (!is_Bad(pred))
-				break;
-		}
-		/* only bad inputs? It's unreachable code (unless it is the start or
-		 * end block) */
-		if (i >= n_cfgpreds && n != get_irg_start_block(irg)
-		    && n != get_irg_end_block(irg)) {
-			return get_irg_bad(irg);
-		}
-	}
-
 	/* Straightening: a single entry Block following a single exit Block
 	 * can be merged. */
 	if (n_preds == 1) {
@@ -1276,12 +1255,6 @@ static ir_node *equivalent_node_Phi(ir_node *n)
 		}
 	}
 
-	if (i >= n_preds) {
-		ir_graph *irg = get_irn_irg(n);
-		/* A totally Bad or self-referencing Phi (we didn't break the above loop) */
-		return get_irg_bad(irg);
-	}
-
 	/* search for rest of inputs, determine if any of these
 	   are non-self-referencing */
 	while (++i < n_preds) {
@@ -1299,49 +1272,6 @@ static ir_node *equivalent_node_Phi(ir_node *n)
 	return n;
 }  /* equivalent_node_Phi */
 
-/**
- * Several optimizations:
- * - fold Sync-nodes, iff they have only one predecessor except
- *   themselves.
- */
-static ir_node *equivalent_node_Sync(ir_node *n)
-{
-	int arity = get_Sync_n_preds(n);
-	int i;
-
-	for (i = 0; i < arity;) {
-		ir_node *pred = get_Sync_pred(n, i);
-		int      j;
-
-		/* Remove Bad predecessors */
-		if (is_Bad(pred)) {
-			del_Sync_n(n, i);
-			--arity;
-			continue;
-		}
-
-		/* Remove duplicate predecessors */
-		for (j = 0;; ++j) {
-			if (j >= i) {
-				++i;
-				break;
-			}
-			if (get_Sync_pred(n, j) == pred) {
-				del_Sync_n(n, i);
-				--arity;
-				break;
-			}
-		}
-	}
-
-	if (arity == 0) {
-		ir_graph *irg = get_irn_irg(n);
-		return get_irg_bad(irg);
-	}
-	if (arity == 1)
-		return get_Sync_pred(n, 0);
-	return n;
-}  /* equivalent_node_Sync */
-
 /**
 * Optimize Proj(Tuple).
 */
@@ -1406,13 +1336,6 @@ static ir_node *equivalent_node_Proj_CopyB(ir_node *proj)
 			proj = get_CopyB_mem(copyb);
 			DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP);
 			break;
-
-		case pn_CopyB_X_except: {
-			ir_graph *irg = get_irn_irg(proj);
-			DBG_OPT_EXC_REM(proj);
-			proj = get_irg_bad(irg);
-			break;
-		}
 		}
 	}
 	return proj;
@@ -1456,12 +1379,6 @@ static ir_node *equivalent_node_Proj_Bound(ir_node *proj)
 			DBG_OPT_EXC_REM(proj);
 			proj = get_Bound_mem(bound);
 			break;
-		case pn_Bound_X_except: {
-			ir_graph *irg = get_irn_irg(proj);
-			DBG_OPT_EXC_REM(proj);
-			proj = get_irg_bad(irg);
-			break;
-		}
 		case pn_Bound_res:
 			proj = idx;
 			DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP);
@@ -1474,56 +1391,6 @@ static ir_node *equivalent_node_Proj_Bound(ir_node *proj)
 	return proj;
 }  /* equivalent_node_Proj_Bound */
 
-/**
- * Optimize an Exception Proj(Load) with a non-null address.
- */
-static ir_node *equivalent_node_Proj_Load(ir_node *proj)
-{
-	if (get_opt_ldst_only_null_ptr_exceptions()) {
-		if (get_irn_mode(proj) == mode_X) {
-			ir_node *load = get_Proj_pred(proj);
-
-			/* get the Load address */
-			const ir_node *addr = get_Load_ptr(load);
-			const ir_node *confirm;
-
-			if (value_not_null(addr, &confirm)) {
-				if (get_Proj_proj(proj) == pn_Load_X_except) {
-					ir_graph *irg = get_irn_irg(proj);
-					DBG_OPT_EXC_REM(proj);
-					return get_irg_bad(irg);
-				}
-			}
-		}
-	}
-	return proj;
-}  /* equivalent_node_Proj_Load */
-
-/**
- * Optimize an Exception Proj(Store) with a non-null address.
- */
-static ir_node *equivalent_node_Proj_Store(ir_node *proj)
-{
-	if (get_opt_ldst_only_null_ptr_exceptions()) {
-		if (get_irn_mode(proj) == mode_X) {
-			ir_node *store = get_Proj_pred(proj);
-
-			/* get the load/store address */
-			const ir_node *addr = get_Store_ptr(store);
-			const ir_node *confirm;
-
-			if (value_not_null(addr, &confirm)) {
-				if (get_Proj_proj(proj) == pn_Store_X_except) {
-					ir_graph *irg = get_irn_irg(proj);
-					DBG_OPT_EXC_REM(proj);
-					return get_irg_bad(irg);
-				}
-			}
-		}
-	}
-	return proj;
-}  /* equivalent_node_Proj_Store */
-
 /**
 * Does all optimizations on nodes that must be done on its Projs
 * because of creating new nodes.
 */
@@ -1724,13 +1591,10 @@ static ir_op_ops *firm_set_default_equivalent_node(ir_opcode code, ir_op_ops *op
 	CASE(And);
 	CASE(Conv);
 	CASE(Phi);
-	CASE(Sync);
 	CASE_PROJ(Tuple);
 	CASE_PROJ(Div);
 	CASE_PROJ(CopyB);
 	CASE_PROJ(Bound);
-	CASE_PROJ(Load);
-	CASE_PROJ(Store);
 	CASE(Proj);
 	CASE(Id);
 	CASE(Mux);
@@ -2782,7 +2646,7 @@ make_tuple:
 		turn_into_tuple(n, pn_Div_max);
 		set_Tuple_pred(n, pn_Div_M,         mem);
 		set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(blk));
-		set_Tuple_pred(n, pn_Div_X_except,  get_irg_bad(irg));
+		set_Tuple_pred(n, pn_Div_X_except,  new_r_Bad(irg, mode_X));
 		set_Tuple_pred(n, pn_Div_res,       value);
 	}
 	return n;
@@ -2874,7 +2738,7 @@ make_tuple:
 		turn_into_tuple(n, pn_Mod_max);
 		set_Tuple_pred(n, pn_Mod_M,         mem);
 		set_Tuple_pred(n, pn_Mod_X_regular, new_r_Jmp(blk));
-		set_Tuple_pred(n, pn_Mod_X_except,  get_irg_bad(irg));
+		set_Tuple_pred(n, pn_Mod_X_except,  new_r_Bad(irg, mode_X));
 		set_Tuple_pred(n, pn_Mod_res,       value);
 	}
 	return n;
@@ -2907,11 +2771,11 @@ static ir_node *transform_node_Cond(ir_node *n)
 		jmp = new_r_Jmp(blk);
 		turn_into_tuple(n, pn_Cond_max);
 		if (ta == tarval_b_true) {
-			set_Tuple_pred(n, pn_Cond_false, get_irg_bad(irg));
+			set_Tuple_pred(n, pn_Cond_false, new_r_Bad(irg, mode_X));
 			set_Tuple_pred(n, pn_Cond_true, jmp);
 		} else {
 			set_Tuple_pred(n, pn_Cond_false, jmp);
-			set_Tuple_pred(n, pn_Cond_true, get_irg_bad(irg));
+			set_Tuple_pred(n, pn_Cond_true, new_r_Bad(irg, mode_X));
 		}
 		/* We might generate an endless loop, so keep it alive. */
 		add_End_keepalive(get_irg_end(irg), blk);
@@ -3465,7 +3329,7 @@ static ir_node *transform_node_Proj_Load(ir_node *proj)
 				if (get_Proj_proj(proj) == pn_Load_X_except) {
 					ir_graph *irg = get_irn_irg(proj);
 					DBG_OPT_EXC_REM(proj);
-					return get_irg_bad(irg);
+					return new_r_Bad(irg, mode_X);
 				} else {
 					ir_node *blk = get_nodes_block(load);
 					return new_r_Jmp(blk);
@@ -3497,7 +3361,7 @@ static ir_node *transform_node_Proj_Store(ir_node *proj)
 				if (get_Proj_proj(proj) == pn_Store_X_except) {
 					ir_graph *irg = get_irn_irg(proj);
 					DBG_OPT_EXC_REM(proj);
-					return get_irg_bad(irg);
+					return new_r_Bad(irg, mode_X);
 				} else {
 					ir_node *blk = get_nodes_block(store);
 					return new_r_Jmp(blk);
@@ -3539,7 +3403,7 @@ static ir_node *transform_node_Proj_Div(ir_node *proj)
 			ir_graph *irg = get_irn_irg(proj);
 			/* we found an exception handler, remove it */
 			DBG_OPT_EXC_REM(proj);
-			return get_irg_bad(irg);
+			return new_r_Bad(irg, mode_X);
 		}
 
 	case pn_Div_M: {
@@ -3594,7 +3458,7 @@ static ir_node *transform_node_Proj_Mod(ir_node *proj)
 			ir_graph *irg = get_irn_irg(proj);
 			/* we found an exception handler, remove it */
 			DBG_OPT_EXC_REM(proj);
-			return get_irg_bad(irg);
+			return new_r_Bad(irg, mode_X);
 		}
 
 	case pn_Mod_M: {
@@ -3651,7 +3515,7 @@ static ir_node *transform_node_Proj_Cond(ir_node *proj)
 			} else {
 				ir_graph *irg = get_irn_irg(proj);
 				/* this case will NEVER be taken, kill it */
-				return get_irg_bad(irg);
+				return new_r_Bad(irg, mode_X);
 			}
 		}
 	} else {
@@ -3665,19 +3529,19 @@ static ir_node *transform_node_Proj_Cond(ir_node *proj)
 			ir_relation cmp_result = tarval_cmp(b_vrp->range_bottom, tp);
 			ir_relation cmp_result2 = tarval_cmp(b_vrp->range_top, tp);
 
-			if ((cmp_result & ir_relation_greater) == cmp_result && (cmp_result2
-			    & ir_relation_less) == cmp_result2) {
+			if ((cmp_result & ir_relation_greater) == cmp_result
+			    && (cmp_result2 & ir_relation_less) == cmp_result2) {
 				ir_graph *irg = get_irn_irg(proj);
-				return get_irg_bad(irg);
+				return new_r_Bad(irg, mode_X);
 			}
 		} else if (b_vrp->range_type == VRP_ANTIRANGE) {
 			ir_relation cmp_result = tarval_cmp(b_vrp->range_bottom, tp);
 			ir_relation cmp_result2 = tarval_cmp(b_vrp->range_top, tp);
 
-			if ((cmp_result & ir_relation_less_equal) == cmp_result && (cmp_result2
-			    & ir_relation_greater_equal) == cmp_result2) {
+			if ((cmp_result & ir_relation_less_equal) == cmp_result
+			    && (cmp_result2 & ir_relation_greater_equal) == cmp_result2) {
 				ir_graph *irg = get_irn_irg(proj);
-				return get_irg_bad(irg);
+				return new_r_Bad(irg, mode_X);
 			}
 		}
@@ -3686,7 +3550,7 @@ static ir_node *transform_node_Proj_Cond(ir_node *proj)
 		    b_vrp->bits_set
 		    ) == ir_relation_equal)) {
 			ir_graph *irg = get_irn_irg(proj);
-			return get_irg_bad(irg);
+			return new_r_Bad(irg, mode_X);
 		}
 
 		if (!(tarval_cmp(
@@ -3696,10 +3560,8 @@ static ir_node *transform_node_Proj_Cond(ir_node *proj)
 		      tarval_not(b_vrp->bits_not_set))
 		      == ir_relation_equal)) {
 			ir_graph *irg = get_irn_irg(proj);
-			return get_irg_bad(irg);
+			return new_r_Bad(irg, mode_X);
 		}
-
-
 		}
 	}
@@ -4408,10 +4270,12 @@ static ir_node *transform_node_Proj_CopyB(ir_node *proj)
 			DBG_OPT_EXC_REM(proj);
 			proj = new_r_Jmp(get_nodes_block(copyb));
 			break;
-		case pn_CopyB_X_except:
+		case pn_CopyB_X_except: {
+			ir_graph *irg = get_irn_irg(proj);
 			DBG_OPT_EXC_REM(proj);
-			proj = get_irg_bad(get_irn_irg(proj));
+			proj = new_r_Bad(irg, mode_X);
 			break;
+		}
 		default:
 			break;
 		}
@@ -4459,7 +4323,7 @@ static ir_node *transform_node_Proj_Bound(ir_node *proj)
 			break;
 		case pn_Bound_X_except:
 			DBG_OPT_EXC_REM(proj);
-			proj = get_irg_bad(get_irn_irg(proj));
+			proj = new_r_Bad(get_irn_irg(proj), mode_X);
 			break;
 		case pn_Bound_res:
 			proj = idx;
@@ -4489,13 +4353,45 @@ static ir_node *transform_node_Proj(ir_node *proj)
 	return proj;
 }  /* transform_node_Proj */
 
+static ir_node *transform_node_Block(ir_node *block)
+{
+	ir_graph *irg = get_irn_irg(block);
+
+	if (!is_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK))
+		return block;
+	/* don't optimize labeled blocks */
+	if (has_Block_entity(block))
+		return block;
+	if (!get_Block_matured(block))
+		return block;
+
+	/* remove blocks with only Bad inputs (or no inputs) */
+	{
+		int i;
+		int n_cfgpreds = get_Block_n_cfgpreds(block);
+
+		for (i = 0; i < n_cfgpreds; ++i) {
+			ir_node *pred = get_Block_cfgpred(block, i);
+			if (!is_Bad(pred))
+				break;
+		}
+		/* only bad unreachable inputs? It's unreachable code (unless it is the
+		 * start or end block) */
+		if (i >= n_cfgpreds && block != get_irg_start_block(irg)
+		    && block != get_irg_end_block(irg)) {
+			return new_r_Bad(irg, mode_BB);
+		}
+	}
+	return block;
+}
+
 static ir_node *transform_node_Phi(ir_node *phi)
 {
 	int       n     = get_irn_arity(phi);
 	ir_mode  *mode  = get_irn_mode(phi);
 	ir_node  *block = get_nodes_block(phi);
 	ir_graph *irg   = get_irn_irg(phi);
-	ir_node  *bad   = get_irg_bad(irg);
+	ir_node  *bad   = NULL;
 	int       i;
 
 	/* Set phi-operands for bad-block inputs to bad */
@@ -4503,6 +4399,8 @@ static ir_node *transform_node_Phi(ir_node *phi)
 		ir_node *pred = get_Block_cfgpred(block, i);
 		if (!is_Bad(pred))
 			continue;
+		if (bad == NULL)
+			bad = new_r_Bad(irg, mode);
 		set_irn_n(phi, i, bad);
 	}
@@ -5582,6 +5480,24 @@ static ir_node *transform_node_Sync(ir_node *n)
 		int      pred_arity;
 		int      j;
 
+		/* Remove Bad predecessors */
+		if (is_Bad(pred)) {
+			del_Sync_n(n, i);
+			--arity;
+			continue;
+		}
+
+		/* Remove duplicate predecessors */
+		for (j = 0; j < i; ++j) {
+			if (get_Sync_pred(n, j) == pred) {
+				del_Sync_n(n, i);
+				--arity;
+				break;
+			}
+		}
+		if (j < i)
+			continue;
+
 		if (!is_Sync(pred)) {
 			++i;
 			continue;
@@ -5606,11 +5522,18 @@ static ir_node *transform_node_Sync(ir_node *n)
 		}
 	}
 
+	if (arity == 0) {
+		ir_graph *irg = get_irn_irg(n);
+		return new_r_Bad(irg, mode_M);
+	}
+	if (arity == 1) {
+		return get_Sync_pred(n, 0);
+	}
+
 	/* rehash the sync node */
 	add_identities(n);
-
 	return n;
-}  /* transform_node_Sync */
+}
@@ -5640,7 +5563,7 @@ static ir_node *transform_node_Load(ir_node *n)
 		ir_node  *block = get_nodes_block(n);
 		ir_node  *jmp   = new_r_Jmp(block);
 		ir_graph *irg   = get_irn_irg(n);
-		ir_node  *bad   = get_irg_bad(irg);
+		ir_node  *bad   = new_r_Bad(irg, mode_X);
 		ir_mode  *mode  = get_Load_mode(n);
 		ir_node  *res   = new_r_Proj(pred_load, mode, pn_Load_res);
 		ir_node  *in[pn_Load_max] = { mem, jmp, bad, res };
@@ -5661,7 +5584,7 @@ static ir_node *transform_node_Load(ir_node *n)
 		ir_node  *block = get_nodes_block(n);
 		ir_node  *jmp   = new_r_Jmp(block);
 		ir_graph *irg   = get_irn_irg(n);
-		ir_node  *bad   = get_irg_bad(irg);
+		ir_node  *bad   = new_r_Bad(irg, mode_X);
 		ir_node  *res   = value;
 		ir_node  *in[pn_Load_max] = { mem, jmp, bad, res };
 		ir_node  *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in);
@@ -5794,6 +5717,7 @@ static ir_op_ops *firm_set_default_transform_node(ir_opcode code, ir_op_ops *ops
 	switch (code) {
 	CASE(Add);
 	CASE(And);
+	CASE(Block);
 	CASE(Call);
 	CASE(Cmp);
 	CASE(Conv);
@@ -6270,18 +6194,13 @@ void visit_all_identities(ir_graph *irg, irg_walk_func visit, void *env)
 	current_ir_graph = rem;
 }  /* visit_all_identities */
 
-/**
- * Garbage in, garbage out. If a node has a dead input, i.e., the
- * Bad node is input to the node, return the Bad node.
- */
-static ir_node *gigo(ir_node *node)
+static bool is_unreachable(ir_node *node)
 {
 	ir_op *op = get_irn_op(node);
 
 	/* Code in "Bad" blocks is unreachable and can be replaced by Bad */
 	if (op != op_Block && is_Bad(get_nodes_block(node))) {
-		ir_graph *irg = get_irn_irg(node);
-		return get_irg_bad(irg);
+		return true;
 	}
 
 	return false;
@@ -6306,13 +6225,11 @@ ir_node *optimize_node(ir_node *n)
 	/* Remove nodes with dead (Bad) input.
 	   Run always for transformation induced Bads. */
-	n = gigo(n);
-	if (n != oldn) {
-		edges_node_deleted(oldn);
-
-		/* We found an existing, better node, so we can deallocate the old node. */
-		irg_kill_node(irg, oldn);
-		return n;
+	if (is_unreachable(n)) {
+		ir_mode *mode = get_irn_mode(n);
+		edges_node_deleted(n);
+		irg_kill_node(irg, n);
+		return new_r_Bad(irg, mode);
 	}
 
 	/* constant expression evaluation / constant folding */
@@ -6415,9 +6332,11 @@ ir_node *optimize_in_place_2(ir_node *n)
 	/* Remove nodes with dead (Bad) input.
 	   Run always for transformation induced Bads. */
-	n = gigo(n);
-	if (is_Bad(n))
-		return n;
+	if (is_unreachable(n)) {
+		ir_graph *irg  = get_irn_irg(n);
+		ir_mode  *mode = get_irn_mode(n);
+		return new_r_Bad(irg, mode);
+	}
 
 	/* constant expression evaluation / constant folding */
 	if (get_opt_constant_folding()) {
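The iropt.c hunks above replace the old gigo() ("garbage in, garbage out")
helper with an is_unreachable() predicate: a node sitting in a Bad block is
no longer swapped for the shared Bad but for a fresh Bad of the node's own
mode. A condensed sketch of that policy, assuming this revision's API (the
function name is illustrative):

    #include <libfirm/firm.h>

    /* Give back a mode-correct Bad for a node in an unreachable block,
     * otherwise the node itself (sketch of the new optimize_node() rule). */
    static ir_node *replace_if_unreachable(ir_node *node)
    {
        if (!is_Block(node) && is_Bad(get_nodes_block(node))) {
            ir_graph *irg  = get_irn_irg(node);
            ir_mode  *mode = get_irn_mode(node);
            return new_r_Bad(irg, mode);
        }
        return node;
    }
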
diff --git a/ir/ir/irtypes.h b/ir/ir/irtypes.h
index 23e2cc0c2..b17299ce3 100644
--- a/ir/ir/irtypes.h
+++ b/ir/ir/irtypes.h
@@ -432,8 +432,6 @@ enum irg_anchors {
 	anchor_frame,        /**< methods frame */
 	anchor_initial_mem,  /**< initial memory of this graph */
 	anchor_args,         /**< methods arguments */
-	anchor_bad,          /**< bad node of this ir_graph, the one and
-	                          only in this graph */
 	anchor_no_mem,       /**< NoMem node of this ir_graph, the one and only in this graph */
 	anchor_last
 };
diff --git a/ir/ir/irverify.c b/ir/ir/irverify.c
index 0b1f326f9..51d6c3a93 100644
--- a/ir/ir/irverify.c
+++ b/ir/ir/irverify.c
@@ -353,8 +353,7 @@ static int verify_node_Proj_Cond(ir_node *pred, ir_node *p)
 	ASSERT_AND_RET_DBG(
 		(
 			(proj >= 0 && mode == mode_X && get_irn_mode(get_Cond_selector(pred)) == mode_b) ||  /* compare */
-			(mode == mode_X && mode_is_int(get_irn_mode(get_Cond_selector(pred)))) ||           /* switch */
-			is_Bad(get_Cond_selector(pred))                                                      /* rare */
+			(mode == mode_X && mode_is_int(get_irn_mode(get_Cond_selector(pred))))               /* switch */
 		),
 		"wrong Proj from Cond", 0,
 		show_proj_failure(p);
@@ -662,10 +661,6 @@ static int verify_node_Proj_Proj(ir_node *pred, ir_node *p)
 			/* We don't test */
 			break;
 
-		case iro_Bad:
-			/* hmm, optimization did not remove it */
-			break;
-
 		default:
 			/* ASSERT_AND_RET(0, "Unknown opcode", 0); */
 			break;
@@ -720,9 +715,6 @@ static int verify_node_Proj_Bound(ir_node *n, ir_node *p)
 	ir_mode *mode = get_irn_mode(p);
 	long proj     = get_Proj_proj(p);
 
-	/* ignore Bound checks of Bad */
-	if (is_Bad(get_Bound_index(n)))
-		return 1;
 	ASSERT_AND_RET_DBG(
 		(
 			(proj == pn_Bound_M && mode == mode_M) ||
@@ -766,8 +758,7 @@ static int verify_node_Block(ir_node *n, ir_graph *irg)
 	for (i = get_Block_n_cfgpreds(n) - 1; i >= 0; --i) {
 		ir_node *pred = get_Block_cfgpred(n, i);
-		ASSERT_AND_RET(
-			is_Bad(pred) || (get_irn_mode(pred) == mode_X),
+		ASSERT_AND_RET(get_irn_mode(pred) == mode_X,
 			"Block node must have a mode_X predecessor", 0);
 		ASSERT_AND_RET(is_cfop(skip_Proj(skip_Tuple(pred))), "Block predecessor must be a cfop", 0);
 	}
@@ -1437,29 +1428,21 @@ static int verify_node_Phi(ir_node *n, ir_graph *irg)
 	/* Phi: BB x dataM^n --> dataM */
 	for (i = get_Phi_n_preds(n) - 1; i >= 0; --i) {
 		ir_node *pred = get_Phi_pred(n, i);
-		if (!is_Bad(pred)) {
-			ASSERT_AND_RET_DBG(
-				get_irn_mode(pred) == mymode,
-				"Phi node", 0,
-				show_phi_failure(n, pred, i);
-			);
-		}
+		ASSERT_AND_RET_DBG(get_irn_mode(pred) == mymode,
+		                   "Phi node", 0, show_phi_failure(n, pred, i);
+		);
 	}
 	ASSERT_AND_RET(mode_is_dataM(mymode) || mymode == mode_b, "Phi node", 0 );
 
+#if 0
 	if (mymode == mode_M) {
 		for (i = get_Phi_n_preds(n) - 1; i >= 0; --i) {
 			int j;
 			ir_node *pred_i = get_Phi_pred(n, i);
 
-			if (is_Bad(pred_i))
-				continue;
 			for (j = i - 1; j >= 0; --j) {
 				ir_node *pred_j = get_Phi_pred(n, j);
 
-				if (is_Bad(pred_j))
-					continue;
-#if 0
 				/* currently this checks fails for blocks with exception
 				   outputs (and these are NOT basic blocks).  So it is
 				   disabled yet. */
 				ASSERT_AND_RET_DBG(
@@ -1468,10 +1451,10 @@ static int verify_node_Phi(ir_node *n, ir_graph *irg)
 					0,
 					ir_printf("%+F and %+F of %+F are in %+F\n", pred_i, pred_j, n, get_irn_n(pred_i, -1))
 				);
-#endif
 			}
 		}
 	}
+#endif
 
 	return 1;
 }
@@ -1791,16 +1774,6 @@ int irn_verify_irg(ir_node *n, ir_graph *irg)
 		               ir_printf("node %+F", n));
 	}
 
-	/* We don't want to test nodes whose predecessors are Bad,
-	   as we would have to special case that for each operation. */
-	if (op != op_Phi && op != op_Block) {
-		int i;
-		for (i = get_irn_arity(n) - 1; i >= 0; --i) {
-			if (is_Bad(get_irn_n(n, i)))
-				return 1;
-		}
-	}
-
 	if (op->ops.verify_node)
 		return op->ops.verify_node(n, irg);
diff --git a/ir/lower/lower_calls.c b/ir/lower/lower_calls.c
index 51ae47fb0..6fdd20b07 100644
--- a/ir/lower/lower_calls.c
+++ b/ir/lower/lower_calls.c
@@ -500,7 +500,7 @@ static void add_hidden_param(ir_graph *irg, size_t n_com, ir_node **ins, cl_entr
 			turn_into_tuple(p, pn_CopyB_max);
 			set_Tuple_pred(p, pn_CopyB_M, mem);
 			set_Tuple_pred(p, pn_CopyB_X_regular, new_r_Jmp(blk));
-			set_Tuple_pred(p, pn_CopyB_X_except, get_irg_bad(irg));
+			set_Tuple_pred(p, pn_CopyB_X_except, new_r_Bad(irg, mode_X));
 			++n_args;
 		}
diff --git a/ir/lower/lower_copyb.c b/ir/lower/lower_copyb.c
index fd38e3f4a..963900b06 100644
--- a/ir/lower/lower_copyb.c
+++ b/ir/lower/lower_copyb.c
@@ -120,8 +120,8 @@ static void lower_copyb_nodes(ir_node *irn, unsigned mode_bytes)
 
 	turn_into_tuple(irn, pn_CopyB_max);
 	set_Tuple_pred(irn, pn_CopyB_M, mem);
-	set_Tuple_pred(irn, pn_CopyB_X_regular, get_irg_bad(irg));
-	set_Tuple_pred(irn, pn_CopyB_X_except, get_irg_bad(irg));
+	set_Tuple_pred(irn, pn_CopyB_X_regular, new_r_Bad(irg, mode_X));
+	set_Tuple_pred(irn, pn_CopyB_X_except, new_r_Bad(irg, mode_X));
 }
 
 /**
diff --git a/ir/lower/lower_intrinsics.c b/ir/lower/lower_intrinsics.c
index bab53c600..68be19397 100644
--- a/ir/lower/lower_intrinsics.c
+++ b/ir/lower/lower_intrinsics.c
@@ -227,7 +227,7 @@ static void replace_call(ir_node *irn, ir_node *call, ir_node *mem, ir_node *reg
 		set_opt_cse(0);
 		reg_jmp = new_r_Jmp(block);
 		set_opt_cse(old_cse);
-		exc_jmp = new_r_Bad(irg);
+		exc_jmp = new_r_Bad(irg, mode_X);
 	}
 	irn = new_r_Tuple(block, 1, &irn);
@@ -1217,8 +1217,6 @@ int i_mapper_RuntimeCall(ir_node *node, runtime_rt *rt)
 	/* we are ready */
 	turn_into_tuple(node, n_proj);
 
-	for (i = 0; i < n_proj; ++i)
-		set_Tuple_pred(node, i, new_r_Bad(irg));
 	if (rt->mem_proj_nr >= 0)
 		set_Tuple_pred(node, rt->mem_proj_nr, new_r_Proj(call, mode_M, pn_Call_M));
 	if (!is_NoMem(mem)) {
diff --git a/ir/lower/lower_mode_b.c b/ir/lower/lower_mode_b.c
index 2821a67a6..1c3dce6d0 100644
--- a/ir/lower/lower_mode_b.c
+++ b/ir/lower/lower_mode_b.c
@@ -69,23 +69,10 @@ static ir_node **check_later;
 */
 static void maybe_kill_node(ir_node *node)
 {
-	ir_graph *irg;
-	int       i, arity;
-
 	if (get_irn_n_edges(node) != 0)
 		return;
 
-	irg = get_irn_irg(node);
-
-	assert(!is_Bad(node));
-
-	arity = get_irn_arity(node);
-	for (i = 0; i < arity; ++i) {
-		set_irn_n(node, i, new_r_Bad(irg));
-	}
-	set_nodes_block(node, new_r_Bad(irg));
-
-	edges_node_deleted(node);
+	kill_node(node);
 }
 
 static ir_node *create_not(dbg_info *dbgi, ir_node *node)
diff --git a/ir/opt/cfopt.c b/ir/opt/cfopt.c
index a68d28674..a9a31ff95 100644
--- a/ir/opt/cfopt.c
+++ b/ir/opt/cfopt.c
@@ -369,8 +369,9 @@ static void optimize_blocks(ir_node *b, void *ctx)
 				if (get_Block_idom(b) != predb) {
 					/* predb is not the dominator. There can't be uses of
 					 * pred's Phi nodes, kill them .*/
-					ir_graph *irg = get_irn_irg(b);
-					exchange(phi, get_irg_bad(irg));
+					ir_graph *irg  = get_irn_irg(b);
+					ir_mode  *mode = get_irn_mode(phi);
+					exchange(phi, new_r_Bad(irg, mode));
 				} else {
 					/* predb is the direct dominator of b. There might be uses
 					 * of the Phi nodes from predb in further block, so move
 					 * this phi from the predecessor into the block b */
@@ -455,8 +456,8 @@ static void optimize_blocks(ir_node *b, void *ctx)
 					in[n_preds++] = predpred;
 				}
 				/* Remove block+jump as it might be kept alive. */
-				exchange(pred, get_irg_bad(get_irn_irg(b)));
-				exchange(predb, get_irg_bad(get_irn_irg(b)));
+				exchange(pred, new_r_Bad(get_irn_irg(b), mode_X));
+				exchange(predb, new_r_Bad(get_irn_irg(b), mode_BB));
 			} else {
 				/* case 3: */
 				in[n_preds++] = pred;
@@ -521,7 +522,7 @@ static bool handle_switch_cond(ir_node *cond)
 		long num     = get_tarval_long(tv);
 		long def_num = get_Cond_default_proj(cond);
 		ir_graph *irg = get_irn_irg(cond);
-		ir_node *bad = get_irg_bad(irg);
+		ir_node *bad  = new_r_Bad(irg, mode_X);
 
 		if (def_num == get_Proj_proj(proj1)) {
 			/* first one is the defProj */
diff --git a/ir/opt/combo.c b/ir/opt/combo.c
index ad3d38511..3278a7295 100644
--- a/ir/opt/combo.c
+++ b/ir/opt/combo.c
@@ -3253,7 +3253,9 @@ static void apply_result(ir_node *irn, void *ctx)
 		node_t *block = get_irn_node(get_nodes_block(irn));
 
 		if (block->type.tv == tarval_unreachable) {
-			ir_node *bad = get_irg_bad(current_ir_graph);
+			ir_graph *irg  = get_irn_irg(irn);
+			ir_mode  *mode = get_irn_mode(node->node);
+			ir_node  *bad  = new_r_Bad(irg, mode);
 
 			/* here, bad might already have a node, but this can be safely ignored
 			   as long as bad has at least ONE valid node */
diff --git a/ir/opt/escape_ana.c b/ir/opt/escape_ana.c
index ea3cffd65..ea8dd5c90 100644
--- a/ir/opt/escape_ana.c
+++ b/ir/opt/escape_ana.c
@@ -397,7 +397,7 @@ static void transform_allocs(ir_graph *irg, walk_env_t *env)
 			turn_into_tuple(alloc, pn_Alloc_max);
 			set_Tuple_pred(alloc, pn_Alloc_M, mem);
 			set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(blk));
-			set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg));
+			set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg, mode_X));
 
 			++env->nr_deads;
 		}
@@ -442,7 +442,7 @@ static void transform_allocs(ir_graph *irg, walk_env_t *env)
 		turn_into_tuple(alloc, pn_Alloc_max);
 		set_Tuple_pred(alloc, pn_Alloc_M, mem);
 		set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(blk));
-		set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg));
+		set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg, mode_X));
 		set_Tuple_pred(alloc, pn_Alloc_res, sel);
 
 		++env->nr_removed;
@@ -488,10 +488,10 @@ static void transform_alloc_calls(ir_graph *irg, walk_env_t *env)
 			mem = get_Call_mem(call);
 			blk = get_nodes_block(call);
 			turn_into_tuple(call, pn_Call_max);
-			set_Tuple_pred(call, pn_Call_M,         mem);
-			set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(blk));
-			set_Tuple_pred(call, pn_Call_X_except,  new_r_Bad(irg));
-			set_Tuple_pred(call, pn_Call_T_result,  new_r_Bad(irg));
+			set_Tuple_pred(call, pn_Call_M,         mem);
+			set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(blk));
+			set_Tuple_pred(call, pn_Call_X_except,  new_r_Bad(irg, mode_X));
+			set_Tuple_pred(call, pn_Call_T_result,  new_r_Bad(irg, mode_T));
 
 			++env->nr_deads;
 		}
diff --git a/ir/opt/fp-vrp.c b/ir/opt/fp-vrp.c
index e7ba49a31..3719d6c12 100644
--- a/ir/opt/fp-vrp.c
+++ b/ir/opt/fp-vrp.c
@@ -637,6 +637,18 @@ static void first_round(ir_node* const irn, void* const env)
 	}
 }
 
+static ir_node *make_bad_block(ir_graph *irg)
+{
+	ir_node *bad = new_r_Bad(irg, mode_BB);
+	bitinfo *bb  = get_bitinfo(bad);
+	if (bb == NULL) {
+		ir_tarval* const f = get_tarval_b_false();
+		ir_tarval* const t = get_tarval_b_true();
+		set_bitinfo(bad, f, t); /* Undefined. */
+	}
+	return bad;
+}
+
 static void apply_result(ir_node* const irn, void* ctx)
 {
 	environment_t* env = (environment_t*)ctx;
@@ -650,7 +662,8 @@ static void apply_result(ir_node* const irn, void* ctx)
 		block_b = get_bitinfo(irn);
 		/* Trivially unreachable blocks have no info. */
 		if (block_b == NULL || block_b->z == get_tarval_b_false()) {
-			exchange(irn, get_irg_bad(get_Block_irg(irn)));
+			ir_node *bad = make_bad_block(get_irn_irg(irn));
+			exchange(irn, bad);
 			env->modified = 1;
 		}
 		return;
@@ -661,7 +674,10 @@ static void apply_result(ir_node* const irn, void* ctx)
 	/* Trivially unreachable blocks have no info. */
 	if (block_b == NULL || block_b->z == get_tarval_b_false()) {
 		/* Unreachable blocks might be replaced before the nodes in them. */
-		exchange(irn, is_Bad(block) ? block : get_irg_bad(get_Block_irg(block)));
+		ir_mode  *mode = get_irn_mode(irn);
+		ir_graph *irg  = get_irn_irg(irn);
+		ir_node  *bad  = new_r_Bad(irg, mode);
+		exchange(irn, bad);
 		env->modified = 1;
 		return;
 	}
@@ -690,7 +706,7 @@ static void apply_result(ir_node* const irn, void* ctx)
 				add_End_keepalive(get_irg_end(irg), block);
 				n = new_r_Jmp(block);
 			} else {
-				n = new_r_Bad(irg);
+				n = new_r_Bad(irg, mode_X);
 				/* Transferring analysis information to the bad node makes it a
 				 * candidate for replacement. */
 				goto exchange_only;
@@ -815,13 +831,13 @@ void fixpoint_vrp(ir_graph* const irg)
 {
 	pdeq* const q = new_pdeq();
 
-	/* We need this extra step because the dom tree does not contain unreachable
-	   blocks in Firm. Moreover build phi list. */
+	/* We need this extra step because the dom tree does not contain
+	 * unreachable blocks in Firm. Moreover build phi list. */
 	irg_walk_anchors(irg, clear_links, build_phi_lists, NULL);
 
-	{ ir_tarval* const f = get_tarval_b_false();
+	{
+		ir_tarval* const f = get_tarval_b_false();
 		ir_tarval* const t = get_tarval_b_true();
-		set_bitinfo(get_irg_bad(irg), f, t); /* Undefined. */
 		set_bitinfo(get_irg_end_block(irg), t, f); /* Reachable. */
 	}
diff --git a/ir/opt/funccall.c b/ir/opt/funccall.c
index ad2e295f8..eec1b362c 100644
--- a/ir/opt/funccall.c
+++ b/ir/opt/funccall.c
@@ -225,7 +225,7 @@ static void fix_const_call_lists(ir_graph *irg, env_t *ctx)
 			}
 			case pn_Call_X_except:
 				exc_changed = 1;
-				exchange(proj, get_irg_bad(irg));
+				exchange(proj, new_r_Bad(irg, mode_X));
 				break;
 			case pn_Call_X_regular: {
 				ir_node *block = get_nodes_block(call);
@@ -360,7 +360,7 @@ static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *pr
 			switch (get_Proj_proj(proj)) {
 			case pn_Call_X_except:
 				exc_changed = 1;
-				exchange(proj, get_irg_bad(irg));
+				exchange(proj, new_r_Bad(irg, mode_X));
 				break;
 			case pn_Call_X_regular: {
 				ir_node *block = get_nodes_block(call);
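A side effect worth noting in the fp-vrp.c hunk: because Bad is no longer a
shared singleton, an analysis can attach per-instance data to each Bad it
creates, as make_bad_block() does with its bitinfo. A sketch of the
consumer side, following that pattern (the function name is illustrative):

    #include <libfirm/firm.h>

    /* Replace an unreachable block by its own Bad; any analysis data
     * can now live on this particular Bad instead of a global one. */
    static void kill_unreachable_block(ir_node *block)
    {
        ir_graph *irg = get_irn_irg(block);
        ir_node  *bad = new_r_Bad(irg, mode_BB);
        exchange(block, bad);
    }
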
diff --git a/ir/opt/gvn_pre.c b/ir/opt/gvn_pre.c
index f4407faa2..8ef34a4b9 100644
--- a/ir/opt/gvn_pre.c
+++ b/ir/opt/gvn_pre.c
@@ -640,7 +640,7 @@ static void insert_nodes(ir_node *block, void *ctx)
 			/* ignore bad blocks. */
 			if (is_Bad(pred_blk)) {
 				ir_graph *irg = get_irn_irg(pred_blk);
-				in[pos] = new_r_Bad(irg);
+				in[pos] = new_r_Bad(irg, mode_X);
 				continue;
 			}
diff --git a/ir/opt/jumpthreading.c b/ir/opt/jumpthreading.c
index 39e07dbe9..095f6a001 100644
--- a/ir/opt/jumpthreading.c
+++ b/ir/opt/jumpthreading.c
@@ -86,7 +86,7 @@ static ir_node *search_def_and_create_phis(ir_node *block, ir_mode *mode,
 	/* In case of a bad input to a block we need to return the bad value */
 	if (is_Bad(block)) {
 		ir_graph *irg = get_irn_irg(block);
-		return new_r_Bad(irg);
+		return new_r_Bad(irg, mode);
 	}
 
 	/* the other defs can't be marked for cases where a user of the original
@@ -631,7 +631,7 @@ static void thread_jumps(ir_node* block, void* data)
 	int selector_evaluated;
 	const ir_edge_t *edge, *next;
 	ir_graph *irg;
-	ir_node *bad;
+	ir_node *badX;
 	int      cnst_pos;
 
 	if (get_Block_n_cfgpreds(block) != 1)
@@ -693,7 +693,7 @@ static void thread_jumps(ir_node* block, void* data)
 
 	if (selector_evaluated == 0) {
 		ir_graph *irg = get_irn_irg(block);
-		bad = new_r_Bad(irg);
+		ir_node  *bad = new_r_Bad(irg, mode_X);
 		exchange(projx, bad);
 		*changed = 1;
 		return;
@@ -719,18 +719,20 @@ static void thread_jumps(ir_node* block, void* data)
 	/* we have to remove the edge towards the pred as the pred now
 	 * jumps into the true_block. We also have to shorten Phis
 	 * in our block because of this */
-	bad      = new_r_Bad(irg);
+	badX     = new_r_Bad(irg, mode_X);
 	cnst_pos = env.cnst_pos;
 
 	/* shorten Phis */
 	foreach_out_edge_safe(env.cnst_pred, edge, next) {
 		ir_node *node = get_edge_src_irn(edge);
 
-		if (is_Phi(node))
+		if (is_Phi(node)) {
+			ir_node *bad = new_r_Bad(irg, get_irn_mode(node));
 			set_Phi_pred(node, cnst_pos, bad);
+		}
 	}
 
-	set_Block_cfgpred(env.cnst_pred, cnst_pos, bad);
+	set_Block_cfgpred(env.cnst_pred, cnst_pos, badX);
 
 	/* the graph is changed now */
 	*changed = 1;
diff --git a/ir/opt/ldstopt.c b/ir/opt/ldstopt.c
index 1b22324ef..f6f4f2fa3 100644
--- a/ir/opt/ldstopt.c
+++ b/ir/opt/ldstopt.c
@@ -913,7 +913,7 @@ static int try_load_after_store(ir_node *load,
 	/* no exception */
 	if (info->projs[pn_Load_X_except]) {
 		ir_graph *irg = get_irn_irg(load);
-		exchange( info->projs[pn_Load_X_except], new_r_Bad(irg));
+		exchange( info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
 		res |= CF_CHANGED;
 	}
 	if (info->projs[pn_Load_X_regular]) {
@@ -1019,7 +1019,7 @@ static unsigned follow_Mem_chain(ir_node *load, ir_node *curr)
 			/* no exception */
 			if (info->projs[pn_Load_X_except]) {
 				ir_graph *irg = get_irn_irg(load);
-				exchange(info->projs[pn_Load_X_except], new_r_Bad(irg));
+				exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
 				res |= CF_CHANGED;
 			}
 			if (info->projs[pn_Load_X_regular]) {
@@ -1156,7 +1156,7 @@ static unsigned optimize_load(ir_node *load)
 		/* no exception, clear the info field as it might be checked later again */
 		if (info->projs[pn_Load_X_except]) {
 			ir_graph *irg = get_irn_irg(load);
-			exchange(info->projs[pn_Load_X_except], new_r_Bad(irg));
+			exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
 			info->projs[pn_Load_X_except] = NULL;
 			res |= CF_CHANGED;
 		}
@@ -1191,7 +1191,7 @@ static unsigned optimize_load(ir_node *load)
 			/* we completely replace the load by this value */
 			if (info->projs[pn_Load_X_except]) {
 				ir_graph *irg = get_irn_irg(load);
-				exchange(info->projs[pn_Load_X_except], new_r_Bad(irg));
+				exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
 				info->projs[pn_Load_X_except] = NULL;
 				res |= CF_CHANGED;
 			}
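The jumpthreading.c hunks above show the pattern for SSA reconstruction:
a dead path no longer yields the one shared Bad but a Bad of the mode being
searched for, and Phi shortening needs a per-mode Bad for each Phi it
touches. A condensed sketch under those assumptions (the helper name is
illustrative; the recursive search itself is omitted):

    #include <libfirm/firm.h>

    /* Value contributed by one predecessor block during SSA search. */
    static ir_node *def_for_pred(ir_node *pred_block, ir_mode *mode)
    {
        /* a Bad predecessor contributes a Bad value of the right mode */
        if (is_Bad(pred_block)) {
            ir_graph *irg = get_irn_irg(pred_block);
            return new_r_Bad(irg, mode);
        }
        /* ... otherwise continue the recursive definition search ... */
        return NULL;
    }
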
diff --git a/ir/opt/loop.c b/ir/opt/loop.c
index a7310420d..7d88ca56d 100644
--- a/ir/opt/loop.c
+++ b/ir/opt/loop.c
@@ -378,7 +378,7 @@ static ir_node *search_def_and_create_phis(ir_node *block, ir_mode *mode, int fi
 {
 	int i;
 	int n_cfgpreds;
-	ir_graph *irg;
+	ir_graph *irg = get_irn_irg(block);
 	ir_node *phi;
 	ir_node **in;
 
@@ -388,7 +388,7 @@ static ir_node *search_def_and_create_phis(ir_node *block, ir_mode *mode, int fi
 	 * Dead and bad blocks. */
 	if (get_irn_arity(block) < 1 || is_Bad(block)) {
 		DB((dbg, LEVEL_5, "ssa bad %N\n", block));
-		return new_Bad();
+		return new_r_Bad(irg, mode);
 	}
 
 	if (block == ssa_second_def_block && !first) {
@@ -403,7 +403,6 @@ static ir_node *search_def_and_create_phis(ir_node *block, ir_mode *mode, int fi
 		return value;
 	}
 
-	irg = get_irn_irg(block);
 	assert(block != get_irg_start_block(irg));
 
 	/* a Block with only 1 predecessor needs no Phi */
diff --git a/ir/opt/opt_inline.c b/ir/opt/opt_inline.c
index edadecd66..309e694f6 100644
--- a/ir/opt/opt_inline.c
+++ b/ir/opt/opt_inline.c
@@ -414,7 +414,6 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 	{
 		ir_node *start_block;
 		ir_node *start;
-		ir_node *bad;
 		ir_node *nomem;
 
 		start_block = get_irg_start_block(called_graph);
@@ -425,10 +424,6 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 		set_new_node(start, pre_call);
 		mark_irn_visited(start);
 
-		bad = get_irg_bad(called_graph);
-		set_new_node(bad, get_irg_bad(irg));
-		mark_irn_visited(bad);
-
 		nomem = get_irg_no_mem(called_graph);
 		set_new_node(nomem, get_irg_no_mem(irg));
 		mark_irn_visited(nomem);
@@ -540,7 +535,8 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 				ir_mode *mode = get_irn_mode(cf_pred[0]);
 				phi = new_r_Phi(post_bl, n_ret, cf_pred, mode);
 			} else {
-				phi = new_r_Bad(irg);
+				ir_mode *mode = get_irn_mode(cf_pred[0]);
+				phi = new_r_Bad(irg, mode);
 			}
 			res_pred[j] = phi;
 			/* Conserve Phi-list for further inlinings -- but might be optimized */
@@ -552,7 +548,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 		result_tuple = new_r_Tuple(post_bl, n_res, res_pred);
 		set_Tuple_pred(call, pn_Call_T_result, result_tuple);
 	} else {
-		set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg));
+		set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg, mode_T));
 	}
 	/* handle the regular call */
 	set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(post_bl));
@@ -588,7 +584,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 			set_Tuple_pred(call, pn_Call_X_except, new_r_Jmp(block));
 		}
 	} else {
-		set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
+		set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
 	}
 } else {
 	ir_node *main_end_bl;
@@ -615,7 +611,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 		for (i = 0; i < n_exc; ++i)
 			end_preds[main_end_bl_arity + i] = cf_pred[i];
 		set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
-		set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
+		set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
 		free(end_preds);
 	}
 	free(res_pred);
diff --git a/ir/opt/opt_ldst.c b/ir/opt/opt_ldst.c
index f03f2fcd4..9226a47bc 100644
--- a/ir/opt/opt_ldst.c
+++ b/ir/opt/opt_ldst.c
@@ -1065,7 +1065,7 @@ static void update_Load_memop(memop_t *m)
 			/* no exception, clear the m fields as it might be checked later again */
 			if (m->projs[pn_Load_X_except]) {
 				ir_graph *irg = get_irn_irg(ptr);
-				exchange(m->projs[pn_Load_X_except], new_r_Bad(irg));
+				exchange(m->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
 				m->projs[pn_Load_X_except] = NULL;
 				m->flags &= ~FLAG_EXCEPTION;
 				env.changed = 1;
@@ -1746,7 +1746,7 @@ static void replace_load(memop_t *op)
 	proj = op->projs[pn_Load_X_except];
 	if (proj != NULL) {
 		ir_graph *irg = get_irn_irg(load);
-		exchange(proj, new_r_Bad(irg));
+		exchange(proj, new_r_Bad(irg, mode_X));
 	}
 	proj = op->projs[pn_Load_X_regular];
 	if (proj != NULL) {
@@ -1773,7 +1773,7 @@ static void remove_store(memop_t *op)
 	proj = op->projs[pn_Store_X_except];
 	if (proj != NULL) {
 		ir_graph *irg = get_irn_irg(store);
-		exchange(proj, new_r_Bad(irg));
+		exchange(proj, new_r_Bad(irg, mode_X));
 	}
 	proj = op->projs[pn_Store_X_regular];
 	if (proj != NULL) {
diff --git a/ir/opt/return.c b/ir/opt/return.c
index 10dd2c725..4b20cf36f 100644
--- a/ir/opt/return.c
+++ b/ir/opt/return.c
@@ -340,7 +340,7 @@ void normalize_n_returns(ir_graph *irg)
 		}
 
 		/* remove the Jmp, we have placed a Return here */
-		exchange(jmp, new_r_Bad(irg));
+		exchange(jmp, new_r_Bad(irg, mode_X));
 	}
 
 	/*
@@ -353,7 +353,7 @@ void normalize_n_returns(ir_graph *irg)
 		n = get_End_n_keepalives(end);
 		for (i = 0; i < n; ++i) {
 			if (get_End_keepalive(end, i) == phiM) {
-				set_End_keepalive(end, i, new_r_Bad(irg));
+				set_End_keepalive(end, i, new_r_Bad(irg, mode_M));
 				break;
 			}
 		}
diff --git a/ir/opt/scalar_replace.c b/ir/opt/scalar_replace.c
index eab6b1d9b..465f45564 100644
--- a/ir/opt/scalar_replace.c
+++ b/ir/opt/scalar_replace.c
@@ -612,7 +612,7 @@ static void topologic_walker(ir_node *node, void *ctx)
 		set_Tuple_pred(node, pn_Load_M, mem);
 		set_Tuple_pred(node, pn_Load_res, val);
 		set_Tuple_pred(node, pn_Load_X_regular, new_r_Jmp(block));
-		set_Tuple_pred(node, pn_Load_X_except, new_r_Bad(irg));
+		set_Tuple_pred(node, pn_Load_X_except, new_r_Bad(irg, mode_X));
 	} else if (is_Store(node)) {
 		DB((dbg, SET_LEVEL_3, "  checking %+F for replacement ", node));
@@ -648,7 +648,7 @@ static void topologic_walker(ir_node *node, void *ctx)
 		turn_into_tuple(node, pn_Store_max);
 		set_Tuple_pred(node, pn_Store_M, mem);
 		set_Tuple_pred(node, pn_Store_X_regular, new_r_Jmp(block));
-		set_Tuple_pred(node, pn_Store_X_except, new_r_Bad(irg));
+		set_Tuple_pred(node, pn_Store_X_except, new_r_Bad(irg, mode_X));
 	}
 }
diff --git a/ir/opt/tailrec.c b/ir/opt/tailrec.c
index 230dd6b65..d1af572a4 100644
--- a/ir/opt/tailrec.c
+++ b/ir/opt/tailrec.c
@@ -292,7 +292,7 @@ static void do_opt_tail_rec(ir_graph *irg, tr_env *env)
 	}
 
 	if (n_locs > 0) {
-		ir_node *bad, *start_block;
+		ir_node *start_block;
 		ir_node **in;
 		ir_mode **modes;
 
@@ -318,8 +318,6 @@ static void do_opt_tail_rec(ir_graph *irg, tr_env *env)
 		mature_immBlock(start_block);
 
 		/* no: we can kill all returns */
-		bad = get_irg_bad(irg);
-
 		for (p = env->rets; p; p = n) {
 			ir_node *block = get_nodes_block(p);
 			ir_node *call, *mem, *jmp, *tuple;
@@ -338,20 +336,21 @@ static void do_opt_tail_rec(ir_graph *irg, tr_env *env)
 			set_optimize(rem);
 
 			for (i = 0; i < env->n_ress; ++i) {
+				ir_mode *mode = modes[i];
 				if (env->variants[i] != TR_DIRECT) {
-					in[i] = get_r_value(irg, i, modes[i]);
+					in[i] = get_r_value(irg, i, mode);
 				} else {
-					in[i] = bad;
+					in[i] = new_r_Bad(irg, mode);
 				}
 			}
 			/* create a new tuple for the return values */
 			tuple = new_r_Tuple(block, env->n_ress, in);
 
 			turn_into_tuple(call, pn_Call_max);
-			set_Tuple_pred(call, pn_Call_M,         mem);
-			set_Tuple_pred(call, pn_Call_X_regular, jmp);
-			set_Tuple_pred(call, pn_Call_X_except,  bad);
-			set_Tuple_pred(call, pn_Call_T_result,  tuple);
+			set_Tuple_pred(call, pn_Call_M,         mem);
+			set_Tuple_pred(call, pn_Call_X_regular, jmp);
+			set_Tuple_pred(call, pn_Call_X_except,  new_r_Bad(irg, mode_X));
+			set_Tuple_pred(call, pn_Call_T_result,  tuple);
 
 			for (i = 0; i < env->n_ress; ++i) {
 				ir_node *res = get_Return_res(p, i);
@@ -360,7 +359,7 @@ static void do_opt_tail_rec(ir_graph *irg, tr_env *env)
 				}
 			}
 
-			exchange(p, bad);
+			exchange(p, new_r_Bad(irg, mode_X));
 		}
 
 		/* finally fix all other returns */
@@ -402,7 +401,7 @@ static void do_opt_tail_rec(ir_graph *irg, tr_env *env)
 		}
 		ssa_cons_finish(irg);
 	} else {
-		ir_node *bad = get_irg_bad(irg);
+		ir_node *bad = new_r_Bad(irg, mode_X);
 
 		/* no: we can kill all returns */
 		for (p = env->rets; p; p = n) {
diff --git a/scripts/ir_spec.py b/scripts/ir_spec.py
index 8ca318ba1..e4b0a5d71 100755
--- a/scripts/ir_spec.py
+++ b/scripts/ir_spec.py
@@ -146,12 +146,10 @@ class Bad(Op):
 	they are set to Bad, and the actual removal is left to the control
 	flow optimisation phase. Block, Phi, Tuple with only Bad inputs however
 	are replaced by Bad right away."""
-	mode          = "mode_T"
 	flags         = [ "cfopcode", "start_block", "dump_noblock" ]
 	pinned        = "yes"
 	knownBlock    = True
 	block         = "get_irg_start_block(irg)"
-	singleton     = True
 	attr_struct   = "bad_attr"
 	init = '''
 	res->attr.bad.irg.irg = irg;
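
The ir_spec.py change closes the loop: the node spec drops both the fixed
mode_T and the singleton flag, so the generated constructor takes the mode
as a parameter. A final sketch of what callers can now rely on, assuming
the generated API of this revision:

    #include <assert.h>
    #include <libfirm/firm.h>

    /* Each Bad now remembers the mode it was built with. */
    static void check_bad_mode(ir_graph *irg)
    {
        ir_node *bad = new_r_Bad(irg, mode_X);
        assert(is_Bad(bad));
        assert(get_irn_mode(bad) == mode_X);
    }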