From 67f5a55959bdc47406a8c2ec0cc44c1e5df48844 Mon Sep 17 00:00:00 2001 From: =?utf8?q?G=C3=B6tz=20Lindenmaier?= Date: Mon, 5 May 2003 10:04:08 +0000 Subject: [PATCH] Removed 2 bugs in option handling [r1136] --- ir/ir/irgopt.c | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/ir/ir/irgopt.c b/ir/ir/irgopt.c index 0bbec9c1c..f2b0dc589 100644 --- a/ir/ir/irgopt.c +++ b/ir/ir/irgopt.c @@ -555,10 +555,12 @@ void inline_method(ir_node *call, ir_graph *called_graph) { get_Call_type(call))); */ assert(get_type_tpop(get_Call_type(call)) == type_method); - if (called_graph == current_ir_graph) return; - + if (called_graph == current_ir_graph) { + set_optimize(rem_opt); + return; + } -/* -- + /* -- the procedure and later replaces the Start node of the called graph. Post_call is the old Call node and collects the results of the called graph. Both will end up being a tuple. -- */ @@ -573,7 +575,7 @@ void inline_method(ir_node *call, ir_graph *called_graph) { pre_call = new_Tuple(5, in); post_call = call; -/* -- + /* -- The new block gets the ins of the old block, pre_call and all its predecessors and all Phi nodes. -- */ part_block(pre_call); @@ -1148,7 +1150,8 @@ void place_code(ir_graph *irg) { /********************************************************************/ /* Removes Tuples from Block control flow predecessors. - Optimizes blocks with equivalent_node(). */ + Optimizes blocks with equivalent_node(). + Replaces n by Bad if n is unreachable control flow. */ static void merge_blocks(ir_node *n, void *env) { int i; set_irn_link(n, NULL); @@ -1156,22 +1159,25 @@ static void merge_blocks(ir_node *n, void *env) { if (get_irn_op(n) == op_Block) { /* Remove Tuples */ for (i = 0; i < get_Block_n_cfgpreds(n); i++) - set_Block_cfgpred(n, i, skip_Tuple(get_Block_cfgpred(n, i))); - } else if (get_irn_mode(n) == mode_X) { + /* GL @@@ : is this possible? if (get_opt_normalize()) -- added, all tests go through. 
+ A different order of optimizations might cause problems. */ + if (get_opt_normalize()) + set_Block_cfgpred(n, i, skip_Tuple(get_Block_cfgpred(n, i))); + } else if (get_optimize() && (get_irn_mode(n) == mode_X)) { /* We will soon visit a block. Optimize it before visiting! */ ir_node *b = get_nodes_Block(n); ir_node *new = equivalent_node(b); while (irn_not_visited(b) && (!is_Bad(new)) && (new != b)) { - /* We would have to run gigo if new is bad. */ - if (!get_optimize() || (!get_opt_control_flow_straightening() - && !get_opt_control_flow_weak_simplification())) - /* how could something be optimized if flags are not set? */ - assert(0 && "strange ?? !!"); + /* We would have to run gigo if new is bad, so we + promote it directly below. */ + assert(((b == new) || get_opt_control_flow_straightening() || get_opt_control_flow_weak_simplification()) && + ("strange flag setting")); exchange (b, new); b = new; new = equivalent_node(b); } - if (is_Bad(new)) exchange (n, new_Bad()); + /* GL @@@ get_opt_normalize hinzugefuegt, 5.5.2003 */ + if (is_Bad(new) && get_opt_normalize()) exchange (n, new_Bad()); } } -- 2.20.1