X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firgopt.c;h=53829b59fa0147eedae4b54414954818cde4e42d;hb=b78bdd4d94de46de4156272e6dbfe44e97933a5b;hp=8da7a066392c641aa2aeb7b853bc28261783ecf8;hpb=fed1bdc07c9ec7b4d07a9243ad093c9fdd239fbd;p=libfirm

diff --git a/ir/ir/irgopt.c b/ir/ir/irgopt.c
index 8da7a0663..53829b59f 100644
--- a/ir/ir/irgopt.c
+++ b/ir/ir/irgopt.c
@@ -46,51 +46,26 @@
 #include "iredges_t.h"
 #include "irtools.h"
 
-/* Defined in iropt.c */
-pset *new_identities (void);
-void del_identities (pset *value_table);
-void add_identities (pset *value_table, ir_node *node);
-
 /*------------------------------------------------------------------*/
 /* apply optimizations of iropt to all nodes.                        */
 /*------------------------------------------------------------------*/
 
-static void init_link (ir_node *n, void *env) {
-  set_irn_link(n, NULL);
-}
-
-#if 0   /* Old version. Avoids Ids.
-           This is not necessary: we do a post walk, and get_irn_n
-           removes ids anyways. So it's much cheaper to call the
-           optimization less often and use the exchange() algorithm. */
-static void
-optimize_in_place_wrapper (ir_node *n, void *env) {
-  int i, irn_arity;
-  ir_node *optimized, *old;
-
-  irn_arity = get_irn_arity(n);
-  for (i = 0; i < irn_arity; i++) {
-    /* get_irn_n skips Id nodes, so comparison old != optimized does not
-       show all optimizations. Therefore always set new predecessor. */
-    old = get_irn_intra_n(n, i);
-    optimized = optimize_in_place_2(old);
-    set_irn_n(n, i, optimized);
-  }
-
-  if (get_irn_op(n) == op_Block) {
-    optimized = optimize_in_place_2(n);
-    if (optimized != n) exchange (n, optimized);
-  }
-}
-#else
-static void
-optimize_in_place_wrapper (ir_node *n, void *env) {
+/**
+ * A wrapper around optimize_in_place_2() to be called from a walker.
+ */
+static void optimize_in_place_wrapper (ir_node *n, void *env) {
   ir_node *optimized = optimize_in_place_2(n);
   if (optimized != n) exchange (n, optimized);
 }
-#endif
-
 
+/**
+ * Do local optimizations for a node.
+ *
+ * @param n  the IR-node where to start. Typically the End node
+ *           of a graph
+ *
+ * @note current_ir_graph must be set
+ */
 static INLINE void do_local_optimize(ir_node *n) {
   /* Handle graph state */
   assert(get_irg_phase_state(current_ir_graph) != phase_building);
@@ -106,9 +81,10 @@ static INLINE void do_local_optimize(ir_node *n) {
   current_ir_graph->value_table = new_identities();
 
   /* walk over the graph */
-  irg_walk(n, init_link, optimize_in_place_wrapper, NULL);
+  irg_walk(n, firm_clear_link, optimize_in_place_wrapper, NULL);
 }
 
+/* Applies local optimizations (see iropt.h) to all nodes reachable from node n */
 void local_optimize_node(ir_node *n) {
   ir_graph *rem = current_ir_graph;
   current_ir_graph = get_irn_irg(n);
@@ -123,12 +99,13 @@ void local_optimize_node(ir_node *n) {
  */
 static void kill_dead_blocks(ir_node *block, void *env)
 {
-  if (get_Block_dom_depth(block) < 0)
-    if (block != get_irg_end_block(current_ir_graph)) {
-      /* we don't want that the end block of graphs with
-         endless loops is marked bad (although it is of course */
-      set_Block_dead(block);
-    }
+  if (get_Block_dom_depth(block) < 0) {
+    /*
+     * Note that the new dominance code correctly handles
+     * the End block, i.e. it is always reachable from Start
+     */
+    set_Block_dead(block);
+  }
 }
 
 void
@@ -136,7 +113,7 @@ local_optimize_graph (ir_graph *irg) {
   ir_graph *rem = current_ir_graph;
   current_ir_graph = irg;
 
-  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
+  if (get_irg_dom_state(irg) == dom_consistent)
     irg_block_walk_graph(irg, NULL, kill_dead_blocks, NULL);
 
   do_local_optimize(get_irg_end(irg));
@@ -144,6 +121,73 @@ local_optimize_graph (ir_graph *irg) {
   current_ir_graph = rem;
 }
 
+/**
+ * Data flow optimization walker.
+ */
+static void opt_walker(ir_node *n, void *env) {
+  pdeq *waitq = env;
+  ir_node *optimized;
+
+  optimized = optimize_in_place_2(n);
+  set_irn_link(optimized, NULL);
+
+  if (optimized != n) {
+    const ir_edge_t *edge;
+
+    foreach_out_edge(n, edge) {
+      ir_node *succ = get_edge_src_irn(edge);
+
+      if (get_irn_link(succ) != waitq) {
+        pdeq_putr(waitq, succ);
+        set_irn_link(succ, waitq);
+      }
+    }
+    exchange(n, optimized);
+  }
+}
+
+void optimize_graph_df(ir_graph *irg) {
+  pdeq     *waitq = new_pdeq();
+  int      state = edges_activated(irg);
+  ir_graph *rem = current_ir_graph;
+
+  current_ir_graph = irg;
+
+  if (! state)
+    edges_activate(irg);
+
+  if (get_opt_global_cse())
+    set_irg_pinned(current_ir_graph, op_pin_state_floats);
+
+  /* Clean the value_table in irg for the CSE. */
+  del_identities(irg->value_table);
+  irg->value_table = new_identities();
+
+  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
+    irg_block_walk_graph(irg, NULL, kill_dead_blocks, NULL);
+
+  /* invalidate info */
+  set_irg_outs_inconsistent(irg);
+  set_irg_doms_inconsistent(irg);
+  set_irg_loopinfo_inconsistent(irg);
+
+  /* walk over the graph */
+  irg_walk_graph(irg, NULL, opt_walker, waitq);
+
+  /* finish the wait queue */
+  while (! pdeq_empty(waitq)) {
+    ir_node *n = pdeq_getl(waitq);
+    opt_walker(n, waitq);
+  }
+
+  del_pdeq(waitq);
+
+  if (! state)
+    edges_deactivate(irg);
+
+  current_ir_graph = rem;
+}
+
 /*------------------------------------------------------------------*/
 /* Routines for dead node elimination / copying garbage collection  */
@@ -528,14 +572,14 @@
 dead_node_elimination(ir_graph *irg) {
   graveyard_obst = irg->obst;
 
   /* A new obstack, where the reachable nodes will be copied to. */
-  rebirth_obst = xmalloc (sizeof(*rebirth_obst));
+  rebirth_obst = xmalloc(sizeof(*rebirth_obst));
   current_ir_graph->obst = rebirth_obst;
   obstack_init (current_ir_graph->obst);
   current_ir_graph->last_node_idx = 0;
 
-  /* We also need a new hash table for cse */
-  del_identities (irg->value_table);
-  irg->value_table = new_identities ();
+  /* We also need a new value table for CSE */
+  del_identities(irg->value_table);
+  irg->value_table = new_identities();
 
   /* Copy the graph from the old to the new obstack */
   copy_graph_env(1);
@@ -657,7 +701,7 @@ static void relink_bad_predecessors(ir_node *n, void *env) {
  * changes).
  */
 void remove_bad_predecessors(ir_graph *irg) {
-  irg_walk_graph(irg, init_link, relink_bad_predecessors, NULL);
+  irg_walk_graph(irg, firm_clear_link, relink_bad_predecessors, NULL);
 }
 
 
@@ -856,7 +900,7 @@ static int can_inline(ir_node *call, ir_graph *called_graph)
 int inline_method(ir_node *call, ir_graph *called_graph) {
   ir_node *pre_call;
   ir_node *post_call, *post_bl;
-  ir_node *in[5];
+  ir_node *in[pn_Start_max];
   ir_node *end, *end_bl;
   ir_node **res_pred;
   ir_node **cf_pred;
@@ -939,14 +983,16 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
      graph. Both will end up being a tuple.  -- */
   post_bl = get_nodes_block(call);
   set_irg_current_block(current_ir_graph, post_bl);
-  /* XxMxPxP of Start + parameter of Call */
-  in[pn_Start_X_initial_exec] = new_Jmp();
-  in[pn_Start_M]              = get_Call_mem(call);
-  in[pn_Start_P_frame_base]   = get_irg_frame(current_ir_graph);
-  in[pn_Start_P_globals]      = get_irg_globals(current_ir_graph);
-  in[pn_Start_T_args]         = new_Tuple(get_Call_n_params(call), get_Call_param_arr(call));
+  /* XxMxPxPxPxT of Start + parameter of Call */
+  in[pn_Start_X_initial_exec] = new_Jmp();
+  in[pn_Start_M]              = get_Call_mem(call);
+  in[pn_Start_P_frame_base]   = get_irg_frame(current_ir_graph);
+  in[pn_Start_P_globals]      = get_irg_globals(current_ir_graph);
+  in[pn_Start_P_tls]          = get_irg_tls(current_ir_graph);
+  in[pn_Start_T_args]         = new_Tuple(get_Call_n_params(call), get_Call_param_arr(call));
   /* in[pn_Start_P_value_arg_base] = ??? */
-  pre_call = new_Tuple(5, in);
+  assert(pn_Start_P_value_arg_base == pn_Start_max - 1 && "pn_Start_P_value_arg_base not supported, fix");
+  pre_call = new_Tuple(pn_Start_max - 1, in);
   post_call = call;
 
   /* --
@@ -1973,8 +2019,7 @@ void place_code(ir_graph *irg) {
 
   /* Handle graph state */
   assert(get_irg_phase_state(irg) != phase_building);
-  if (get_irg_dom_state(irg) != dom_consistent)
-    compute_doms(irg);
+  assure_doms(irg);
 
   if (1 || get_irg_loopinfo_state(irg) != loopinfo_consistent) {
     free_loop_information(irg);
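
The interesting part of the new optimize_graph_df() above is the worklist logic: whenever opt_walker() manages to replace a node, every user of the old node (found through the out edges) is put on the pdeq wait queue unless its link field already marks it as queued, and the queue is drained after the initial walk until no further changes occur. Below is a minimal, self-contained sketch of that pattern on a toy dataflow problem; the types and helpers (node_t, optimize_node(), enqueue_users(), ...) are made up for illustration and are not libfirm API.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define N_NODES   6
#define MAX_PREDS 2
#define QUEUE_CAP 64   /* generously sized for this toy example */

/* A toy "node": its value must end up >= the value of every predecessor. */
typedef struct {
    int  value;
    int  n_preds;
    int  preds[MAX_PREDS];
    bool queued;               /* plays the role of the link-field marker */
} node_t;

static node_t nodes[N_NODES] = {
    { 5, 0, { 0, 0 }, false }, /* node 0: no predecessors          */
    { 1, 1, { 0, 0 }, false }, /* node 1: depends on node 0        */
    { 0, 1, { 1, 0 }, false }, /* node 2: depends on node 1        */
    { 7, 0, { 0, 0 }, false }, /* node 3: no predecessors          */
    { 2, 2, { 2, 3 }, false }, /* node 4: depends on nodes 2 and 3 */
    { 0, 1, { 4, 0 }, false }, /* node 5: depends on node 4        */
};

static int queue[QUEUE_CAP];
static int q_head, q_tail;

/* Put a node on the wait queue unless it is already queued. */
static void enqueue(int i) {
    if (!nodes[i].queued) {
        assert(q_tail < QUEUE_CAP);
        nodes[i].queued = true;
        queue[q_tail++] = i;
    }
}

/* "Local optimization": raise the value to the largest predecessor value.
 * Returns true if the node changed, i.e. its users must be revisited. */
static bool optimize_node(int i) {
    int old = nodes[i].value;
    for (int p = 0; p < nodes[i].n_preds; ++p) {
        int pv = nodes[nodes[i].preds[p]].value;
        if (pv > nodes[i].value)
            nodes[i].value = pv;
    }
    return nodes[i].value != old;
}

/* Queue every user of node i (every node that has i as a predecessor). */
static void enqueue_users(int i) {
    for (int u = 0; u < N_NODES; ++u)
        for (int p = 0; p < nodes[u].n_preds; ++p)
            if (nodes[u].preds[p] == i)
                enqueue(u);
}

int main(void) {
    /* Initial pass: visit every node once, as irg_walk_graph() would. */
    for (int i = 0; i < N_NODES; ++i)
        if (optimize_node(i))
            enqueue_users(i);

    /* Drain the wait queue until a fixed point is reached. */
    while (q_head != q_tail) {
        int i = queue[q_head++];
        nodes[i].queued = false;
        if (optimize_node(i))
            enqueue_users(i);
    }

    for (int i = 0; i < N_NODES; ++i)
        printf("node %d: value %d\n", i, nodes[i].value);
    return 0;
}

The queued flag does the same duty as the get_irn_link(succ) != waitq check in opt_walker(): it keeps a node from being put on the queue twice while it is still waiting to be revisited.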