X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbelistsched.c;h=16712488ba6a60766ea76a31fb366952aae772c7;hb=f6803e61a5c32b49e3d8f9cdd3ce10d0036c5b37;hp=02617470c1d95f5adf60a80f9bbef39cabcfd86c;hpb=c49f890e2075c541c38544d7b1f9c6cfc5be4fb4;p=libfirm

diff --git a/ir/be/belistsched.c b/ir/be/belistsched.c
index 02617470c..16712488b 100644
--- a/ir/be/belistsched.c
+++ b/ir/be/belistsched.c
@@ -14,6 +14,8 @@
 #include
 #include
 
+#include "benode_t.h"
+
 #include "obst.h"
 #include "list.h"
 #include "iterator.h"
@@ -81,16 +83,21 @@ static ir_node *trivial_select(void *block_env, pset *ready_set)
 	return res;
 }
 
-static int default_to_appear_in_schedule(void *env, const ir_node *irn)
+static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
 {
-	return to_appear_in_schedule(irn);
+	int res = 0;
+
+	if(sel->to_appear_in_schedule)
+		res = sel->to_appear_in_schedule(block_env, irn);
+
+	return res || to_appear_in_schedule(irn) || be_is_Keep(irn);
 }
 
 static const list_sched_selector_t trivial_selector_struct = {
 	NULL,
 	NULL,
 	trivial_select,
-	default_to_appear_in_schedule,
+	NULL,
 	NULL,
 	NULL
 };
@@ -137,14 +144,23 @@ static INLINE usage_stats_t *get_usage_stats(ir_node *irn)
 	return us;
 }
 
-static int max_hops_walker(ir_node *irn, ir_node *tgt, int depth, unsigned visited_nr)
+static int max_hops_walker(reg_pressure_selector_env_t *env, ir_node *irn, ir_node *curr_bl, int depth, unsigned visited_nr)
 {
-	int i, n;
-	int res = 0;
-
-	if(irn != tgt) {
-		res = INT_MAX;
+	ir_node *bl = get_nodes_block(irn);
+	/*
+	 * If the reached node is not in the block desired,
+	 * return the value passed for this situation.
+	 */
+	if(get_nodes_block(irn) != bl)
+		return block_dominates(bl, curr_bl) ? 0 : INT_MAX;
+	/*
+	 * If the node is in the current block but not
+	 * yet scheduled, we keep on searching from that node.
+	 */
+	if(!pset_find_ptr(env->already_scheduled, irn)) {
+		int i, n;
+		int res = 0;
 
 		for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
 			ir_node *operand = get_irn_n(irn, i);
@@ -152,34 +168,38 @@ static int max_hops_walker(ir_node *irn, ir_node *tgt, int depth, unsigned visit
 				int tmp;
 
 				set_irn_visited(operand, visited_nr);
-				tmp = max_hops_walker(operand, tgt, depth + 1, visited_nr);
+				tmp = max_hops_walker(env, operand, bl, depth + 1, visited_nr);
 				res = MAX(tmp, res);
 			}
 		}
+
+		return res;
 	}
 
-	return res;
+	/*
+	 * If the node is in the current block and scheduled, return
+	 * the depth which indicates the number of steps to the
+	 * region of scheduled nodes.
+	 */
+	return depth;
 }
 
 static int compute_max_hops(reg_pressure_selector_env_t *env, ir_node *irn)
 {
 	ir_node *bl = get_nodes_block(irn);
 	ir_graph *irg = get_irn_irg(bl);
-	int res = INT_MAX;
+	int res = 0;
 
 	const ir_edge_t *edge;
 
 	foreach_out_edge(irn, edge) {
-		ir_node *user = get_edge_src_irn(edge);
-
-		if(get_nodes_block(user) == bl && !pset_find_ptr(env->already_scheduled, user)) {
-			unsigned visited_nr = get_irg_visited(irg) + 1;
-			int max_hops;
+		ir_node *user       = get_edge_src_irn(edge);
+		unsigned visited_nr = get_irg_visited(irg) + 1;
+		int max_hops;
 
-			set_irg_visited(irg, visited_nr);
-			max_hops = max_hops_walker(user, irn, 0, visited_nr);
-			res = MAX(res, max_hops);
-		}
+		set_irg_visited(irg, visited_nr);
+		max_hops = max_hops_walker(env, user, irn, 0, visited_nr);
+		res = MAX(res, max_hops);
 	}
 
 	return res;
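The hunks above rework the register-pressure selector: must_appear_in_schedule() treats the per-selector callback as optional and always forces Keep nodes into the schedule, while max_hops_walker() and compute_max_hops() measure, for every user of a value, how many dependence edges lie between that user and the region of already scheduled nodes. A stand-alone sketch of the max-hops idea on a toy dependence graph (plain C, not libfirm code; the node numbering, preds table and scheduled flags are invented for illustration, and the visited marks and block checks of the real walker are omitted):

#include <stdio.h>

#define N_NODES   6
#define MAX_PREDS 3

/* Toy dependence graph: preds[n] lists the operands of node n, -1 terminated. */
static const int preds[N_NODES][MAX_PREDS] = {
	{ -1, -1, -1 },   /* 0: already scheduled */
	{ -1, -1, -1 },   /* 1: already scheduled */
	{  0, -1, -1 },   /* 2: uses 0            */
	{  2,  1, -1 },   /* 3: uses 2 and 1      */
	{  3, -1, -1 },   /* 4: uses 3            */
	{  4,  2, -1 },   /* 5: uses 4 and 2      */
};

static const int scheduled[N_NODES] = { 1, 1, 0, 0, 0, 0 };

#define MAX(a, b) ((a) > (b) ? (a) : (b))

/*
 * Walk from a node towards its operands until an already scheduled
 * node is reached; the returned depth approximates how much work is
 * still pending before the node's inputs are available.
 */
static int max_hops(int node, int depth)
{
	int res = depth;
	int i;

	if (scheduled[node])
		return depth;

	for (i = 0; i < MAX_PREDS && preds[node][i] >= 0; ++i)
		res = MAX(res, max_hops(preds[node][i], depth + 1));

	return res;
}

int main(void)
{
	int n;

	for (n = 0; n < N_NODES; ++n)
		printf("node %d: max hops to scheduled region = %d\n", n, max_hops(n, 0));

	return 0;
}

In the diff, reg_pr_costs() below sums these per-operand counts to obtain the cost of a candidate node.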
@@ -205,12 +225,12 @@ static void *reg_pressure_block_init(void *graph_env, ir_node *bl)
 	 * Collect usage statistics.
 	 */
 	sched_foreach(bl, irn) {
-		if(env->vtab->to_appear_in_schedule(env, irn)) {
+		if(must_appear_in_schedule(env->vtab, env, irn)) {
 			int i, n;
 
 			for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
 				ir_node *op = get_irn_n(irn, i);
-				if(env->vtab->to_appear_in_schedule(env, irn)) {
+				if(must_appear_in_schedule(env->vtab, env, irn)) {
 					usage_stats_t *us = get_or_set_usage_stats(env, irn);
 					if(is_live_end(bl, op))
 						us->uses_in_block = 99999;
@@ -237,6 +257,23 @@ static void reg_pressure_block_free(void *block_env)
 	free(env);
 }
 
+static int get_result_hops_sum(reg_pressure_selector_env_t *env, ir_node *irn)
+{
+	int res = 0;
+
+	if(get_irn_mode(irn) == mode_T) {
+		const ir_edge_t *edge;
+
+		foreach_out_edge(irn, edge)
+			res += get_result_hops_sum(env, get_edge_src_irn(edge));
+	}
+
+	else if(mode_is_data(get_irn_mode(irn)))
+		res = compute_max_hops(env, irn);
+
+
+	return res;
+}
+
 static INLINE int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
 {
 	int i, n;
@@ -245,14 +282,16 @@ static INLINE int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
 	for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
 		ir_node *op = get_irn_n(irn, i);
 
-		if(env->vtab->to_appear_in_schedule(env, op))
+		if(must_appear_in_schedule(env->vtab, env, op))
 			sum += compute_max_hops(env, op);
 	}
 
+	sum += get_result_hops_sum(env, irn);
+
 	return sum;
 }
 
-ir_node *reg_pressure_select(void *block_env, pset *ready_set)
+static ir_node *reg_pressure_select(void *block_env, pset *ready_set)
 {
 	reg_pressure_selector_env_t *env = block_env;
 	ir_node *irn, *res = NULL;
@@ -276,7 +315,7 @@ static const list_sched_selector_t reg_pressure_selector_struct = {
 	reg_pressure_graph_init,
 	reg_pressure_block_init,
 	reg_pressure_select,
-	default_to_appear_in_schedule,
+	NULL,
 	reg_pressure_block_free,
 	NULL
 };
@@ -404,15 +443,15 @@ static int node_cmp_func(const void *p1, const void *p2)
 
 /**
  * Append an instruction to a schedule.
- * @param env The block scheduleing environment.
+ * @param env The block scheduling environment.
  * @param irn The node to add to the schedule.
- * @return The given node.
+ * @return    The given node.
  */
 static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
 {
 	/* If the node consumes/produces data, it is appended to the schedule
 	 * list, otherwise, it is not put into the list */
-	if(to_appear_in_schedule(irn)) {
+	if(must_appear_in_schedule(env->selector, env->selector_block_env, irn)) {
 		sched_info_t *info = get_irn_sched_info(irn);
 		INIT_LIST_HEAD(&info->list);
 		info->scheduled = 1;
@@ -431,7 +470,6 @@ static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
 	return irn;
 }
 
-
 /**
  * Add the proj nodes of a tuple-mode irn to the schedule immediately
  * after the tuple-moded irn. By pinning the projs after the irn, no
@@ -470,6 +508,20 @@ static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
 	}
 }
 
+static ir_node *select_node(block_sched_env_t *be)
+{
+	ir_node *irn;
+
+	for(irn = pset_first(be->ready_set); irn; irn = pset_next(be->ready_set)) {
+		if(be_is_Keep(irn)) {
+			pset_break(be->ready_set);
+			return irn;
+		}
+	}
+
+	return be->selector->select(be->selector_block_env, be->ready_set);
+}
+
 /**
  * Perform list scheduling on a block.
  *
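Above, add_to_sched() now consults must_appear_in_schedule(), so the decision combines the selector's callback, the generic predicate and the Keep check, and the new select_node() helper drains Keep nodes from the ready set before the selector is consulted at all. A stand-alone sketch of that two-stage pick (plain C over an array-based ready set; the node_t type, its is_keep and cost fields and the fallback selector are invented for illustration):

#include <stddef.h>
#include <stdio.h>

typedef struct node {
	const char *name;
	int         is_keep;  /* pins values; should be scheduled as soon as possible */
	int         cost;     /* whatever metric the selector uses                    */
} node_t;

/* Fallback selector: pick the cheapest ready node. */
static node_t *select_cheapest(node_t **ready, size_t n)
{
	node_t *best = NULL;
	size_t i;

	for (i = 0; i < n; ++i)
		if (!best || ready[i]->cost < best->cost)
			best = ready[i];

	return best;
}

/*
 * Two-stage selection in the spirit of select_node(): Keep-like nodes
 * win unconditionally, everything else is left to the selector.
 */
static node_t *pick_next(node_t **ready, size_t n,
                         node_t *(*selector)(node_t **, size_t))
{
	size_t i;

	for (i = 0; i < n; ++i)
		if (ready[i]->is_keep)
			return ready[i];

	return selector(ready, n);
}

int main(void)
{
	node_t a = { "add",  0, 3 };
	node_t k = { "keep", 1, 9 };
	node_t l = { "load", 0, 1 };
	node_t *ready[] = { &a, &k, &l };

	printf("picked: %s\n", pick_next(ready, 3, select_cheapest)->name);
	return 0;
}

With this ready set the keep node is picked even though the fallback selector would prefer the cheaper load.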
@@ -505,8 +557,6 @@ static void list_sched_block(ir_node *block, void *env_ptr)
 	be.already_scheduled = new_pset(node_cmp_func, get_irn_n_edges(block));
 	be.selector          = selector;
 
-	firm_dbg_set_mask(be.dbg, 0);
-
 	if(selector->init_block)
 		be.selector_block_env = selector->init_block(env->selector_env, block);
 
@@ -565,7 +615,7 @@ static void list_sched_block(ir_node *block, void *env_ptr)
 
 	while(pset_count(be.ready_set) > 0) {
 		/* select a node to be scheduled and check if it was ready */
-		irn = selector->select(be.selector_block_env, be.ready_set);
+		irn = select_node(&be);
 
 		DBG((be.dbg, LEVEL_3, "\tpicked node %+F\n", irn));
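The final hunk routes the main loop of list_sched_block() through select_node(), so the Keep-first rule applies to every pick. The surrounding loop is ordinary list scheduling: keep a ready set of nodes whose operands are all scheduled, pick one, schedule it, and release any users that become ready. A compact stand-alone model of that loop (plain C on a toy DAG; the users matrix and the trivial selector are invented for illustration):

#include <stdio.h>

#define N_NODES 6

/* users[i][j] = 1 iff node j consumes the value produced by node i. */
static const int users[N_NODES][N_NODES] = {
	/*        0  1  2  3  4  5 */
	/* 0 */ { 0, 0, 1, 0, 0, 0 },
	/* 1 */ { 0, 0, 0, 1, 0, 0 },
	/* 2 */ { 0, 0, 0, 1, 1, 0 },
	/* 3 */ { 0, 0, 0, 0, 0, 1 },
	/* 4 */ { 0, 0, 0, 0, 0, 1 },
	/* 5 */ { 0, 0, 0, 0, 0, 0 },
};

static int n_unscheduled_ops[N_NODES];  /* operands not yet scheduled */

/* Trivial selector: lowest-numbered ready node, -1 if none is ready. */
static int select_ready(const int *ready)
{
	int i;

	for (i = 0; i < N_NODES; ++i)
		if (ready[i])
			return i;

	return -1;
}

int main(void)
{
	int ready[N_NODES] = { 0 };
	int i, j, n;

	/* Count operands and put operand-less nodes into the ready set. */
	for (i = 0; i < N_NODES; ++i)
		for (j = 0; j < N_NODES; ++j)
			n_unscheduled_ops[j] += users[i][j];
	for (i = 0; i < N_NODES; ++i)
		ready[i] = (n_unscheduled_ops[i] == 0);

	/* The list scheduling loop: pick, schedule, release users. */
	for (n = 0; n < N_NODES; ++n) {
		int irn = select_ready(ready);

		if (irn < 0)
			break;

		printf("schedule node %d\n", irn);
		ready[irn] = 0;

		for (j = 0; j < N_NODES; ++j)
			if (users[irn][j] && --n_unscheduled_ops[j] == 0)
				ready[j] = 1;
	}

	return 0;
}

Any selector that always returns a member of the ready set yields a valid topological order; the heuristics in the diff only change which of the ready nodes is preferred.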