X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbelistsched.c;h=7668732ffb6cabd69c2a690cc065f9437d86b496;hb=9e45062f03f139df6068019c3f4733d809cf9e2c;hp=8ffed4d9e7787fde0b7cdbd21d2ef09e933d6a06;hpb=f7aeba1c435b8ea674263f30aff4deb53c84eed0;p=libfirm

diff --git a/ir/be/belistsched.c b/ir/be/belistsched.c
index 8ffed4d9e..7668732ff 100644
--- a/ir/be/belistsched.c
+++ b/ir/be/belistsched.c
@@ -41,9 +41,10 @@
  * Scheduling environment for the whole graph.
  */
 typedef struct _sched_env_t {
-	const ir_graph *irg;                        /**< The graph to schedule. */
-	const list_sched_selector_t *selector;      /**< The node selector. */
-	void *selector_env;                         /**< A pointer to give to the selector. */
+	const list_sched_selector_t *selector;      /**< The node selector. */
+	const arch_env_t *arch_env;                 /**< The architecture environment. */
+	const ir_graph *irg;                        /**< The graph to schedule. */
+	void *selector_env;                         /**< A pointer to give to the selector. */
 } sched_env_t;
 
 #if 0
@@ -74,13 +75,38 @@ static int cmp_usage(const void *a, const void *b)
 }
 #endif
 
-static ir_node *trivial_select(void *block_env, pset *ready_set)
+/**
+ * The trivial selector:
+ * Just assure that branches are executed last, otherwise select
+ * the first node ready.
+ */
+static ir_node *trivial_select(void *block_env, nodeset *ready_set)
 {
-	ir_node *res;
+	const arch_env_t *arch_env = block_env;
+	ir_node *irn = NULL;
 
-	res = pset_first(ready_set);
-	pset_break(ready_set);
-	return res;
+	/* assure that branches are executed last */
+	for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
+		if (arch_irn_classify(arch_env, irn) != arch_irn_class_branch) {
+			nodeset_break(ready_set);
+			return irn;
+		}
+	}
+
+	irn = nodeset_first(ready_set);
+	nodeset_break(ready_set);
+
+	return irn;
+}
+
+static void *trivial_init_graph(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
+{
+	return (void *) arch_env;
+}
+
+static void *trivial_init_block(void *graph_env, ir_node *bl)
+{
+	return graph_env;
 }
 
 static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
@@ -90,12 +116,12 @@ static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void
 	if(sel->to_appear_in_schedule)
 		res = sel->to_appear_in_schedule(block_env, irn);
 
-	return res || to_appear_in_schedule(irn) || be_is_Keep(irn);
+	return res || to_appear_in_schedule(irn) || be_is_Keep(irn) || be_is_RegParams(irn);
 }
 
 static const list_sched_selector_t trivial_selector_struct = {
-	NULL,
-	NULL,
+	trivial_init_graph,
+	trivial_init_block,
 	trivial_select,
 	NULL,
 	NULL,
@@ -113,11 +139,16 @@ typedef struct _usage_stats_t {
 	scheduled.
 */
 } usage_stats_t;
 
+typedef struct {
+	const list_sched_selector_t *vtab;
+	const arch_env_t *arch_env;
+} reg_pressure_main_env_t;
+
 typedef struct {
 	struct obstack obst;
+	const reg_pressure_main_env_t *main_env;
 	usage_stats_t *root;
-	pset *already_scheduled;
-	const list_sched_selector_t *vtab;
+	nodeset *already_scheduled;
 } reg_pressure_selector_env_t;
 
 static INLINE usage_stats_t *get_or_set_usage_stats(reg_pressure_selector_env_t *env, ir_node *irn)
@@ -144,14 +175,23 @@ static INLINE usage_stats_t *get_usage_stats(ir_node *irn)
 	return us;
 }
 
-static int max_hops_walker(ir_node *irn, ir_node *tgt, int depth, unsigned visited_nr)
+static int max_hops_walker(reg_pressure_selector_env_t *env, ir_node *irn, ir_node *curr_bl, int depth, unsigned visited_nr)
 {
-	int i, n;
-	int res = 0;
-
-	if(irn != tgt) {
-		res = INT_MAX;
+	ir_node *bl = get_nodes_block(irn);
+	/*
+	 * If the reached node is not in the block desired,
+	 * return the value passed for this situation.
+	 */
+	if(bl != curr_bl)
+		return block_dominates(bl, curr_bl) ? 0 : INT_MAX;
+	/*
+	 * If the node is in the current block but not
+	 * yet scheduled, we keep on searching from that node.
+	 */
+	if(!nodeset_find(env->already_scheduled, irn)) {
+		int i, n;
+		int res = 0;
 
 		for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
 			ir_node *operand = get_irn_n(irn, i);
@@ -159,43 +199,52 @@ static int max_hops_walker(ir_node *irn, ir_node *tgt, int depth, unsigned visit
 				int tmp;
 
 				set_irn_visited(operand, visited_nr);
-				tmp = max_hops_walker(operand, tgt, depth + 1, visited_nr);
+				tmp = max_hops_walker(env, operand, bl, depth + 1, visited_nr);
 				res = MAX(tmp, res);
 			}
 		}
+
+		return res;
 	}
 
-	return res;
+	/*
+	 * If the node is in the current block and scheduled, return
+	 * the depth which indicates the number of steps to the
+	 * region of scheduled nodes.
+	 */
+	return depth;
 }
 
 static int compute_max_hops(reg_pressure_selector_env_t *env, ir_node *irn)
 {
 	ir_node *bl = get_nodes_block(irn);
 	ir_graph *irg = get_irn_irg(bl);
-	int res = INT_MAX;
+	int res = 0;
 
 	const ir_edge_t *edge;
 
 	foreach_out_edge(irn, edge) {
-		ir_node *user = get_edge_src_irn(edge);
-
-		if(get_nodes_block(user) == bl && !pset_find_ptr(env->already_scheduled, user)) {
-			unsigned visited_nr = get_irg_visited(irg) + 1;
-			int max_hops;
+		ir_node *user = get_edge_src_irn(edge);
+		unsigned visited_nr = get_irg_visited(irg) + 1;
+		int max_hops;
 
-			set_irg_visited(irg, visited_nr);
-			max_hops = max_hops_walker(user, irn, 0, visited_nr);
-			res = MAX(res, max_hops);
-		}
+		set_irg_visited(irg, visited_nr);
+		max_hops = max_hops_walker(env, user, irn, 0, visited_nr);
+		res = MAX(res, max_hops);
 	}
 
 	return res;
 }
 
-static void *reg_pressure_graph_init(const list_sched_selector_t *vtab, const arch_isa_t *isa, ir_graph *irg)
+static void *reg_pressure_graph_init(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
 {
+	reg_pressure_main_env_t *main_env = xmalloc(sizeof(main_env[0]));
+
+	main_env->arch_env = arch_env;
+	main_env->vtab = vtab;
 	irg_walk_graph(irg, firm_clear_link, NULL, NULL);
-	return (void *) vtab;
+
+	return main_env;
 }
 
 static void *reg_pressure_block_init(void *graph_env, ir_node *bl)
@@ -204,20 +253,20 @@ static void *reg_pressure_block_init(void *graph_env, ir_node *bl)
 	reg_pressure_selector_env_t *env = xmalloc(sizeof(env[0]));
 
 	obstack_init(&env->obst);
-	env->already_scheduled = pset_new_ptr(32);
+	env->already_scheduled = new_nodeset(32);
 	env->root = NULL;
-	env->vtab = graph_env;
+	env->main_env = graph_env;
 
 	/*
 	 * Collect usage statistics.
 */
 	sched_foreach(bl, irn) {
-		if(must_appear_in_schedule(env->vtab, env, irn)) {
+		if(must_appear_in_schedule(env->main_env->vtab, env, irn)) {
 			int i, n;
 
 			for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
 				ir_node *op = get_irn_n(irn, i);
 
-				if(must_appear_in_schedule(env->vtab, env, irn)) {
+				if(must_appear_in_schedule(env->main_env->vtab, env, irn)) {
 					usage_stats_t *us = get_or_set_usage_stats(env, irn);
 					if(is_live_end(bl, op))
 						us->uses_in_block = 99999;
@@ -240,10 +289,27 @@ static void reg_pressure_block_free(void *block_env)
 		set_irn_link(us->irn, NULL);
 
 	obstack_free(&env->obst, NULL);
-	del_pset(env->already_scheduled);
+	del_nodeset(env->already_scheduled);
 	free(env);
 }
 
+static int get_result_hops_sum(reg_pressure_selector_env_t *env, ir_node *irn)
+{
+	int res = 0;
+	if(get_irn_mode(irn) == mode_T) {
+		const ir_edge_t *edge;
+
+		foreach_out_edge(irn, edge)
+			res += get_result_hops_sum(env, get_edge_src_irn(edge));
+	}
+
+	else if(mode_is_data(get_irn_mode(irn)))
+		res = compute_max_hops(env, irn);
+
+
+	return res;
+}
+
 static INLINE int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
 {
 	int i, n;
@@ -252,30 +318,50 @@ static INLINE int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
 	for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
 		ir_node *op = get_irn_n(irn, i);
 
-		if(must_appear_in_schedule(env->vtab, env, op))
+		if(must_appear_in_schedule(env->main_env->vtab, env, op))
 			sum += compute_max_hops(env, op);
 	}
 
+	sum += get_result_hops_sum(env, irn);
+
 	return sum;
 }
 
-ir_node *reg_pressure_select(void *block_env, pset *ready_set)
+static ir_node *reg_pressure_select(void *block_env, nodeset *ready_set)
 {
 	reg_pressure_selector_env_t *env = block_env;
 	ir_node *irn, *res = NULL;
 	int curr_cost = INT_MAX;
 
-	assert(pset_count(ready_set) > 0);
-
-	for(irn = pset_first(ready_set); irn; irn = pset_next(ready_set)) {
-		int costs = reg_pr_costs(env, irn);
-		if(costs <= curr_cost) {
-			res = irn;
-			curr_cost = costs;
+	assert(nodeset_count(ready_set) > 0);
+
+	for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
+		/*
+			Ignore branch instructions for the time being.
+			They should only be scheduled if there is nothing else.
+		*/
+		if (arch_irn_classify(env->main_env->arch_env, irn) != arch_irn_class_branch) {
+			int costs = reg_pr_costs(env, irn);
+			if (costs <= curr_cost) {
+				res = irn;
+				curr_cost = costs;
			}
 		}
 	}
 
-	pset_insert_ptr(env->already_scheduled, res);
+	/*
+		There was no result so we only saw a branch.
+		Take it and finish.
+	*/
+
+	if(!res) {
+		res = nodeset_first(ready_set);
+		nodeset_break(ready_set);
+
+		assert(res && "There must be a node scheduled.");
+	}
+
+	nodeset_insert(env->already_scheduled, res);
 	return res;
 }
 
@@ -285,22 +371,24 @@ static const list_sched_selector_t reg_pressure_selector_struct = {
 	reg_pressure_select,
 	NULL,
 	reg_pressure_block_free,
-	NULL
+	free
 };
 
 const list_sched_selector_t *reg_pressure_selector = &reg_pressure_selector_struct;
 
 static void list_sched_block(ir_node *block, void *env_ptr);
 
-void list_sched(const struct _arch_isa_t *isa, ir_graph *irg)
+void list_sched(const arch_env_t *arch_env, ir_graph *irg)
 {
 	sched_env_t env;
-	const list_sched_selector_t *selector;
 
 	memset(&env, 0, sizeof(env));
-	selector = env.selector = isa->impl->get_list_sched_selector(isa);
-	env.selector_env = selector->init_graph ? selector->init_graph(selector, isa, irg) : NULL;
-	env.irg = irg;
+	env.selector = arch_env->isa->impl->get_list_sched_selector(arch_env->isa);
+	env.arch_env = arch_env;
+	env.irg = irg;
+
+	if(env.selector->init_graph)
+		env.selector_env = env.selector->init_graph(env.selector, arch_env, irg);
 
 	/* Assure, that the out edges are computed */
 	edges_assure(irg);
@@ -308,8 +396,8 @@ void list_sched(const struct _arch_isa_t *isa, ir_graph *irg)
 	/* Schedule each single block. */
 	irg_block_walk_graph(irg, list_sched_block, NULL, &env);
 
-	if(selector->finish_graph)
-		selector->finish_graph(env.selector_env);
+	if(env.selector->finish_graph)
+		env.selector->finish_graph(env.selector_env);
 }
 
 
@@ -318,12 +406,12 @@ void list_sched(const struct _arch_isa_t *isa, ir_graph *irg)
  */
 typedef struct _block_sched_env_t {
 	int curr_time;
-	pset *ready_set;
-	pset *already_scheduled;
+	nodeset *ready_set;
+	nodeset *already_scheduled;
 	ir_node *block;
-	firm_dbg_module_t *dbg;
 	const list_sched_selector_t *selector;
 	void *selector_block_env;
+	DEBUG_ONLY(firm_dbg_module_t *dbg;)
 } block_sched_env_t;
 
 /**
@@ -352,12 +440,12 @@ static INLINE int make_ready(block_sched_env_t *env, ir_node *irn)
 
 		/* If the operand is local to the scheduled block and not yet
 		 * scheduled, this nodes cannot be made ready, so exit. */
-		if(!pset_find_ptr(env->already_scheduled, op) && get_nodes_block(op) == env->block)
+		if(!nodeset_find(env->already_scheduled, op) && get_nodes_block(op) == env->block)
 			return 0;
 	}
 
 	DBG((env->dbg, LEVEL_2, "\tmaking ready: %+F\n", irn));
-	pset_insert_ptr(env->ready_set, irn);
+	nodeset_insert(env->ready_set, irn);
 
 	return 1;
 }
@@ -369,7 +457,7 @@ static INLINE int make_ready(block_sched_env_t *env, ir_node *irn)
  * @return 1 if the node was ready, 0 if not.
  */
 #define is_ready(env,irn) \
-	(pset_find_ptr((env)->ready_set, irn) != NULL)
+	(nodeset_find((env)->ready_set, irn) != NULL)
 
 /**
  * Check, if a node has already been schedules.
@@ -378,7 +466,7 @@ static INLINE int make_ready(block_sched_env_t *env, ir_node *irn)
  * @return 1 if the node was already scheduled, 0 if not.
  */
 #define is_scheduled(env,irn) \
-	(pset_find_ptr((env)->already_scheduled, irn) != NULL)
+	(nodeset_find((env)->already_scheduled, irn) != NULL)
 
 /**
  * Try, to make all users of a node ready.
@@ -429,11 +517,11 @@ static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
 	}
 
 	/* Insert the node in the set of all already scheduled nodes.
*/ - pset_insert_ptr(env->already_scheduled, irn); + nodeset_insert(env->already_scheduled, irn); /* Remove the node from the ready set */ - if(pset_find_ptr(env->ready_set, irn)) - pset_remove_ptr(env->ready_set, irn); + if(nodeset_find(env->ready_set, irn)) + nodeset_remove(env->ready_set, irn); return irn; } @@ -480,9 +568,9 @@ static ir_node *select_node(block_sched_env_t *be) { ir_node *irn; - for(irn = pset_first(be->ready_set); irn; irn = pset_next(be->ready_set)) { - if(be_is_Keep(irn)) { - pset_break(be->ready_set); + for (irn = nodeset_first(be->ready_set); irn; irn = nodeset_next(be->ready_set)) { + if (be_is_Keep(irn)) { + nodeset_break(be->ready_set); return irn; } } @@ -503,27 +591,27 @@ static ir_node *select_node(block_sched_env_t *be) */ static void list_sched_block(ir_node *block, void *env_ptr) { - sched_env_t *env = env_ptr; - block_sched_env_t be; + sched_env_t *env = env_ptr; const list_sched_selector_t *selector = env->selector; + ir_node *start_node = get_irg_start(get_irn_irg(block)); + int phi_seen = 0; + sched_info_t *info = get_irn_sched_info(block); + + block_sched_env_t be; const ir_edge_t *edge; ir_node *irn; - ir_node *start_node = get_irg_start(get_irn_irg(block)); - ir_node *final_jmp = NULL; int j, m; - int phi_seen = 0; - sched_info_t *info = get_irn_sched_info(block); /* Initialize the block's list head that will hold the schedule. */ INIT_LIST_HEAD(&info->list); /* Initialize the block scheduling environment */ - be.dbg = firm_dbg_register("firm.be.sched"); be.block = block; be.curr_time = 0; - be.ready_set = new_pset(node_cmp_func, get_irn_n_edges(block)); - be.already_scheduled = new_pset(node_cmp_func, get_irn_n_edges(block)); + be.ready_set = new_nodeset(get_irn_n_edges(block)); + be.already_scheduled = new_nodeset(get_irn_n_edges(block)); be.selector = selector; + FIRM_DBG_REGISTER(be.dbg, "firm.be.sched"); if(selector->init_block) be.selector_block_env = selector->init_block(env->selector_env, block); @@ -546,14 +634,12 @@ static void list_sched_block(ir_node *block, void *env_ptr) phi_seen = 1; } + /* The start block will be scheduled as the first node */ else if(irn == start_node) { add_to_sched(&be, irn); add_tuple_projs(&be, irn); } - else if(get_irn_opcode(irn) == iro_Jmp) { - final_jmp = irn; - } /* Other nodes must have all operands in other blocks to be made * ready */ @@ -581,7 +667,7 @@ static void list_sched_block(ir_node *block, void *env_ptr) /* Increase the time, if some phi functions have been scheduled */ be.curr_time += phi_seen; - while(pset_count(be.ready_set) > 0) { + while (nodeset_count(be.ready_set) > 0) { /* select a node to be scheduled and check if it was ready */ irn = select_node(&be); @@ -599,16 +685,13 @@ static void list_sched_block(ir_node *block, void *env_ptr) be.curr_time += 1; /* remove the scheduled node from the ready list. */ - if(pset_find_ptr(be.ready_set, irn)) - pset_remove_ptr(be.ready_set, irn); + if (nodeset_find(be.ready_set, irn)) + nodeset_remove(be.ready_set, irn); } if(selector->finish_block) selector->finish_block(be.selector_block_env); - if(final_jmp) - add_to_sched(&be, final_jmp); - - del_pset(be.ready_set); - del_pset(be.already_scheduled); + del_nodeset(be.ready_set); + del_nodeset(be.already_scheduled); }
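
Below is a small usage sketch, not part of the patch above; it only illustrates how a backend driver invokes the list scheduler after this interface change. The wrapper name example_schedule_irg is made up for illustration, the header name is an assumption taken from the file being patched, and list_sched(), arch_env_t and ir_graph are the identifiers used in the diff itself.

#include "belistsched.h"   /* assumed counterpart header of ir/be/belistsched.c */

/* Hypothetical driver snippet: schedule one graph with the backend's selector. */
static void example_schedule_irg(const arch_env_t *arch_env, ir_graph *irg)
{
	/*
	 * After this change list_sched() takes the whole architecture environment
	 * instead of the bare ISA: it looks the selector up itself via
	 * arch_env->isa->impl->get_list_sched_selector() and hands arch_env on to
	 * the selector's init_graph() callback (compare trivial_init_graph above).
	 */
	list_sched(arch_env, irg);
}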