X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbeschednormal.c;h=c23a2b774ef4089b9c779d3b6e5134afeaeecb92;hb=df2faee01a5832057bb3ca0ba5f67e979c916e19;hp=fb131a4ac3044d3d55989007949fe7a5c43ec57c;hpb=c64c36899c2f0d04e82ff1e2223ff41efe2efdfa;p=libfirm diff --git a/ir/be/beschednormal.c b/ir/be/beschednormal.c index fb131a4ac..c23a2b774 100644 --- a/ir/be/beschednormal.c +++ b/ir/be/beschednormal.c @@ -20,20 +20,20 @@ /** * @brief Use the strong normal form theorem (though it does not hold) * @author Christoph Mallon - * @version $Id$ */ #include "config.h" #include -#include "besched_t.h" +#include "besched.h" #include "belistsched.h" #include "belive_t.h" #include "beutil.h" -#include "height.h" -#include "irtools.h" +#include "heights.h" #include "irgwalk.h" -#include "benode_t.h" +#include "benode.h" +#include "bemodule.h" +#include "util.h" #include "array_t.h" // XXX there is no one time init for schedulers @@ -43,46 +43,38 @@ /** An instance of the normal scheduler. */ typedef struct instance_t { ir_graph* irg; /**< the IR graph of this instance */ - heights_t* heights; /**< height for the graph of this instance */ struct obstack obst; /**< obstack for temporary data */ - ir_node*** block_lists; /**< list of all block scheduling list */ + ir_node* curr_list; /**< current block schedule list */ } instance_t; static int must_be_scheduled(const ir_node* const irn) { - return !is_Proj(irn) && !is_Sync(irn); + return !is_Proj(irn) && !arch_irn_is(irn, not_scheduled); } -static ir_node *normal_select(void *block_env, ir_nodeset_t *ready_set, - ir_nodeset_t *live_set) +static ir_node *normal_select(void *block_env, ir_nodeset_t *ready_set) { - ir_nodeset_iterator_t iter; - ir_node* block; - ir_node* irn; - ir_node** sched; - int sched_count; - - (void)block_env; - (void)live_set; - - ir_nodeset_iterator_init(&iter, ready_set); - irn = ir_nodeset_iterator_next(&iter); - block = get_nodes_block(irn); - sched = get_irn_link(block); - sched_count = ARR_LEN(sched); - for (; sched_count-- != 0; ++sched) { - ir_node* irn = *sched; - if (ir_nodeset_contains(ready_set, irn) && - !arch_irn_class_is(irn, branch)) { + instance_t* inst = (instance_t*)block_env; + ir_node* irn; + ir_node* next; + ir_node* last = NULL; + + for (irn = inst->curr_list; irn != NULL; last = irn, irn = next) { + next = (ir_node*)get_irn_link(irn); + if (ir_nodeset_contains(ready_set, irn)) { #if defined NORMAL_DBG ir_fprintf(stderr, "scheduling %+F\n", irn); #endif + if (last == NULL) + inst->curr_list = next; + else + set_irn_link(last, next); return irn; } } - return irn; + return ir_nodeset_first(ready_set); } @@ -93,8 +85,8 @@ typedef struct irn_cost_pair { static int cost_cmp(const void* a, const void* b) { - const irn_cost_pair* const a1 = a; - const irn_cost_pair* const b1 = b; + const irn_cost_pair* const a1 = (const irn_cost_pair*)a; + const irn_cost_pair* const b1 = (const irn_cost_pair*)b; int ret = b1->cost - a1->cost; if (ret == 0) ret = (int)get_irn_idx(a1->irn) - (int)get_irn_idx(b1->irn); @@ -121,7 +113,11 @@ static int count_result(const ir_node* irn) if (mode == mode_M || mode == mode_X) return 0; - if (arch_get_register_req_out(irn)->type & arch_register_req_type_ignore) + if (mode == mode_T) + return 1; + + arch_register_req_t const *const req = arch_get_irn_register_req(irn); + if (arch_register_req_is(req, ignore)) return 0; return 1; @@ -153,18 +149,16 @@ static int normal_tree_cost(ir_node* irn, instance_t *inst) if (fc == NULL) { irn_cost_pair* costs; - int i; ir_node* block = 
get_nodes_block(irn); - fc = obstack_alloc(&inst->obst, sizeof(*fc) + sizeof(*fc->costs) * arity); + fc = OALLOCF(&inst->obst, flag_and_cost, costs, arity); fc->no_root = 0; costs = fc->costs; for (i = 0; i < arity; ++i) { ir_node* pred = get_irn_n(irn, i); - int cost; - if (is_Phi(irn) || get_irn_mode(pred) == mode_M || is_Block(pred)) { + if (is_Phi(irn) || get_irn_mode(pred) == mode_M) { cost = 0; } else if (get_nodes_block(pred) != block) { cost = 1; @@ -173,7 +167,6 @@ static int normal_tree_cost(ir_node* irn, instance_t *inst) ir_node* real_pred; cost = normal_tree_cost(pred, inst); - if (be_is_Barrier(pred)) cost = 1; // XXX hack: the barrier causes all users to have a reguse of #regs if (!arch_irn_is_ignore(pred)) { real_pred = (is_Proj(pred) ? get_Proj_pred(pred) : pred); pred_fc = get_irn_fc(real_pred); @@ -196,9 +189,14 @@ static int normal_tree_cost(ir_node* irn, instance_t *inst) last = 0; for (i = 0; i < arity; ++i) { ir_node* op = fc->costs[i].irn; - if (op == last) continue; - if (get_irn_mode(op) == mode_M) continue; - if (arch_irn_is_ignore(op)) continue; + ir_mode* mode; + if (op == last) + continue; + mode = get_irn_mode(op); + if (mode == mode_M) + continue; + if (arch_irn_is_ignore(op)) + continue; cost = MAX(fc->costs[i].cost + n_op_res, cost); last = op; ++n_op_res; @@ -216,12 +214,16 @@ static int normal_tree_cost(ir_node* irn, instance_t *inst) static void normal_cost_walker(ir_node* irn, void* env) { - instance_t *inst = env; + instance_t *inst = (instance_t*)env; #if defined NORMAL_DBG ir_fprintf(stderr, "cost walking node %+F\n", irn); #endif - if (is_Block(irn)) return; + if (is_Block(irn)) { + ir_node **const roots = NEW_ARR_F(ir_node*, 0); + set_irn_link(irn, roots); + return; + } if (!must_be_scheduled(irn)) return; normal_tree_cost(irn, inst); } @@ -233,7 +235,6 @@ static void collect_roots(ir_node* irn, void* env) (void)env; - if (is_Block(irn)) return; if (!must_be_scheduled(irn)) return; is_root = be_is_Keep(irn) || !get_irn_fc(irn)->no_root; @@ -244,10 +245,7 @@ static void collect_roots(ir_node* irn, void* env) if (is_root) { ir_node* block = get_nodes_block(irn); - ir_node** roots = get_irn_link(block); - if (roots == NULL) { - roots = NEW_ARR_F(ir_node*, 0); - } + ir_node** roots = (ir_node**)get_irn_link(block); ARR_APP1(ir_node*, roots, irn); set_irn_link(block, roots); } @@ -257,7 +255,6 @@ static void collect_roots(ir_node* irn, void* env) static ir_node** sched_node(ir_node** sched, ir_node* irn) { if (irn_visited_else_mark(irn)) return sched; - if (is_End(irn)) return sched; if (!is_Phi(irn) && !be_is_Keep(irn)) { ir_node* block = get_nodes_block(irn); @@ -282,18 +279,22 @@ static ir_node** sched_node(ir_node** sched, ir_node* irn) static int root_cmp(const void* a, const void* b) { - const irn_cost_pair* const a1 = a; - const irn_cost_pair* const b1 = b; + const irn_cost_pair* const a1 = (const irn_cost_pair*)a; + const irn_cost_pair* const b1 = (const irn_cost_pair*)b; int ret; - if (is_irn_forking(a1->irn)) { + if (is_irn_forking(a1->irn) && !is_irn_forking(b1->irn)) { ret = 1; - } else if (is_irn_forking(b1->irn)) { + } else if (is_irn_forking(b1->irn) && !is_irn_forking(a1->irn)) { ret = -1; } else { ret = b1->cost - a1->cost; if (ret == 0) { /* place live-out nodes later */ ret = (count_result(a1->irn) != 0) - (count_result(b1->irn) != 0); + if (ret == 0) { + /* compare node idx */ + ret = get_irn_idx(a1->irn) - get_irn_idx(b1->irn); + } } } #if defined NORMAL_DBG @@ -305,10 +306,8 @@ static int root_cmp(const void* a, const void* b) 
static void normal_sched_block(ir_node* block, void* env) { - instance_t* inst = env; - ir_node** roots = get_irn_link(block); - heights_t* heights; - int root_count; + ir_node** roots = (ir_node**)get_irn_link(block); + ir_heights_t* heights = (ir_heights_t*)env; irn_cost_pair* root_costs; int i; ir_node** sched; @@ -317,15 +316,14 @@ static void normal_sched_block(ir_node* block, void* env) ir_fprintf(stderr, "sched walking block %+F\n", block); #endif - if (roots == NULL) { + int const root_count = ARR_LEN(roots); + if (root_count == 0) { #if defined NORMAL_DBG fprintf(stderr, "has no roots\n"); #endif return; } - heights = inst->heights; - root_count = ARR_LEN(roots); NEW_ARR_A(irn_cost_pair, root_costs, root_count); for (i = 0; i < root_count; ++i) { root_costs[i].irn = roots[i]; @@ -354,9 +352,8 @@ static void normal_sched_block(ir_node* block, void* env) assert(must_be_scheduled(irn)); sched = sched_node(sched, irn); } - DEL_ARR_F(roots); set_irn_link(block, sched); - ARR_APP1(ir_node**, inst->block_lists, sched); + DEL_ARR_F(roots); #if defined NORMAL_DBG { @@ -373,43 +370,58 @@ static void normal_sched_block(ir_node* block, void* env) } -static void *normal_init_graph(const list_sched_selector_t *vtab, - const be_irg_t *birg) +static void *normal_init_graph(ir_graph *irg) { - instance_t *inst = XMALLOC(instance_t); - ir_graph *irg = be_get_birg_irg(birg); - - (void)vtab; + instance_t *inst = XMALLOC(instance_t); + ir_heights_t *heights; be_clear_links(irg); obstack_init(&inst->obst); inst->irg = irg; - inst->heights = heights_new(irg); - inst->block_lists = NEW_ARR_F(ir_node**, 0); + + heights = heights_new(irg); ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK); irg_walk_graph(irg, normal_cost_walker, NULL, inst); irg_walk_graph(irg, collect_roots, NULL, NULL); inc_irg_visited(irg); ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED); - irg_block_walk_graph(irg, normal_sched_block, NULL, inst); + irg_block_walk_graph(irg, normal_sched_block, NULL, heights); ir_free_resources(irg, IR_RESOURCE_IRN_VISITED); - heights_free(inst->heights); + heights_free(heights); return inst; } -void normal_finish_graph(void *env) +static void *normal_init_block(void *graph_env, ir_node *block) { - instance_t *inst = env; - int i; - - for (i = ARR_LEN(inst->block_lists) - 1; i >= 0; --i) { - DEL_ARR_F(inst->block_lists[i]); + instance_t* inst = (instance_t*)graph_env; + ir_node** sched = (ir_node**)get_irn_link(block); + ir_node* first = NULL; + int i; + + /* turn into a list, so we can easily remove nodes. + The link field is used anyway. 
*/ + for (i = ARR_LEN(sched) - 1; i >= 0; --i) { + ir_node* irn = sched[i]; + if (!is_cfop(irn)) { + set_irn_link(irn, first); + first = irn; + } } - DEL_ARR_F(inst->block_lists); + /* note: we can free sched here, there should be no attempt to schedule + a block twice */ + DEL_ARR_F(sched); + set_irn_link(block, sched); + inst->curr_list = first; + return inst; +} + +static void normal_finish_graph(void *env) +{ + instance_t *inst = (instance_t*)env; /* block uses the link field to store the schedule */ ir_free_resources(inst->irg, IR_RESOURCE_IRN_LINK); @@ -417,15 +429,22 @@ void normal_finish_graph(void *env) xfree(inst); } -const list_sched_selector_t normal_selector = { - normal_init_graph, - NULL, /* init_block */ - normal_select, - NULL, /* to_appear_in_schedule */ - NULL, /* node_ready */ - NULL, /* node_selected */ - NULL, /* exectime */ - NULL, /* latency */ - NULL, /* finish_block */ - normal_finish_graph -}; +static void sched_normal(ir_graph *irg) +{ + static const list_sched_selector_t normal_selector = { + normal_init_graph, + normal_init_block, + normal_select, + NULL, /* node_ready */ + NULL, /* node_selected */ + NULL, /* finish_block */ + normal_finish_graph + }; + be_list_sched_graph(irg, &normal_selector); +} + +BE_REGISTER_MODULE_CONSTRUCTOR(be_init_sched_normal) +void be_init_sched_normal(void) +{ + be_register_scheduler("normal", sched_normal); +}
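
For reference, a minimal sketch of how another scheduler could hook into the list_sched_selector_t interface used above. The callback signatures and the initializer order follow what this patch shows for the "normal" scheduler; the "trivial" selector name, the empty callbacks, the choice to keep no per-graph state, and the exact header set are assumptions made purely for illustration, not part of the patch.

/*
 * Illustrative sketch only (not part of this patch): a "trivial" selector
 * that always takes the first ready node.  Callback signatures follow the
 * ones shown in beschednormal.c above; the include list is assumed.
 */
#include "belistsched.h"
#include "bemodule.h"
#include "irnodeset.h"

static void *trivial_init_graph(ir_graph *irg)
{
	(void)irg;
	return NULL;              /* no per-graph state needed */
}

static void *trivial_init_block(void *graph_env, ir_node *block)
{
	(void)block;
	return graph_env;         /* no per-block state either */
}

static ir_node *trivial_select(void *block_env, ir_nodeset_t *ready_set)
{
	(void)block_env;
	return ir_nodeset_first(ready_set);   /* just pick any ready node */
}

static void trivial_finish_graph(void *env)
{
	(void)env;                /* nothing to clean up */
}

static void sched_trivial(ir_graph *irg)
{
	static const list_sched_selector_t trivial_selector = {
		trivial_init_graph,
		trivial_init_block,
		trivial_select,
		NULL, /* node_ready */
		NULL, /* node_selected */
		NULL, /* finish_block */
		trivial_finish_graph
	};
	be_list_sched_graph(irg, &trivial_selector);
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_sched_trivial)
void be_init_sched_trivial(void)
{
	be_register_scheduler("trivial", sched_trivial);
}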