X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbespilldaemel.c;h=f015f6e6b8e32370503b190409fe541bd0c5e962;hb=8ef2480ec4b8e4ab5af6819773b552e2c1371abc;hp=ea8908609370db59e628346985ed9767d8c22030;hpb=1a26f4853c07d1ecd68a097409dd602edfe29eff;p=libfirm

diff --git a/ir/be/bespilldaemel.c b/ir/be/bespilldaemel.c
index ea8908609..f015f6e6b 100644
--- a/ir/be/bespilldaemel.c
+++ b/ir/be/bespilldaemel.c
@@ -24,9 +24,9 @@
  * @date 20.09.2005
  * @version $Id: bespillbelady.c 13913 2007-05-18 12:48:56Z matze $
  * @brief
- * This implements a naive spilling algorithm. It is designed to produce similar
- * effects to the spill decisions produced by traditional graph coloring
- * register allocators that spill while they are coloring the graph.
+ * This implements a naive spilling algorithm. It is designed to produce
+ * similar effects to the spill decisions produced by traditional graph
+ * coloring register allocators that spill while they are coloring the graph.
  *
  * This spiller walks over all blocks and looks for places with too high
  * register pressure where it spills the values that are cheapest to spill.
@@ -101,8 +101,6 @@ static double get_spill_costs(ir_node *node)
 		}
 	}
 
-	/* TODO cache costs? */
-
 	return costs;
 }
 
@@ -135,6 +133,12 @@ static void spill_node(ir_node *node)
 	bitset_set(spilled_nodes, get_irn_idx(node));
 }
 
+static unsigned get_value_width(const ir_node *node)
+{
+	const arch_register_req_t *req = arch_get_register_req_out(node);
+	return req->width;
+}
+
 /**
  * spill @p n nodes from a nodeset. Removes the nodes from the nodeset and
  * sets the spilled bits in spilled_nodes.
@@ -150,21 +154,12 @@ static void do_spilling(ir_nodeset_t *live_nodes, ir_node *node)
 	int spills_needed;
 	size_t cand_idx;
 	ir_node *n;
+	ir_node *value;
 
-	/* mode_T nodes define several values at once. Count them */
-	if (get_irn_mode(node) == mode_T) {
-		const ir_edge_t *edge;
-
-		foreach_out_edge(node, edge) {
-			const ir_node *proj = get_edge_src_irn(edge);
-
-			if (arch_irn_consider_in_reg_alloc(cls, proj)) {
-				++values_defined;
-			}
-		}
-	} else if (arch_irn_consider_in_reg_alloc(cls, node)) {
-		++values_defined;
-	}
+	be_foreach_definition(node, cls, value,
+		assert(req_->width >= 1);
+		values_defined += req_->width;
+	);
 
 	/* we need registers for the non-live argument values */
 	arity = get_irn_arity(node);
@@ -172,12 +167,12 @@ static void do_spilling(ir_nodeset_t *live_nodes, ir_node *node)
 		ir_node *pred = get_irn_n(node, i);
 		if (arch_irn_consider_in_reg_alloc(cls, pred)
 				&& !ir_nodeset_contains(live_nodes, pred)) {
-			++free_regs_needed;
+			free_regs_needed += get_value_width(pred);
 		}
 	}
 
 	/* we can reuse all reloaded values for the defined values, but we might
-	   need even more registers */
+	 * need even more registers */
 	if (values_defined > free_regs_needed)
 		free_regs_needed = values_defined;
 
@@ -208,15 +203,14 @@ static void do_spilling(ir_nodeset_t *live_nodes, ir_node *node)
 	/* spill cheapest ones */
 	cand_idx = 0;
 	while (spills_needed > 0) {
-		spill_candidate_t *candidate;
-		ir_node *cand_node;
-		int is_use;
+		bool is_use = false;
+		spill_candidate_t *candidate;
+		ir_node *cand_node;
 
 		if (cand_idx >= n_live_nodes) {
 			panic("can't spill enough values for node %+F", node);
 		}
 
-
 		candidate = &candidates[cand_idx];
 		cand_node = candidate->node;
 		++cand_idx;
@@ -225,21 +219,19 @@ static void do_spilling(ir_nodeset_t *live_nodes, ir_node *node)
 			continue;
 
 		/* make sure the node is not an argument of the instruction */
-		is_use = 0;
 		for (i = 0; i < arity; ++i) {
 			ir_node *in = get_irn_n(node, i);
 			if (in == cand_node) {
-				is_use = 1;
+				is_use = true;
				break;
 			}
 		}
 
-		if (is_use) {
+		if (is_use)
 			continue;
-		}
 
 		spill_node(cand_node);
 		ir_nodeset_remove(live_nodes, cand_node);
-		--spills_needed;
+		spills_needed -= get_value_width(cand_node);
 	}
 }
 
@@ -248,25 +240,13 @@ static void do_spilling(ir_nodeset_t *live_nodes, ir_node *node)
  */
 static void remove_defs(ir_node *node, ir_nodeset_t *nodeset)
 {
-	/* You should better break out of your loop when hitting the first phi
-	 * function. */
-	assert(!is_Phi(node) && "liveness_transfer produces invalid results for phi nodes");
-
-	if (get_irn_mode(node) == mode_T) {
-		const ir_edge_t *edge;
-
-		foreach_out_edge(node, edge) {
-			const ir_node *proj = get_edge_src_irn(edge);
-
-			if (arch_irn_consider_in_reg_alloc(cls, proj)) {
-				ir_nodeset_remove(nodeset, proj);
-			}
-		}
-	}
+	ir_node *value;
+	/* You must break out of your loop when hitting the first phi function. */
+	assert(!is_Phi(node));
 
-	if (arch_irn_consider_in_reg_alloc(cls, node)) {
-		ir_nodeset_remove(nodeset, node);
-	}
+	be_foreach_definition(node, cls, value,
+		ir_nodeset_remove(nodeset, value);
+	);
 }
 
 static void add_uses(ir_node *node, ir_nodeset_t *nodeset)
@@ -307,6 +287,7 @@ static void spill_block(ir_node *block, void *data)
 	ir_node *node;
 	int n_phi_values_spilled;
 	int regpressure;
+	int live_nodes_pressure;
 	int phi_spills_needed;
 
 	(void) data;
@@ -340,20 +321,24 @@ static void spill_block(ir_node *block, void *data)
 
 	/* until now only the values of some phis have been spilled the phis itself
 	 * are still there and occupy registers, so we need to count them and might
-	 * have to spill some of them.
-	 */
+	 * have to spill some of them. */
 	n_phi_values_spilled = 0;
 	sched_foreach(block, node) {
 		if (!is_Phi(node))
 			break;
 
 		if (bitset_is_set(spilled_nodes, get_irn_idx(node))) {
-			++n_phi_values_spilled;
+			n_phi_values_spilled += get_value_width(node);
 		}
 	}
 
+	live_nodes_pressure = 0;
+	foreach_ir_nodeset(&live_nodes, node, iter) {
+		live_nodes_pressure += get_value_width(node);
+	}
+
 	/* calculate how many of the phis need to be spilled */
-	regpressure = ir_nodeset_size(&live_nodes) + n_phi_values_spilled;
+	regpressure = live_nodes_pressure + n_phi_values_spilled;
 	phi_spills_needed = regpressure - n_regs;
 	DBG((dbg, LEVEL_3, "Regpressure before phis: %d phispills: %d\n",
 	     regpressure, phi_spills_needed));
@@ -367,29 +352,28 @@ static void spill_block(ir_node *block, void *data)
 		if (phi_spills_needed <= 0)
 			break;
 
-		if (bitset_is_set(spilled_nodes, get_irn_idx(node))) {
-			be_spill_phi(spill_env, node);
-			--phi_spills_needed;
-		}
+		if (!bitset_is_set(spilled_nodes, get_irn_idx(node)))
+			continue;
+
+		be_spill_phi(spill_env, node);
+		phi_spills_needed -= get_value_width(node);
 	}
 	assert(phi_spills_needed <= 0);
 
 	ir_nodeset_destroy(&live_nodes);
 }
 
-void be_spill_daemel(be_irg_t *birg, const arch_register_class_t *new_cls)
+static void be_spill_daemel(ir_graph *irg, const arch_register_class_t *new_cls)
 {
-	ir_graph *irg = be_get_birg_irg(birg);
-	n_regs = new_cls->n_regs - be_put_ignore_regs(birg, new_cls, NULL);
-
+	n_regs = new_cls->n_regs - be_put_ignore_regs(irg, new_cls, NULL);
 	if (n_regs == 0)
 		return;
 
-	be_liveness_assure_sets(be_assure_liveness(birg));
+	be_liveness_assure_sets(be_assure_liveness(irg));
 
-	spill_env = be_new_spill_env(birg);
+	spill_env = be_new_spill_env(irg);
 	cls = new_cls;
-	lv = be_get_birg_liveness(birg);
+	lv = be_get_irg_liveness(irg);
 	spilled_nodes = bitset_malloc(get_irg_last_idx(irg));
 
 	DBG((dbg, LEVEL_1, "*** RegClass %s\n", cls->name));
@@ -397,14 +381,12 @@ void be_spill_daemel(be_irg_t *birg, const arch_register_class_t *new_cls)
 	irg_block_walk_graph(irg, spill_block, NULL, NULL);
 
 	bitset_free(spilled_nodes);
-	spilled_nodes = NULL;
 
 	be_insert_spills_reloads(spill_env);
-
 	be_delete_spill_env(spill_env);
-	spill_env = NULL;
 }
 
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_daemelspill);
 void be_init_daemelspill(void)
 {
 	static be_spiller_t daemel_spiller = {
@@ -414,5 +396,3 @@ void be_init_daemelspill(void)
 	be_register_spiller("daemel", &daemel_spiller);
 	FIRM_DBG_REGISTER(dbg, "firm.be.spilldaemel");
 }
-
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_daemelspill);