2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Naive spilling algorithm
23 * @author Matthias Braun
25 * @version $Id: bespillbelady.c 13913 2007-05-18 12:48:56Z matze $
27 * This implements a naive spilling algorithm. It is designed to produce similar
28 * effects to the spill decisions produced by traditional graph coloring
29 * register allocators that spill while they are coloring the graph.
31 * This spiller walks over all blocks and looks for places with too high
32 * register pressure where it spills the values that are cheapest to spill.
33 * Spilling in this context means placing a spill instruction behind the
34 * definition of the value and a reload before each usage.
42 #include "irnodeset.h"
45 #include "iredges_t.h"
49 #include "bespilloptions.h"
58 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
60 static spill_env_t *spill_env;
62 static const arch_env_t *arch_env;
63 static const arch_register_class_t *cls;
64 static const be_lv_t *lv;
65 static bitset_t *spilled_nodes;
67 typedef struct spill_candidate_t spill_candidate_t;
68 struct spill_candidate_t {
73 static int compare_spill_candidates_desc(const void *d1, const void *d2)
75 const spill_candidate_t *c1 = d1;
76 const spill_candidate_t *c2 = d2;
78 return (int) (c1->costs - c2->costs);
81 static double get_spill_costs(ir_node *node)
83 const ir_edge_t *edge;
84 ir_node *spill_place = skip_Proj(node);
85 double costs = be_get_spill_costs(spill_env, node,
88 foreach_out_edge(node, edge) {
89 ir_node *use = get_edge_src_irn(edge);
91 /* keeps should be directly below the node */
97 int in = get_edge_src_pos(edge);
98 ir_node *block = get_nodes_block(use);
100 costs += be_get_reload_costs_on_edge(spill_env, node, block, in);
102 costs += be_get_reload_costs(spill_env, node, use);
106 /* TODO cache costs? */
112 * spills a node by placing a reload before each usage
114 static void spill_node(ir_node *node)
116 const ir_edge_t *edge;
118 DBG((dbg, LEVEL_3, "\tspilling %+F\n", node));
120 foreach_out_edge(node, edge) {
121 ir_node *use = get_edge_src_irn(edge);
128 int in = get_edge_src_pos(edge);
129 ir_node *block = get_nodes_block(use);
131 be_add_reload_on_edge(spill_env, node, block, in, cls, 1);
133 be_add_reload(spill_env, node, use, cls, 1);
137 bitset_set(spilled_nodes, get_irn_idx(node));
141 * spill @p n nodes from a nodeset. Removes the nodes from the nodeset and
142 * sets the spilled bits in spilled_nodes.
144 static void do_spilling(ir_nodeset_t *live_nodes, ir_node *node)
146 size_t n_live_nodes = ir_nodeset_size(live_nodes);
147 size_t values_defined = 0;
148 size_t free_regs_needed = 0;
149 spill_candidate_t *candidates;
150 ir_nodeset_iterator_t iter;
156 /* mode_T nodes define several values at once. Count them */
157 if(get_irn_mode(node) == mode_T) {
158 const ir_edge_t *edge;
160 foreach_out_edge(node, edge) {
161 const ir_node *proj = get_edge_src_irn(edge);
163 if(arch_irn_consider_in_reg_alloc(arch_env, cls, proj)) {
167 } else if(arch_irn_consider_in_reg_alloc(arch_env, cls, node)) {
171 /* we need registers for the non-live argument values */
172 arity = get_irn_arity(node);
173 for(i = 0; i < arity; ++i) {
174 ir_node *pred = get_irn_n(node, i);
175 if(arch_irn_consider_in_reg_alloc(arch_env, cls, pred)
176 && !ir_nodeset_contains(live_nodes, pred)) {
181 /* we can reuse all reloaded values for the defined values, but we might
182 need even more registers */
183 if(values_defined > free_regs_needed)
184 free_regs_needed = values_defined;
186 spills_needed = (n_live_nodes + free_regs_needed) - n_regs;
187 if(spills_needed <= 0)
189 DBG((dbg, LEVEL_2, "\tspills needed after %+F: %d\n", node, spills_needed));
191 candidates = alloca(n_live_nodes * sizeof(candidates[0]));
193 /* construct array with spill candidates and calculate their costs */
195 foreach_ir_nodeset(live_nodes, n, iter) {
196 spill_candidate_t *candidate = & candidates[i];
198 assert(!bitset_is_set(spilled_nodes, get_irn_idx(n)));
201 candidate->costs = get_spill_costs(n);
204 assert(i == n_live_nodes);
206 /* sort spill candidates */
207 qsort(candidates, n_live_nodes, sizeof(candidates[0]),
208 compare_spill_candidates_desc);
210 /* spill cheapest ones */
212 while(spills_needed > 0) {
213 spill_candidate_t *candidate;
217 if (cand_idx >= n_live_nodes) {
218 panic("can't spill enough values for node %+F\n", node);
222 candidate = &candidates[cand_idx];
223 cand_node = candidate->node;
226 if(arch_irn_is(arch_env, cand_node, dont_spill))
229 /* make sure the node is not an argument of the instruction */
231 for (i = 0; i < arity; ++i) {
232 ir_node *in = get_irn_n(node, i);
233 if(in == cand_node) {
242 spill_node(cand_node);
243 ir_nodeset_remove(live_nodes, cand_node);
249 * removes all values from the nodeset that are defined by node
251 static void remove_defs(ir_node *node, ir_nodeset_t *nodeset)
253 /* You should better break out of your loop when hitting the first phi
255 assert(!is_Phi(node) && "liveness_transfer produces invalid results for phi nodes");
257 if (get_irn_mode(node) == mode_T) {
258 const ir_edge_t *edge;
260 foreach_out_edge(node, edge) {
261 const ir_node *proj = get_edge_src_irn(edge);
263 if (arch_irn_consider_in_reg_alloc(arch_env, cls, proj)) {
264 ir_nodeset_remove(nodeset, proj);
269 if(arch_irn_consider_in_reg_alloc(arch_env, cls, node)) {
270 ir_nodeset_remove(nodeset, node);
274 static void add_uses(ir_node *node, ir_nodeset_t *nodeset)
278 arity = get_irn_arity(node);
279 for(i = 0; i < arity; ++i) {
280 ir_node *op = get_irn_n(node, i);
282 if(arch_irn_consider_in_reg_alloc(arch_env, cls, op)
283 && !bitset_is_set(spilled_nodes, get_irn_idx(op))) {
284 ir_nodeset_insert(nodeset, op);
289 static __attribute__((unused))
290 void print_nodeset(ir_nodeset_t *nodeset)
292 ir_nodeset_iterator_t iter;
295 foreach_ir_nodeset(nodeset, node, iter) {
296 ir_fprintf(stderr, "%+F ", node);
298 fprintf(stderr, "\n");
302 * make sure register pressure in a block is always equal or below the number
303 * of available registers
305 static void spill_block(ir_node *block, void *data)
307 ir_nodeset_t live_nodes;
308 ir_nodeset_iterator_t iter;
310 int n_phi_values_spilled;
312 int phi_spills_needed;
315 DBG((dbg, LEVEL_1, "spilling block %+F\n", block));
317 /* construct set of live nodes at end of block */
318 ir_nodeset_init(&live_nodes);
319 be_liveness_end_of_block(lv, arch_env, cls, block, &live_nodes);
321 /* remove already spilled nodes from liveset */
322 foreach_ir_nodeset(&live_nodes, node, iter) {
323 DBG((dbg, LEVEL_2, "\t%+F is live-end... ", node));
324 if(bitset_is_set(spilled_nodes, get_irn_idx(node))) {
325 DBG((dbg, LEVEL_2, "but spilled; removing.\n"));
326 ir_nodeset_remove_iterator(&live_nodes, &iter);
328 DBG((dbg, LEVEL_2, "keeping.\n"));
332 /* walk schedule backwards and spill until register pressure is fine at
334 sched_foreach_reverse(block, node) {
338 remove_defs(node, &live_nodes);
339 do_spilling(&live_nodes, node);
340 add_uses(node, &live_nodes);
343 /* until now only the values of some phis have been spilled the phis itself
344 * are still there and occupy registers, so we need to count them and might
345 * have to spill some of them.
347 n_phi_values_spilled = 0;
348 sched_foreach(block, node) {
352 if(bitset_is_set(spilled_nodes, get_irn_idx(node))) {
353 ++n_phi_values_spilled;
357 /* calculate how many of the phis need to be spilled */
358 regpressure = ir_nodeset_size(&live_nodes) + n_phi_values_spilled;
359 phi_spills_needed = regpressure - n_regs;
360 DBG((dbg, LEVEL_3, "Regpressure before phis: %d phispills: %d\n",
361 regpressure, phi_spills_needed));
363 /* spill as many phis as needed */
364 /* TODO: we should really estimate costs of the phi spill as well...
365 * and preferably spill phis with lower costs... */
366 sched_foreach(block, node) {
369 if(phi_spills_needed <= 0)
372 if(bitset_is_set(spilled_nodes, get_irn_idx(node))) {
373 be_spill_phi(spill_env, node);
377 assert(phi_spills_needed <= 0);
379 ir_nodeset_destroy(&live_nodes);
382 void be_spill_daemel(be_irg_t *birg, const arch_register_class_t *new_cls)
384 ir_graph *irg = be_get_birg_irg(birg);
385 n_regs = new_cls->n_regs - be_put_ignore_regs(birg, new_cls, NULL);
390 be_liveness_assure_sets(be_assure_liveness(birg));
392 spill_env = be_new_spill_env(birg);
393 arch_env = be_get_birg_arch_env(birg);
395 lv = be_get_birg_liveness(birg);
396 spilled_nodes = bitset_malloc(get_irg_last_idx(irg));
398 DBG((dbg, LEVEL_1, "*** RegClass %s\n", cls->name));
400 irg_block_walk_graph(irg, spill_block, NULL, NULL);
402 bitset_free(spilled_nodes);
403 spilled_nodes = NULL;
405 be_insert_spills_reloads(spill_env);
407 be_delete_spill_env(spill_env);
/* Registers the "daemel" spiller with the backend spiller framework and
 * sets up the debug module.  Invoked via the module-constructor macro
 * below. */
411 void be_init_daemelspill(void)
413 static be_spiller_t daemel_spiller = {
417 be_register_spiller("daemel", &daemel_spiller);
418 FIRM_DBG_REGISTER(dbg, "ir.be.spilldaemel");
/* Fix: the constructor must reference the function defined above,
 * be_init_daemelspill; it previously named the undeclared identifier
 * be_init_doedelspill, which cannot resolve at link time. */
421 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_daemelspill);