/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Driver for the chordal register allocator.
 * @author  Sebastian Hack
 */

#include "lc_opts_enum.h"

#include "irgraph_t.h"
#include "irprintf_t.h"
#include "iredges_t.h"

#include "bechordal_t.h"
#include "besched_t.h"
#include "beifg_impl.h"
#include "bestatevent.h"
#include "bespillslots.h"
#include "bespilloptions.h"
#include "becopystat.h"
#include "becopyopt.h"
#include "bessadestr.h"

static be_ra_chordal_opts_t options = {
	BE_CH_DUMP_NONE,
	BE_CH_LOWER_PERM_SWAP,
	BE_CH_VRFY_WARN,
};

typedef struct _post_spill_env_t {
	be_chordal_env_t            cenv;
	be_irg_t                    *birg;
	const arch_register_class_t *cls;
	double                      pre_spill_cost;
} post_spill_env_t;

static be_options_t *main_opts;

static const lc_opt_enum_int_items_t lower_perm_items[] = {
	{ "copy", BE_CH_LOWER_PERM_COPY },
	{ "swap", BE_CH_LOWER_PERM_SWAP },
	{ NULL, 0 }
};

static const lc_opt_enum_int_items_t lower_perm_stat_items[] = {
	{ NULL, 0 }
};

static const lc_opt_enum_int_items_t dump_items[] = {
	{ "none",       BE_CH_DUMP_NONE       },
	{ "spill",      BE_CH_DUMP_SPILL      },
	{ "live",       BE_CH_DUMP_LIVE       },
	{ "color",      BE_CH_DUMP_COLOR      },
	{ "copymin",    BE_CH_DUMP_COPYMIN    },
	{ "ssadestr",   BE_CH_DUMP_SSADESTR   },
	{ "tree",       BE_CH_DUMP_TREE_INTV  },
	{ "constr",     BE_CH_DUMP_CONSTR     },
	{ "lower",      BE_CH_DUMP_LOWER      },
	{ "spillslots", BE_CH_DUMP_SPILLSLOTS },
	{ "appel",      BE_CH_DUMP_APPEL      },
	{ "all",        BE_CH_DUMP_ALL        },
	{ NULL,         0                     }
};

static const lc_opt_enum_int_items_t be_ch_vrfy_items[] = {
	{ "off",    BE_CH_VRFY_OFF    },
	{ "warn",   BE_CH_VRFY_WARN   },
	{ "assert", BE_CH_VRFY_ASSERT },
	{ NULL,     0                 }
};

static lc_opt_enum_int_var_t lower_perm_var = {
	&options.lower_perm_opt, lower_perm_items
};

static lc_opt_enum_int_var_t dump_var = {
	&options.dump_flags, dump_items
};

static lc_opt_enum_int_var_t be_ch_vrfy_var = {
	&options.vrfy_option, be_ch_vrfy_items
};

static const lc_opt_table_entry_t be_chordal_options[] = {
	LC_OPT_ENT_ENUM_PTR ("perm", "perm lowering options", &lower_perm_var),
	LC_OPT_ENT_ENUM_MASK("dump", "select dump phases", &dump_var),
	LC_OPT_ENT_ENUM_PTR ("vrfy", "verify options", &be_ch_vrfy_var),
	LC_OPT_LAST
};

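/*
 * These entries are registered under the option groups be -> ra -> chordal
 * (see be_init_chordal_main() below), so a driver frontend would typically
 * expose them as something like "be.ra.chordal.perm=copy" or
 * "be.ra.chordal.dump=spill,color"; the exact command-line spelling is up
 * to the frontend.
 */
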
/**
 * Dumps the current graph if the given dump phase is enabled in the
 * dump mask.
 */
static void dump(unsigned mask, ir_graph *irg,
                 const arch_register_class_t *cls,
                 const char *suffix,
                 void (*dump_func)(ir_graph *, const char *))
{
	if ((options.dump_flags & mask) == mask) {
		if (cls) {
			char buf[256];
			snprintf(buf, sizeof(buf), "-%s%s", cls->name, suffix);
			be_dump(irg, buf, dump_func);
		} else {
			be_dump(irg, suffix, dump_func);
		}
	}
}

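/*
 * Illustration of the reload folding performed below, assuming an x86-like
 * backend that supports memory operands:
 *
 *     r1 = Reload [frame, M]
 *     r2 = Add r2, r1          ==>   add r2, [frame+off]
 *
 * The load is folded into its single user, so neither the Reload node nor
 * the register holding r1 is needed anymore.
 */
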
/**
 * Checks for every reload if its user can perform the load on itself.
 */
static void memory_operand_walker(ir_node *irn, void *env)
{
	const ir_edge_t *edge, *ne;
	ir_node         *block;
	ir_node         *spill;

	(void) env;

	if (! be_is_Reload(irn))
		return;

	/* only use memory operands, if the reload is only used by 1 node */
	if (get_irn_n_edges(irn) > 1)
		return;

	spill = be_get_Reload_mem(irn);
	block = get_nodes_block(irn);

	foreach_out_edge_safe(irn, edge, ne) {
		ir_node *src = get_edge_src_irn(edge);
		int      pos = get_edge_src_pos(edge);

		assert(src && "outedges broken!");

		if (get_nodes_block(src) == block && arch_possible_memory_operand(src, pos)) {
			arch_perform_memory_operand(src, spill, pos);
		}
	}

	/* kill the Reload if it was folded into all of its users */
	if (get_irn_n_edges(irn) == 0) {
		sched_remove(irn);
		set_irn_n(irn, be_pos_Reload_mem, new_Bad());
		set_irn_n(irn, be_pos_Reload_frame, new_Bad());
	}
}

/**
 * Starts a walk for memory operands if supported by the backend.
 */
static INLINE void check_for_memory_operands(ir_graph *irg)
{
	irg_walk_graph(irg, NULL, memory_operand_walker, NULL);
}

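/*
 * Node statistics taken at the previous measuring point; freshly collected
 * statistics are diffed against these (be_subtract_node_stats) so that the
 * emitted stat events describe per-phase deltas.
 */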
static be_node_stats_t last_node_stats;

/**
 * Perform things which need to be done per register class before spilling.
 */
static void pre_spill(post_spill_env_t *pse, const arch_register_class_t *cls)
{
	be_chordal_env_t    *chordal_env = &pse->cenv;
	be_irg_t            *birg        = pse->birg;
	ir_graph            *irg         = be_get_birg_irg(birg);
	const be_main_env_t *main_env    = birg->main_env;

	pse->cls                   = cls;
	chordal_env->cls           = cls;
	chordal_env->border_heads  = pmap_create();
	chordal_env->ignore_colors = bitset_malloc(chordal_env->cls->n_regs);

	be_assure_liveness(birg);
	be_liveness_assure_chk(be_get_birg_liveness(birg));

	stat_ev_do(pse->pre_spill_cost = be_estimate_irg_costs(irg, main_env->arch_env, birg->exec_freq));

	/* put all ignore registers into the ignore register set. */
	be_put_ignore_regs(birg, pse->cls, chordal_env->ignore_colors);

	BE_TIMER_PUSH(t_ra_constr);
	be_pre_spill_prepare_constr(chordal_env);
	BE_TIMER_POP(t_ra_constr);

	dump(BE_CH_DUMP_CONSTR, birg->irg, pse->cls, "-constr-pre", dump_ir_block_graph_sched);
}

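/*
 * Per-class pipeline implemented by post_spill() below: fold memory
 * operands and fix stack nodes, verify schedule and register pressure,
 * color the graph, build the interference graph, minimize copies
 * (coalescing) and finally destruct SSA form.
 */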
/**
 * Perform things which need to be done per register class after spilling.
 */
static void post_spill(post_spill_env_t *pse, int iteration)
{
	be_chordal_env_t    *chordal_env = &pse->cenv;
	be_irg_t            *birg        = pse->birg;
	ir_graph            *irg         = birg->irg;
	const be_main_env_t *main_env    = birg->main_env;
	int                  colors_n    = arch_register_class_n_regs(chordal_env->cls);
	int            allocatable_regs  = colors_n - be_put_ignore_regs(birg, chordal_env->cls, NULL);

	/* some special classes contain only ignore regs, no work to be done */
	if (allocatable_regs > 0) {
		stat_ev_dbl("bechordal_spillcosts", be_estimate_irg_costs(irg, main_env->arch_env, birg->exec_freq) - pse->pre_spill_cost);

		/*
			If we have a backend provided spiller, post spill is
			called in a loop after spilling for each register class.
			But we only need to fix stack nodes once in this case.
		*/
		BE_TIMER_PUSH(t_ra_spill_apply);
		check_for_memory_operands(irg);
		if (iteration == 0) {
			be_abi_fix_stack_nodes(birg->abi);
		}
		BE_TIMER_POP(t_ra_spill_apply);

		BE_TIMER_PUSH(t_verify);

		/* verify schedule and register pressure */
		if (chordal_env->opts->vrfy_option == BE_CH_VRFY_WARN) {
			be_verify_schedule(birg);
			be_verify_register_pressure(birg, pse->cls, irg);
		} else if (chordal_env->opts->vrfy_option == BE_CH_VRFY_ASSERT) {
			assert(be_verify_schedule(birg) && "Schedule verification failed");
			assert(be_verify_register_pressure(birg, pse->cls, irg)
			       && "Register pressure verification failed");
		}
		BE_TIMER_POP(t_verify);

		/* Color the graph. */
		BE_TIMER_PUSH(t_ra_color);
		be_ra_chordal_color(chordal_env);
		BE_TIMER_POP(t_ra_color);

		dump(BE_CH_DUMP_COLOR, irg, pse->cls, "-color", dump_ir_block_graph_sched);

		/* Create the ifg with the selected flavor */
		BE_TIMER_PUSH(t_ra_ifg);
		chordal_env->ifg = be_create_ifg(chordal_env);
		BE_TIMER_POP(t_ra_ifg);

		stat_ev_if {
			be_ifg_stat_t   stat;
			be_node_stats_t node_stats;

			be_ifg_stat(birg, chordal_env->ifg, &stat);
			stat_ev_dbl("bechordal_ifg_nodes", stat.n_nodes);
			stat_ev_dbl("bechordal_ifg_edges", stat.n_edges);
			stat_ev_dbl("bechordal_ifg_comps", stat.n_comps);

			be_collect_node_stats(&node_stats, birg);
			be_subtract_node_stats(&node_stats, &last_node_stats);

			stat_ev_dbl("bechordal_perms_before_coal",
			            node_stats[BE_STAT_PERMS]);
			stat_ev_dbl("bechordal_copies_before_coal",
			            node_stats[BE_STAT_COPIES]);
		}

		/* copy minimization */
		BE_TIMER_PUSH(t_ra_copymin);
		co_driver(chordal_env);
		BE_TIMER_POP(t_ra_copymin);

		dump(BE_CH_DUMP_COPYMIN, irg, pse->cls, "-copymin", dump_ir_block_graph_sched);

		/* ssa destruction */
		BE_TIMER_PUSH(t_ra_ssa);
		be_ssa_destruction(chordal_env);
		BE_TIMER_POP(t_ra_ssa);

		dump(BE_CH_DUMP_SSADESTR, irg, pse->cls, "-ssadestr", dump_ir_block_graph_sched);

		if (chordal_env->opts->vrfy_option != BE_CH_VRFY_OFF) {
			BE_TIMER_PUSH(t_verify);
			be_ssa_destruction_check(chordal_env);
			BE_TIMER_POP(t_verify);
		}

		/* the ifg exists only if there are allocatable regs */
		be_ifg_free(chordal_env->ifg);
	}

	/* free some always allocated data structures */
	pmap_destroy(chordal_env->border_heads);
	bitset_free(chordal_env->ignore_colors);
}

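/*
 * The driver below handles two spilling setups: with a generic spiller,
 * pre_spill/spill/post_spill run as one sequence per register class; with
 * a backend-provided spiller, pre_spill runs for all classes first, the
 * backend spills all classes at once, and post_spill then runs per class.
 * The iteration argument of post_spill() ensures the stack nodes are fixed
 * only once in the latter case.
 */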
/**
 * Performs chordal register allocation for each register class on the
 * given irg.
 *
 * @param birg  Backend irg object
 */
static void be_ra_chordal_main(be_irg_t *birg)
{
	const be_main_env_t *main_env = birg->main_env;
	const arch_env_t    *arch_env = main_env->arch_env;
	ir_graph            *irg      = birg->irg;
	int                 j, m;
	be_chordal_env_t    chordal_env;
	struct obstack      obst;

	main_opts = main_env->options;

	BE_TIMER_PUSH(t_ra_other);

	BE_TIMER_PUSH(t_ra_prolog);

	be_assure_liveness(birg);

	chordal_env.obst          = &obst;
	chordal_env.opts          = &options;
	chordal_env.irg           = irg;
	chordal_env.birg          = birg;
	chordal_env.border_heads  = NULL;
	chordal_env.ifg           = NULL;
	chordal_env.ignore_colors = NULL;

	obstack_init(&obst);

	BE_TIMER_POP(t_ra_prolog);

	stat_ev_if {
		be_collect_node_stats(&last_node_stats, birg);
	}

	if (! arch_code_generator_has_spiller(birg->cg)) {
		/* use one of the generic spillers */

		/* Perform the following for each register class. */
		for (j = 0, m = arch_env_get_n_reg_class(arch_env); j < m; ++j) {
			post_spill_env_t pse;
			const arch_register_class_t *cls
				= arch_env_get_reg_class(arch_env, j);

			if (arch_register_class_flags(cls) & arch_register_class_flag_manual_ra)
				continue;

			stat_ev_ctx_push_str("bechordal_cls", cls->name);

			stat_ev_if {
				be_do_stat_reg_pressure(birg, cls);
			}

			memcpy(&pse.cenv, &chordal_env, sizeof(chordal_env));
			pse.birg = birg;
			pre_spill(&pse, cls);

			BE_TIMER_PUSH(t_ra_spill);
			be_do_spill(birg, cls);
			BE_TIMER_POP(t_ra_spill);

			dump(BE_CH_DUMP_SPILL, irg, pse.cls, "-spill",
			     dump_ir_block_graph_sched);

			post_spill(&pse, 0);

			stat_ev_if {
				be_node_stats_t node_stats;

				be_collect_node_stats(&node_stats, birg);
				be_subtract_node_stats(&node_stats, &last_node_stats);
				be_emit_node_stats(&node_stats, "bechordal_");

				be_copy_node_stats(&last_node_stats, &node_stats);
				stat_ev_ctx_pop("bechordal_cls");
			}
		}
	} else {
		post_spill_env_t *pse;

		/* the backend has its own spiller */
		m = arch_env_get_n_reg_class(arch_env);

		pse = alloca(m * sizeof(pse[0]));

		for (j = 0; j < m; ++j) {
			memcpy(&pse[j].cenv, &chordal_env, sizeof(chordal_env));
			pse[j].birg = birg;
			pre_spill(&pse[j], arch_env_get_reg_class(arch_env, j));
		}

		BE_TIMER_PUSH(t_ra_spill);
		arch_code_generator_spill(birg->cg, birg);
		BE_TIMER_POP(t_ra_spill);
		dump(BE_CH_DUMP_SPILL, irg, NULL, "-spill", dump_ir_block_graph_sched);

		for (j = 0; j < m; ++j) {
			post_spill(&pse[j], j);
		}
	}

	BE_TIMER_PUSH(t_verify);
	be_verify_register_allocation(birg);
	BE_TIMER_POP(t_verify);

	BE_TIMER_PUSH(t_ra_epilog);
	lower_nodes_after_ra(birg, options.lower_perm_opt & BE_CH_LOWER_PERM_COPY ? 1 : 0);
	dump(BE_CH_DUMP_LOWER, irg, NULL, "-belower-after-ra", dump_ir_block_graph_sched);

	obstack_free(&obst, NULL);
	be_liveness_invalidate(be_get_birg_liveness(birg));
	BE_TIMER_POP(t_ra_epilog);

	BE_TIMER_POP(t_ra_other);
}

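/*
 * Allocator descriptor registered with the backend; a minimal sketch
 * assuming be_ra_t consists of just the main allocation callback, as the
 * single initializer below suggests.
 */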
static be_ra_t be_ra_chordal_allocator = {
	be_ra_chordal_main,
};

void be_init_chordal_main(void)
{
	lc_opt_entry_t *be_grp      = lc_opt_get_grp(firm_opt_get_root(), "be");
	lc_opt_entry_t *ra_grp      = lc_opt_get_grp(be_grp, "ra");
	lc_opt_entry_t *chordal_grp = lc_opt_get_grp(ra_grp, "chordal");

	lc_opt_add_table(chordal_grp, be_chordal_options);

	be_register_allocator("chordal", &be_ra_chordal_allocator);
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_chordal_main);