/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Driver for the chordal register allocator.
 * @author  Sebastian Hack
 */
#include "firm_config.h"

#include "lc_opts_enum.h"

#include "irgraph_t.h"
#include "irprintf_t.h"
#include "iredges_t.h"

#include "bechordal_t.h"
#include "besched_t.h"
#include "beifg_impl.h"
#include "bestatevent.h"
#include "bespillslots.h"
#include "bespilloptions.h"
#include "bespillremat.h"
#include "becopystat.h"
#include "becopyopt.h"
#include "bessadestr.h"
static be_ra_chordal_opts_t options = {
	BE_CH_LOWER_PERM_SWAP,

typedef struct _post_spill_env_t {
	be_chordal_env_t             cenv;
	be_irg_t                     *birg;
	const arch_register_class_t *cls;
	double                       pre_spill_cost;
} post_spill_env_t;

static be_options_t *main_opts;
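
/* mappings from the textual option values to the corresponding enum and mask values */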
static const lc_opt_enum_int_items_t lower_perm_items[] = {
	{ "copy", BE_CH_LOWER_PERM_COPY },
	{ "swap", BE_CH_LOWER_PERM_SWAP },

static const lc_opt_enum_int_items_t lower_perm_stat_items[] = {

static const lc_opt_enum_int_items_t dump_items[] = {
	{ "none",       BE_CH_DUMP_NONE       },
	{ "spill",      BE_CH_DUMP_SPILL      },
	{ "live",       BE_CH_DUMP_LIVE       },
	{ "color",      BE_CH_DUMP_COLOR      },
	{ "copymin",    BE_CH_DUMP_COPYMIN    },
	{ "ssadestr",   BE_CH_DUMP_SSADESTR   },
	{ "tree",       BE_CH_DUMP_TREE_INTV  },
	{ "constr",     BE_CH_DUMP_CONSTR     },
	{ "lower",      BE_CH_DUMP_LOWER      },
	{ "spillslots", BE_CH_DUMP_SPILLSLOTS },
	{ "appel",      BE_CH_DUMP_APPEL      },
	{ "all",        BE_CH_DUMP_ALL        },

static const lc_opt_enum_int_items_t be_ch_vrfy_items[] = {
	{ "off",    BE_CH_VRFY_OFF    },
	{ "warn",   BE_CH_VRFY_WARN   },
	{ "assert", BE_CH_VRFY_ASSERT },
static lc_opt_enum_int_var_t lower_perm_var = {
	&options.lower_perm_opt, lower_perm_items
};

static lc_opt_enum_int_var_t dump_var = {
	&options.dump_flags, dump_items
};

static lc_opt_enum_int_var_t be_ch_vrfy_var = {
	&options.vrfy_option, be_ch_vrfy_items
};

static const lc_opt_table_entry_t be_chordal_options[] = {
	LC_OPT_ENT_ENUM_PTR ("perm", "perm lowering options", &lower_perm_var),
	LC_OPT_ENT_ENUM_MASK("dump", "select dump phases", &dump_var),
	LC_OPT_ENT_ENUM_PTR ("vrfy", "verify options", &be_ch_vrfy_var),
static void dump(unsigned mask, ir_graph *irg,
                 const arch_register_class_t *cls,
                 const char *suffix,
                 void (*dump_func)(ir_graph *, const char *))
{
	if ((options.dump_flags & mask) == mask) {
		if (cls != NULL) {
			char buf[256];
			snprintf(buf, sizeof(buf), "-%s%s", cls->name, suffix);
			be_dump(irg, buf, dump_func);
		} else
			be_dump(irg, suffix, dump_func);
	}
}
/**
 * Checks for every Reload whether its user can perform the load itself as a memory operand.
 */
static void memory_operand_walker(ir_node *irn, void *env)
{
	const ir_edge_t *edge, *ne;
	ir_node         *spill, *block;

	if (! be_is_Reload(irn))
		return;

	/* only use memory operands if the Reload is used by exactly one node */
	if (get_irn_n_edges(irn) > 1)
		return;

	spill = be_get_Reload_mem(irn);
	block = get_nodes_block(irn);

	foreach_out_edge_safe(irn, edge, ne) {
		ir_node *src = get_edge_src_irn(edge);
		int      pos = get_edge_src_pos(edge);
		assert(src && "outedges broken!");

		if (get_nodes_block(src) == block && arch_possible_memory_operand(src, pos)) {
			arch_perform_memory_operand(src, spill, pos);
		}
	}

	/* kill the Reload if all its users now load from memory directly */
	if (get_irn_n_edges(irn) == 0) {
		set_irn_n(irn, be_pos_Reload_mem, new_Bad());
		set_irn_n(irn, be_pos_Reload_frame, new_Bad());
	}
}

/**
 * Starts a walk for memory operands if supported by the backend.
 */
static INLINE void check_for_memory_operands(ir_graph *irg)
{
	irg_walk_graph(irg, NULL, memory_operand_walker, NULL);
}
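
/* snapshot of the backend node statistics; the per-class statistics below are
 * reported as deltas against this baseline */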
static be_node_stats_t last_node_stats;

/**
 * Performs the work that has to be done per register class before spilling.
 */
static void pre_spill(post_spill_env_t *pse, const arch_register_class_t *cls)
{
	be_chordal_env_t    *chordal_env = &pse->cenv;
	be_irg_t            *birg        = pse->birg;
	ir_graph            *irg         = be_get_birg_irg(birg);
	const be_main_env_t *main_env    = birg->main_env;

	pse->cls                   = cls;
	chordal_env->cls           = cls;
	chordal_env->border_heads  = pmap_create();
	chordal_env->ignore_colors = bitset_malloc(chordal_env->cls->n_regs);

	be_assure_liveness(birg);
	be_liveness_assure_chk(be_get_birg_liveness(birg));
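
	/* remember the estimated spill costs before spilling so that post_spill()
	 * can report the cost difference as "bechordal_spillcosts" */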
	stat_ev_do(pse->pre_spill_cost = be_estimate_irg_costs(irg, main_env->arch_env, birg->exec_freq));

	/* put all ignore registers into the ignore register set */
	be_put_ignore_regs(birg, pse->cls, chordal_env->ignore_colors);
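
	/* prepare the graph so that the register constraints can be fulfilled;
	 * the result is dumped below with the "-constr-pre" suffix */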
	BE_TIMER_PUSH(t_ra_constr);
	be_pre_spill_prepare_constr(chordal_env);
	BE_TIMER_POP(t_ra_constr);

	dump(BE_CH_DUMP_CONSTR, birg->irg, pse->cls, "-constr-pre", dump_ir_block_graph_sched);
}
/**
 * Performs the work that has to be done per register class after spilling.
 */
static void post_spill(post_spill_env_t *pse, int iteration)
{
	be_chordal_env_t    *chordal_env = &pse->cenv;
	be_irg_t            *birg        = pse->birg;
	ir_graph            *irg         = birg->irg;
	const be_main_env_t *main_env    = birg->main_env;
	int colors_n         = arch_register_class_n_regs(chordal_env->cls);
	int allocatable_regs = colors_n - be_put_ignore_regs(birg, chordal_env->cls, NULL);

	/* some special classes contain only ignore regs; for them nothing has to be done */
	if (allocatable_regs > 0) {
		stat_ev_dbl("bechordal_spillcosts", be_estimate_irg_costs(irg, main_env->arch_env, birg->exec_freq) - pse->pre_spill_cost);

		/*
		 * If the backend provides its own spiller, post_spill() is called in a
		 * loop after spilling, once for each register class, but the stack
		 * nodes only have to be fixed once (in the first iteration).
		 */
		BE_TIMER_PUSH(t_ra_spill_apply);
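		/* try to fold Reloads into their users as memory operands */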
		check_for_memory_operands(irg);
		if (iteration == 0) {
			be_abi_fix_stack_nodes(birg->abi);
		}
		BE_TIMER_POP(t_ra_spill_apply);

		BE_TIMER_PUSH(t_verify);

		/* verify schedule and register pressure */
		if (chordal_env->opts->vrfy_option == BE_CH_VRFY_WARN) {
			be_verify_schedule(birg);
			be_verify_register_pressure(birg, pse->cls, irg);
		} else if (chordal_env->opts->vrfy_option == BE_CH_VRFY_ASSERT) {
			assert(be_verify_schedule(birg) && "Schedule verification failed");
			assert(be_verify_register_pressure(birg, pse->cls, irg)
			       && "Register pressure verification failed");
		}
		BE_TIMER_POP(t_verify);

		/* color the graph */
		BE_TIMER_PUSH(t_ra_color);
		be_ra_chordal_color(chordal_env);
		BE_TIMER_POP(t_ra_color);

		dump(BE_CH_DUMP_COLOR, irg, pse->cls, "-color", dump_ir_block_graph_sched);

		/* create the interference graph (ifg) with the selected flavor */
		BE_TIMER_PUSH(t_ra_ifg);
		chordal_env->ifg = be_create_ifg(chordal_env);
		BE_TIMER_POP(t_ra_ifg);
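
		/* emit statistics about the interference graph and about the number of
		 * Perm and Copy nodes present before coalescing */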
		be_node_stats_t node_stats;

		be_ifg_stat(birg, chordal_env->ifg, &stat);
		stat_ev_dbl("bechordal_ifg_nodes", stat.n_nodes);
		stat_ev_dbl("bechordal_ifg_edges", stat.n_edges);
		stat_ev_dbl("bechordal_ifg_comps", stat.n_comps);

		be_collect_node_stats(&node_stats, birg);
		be_subtract_node_stats(&node_stats, &last_node_stats);

		stat_ev_dbl("bechordal_perms_before_coal",
		            node_stats[BE_STAT_PERMS]);
		stat_ev_dbl("bechordal_copies_before_coal",
		            node_stats[BE_STAT_COPIES]);

		/* copy minimization */
		BE_TIMER_PUSH(t_ra_copymin);
		co_driver(chordal_env);
		BE_TIMER_POP(t_ra_copymin);

		dump(BE_CH_DUMP_COPYMIN, irg, pse->cls, "-copymin", dump_ir_block_graph_sched);

		/* SSA destruction */
		BE_TIMER_PUSH(t_ra_ssa);
		be_ssa_destruction(chordal_env);
		BE_TIMER_POP(t_ra_ssa);

		dump(BE_CH_DUMP_SSADESTR, irg, pse->cls, "-ssadestr", dump_ir_block_graph_sched);

		if (chordal_env->opts->vrfy_option != BE_CH_VRFY_OFF) {
			BE_TIMER_PUSH(t_verify);
			be_ssa_destruction_check(chordal_env);
			BE_TIMER_POP(t_verify);
		}

		/* the ifg exists only if there are allocatable regs */
		be_ifg_free(chordal_env->ifg);
	}

	/* free some always allocated data structures */
	pmap_destroy(chordal_env->border_heads);
	bitset_free(chordal_env->ignore_colors);
}
/**
 * Performs chordal register allocation for each register class on the given irg.
 *
 * @param birg  The backend irg object
 */
static void be_ra_chordal_main(be_irg_t *birg)
{
	const be_main_env_t *main_env = birg->main_env;
	const arch_env_t    *arch_env = main_env->arch_env;
	ir_graph            *irg      = birg->irg;
	struct obstack       obst;
	be_chordal_env_t     chordal_env;
	int                  j, m;

	main_opts = main_env->options;

	BE_TIMER_PUSH(t_ra_other);
	BE_TIMER_PUSH(t_ra_prolog);

	be_assure_liveness(birg);
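
	/* set up the obstack and the chordal environment template; the template is
	 * copied once per register class below */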
	obstack_init(&obst);

	chordal_env.obst          = &obst;
	chordal_env.opts          = &options;
	chordal_env.irg           = irg;
	chordal_env.birg          = birg;
	chordal_env.border_heads  = NULL;
	chordal_env.ifg           = NULL;
	chordal_env.ignore_colors = NULL;

	BE_TIMER_POP(t_ra_prolog);
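
	/* take an initial snapshot of the node counts; it is the baseline for the
	 * per-class statistics emitted below */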
	be_collect_node_stats(&last_node_stats, birg);

	if (! arch_code_generator_has_spiller(birg->cg)) {
		/* use one of the generic spillers */

		/* Perform the following for each register class. */
		for (j = 0, m = arch_env_get_n_reg_class(arch_env); j < m; ++j) {
			post_spill_env_t pse;
			const arch_register_class_t *cls
				= arch_env_get_reg_class(arch_env, j);

			if (arch_register_class_flags(cls) & arch_register_class_flag_manual_ra)
				continue;

			stat_ev_ctx_push_str("bechordal_cls", cls->name);

			be_do_stat_reg_pressure(birg, cls);

			memcpy(&pse.cenv, &chordal_env, sizeof(chordal_env));
			pse.birg = birg;
			pre_spill(&pse, cls);

			BE_TIMER_PUSH(t_ra_spill);
			be_do_spill(birg, cls);
			BE_TIMER_POP(t_ra_spill);

			dump(BE_CH_DUMP_SPILL, irg, pse.cls, "-spill",
			     dump_ir_block_graph_sched);

			post_spill(&pse, 0);

			{
				be_node_stats_t node_stats;

				be_collect_node_stats(&node_stats, birg);
				be_subtract_node_stats(&node_stats, &last_node_stats);
				be_emit_node_stats(&node_stats, "bechordal_");

				be_copy_node_stats(&last_node_stats, &node_stats);
			}
			stat_ev_ctx_pop("bechordal_cls");
		}
	} else {
		post_spill_env_t *pse;

		/* the backend has its own spiller */
		m = arch_env_get_n_reg_class(arch_env);
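
		/* set up one post-spill environment per register class */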
		pse = alloca(m * sizeof(pse[0]));

		for (j = 0; j < m; ++j) {
			memcpy(&pse[j].cenv, &chordal_env, sizeof(chordal_env));
			pse[j].birg = birg;
			pre_spill(&pse[j], pse[j].cls);
		}

		BE_TIMER_PUSH(t_ra_spill);
		arch_code_generator_spill(birg->cg, birg);
		BE_TIMER_POP(t_ra_spill);
		dump(BE_CH_DUMP_SPILL, irg, NULL, "-spill", dump_ir_block_graph_sched);

		for (j = 0; j < m; ++j) {
			post_spill(&pse[j], j);
		}
	}

	BE_TIMER_PUSH(t_verify);
	be_verify_register_allocation(birg);
	BE_TIMER_POP(t_verify);

	BE_TIMER_PUSH(t_ra_epilog);
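	/* lower the remaining Perm nodes to copy or swap sequences, depending on
	 * the selected perm lowering option */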
	lower_nodes_after_ra(birg, options.lower_perm_opt & BE_CH_LOWER_PERM_COPY ? 1 : 0);
	dump(BE_CH_DUMP_LOWER, irg, NULL, "-belower-after-ra", dump_ir_block_graph_sched);
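
	/* release the obstack of this run; the liveness information is invalidated
	 * because the lowering above may have changed the graph */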
	obstack_free(&obst, NULL);
	be_liveness_invalidate(be_get_birg_liveness(birg));
	BE_TIMER_POP(t_ra_epilog);

	BE_TIMER_POP(t_ra_other);
}

static be_ra_t be_ra_chordal_allocator = {
	be_ra_chordal_main,
};
void be_init_chordal_main(void)
{
	lc_opt_entry_t *be_grp      = lc_opt_get_grp(firm_opt_get_root(), "be");
	lc_opt_entry_t *ra_grp      = lc_opt_get_grp(be_grp, "ra");
	lc_opt_entry_t *chordal_grp = lc_opt_get_grp(ra_grp, "chordal");

	lc_opt_add_table(chordal_grp, be_chordal_options);

	be_register_allocator("chordal", &be_ra_chordal_allocator);
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_chordal_main);