4 * @author Sebastian Hack
8 * Copyright (C) 2005 Universitaet Karlsruhe
9 * Released under the GPL
31 #include <lpp/lpp_net.h>
32 #include <lpp/lpp_cplex.h>
36 #include "besched_t.h"
41 #include "bespillilp.h"
44 #include "bechordal_t.h"
48 #define MAX(a,b) ((a) > (b) ? (a) : (b))
50 #define DBG_LEVEL SET_LEVEL_0 // 3
57 #define LPP_SERVER "i44pc52"
58 #define LPP_SOLVER "cplex"
62 #define COST_REMAT (-9)
64 #define is_end_of_block_use(lr) (is_Block((lr)->user))
/* A reload that has to happen on a control-flow edge: the value is in
 * memory at the end of the predecessor but needed in a register in this
 * block. Collected in a singly linked list (si->edges).
 * NOTE(review): only part of the fields is visible in this view. */
69 typedef struct _edge_reload_t {
74 struct _edge_reload_t *next;
/* Counters for the statistics report written at the end of be_spill_ilp(). */
77 typedef struct _spill_stat_t {
/* Per-run environment of the ILP spill phase: register class, chordal
 * environment, debug module, LP problem, and the sets built while walking. */
83 typedef struct _spill_ilp_t {
85 const arch_register_class_t *cls;
86 const be_chordal_env_t *chordal_env;
87 firm_dbg_module_t *dbg;
99 typedef struct _live_range_t live_range_t;
/* All live ranges (uses) of one node, chained in a list_head list. */
101 typedef struct _irn_use_head_t {
102 struct list_head head;
106 live_range_t *closest_use;
/* One live range: the span of a value up to one particular use (user, pos).
 * Carries the ILP variables modelling "use is satisfied from memory" and
 * "use is satisfied by rematerialization". */
109 struct _live_range_t {
110 struct list_head list;
111 irn_use_head_t *use_head;
120  * Associates the first use of a live-in in a block
121  * with its live range.
123 typedef struct _first_use_t {
125 ir_node *irn; /**< A value live in at bl. */
126 live_range_t *lr; /**< The live range for the first use of irn in bl. */
131  * Get the weight for spill/reload costs.
132  * Computed from the loop depth of the location.
133  * @param irn The location where to check for the weight.
134  * @return The weight at this point.
136 static double get_weight(const ir_node *irn)
/* Quadratic in the loop depth, so spill code inside nested loops is
 * penalized much more heavily than straight-line code. */
138 ir_loop *loop = get_irn_loop((ir_node *) irn);
142 int depth = get_loop_depth(loop);
143 res += depth * depth;
/* Returns nonzero iff irn belongs to the register class this spiller
 * instance works on (delegates to the chordal environment). */
150 static INLINE int has_reg_class(const spill_ilp_t *si, const ir_node *irn)
152 return chordal_has_class(si->chordal_env, irn);
/* set compare callback for live ranges: entries are equal (returns 0)
 * iff user, node and position all match. */
155 static int cmp_live_range(const void *a, const void *b, size_t n)
157 const live_range_t *p = a;
158 const live_range_t *q = b;
160 return !(p->user == q->user && p->irn == q->irn && p->pos == q->pos);
/* set compare callback for use heads: keyed by the node pointer alone. */
163 static int cmp_irn_use_head(const void *a, const void *b, size_t n)
165 const irn_use_head_t *p = a;
166 const irn_use_head_t *q = b;
168 return !(p->irn == q->irn);
/* Look up the use head recorded for irn in si->irn_use_heads,
 * or NULL if none was inserted yet. */
171 static irn_use_head_t *get_use_head(spill_ilp_t *si, const ir_node *irn)
173 irn_use_head_t templ;
174 templ.irn = (ir_node *) irn;
175 return set_find(si->irn_use_heads, &templ, sizeof(templ), HASH_PTR(irn));
/* set compare callback for first-use entries: keyed by the (node, block)
 * pair. */
178 static int cmp_first_use(const void *a, const void *b, size_t n)
180 const first_use_t *p = a;
181 const first_use_t *q = b;
183 return !(p->irn == q->irn && p->bl == q->bl);
/* Record lr as the live range of the first use of irn in block bl,
 * keyed by the combined hash of block and node (see get_first_use_lr). */
186 static void add_first_use(spill_ilp_t *si, ir_node *bl, ir_node *irn, live_range_t *lr)
193 set_insert(si->first_uses, &templ, sizeof(templ),
194 HASH_COMBINE(HASH_PTR(bl), HASH_PTR(irn)));
/* Retrieve the live range stored by add_first_use() for (bl, irn),
 * or NULL if no first use was recorded in that block. */
197 static live_range_t *get_first_use_lr(spill_ilp_t *si, ir_node *bl, ir_node *irn)
204 res = set_find(si->first_uses, &templ, sizeof(templ),
205 HASH_COMBINE(HASH_PTR(bl), HASH_PTR(irn)));
207 return res ? res->lr : NULL;
211  * Checks, if a certain node can be recomputed at a certain position.
212  * @param si The spill ILP environment.
213  * @param irn The node to recompute.
214  * @param live The nodes live at the place where @p irn shall be
216  * @return 1, if irn can be recomputed, 0 if not.
218 static INLINE int can_remat(const spill_ilp_t *si, const ir_node *irn, pset *live)
221 const arch_env_t *arch_env = si->chordal_env->main_env->arch_env;
/* The backend must have flagged the node as rematerializable ... */
222 int remat = (arch_irn_get_flags(arch_env, irn) & arch_irn_flags_rematerializable) != 0;
/* ... and every operand of the spiller's register class must still be
 * live at this position (loop bails out early once remat becomes 0). */
224 for(i = 0, n = get_irn_arity(irn); i < n && remat; ++i) {
225 ir_node *op = get_irn_n(irn, i);
226 remat &= !has_reg_class(si, op) || pset_find_ptr(live, op);
/* Get (or lazily create) the live range of irn ending at the use
 * (user, pos). On creation it also adds the binary ILP variable
 * modelling "this use is satisfied from memory" (reload cost weighted
 * by loop depth) and links the range into the node's use head list. */
232 static live_range_t *get_live_range(spill_ilp_t *si, ir_node *irn, ir_node *user, int pos)
234 live_range_t lr, *res;
235 irn_use_head_t iuh, *head;
237 unsigned hash = HASH_COMBINE(HASH_PTR(irn), HASH_PTR(user));
243 lr.is_remat_var = -1;
245 res = set_insert(si->live_ranges, &lr, sizeof(lr), hash);
/* set_insert returns the existing entry if one is present; an in_mem_var
 * still at its -1 sentinel marks a freshly created entry.
 * NOTE(review): the -1 initialization of lr.in_mem_var happens on an
 * elided line — confirm against the full source. */
246 is_new = res->in_mem_var == -1;
253 cost = get_weight(user) * COST_LOAD;
255 ir_snprintf(buf, sizeof(buf), "m_%s%N_%N_%d",
256 is_Phi(irn) ? "phi_" : "", irn, user, MAX(pos, 0));
257 res->in_mem_var = lpp_add_var(si->lpp, buf, lpp_binary, cost);
260 memset(&iuh, 0, sizeof(iuh));
263 head = set_insert(si->irn_use_heads, &iuh, sizeof(iuh), HASH_PTR(irn));
/* n_uses == -1 marks a freshly inserted head; initialize its list then.
 * NOTE(review): the -1 sentinel is presumably set on an elided line after
 * the memset — verify. */
264 if(head->n_uses == -1) {
266 INIT_LIST_HEAD(&head->head);
270 list_add_tail(&res->list, &head->head);
274 res->use_head = head;
/* Debug helper: dump every node contained in the pset s at level 3. */
279 static void print_live_set(spill_ilp_t *si, pset *s) {
281 for(n=pset_first(s); n; n=pset_next(s))
282 DBG((si->dbg, LEVEL_3, " %+F\n", n));
/* Block walker callback: builds the ILP constraints for one block.
 * Walks the schedule bottom-up maintaining the set of live values,
 * emits spill-pressure constraints where register demand exceeds supply,
 * rematerialization constraints where enabled, and edge-reload variables
 * for values live-in over the block's control-flow edges. */
285 static void process_block(ir_node *bl, void *data)
289 spill_ilp_t *si = data;
291 int n_regs = arch_register_class_n_regs(si->cls);
292 int n_preds = get_irn_arity(bl);
293 pset *live = pset_new_ptr_default();
297 DBG((si->dbg, LEVEL_3, "\n"));
298 DBG((si->dbg, LEVEL_3, "Processing %+F\n", bl));
301 * Get all live-end values of this block
303 live_foreach(bl, li) {
304 if(live_is_end(li) && has_reg_class(si, li->irn)) {
305 ir_node *irn = (ir_node *) li->irn;
306 pset_insert_ptr(live, irn);
308 /*The "user" of the live range to the end of a block
309 * is the block itself. This is quite arbitrary. */
310 set_irn_link(irn, get_live_range(si, irn, bl, -1));
313 DBG((si->dbg, LEVEL_3, "Live-End:\n"));
314 print_live_set(si, live);
317 * Walk through the schedule of this block from end to begin.
318 * Phis are handled together with live ins after this loop.
320 for(irn = sched_last(bl); !sched_is_begin(irn) && !is_Phi(irn); irn = sched_prev(irn)) {
323 int relevant_args, results;
330 * Determine the number of results
332 /* Special handling of Projs */
334 if(has_reg_class(si, irn)) {
335 assert(pset_find_ptr(live, irn) && "node must be live");
336 pset_remove_ptr(live, irn);
340 DBG((si->dbg, LEVEL_2, "Skipped %+F\n", irn));
344 DBG((si->dbg, LEVEL_1, "Irn %+F\n", irn));
347 assert(get_irn_mode(irn) == mode_T && "node before projs must be tuple");
/* A defined value dies (going upwards) at its definition: remove it. */
352 if(has_reg_class(si, irn)) {
353 assert(get_irn_mode(irn) != mode_T && "node must not be a tuple");
354 assert(pset_find_ptr(live, irn) && "node must be live");
355 pset_remove_ptr(live, irn);
362 /* cand holds the irns which may be spilled */
363 cand = pset_new_ptr(8);
364 for(l=pset_first(live); l; l=pset_next(live))
365 pset_insert_ptr(cand, l);
368 * Determine number of arguments
371 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
372 ir_node *op = get_irn_n(irn, i);
373 if(has_reg_class(si, op)) {
374 DBG((si->dbg, LEVEL_2, " arg %+F\n", op));
377 /* arguments must not be spilled */
378 if(pset_find_ptr(cand, op))
379 pset_remove_ptr(cand, op);
384 * Determine, how many values must be in memory.
385 * We have 'n_regs' registers.
386 * The instr. needs 'demand'.
387 * So (R:= n_regs - demand) registers can be used for candidates 'cand'.
388 * The rest (M:= n_cand - R) must reside in memory.
390 demand = MAX(results, relevant_args);
391 n_cand = pset_count(cand);
392 must_be_in_mem = n_cand - (n_regs - demand);
394 DBG((si->dbg, LEVEL_1, " Demand: %d, Cands: %d, InMem: %d\n", demand, n_cand, must_be_in_mem));
395 DBG((si->dbg, LEVEL_3, " Cand-Set:\n"));
396 print_live_set(si, cand);
399 * Generate the corresponding constraint spilling
400 * enough candidates at this label.
402 if(must_be_in_mem > 0) {
/* sum of in_mem vars over the candidates >= must_be_in_mem */
403 ir_snprintf(buf, sizeof(buf), "cp_%N_%N_%d", bl, irn, step);
404 cst = lpp_add_cst(si->lpp, buf, lpp_greater, must_be_in_mem);
406 for(l = pset_first(cand); l; l = pset_next(cand)) {
407 live_range_t *lr = get_irn_link(l);
408 lpp_set_factor_fast(si->lpp, cst, lr->in_mem_var, 1.0);
/* Now create the per-operand live ranges and remat machinery. */
412 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
413 ir_node *op = get_irn_n(irn, i);
415 if(has_reg_class(si, op)) {
416 live_range_t *op_lr = get_live_range(si, op, irn, i);
417 set_irn_link(op, op_lr);
421 * The operand is reloaded at its usage, so it must not occur
422 * in the constraint which determines which values live at the
423 * instruction must reside in memory.
425 if(must_be_in_mem > 0) {
426 DBG((si->dbg, LEVEL_3, " Resetting %+F to 0:\n", op));
427 lpp_set_factor_fast(si->lpp, cst, op_lr->in_mem_var, 0.0);
432 * Check, if the node is a rematerializable node and
433 * if its operands are live here.
435 if(si->enable_remat && can_remat(si, op, live)) {
/* Count the operands of op that are in our register class. */
440 for(j = 0, n = get_irn_arity(op); j < n; ++j)
441 n_operands += has_reg_class(si, get_irn_n(op, j));
443 /* Make the remat constraint for this operand */
444 ir_snprintf(buf, sizeof(buf), "ce1_%N_%N_%d", op, irn, i);
445 cst = lpp_add_cst(si->lpp, buf, lpp_less, n_operands);
447 /* Make the rematerialize variable for the operand */
448 ir_snprintf(buf, sizeof(buf), "e_%N_%N_%d", op, irn, i);
449 op_lr->is_remat_var = lpp_add_var(si->lpp, buf, lpp_binary, COST_REMAT);
450 lpp_set_factor_fast(si->lpp, cst, op_lr->is_remat_var, n_operands);
/* Remat is only allowed if none of op's operands is in memory:
 * n_operands * e + sum(operand in_mem vars) <= n_operands. */
452 for(j = 0, n = get_irn_arity(op); j < n; ++j) {
453 ir_node *oop = get_irn_n(op, j);
454 if(has_reg_class(si, oop)) {
455 live_range_t *lr = get_irn_link(oop);
456 lpp_set_factor_fast(si->lpp, cst, lr->in_mem_var, 1.0);
/* Remat only makes sense if the use was in memory: e - m <= 0. */
460 ir_snprintf(buf, sizeof(buf), "ce2_%N_%N_%d", op, irn, i);
461 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
462 lpp_set_factor_fast(si->lpp, cst, op_lr->is_remat_var, 1.0);
463 lpp_set_factor_fast(si->lpp, cst, op_lr->in_mem_var, -1.0);
469 * Insert arguments of current instr into the live set
471 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
472 ir_node *op = get_irn_n(irn, i);
473 if(has_reg_class(si, op))
474 pset_insert_ptr(live, op);
/* The start block has no predecessors, so no edge handling needed. */
481 if(bl == get_irg_start_block(get_irn_irg(bl)))
485 * Here, the live set contains
486 * - phis of the block
487 * - live-in values of the block
489 * TODO: comment is wrong
490 * If a value is live in, it must be in a register in all predecessor
491 * blocks or in memory at the end of all predecessor blocks. Also, the
492 * closest use in the current block must then be from register or
493 * memory, respectively.
495 for(irn = pset_first(live); irn; irn = pset_next(live)) {
496 live_range_t *lr = get_irn_link(irn);
497 int is_phi = is_Phi(irn) && get_nodes_block(irn) == bl;
500 assert(has_reg_class(si, irn));
501 assert(is_Phi(irn) || is_live_in(bl, irn));
503 /* Deprecated: Can be done with the first uses map */
505 lr->use_head->closest_use = lr;
508 * Remind the liverange of the first use of a live (or phi) in the
511 add_first_use(si, bl, irn, lr);
/* For each incoming edge: create an edge-reload variable and couple
 * it to the live ranges at both ends. For a Phi, the value on the
 * edge is the corresponding Phi operand, not the Phi itself. */
513 for(i = 0; i < n_preds; ++i) {
514 ir_node *pred_bl = get_Block_cfgpred_block(bl, i);
515 ir_node *end_node = is_phi ? get_irn_n(irn, i) : irn;
516 live_range_t *op_lr = get_live_range(si, end_node, pred_bl, -1);
517 edge_reload_t *edge = obstack_alloc(si->obst, sizeof(edge[0]));
519 ir_snprintf(buf, sizeof(buf), "edge_b%N_p%N_i%N", bl, pred_bl, end_node);
520 edge->in_mem_var = lpp_add_var(si->lpp, buf, lpp_binary, get_weight(pred_bl) * COST_LOAD);
522 edge->irn = end_node;
524 edge->next = si->edges;
/* in_mem(pred end) - in_mem(first use here) - edge_reload <= 0:
 * if the value is in memory at the predecessor's end, it is either
 * still in memory at its first use here or reloaded on the edge. */
527 ir_snprintf(buf, sizeof(buf), "cedge_b%N_p%N_i%N", bl, pred_bl, end_node);
528 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
529 lpp_set_factor_fast(si->lpp, cst, op_lr->in_mem_var, 1.0);
530 lpp_set_factor_fast(si->lpp, cst, lr->in_mem_var, -1.0);
531 lpp_set_factor_fast(si->lpp, cst, edge->in_mem_var, -1.0);
540  * Add the costs for a store.
542  * If one of the uses is from memory, add additional costs for the
545  * m_1 + ... + m_n - M * s <= 0
547  * @param si The ILP spilling environment.
549 static void add_store_costs(spill_ilp_t *si)
551 if(si->enable_store) {
/* One constraint and one binary spill variable per node: the big-M
 * term forces s to 1 as soon as any of the node's uses is from memory,
 * charging the (loop-weighted) store cost exactly once. */
555 for(uh = set_first(si->irn_use_heads); uh; uh = set_next(si->irn_use_heads)) {
559 ir_snprintf(buf, sizeof(buf), "cs_%N", uh->irn);
560 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
562 ir_snprintf(buf, sizeof(buf), "s_%N", uh->irn);
563 uh->spill_var = lpp_add_var(si->lpp, buf, lpp_binary,
564 get_weight(uh->irn) * COST_STORE);
565 lpp_set_factor_fast(si->lpp, cst, uh->spill_var, -BIGM);
567 list_for_each_entry(live_range_t, lr, &uh->head, list)
568 lpp_set_factor_fast(si->lpp, cst, lr->in_mem_var, 1.0);
/* Tolerance test for solver output: treat |x| < 1e-5 as zero when
 * reading back binary variables from the LP solution. */
573 static INLINE int is_zero(double x)
575 return fabs(x) < 0.00001;
/* A live range is spilled iff its in-memory variable is nonzero
 * (i.e. essentially 1) in the ILP solution. */
578 static int is_spilled(const spill_ilp_t *si, const live_range_t *lr)
580 return !is_zero(lpp_get_var_sol(si->lpp, lr->in_mem_var));
/* Callback for the spill environment: a Phi is a memory Phi iff the
 * closest use of its value is satisfied from memory in the solution. */
583 static int is_mem_phi(const ir_node *phi, void *data)
585 spill_ilp_t *si = data;
586 return is_spilled(si, get_use_head(si, phi)->closest_use);
/* Translate the ILP solution back into the program: record spill/reload
 * statistics, request a reload before every spilled in-block use, insert
 * the reloads chosen on control-flow edges, and finally let the spill
 * environment materialize all spills and reloads. */
589 static void writeback_results(spill_ilp_t *si)
594 /* Look at each node and examine the usages. */
595 for(uh = set_first(si->irn_use_heads); uh; uh = set_next(si->irn_use_heads)) {
598 si->stats.n_spills += !is_zero(lpp_get_var_sol(si->lpp, uh->spill_var));
600 /* Go through all live ranges of the node. */
601 list_for_each_entry(live_range_t, lr, &uh->head, list) {
/* End-of-block pseudo-uses (user is the block) get no reload here;
 * those are handled by the edge variables below. */
602 if(is_spilled(si, lr) && !is_end_of_block_use(lr)) {
603 DBG((si->dbg, LEVEL_2, "%+F: inserting reload at user %+F\n",
605 be_add_reload(si->senv, lr->irn, lr->user);
606 si->stats.n_reloads += 1;
/* Reloads the solver placed on control-flow edges. */
611 for(edge = si->edges; edge; edge = edge->next) {
612 if(!is_zero(lpp_get_var_sol(si->lpp, edge->in_mem_var))) {
613 DBG((si->dbg, LEVEL_2, "%+F: insert reload on edge %d from %+F\n",
614 edge->irn, edge->pos, edge->bl));
615 be_add_reload_on_edge(si->senv, edge->irn, edge->bl, edge->pos);
616 si->stats.n_reloads += 1;
620 be_insert_spills_reloads(si->senv, NULL);
/* Entry point of the ILP spiller: set up the environment and LP problem,
 * walk all blocks to generate constraints, solve (CPLEX locally or via
 * the LP server), dump problem/solution/statistics files, write the
 * results back into the graph and release all resources. */
623 void be_spill_ilp(const be_chordal_env_t *chordal_env)
625 char problem_name[256];
629 ir_snprintf(problem_name, sizeof(problem_name), "%F_%s",
630 chordal_env->irg, chordal_env->cls->name);
/* Initialize the spill environment. */
633 memset(&si.stats, 0, sizeof(si.stats));
634 si.chordal_env = chordal_env;
636 si.dbg = firm_dbg_register("be.ra.spillilp");
637 si.senv = be_new_spill_env(si.dbg, chordal_env, is_mem_phi, &si);
638 si.cls = chordal_env->cls;
639 si.lpp = new_lpp(problem_name, lpp_minimize);
640 si.irn_use_heads = new_set(cmp_irn_use_head, 4096);
641 si.live_ranges = new_set(cmp_live_range, 16384);
642 si.first_uses = new_set(cmp_first_use, 4096);
/* Build the constraints block by block, then the store costs. */
647 firm_dbg_set_mask(si.dbg, DBG_LEVEL);
648 irg_block_walk_graph(chordal_env->irg, process_block, NULL, &si);
650 add_store_costs(&si);
/* Dump the ILP in plain text for offline inspection. */
657 ir_snprintf(buf, sizeof(buf), "%s-spill.ilp", problem_name);
658 if((f = fopen(buf, "wt")) != NULL) {
659 lpp_dump_plain(si.lpp, f);
665 DBG((si.dbg, LEVEL_1, "%F\n", chordal_env->irg));
/* Solve either with local CPLEX or via the remote LP server;
 * which branch is active is decided by elided preprocessor lines. */
667 lpp_solve_cplex(si.lpp);
669 lpp_solve_net(si.lpp, LPP_SERVER, LPP_SOLVER);
671 assert(lpp_is_sol_valid(si.lpp) && "solution of ILP must be valid");
673 DBG((si.dbg, LEVEL_1, "\tnodes: %d, vars: %d, csts: %d\n",
674 set_count(si.irn_use_heads), si.lpp->var_next, si.lpp->cst_next));
675 DBG((si.dbg, LEVEL_1, "\titerations: %d, solution time: %g\n",
676 si.lpp->iterations, si.lpp->sol_time));
/* Dump the solution values of all variables. */
683 ir_snprintf(buf, sizeof(buf), "%s-spill.sol", problem_name);
684 if((f = fopen(buf, "wt")) != NULL) {
686 for(i = 0; i < si.lpp->var_next; ++i) {
687 lpp_name_t *name = si.lpp->vars[i];
688 fprintf(f, "%20s %4d %10f\n", name->name, name->nr, name->value);
695 writeback_results(&si);
/* Write the statistics file. */
702 ir_snprintf(buf, sizeof(buf), "%s-spill.stat", problem_name);
703 if((f = fopen(buf, "wt")) != NULL) {
704 fprintf(f, "%20s: %d\n", "nodes", set_count(si.irn_use_heads));
705 fprintf(f, "%20s: %d\n", "vars", si.lpp->var_next);
706 fprintf(f, "%20s: %d\n", "csts", si.lpp->cst_next);
707 fprintf(f, "%20s: %f\n", "sol time", si.lpp->sol_time);
708 fprintf(f, "%20s: %d\n", "spills", si.stats.n_spills);
709 fprintf(f, "%20s: %d\n", "reloads", si.stats.n_reloads);
710 fprintf(f, "%20s: %d\n", "remats", si.stats.n_remat);
/* Cleanup: the sets and the obstack hold all auxiliary data. */
716 del_set(si.irn_use_heads);
717 del_set(si.live_ranges);
719 obstack_free(&obst, NULL);