4 * @author Sebastian Hack
8 * Copyright (C) 2005 Universitaet Karlsruhe
9 * Released under the GPL
26 #include <lpp/lpp_net.h>
27 #include <lpp/lpp_cplex.h>
31 #include "besched_t.h"
36 #include "bespillilp.h"
40 #define MAX(a,b) ((a) > (b) ? (a) : (b))
42 #define DBG_LEVEL SET_LEVEL_0
47 #define LPP_SERVER "i44pc52"
48 #define LPP_SOLVER "cplex"
52 #define COST_REMAT (-9)
54 #define is_end_of_block_use(lr) (is_Block((lr)->user))
/* Per-run state of the ILP spiller: the register class under
 * consideration plus a debug module.  NOTE(review): further fields
 * (lpp, live_ranges, irn_use_heads, senv, obst, ...) are used below
 * but not visible in this excerpt. */
56 typedef struct _spill_ilp_t {
57 const arch_register_class_t *cls;
58 firm_dbg_module_t *dbg;
69 typedef struct _live_range_t live_range_t;
/* Head collecting all live ranges of one node (one entry per node).
 * Carries the per-node spill variable and the use closest to the
 * block start (see process_block / writeback_results below). */
71 typedef struct _irn_use_head_t {
72 struct list_head head;
76 live_range_t *closest_use;
/* One live range: a (node, user, position) triple with the associated
 * ILP variables (in_mem_var, is_remat_var — see get_live_range). */
79 struct _live_range_t {
80 struct list_head list;
81 irn_use_head_t *use_head;
/* Returns non-zero iff the first output operand of @p irn belongs to
 * the register class this spiller instance works on. */
89 static int has_reg_class(const spill_ilp_t *si, const ir_node *irn)
91 return arch_irn_has_reg_class(si->senv.session->main_env->arch_env,
92 irn, arch_pos_make_out(0), si->cls);
/* Register demand of an instruction: the larger of its input and
 * output operand counts, i.e. how many registers the operation
 * occupies at its program point. */
95 static int register_demand(spill_ilp_t *si, const ir_node *irn)
97 const arch_env_t *arch_env = si->senv.session->main_env->arch_env;
98 int n_in = arch_get_n_operands(arch_env, irn, 0);
99 int n_out = arch_get_n_operands(arch_env, irn, -1);
101 return MAX(n_in, n_out);
/* set_insert comparator for live ranges: two entries are equal
 * (return 0) iff user, node and position all match. */
104 static int cmp_live_range(const void *a, const void *b, size_t n)
106 const live_range_t *p = a;
107 const live_range_t *q = b;
109 return !(p->user == q->user && p->irn == q->irn && p->pos == q->pos);
/* set_insert comparator for use heads: equality (return 0) is
 * determined by the node pointer alone. */
112 static int cmp_irn_use_head(const void *a, const void *b, size_t n)
114 const irn_use_head_t *p = a;
115 const irn_use_head_t *q = b;
117 return !(p->irn == q->irn);
121 * Checks, if a certain node can be recomputed at a certain position.
122 * @param si The spill ILP environment.
123 * @param irn The node to recompute.
124 * @param live The nodes live at the place where @p irn shall be
126 * @return 1, if irn can be recomputed, 0 if not.
128 static INLINE int can_remat(const spill_ilp_t *si, const ir_node *irn, pset *live)
131 const arch_env_t *arch_env = si->senv.session->main_env->arch_env;
/* A node is a remat candidate only if the backend flags it as
 * rematerializable ... */
132 int remat = (arch_irn_get_flags(arch_env, irn) & arch_irn_flags_rematerializable) != 0;
/* ... and every operand of the relevant register class is live at
 * the position in question (operands of other classes don't count). */
134 for(i = 0, n = get_irn_arity(irn); i < n && remat; ++i) {
135 ir_node *op = get_irn_n(irn, i);
136 remat &= !has_reg_class(si, op) || pset_find_ptr(live, op);
/* Get-or-create the live range for (irn, user, pos).  On first
 * creation a binary ILP variable m_... is added expressing "this
 * value is in memory at this use"; pos < 0 marks an end-of-block
 * pseudo-use (user is the block), which carries no reload cost.
 * Also maintains the per-node use head and its use list. */
142 static live_range_t *get_live_range(spill_ilp_t *si, ir_node *irn, ir_node *user, int pos)
144 live_range_t lr, *res;
145 irn_use_head_t iuh, *head;
147 unsigned hash = HASH_COMBINE(HASH_PTR(irn), HASH_PTR(user));
153 lr.is_remat_var = -1;
155 res = set_insert(si->live_ranges, &lr, sizeof(lr), hash);
/* in_mem_var == -1 is the sentinel meaning "freshly inserted". */
156 is_new = res->in_mem_var == -1;
160 ir_snprintf(buf, sizeof(buf), "m_%s%N_%N_%d",
161 is_Phi(irn) ? "phi_" : "", irn, user, MAX(pos, 0));
/* Real uses (pos >= 0) pay COST_LOAD if served from memory. */
162 res->in_mem_var = lpp_add_var(si->lpp, buf, lpp_binary, pos >= 0 ? COST_LOAD : 0.0);
165 memset(&iuh, 0, sizeof(iuh));
168 head = set_insert(si->irn_use_heads, &iuh, sizeof(iuh), HASH_PTR(irn));
/* n_uses == -1: head was just created, initialise its list. */
169 if(head->n_uses == -1) {
171 INIT_LIST_HEAD(&head->head);
175 list_add_tail(&res->list, &head->head);
179 res->use_head = head;
/* Graph-walker callback: for every use edge of @p irn, locate the
 * block the use effectively happens in (for Phis: the corresponding
 * predecessor block) and record the live range in that block's link.
 * NOTE(review): lr_set handling continues past this excerpt. */
185 static void annotate_live_ranges(ir_node *irn, void *data)
187 const ir_edge_t *edge;
189 foreach_out_edge(irn, edge) {
192 ir_node *user = edge->use;
194 ir_node *bl = get_nodes_block(user);
/* A Phi uses its operand at the end of the matching predecessor. */
197 bl = get_Block_cfgpred_block(bl, pos);
199 lr_set = get_irn_link(bl);
/* Build the ILP constraints for one block: walk the schedule bottom-up,
 * maintain the set of live values, and at each label where register
 * pressure exceeds n_regs force enough in_mem variables to 1.  Also
 * emits rematerialisation constraints and ties live-ins/Phis to the
 * end-of-block state of all predecessors. */
206 static void process_block(ir_node *bl, void *data)
210 spill_ilp_t *si = data;
212 int n_regs = arch_register_class_n_regs(si->cls);
213 int n_preds = get_irn_arity(bl);
214 pset *live = pset_new_ptr_default();
218 /* as always, bring the live end nodes to life here */
219 live_foreach(bl, li) {
220 if(live_is_end(li) && has_reg_class(si, li->irn)) {
221 ir_node *irn = (ir_node *) li->irn;
222 pset_insert_ptr(live, irn);
225 * The "user" of the live range to the end of a block
226 * is the block itself. This is quite arbitrary.
228 set_irn_link(irn, get_live_range(si, irn, bl, -1));
232 sched_foreach_reverse(bl, irn) {
239 /* We handle phi together with live ins after this loop (see below). */
/* Defined values die (bottom-up walk): remove from the live set. */
243 if(has_reg_class(si, irn))
244 pset_remove_ptr(live, irn);
246 demand = register_demand(si, irn);
247 n_live = pset_count(live);
250 * Determine, how many values (which are not used at the label)
252 * demand means the number of registers, the operation will consume.
253 * So there are n_regs - demand registers available to store values
254 * which are not used at this label. The rest must reside in memory.
256 must_be_in_mem = MAX(n_live - (n_regs - demand), 0);
258 if(must_be_in_mem > 0) {
261 * The constraint limiting the pressure at this label to
262 * the number of free registers.
264 ir_snprintf(buf, sizeof(buf), "cp_%N_%d", bl, step);
265 cst = lpp_add_cst(si->lpp, buf, lpp_greater, must_be_in_mem);
/* Every currently-live value may contribute to relieving pressure. */
267 for(l = pset_first(live); l; l = pset_next(live)) {
268 live_range_t *lr = get_irn_link(l);
269 lpp_set_factor_fast(si->lpp, cst, lr->in_mem_var, 1.0);
/* Process the operands of the instruction: create their live ranges
 * and, where possible, rematerialisation variables. */
273 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
274 ir_node *op = get_irn_n(irn, i);
276 if(has_reg_class(si, op)) {
277 live_range_t *op_lr = get_live_range(si, op, irn, i);
279 set_irn_link(op, op_lr);
282 * The operand is reloaded at its usage, so it must not occur
283 * in the constraint which determines which values live at the
284 * instruction must reside in memory.
286 if(must_be_in_mem > 0) {
287 lpp_set_factor_fast(si->lpp, cst, op_lr->in_mem_var, 0.0);
291 * Check, if the node is a rematerializable node and
292 * if its operands are live here.
294 if(si->enable_remat && can_remat(si, op, live)) {
/* Count the reg-class operands of op; they gate the remat. */
299 for(j = 0, n = get_irn_arity(op); j < n; ++j)
300 n_operands += has_reg_class(si, get_irn_n(op, j));
302 /* Make the remat constraint for this operand */
303 ir_snprintf(buf, sizeof(buf), "ce1_%N_%N_%d", op, irn, i);
304 cst = lpp_add_cst(si->lpp, buf, lpp_less, n_operands);
306 /* Make the rematerialize variable for the operand */
307 ir_snprintf(buf, sizeof(buf), "e_%N_%N_%d", op, irn, i);
308 op_lr->is_remat_var = lpp_add_var(si->lpp, buf, lpp_binary, COST_REMAT);
309 lpp_set_factor_fast(si->lpp, cst, op_lr->is_remat_var, n_operands);
/* ce1: remat is only allowed if all reg-class operands of op are
 * available in registers (their in_mem vars sum below n_operands). */
311 for(j = 0, n = get_irn_arity(op); j < n; ++j) {
312 ir_node *oop = get_irn_n(op, j);
313 if(has_reg_class(si, oop)) {
314 live_range_t *lr = get_irn_link(oop);
315 lpp_set_factor_fast(si->lpp, cst, lr->in_mem_var, 1.0);
/* ce2: rematerialising implies the operand was in memory here. */
319 ir_snprintf(buf, sizeof(buf), "ce2_%N_%N_%d", op, irn, i);
320 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
321 lpp_set_factor_fast(si->lpp, cst, op_lr->is_remat_var, 1.0);
322 lpp_set_factor_fast(si->lpp, cst, op_lr->in_mem_var, -1.0);
/* Operands become live above this instruction (Phi operands are
 * handled via the predecessor blocks instead). */
327 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
328 ir_node *op = get_irn_n(irn, i);
329 if(has_reg_class(si, op) && !is_Phi(irn))
330 pset_insert_ptr(live, op);
/* The start block has no predecessors to connect to. */
336 if(bl == get_irg_start_block(get_irn_irg(bl)))
340 * Here, only the phis in the block and the values live in are in the
343 * If a value is live in, it must be in a register in all predecessor
344 * blocks or in memory at the end of all predecessor blocks. Also, the
345 * closest use in the current block must then be from register or
346 * memory, respectively.
348 for(irn = pset_first(live); irn; irn = pset_next(live)) {
349 live_range_t *lr = get_irn_link(irn);
350 int is_phi = is_Phi(irn) && get_nodes_block(irn) == bl;
/* Remember the use nearest to the block entry for writeback. */
354 lr->use_head->closest_use = lr;
356 assert(has_reg_class(si, irn));
357 assert(is_Phi(irn) || is_live_in(bl, irn));
/* All-or-nothing coupling: the first use is in memory exactly when
 * the value is in memory at the end of every predecessor. */
360 ir_snprintf(buf, sizeof(buf), "c%s_%N_%N", (is_phi ? "phi" : "li"), irn, bl);
361 cst = lpp_add_cst(si->lpp, buf, lpp_equal, 0.0);
362 lpp_set_factor_fast(si->lpp, cst, lr->in_mem_var, -n_preds);
364 for(i = 0; i < n_preds; ++i) {
365 ir_node *pred_bl = get_Block_cfgpred_block(bl, i);
366 ir_node *end_node = is_phi ? get_irn_n(irn, i) : irn;
367 live_range_t *op_lr = get_live_range(si, end_node, pred_bl, -1);
369 lpp_set_factor_fast(si->lpp, cst, op_lr->in_mem_var, 1.0);
/* Per-predecessor coupling: each predecessor's end-of-block state
 * must individually equal the state of the first use. */
373 for(i = 0; i < n_preds; ++i) {
374 ir_node *pred_bl = get_Block_cfgpred_block(bl, i);
375 ir_node *end_node = is_phi ? get_irn_n(irn, i) : irn;
376 live_range_t *op_lr = get_live_range(si, end_node, pred_bl, -1);
378 ir_snprintf(buf, sizeof(buf), "cpred_%N_%N_%d", lr->irn, bl, i);
379 cst = lpp_add_cst(si->lpp, buf, lpp_equal, 0.0);
380 lpp_set_factor_fast(si->lpp, cst, op_lr->in_mem_var, 1.0);
381 lpp_set_factor_fast(si->lpp, cst, lr->in_mem_var, -1.0);
391 * Add the costs for a store.
393 * If one of the uses is from memory, add additional costs for the
396 * m_1 + ... + m_n - M * s <= 0
398 * @param si The ILP spilling environment.
391 * Add the costs for a store.
393 * If one of the uses is from memory, add additional costs for the
396 * m_1 + ... + m_n - M * s <= 0
398 * @param si The ILP spilling environment.
400 static void add_store_costs(spill_ilp_t *si)
/* Store cost only counts when explicitly enabled. */
404 double costs = si->enable_store ? COST_STORE : 0.0;
406 for(uh = set_first(si->irn_use_heads); uh; uh = set_next(si->irn_use_heads)) {
410 ir_snprintf(buf, sizeof(buf), "cs_%N", uh->irn);
411 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
/* Big-M constraint: if any use's in_mem var is 1, the spill
 * variable s must be 1 as well. */
413 ir_snprintf(buf, sizeof(buf), "s_%N", uh->irn);
414 uh->spill_var = lpp_add_var(si->lpp, buf, lpp_binary, costs);
415 lpp_set_factor_fast(si->lpp, cst, uh->spill_var, -BIGM);
417 list_for_each_entry(live_range_t, lr, &uh->head, list)
418 lpp_set_factor_fast(si->lpp, cst, lr->in_mem_var, 1.0);
/* Tolerant zero test for solver output (binary vars come back as
 * floats close to, but not exactly, 0 or 1). */
422 static INLINE int is_zero(double x)
424 return fabs(x) < 0.00001;
/* A live range is spilled iff the solver set its in_mem variable. */
427 static int is_spilled(const spill_ilp_t *si, const live_range_t *lr)
429 return !is_zero(lpp_get_var_sol(si->lpp, lr->in_mem_var));
/* Transfer the ILP solution back to the IR: collect Phis whose first
 * use is from memory, then insert spill/reload nodes for every
 * spilled real use and fix up SSA with copy introduction. */
432 static void writeback_results(spill_ilp_t *si)
434 const be_node_factory_t *fact = si->senv.session->main_env->node_factory;
436 si->senv.mem_phis = pset_new_ptr_default();
/* First pass: Phis spilled at their closest use live in memory. */
438 for(uh = set_first(si->irn_use_heads); uh; uh = set_next(si->irn_use_heads)) {
439 if(is_Phi(uh->irn) && is_spilled(si, uh->closest_use))
440 pset_insert_ptr(si->senv.mem_phis, uh->irn);
443 /* Look at each node and examine the usages. */
444 for(uh = set_first(si->irn_use_heads); uh; uh = set_next(si->irn_use_heads)) {
449 ir_node *irn = uh->irn;
450 ir_mode *mode = get_irn_mode(irn);
452 /* Go through all live ranges of the node. */
453 list_for_each_entry(live_range_t, lr, &uh->head, list) {
454 int spilled = is_spilled(si, lr);
455 // int rematd = !is_zero(lpp_get_var_sol(si->lpp, lr->is_remat_var));
/* End-of-block pseudo-uses get no reload; only real uses do. */
457 if(spilled && !is_end_of_block_use(lr)) {
458 ir_node *bl = get_nodes_block(lr->user);
461 ir_node *spill = be_spill_node(&si->senv, lr->irn);
462 ir_node *reload = new_Reload(fact, si->cls, si->senv.session->irg, bl, mode, spill);
/* Collect reloads on the obstack for the SSA repair below. */
464 obstack_ptr_grow(si->obst, reload);
467 sched_add_before(lr->user, reload);
/* Rewire the spilled uses to the new reloads (SSA reconstruction),
 * ignoring the Phis that stay in memory. */
472 reloads = obstack_finish(si->obst);
473 be_introduce_copies_ignore(si->senv.session->dom_front, irn, n_reloads, reloads, si->senv.mem_phis);
474 obstack_free(si->obst, reloads);
477 be_remove_spilled_phis(&si->senv);
/* Entry point: build the spilling ILP for one register class of one
 * graph, solve it with CPLEX, dump problem/solution files for
 * debugging, and write the result back into the IR. */
480 void be_spill_ilp(const be_main_session_env_t *session_env,
481 const arch_register_class_t *cls)
483 char problem_name[256];
487 ir_snprintf(problem_name, sizeof(problem_name), "%F_%s", session_env->irg, cls->name);
491 si.dbg = firm_dbg_register("be.ra.spillilp");
492 si.senv.session = session_env;
494 si.lpp = new_lpp(problem_name, lpp_minimize);
495 si.irn_use_heads = new_set(cmp_irn_use_head, 4096);
496 si.live_ranges = new_set(cmp_live_range, 16384);
497 si.senv.spill_ctxs= new_set(be_set_cmp_spillctx, 4096);
501 firm_dbg_set_mask(si.dbg, DBG_LEVEL);
/* Build all constraints block by block, then the store costs. */
502 irg_block_walk_graph(session_env->irg, process_block, NULL, &si);
504 add_store_costs(&si);
/* Dump the ILP in plain text for offline inspection. */
511 ir_snprintf(buf, sizeof(buf), "spill-%s.ilp", problem_name);
512 if((f = fopen(buf, "wt")) != NULL) {
513 lpp_dump_plain(si.lpp, f);
519 DBG((si.dbg, LEVEL_1, "%F\n", session_env->irg));
520 // lpp_solve_net(si.lpp, LPP_SERVER, LPP_SOLVER);
521 lpp_solve_cplex(si.lpp);
522 assert(lpp_is_sol_valid(si.lpp) && "ILP not feasible");
524 assert(lpp_is_sol_valid(si.lpp) && "solution of ILP must be valid");
526 DBG((si.dbg, LEVEL_1, "\tnodes: %d, vars: %d, csts: %d\n",
527 set_count(si.irn_use_heads), si.lpp->var_next, si.lpp->cst_next));
528 DBG((si.dbg, LEVEL_1, "\titerations: %d, solution time: %g\n",
529 si.lpp->iterations, si.lpp->sol_time));
/* Dump the solved variable assignment alongside the problem file. */
536 ir_snprintf(buf, sizeof(buf), "spill-%s.sol", problem_name);
537 if((f = fopen(buf, "wt")) != NULL) {
539 for(i = 0; i < si.lpp->var_next; ++i) {
540 lpp_name_t *name = si.lpp->vars[i];
541 fprintf(f, "%10s %4d %10f\n", name->name, name->nr, name->value);
547 writeback_results(&si);
/* Release ILP-side data; the IR now carries the result. */
549 del_set(si.irn_use_heads);
550 del_set(si.live_ranges);
552 obstack_free(&obst, NULL);