1 /** vim: set sw=4 ts=4:
4 * @author Adam M. Szalkowski & Sebastian Hack
6 * ILP-based spilling & rematerialization
8 * Copyright (C) 2006 Universitaet Karlsruhe
9 * Released under the GPL
37 #include <lpp/lpp_net.h>
38 #include <lpp/lpp_cplex.h>
39 //#include <lc_pset.h>
40 #include <libcore/lc_bitset.h>
44 #include "besched_t.h"
49 #include "bespillremat.h"
51 #include "bepressurestat.h"
53 #include "bechordal_t.h"
59 #define KEEPALIVE /* keep alive all inserted remats and dump graph with remats */
60 #define COLLECT_REMATS /* enable rematerialization */
61 #define COLLECT_INVERSE_REMATS /* enable placement of inverse remats */
62 #define REMAT_WHILE_LIVE /* only remat values that are live */
63 //#define NO_ENLARGE_L1V3N355 /* do not remat after the death of some operand */
64 //#define EXECFREQ_LOOPDEPH /* compute execution frequency from loop depth only */
65 //#define MAY_DIE_AT_PRE_REMAT /* allow values to die after a pre remat */
66 #define CHECK_POST_REMAT /* check pressure after post remats (conservative, but otherwise register pressure can temporarily exceed the limit) */
67 #define NO_SINGLE_USE_REMATS /* do not create remats for values with only a single use */
68 //#define KEEPALIVE_SPILLS
69 //#define KEEPALIVE_RELOADS
70 #define GOODWIN_REDUCTION
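/* With GOODWIN_REDUCTION (named after Goodwin's ILP spilling formulation), spill/reload
 * placement is restricted: is_merge_edge()/is_diverge_edge() below only accept blocks with
 * a single successor resp. a single predecessor, end-of-block remats and reloads are only
 * considered for the former, and start-of-block remat2s and reg_in-coupled spills only for
 * the latter (see walker_remat_insertor and luke_blockwalker). */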
75 #define LPP_SERVER "i44pc52"
76 #define LPP_SOLVER "cplex"
82 #define ILP_TIMEOUT 120
86 typedef struct _spill_ilp_t {
87 const arch_register_class_t *cls;
89 const be_chordal_env_t *chordal_env;
93 pset *all_possible_remats;
98 set *values; /**< for collecting all definitions of values before running SSA construction */
103 DEBUG_ONLY(firm_dbg_module_t * dbg);
106 typedef int ilp_var_t;
107 typedef int ilp_cst_t;
109 typedef struct _spill_bb_t {
114 typedef struct _remat_t {
115 const ir_node *op; /**< for copy_irn */
116 const ir_node *value; /**< the value which is being recomputed by this remat */
117 ir_node *proj; /**< not NULL if the above op produces a tuple */
118 int cost; /**< cost of this remat */
119 int inverse; /**< nonzero if this is an inverse remat */
123 * Data to be attached to each IR node. For remats this contains the ilp_var
124 * for this remat and for normal ops this contains the ilp_vars for
125 * reloading each operand
127 typedef struct _op_t {
132 remat_t *remat; /**< the remat this op belongs to */
133 int pre; /**< 1 if this remat is placed before its op (pre remat), 0 for a post remat (remat2) */
137 ir_node *op; /**< the operation this live range belongs to */
146 typedef struct _defs_t {
148 ir_node *spills; /**< points to the first spill for this value (linked by link field) */
149 ir_node *remats; /**< points to the first remat for this value (linked by link field) */
152 typedef struct _remat_info_t {
153 const ir_node *irn; /**< the irn to which these remats belong */
154 pset *remats; /**< possible remats for this value */
155 pset *remats_by_operand; /**< remats with this value as operand */
158 typedef struct _keyval_t {
163 typedef struct _spill_t {
173 has_reg_class(const spill_ilp_t * si, const ir_node * irn)
175 return chordal_has_class(si->chordal_env, irn);
180 cmp_remat(const void *a, const void *b)
182 const keyval_t *p = a;
183 const keyval_t *q = b;
184 const remat_t *r = p->val;
185 const remat_t *s = q->val;
189 return !(r == s || r->op == s->op);
193 cmp_remat(const void *a, const void *b)
195 const remat_t *r = a;
196 const remat_t *s = b;
198 return !(r == s || r->op == s->op);
202 cmp_spill(const void *a, const void *b, size_t size)
204 const spill_t *p = a;
205 const spill_t *q = b;
207 // return !(p->irn == q->irn && p->bb == q->bb);
208 return !(p->irn == q->irn);
212 set_find_keyval(set * set, void * key)
217 return set_find(set, &query, sizeof(query), HASH_PTR(key));
221 set_insert_keyval(set * set, void * key, void * val)
227 return set_insert(set, &query, sizeof(query), HASH_PTR(key));
231 set_find_def(set * set, ir_node * value)
236 return set_find(set, &query, sizeof(query), HASH_PTR(value));
240 set_insert_def(set * set, ir_node * value)
247 return set_insert(set, &query, sizeof(query), HASH_PTR(value));
251 set_find_spill(set * set, ir_node * value)
256 return set_find(set, &query, sizeof(query), HASH_PTR(value));
259 #define pset_foreach(s,i) for((i)=pset_first((s)); (i); (i)=pset_next((s)))
260 #define set_foreach(s,i) for((i)=set_first((s)); (i); (i)=set_next((s)))
261 #define foreach_post_remat(s,i) for((i)=next_post_remat((s)); (i); (i)=next_post_remat((i)))
262 #define foreach_pre_remat(si,s,i) for((i)=next_pre_remat((si),(s)); (i); (i)=next_pre_remat((si),(i)))
263 #define sched_foreach_op(s,i) for((i)=sched_next_op((s));!sched_is_end((i));(i)=sched_next_op((i)))
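/* Iteration helpers: pset_foreach/set_foreach walk all elements of a pset/set,
 * foreach_post_remat visits the post remats (remat2s) in the epilog of a node or block
 * via next_post_remat(), foreach_pre_remat the pre remats in its prolog via
 * next_pre_remat(), and sched_foreach_op walks the scheduled non-Proj ops of a block.
 * Typical use:
 *     pset_foreach(live, irn) { ... }
 */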
266 cmp_remat_info(const void *a, const void *b, size_t size)
268 const remat_info_t *p = a;
269 const remat_info_t *q = b;
271 return !(p->irn == q->irn);
275 cmp_defs(const void *a, const void *b, size_t size)
280 return !(p->value == q->value);
284 cmp_keyval(const void *a, const void *b, size_t size)
286 const keyval_t *p = a;
287 const keyval_t *q = b;
289 return !(p->key == q->key);
293 execution_frequency(const spill_ilp_t * si, const ir_node * irn)
298 return get_block_execfreq(si->execfreqs, irn) + FUDGE;
300 return get_block_execfreq(si->execfreqs, get_nodes_block(irn)) + FUDGE;
304 return exp(get_loop_depth(get_irn_loop(irn)) * log(10)) + FUDGE; /* = 10^loop_depth + FUDGE */
306 return exp(get_loop_depth(get_irn_loop(get_nodes_block(irn))) * log(10)) + FUDGE; /* = 10^loop_depth + FUDGE */
311 get_cost(const spill_ilp_t * si, const ir_node * irn)
313 if(be_is_Spill(irn)) {
315 } else if(be_is_Reload(irn)){
318 return arch_get_op_estimated_cost(si->chordal_env->birg->main_env->arch_env, irn);
324 * Checks whether a node and its operands have suitable register classes
327 is_rematerializable(const spill_ilp_t * si, const ir_node * irn)
331 const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
332 int remat = (arch_irn_get_flags(arch_env, irn) & arch_irn_flags_rematerializable) != 0;
336 ir_fprintf(stderr, " Node %+F is not rematerializable\n", irn);
339 for (i = 0, n = get_irn_arity(irn); i < n && remat; ++i) {
340 ir_node *op = get_irn_n(irn, i);
341 remat &= has_reg_class(si, op) || (arch_irn_get_flags(arch_env, op) & arch_irn_flags_ignore) || (get_irn_op(op) == op_NoMem);
344 // ir_fprintf(stderr, " Argument %d (%+F) of Node %+F has wrong regclass\n", i, op, irn);
351 * Try to create a remat from @p op with destination value @p dest_value
353 static INLINE remat_t *
354 get_remat_from_op(spill_ilp_t * si, const ir_node * dest_value, const ir_node * op)
356 remat_t *remat = NULL;
358 // if(!mode_is_datab(get_irn_mode(dest_value)))
361 if(dest_value == op) {
362 const ir_node *proj = NULL;
364 if(is_Proj(dest_value)) {
365 op = get_irn_n(op, 0);
369 if(!is_rematerializable(si, op))
372 remat = obstack_alloc(si->obst, sizeof(*remat));
374 remat->cost = get_cost(si, op);
375 remat->value = dest_value;
379 arch_inverse_t inverse;
383 /* get the index of the operand we want to retrieve by the inverse op */
384 for (i = 0, n = get_irn_arity(op); i < n; ++i) {
385 ir_node *arg = get_irn_n(op, i);
387 if(arg == dest_value) break;
389 if(i == n) return NULL;
391 DBG((si->dbg, LEVEL_5, "\t requesting inverse op for argument %d of op %+F\n", i, op));
393 /* else ask the backend to give an inverse op */
394 if(arch_get_inverse(si->chordal_env->birg->main_env->arch_env, op, i, &inverse, si->obst)) {
397 DBG((si->dbg, LEVEL_4, "\t backend gave us an inverse op with %d nodes and cost %d\n", inverse.n, inverse.costs));
399 assert(inverse.n > 0 && "inverse op should have at least one node");
401 for(i=0; i<inverse.n; ++i) {
402 pset_insert_ptr(si->inverse_ops, inverse.nodes[i]);
406 remat = obstack_alloc(si->obst, sizeof(*remat));
407 remat->op = inverse.nodes[0];
408 remat->cost = inverse.costs;
409 remat->value = dest_value;
410 remat->proj = (inverse.n==2)?inverse.nodes[1]:NULL;
413 assert(is_Proj(remat->proj));
415 assert(0 && "cannot handle remats with more than 2 nodes");
422 DBG((si->dbg, LEVEL_3, "\t >Found remat %+F for %+F from %+F with %+F\n", remat->op, dest_value, op, remat->proj));
424 DBG((si->dbg, LEVEL_3, "\t >Found remat %+F for %+F from %+F\n", remat->op, dest_value, op));
432 add_remat(const spill_ilp_t * si, const remat_t * remat)
434 remat_info_t *remat_info,
440 assert(remat->value);
442 query.irn = remat->value;
444 query.remats_by_operand = NULL;
445 remat_info = set_insert(si->remat_info, &query, sizeof(query), HASH_PTR(remat->value));
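/* set_insert here acts as find-or-insert: if a remat_info_t with the same key
 * (the value irn, see cmp_remat_info) already exists, the existing entry is
 * returned and the fresh query is ignored, so its psets are only initialized
 * on first insertion below. */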
447 if(remat_info->remats == NULL) {
448 remat_info->remats = new_pset(cmp_remat, 4096);
450 pset_insert(remat_info->remats, remat, HASH_PTR(remat->op));
452 /* insert the remat into the remats_by_operand set of each argument of the remat op */
453 for (i = 0, n = get_irn_arity(remat->op); i < n; ++i) {
454 ir_node *arg = get_irn_n(remat->op, i);
458 query.remats_by_operand = NULL;
459 remat_info = set_insert(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
461 if(remat_info->remats_by_operand == NULL) {
462 remat_info->remats_by_operand = new_pset(cmp_remat, 4096);
464 pset_insert(remat_info->remats_by_operand, remat, HASH_PTR(remat->op));
469 get_irn_n_nonremat_edges(const spill_ilp_t * si, const ir_node * irn)
471 const ir_edge_t *edge = get_irn_out_edge_first(irn);
475 if(!pset_find_ptr(si->inverse_ops, edge->src)) {
478 edge = get_irn_out_edge_next(irn, edge);
485 get_remats_from_op(spill_ilp_t * si, const ir_node * op)
491 #ifdef NO_SINGLE_USE_REMATS
492 if(has_reg_class(si, op) && (get_irn_n_nonremat_edges(si, op) > 1)) {
494 if(has_reg_class(si, op)) {
496 remat = get_remat_from_op(si, op, op);
498 add_remat(si, remat);
502 #ifdef COLLECT_INVERSE_REMATS
503 /* repeat this for each inverse remat retrievable via get_remat_from_op(op, arg)
505 for (i = 0, n = get_irn_arity(op); i < n; ++i) {
506 ir_node *arg = get_irn_n(op, i);
508 if(has_reg_class(si, arg)) {
509 /* try to get an inverse remat */
510 remat = get_remat_from_op(si, arg, op);
512 add_remat(si, remat);
521 value_is_defined_before(const spill_ilp_t * si, const ir_node * pos, const ir_node * val)
524 ir_node *def_block = get_nodes_block(val);
530 /* if pos is at end of a basic block */
532 ret = (pos == def_block || block_dominates(def_block, pos));
533 // ir_fprintf(stderr, "(def(bb)=%d) ", ret);
537 /* else if this is a normal operation */
538 block = get_nodes_block(pos);
539 if(block == def_block) {
540 if(!sched_is_scheduled(val)) return 1;
542 ret = sched_comes_after(val, pos);
543 // ir_fprintf(stderr, "(def(same block)=%d) ",ret);
547 ret = block_dominates(def_block, block);
548 // ir_fprintf(stderr, "(def(other block)=%d) ", ret);
552 static INLINE ir_node *
553 sched_block_last_noncf(const spill_ilp_t * si, const ir_node * bb)
555 return sched_skip((ir_node*)bb, 0, sched_skip_cf_predicator, (void *) si->chordal_env->birg->main_env->arch_env);
559 * Returns first non-Phi node of block @p bb
561 static INLINE ir_node *
562 sched_block_first_nonphi(const ir_node * bb)
564 return sched_skip((ir_node*)bb, 1, sched_skip_phi_predicator, NULL);
568 sched_skip_proj_predicator(const ir_node * irn, void * data)
570 return (is_Proj(irn));
573 static INLINE ir_node *
574 sched_next_nonproj(const ir_node * irn, int forward)
576 return sched_skip((ir_node*)irn, forward, sched_skip_proj_predicator, NULL);
580 * Returns next operation node (non-Proj) after @p irn
581 * or the basic block of this node
583 static INLINE ir_node *
584 sched_next_op(const ir_node * irn)
586 ir_node *next = sched_next(irn);
591 return sched_next_nonproj(next, 1);
595 * Returns previous operation node (non-Proj) before @p irn
596 * or the basic block of this node
598 static INLINE ir_node *
599 sched_prev_op(const ir_node * irn)
601 ir_node *prev = sched_prev(irn);
606 return sched_next_nonproj(prev, 0);
610 sched_put_after(ir_node * insert, ir_node * irn)
612 if(is_Block(insert)) {
613 insert = sched_block_first_nonphi(insert);
615 insert = sched_next_op(insert);
617 sched_add_before(insert, irn);
621 sched_put_before(const spill_ilp_t * si, ir_node * insert, ir_node * irn)
623 if(is_Block(insert)) {
624 insert = sched_block_last_noncf(si, insert);
626 insert = sched_next_nonproj(insert, 0);
627 insert = sched_prev(insert);
629 sched_add_after(insert, irn);
633 * Tells you whether a @p remat can be placed before the irn @p pos
636 can_remat_before(const spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
638 const ir_node *op = remat->op;
645 prev = sched_block_last_noncf(si, pos);
646 prev = sched_next_nonproj(prev, 0);
648 prev = sched_prev_op(pos);
650 /* do not remat if the rematted value is defined immediately before this op */
651 if(prev == remat->op) {
656 /* this should be just fine, the following OP will be using this value, right? */
658 /* only remat AFTER the real definition of a value (?) */
659 if(!value_is_defined_before(si, pos, remat->value)) {
660 // ir_fprintf(stderr, "error(not defined)");
665 for(i=0, n=get_irn_arity(op); i<n && res; ++i) {
666 const ir_node *arg = get_irn_n(op, i);
668 #ifdef NO_ENLARGE_L1V3N355
669 if(has_reg_class(si, arg) && live) {
670 res &= pset_find_ptr(live, arg)?1:0;
672 res &= value_is_defined_before(si, pos, arg);
675 res &= value_is_defined_before(si, pos, arg);
683 * Tells you whether a @p remat can be placed after the irn @p pos
686 can_remat_after(const spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
689 pos = sched_block_first_nonphi(pos);
691 pos = sched_next_op(pos);
694 /* only remat AFTER the real definition of a value (?) */
695 if(!value_is_defined_before(si, pos, remat->value)) {
699 return can_remat_before(si, remat, pos, live);
703 * Collect potentially rematerializable OPs
706 walker_remat_collector(ir_node * irn, void * data)
708 spill_ilp_t *si = data;
710 if(!is_Block(irn) && !is_Phi(irn)) {
711 DBG((si->dbg, LEVEL_4, "\t Processing %+F\n", irn));
712 get_remats_from_op(si, irn);
717 * Inserts a copy of @p irn before @p pos
720 insert_copy_before(const spill_ilp_t * si, const ir_node * irn, ir_node * pos)
725 bb = is_Block(pos)?pos:get_nodes_block(pos);
726 copy = exact_copy(irn);
727 set_nodes_block(copy, bb);
728 sched_put_before(si, pos, copy);
734 * Inserts a copy of @p irn after @p pos
737 insert_copy_after(const spill_ilp_t * si, const ir_node * irn, ir_node * pos)
742 bb = is_Block(pos)?pos:get_nodes_block(pos);
743 copy = exact_copy(irn);
744 set_nodes_block(copy, bb);
745 sched_put_after(pos, copy);
751 insert_remat_after(spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
755 if(can_remat_after(si, remat, pos, live)) {
760 DBG((si->dbg, LEVEL_3, "\t >inserting remat %+F\n", remat->op));
762 copy = insert_copy_after(si, remat->op, pos);
764 // ir_snprintf(buf, sizeof(buf), "remat2_%N_%N", remat->value, pos);
765 ir_snprintf(buf, sizeof(buf), "remat2_%N_%N", copy, pos);
766 op = obstack_alloc(si->obst, sizeof(*op));
768 op->attr.remat.remat = remat;
769 op->attr.remat.pre = 0;
770 op->attr.remat.ilp = lpp_add_var(si->lpp, buf, lpp_binary, remat->cost*execution_frequency(si, pos));
772 set_irn_link(copy, op);
773 pset_insert_ptr(si->all_possible_remats, copy);
775 proj_copy = insert_copy_after(si, remat->proj, copy);
776 set_irn_n(proj_copy, 0, copy);
777 set_irn_link(proj_copy, op);
778 pset_insert_ptr(si->all_possible_remats, proj_copy);
786 insert_remat_before(spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
790 if(can_remat_before(si, remat, pos, live)) {
795 DBG((si->dbg, LEVEL_3, "\t >inserting remat %+F\n", remat->op));
797 copy = insert_copy_before(si, remat->op, pos);
799 // ir_snprintf(buf, sizeof(buf), "remat_%N_%N", remat->value, pos);
800 ir_snprintf(buf, sizeof(buf), "remat_%N_%N", copy, pos);
801 op = obstack_alloc(si->obst, sizeof(*op));
803 op->attr.remat.remat = remat;
804 op->attr.remat.pre = 1;
805 op->attr.remat.ilp = lpp_add_var(si->lpp, buf, lpp_binary, remat->cost*execution_frequency(si, pos));
807 set_irn_link(copy, op);
808 pset_insert_ptr(si->all_possible_remats, copy);
810 proj_copy = insert_copy_after(si, remat->proj, copy);
811 set_irn_n(proj_copy, 0, copy);
812 set_irn_link(proj_copy, op);
813 pset_insert_ptr(si->all_possible_remats, proj_copy);
821 get_block_n_succs(const ir_node *block) {
822 const ir_edge_t *edge;
824 assert(edges_activated(current_ir_graph));
826 edge = get_block_succ_first(block);
830 edge = get_block_succ_next(block, edge);
835 is_merge_edge(const ir_node * bb)
837 #ifdef GOODWIN_REDUCTION
838 return get_block_n_succs(bb) == 1;
845 is_diverge_edge(const ir_node * bb)
847 #ifdef GOODWIN_REDUCTION
848 return get_Block_n_cfgpreds(bb) == 1;
855 * Insert (so far unused) remats into the irg to
856 * recompute the potential liveness of all values
859 walker_remat_insertor(ir_node * bb, void * data)
861 spill_ilp_t *si = data;
862 spill_bb_t *spill_bb;
866 pset *live = pset_new_ptr_default();
868 DBG((si->dbg, LEVEL_3, "\t Entering %+F\n\n", bb));
870 live_foreach(bb, li) {
871 ir_node *value = (ir_node *) li->irn;
873 /* collect values live at the end of the block */
874 if (live_is_end(li) && has_reg_class(si, value)) {
875 pset_insert_ptr(live, value);
879 spill_bb = obstack_alloc(si->obst, sizeof(*spill_bb));
880 set_irn_link(bb, spill_bb);
882 irn = sched_last(bb);
883 while(!sched_is_end(irn)) {
889 next = sched_prev(irn);
891 DBG((si->dbg, LEVEL_5, "\t at %+F (next: %+F)\n", irn, next));
893 if(is_Phi(irn) || is_Proj(irn)) {
896 if(has_reg_class(si, irn)) {
897 pset_remove_ptr(live, irn);
900 op = obstack_alloc(si->obst, sizeof(*op));
902 op->attr.live_range.args.reloads = NULL;
903 op->attr.live_range.ilp = ILP_UNDEF;
904 set_irn_link(irn, op);
910 op = obstack_alloc(si->obst, sizeof(*op));
912 op->attr.live_range.ilp = ILP_UNDEF;
913 op->attr.live_range.args.reloads = obstack_alloc(si->obst, sizeof(*op->attr.live_range.args.reloads) * get_irn_arity(irn));
914 memset(op->attr.live_range.args.reloads, 0xFF, sizeof(*op->attr.live_range.args.reloads) * get_irn_arity(irn));
915 set_irn_link(irn, op);
917 args = pset_new_ptr_default();
919 /* collect arguments of op */
920 for (n = get_irn_arity(irn)-1; n>=0; --n) {
921 ir_node *arg = get_irn_n(irn, n);
923 pset_insert_ptr(args, arg);
926 /* set args of op already live in epilog */
927 pset_foreach(args, arg) {
928 if(has_reg_class(si, arg)) {
929 pset_insert_ptr(live, arg);
933 /* insert all possible remats after irn */
934 pset_foreach(args, arg) {
935 remat_info_t *remat_info,
939 /* continue if the operand has the wrong reg class */
940 if(!has_reg_class(si, arg))
945 query.remats_by_operand = NULL;
946 remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
952 /* do not place post remats after jumps */
953 if(sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) continue;
955 if(remat_info->remats_by_operand) {
956 pset_foreach(remat_info->remats_by_operand, remat) {
957 /* do not insert remats producing the same value as one of the operands */
958 if(!pset_find_ptr(args, remat->value)) {
959 DBG((si->dbg, LEVEL_4, "\t considering remat %+F with arg %+F\n", remat->op, arg));
960 #ifdef REMAT_WHILE_LIVE
961 if(pset_find_ptr(live, remat->value)) {
962 insert_remat_after(si, remat, irn, live);
965 insert_remat_after(si, remat, irn, live);
972 /* delete defined value from live set */
973 if(has_reg_class(si, irn)) {
974 pset_remove_ptr(live, irn);
977 /* insert all possible remats before irn */
978 pset_foreach(args, arg) {
979 remat_info_t *remat_info,
983 /* continue if the operand has the wrong reg class
985 if(!has_reg_class(si, arg))
990 query.remats_by_operand = NULL;
991 remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
997 if(remat_info->remats) {
998 pset_foreach(remat_info->remats, remat) {
999 DBG((si->dbg, LEVEL_4, "\t considering remat %+F for arg %+F\n", remat->op, arg));
1000 #ifdef REMAT_WHILE_LIVE
1001 if(pset_find_ptr(live, remat->value)) {
1002 insert_remat_before(si, remat, irn, live);
1005 insert_remat_before(si, remat, irn, live);
1015 live_foreach(bb, li) {
1016 ir_node *value = (ir_node *) li->irn;
1018 /* add remats at end if successor has multiple predecessors */
1019 if(is_merge_edge(bb)) {
1020 /* add remats at end of block */
1021 if (live_is_end(li) && has_reg_class(si, value)) {
1022 remat_info_t *remat_info,
1027 query.remats = NULL;
1028 query.remats_by_operand = NULL;
1029 remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(value));
1031 if(remat_info && remat_info->remats) {
1032 pset_foreach(remat_info->remats, remat) {
1033 DBG((si->dbg, LEVEL_4, "\t considering remat %+F at end of block %+F\n", remat->op, bb));
1035 insert_remat_before(si, remat, bb, NULL);
1040 if(is_diverge_edge(bb)) {
1041 /* add remat2s at beginning of block */
1042 if ((live_is_in(li) || (is_Phi(value) && get_nodes_block(value)==bb)) && has_reg_class(si, value)) {
1043 remat_info_t *remat_info,
1048 query.remats = NULL;
1049 query.remats_by_operand = NULL;
1050 remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(value));
1052 if(remat_info && remat_info->remats) {
1053 pset_foreach(remat_info->remats, remat) {
1054 DBG((si->dbg, LEVEL_4, "\t considering remat %+F at beginning of block %+F\n", remat->op, bb));
1056 /* put the remat here if all its args are available */
1057 insert_remat_after(si, remat, bb, NULL);
1067 * Preparation of blocks' ends for Luke Blockwalker(tm)(R)
1070 luke_endwalker(ir_node * bb, void * data)
1072 spill_ilp_t *si = (spill_ilp_t*)data;
1079 spill_bb_t *spill_bb = get_irn_link(bb);
1082 live = pset_new_ptr_default();
1083 use_end = pset_new_ptr_default();
1085 live_foreach(bb, li) {
1086 irn = (ir_node *) li->irn;
1087 if (live_is_end(li) && has_reg_class(si, irn) && !pset_find_ptr(si->all_possible_remats, irn)) {
1090 pset_insert_ptr(live, irn);
1091 op = get_irn_link(irn);
1092 assert(!op->is_remat);
1096 /* collect values used by cond jumps etc. at bb end (use_end) -> always live */
1097 /* their reg_out must always be set */
1098 sched_foreach_reverse(bb, irn) {
1101 if(!sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) break;
1103 for (n=get_irn_arity(irn)-1; n>=0; --n) {
1104 ir_node *irn_arg = get_irn_n(irn, n);
1106 if(has_reg_class(si, irn_arg)) {
1107 pset_insert_ptr(use_end, irn_arg);
1112 ir_snprintf(buf, sizeof(buf), "check_end_%N", bb);
1113 //cst = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs);
1114 cst = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs - pset_count(use_end));
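/* the values in use_end are consumed by the control flow ops at the block end and are
 * forced into a register anyway (req_cf_end below pins their reg_out to 1), so only
 * n_regs - |use_end| registers remain for the other live-end values counted here */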
1116 spill_bb->ilp = new_set(cmp_spill, pset_count(live)+pset_count(use_end));
1118 pset_foreach(live,irn) {
1124 /* handle values used by control flow nodes later separately */
1125 if(pset_find_ptr(use_end, irn)) continue;
1128 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1130 spill_cost = is_Unknown(irn)?0.0001:COST_STORE*execution_frequency(si, bb);
1132 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", irn, bb);
1133 spill->reg_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1134 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1136 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1137 spill->mem_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1139 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1140 spill->spill = lpp_add_var(si->lpp, buf, lpp_binary, spill_cost);
1142 spill->reg_in = ILP_UNDEF;
1143 spill->mem_in = ILP_UNDEF;
1146 pset_foreach(use_end,irn) {
1150 ilp_cst_t end_use_req;
1153 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1155 spill_cost = is_Unknown(irn)?0.0001:COST_STORE*execution_frequency(si, bb);
1157 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", irn, bb);
1158 spill->reg_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1159 /* if irn is used at the end of the block, then it is live anyway */
1160 //lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1162 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1163 spill->mem_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1165 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1166 spill->spill = lpp_add_var(si->lpp, buf, lpp_binary, spill_cost);
1168 spill->reg_in = ILP_UNDEF;
1169 spill->mem_in = ILP_UNDEF;
1171 ir_snprintf(buf, sizeof(buf), "req_cf_end_%N_%N", irn, bb);
1172 end_use_req = lpp_add_cst(si->lpp, buf, lpp_equal, 1);
1173 lpp_set_factor_fast(si->lpp, end_use_req, spill->reg_out, 1.0);
1181 next_post_remat(const ir_node * irn)
1186 irn = sched_block_first_nonphi(irn);
1188 irn = sched_next_op(irn);
1191 if(sched_is_end(irn))
1194 op = (op_t*)get_irn_link(irn);
1195 if(op->is_remat && !op->attr.remat.pre) {
1204 next_pre_remat(const spill_ilp_t * si, const ir_node * irn)
1210 ret = sched_block_last_noncf(si, irn);
1211 ret = sched_next(ret);
1212 ret = sched_prev_op(ret);
1214 ret = sched_prev_op(irn);
1217 if(sched_is_end(ret) || is_Phi(ret))
1220 op = (op_t*)get_irn_link(ret);
1221 if(op->is_remat && op->attr.remat.pre) {
1229 * Find a remat of value @p value in the epilog of @p pos
1232 find_post_remat(const ir_node * value, const ir_node * pos)
1234 while((pos = next_post_remat(pos)) != NULL) {
1237 op = get_irn_link(pos);
1238 assert(op->is_remat && !op->attr.remat.pre);
1240 if(op->attr.remat.remat->value == value)
1241 return (ir_node*)pos;
1244 const ir_edge_t *edge;
1245 foreach_out_edge(pos, edge) {
1246 ir_node *proj = get_edge_src_irn(edge);
1247 assert(is_Proj(proj));
1257 add_to_spill_bb(spill_ilp_t * si, ir_node * bb, ir_node * irn)
1259 spill_bb_t *spill_bb = get_irn_link(bb);
1265 spill = set_find(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1267 double spill_cost = is_Unknown(irn)?0.0001:COST_STORE*execution_frequency(si, bb);
1269 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1271 spill->reg_out = ILP_UNDEF;
1272 spill->reg_in = ILP_UNDEF;
1273 spill->mem_in = ILP_UNDEF;
1275 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1276 spill->mem_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1278 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1279 spill->spill = lpp_add_var(si->lpp, buf, lpp_binary, spill_cost);
1286 get_live_end(spill_ilp_t * si, ir_node * bb, pset * live)
1291 live_foreach(bb, li) {
1292 irn = (ir_node *) li->irn;
1294 if (live_is_end(li) && has_reg_class(si, irn) && !pset_find_ptr(si->all_possible_remats, irn)) {
1295 pset_insert_ptr(live, irn);
1299 irn = sched_last(bb);
1301 /* all values eaten by control flow operations are also live until the end of the block */
1302 sched_foreach_reverse(bb, irn) {
1305 if(!sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) break;
1307 for(i=get_irn_arity(irn)-1; i>=0; --i) {
1308 ir_node *arg = get_irn_n(irn,i);
1310 if(has_reg_class(si, arg)) {
1311 pset_insert_ptr(live, arg);
1318 * Walk all irg blocks and emit this ILP
1321 luke_blockwalker(ir_node * bb, void * data)
1323 spill_ilp_t *si = (spill_ilp_t*)data;
1328 spill_bb_t *spill_bb = get_irn_link(bb);
1333 pset *defs = pset_new_ptr_default();
1336 live = pset_new_ptr_default();
1338 /****************************************
1339 * B A S I C B L O C K E N D
1340 ***************************************/
1343 /* init live values at end of block */
1344 get_live_end(si, bb, live);
1347 /* if this is a merge edge we can reload at the end of this block */
1348 if(is_merge_edge(bb)) {
1349 spill_bb->reloads = new_set(cmp_keyval, pset_count(live));
1351 spill_bb->reloads = NULL;
1354 pset_foreach(live, irn) {
1358 spill = set_find_spill(spill_bb->ilp, irn);
1361 if(spill_bb->reloads) {
1362 ir_snprintf(buf, sizeof(buf), "reload_%N_%N", bb, irn);
1363 reload = lpp_add_var(si->lpp, buf, lpp_binary, COST_LOAD*execution_frequency(si, bb));
1365 set_insert_keyval(spill_bb->reloads, irn, INT_TO_PTR(reload));
1367 /* reload <= mem_out */
1368 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1369 lpp_set_factor_fast(si->lpp, cst, reload, 1.0);
1370 lpp_set_factor_fast(si->lpp, cst, spill->mem_out, -1.0);
1373 op = get_irn_link(irn);
1374 assert(!op->is_remat);
1376 ir_snprintf(buf, sizeof(buf), "lr_%N_%N", irn, bb);
1377 op->attr.live_range.ilp = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1378 op->attr.live_range.op = bb;
1380 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", bb, irn);
1381 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1383 /* reg_out - reload - remat - live_range <= 0 */
1384 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1385 if(spill_bb->reloads) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
1386 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, -1.0);
1387 foreach_pre_remat(si, bb, tmp) {
1388 op_t *remat_op = get_irn_link(tmp);
1389 if(remat_op->attr.remat.remat->value == irn) {
1390 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1393 /* maybe we should also ensure that reg_out >= live_range etc. */
1396 /* start new live ranges for values used by remats at end of block */
1397 foreach_pre_remat(si, bb, tmp) {
1400 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1401 ir_node *remat_arg = get_irn_n(tmp, n);
1402 op_t *arg_op = get_irn_link(remat_arg);
1405 if(!has_reg_class(si, remat_arg)) continue;
1407 /* if value is becoming live through use by remat */
1408 if(!pset_find_ptr(live, remat_arg)) {
1409 ir_snprintf(buf, sizeof(buf), "lr_%N_begin%N", remat_arg, bb);
1410 prev_lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1412 arg_op->attr.live_range.ilp = prev_lr;
1413 arg_op->attr.live_range.op = bb;
1415 DBG((si->dbg, LEVEL_4, " value %+F becoming live through use by remat at end of block %+F\n", remat_arg, tmp));
1417 pset_insert_ptr(live, remat_arg);
1418 add_to_spill_bb(si, bb, remat_arg);
1423 DBG((si->dbg, LEVEL_4, "\t %d values live at end of block %+F\n", pset_count(live), bb));
1428 /**************************************
1429 * B A S I C B L O C K B O D Y
1430 **************************************/
1432 sched_foreach_reverse_from(sched_block_last_noncf(si, bb), irn) {
1438 ilp_cst_t check_pre,
1440 #ifdef CHECK_POST_REMAT
1441 ilp_cst_t check_post_remat;
1446 /* iterate only until first phi */
1450 op = get_irn_link(irn);
1452 if(op->is_remat) continue;
1453 DBG((si->dbg, LEVEL_4, "\t at node %+F\n", irn));
1455 /* collect defined values */
1456 if(has_reg_class(si, irn)) {
1457 pset_insert_ptr(defs, irn);
1461 if(is_Proj(irn)) continue;
1464 /* remove defined values from live set */
1465 if(has_reg_class(si, irn)) {
1466 assert(pset_find_ptr(live, irn));
1467 pset_remove_ptr(live, irn);
1471 /* init set of irn's arguments */
1472 args = new_set(cmp_keyval, get_irn_arity(irn));
1473 for (n=get_irn_arity(irn)-1; n>=0; --n) {
1474 ir_node *irn_arg = get_irn_n(irn, n);
1475 if(has_reg_class(si, irn_arg)) {
1476 set_insert_keyval(args, irn_arg, (void*)n);
1480 /**********************************
1481 * I N E P I L O G O F irn
1482 **********************************/
1484 /* new live ranges for values from L\U defined by post remats */
1485 pset_foreach(live, tmp) {
1486 ir_node *value = tmp;//remat_op->attr.remat.remat->value;
1487 op_t *value_op = get_irn_link(value);
1489 if(!set_find_keyval(args, value) && !pset_find_ptr(defs, value)) {
1490 ilp_var_t prev_lr = ILP_UNDEF;
1494 foreach_post_remat(irn, remat) {
1495 op_t *remat_op = get_irn_link(remat);
1497 /* if value is being rematerialized by this remat */
1498 if(value == remat_op->attr.remat.remat->value) {
1499 if(cst == ILP_UNDEF) {
1500 /* next_live_range <= prev_live_range + sum remat2s */
1501 ir_snprintf(buf, sizeof(buf), "next_lr_%N_%N", value, irn);
1502 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1504 ir_snprintf(buf, sizeof(buf), "lr_%N_%N", value, irn);
1505 prev_lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1507 lpp_set_factor_fast(si->lpp, cst, value_op->attr.live_range.ilp, 1.0);
1508 lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
1511 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1515 if(prev_lr != ILP_UNDEF) {
1516 value_op->attr.live_range.ilp = prev_lr;
1517 value_op->attr.live_range.op = irn;
1522 /* requirements for post remats and start live ranges from L\U for values dying here */
1523 foreach_post_remat(irn, tmp) {
1524 op_t *remat_op = get_irn_link(tmp);
1527 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1528 ir_node *remat_arg = get_irn_n(tmp, n);
1529 op_t *arg_op = get_irn_link(remat_arg);
1531 if(!has_reg_class(si, remat_arg)) continue;
1533 /* only for values in L\U (TODO and D?), the others are handled with post_use */
1534 if(!set_find_keyval(args, remat_arg)) {
1536 /* remat <= live_range(remat_arg) */
1536 ir_snprintf(buf, sizeof(buf), "req_remat2_%N_arg_%N", tmp, remat_arg);
1537 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1539 /* if value is becoming live through use by remat2 */
1540 if(!pset_find_ptr(live, remat_arg)) {
1543 ir_snprintf(buf, sizeof(buf), "lr_%N_%N", remat_arg, irn);
1544 lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1546 arg_op->attr.live_range.ilp = lr;
1547 arg_op->attr.live_range.op = irn;
1549 DBG((si->dbg, LEVEL_3, " value %+F becoming live through use by remat2 %+F\n", remat_arg, tmp));
1551 pset_insert_ptr(live, remat_arg);
1552 add_to_spill_bb(si, bb, remat_arg);
1555 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1556 lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
1561 #ifdef CHECK_POST_REMAT
1562 /* check the register pressure after the epilog */
1563 ir_snprintf(buf, sizeof(buf), "check_post_remat_%N", irn);
1564 check_post_remat = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs);
1566 /* iterate over L\U */
1567 pset_foreach(live, tmp) {
1568 if(!set_find_keyval(args, tmp)) {
1569 /* if a live value is not used by irn */
1570 tmp_op = get_irn_link(tmp);
1571 lpp_set_factor_fast(si->lpp, check_post_remat, tmp_op->attr.live_range.ilp, 1.0);
1574 /* iterate over remats in epilog and remove possibly defined values again from check_post_remat */
1575 foreach_post_remat(irn, tmp) {
1576 op_t *remat_op = get_irn_link(tmp);
1577 const ir_node *value = remat_op->attr.remat.remat->value;
1578 op_t *val_op = get_irn_link(value);
1580 assert(remat_op->is_remat && !remat_op->attr.remat.pre);
1582 /* values that are defined by remats are not counted */
1583 #ifdef REMAT_WHILE_LIVE
1584 assert(val_op->attr.live_range.ilp);
1585 lpp_set_factor_fast(si->lpp, check_post_remat, val_op->attr.live_range.ilp, 0.0);
1587 if(val_op->attr.live_range.ilp != ILP_UNDEF) {
1588 lpp_set_factor_fast(si->lpp, check_post_remat, val_op->attr.live_range.ilp, 0.0);
1590 #endif /* REMAT_WHILE_LIVE */
1592 /* instead add these post remats to the check */
1593 lpp_set_factor_fast(si->lpp, check_post_remat, remat_op->attr.remat.ilp, 1.0);
1596 #if 0 /* defs are still in live */
1597 /* get count of values in my register class defined by irn */
1598 /* also add defined values to check_post_remat; do this before iterating over args */
1599 pset_foreach(defs,tmp) {
1600 op_t *def_op = get_irn_link(tmp);
1602 lpp_set_factor_fast(si->lpp, check_post_remat, def_op->attr.live_range.ilp, 1.0);
1605 #endif /* CHECK_POST_REMAT */
1608 d = pset_count(defs);
1609 DBG((si->dbg, LEVEL_4, "\t %+F produces %d values in my register class\n", irn, d));
1611 /* count how many regs irn needs for arguments */
1612 u = set_count(args);
1615 /* check the register pressure in the epilog */
1616 /* sum_{L\U} lr + sum_{U} post_use <= k - |D| */
1617 ir_snprintf(buf, sizeof(buf), "check_post_%N", irn);
1618 check_post = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs - d);
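/* d = |D|: the number of values of this register class defined by irn (its Proj
 * results were collected into defs during the reverse walk); the post_use variables
 * for the arguments in U are added to check_post in the argument loop below */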
1620 /* add L\U to check_post */
1621 pset_foreach(live, tmp) {
1622 if(!set_find_keyval(args, tmp) && !pset_find_ptr(defs, tmp)) {
1623 /* if a live value is not used by irn */
1624 tmp_op = get_irn_link(tmp);
1625 lpp_set_factor_fast(si->lpp, check_post, tmp_op->attr.live_range.ilp, 1.0);
1629 /***********************************************************
1630 * I T E R A T I O N O V E R A R G S F O R E P I L O G
1631 **********************************************************/
1634 set_foreach(args, keyval) {
1639 ir_node *arg = keyval->key;
1640 op_t *arg_op = get_irn_link(arg);
1642 spill = add_to_spill_bb(si, bb, arg);
1644 /* new live range for each argument */
1645 ir_snprintf(buf, sizeof(buf), "lr_%N_%N", arg, irn);
1646 next_lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1648 i = (int)keyval->val;
1650 /* the epilog stuff - including post_use, check_post, check_post_remat */
1651 ir_snprintf(buf, sizeof(buf), "post_use_%N_%N", arg, irn);
1652 post_use = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1654 lpp_set_factor_fast(si->lpp, check_post, post_use, 1.0);
1656 /* arg is live throughout epilog if the next live_range is in a register */
1657 if(pset_find_ptr(live, arg)) {
1658 DBG((si->dbg, LEVEL_3, "\t arg %+F is possibly live in epilog of %+F\n", arg, irn));
1660 ir_snprintf(buf, sizeof(buf), "post_use_%N_%N-%d", arg, irn, p++);
1661 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1662 lpp_set_factor_fast(si->lpp, cst, post_use, -1.0);
1663 lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
1665 #ifdef CHECK_POST_REMAT
1666 //lpp_set_factor_fast(si->lpp, check_post_remat, arg_op->attr.live_range.ilp, 1.0);
1667 lpp_set_factor_fast(si->lpp, check_post_remat, post_use, 1.0);
1671 /* for all remat2s which use arg, add a similar constraint */
1672 foreach_post_remat(irn, tmp) {
1675 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1676 ir_node *remat_arg = get_irn_n(tmp, n);
1677 op_t *remat_op = get_irn_link(tmp);
1679 if(remat_arg == arg) {
1680 DBG((si->dbg, LEVEL_3, "\t found remat with arg %+F in epilog of %+F\n", arg, irn));
1682 ir_snprintf(buf, sizeof(buf), "post_use_%N_%N-%d", arg, irn, p++);
1683 cst = lpp_add_cst(si->lpp, buf, lpp_greater, 0.0);
1684 lpp_set_factor_fast(si->lpp, cst, post_use, 1.0);
1685 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1690 /* new live range begins for each argument */
1691 arg_op->attr.live_range.ilp = next_lr;
1692 arg_op->attr.live_range.op = irn;
1694 pset_insert_ptr(live, arg);
1697 /* just to be sure */
1698 check_post = ILP_UNDEF;
1699 #ifdef CHECK_POST_REMAT
1700 check_post_remat = ILP_UNDEF;
1710 /* check the register pressure in the prolog */
1711 /* sum_{L\U} lr <= k - |U| */
1712 ir_snprintf(buf, sizeof(buf), "check_pre_%N", irn);
1713 check_pre = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs - u);
1715 /* for the prolog remove defined values from the live set */
1716 pset_foreach(defs, tmp) {
1717 pset_remove_ptr(live, tmp);
1720 /***********************************************************
1721 * I T E R A T I O N O V E R A R G S F O R P R O L O G
1722 **********************************************************/
1725 set_foreach(args, keyval) {
1727 ir_node *arg = keyval->key;
1728 int i = PTR_TO_INT(keyval->val);
1729 op_t *arg_op = get_irn_link(arg);
1731 spill = set_find_spill(spill_bb->ilp, arg);
1734 ir_snprintf(buf, sizeof(buf), "reload_%N_%N", arg, irn);
1735 op->attr.live_range.args.reloads[i] = lpp_add_var(si->lpp, buf, lpp_binary, COST_LOAD*execution_frequency(si, bb));
1737 /* reload <= mem_out */
1738 ir_snprintf(buf, sizeof(buf), "req_reload_%N_%N", arg, irn);
1739 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1740 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[i], 1.0);
1741 lpp_set_factor_fast(si->lpp, cst, spill->mem_out, -1.0);
1743 /* requirement: arg must be in register for use */
1744 /* reload + remat + live_range == 1 */
1745 ir_snprintf(buf, sizeof(buf), "req_%N_%N", irn, arg);
1746 cst = lpp_add_cst(si->lpp, buf, lpp_equal, 1.0);
1748 lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
1749 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[i], 1.0);
1750 foreach_pre_remat(si, irn, tmp) {
1751 op_t *remat_op = get_irn_link(tmp);
1752 if(remat_op->attr.remat.remat->value == arg) {
1753 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1758 /* iterate over L\U */
1759 pset_foreach(live, tmp) {
1760 if(!set_find_keyval(args, tmp)) {
1761 /* if a live value is not used by irn */
1762 tmp_op = get_irn_link(tmp);
1763 lpp_set_factor_fast(si->lpp, check_pre, tmp_op->attr.live_range.ilp, 1.0);
1768 /* TODO allow new live ranges even if value does not die after remat??? MAY_DIE_AT_PRE_REMAT */
1770 /* requirements for remats */
1771 /* start new live ranges for values used by remats */
1772 foreach_pre_remat(si, irn, tmp) {
1773 op_t *remat_op = get_irn_link(tmp);
1776 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1777 ir_node *remat_arg = get_irn_n(tmp, n);
1778 op_t *arg_op = get_irn_link(remat_arg);
1781 if(!has_reg_class(si, remat_arg)) continue;
1783 /* if value is becoming live through use by remat */
1784 if(!pset_find_ptr(live, remat_arg)) {
1785 ir_snprintf(buf, sizeof(buf), "lr_%N_%N", remat_arg, irn);
1786 prev_lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1788 arg_op->attr.live_range.ilp = prev_lr;
1789 arg_op->attr.live_range.op = irn;
1791 DBG((si->dbg, LEVEL_4, " value %+F becoming live through use by remat %+F\n", remat_arg, tmp));
1793 pset_insert_ptr(live, remat_arg);
1794 add_to_spill_bb(si, bb, remat_arg);
1797 /* remat <= live_range(remat_arg) [ + reload(remat_arg) ] */
1798 ir_snprintf(buf, sizeof(buf), "req_remat_%N_arg_%N", tmp, remat_arg);
1799 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1801 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1802 lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
1804 /* if the remat arg is also used by the current op then we can use the reload placed for this argument */
1805 if((keyval = set_find_keyval(args, remat_arg)) != NULL) {
1806 int index = (int)keyval->val;
1808 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[index], -1.0);
1816 /*************************
1817 * D O N E W I T H O P
1818 *************************/
1820 DBG((si->dbg, LEVEL_4, "\t %d values live at %+F\n", pset_count(live), irn));
1822 pset_foreach(live, tmp) {
1823 assert(has_reg_class(si, tmp));
1826 for (n=get_irn_arity(irn)-1; n>=0; --n) {
1827 ir_node *arg = get_irn_n(irn, n);
1829 assert(!find_post_remat(arg, irn) && "there should be no post remat for an argument of an op");
1834 defs = pset_new_ptr_default();
1839 /***************************************
1840 * B E G I N N I N G O F B L O C K
1841 ***************************************/
1844 /* we are now at the beginning of the basic block; only Phis are in front of us */
1845 DBG((si->dbg, LEVEL_3, "\t %d values live at beginning of block %+F\n", pset_count(live), bb));
1847 pset_foreach(live, irn) {
1848 assert(is_Phi(irn) || get_nodes_block(irn) != bb);
1851 /* construct mem_outs for all values */
1853 set_foreach(spill_bb->ilp, spill) {
1854 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", spill->irn, bb);
1855 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1857 lpp_set_factor_fast(si->lpp, cst, spill->mem_out, 1.0);
1858 lpp_set_factor_fast(si->lpp, cst, spill->spill, -1.0);
1860 if(pset_find_ptr(live, spill->irn)) {
1861 DBG((si->dbg, LEVEL_5, "\t %+F live at beginning of block %+F\n", spill->irn, bb));
1863 ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N", spill->irn, bb);
1864 spill->mem_in = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1865 lpp_set_factor_fast(si->lpp, cst, spill->mem_in, -1.0);
1867 if(is_Phi(spill->irn) && get_nodes_block(spill->irn) == bb) {
1869 op_t *op = get_irn_link(spill->irn);
1871 /* do we have to copy a phi argument? */
1872 op->attr.live_range.args.copies = obstack_alloc(si->obst, sizeof(*op->attr.live_range.args.copies) * get_irn_arity(spill->irn));
1873 memset(op->attr.live_range.args.copies, 0xFF, sizeof(*op->attr.live_range.args.copies) * get_irn_arity(spill->irn));
1875 for(n=get_irn_arity(spill->irn)-1; n>=0; --n) {
1876 const ir_node *arg = get_irn_n(spill->irn, n);
1882 /* argument already done? */
1883 if(op->attr.live_range.args.copies[n] != ILP_UNDEF) continue;
1885 /* get sum of execution frequencies of blocks with the same phi argument */
1886 for(m=n; m>=0; --m) {
1887 const ir_node *arg2 = get_irn_n(spill->irn, m);
1890 freq += execution_frequency(si, get_Block_cfgpred_block(bb, m));
1894 /* copies are not free */
1895 ir_snprintf(buf, sizeof(buf), "copy_%N_%N", arg, spill->irn);
1896 var = lpp_add_var(si->lpp, buf, lpp_binary, COST_STORE * freq);
1898 for(m=n; m>=0; --m) {
1899 const ir_node *arg2 = get_irn_n(spill->irn, m);
1902 op->attr.live_range.args.copies[m] = var;
1906 /* copy <= mem_in */
1907 ir_snprintf(buf, sizeof(buf), "nocopy_%N_%N", arg, spill->irn);
1908 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1909 lpp_set_factor_fast(si->lpp, cst, var, 1.0);
1910 lpp_set_factor_fast(si->lpp, cst, spill->mem_in, -1.0);
1917 /* L\U is empty at bb start */
1918 /* arg is live throughout epilog if it is reg_in into this block */
1920 /* check the register pressure at the beginning of the block
1923 ir_snprintf(buf, sizeof(buf), "check_start_%N", bb);
1924 cst = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs);
1926 pset_foreach(live, irn) {
1929 spill = set_find_spill(spill_bb->ilp, irn);
1932 ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N", irn, bb);
1933 spill->reg_in = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1935 lpp_set_factor_fast(si->lpp, cst, spill->reg_in, 1.0);
1937 /* spill + reg_in <= 1 */
1938 ir_snprintf(buf, sizeof(buf), "nospill_%N_%N", irn, bb);
1939 nospill = lpp_add_cst(si->lpp, buf, lpp_less, 1);
1941 lpp_set_factor_fast(si->lpp, nospill, spill->reg_in, 1.0);
1942 lpp_set_factor_fast(si->lpp, nospill, spill->spill, 1.0);
1945 foreach_post_remat(bb, irn) {
1946 op_t *remat_op = get_irn_link(irn);
1948 DBG((si->dbg, LEVEL_4, "\t next post remat: %+F\n", irn));
1949 assert(remat_op->is_remat && !remat_op->attr.remat.pre);
1951 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1954 /* for all remat2s add requirements */
1955 foreach_post_remat(bb, tmp) {
1958 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1959 ir_node *remat_arg = get_irn_n(tmp, n);
1960 op_t *remat_op = get_irn_link(tmp);
1962 if(!has_reg_class(si, remat_arg)) continue;
1964 spill = set_find_spill(spill_bb->ilp, remat_arg);
1967 /* TODO verify this is placed correctly */
1968 ir_snprintf(buf, sizeof(buf), "req_remat2_%N_%N_arg_%N", tmp, bb, remat_arg);
1969 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1970 lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
1971 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1975 /* mem_in/reg_in for live_in values, especially phis and their arguments */
1976 pset_foreach(live, irn) {
1980 spill = set_find_spill(spill_bb->ilp, irn);
1981 assert(spill && spill->irn == irn);
1983 if(is_Phi(irn) && get_nodes_block(irn) == bb) {
1984 for (n=get_Phi_n_preds(irn)-1; n>=0; --n) {
1987 ir_node *phi_arg = get_Phi_pred(irn, n);
1988 ir_node *bb_p = get_Block_cfgpred_block(bb, n);
1989 spill_bb_t *spill_bb_p = get_irn_link(bb_p);
1992 /* although the phi is in the right regclass one or more of
1993 * its arguments can be in a different one or at least to
1996 if(has_reg_class(si, phi_arg)) {
1997 ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N-%d", irn, bb, p);
1998 mem_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1999 ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N-%d", irn, bb, p++);
2000 reg_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2002 lpp_set_factor_fast(si->lpp, mem_in, spill->mem_in, 1.0);
2003 lpp_set_factor_fast(si->lpp, reg_in, spill->reg_in, 1.0);
2005 spill_p = set_find_spill(spill_bb_p->ilp, phi_arg);
2008 lpp_set_factor_fast(si->lpp, mem_in, spill_p->mem_out, -1.0);
2009 lpp_set_factor_fast(si->lpp, reg_in, spill_p->reg_out, -1.0);
2013 /* else ensure the value arrives on all paths in the same resource */
2015 for (n=get_Block_n_cfgpreds(bb)-1; n>=0; --n) {
2018 ir_node *bb_p = get_Block_cfgpred_block(bb, n);
2019 spill_bb_t *spill_bb_p = get_irn_link(bb_p);
2022 ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N-%d", irn, bb, p);
2023 mem_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2024 ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N-%d", irn, bb, p++);
2025 reg_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2027 lpp_set_factor_fast(si->lpp, mem_in, spill->mem_in, 1.0);
2028 lpp_set_factor_fast(si->lpp, reg_in, spill->reg_in, 1.0);
2030 spill_p = set_find_spill(spill_bb_p->ilp, irn);
2033 lpp_set_factor_fast(si->lpp, mem_in, spill_p->mem_out, -1.0);
2034 lpp_set_factor_fast(si->lpp, reg_in, spill_p->reg_out, -1.0);
2039 /* first live ranges from reg_ins */
2040 pset_foreach(live, irn) {
2041 op_t *op = get_irn_link(irn);
2043 spill = set_find_spill(spill_bb->ilp, irn);
2044 assert(spill && spill->irn == irn);
2046 ir_snprintf(buf, sizeof(buf), "first_lr_%N_%N", irn, bb);
2047 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2048 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, 1.0);
2049 lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
2051 foreach_post_remat(bb, tmp) {
2052 op_t *remat_op = get_irn_link(tmp);
2054 if(remat_op->attr.remat.remat->value == irn) {
2055 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
2060 /* walk forward now and compute constraints for placing spills */
2061 /* this must only be done for values that are not defined in this block */
2062 /* TODO are these values at start of block? if yes, just check whether this is a diverge edge and skip the loop */
2063 pset_foreach(live, irn) {
2064 spill = set_find_spill(spill_bb->ilp, irn);
2067 ir_snprintf(buf, sizeof(buf), "req_spill_%N_%N", irn, bb);
2068 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2070 lpp_set_factor_fast(si->lpp, cst, spill->spill, 1.0);
2071 if(is_diverge_edge(bb)) lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
2073 sched_foreach_op(bb, tmp) {
2074 op_t *op = get_irn_link(tmp);
2076 if(is_Phi(tmp)) continue;
2077 assert(!is_Proj(tmp));
2080 ir_node *value = op->attr.remat.remat->value;
2083 /* only collect remats up to the first use of a value */
2084 lpp_set_factor_fast(si->lpp, cst, op->attr.remat.ilp, -1.0);
2089 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2090 ir_node *arg = get_irn_n(tmp, n);
2093 /* if a value is used stop collecting remats */
2099 if(cst == ILP_UNDEF) break;
2104 /* if a value is used by a mem-phi, then mem_in of this value is 0 (has to be spilled again into a different slot)
2105 mem_in(phi) -> not mem_in(orig_value) TODO: how does this depend on a certain predecessor?
2108 /* mem_in of mem-phi has associated costs (but first one is free) */
2109 /* define n_mem_copies as positive integer in each predecessor block,
2110 #mem_in into this block from predecessor block - 1 weighted with SPILL_COST*execfreq(predecessor)
2118 typedef struct _irnlist_t {
2119 struct list_head list;
2123 typedef struct _interference_t {
2124 struct list_head blocklist;
2130 cmp_interference(const void *a, const void *b, size_t size)
2132 const interference_t *p = a;
2133 const interference_t *q = b;
2135 return !(p->a == q->a && p->b == q->b);
2138 static interference_t *
2139 set_find_interference(set * set, ir_node * a, ir_node * b)
2141 interference_t query;
2143 query.a = (a>b)?a:b;
2144 query.b = (a>b)?b:a;
2146 return set_find(set, &query, sizeof(query), HASH_PTR(PTR_TO_INT(a) ^ PTR_TO_INT(b)));
2149 static interference_t *
2150 set_insert_interference(spill_ilp_t * si, set * set, ir_node * a, ir_node * b, ir_node * bb)
2152 interference_t query,
2154 irnlist_t *list = obstack_alloc(si->obst, sizeof(*list));
2158 result = set_find_interference(set, a, b);
2161 list_add(&list->list, &result->blocklist);
2165 query.a = (a>b)?a:b;
2166 query.b = (a>b)?b:a;
2168 result = set_insert(set, &query, sizeof(query), HASH_PTR(PTR_TO_INT(a) ^ PTR_TO_INT(b)));
2170 INIT_LIST_HEAD(&result->blocklist);
2171 list_add(&list->list, &result->blocklist);
2177 values_interfere_in_block(ir_node * bb, ir_node * a, ir_node * b)
2179 const ir_edge_t *edge;
2181 if(get_nodes_block(a) != bb && get_nodes_block(b) != bb) {
2182 /* both values are live in, so they interfere */
2186 /* ensure a dominates b */
2187 if(value_dominates(b,a)) {
2193 assert(get_nodes_block(b) == bb && "at least b should be defined here in this block");
2196 /* the following code is stolen from bera.c */
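/* at this point a dominates b and b is defined in bb; the two values interfere in bb
 * if a is still live at the end of bb, or if some user of a inside bb is dominated by
 * b's definition, i.e. a is still live when b comes to life */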
2197 if(is_live_end(bb, a))
2200 foreach_out_edge(a, edge) {
2201 const ir_node *user = edge->src;
2202 if(get_nodes_block(user) == bb
2205 && value_dominates(b, user))
2213 * Walk all irg blocks and collect interfering values inside of phi classes
2216 luke_interferencewalker(ir_node * bb, void * data)
2218 spill_ilp_t *si = (spill_ilp_t*)data;
2222 live_foreach(bb, li1) {
2223 ir_node *a = (ir_node *) li1->irn;
2224 op_t *a_op = get_irn_link(a);
2226 if(a_op->is_remat) continue;
2228 /* a is only interesting if it is in my register class and if it is inside a phi class */
2229 if (has_reg_class(si, a) && get_phi_class(a)) {
2230 for(li2=li1->next; li2; li2 = li2->next) {
2231 ir_node *b = (ir_node *) li2->irn;
2232 op_t *b_op = get_irn_link(b);
2234 if(b_op->is_remat) continue;
2236 /* a and b are only interesting if they are in the same phi class */
2237 if(has_reg_class(si, b) && get_phi_class(a) == get_phi_class(b)) {
2238 if(values_interfere_in_block(bb, a, b)) {
2239 //DBG((si->dbg, LEVEL_1, "\tvalues interfere in %+F: %+F, %+F\n", bb, a, b));
2240 ir_fprintf(stderr, "\tvalues interfere in %+F: %+F, %+F\n", bb, a, b);
2241 set_insert_interference(si, si->interferences, a, b, bb);
2249 static unsigned int copy_path_id = 0;
2252 write_copy_path_cst(spill_ilp_t *si, pset * copies, ilp_var_t any_interfere)
2259 ir_snprintf(buf, sizeof(buf), "copy_path-%d", copy_path_id++);
2260 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2262 lpp_set_factor_fast(si->lpp, cst, any_interfere, 1.0);
2264 pset_foreach(copies, ptr) {
2265 copy = PTR_TO_INT(ptr);
2266 lpp_set_factor_fast(si->lpp, cst, copy, -1.0);
2271 * @param copies contains a path of copies which leads us to irn
2272 * @param visited contains a set of nodes already visited on this path
2275 find_copy_path(spill_ilp_t * si, ir_node * irn, ir_node * target, ilp_var_t any_interfere, pset * copies, pset * visited)
2278 op_t *op = get_irn_link(irn);
2280 if(op->is_remat) return;
2282 pset_insert_ptr(visited, irn);
2287 /* visit all operands */
2288 for(n=get_irn_arity(irn)-1; n>=0; --n) {
2289 ir_node *arg = get_irn_n(irn, n);
2290 ilp_var_t copy = op->attr.live_range.args.copies[n];
2292 if(!has_reg_class(si, arg)) continue;
2295 pset_insert(copies, INT_TO_PTR(copy), copy);
2296 write_copy_path_cst(si, copies, any_interfere);
2297 pset_remove(copies, INT_TO_PTR(copy), copy);
2299 if(!pset_find_ptr(visited, arg)) {
2300 pset_insert(copies, INT_TO_PTR(copy), copy);
2301 find_copy_path(si, arg, target, any_interfere, copies, visited);
2302 pset_remove(copies, INT_TO_PTR(copy), copy);
2308 /* visit all uses which are phis */
2309 foreach_out_edge(irn, edge) {
2310 ir_node *user = edge->src;
2311 int pos = edge->pos;
2312 op_t *op = get_irn_link(user);
2315 if(!is_Phi(user)) continue;
2316 if(!has_reg_class(si, user)) continue;
2318 copy = op->attr.live_range.args.copies[pos];
2320 if(user == target) {
2321 pset_insert(copies, INT_TO_PTR(copy), copy);
2322 write_copy_path_cst(si, copies, any_interfere);
2323 pset_remove(copies, INT_TO_PTR(copy), copy);
2325 if(!pset_find_ptr(visited, user)) {
2326 pset_insert(copies, INT_TO_PTR(copy), copy);
2327 find_copy_path(si, user, target, any_interfere, copies, visited);
2328 pset_remove(copies, INT_TO_PTR(copy), copy);
2333 pset_remove_ptr(visited, irn);
2337 gen_copy_constraints(spill_ilp_t * si, ir_node * a, ir_node * b, ilp_var_t any_interfere)
2339 pset * copies = pset_new_ptr_default();
2340 pset * visited = pset_new_ptr_default();
2342 find_copy_path(si, a, b, any_interfere, copies, visited);
2350 memcopyhandler(spill_ilp_t * si)
2352 interference_t *interference;
2354 /* test memory values for interference */
2356 /* analyze phi classes */
2357 phi_class_compute(si->chordal_env->irg);
2359 DBG((si->dbg, LEVEL_2, "\t calling interferencewalker\n"));
2360 irg_block_walk_graph(si->chordal_env->irg, luke_interferencewalker, NULL, si);
2362 // phi_class_free(si->chordal_env->irg);
2364 /* now let's emit the ILP inequalities for these interferences */
2365 set_foreach(si->interferences, interference) {
2367 ilp_var_t interfere,
2369 ilp_cst_t any_interfere_cst,
2371 const ir_node *a = interference->a;
2372 const ir_node *b = interference->b;
2373 struct list_head *pos;
2375 /* any_interf <= \sum interf */
2376 ir_snprintf(buf, sizeof(buf), "interfere_%N_%N", a, b);
2377 any_interfere_cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2378 any_interfere = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
2380 lpp_set_factor_fast(si->lpp, any_interfere_cst, any_interfere, 1.0);
2382 list_for_each_entry(irnlist_t, irnlist, &interference->blocklist, list) {
2383 const ir_node *bb = irnlist->irn;
2384 spill_bb_t *spill_bb = get_irn_link(bb);
2391 spilla = set_find_spill(spill_bb->ilp, a);
2395 spillb = set_find_spill(spill_bb->ilp, b);
2398 /* interfere <-> (mem_in_a or spill_a) and (mem_in_b or spill_b): */
2399 /* 1: mem_in_a + mem_in_b + spill_a + spill_b - interfere <= 1 */
2400 /* 2: - mem_in_a - spill_a + interfere <= 0 */
2401 /* 3: - mem_in_b - spill_b + interfere <= 0 */
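/*
 * Why these three inequalities linearize the term above: constraint 2 gives
 * interfere <= mem_in_a + spill_a and constraint 3 gives
 * interfere <= mem_in_b + spill_b, so interfere is forced to 0 as soon as one
 * of the two values is not in memory in this block. Constraint 1 gives
 * interfere >= mem_in_a + mem_in_b + spill_a + spill_b - 1, which forces
 * interfere to 1 when both sides contribute at least one memory variable.
 * (If a mem_in variable is ILP_UNDEF, its term is simply left out below.)
 */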
2402 ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N", bb, a, b);
2403 interfere = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
2405 ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-1", bb, a, b);
2406 cst = lpp_add_cst(si->lpp, buf, lpp_less, 1);
2408 lpp_set_factor_fast(si->lpp, cst, interfere, -1.0);
2409 if(spilla->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spilla->mem_in, 1.0);
2410 lpp_set_factor_fast(si->lpp, cst, spilla->spill, 1.0);
2411 if(spillb->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spillb->mem_in, 1.0);
2412 lpp_set_factor_fast(si->lpp, cst, spillb->spill, 1.0);
2414 ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-2", bb, a, b);
2415 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2417 lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
2418 if(spilla->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spilla->mem_in, -1.0);
2419 lpp_set_factor_fast(si->lpp, cst, spilla->spill, -1.0);
2421 ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-3", bb, a, b);
2422 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2424 lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
2425 if(spillb->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spillb->mem_in, -1.0);
2426 lpp_set_factor_fast(si->lpp, cst, spillb->spill, -1.0);
2429 lpp_set_factor_fast(si->lpp, any_interfere_cst, interfere, -1.0);
2431 /* any_interfere >= interf */
2432 ir_snprintf(buf, sizeof(buf), "interfere_%N_%N-%N", a, b, bb);
2433 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2435 lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
2436 lpp_set_factor_fast(si->lpp, cst, any_interfere, -1.0);
2439 /* now that we know whether the two values interfere in memory, we can add the constraints that enforce copies */
2440 gen_copy_constraints(si,a,b,any_interfere);
2447 memcopyinsertor(spill_ilp_t * si)
2449 /* assign spill contexts. For Phis, make sure that equal
2450 * contexts are merged (operands and result share the same
2466 return fabs(x) < 0.00001;
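/*
 * The ILP solver returns even binary variables as floating point numbers,
 * so all solution values below are tested against a small tolerance via
 * is_zero() instead of being compared to 0.0 exactly.
 */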
2470 static int mark_remat_nodes_hook(FILE *F, ir_node *n, ir_node *l)
2472 spill_ilp_t *si = get_irg_link(current_ir_graph);
2474 if(pset_find_ptr(si->all_possible_remats, n)) {
2475 op_t *op = (op_t*)get_irn_link(n);
2476 assert(op && op->is_remat);
2478 if(!op->attr.remat.remat->inverse) {
2479 if(op->attr.remat.pre) {
2480 ir_fprintf(F, "color:red info3:\"remat value: %+F\"", op->attr.remat.remat->value);
2482 ir_fprintf(F, "color:orange info3:\"remat2 value: %+F\"", op->attr.remat.remat->value);
2487 op_t *op = (op_t*)get_irn_link(n);
2488 assert(op && op->is_remat);
2490 if(op->attr.remat.pre) {
2491 ir_fprintf(F, "color:cyan info3:\"remat inverse value: %+F\"", op->attr.remat.remat->value);
2493 ir_fprintf(F, "color:lightcyan info3:\"remat2 inverse value: %+F\"", op->attr.remat.remat->value);
2504 dump_graph_with_remats(ir_graph * irg, const char * suffix)
2506 set_dump_node_vcgattr_hook(mark_remat_nodes_hook);
2507 be_dump(irg, suffix, dump_ir_block_graph_sched);
2508 set_dump_node_vcgattr_hook(NULL);
2513 * Edge hook to dump the schedule edges with annotated register pressure.
2516 sched_pressure_edge_hook(FILE *F, ir_node *irn)
2518 if(sched_is_scheduled(irn) && sched_has_prev(irn)) {
2519 ir_node *prev = sched_prev(irn);
2520 fprintf(F, "edge:{sourcename:\"");
2522 fprintf(F, "\" targetname:\"");
2524 fprintf(F, "\" label:\"%d", (int)get_irn_link(irn));
2525 fprintf(F, "\" color:magenta}\n");
2531 dump_ir_block_graph_sched_pressure(ir_graph *irg, const char *suffix)
2533 DUMP_NODE_EDGE_FUNC old = get_dump_node_edge_hook();
2535 dump_consts_local(0);
2536 set_dump_node_edge_hook(sched_pressure_edge_hook);
2537 dump_ir_block_graph(irg, suffix);
2538 set_dump_node_edge_hook(old);
2542 walker_pressure_annotator(ir_node * bb, void * data)
2544 spill_ilp_t *si = data;
2548 pset *live = pset_new_ptr_default();
2551 live_foreach(bb, li) {
2552 irn = (ir_node *) li->irn;
2554 if (live_is_end(li) && has_reg_class(si, irn)) {
2555 pset_insert_ptr(live, irn);
2559 set_irn_link(bb, INT_TO_PTR(pset_count(live)));
2561 sched_foreach_reverse(bb, irn) {
2563 set_irn_link(irn, INT_TO_PTR(pset_count(live)));
2567 if(has_reg_class(si, irn)) {
2568 pset_remove_ptr(live, irn);
2569 if(is_Proj(irn)) ++projs;
2572 if(!is_Proj(irn)) projs = 0;
2574 for (n=get_irn_arity(irn)-1; n>=0; --n) {
2575 ir_node *arg = get_irn_n(irn, n);
2577 if(has_reg_class(si, arg)) pset_insert_ptr(live, arg);
2579 set_irn_link(irn, INT_TO_PTR(pset_count(live)+projs));
2586 dump_pressure_graph(spill_ilp_t * si, const char *suffix)
2588 be_dump(si->chordal_env->irg, suffix, dump_ir_block_graph_sched_pressure);
2593 connect_all_remats_with_keep(spill_ilp_t * si)
2601 n_remats = pset_count(si->all_possible_remats);
2603 ins = obstack_alloc(si->obst, n_remats * sizeof(*ins));
2606 pset_foreach(si->all_possible_remats, irn) {
2611 si->keep = be_new_Keep(si->chordal_env->cls, si->chordal_env->irg, get_irg_end_block(si->chordal_env->irg), n_remats, ins);
2613 obstack_free(si->obst, ins);
2619 connect_all_spills_with_keep(spill_ilp_t * si)
2628 n_spills = pset_count(si->spills);
2630 ins = obstack_alloc(si->obst, n_spills * sizeof(*ins));
2633 pset_foreach(si->spills, irn) {
2638 keep = be_new_Keep(si->chordal_env->cls, si->chordal_env->irg, get_irg_end_block(si->chordal_env->irg), n_spills, ins);
2640 obstack_free(si->obst, ins);
2644 /** insert a spill at an arbitrary position */
2645 ir_node *be_spill2(const arch_env_t *arch_env, ir_node *irn, ir_node *insert, ir_node *ctx)
2647 ir_node *bl = is_Block(insert)?insert:get_nodes_block(insert);
2648 ir_graph *irg = get_irn_irg(bl);
2649 ir_node *frame = get_irg_frame(irg);
2653 const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, irn, -1);
2654 const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
2656 spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn, ctx);
2659 * search for the right insertion point. A spill of a Phi cannot be put
2660 * directly after the Phi if there are further Phis after the one being
2661 * spilled. Also, a spill of a Proj must be placed after all Projs of the
2664 * Here's one special case:
2665 * If the spill is in the start block, the spill must be after the frame
2666 * pointer is set up. This is done by setting insert to the end of the block
2667 * which is its default initialization (see above).
2670 if(bl == get_irg_start_block(irg) && sched_get_time_step(frame) >= sched_get_time_step(insert))
2673 for (next = sched_next(insert); is_Phi(next) || is_Proj(next); next = sched_next(insert))
2676 sched_add_after(insert, spill);
2681 delete_remat(spill_ilp_t * si, ir_node * remat) {
2683 ir_node *bad = get_irg_bad(si->chordal_env->irg);
2685 sched_remove(remat);
2687 /* kill links to the block and all operands */
2688 for (n=get_irn_arity(remat)-1; n>=-1; --n) {
2689 set_irn_n(remat, n, bad);
2694 clean_remat_info(spill_ilp_t * si)
2698 remat_info_t *remat_info;
2699 ir_node *bad = get_irg_bad(si->chordal_env->irg);
2701 set_foreach(si->remat_info, remat_info) {
2702 if(!remat_info->remats) continue;
2704 pset_foreach(remat_info->remats, remat)
2706 if(remat->proj && get_irn_n_edges(remat->proj) == 0) {
2707 set_irn_n(remat->proj, -1, bad);
2708 set_irn_n(remat->proj, 0, bad);
2711 if(get_irn_n_edges(remat->op) == 0) {
2712 for (n=get_irn_arity(remat->op)-1; n>=-1; --n) {
2713 set_irn_n(remat->op, n, bad);
2718 if(remat_info->remats) del_pset(remat_info->remats);
2719 if(remat_info->remats_by_operand) del_pset(remat_info->remats_by_operand);
2724 delete_unnecessary_remats(spill_ilp_t * si)
2728 ir_node *bad = get_irg_bad(si->chordal_env->irg);
2731 ir_node *end = get_irg_end(si->chordal_env->irg);
2734 for (n=get_irn_arity(si->keep)-1; n>=0; --n) {
2735 ir_node *keep_arg = get_irn_n(si->keep, n);
2736 op_t *arg_op = get_irn_link(keep_arg);
2739 assert(arg_op->is_remat);
2741 name = si->lpp->vars[arg_op->attr.remat.ilp];
2743 if(is_zero(name->value)) {
2744 DBG((si->dbg, LEVEL_3, "\t deleting remat %+F\n", keep_arg));
2745 /* TODO check whether reload is preferred over remat (could be bug) */
2746 delete_remat(si, keep_arg);
2748 if(!arg_op->attr.remat.remat->inverse) {
2749 if(arg_op->attr.remat.pre) {
2750 DBG((si->dbg, LEVEL_2, "\t**remat kept: %+F\n", keep_arg));
2752 DBG((si->dbg, LEVEL_2, "\t%%%%remat2 kept: %+F\n", keep_arg));
2755 if(arg_op->attr.remat.pre) {
2756 DBG((si->dbg, LEVEL_2, "\t**INVERSE remat kept: %+F\n", keep_arg));
2758 DBG((si->dbg, LEVEL_2, "\t%%%%INVERSE remat2 kept: %+F\n", keep_arg));
2763 set_irn_n(si->keep, n, bad);
2766 for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
2767 ir_node *end_arg = get_End_keepalive(end, i);
2769 if(end_arg != si->keep) {
2770 obstack_grow(si->obst, &end_arg, sizeof(end_arg));
2773 keeps = obstack_finish(si->obst);
2774 set_End_keepalives(end, n-1, keeps);
2775 obstack_free(si->obst, keeps);
2778 DBG((si->dbg, LEVEL_2, "\t no remats to delete (none have been inserted)\n"));
2783 pset_foreach(si->all_possible_remats, remat) {
2784 op_t *remat_op = get_irn_link(remat);
2785 lpp_name_t *name = si->lpp->vars[remat_op->attr.remat.ilp];
2787 if(is_zero(name->value)) {
2788 DBG((si->dbg, LEVEL_3, "\t deleting remat %+F\n", remat));
2789 /* TODO check whether reload is preferred over remat (could be bug) */
2790 delete_remat(si, remat);
2792 if(!remat_op->attr.remat.remat->inverse) {
2793 if(remat_op->attr.remat.pre) {
2794 DBG((si->dbg, LEVEL_2, "\t**remat kept: %+F\n", remat));
2796 DBG((si->dbg, LEVEL_2, "\t%%%%remat2 kept: %+F\n", remat));
2799 if(remat_op->attr.remat.pre) {
2800 DBG((si->dbg, LEVEL_2, "\t**INVERSE remat kept: %+F\n", remat));
2802 DBG((si->dbg, LEVEL_2, "\t%%%%INVERSE remat2 kept: %+F\n", remat));
2811 * @param before The node after which the spill will be placed in the schedule
2813 /* TODO set context properly */
2815 insert_spill(spill_ilp_t * si, ir_node * irn, ir_node * value, ir_node * before)
2819 const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
2821 DBG((si->dbg, LEVEL_3, "\t inserting spill for value %+F after %+F\n", irn, before));
2823 spill = be_spill2(arch_env, irn, before, irn);
2825 defs = set_insert_def(si->values, value);
2828 /* enter into the linked list */
2829 set_irn_link(spill, defs->spills);
2830 defs->spills = spill;
2832 #ifdef KEEPALIVE_SPILLS
2833 pset_insert_ptr(si->spills, spill);
2840 * @param phi The Phi node which has to be spilled
2843 insert_mem_phi(spill_ilp_t * si, const ir_node * phi)
2850 NEW_ARR_A(ir_node*, ins, get_irn_arity(phi));
2852 for(n=get_irn_arity(phi)-1; n>=0; --n) {
2853 ins[n] = si->m_unknown;
2856 mem_phi = new_r_Phi(si->chordal_env->irg, get_nodes_block(phi), get_irn_arity(phi), ins, mode_M);
2858 defs = set_insert_def(si->values, phi);
2861 /* enter into the linked list */
2862 set_irn_link(mem_phi, defs->spills);
2863 defs->spills = mem_phi;
2865 sched_add_after(phi, mem_phi);
2867 #ifdef KEEPALIVE_SPILLS
2868 pset_insert_ptr(si->spills, mem_phi);
2875 * Add remat to list of defs, destroys link field!
2878 insert_remat(spill_ilp_t * si, ir_node * remat)
2881 op_t *remat_op = get_irn_link(remat);
2883 assert(remat_op->is_remat);
2885 defs = set_insert_def(si->values, remat_op->attr.remat.remat->value);
2888 /* enter into the linked list */
2889 set_irn_link(remat, defs->remats);
2890 defs->remats = remat;
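/*
 * Summary of the defs_t bookkeeping used during writeback: for each value,
 * defs->spills chains all spills and memory Phis and defs->remats chains all
 * remats and reloads, in both cases through the irn link field (see
 * insert_spill, insert_mem_phi, insert_remat and insert_reload). These lists
 * are later consumed by phim_fixer() and rewire_uses().
 */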
2895 collect_spills(spill_ilp_t * si, ir_node * value, pset * spills, pset * visited)
2900 defs = set_find_def(si->values, value);
2902 if(defs && defs->spills) {
2903 for(next = defs->spills; next; next = get_irn_link(next)) {
2904 pset_insert_ptr(spills, next);
2906 } else if (is_Phi(value)) {
2908 if(!pset_find_ptr(visited, value)) {
2912 pset_insert_ptr(visited, value);
2913 for(i=0, n=get_irn_arity(value); i<n; ++i) {
2914 ir_node *arg = get_irn_n(value, i);
2916 collect_spills(si, arg, spills, visited);
2920 // assert(0 && "Phi operand not spilled");
2926 get_spills_for_value(spill_ilp_t * si, ir_node * value)
2928 pset *spills = pset_new_ptr_default();
2929 // pset *visited = pset_new_ptr_default();
2931 // collect_spills(si, value, spills, visited);
2932 // del_pset(visited);
2936 defs = set_find_def(si->values, value);
2938 if(defs && defs->spills) {
2939 for(next = defs->spills; next; next = get_irn_link(next)) {
2940 pset_insert_ptr(spills, next);
2948 * Add reload before operation and add to list of defs
2951 insert_reload(spill_ilp_t * si, const ir_node * value, const ir_node * after)
2956 const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
2958 DBG((si->dbg, LEVEL_3, "\t inserting reload for value %+F before %+F\n", value, after));
2960 defs = set_find_def(si->values, value);
2961 /* get a spill of this value */
2963 if((!defs || !defs->spills) && is_Phi(value)) {
2966 spills = get_spills_for_value(si, value);
2968 spill = pset_first(spills);
2972 defs = set_insert_def(si->values, value);
2974 defs->spills = spill;
2975 set_irn_link(spill, NULL);
2977 spill = defs->spills;
2980 spill = defs->spills;
2981 assert(spill && "no spill placed before reload");
2983 reload = be_reload(arch_env, si->cls, after, get_irn_mode(value), spill);
2985 /* enter into the linked list */
2986 set_irn_link(reload, defs->remats);
2987 defs->remats = reload;
2993 walker_spill_placer(ir_node * bb, void * data) {
2994 spill_ilp_t *si = (spill_ilp_t*)data;
2996 spill_bb_t *spill_bb = get_irn_link(bb);
2997 pset *spills_to_do = pset_new_ptr_default();
3000 set_foreach(spill_bb->ilp, spill) {
3003 if(is_Phi(spill->irn) && get_nodes_block(spill->irn) == bb) {
3004 name = si->lpp->vars[spill->mem_in];
3005 if(!is_zero(name->value)) {
3008 mem_phi = insert_mem_phi(si, spill->irn);
3010 DBG((si->dbg, LEVEL_2, "\t >>spilled Phi %+F -> %+F\n", spill->irn, mem_phi));
3014 name = si->lpp->vars[spill->spill];
3015 if(!is_zero(name->value)) {
3016 if(spill->reg_in > 0) {
3017 name = si->lpp->vars[spill->reg_in];
3018 if(!is_zero(name->value)) {
3019 insert_spill(si, spill->irn, spill->irn, bb);
3023 pset_insert_ptr(spills_to_do, spill->irn);
3026 DBG((si->dbg, LEVEL_3, "\t %d spills to do in block %+F\n", pset_count(spills_to_do), bb));
3029 for(irn = sched_block_first_nonphi(bb); !sched_is_end(irn); irn = sched_next(irn)) {
3030 op_t *op = get_irn_link(irn);
3032 if(be_is_Spill(irn)) continue;
3035 /* TODO fix this if we want to support remats with more than two nodes */
3036 if(get_irn_mode(irn) != mode_T && pset_find_ptr(spills_to_do, op->attr.remat.remat->value)) {
3037 pset_remove_ptr(spills_to_do, op->attr.remat.remat->value);
3039 insert_spill(si, irn, op->attr.remat.remat->value, irn);
3042 if(pset_find_ptr(spills_to_do, irn)) {
3043 pset_remove_ptr(spills_to_do, irn);
3045 insert_spill(si, irn, irn, irn);
3051 assert(pset_count(spills_to_do) == 0);
3053 /* afterwards free data in block */
3054 del_pset(spills_to_do);
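/*
 * phim_fixer() patches the memory Phis created by insert_mem_phi(): their
 * operands are still the m_unknown placeholder and are replaced here by a
 * spill of the corresponding operand value, which walker_spill_placer()
 * must already have inserted.
 */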
3058 phim_fixer(spill_ilp_t *si) {
3061 set_foreach(si->values, defs) {
3062 const ir_node *phi = defs->value;
3063 ir_node *phi_m = NULL;
3064 ir_node *next = defs->spills;
3067 if(!is_Phi(phi)) continue;
3070 if(is_Phi(next) && get_irn_mode(next) == mode_M) {
3074 next = get_irn_link(next);
3077 if(!phi_m) continue;
3079 for(n=get_irn_arity(phi)-1; n>=0; --n) {
3080 const ir_node *value = get_irn_n(phi, n);
3081 defs_t *val_defs = set_find_def(si->values, value);
3083 /* get a spill of this value */
3084 ir_node *spill = val_defs->spills;
3086 assert(spill && "no spill placed before PhiM");
3088 set_irn_n(phi_m, n, spill);
3094 walker_reload_placer(ir_node * bb, void * data) {
3095 spill_ilp_t *si = (spill_ilp_t*)data;
3097 spill_bb_t *spill_bb = get_irn_link(bb);
3101 sched_foreach_reverse(bb, irn) {
3102 op_t *op = get_irn_link(irn);
3104 if(be_is_Reload(irn) || be_is_Spill(irn)) continue;
3105 if(is_Phi(irn)) break;
3108 if(get_irn_mode(irn) != mode_T) {
3109 insert_remat(si, irn);
3114 for (n=get_irn_arity(irn)-1; n>=0; --n) {
3115 ir_node *arg = get_irn_n(irn, n);
3117 if(op->attr.live_range.args.reloads && op->attr.live_range.args.reloads[n] != ILP_UNDEF) {
3120 name = si->lpp->vars[op->attr.live_range.args.reloads[n]];
3121 if(!is_zero(name->value)) {
3123 ir_node *insert_pos = irn;
3124 ir_node *prev = insert_pos;
3128 prev = sched_prev(prev);
3129 } while(be_is_Spill(prev));
3131 prev_op = get_irn_link(prev);
3133 /* insert reload before pre-remats */
3134 while(!sched_is_end(prev) && !be_is_Reload(prev) && !is_Phi(prev)
3135 && prev_op->is_remat && prev_op->attr.remat.pre) {
3139 prev = sched_prev(prev);
3140 } while(be_is_Spill(prev));
3142 prev_op = get_irn_link(prev);
3146 reload = insert_reload(si, arg, insert_pos);
3148 set_irn_n(irn, n, reload);
3150 #ifdef KEEPALIVE_RELOADS
3151 pset_insert_ptr(si->spills, reload);
3159 /* reloads at end of block */
3160 if(spill_bb->reloads) {
3163 set_foreach(spill_bb->reloads, keyval) {
3164 ir_node *irn = (ir_node*)keyval->key;
3165 ilp_var_t reload = PTR_TO_INT(keyval->val);
3168 name = si->lpp->vars[reload];
3169 if(!is_zero(name->value)) {
3171 ir_node *insert_pos = bb;
3172 ir_node *prev = sched_prev(insert_pos);
3173 op_t *prev_op = get_irn_link(prev);
3175 /* insert reload before pre-remats */
3176 while(!sched_is_end(prev) && !be_is_Reload(prev) && !be_is_Spill(prev)
3177 && prev_op->is_remat && prev_op->attr.remat.pre) {
3180 prev = sched_prev(insert_pos);
3181 prev_op = get_irn_link(prev);
3184 reload = insert_reload(si, irn, insert_pos);
3186 #ifdef KEEPALIVE_RELOADS
3187 pset_insert_ptr(si->spills, reload);
3193 del_set(spill_bb->ilp);
3194 if(spill_bb->reloads) del_set(spill_bb->reloads);
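/*
 * Cleanup after writeback: walker_collect_used() marks the index of every
 * node still reachable in the graph in a bitset, and walker_kill_unused()
 * then drops scheduled nodes that are not marked. A Spill or Reload ending
 * up unused hints at a suboptimal solution, hence the debug message and the
 * assertion against lpp_optimal below.
 */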
3198 walker_collect_used(ir_node * irn, void * data)
3200 lc_bitset_t *used = data;
3202 lc_bitset_set(used, get_irn_idx(irn));
3205 struct kill_helper {
3211 walker_kill_unused(ir_node * bb, void * data)
3213 struct kill_helper *kh = data;
3214 const ir_node *bad = get_irg_bad(get_irn_irg(bb));
3218 for(irn=sched_first(bb); !sched_is_end(irn);) {
3219 ir_node *next = sched_next(irn);
3222 if(!lc_bitset_is_set(kh->used, get_irn_idx(irn))) {
3223 if(be_is_Spill(irn) || be_is_Reload(irn)) {
3224 DBG((kh->si->dbg, LEVEL_1, "\t SUBOPTIMAL! %+F IS UNUSED (cost: %g)\n", irn, get_cost(kh->si, irn)*execution_frequency(kh->si, bb)));
3225 #if !defined(KEEPALIVE_SPILLS) && !defined(KEEPALIVE_RELOADS)
3226 assert(lpp_get_sol_state(kh->si->lpp) != lpp_optimal && "optimal solution is suboptimal?");
3232 set_nodes_block(irn, bad);
3233 for (n=get_irn_arity(irn)-1; n>=0; --n) {
3234 set_irn_n(irn, n, bad);
3242 kill_all_unused_values_in_schedule(spill_ilp_t * si)
3244 struct kill_helper kh;
3246 kh.used = lc_bitset_malloc(get_irg_last_idx(si->chordal_env->irg));
3249 irg_walk_graph(si->chordal_env->irg, walker_collect_used, NULL, kh.used);
3250 irg_block_walk_graph(si->chordal_env->irg, walker_kill_unused, NULL, &kh);
3252 lc_bitset_free(kh.used);
3256 print_irn_pset(pset * p)
3260 pset_foreach(p, irn) {
3261 ir_printf("%+F\n", irn);
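/*
 * rewire_uses() re-establishes SSA form after the new definitions have been
 * placed: for each value, the remats and reloads (defs->remats) together with
 * the original value, and separately all spills of the value, are handed to
 * be_ssa_constr_set() / be_ssa_constr_set_ignore() over the dominance
 * frontier info, which rewire the uses to the appropriate dominating
 * definition. The End node is kept in the ignore set so keep-alive edges
 * stay untouched.
 */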
3266 rewire_uses(spill_ilp_t * si)
3268 dom_front_info_t *dfi = be_compute_dominance_frontiers(si->chordal_env->irg);
3270 pset *ignore = pset_new_ptr(1);
3272 pset_insert_ptr(ignore, get_irg_end(si->chordal_env->irg));
3274 /* fix uses of spills */
3275 set_foreach(si->values, defs) {
3278 ir_node *next = defs->remats;
3281 reloads = pset_new_ptr_default();
3284 if(be_is_Reload(next)) {
3285 pset_insert_ptr(reloads, next);
3289 next = get_irn_link(next);
3292 spills = get_spills_for_value(si, defs->value);
3293 DBG((si->dbg, LEVEL_2, "\t %d remats, %d reloads, and %d spills for value %+F\n", remats, pset_count(reloads), pset_count(spills), defs->value));
3294 if(pset_count(spills) > 1) {
3295 //assert(pset_count(reloads) > 0);
3296 // print_irn_pset(spills);
3297 // print_irn_pset(reloads);
3299 be_ssa_constr_set_ignore(dfi, spills, ignore);
3306 /* fix uses of remats and reloads */
3307 set_foreach(si->values, defs) {
3309 ir_node *next = defs->remats;
3312 nodes = pset_new_ptr_default();
3313 pset_insert_ptr(nodes, defs->value);
3316 pset_insert_ptr(nodes, next);
3317 next = get_irn_link(next);
3320 if(pset_count(nodes) > 1) {
3321 DBG((si->dbg, LEVEL_4, "\t %d new definitions for value %+F\n", pset_count(nodes)-1, defs->value));
3322 be_ssa_constr_set(dfi, nodes);
3329 // remove_unused_defs(si);
3331 be_free_dominance_frontiers(dfi);
3335 writeback_results(spill_ilp_t * si)
3337 /* walk through the graph and collect all spills, reloads and remats for a value */
3339 si->values = new_set(cmp_defs, 4096);
3341 DBG((si->dbg, LEVEL_1, "Applying results\n"));
3342 delete_unnecessary_remats(si);
3343 si->m_unknown = new_r_Unknown(si->chordal_env->irg, mode_M);
3344 irg_block_walk_graph(si->chordal_env->irg, walker_spill_placer, NULL, si);
3346 irg_block_walk_graph(si->chordal_env->irg, walker_reload_placer, NULL, si);
3348 /* clean the remat info! there are still back-edges leading there! */
3349 clean_remat_info(si);
3353 connect_all_spills_with_keep(si);
3355 del_set(si->values);
3359 get_n_regs(spill_ilp_t * si)
3361 int arch_n_regs = arch_register_class_n_regs(si->cls);
3365 for(i=0; i<arch_n_regs; i++) {
3366 if(!arch_register_type_is(&si->cls->regs[i], ignore)) {
3371 DBG((si->dbg, LEVEL_1, "\tArchitecture has %d free registers in class %s\n", free, si->cls->name));
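/*
 * walker_reload_mover() below hoists reloads in the schedule: a reload is
 * removed and moved upwards past its predecessors as long as the register
 * pressure annotated in front of them stays below si->n_regs (each node it
 * passes gets its pressure annotation increased by one), stopping at the
 * block start or at another reload of the same register class; it is then
 * re-inserted after the node where the walk stopped.
 */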
3376 walker_reload_mover(ir_node * bb, void * data)
3378 spill_ilp_t *si = data;
3381 sched_foreach(bb, tmp) {
3382 if(be_is_Reload(tmp) && has_reg_class(si, tmp)) {
3383 ir_node *reload = tmp;
3386 /* move reload upwards */
3388 int pressure = (int)get_irn_link(reload);
3389 if(pressure < si->n_regs) {
3390 irn = sched_prev(reload);
3391 DBG((si->dbg, LEVEL_5, "regpressure before %+F: %d\n", reload, pressure));
3392 sched_remove(reload);
3393 pressure = (int)get_irn_link(irn);
3395 while(pressure < si->n_regs) {
3396 if(sched_is_end(irn) || (be_is_Reload(irn) && has_reg_class(si, irn))) break;
3398 set_irn_link(irn, INT_TO_PTR(pressure+1));
3399 DBG((si->dbg, LEVEL_5, "new regpressure before %+F: %d\n", irn, pressure+1));
3400 irn = sched_prev(irn);
3402 pressure = (int)get_irn_link(irn);
3405 DBG((si->dbg, LEVEL_3, "putting reload %+F after %+F\n", reload, irn));
3406 sched_put_after(irn, reload);
3413 move_reloads_upward(spill_ilp_t * si)
3415 irg_block_walk_graph(si->chordal_env->irg, walker_reload_mover, NULL, si);
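/*
 * be_spill_remat() is the driver of the ILP spiller. Roughly, the phases
 * below are:
 *   1. collect remats and insert all possible remats into the schedule
 *      (walker_remat_collector, walker_remat_insertor), keep them alive,
 *   2. recompute liveness and build the ILP (luke_endwalker,
 *      luke_blockwalker and, unless NO_MEMCOPIES is set, memcopyhandler),
 *   3. solve the ILP (lpp_solve_cplex or lpp_solve_net, depending on the
 *      configuration) with a time limit of ILP_TIMEOUT,
 *   4. write the solution back (writeback_results) and remove values that
 *      turned out to be unused,
 *   5. annotate the register pressure, move reloads upward and emit the
 *      post-pass pressure statistics.
 */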
3419 be_spill_remat(const be_chordal_env_t * chordal_env)
3421 char problem_name[256];
3422 char dump_suffix[256];
3423 char dump_suffix2[256];
3424 char dump_suffix3[256];
3425 struct obstack obst;
3428 ir_snprintf(problem_name, sizeof(problem_name), "%F_%s", chordal_env->irg, chordal_env->cls->name);
3429 ir_snprintf(dump_suffix, sizeof(dump_suffix), "-%s-remats", chordal_env->cls->name);
3430 ir_snprintf(dump_suffix2, sizeof(dump_suffix2), "-%s-pressure", chordal_env->cls->name);
3432 FIRM_DBG_REGISTER(si.dbg, "firm.be.ra.spillremat");
3433 DBG((si.dbg, LEVEL_1, "\n\n\t\t===== Processing %s =====\n\n", problem_name));
3435 obstack_init(&obst);
3436 si.chordal_env = chordal_env;
3438 si.cls = chordal_env->cls;
3439 si.lpp = new_lpp(problem_name, lpp_minimize);
3440 si.remat_info = new_set(cmp_remat_info, 4096);
3441 si.interferences = new_set(cmp_interference, 4096);
3442 si.all_possible_remats = pset_new_ptr_default();
3443 si.spills = pset_new_ptr_default();
3444 si.inverse_ops = pset_new_ptr_default();
3445 #ifndef EXECFREQ_LOOPDEPH
3446 si.execfreqs = compute_execfreq(chordal_env->irg);
3448 si.execfreqs = NULL;
3453 si.n_regs = get_n_regs(&si);
3455 set_irg_link(chordal_env->irg, &si);
3456 compute_doms(chordal_env->irg);
3458 /* compute phi classes */
3459 // phi_class_compute(chordal_env->irg);
3461 be_analyze_regpressure(chordal_env, "-pre");
3463 #ifdef COLLECT_REMATS
3464 /* collect remats */
3465 DBG((si.dbg, LEVEL_1, "Collecting remats\n"));
3466 irg_walk_graph(chordal_env->irg, walker_remat_collector, NULL, &si);
3469 /* insert possible remats */
3470 DBG((si.dbg, LEVEL_1, "Inserting possible remats\n"));
3471 irg_block_walk_graph(chordal_env->irg, walker_remat_insertor, NULL, &si);
3472 DBG((si.dbg, LEVEL_2, " -> inserted %d possible remats\n", pset_count(si.all_possible_remats)));
3475 DBG((si.dbg, LEVEL_1, "Connecting remats with keep and dumping\n"));
3476 connect_all_remats_with_keep(&si);
3477 /* dump graph with inserted remats */
3478 dump_graph_with_remats(chordal_env->irg, dump_suffix);
3482 /* recompute liveness */
3483 DBG((si.dbg, LEVEL_1, "Recomputing liveness\n"));
3484 be_liveness(chordal_env->irg);
3488 DBG((si.dbg, LEVEL_1, "\tBuilding ILP\n"));
3489 DBG((si.dbg, LEVEL_2, "\t endwalker\n"));
3490 irg_block_walk_graph(chordal_env->irg, luke_endwalker, NULL, &si);
3492 DBG((si.dbg, LEVEL_2, "\t blockwalker\n"));
3493 irg_block_walk_graph(chordal_env->irg, luke_blockwalker, NULL, &si);
3495 #ifndef NO_MEMCOPIES
3496 DBG((si.dbg, LEVEL_2, "\t memcopyhandler\n"));
3497 memcopyhandler(&si);
3505 ir_snprintf(buf, sizeof(buf), "%s-spillremat.ilp", problem_name);
3506 if ((f = fopen(buf, "wt")) != NULL) {
3507 lpp_dump_plain(si.lpp, f);
3514 DBG((si.dbg, LEVEL_1, "\tSolving %F\n", chordal_env->irg));
3516 lpp_set_time_limit(si.lpp, ILP_TIMEOUT);
3520 lpp_solve_cplex(si.lpp);
3522 lpp_solve_net(si.lpp, LPP_SERVER, LPP_SOLVER);
3524 assert(lpp_is_sol_valid(si.lpp)
3525 && "solution of ILP must be valid");
3527 DBG((si.dbg, LEVEL_1, "\t%s: iterations: %d, solution time: %g, objective function: %g\n", problem_name, si.lpp->iterations, si.lpp->sol_time, is_zero(si.lpp->objval)?0.0:si.lpp->objval));
3529 #ifdef DUMP_SOLUTION
3534 ir_snprintf(buf, sizeof(buf), "%s-spillremat.sol", problem_name);
3535 if ((f = fopen(buf, "wt")) != NULL) {
3537 for (i = 0; i < si.lpp->var_next; ++i) {
3538 lpp_name_t *name = si.lpp->vars[i];
3539 fprintf(f, "%20s %4d %10f\n", name->name, name->nr, name->value);
3546 writeback_results(&si);
3550 kill_all_unused_values_in_schedule(&si);
3552 #if defined(KEEPALIVE_SPILLS) || defined(KEEPALIVE_RELOADS)
3553 be_dump(chordal_env->irg, "-spills-placed", dump_ir_block_graph);
3556 // move reloads upwards
3557 be_liveness(chordal_env->irg);
3558 irg_block_walk_graph(chordal_env->irg, walker_pressure_annotator, NULL, &si);
3559 move_reloads_upward(&si);
3561 irg_block_walk_graph(chordal_env->irg, walker_pressure_annotator, NULL, &si);
3563 dump_pressure_graph(&si, dump_suffix2);
3565 // TODO fix temporarily exceeded regpressure due to remat2s
3567 // TODO insert copies to fix interferences in memory
3569 be_analyze_regpressure(chordal_env, "-post");
3571 free_dom(chordal_env->irg);
3572 del_set(si.interferences);
3573 del_pset(si.inverse_ops);
3574 del_pset(si.all_possible_remats);
3575 del_pset(si.spills);
3576 #ifndef EXECFREQ_LOOPDEPH
3577 free_execfreq(si.execfreqs);
3580 obstack_free(&obst, NULL);
3581 DBG((si.dbg, LEVEL_1, "\tdone.\n"));
3584 #else /* WITH_ILP */
3587 only_that_you_can_compile_without_WITH_ILP_defined(void)
3591 #endif /* WITH_ILP */