+ for (n=get_irn_arity(tmp)-1; n>=0; --n) {
+ ir_node *remat_arg = get_irn_n(tmp, n);
+ op_t *arg_op = get_irn_link(remat_arg);
+
+ if(!has_reg_class(si, remat_arg)) continue;
+
+ spill = set_find_spill(spill_bb->ilp, remat_arg);
+ assert(spill);
+
+ /* arguments of remats have to be live until the very end of the block:
+ * remat = reg_out(remat_arg) and (reload(remat_arg) or live_range(remat_arg)),
+ * but not via other remats, as those could be scheduled in the wrong order
+ */
+
+ ir_snprintf(buf, sizeof(buf), "req_remat_%N_arg_%N", tmp, remat_arg);
+ cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
+
+ lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 3.0);
+ lpp_set_factor_fast(si->lpp, cst, spill->reg_out, -2.0);
+ lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
+
+ /* use reload placed for this argument */
+ if(spill_bb->reloads) {
+ keyval_t *keyval = set_find_keyval(spill_bb->reloads, remat_arg);
+
+ if(keyval) {
+ ilp_var_t reload = PTR_TO_INT(keyval->val);
+
+ lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
+ }
+ }
+ }
+ }
+ DBG((si->dbg, LEVEL_4, "\t %d values live at end of block %+F\n", pset_count(live), bb));
+
+
+
+
+ /**************************************
+ * B A S I C B L O C K B O D Y
+ **************************************/
+
+ sched_foreach_reverse_from(sched_block_last_noncf(si, bb), irn) {
+ op_t *op;
+ op_t *tmp_op;
+ int n,
+ u = 0,
+ d = 0;
+ ilp_cst_t check_pre,
+ check_post;
+ set *args;
+ pset *used;
+ pset *remat_defs;
+ keyval_t *keyval;
+ ilp_cst_t one_memoperand = -1;
+
+ /* iterate only until first phi */
+ if(is_Phi(irn))
+ break;
+
+ op = get_irn_link(irn);
+ /* skip remats */
+ if(op->is_remat) continue;
+
+ DBG((si->dbg, LEVEL_4, "\t at node %+F\n", irn));
+
+ /* collect defined values */
+ if(has_reg_class(si, irn)) {
+ pset_insert_ptr(defs, irn);
+ }
+
+ /* skip projs */
+ if(is_Proj(irn)) continue;
+
+ /*
+ * init set of irn's arguments
+ * and all possibly used values around this op
+ * and values defined by post remats
+ */
+ args = new_set(cmp_keyval, get_irn_arity(irn));
+ used = pset_new_ptr(pset_count(live) + get_irn_arity(irn));
+ remat_defs = pset_new_ptr(pset_count(live));
+
+ if(!is_start_block(bb) || !be_is_Barrier(irn)) {
+ for (n=get_irn_arity(irn)-1; n>=0; --n) {
+ ir_node *irn_arg = get_irn_n(irn, n);
+ if(has_reg_class(si, irn_arg)) {
+ set_insert_keyval(args, irn_arg, (void*)n);
+ pset_insert_ptr(used, irn_arg);
+ }
+ }
+ foreach_post_remat(irn, tmp) {
+ op_t *remat_op = get_irn_link(tmp);
+
+ pset_insert_ptr(remat_defs, remat_op->attr.remat.remat->value);
+
+ for (n=get_irn_arity(tmp)-1; n>=0; --n) {
+ ir_node *remat_arg = get_irn_n(tmp, n);
+ if(has_reg_class(si, remat_arg)) {
+ pset_insert_ptr(used, remat_arg);
+ }
+ }
+ }
+ foreach_pre_remat(si, irn, tmp) {
+ for (n=get_irn_arity(tmp)-1; n>=0; --n) {
+ ir_node *remat_arg = get_irn_n(tmp, n);
+ if(has_reg_class(si, remat_arg)) {
+ pset_insert_ptr(used, remat_arg);
+ }
+ }
+ }
+ }
+
+ /**********************************
+ * I N E P I L O G O F irn
+ **********************************/
+
+ /* ensure each dying value is used by only one post remat */
+ pset_foreach(used, tmp) {
+ ir_node *value = tmp;
+ op_t *value_op = get_irn_link(value);
+ ir_node *remat;
+ int n_remats = 0;
+
+ cst = ILP_UNDEF;
+ foreach_post_remat(irn, remat) {
+ op_t *remat_op = get_irn_link(remat);
+
+ for(n=get_irn_arity(remat)-1; n>=0; --n) {
+ ir_node *remat_arg = get_irn_n(remat, n);
+
+ /* if value is used by this remat add it to constraint */
+ if(remat_arg == value) {
+ if(n_remats == 0) {
+ /* sum remat2s <= 1 + n_remats*live_range */
+ ir_snprintf(buf, sizeof(buf), "dying_lr_%N_%N", value, irn);
+ cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 1.0);
+ }
+
+ n_remats++;
+ lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
+ break;
+ }
+ }
+ }
+
+ if(pset_find_ptr(live, value) && cst != ILP_UNDEF) {
+ lpp_set_factor_fast(si->lpp, cst, value_op->attr.live_range.ilp, -n_remats);