+ /* make all Phis ready and remember the single cf op */
+ cfop = NULL;
+ foreach_out_edge(block, edge) {
+ irn = get_edge_src_irn(edge);
+
+ switch (get_irn_opcode(irn)) {
+ case iro_Phi:
+ add_to_sched(env, block, irn, 0);
+ break;
+ case iro_Start:
+ case iro_End:
+ case iro_Proj:
+ case iro_Bad:
+ case iro_Unknown:
+ break;
+ default:
+ if (is_cfop(irn)) {
+ assert(cfop == NULL && "Highlander - there can be only one");
+ cfop = irn;
+ }
+ break;
+ }
+ }
+
+ /* add all nodes from list */
+ for (i = 0, l = ARR_LEN(sched_nodes); i < l; ++i) {
+ ilpsched_node_attr_t *na = get_ilpsched_node_attr(sched_nodes[i]);
+ if (sched_nodes[i]->irn != cfop)
+ add_to_sched(env, block, sched_nodes[i]->irn, na->sched_point);
+ }
+
+ /* schedule control flow node if not already done */
+ if (cfop && ! sched_is_scheduled(cfop))
+ add_to_sched(env, block, cfop, 0);
+
+ DEL_ARR_F(sched_nodes);
+}
+
+/***************************************************************
+ * _____ _ _____ _____ _ _
+ * |_ _| | | __ \ / ____| | | (_)
+ * | | | | | |__) | | (___ ___ ___| |_ _ ___ _ __
+ * | | | | | ___/ \___ \ / _ \/ __| __| |/ _ \| '_ \
+ * _| |_| |____| | ____) | __/ (__| |_| | (_) | | | |
+ * |_____|______|_| |_____/ \___|\___|\__|_|\___/|_| |_|
+ *
+ ***************************************************************/
+
+/**
+ * Look up the index of the given execution unit type in the node's
+ * type_info table.
+ *
+ * @param tp    the execution unit type to search for
+ * @param node  the scheduling node whose allowed unit types are examined
+ * @return index into na->type_info where tp occurs, or -1 if the node
+ *         cannot be executed on a unit of this type
+ */
+static INLINE int is_valid_unit_type_for_node(const be_execution_unit_type_t *tp, be_ilpsched_irn_t *node) {
+ ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
+ int idx = na->n_unit_types;
+
+ /* scan from the last entry downwards; falls through to -1 on no match */
+ while (--idx >= 0) {
+ if (na->type_info[idx].tp == tp)
+ break;
+ }
+
+ return idx;
+}
+
+/************************************************
+ * _ _ _
+ * (_) | | | |
+ * __ ____ _ _ __ _ __ _| |__ | | ___ ___
+ * \ \ / / _` | '__| |/ _` | '_ \| |/ _ \/ __|
+ * \ V / (_| | | | | (_| | |_) | | __/\__ \
+ * \_/ \__,_|_| |_|\__,_|_.__/|_|\___||___/
+ *
+ ************************************************/
+
+/**
+ * Create the following ILP variables for each interesting node of the block:
+ * - x_{nt}^k binary, weighted with: t
+ *   node n is scheduled at time step t to unit type k
+ *   ==>> These variables represent the schedule
+ *
+ * - d_{nt}^k binary, weighted with: t
+ *   node n dies at time step t on unit type k
+ * - a_{nt}^k binary, weighted with: num_nodes
+ *   node n is alive at time step t on unit type k
+ *
+ * - y_{nt}^k binary, weighted with: num_nodes^2
+ *   node n is scheduled at time step t to unit type k
+ *   although all units of this type are occupied
+ *   ==>> These variables represent the register pressure
+ *
+ * @param env        the ILP scheduling environment
+ * @param lpp        the LP problem the variables are added to
+ * @param block_node the scheduling node of the current block
+ * @param var_obst   obstack the per-node variable index arrays are allocated on
+ */
+static void create_variables(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node, struct obstack *var_obst) {
+ char buf[1024];
+ ir_node *irn;
+ unsigned num_block_var, num_nodes;
+ ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
+ /* objective weight of the y (pressure) variables: num_nodes^2 */
+ unsigned weight_y = ba->n_interesting_nodes * ba->n_interesting_nodes;
+#ifdef WITH_LIBCORE
+ lc_timer_t *t_var = lc_timer_register("beilpsched_var", "create ilp variables");
+#endif /* WITH_LIBCORE */
+
+ ilp_timer_push(t_var);
+ num_block_var = num_nodes = 0;
+ foreach_linked_irns(ba->head_ilp_nodes, irn) {
+ const be_execution_unit_t ***execunits = arch_isa_get_allowed_execution_units(env->arch_env->isa, irn);
+ be_ilpsched_irn_t *node;
+ ilpsched_node_attr_t *na;
+ unsigned n_unit_types, tp_idx, unit_idx, n_var, cur_unit;
+ unsigned cur_var_ad, cur_var_x, cur_var_y, num_ad;
+
+ /* count number of available unit types for this node */
+ for (n_unit_types = 0; execunits[n_unit_types]; ++n_unit_types)
+ /* just count */ ;
+
+ node = get_ilpsched_irn(env, irn);
+ na = get_ilpsched_node_attr(node);
+
+ na->n_unit_types = n_unit_types;
+ na->type_info = NEW_ARR_D(unit_type_info_t, var_obst, n_unit_types);
+
+ /* fill the type info array */
+ for (tp_idx = 0; tp_idx < n_unit_types; ++tp_idx) {
+ for (unit_idx = 0; execunits[tp_idx][unit_idx]; ++unit_idx) {
+ /* beware: we also count number of available units here */
+ if (be_machine_is_dummy_unit(execunits[tp_idx][unit_idx]))
+ na->is_dummy_node = 1;
+ }
+
+ na->type_info[tp_idx].tp = execunits[tp_idx][0]->tp;
+ na->type_info[tp_idx].n_units = unit_idx;
+ }
+
+ /* allocate space for ilp variables (initialized to -1 == "no variable") */
+ na->ilp_vars.x = NEW_ARR_D(int, var_obst, n_unit_types * VALID_SCHED_INTERVAL(na));
+ memset(na->ilp_vars.x, -1, ARR_LEN(na->ilp_vars.x) * sizeof(na->ilp_vars.x[0]));
+
+ /* we need these variables only for "real" nodes */
+ if (! na->is_dummy_node) {
+ na->ilp_vars.y = NEW_ARR_D(int, var_obst, n_unit_types * VALID_SCHED_INTERVAL(na));
+ memset(na->ilp_vars.y, -1, ARR_LEN(na->ilp_vars.y) * sizeof(na->ilp_vars.y[0]));
+
+ /* number of possible death steps: asap(n) .. max_steps (1-based) */
+ num_ad = ba->max_steps - na->asap + 1;
+
+ if (ba->n_interesting_nodes > env->opts->limit_dead) {
+ na->ilp_vars.a = NEW_ARR_D(int, var_obst, n_unit_types * num_ad);
+ memset(na->ilp_vars.a, -1, ARR_LEN(na->ilp_vars.a) * sizeof(na->ilp_vars.a[0]));
+ }
+ else {
+ na->ilp_vars.d = NEW_ARR_D(int, var_obst, n_unit_types * num_ad);
+ memset(na->ilp_vars.d, -1, ARR_LEN(na->ilp_vars.d) * sizeof(na->ilp_vars.d[0]));
+ }
+ }
+
+ DBG((env->dbg, LEVEL_3, "\thandling %+F (asap %u, alap %u, unit types %u):\n",
+ irn, na->asap, na->alap, na->n_unit_types));
+
+ cur_var_x = cur_var_ad = cur_var_y = cur_unit = n_var = 0;
+ /* create variables */
+ for (tp_idx = 0; tp_idx < n_unit_types; ++tp_idx) {
+ unsigned t;
+
+ /* x/y variables exist only inside the node's scheduling window */
+ for (t = na->asap - 1; t <= na->alap - 1; ++t) {
+ /* x_{nt}^k variables */
+ snprintf(buf, sizeof(buf), "x_n%u_%s_%u",
+ get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
+ na->ilp_vars.x[cur_var_x++] = lpp_add_var(lpp, buf, lpp_binary, (double)(t + 1));
+ DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));
+ /* variable counter */
+ n_var++;
+ num_block_var++;
+
+ if (! na->is_dummy_node) {
+ /* y_{nt}^k variables */
+ snprintf(buf, sizeof(buf), "y_n%u_%s_%u",
+ get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
+ na->ilp_vars.y[cur_var_y++] = lpp_add_var(lpp, buf, lpp_binary, (double)(weight_y));
+ DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));
+
+ /* variable counter */
+ n_var++;
+ num_block_var++;
+ }
+ }
+
+ /* a node can die at any step t: asap(n) <= t <= U,
+ i.e. 0-based: na->asap - 1 <= t <= ba->max_steps - 1.
+ BUGFIX: the loop formerly used t <= ba->max_steps (inclusive),
+ writing one entry per unit type past the end of the a/d arrays,
+ which hold exactly num_ad = max_steps - asap + 1 slots each. */
+ if (! na->is_dummy_node) {
+ for (t = na->asap - 1; t < ba->max_steps; ++t) {
+
+ if (ba->n_interesting_nodes > env->opts->limit_dead) {
+ /* a_{nt}^k variables */
+ snprintf(buf, sizeof(buf), "a_n%u_%s_%u",
+ get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
+ na->ilp_vars.a[cur_var_ad++] = lpp_add_var(lpp, buf, lpp_binary, (double)(ba->n_interesting_nodes));
+ }
+ else {
+ /* d_{nt}^k variables */
+ snprintf(buf, sizeof(buf), "d_n%u_%s_%u",
+ get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
+ na->ilp_vars.d[cur_var_ad++] = lpp_add_var(lpp, buf, lpp_binary, (double)(t + 1));
+ }
+ DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));
+
+ /* variable counter */
+ n_var++;
+ num_block_var++;
+ }
+ }
+ }
+
+ DB((env->dbg, LEVEL_3, "%u variables created\n", n_var));
+ num_nodes++;
+ }
+ ilp_timer_pop();
+ DBG((env->dbg, LEVEL_1, "... %u variables for %u nodes created (%g sec)\n",
+ num_block_var, num_nodes, ilp_timer_elapsed_usec(t_var) / 1000000.0));
+}
+
+/*******************************************************
+ * _ _ _
+ * | | (_) | |
+ * ___ ___ _ __ ___| |_ _ __ __ _ _ _ __ | |_ ___
+ * / __/ _ \| '_ \/ __| __| '__/ _` | | '_ \| __/ __|
+ * | (_| (_) | | | \__ \ |_| | | (_| | | | | | |_\__ \
+ * \___\___/|_| |_|___/\__|_| \__,_|_|_| |_|\__|___/
+ *
+ *******************************************************/
+
+/**
+ * Create the following ILP constraints:
+ * - the assignment constraints:
+ *   assure each node is executed once by exactly one (allowed) execution unit
+ * - the dead node assignment constraints:
+ *   assure a node can only die at most once
+ * - the precedence constraints:
+ *   assure that no data dependencies are violated
+ *
+ * @param env        the ILP scheduling environment
+ * @param lpp        the LP problem the constraints are added to
+ * @param block_node the scheduling node of the current block
+ */
+static void create_assignment_and_precedence_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
+ unsigned num_cst_assign, num_cst_prec, num_cst_dead;
+ char buf[1024];
+ ir_node *irn;
+ ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
+ bitset_t *bs_block_irns = bitset_alloca(ba->block_last_idx);
+#ifdef WITH_LIBCORE
+ lc_timer_t *t_cst_assign = lc_timer_register("beilpsched_cst_assign", "create assignment constraints");
+ lc_timer_t *t_cst_dead = lc_timer_register("beilpsched_cst_assign_dead", "create dead node assignment constraints");
+ lc_timer_t *t_cst_prec = lc_timer_register("beilpsched_cst_prec", "create precedence constraints");
+#endif /* WITH_LIBCORE */
+
+ num_cst_assign = num_cst_prec = num_cst_dead = 0;
+ foreach_linked_irns(ba->head_ilp_nodes, irn) {
+ int cst, tp_idx, i;
+ unsigned cur_var;
+ be_ilpsched_irn_t *node;
+ ilpsched_node_attr_t *na;
+
+ node = get_ilpsched_irn(env, irn);
+ na = get_ilpsched_node_attr(node);
+ cur_var = 0;
+
+ /* the assignment constraint: sum of all x variables == 1 */
+ ilp_timer_push(t_cst_assign);
+ snprintf(buf, sizeof(buf), "assignment_cst_n%u", get_irn_idx(irn));
+ cst = lpp_add_cst_uniq(lpp, buf, lpp_equal, 1.0);
+ DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
+ num_cst_assign++;
+
+ lpp_set_factor_fast_bulk(lpp, cst, na->ilp_vars.x, ARR_LEN(na->ilp_vars.x), 1.0);
+ ilp_timer_pop();
+
+ /* the dead node assignment constraint: sum of all d variables <= 1 */
+ if (! na->is_dummy_node && ba->n_interesting_nodes <= env->opts->limit_dead) {
+ ilp_timer_push(t_cst_dead);
+ snprintf(buf, sizeof(buf), "dead_node_assign_cst_n%u", get_irn_idx(irn));
+ cst = lpp_add_cst_uniq(lpp, buf, lpp_less, 1.0);
+ DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
+ /* BUGFIX: this counter was never incremented (and never reported) */
+ num_cst_dead++;
+
+ lpp_set_factor_fast_bulk(lpp, cst, na->ilp_vars.d, ARR_LEN(na->ilp_vars.d), 1.0);
+ ilp_timer_pop();
+ }
+
+ /* We have separate constraints for Projs and Keeps */
+ // ILP becomes infeasible ?!?
+// if (is_Proj(irn) || be_is_Keep(irn))
+// continue;
+
+ /* the precedence constraints: for every in-block predecessor m of n,
+ n may not be scheduled at t unless m has already finished */
+ ilp_timer_push(t_cst_prec);
+ bs_block_irns = bitset_clear_all(bs_block_irns);
+ for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
+ ir_node *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(irn, i));
+ unsigned t_low, t_high, t;
+ be_ilpsched_irn_t *pred_node;
+ ilpsched_node_attr_t *pna;
+ unsigned delay;
+
+ /* Phis, out-of-block predecessors and NoMem impose no constraint here */
+ if (is_Phi(pred) || block_node->irn != get_nodes_block(pred) || is_NoMem(pred))
+ continue;
+
+ pred_node = get_ilpsched_irn(env, pred);
+ pna = get_ilpsched_node_attr(pred_node);
+
+ assert(pna->asap > 0 && pna->alap >= pna->asap && "Invalid scheduling interval.");
+
+ /* handle each in-block predecessor only once */
+ if (! bitset_is_set(bs_block_irns, pna->block_idx))
+ bitset_set(bs_block_irns, pna->block_idx);
+ else
+ continue;
+
+ /* irn = n, pred = m */
+ delay = fixed_latency(env->sel, pred, env->block_env);
+ t_low = MAX(na->asap, pna->asap + delay - 1);
+ t_high = MIN(na->alap, pna->alap + delay - 1);
+ for (t = t_low - 1; t <= t_high - 1; ++t) {
+ unsigned tn, tm;
+ int *tmp_var_idx = NEW_ARR_F(int, 0);
+
+ snprintf(buf, sizeof(buf), "precedence_n%u_n%u_%u", get_irn_idx(pred), get_irn_idx(irn), t);
+ cst = lpp_add_cst_uniq(lpp, buf, lpp_less, 1.0);
+ DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
+ num_cst_prec++;
+
+ /* lpp_set_factor_fast_bulk needs variables sorted ascending by index */
+ if (na->ilp_vars.x[0] < pna->ilp_vars.x[0]) {
+ /* node variables have smaller index than pred variables */
+ for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
+ for (tn = na->asap - 1; tn <= t; ++tn) {
+ unsigned idx = ILPVAR_IDX(na, tp_idx, tn);
+ ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[idx]);
+ }
+ }
+
+ for (tp_idx = pna->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
+ for (tm = t - delay + 1; tm < pna->alap; ++tm) {
+ unsigned idx = ILPVAR_IDX(pna, tp_idx, tm);
+ ARR_APP1(int, tmp_var_idx, pna->ilp_vars.x[idx]);
+ }
+ }
+ }
+ else {
+ /* pred variables have smaller index than node variables */
+ for (tp_idx = pna->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
+ for (tm = t - delay + 1; tm < pna->alap; ++tm) {
+ unsigned idx = ILPVAR_IDX(pna, tp_idx, tm);
+ ARR_APP1(int, tmp_var_idx, pna->ilp_vars.x[idx]);
+ }
+ }
+
+ for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
+ for (tn = na->asap - 1; tn <= t; ++tn) {
+ unsigned idx = ILPVAR_IDX(na, tp_idx, tn);
+ ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[idx]);
+ }
+ }
+ }
+
+ if (ARR_LEN(tmp_var_idx) > 0)
+ lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
+
+ DEL_ARR_F(tmp_var_idx);
+ }
+ }
+ ilp_timer_pop();
+ }
+ DBG((env->dbg, LEVEL_1, "\t%u assignment constraints (%g sec)\n",
+ num_cst_assign, ilp_timer_elapsed_usec(t_cst_assign) / 1000000.0));
+ DBG((env->dbg, LEVEL_1, "\t%u dead node assignment constraints (%g sec)\n",
+ num_cst_dead, ilp_timer_elapsed_usec(t_cst_dead) / 1000000.0));
+ DBG((env->dbg, LEVEL_1, "\t%u precedence constraints (%g sec)\n",
+ num_cst_prec, ilp_timer_elapsed_usec(t_cst_prec) / 1000000.0));
+}
+
+/**
+ * Create the ILP resource constraints:
+ * - for every unit type and every time step, no more instructions may be
+ *   scheduled to that type than units of this type are available
+ *
+ * @param env        the ILP scheduling environment
+ * @param lpp        the LP problem the constraints are added to
+ * @param block_node the scheduling node of the current block
+ */
+static void create_ressource_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
+ char buf[1024];
+ int glob_type_idx;
+ unsigned num_cst_resrc = 0;
+ ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
+#ifdef WITH_LIBCORE
+ lc_timer_t *t_cst_rsrc = lc_timer_register("beilpsched_cst_rsrc", "create resource constraints");
+#endif /* WITH_LIBCORE */
+
+ ilp_timer_push(t_cst_rsrc);
+ /* walk all unit types of the CPU; the DUMMY type is not part of the CPU
+ description, so it is skipped implicitly */
+ for (glob_type_idx = env->cpu->n_unit_types - 1; glob_type_idx >= 0; --glob_type_idx) {
+ be_execution_unit_type_t *unit_tp = &env->cpu->unit_types[glob_type_idx];
+ unsigned t;
+
+ /* one constraint per time step */
+ for (t = 0; t < ba->max_steps; ++t) {
+ int cst;
+ ir_node *irn;
+ int *var_indices = NEW_ARR_F(int, 0);
+
+ snprintf(buf, sizeof(buf), "resource_cst_%s_%u", unit_tp->name, t);
+ cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)unit_tp->n_units);
+ DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
+ num_cst_resrc++;
+
+ /* collect the x variables of all nodes which may run on this unit
+ type and whose scheduling window contains t */
+ foreach_linked_irns(ba->head_ilp_nodes, irn) {
+ be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);
+ ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
+ int tp_idx = is_valid_unit_type_for_node(unit_tp, node);
+
+ if (tp_idx < 0 || t < na->asap - 1 || t > na->alap - 1)
+ continue;
+
+ ARR_APP1(int, var_indices, na->ilp_vars.x[ILPVAR_IDX(na, tp_idx, t)]);
+ }
+
+ /* set constraints if we have some */
+ if (ARR_LEN(var_indices) > 0)
+ lpp_set_factor_fast_bulk(lpp, cst, var_indices, ARR_LEN(var_indices), 1.0);
+
+ DEL_ARR_F(var_indices);
+ }
+ }
+ ilp_timer_pop();
+ DBG((env->dbg, LEVEL_1, "\t%u resource constraints (%g sec)\n",
+ num_cst_resrc, ilp_timer_elapsed_usec(t_cst_rsrc) / 1000000.0));
+}
+
+/**
+ * Create the ILP bundle constraints:
+ * - at most bundle_size * bundles_per_cycle instructions may be started
+ *   at any one time step
+ *
+ * @param env        the ILP scheduling environment
+ * @param lpp        the LP problem the constraints are added to
+ * @param block_node the scheduling node of the current block
+ */
+static void create_bundle_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
+ char buf[1024];
+ unsigned t;
+ unsigned num_cst_bundle = 0;
+ unsigned n_instr_max = env->cpu->bundle_size * env->cpu->bundels_per_cycle;
+ ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
+#ifdef WITH_LIBCORE
+ lc_timer_t *t_cst_bundle = lc_timer_register("beilpsched_cst_bundle", "create bundle constraints");
+#endif /* WITH_LIBCORE */
+
+ ilp_timer_push(t_cst_bundle);
+ /* one constraint per time step */
+ for (t = 0; t < ba->max_steps; ++t) {
+ int cst;
+ ir_node *irn;
+ int *var_indices = NEW_ARR_F(int, 0);
+
+ snprintf(buf, sizeof(buf), "bundle_cst_%u", t);
+ cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)n_instr_max);
+ DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
+ num_cst_bundle++;
+
+ foreach_linked_irns(ba->head_ilp_nodes, irn) {
+ be_ilpsched_irn_t *node;
+ ilpsched_node_attr_t *na;
+ int tp_idx;
+
+ /* Projs and Keeps do not contribute to bundle size */
+ if (is_Proj(irn) || be_is_Keep(irn))
+ continue;
+
+ node = get_ilpsched_irn(env, irn);
+ na = get_ilpsched_node_attr(node);
+
+ /* nodes assigned to DUMMY unit do not contribute to bundle size */
+ if (na->is_dummy_node)
+ continue;
+
+ /* skip nodes whose scheduling window does not contain t */
+ if (t < na->asap - 1 || t > na->alap - 1)
+ continue;
+
+ /* collect the x variables of every allowed unit type at step t */
+ for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx)
+ ARR_APP1(int, var_indices, na->ilp_vars.x[ILPVAR_IDX(na, tp_idx, t)]);
+ }
+
+ if (ARR_LEN(var_indices) > 0)
+ lpp_set_factor_fast_bulk(lpp, cst, var_indices, ARR_LEN(var_indices), 1.0);
+
+ DEL_ARR_F(var_indices);
+ }
+ ilp_timer_pop();
+ DBG((env->dbg, LEVEL_1, "\t%u bundle constraints (%g sec)\n",
+ num_cst_bundle, ilp_timer_elapsed_usec(t_cst_bundle) / 1000000.0));
+}
+
+/**
+ * Create ILP dying nodes constraints:
+ * - force variable d_{nt}^k to 1 if node n dies at step t on unit k,
+ *   i.e. if all of n's in-block consumers are scheduled no later than t
+ *
+ * @param env        the ILP scheduling environment
+ * @param lpp        the LP problem the constraints are added to
+ * @param block_node the scheduling node of the current block
+ */
+static void create_dying_nodes_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
+ char buf[1024];
+ unsigned t;
+ unsigned num_cst = 0;
+ ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
+#ifdef WITH_LIBCORE
+ lc_timer_t *t_cst = lc_timer_register("beilpsched_cst_dying_nodes", "create dying nodes constraints");
+#endif /* WITH_LIBCORE */
+
+ ilp_timer_push(t_cst);
+ /* check all time_steps */
+ for (t = 0; t < ba->max_steps; ++t) {
+ ir_node *irn;
+
+ /* for all nodes */
+ foreach_linked_irns(ba->head_ilp_nodes, irn) {
+ be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);
+ ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
+
+ /* if node has no consumer within current block, it cannot die here */
+ /* we also ignore nodes assigned to dummy unit */
+ if (ARR_LEN(na->block_consumer) < 1 || na->is_dummy_node)
+ continue;
+
+ /* node can only die here if t at least asap(n) */
+ if (t >= na->asap - 1) {
+ int node_tp_idx;
+
+ /* for all unit types */
+ for (node_tp_idx = na->n_unit_types - 1; node_tp_idx >= 0; --node_tp_idx) {
+ int tp_idx, i, cst;
+ int *tmp_var_idx = NEW_ARR_F(int, 0);
+
+ /* NOTE(review): the constraint name encodes only t and the node
+ index, not node_tp_idx; for nodes with more than one unit type
+ lpp_add_cst_uniq is handed a duplicate name -- verify this is
+ intended (or handled) by the lpp layer. */
+ snprintf(buf, sizeof(buf), "dying_node_cst_%u_n%u", t, get_irn_idx(irn));
+ cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)(na->n_consumer - 1));
+ DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
+ num_cst++;
+
+ /* number of consumer scheduled till t */
+ for (i = ARR_LEN(na->block_consumer) - 1; i >= 0; --i) {
+ be_ilpsched_irn_t *cons = get_ilpsched_irn(env, na->block_consumer[i]);
+ ilpsched_node_attr_t *ca = get_ilpsched_node_attr(cons);
+
+ for (tp_idx = ca->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
+ unsigned tm;
+
+ /* collect consumer x variables within the consumer's
+ window, up to and including step t */
+ for (tm = ca->asap - 1; tm <= t && tm <= ca->alap - 1; ++tm) {
+ int idx = ILPVAR_IDX(ca, tp_idx, tm);
+ ARR_APP1(int, tmp_var_idx, ca->ilp_vars.x[idx]);
+ }
+ }
+ }
+
+ /* could be that no consumer can be scheduled at this point */
+ if (ARR_LEN(tmp_var_idx)) {
+ int idx;
+ unsigned tn;
+
+ /* subtract possible prior kill points */
+ for (tn = na->asap - 1; tn < t; ++tn) {
+ idx = ILPVAR_IDX_DEAD(ba, na, node_tp_idx, tn);
+ lpp_set_factor_fast(lpp, cst, na->ilp_vars.d[idx], -1.0);
+ }
+
+ /* the d variable for step t gets coefficient -n_consumer, so the
+ constraint (<= n_consumer - 1) forces d = 1 once all consumers
+ are scheduled at or before t */
+ idx = ILPVAR_IDX_DEAD(ba, na, node_tp_idx, t);
+ lpp_set_factor_fast(lpp, cst, na->ilp_vars.d[idx], 0.0 - (double)(na->n_consumer));
+ lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
+ }
+
+ DEL_ARR_F(tmp_var_idx);
+ }
+ }
+
+ }
+ }
+ ilp_timer_pop();
+ DBG((env->dbg, LEVEL_1, "\t%u dying nodes constraints (%g sec)\n",
+ num_cst, ilp_timer_elapsed_usec(t_cst) / 1000000.0));
+}
+
+/**
+ * Create ILP alive nodes constraints:
+ * - set variable a_{nt}^k to 1 if node n is alive at step t on unit k
+ */
+static void create_alive_nodes_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
+ char buf[1024];
+ ir_node *irn;
+ unsigned num_cst = 0;
+ ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
+#ifdef WITH_LIBCORE
+ lc_timer_t *t_cst = lc_timer_register("beilpsched_cst_alive_nodes", "create alive nodes constraints");
+#endif /* WITH_LIBCORE */
+
+ ilp_timer_push(t_cst);
+ /* for each node */
+ foreach_linked_irns(ba->head_ilp_nodes, irn) {
+ be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);