+ /* Third step: calculate the Delay. Note that our
+ * list is now in pre-order, starting at root
+ */
+ for (cur_pos = 0, curr = root; curr; curr = get_irn_link(curr), cur_pos++) {
+ sched_timestep_t d;
+
+ if (arch_irn_class_is(env->arch_env, curr, branch)) {
			/* ensure that branches can be executed last */
+ d = 0;
+ }
+ else {
+ if (is_root_node(&be, curr))
+ d = exectime(env, curr);
+ else {
+ d = 0;
+ foreach_out_edge(curr, edge) {
+ ir_node *n = get_edge_src_irn(edge);
+
+ if (get_nodes_block(n) == block) {
+ sched_timestep_t ld;
+
+ ld = latency(env, curr, 1, n, 0) + get_irn_delay(&be, n);
+ d = ld > d ? ld : d;
+ }
+ }
+ }
+ }
+ set_irn_delay(&be, curr, d);
+ DB((be.dbg, LEVEL_2, "\t%+F delay %u\n", curr, d));
+
+ /* set the etime of all nodes to 0 */
+ set_irn_etime(&be, curr, 0);
+
+ set_irn_preorder(&be, curr, cur_pos);
+ }
+
+
	/* Then we can add all nodes that are already ready to the set. */
+ foreach_out_edge(block, edge) {
+ ir_node *irn = get_edge_src_irn(edge);
+
+ /* Skip the end node because of keepalive edges. */
+ if (get_irn_opcode(irn) == iro_End)
+ continue;
+
+ if (is_Phi(irn)) {
+ /* Phi functions are scheduled immediately, since they only transfer
+ * data flow from the predecessors to this block. */
+
+ /* Increase the time step. */
+ be.curr_time += get_irn_etime(&be, irn);
+ add_to_sched(&be, irn);
+ make_users_ready(&be, irn);
+ }
+ else if (irn == start_node) {
+ /* The start block will be scheduled as the first node */
+ be.curr_time += get_irn_etime(&be, irn);
+
+ add_to_sched(&be, irn);
+ add_tuple_projs(&be, irn);
+ }
+ else {
+ /* Other nodes must have all operands in other blocks to be made
+ * ready */
+ int ready = 1;
+
+ /* Check, if the operands of a node are not local to this block */
+ for (j = 0, m = get_irn_arity(irn); j < m; ++j) {
+ ir_node *operand = get_irn_n(irn, j);
+
+ if (get_nodes_block(operand) == block) {
+ ready = 0;
+ break;
+ }
+ else {
+ /* live in values increase register pressure */
+ update_sched_liveness(&be, operand);
+ }
+ }
+
+ /* Make the node ready, if all operands live in a foreign block */
+ if (ready) {
+ DBG((be.dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
+ make_ready(&be, NULL, irn);
+ }
+
+ /* calculate number of users (needed for heuristic) */
+ set_irn_num_user(&be, irn, get_num_successors(irn));
+
+ /* calculate register difference (needed for heuristic) */
+ set_irn_reg_diff(&be, irn, get_reg_difference(&be, irn));
+ }
+ }
+
+ while (nodeset_count(be.cands) > 0) {
+ nodeset *mcands; /**< the set of candidates with maximum delay time */
+ nodeset *ecands; /**< the set of nodes in mcands whose etime <= curr_time */
+ nodeset *local_cands;
+ sched_timestep_t max_delay = 0;
+
+ /* collect statistics about amount of ready nodes */
+ be_do_stat_sched_ready(block, be.cands);
+
+ /* calculate the max delay of all candidates */
+ foreach_nodeset(be.cands, irn) {
+ sched_timestep_t d = get_irn_delay(&be, irn);
+
+ max_delay = d > max_delay ? d : max_delay;
+ }
+
+ if (be.sched_env->sel_strategy & BE_SCHED_SELECT_MUCHNIK) {
+ mcands = new_nodeset(8);
+ ecands = new_nodeset(8);
+ }
+ else {
+ local_cands = new_nodeset(8);
+ }
+
+ /* calculate mcands and ecands */
+ foreach_nodeset(be.cands, irn) {
+ if (be_is_Keep(irn)) {
+ nodeset_break(be.cands);
+ break;
+ }
+ if (be.sched_env->sel_strategy & BE_SCHED_SELECT_MUCHNIK) {
+ if (get_irn_delay(&be, irn) == max_delay) {
+ nodeset_insert(mcands, irn);
+ if (get_irn_etime(&be, irn) <= be.curr_time)
+ nodeset_insert(ecands, irn);
+ }
+ }
+ else {
+ nodeset_insert(local_cands, irn);
+ }
+ }
+
+ if (irn) {
+ /* Keeps must be immediately scheduled */
+ }
+ else {
+ DB((be.dbg, LEVEL_2, "\tbe.curr_time = %u\n", be.curr_time));
+
+ /* select a node to be scheduled and check if it was ready */
+ if (! (be.sched_env->sel_strategy & BE_SCHED_SELECT_MUCHNIK)) {
+ DB((be.dbg, LEVEL_3, "\tlocal_cands = %d\n", nodeset_count(local_cands)));
+ irn = select_node_heuristic(&be, local_cands);
+ }
+ else if (nodeset_count(mcands) == 1) {
+ irn = nodeset_first(mcands);
+ DB((be.dbg, LEVEL_3, "\tirn = %+F, mcand = 1, max_delay = %u\n", irn, max_delay));
+ }
+ else {
+ int cnt = nodeset_count(ecands);
+ if (cnt == 1) {
+ irn = nodeset_first(ecands);
+
+ if (arch_irn_class_is(env->arch_env, irn, branch)) {
+ /* BEWARE: don't select a JUMP if others are still possible */
+ goto force_mcands;
+ }
+ DB((be.dbg, LEVEL_3, "\tirn = %+F, ecand = 1, max_delay = %u\n", irn, max_delay));
+ }
+ else if (cnt > 1) {
+ DB((be.dbg, LEVEL_3, "\tecand = %d, max_delay = %u\n", cnt, max_delay));
+ irn = select_node_heuristic(&be, ecands);
+ }
+ else {
+force_mcands:
+ DB((be.dbg, LEVEL_3, "\tmcand = %d\n", nodeset_count(mcands)));
+ irn = select_node_heuristic(&be, mcands);
+ }
+ }
+ }
+
+ if (be.sched_env->sel_strategy & BE_SCHED_SELECT_MUCHNIK) {
+ del_nodeset(mcands);
+ del_nodeset(ecands);
+ }
+ else {
+ del_nodeset(local_cands);
+ }
+
+ DB((be.dbg, LEVEL_2, "\tpicked node %+F\n", irn));
+
+ /* Increase the time step. */
+ be.curr_time += exectime(env, irn);
+
+ /* Add the node to the schedule. */
+ add_to_sched(&be, irn);
+
+ if (get_irn_mode(irn) == mode_T)
+ add_tuple_projs(&be, irn);
+ else
+ make_users_ready(&be, irn);
+
+ /* remove the scheduled node from the ready list. */
+ if (nodeset_find(be.cands, irn))
+ nodeset_remove(be.cands, irn);
+ }
+
+ if (selector->finish_block)
+ selector->finish_block(be.selector_block_env);
+
+ del_nodeset(be.cands);
+ del_nodeset(be.live);
+}
+
/* Callback table for the register-pressure driven list scheduler.
 * Slot order must match the field order of list_sched_selector_t
 * (declared elsewhere); NULL slots mean "no hook, use the scheduler's
 * default behaviour" — the inline comments name those slots. */
static const list_sched_selector_t reg_pressure_selector_struct = {
	reg_pressure_graph_init,   /* per-graph initialization */
	reg_pressure_block_init,   /* per-block initialization */
	reg_pressure_select,       /* node selection heuristic */
	NULL,                      /* to_appear_in_schedule */
	NULL,                      /* exectime */
	NULL,                      /* latency */
	reg_pressure_block_free,   /* NOTE(review): presumably the finish_block hook (cf. selector->finish_block above) — confirm against list_sched_selector_t */
	free                       /* NOTE(review): presumably the finish_graph hook (cf. selector->finish_graph below) — confirm */
};
+
+const list_sched_selector_t *reg_pressure_selector = ®_pressure_selector_struct;
+
+/* List schedule a graph. */
+void list_sched(const be_irg_t *birg, be_options_t *be_opts)
+{
+ const arch_env_t *arch_env = birg->main_env->arch_env;
+ ir_graph *irg = birg->irg;
+
+ int num_nodes;
+ sched_env_t env;
+ mris_env_t *mris;
+
+ /* Assure, that the out edges are computed */
+ edges_assure(irg);
+
+ if(be_opts->mris)
+ mris = be_sched_mris_preprocess(birg);
+
+ num_nodes = get_irg_last_idx(irg);
+
+ memset(&env, 0, sizeof(env));
+ env.selector = arch_env->isa->impl->get_list_sched_selector(arch_env->isa);
+ env.arch_env = arch_env;
+ env.irg = irg;
+ env.sel_strategy = be_opts->sched_select;
+ env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);
+
+ memset(env.sched_info, 0, num_nodes * sizeof(*env.sched_info));
+
+ if (env.selector->init_graph)
+ env.selector_env = env.selector->init_graph(env.selector, arch_env, irg);
+
+ /* Schedule each single block. */
+ irg_block_walk_graph(irg, list_sched_block, NULL, &env);
+
+ if (env.selector->finish_graph)
+ env.selector->finish_graph(env.selector_env);
+
+ if(be_opts->mris)
+ be_sched_mris_free(mris);
+
+ DEL_ARR_F(env.sched_info);