+/*
+ * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
+ *
+ * This file is part of libFirm.
+ *
+ * This file may be distributed and/or modified under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation and appearing in the file LICENSE.GPL included in the
+ * packaging of this file.
+ *
+ * Licensees holding valid libFirm Professional Edition licenses may use
+ * this file in accordance with the libFirm Commercial License
+ * Agreement provided with the Software.
+ *
+ * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
/**
- * Implements a trace scheduler as presented in Muchnik[TM].
- * Originally implemented by Michael Beck.
- * @author Christian Wuerdig
- * @date 28.08.2006
- * @cvs-id $Id$
+ * @file
+ * @brief Implements a trace scheduler as presented in Muchnik[TM].
+ * @author Michael Beck
+ * @date 28.08.2006
+ * @version $Id$
*/
-#ifdef HAVE_CONFIG_H
-#include <config.h>
-#endif
+#include "config.h"
#include <stdlib.h>
typedef struct _trace_env {
trace_irn_t *sched_info; /**< trace scheduling information about the nodes */
- const arch_env_t *arch_env; /**< the arch environment */
sched_timestep_t curr_time; /**< current time of the scheduler */
void *selector_env; /**< the backend selector environment */
const list_sched_selector_t *selector; /**< the actual backend selector */
/**
* Returns non-zero if the node is a root node
*/
-static INLINE unsigned is_root_node(trace_env_t *env, ir_node *n)
+static inline unsigned is_root_node(trace_env_t *env, ir_node *n)
{
int idx = get_irn_idx(n);
/**
* Mark a node as root node
*/
-static INLINE void mark_root_node(trace_env_t *env, ir_node *n)
+static inline void mark_root_node(trace_env_t *env, ir_node *n)
{
int idx = get_irn_idx(n);
/**
* Get the current delay.
*/
-static INLINE sched_timestep_t get_irn_delay(trace_env_t *env, ir_node *n) {
+static inline sched_timestep_t get_irn_delay(trace_env_t *env, ir_node *n) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Set the current delay.
*/
-static INLINE void set_irn_delay(trace_env_t *env, ir_node *n, sched_timestep_t delay) {
+static inline void set_irn_delay(trace_env_t *env, ir_node *n, sched_timestep_t delay) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Get the current etime.
*/
-static INLINE sched_timestep_t get_irn_etime(trace_env_t *env, ir_node *n) {
+static inline sched_timestep_t get_irn_etime(trace_env_t *env, ir_node *n) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Set the current etime.
*/
-static INLINE void set_irn_etime(trace_env_t *env, ir_node *n, sched_timestep_t etime) {
+static inline void set_irn_etime(trace_env_t *env, ir_node *n, sched_timestep_t etime) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Get the number of users.
*/
-static INLINE unsigned get_irn_num_user(trace_env_t *env, ir_node *n) {
+static inline unsigned get_irn_num_user(trace_env_t *env, ir_node *n) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Set the number of users.
*/
-static INLINE void set_irn_num_user(trace_env_t *env, ir_node *n, unsigned num_user) {
+static inline void set_irn_num_user(trace_env_t *env, ir_node *n, unsigned num_user) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Get the register difference.
*/
-static INLINE int get_irn_reg_diff(trace_env_t *env, ir_node *n) {
+static inline int get_irn_reg_diff(trace_env_t *env, ir_node *n) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Set the register difference.
*/
-static INLINE void set_irn_reg_diff(trace_env_t *env, ir_node *n, int reg_diff) {
+static inline void set_irn_reg_diff(trace_env_t *env, ir_node *n, int reg_diff) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Get the pre-order position.
*/
-static INLINE int get_irn_preorder(trace_env_t *env, ir_node *n) {
+static inline int get_irn_preorder(trace_env_t *env, ir_node *n) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Set the pre-order position.
*/
-static INLINE void set_irn_preorder(trace_env_t *env, ir_node *n, int pos) {
+static inline void set_irn_preorder(trace_env_t *env, ir_node *n, int pos) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Get the length of the critical path.
*/
-static INLINE unsigned get_irn_critical_path_len(trace_env_t *env, ir_node *n) {
+static inline unsigned get_irn_critical_path_len(trace_env_t *env, ir_node *n) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Set the length of the critical path.
*/
-static INLINE void set_irn_critical_path_len(trace_env_t *env, ir_node *n, unsigned len) {
+static inline void set_irn_critical_path_len(trace_env_t *env, ir_node *n, unsigned len) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
ir_node *block = get_nodes_block(irn);
if (be_is_Call(irn)) {
- /* we want calls prefered */
+ /* we want calls preferred */
return -5;
}
for (i = get_irn_arity(irn) - 1; i >= 0; i--) {
ir_node *in = get_irn_n(irn, i);
- if (! be_is_live_end(env->liveness, block, in) && /* if the value lives outside of block: do not count */
- mode_is_datab(get_irn_mode(in)) && /* must be data node */
- ! arch_irn_is(env->arch_env, in, ignore)) /* ignore "ignore" nodes :) */
- num_in++;
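+ /* must be a data node */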
+ if (!mode_is_datab(get_irn_mode(in)))
+ continue;
+
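+ /* ignore "ignore" nodes */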
+ if (arch_irn_is_ignore(in))
+ continue;
+
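+ /* if the value lives outside of the block: do not count */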
+ if (be_is_live_end(env->liveness, block, in))
+ continue;
+
+ num_in++;
}
return num_out - num_in;
foreach_out_edge(block, edge) {
ir_node *succ = get_edge_src_irn(edge);
+ if (is_Block(succ)) {
+ /* A Block-Block edge. This should be the MacroBlock
+ * edge, ignore it. */
+ assert(get_Block_MacroBlock(succ) == block && "Block-Block edge found");
+ continue;
+ }
+ if (is_Anchor(succ)) {
+ /* ignore a keep alive edge */
+ continue;
+ }
if (is_root(succ, block)) {
mark_root_node(env, succ);
set_irn_link(succ, root);
for (cur_pos = 0, curr = root; curr; curr = get_irn_link(curr), cur_pos++) {
sched_timestep_t d;
- if (arch_irn_class_is(env->arch_env, curr, branch)) {
+ if (arch_irn_class_is(curr, branch)) {
/* assure that branches can be executed last */
d = 0;
}
* @param birg The backend irg object
* @return The environment
*/
-static trace_env_t *trace_init(const arch_env_t *arch_env, ir_graph *irg) {
- trace_env_t *env = xcalloc(1, sizeof(*env));
+static trace_env_t *trace_init(const be_irg_t *birg) {
+ trace_env_t *env = XMALLOCZ(trace_env_t);
+ ir_graph *irg = be_get_birg_irg(birg);
int nn = get_irg_last_idx(irg);
- env->arch_env = arch_env;
env->curr_time = 0;
env->sched_info = NEW_ARR_F(trace_irn_t, nn);
env->liveness = be_liveness(irg);
FIRM_DBG_REGISTER(env->dbg, "firm.be.sched.trace");
+ be_liveness_assure_chk(env->liveness);
memset(env->sched_info, 0, nn * sizeof(*(env->sched_info)));
return env;
/**
* Simple selector. Just assure that jumps are scheduled last.
*/
-static ir_node *basic_selection(const arch_env_t *arch_env, ir_nodeset_t *ready_set) {
+static ir_node *basic_selection(ir_nodeset_t *ready_set)
+{
ir_node *irn = NULL;
ir_nodeset_iterator_t iter;
/* assure that branches and constants are executed last */
foreach_ir_nodeset(ready_set, irn, iter) {
- if (! arch_irn_class_is(arch_env, irn, branch)) {
+ if (!arch_irn_class_is(irn, branch)) {
return irn;
}
}
ir_nodeset_iterator_t iter;
sched_timestep_t max_delay = 0;
ir_node *irn;
+ (void) live_set;
/* calculate the max delay of all candidates */
foreach_ir_nodeset(ready_set, irn, iter) {
if (cnt == 1) {
irn = get_nodeset_node(&ecands);
- if (arch_irn_class_is(env->arch_env, irn, branch)) {
+ if (arch_irn_class_is(irn, branch)) {
/* BEWARE: don't select a JUMP if others are still possible */
goto force_mcands;
}
}
else if (cnt > 1) {
DB((env->dbg, LEVEL_3, "\tecand = %d, max_delay = %u\n", cnt, max_delay));
- irn = basic_selection(env->arch_env, &ecands);
+ irn = basic_selection(&ecands);
}
else {
force_mcands:
DB((env->dbg, LEVEL_3, "\tmcand = %d\n", ir_nodeset_size(&mcands)));
- irn = basic_selection(env->arch_env, &mcands);
+ irn = basic_selection(&mcands);
}
}
return irn;
}
-static void *muchnik_init_graph(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
+static void *muchnik_init_graph(const list_sched_selector_t *vtab, const be_irg_t *birg)
{
- trace_env_t *env = trace_init(arch_env, irg);
+ trace_env_t *env = trace_init(birg);
env->selector = vtab;
- env->selector_env = (void*) arch_env;
+ env->selector_env = (void*) be_get_birg_arch_env(birg);
return (void *)env;
}
return graph_env;
}
-static const list_sched_selector_t muchnik_selector_struct = {
+const list_sched_selector_t muchnik_selector = {
muchnik_init_graph,
muchnik_init_block,
muchnik_select,
trace_free /* finish_graph */
};
-const list_sched_selector_t *muchnik_selector = &muchnik_selector_struct;
-
/**
* Execute the heuristic function.
*/
/* priority-based selection, heuristic inspired by Mueller's dissertation */
foreach_ir_nodeset(ns, irn, iter) {
/* make sure that branches are scheduled last */
- if (! arch_irn_class_is(trace_env->arch_env, irn, branch)) {
+ if (!arch_irn_class_is(irn, branch)) {
int rdiff = get_irn_reg_diff(trace_env, irn);
int sign = rdiff < 0;
int chg = (rdiff < 0 ? -rdiff : rdiff) << PRIO_CHG_PRESS;
- //reg_fact = chg << cur_pressure;
+ /* reg_fact = chg << cur_pressure; */
reg_fact = chg * cur_pressure;
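/* presumably guards against overflow of the multiplication above */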
if (reg_fact < chg)
reg_fact = INT_MAX - 2;
DBG((trace_env->dbg, LEVEL_4, "heuristic selected %+F:\n", cand));
}
else {
- cand = basic_selection(trace_env->arch_env, ns);
+ cand = basic_selection(ns);
}
return cand;
}
-static const list_sched_selector_t heuristic_selector_struct = {
+const list_sched_selector_t heuristic_selector = {
muchnik_init_graph,
muchnik_init_block,
heuristic_select,
NULL, /* finish_block */
trace_free /* finish_graph */
};
-
-const list_sched_selector_t *heuristic_selector = &heuristic_selector_struct;