static char _mark;
#define MARK &_mark
-typedef struct _trace_irn {
+typedef struct trace_irn {
sched_timestep_t delay; /**< The delay for this node if already calculated, else 0. */
sched_timestep_t etime; /**< The earliest time of this node. */
unsigned num_user; /**< The number real users (mode datab) of this node */
unsigned is_root : 1; /**< is a root node of a block */
} trace_irn_t;
-typedef struct _trace_env {
+typedef struct trace_env {
trace_irn_t *sched_info; /**< trace scheduling information about the nodes */
sched_timestep_t curr_time; /**< current time of the scheduler */
void *selector_env; /**< the backend selector environment */
/**
* Get the current delay.
*/
-static inline sched_timestep_t get_irn_delay(trace_env_t *env, ir_node *n) {
+static inline sched_timestep_t get_irn_delay(trace_env_t *env, ir_node *n)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Set the current delay.
*/
-static inline void set_irn_delay(trace_env_t *env, ir_node *n, sched_timestep_t delay) {
+static inline void set_irn_delay(trace_env_t *env, ir_node *n, sched_timestep_t delay)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Get the current etime.
*/
-static inline sched_timestep_t get_irn_etime(trace_env_t *env, ir_node *n) {
+static inline sched_timestep_t get_irn_etime(trace_env_t *env, ir_node *n)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Set the current etime.
*/
-static inline void set_irn_etime(trace_env_t *env, ir_node *n, sched_timestep_t etime) {
+static inline void set_irn_etime(trace_env_t *env, ir_node *n, sched_timestep_t etime)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Get the number of users.
*/
-static inline unsigned get_irn_num_user(trace_env_t *env, ir_node *n) {
+static inline unsigned get_irn_num_user(trace_env_t *env, ir_node *n)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Set the number of users.
*/
-static inline void set_irn_num_user(trace_env_t *env, ir_node *n, unsigned num_user) {
+static inline void set_irn_num_user(trace_env_t *env, ir_node *n, unsigned num_user)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Get the register difference.
*/
-static inline int get_irn_reg_diff(trace_env_t *env, ir_node *n) {
+static inline int get_irn_reg_diff(trace_env_t *env, ir_node *n)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Set the register difference.
*/
-static inline void set_irn_reg_diff(trace_env_t *env, ir_node *n, int reg_diff) {
+static inline void set_irn_reg_diff(trace_env_t *env, ir_node *n, int reg_diff)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Get the pre-order position.
*/
-static inline int get_irn_preorder(trace_env_t *env, ir_node *n) {
+static inline int get_irn_preorder(trace_env_t *env, ir_node *n)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Set the pre-order position.
*/
-static inline void set_irn_preorder(trace_env_t *env, ir_node *n, int pos) {
+static inline void set_irn_preorder(trace_env_t *env, ir_node *n, int pos)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
 * Get the critical path length.
 */
-static inline unsigned get_irn_critical_path_len(trace_env_t *env, ir_node *n) {
+static inline unsigned get_irn_critical_path_len(trace_env_t *env, ir_node *n)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
 * Set the critical path length.
 */
-static inline void set_irn_critical_path_len(trace_env_t *env, ir_node *n, unsigned len) {
+static inline void set_irn_critical_path_len(trace_env_t *env, ir_node *n, unsigned len)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* returns the exec-time for node n.
*/
-static sched_timestep_t exectime(trace_env_t *env, ir_node *n) {
+static sched_timestep_t exectime(trace_env_t *env, ir_node *n)
+{
if (be_is_Keep(n) || is_Proj(n))
return 0;
if (env->selector->exectime)
/**
* Calculates the latency for between two ops
*/
-static sched_timestep_t latency(trace_env_t *env, ir_node *pred, int pred_cycle, ir_node *curr, int curr_cycle) {
+static sched_timestep_t latency(trace_env_t *env, ir_node *pred, int pred_cycle, ir_node *curr, int curr_cycle)
+{
/* a Keep hides a root */
if (be_is_Keep(curr))
return exectime(env, pred);
/**
* Returns the number of users of a node having mode datab.
*/
-static int get_num_successors(ir_node *irn) {
+static int get_num_successors(ir_node *irn)
+{
int sum = 0;
const ir_edge_t *edge;
/**
* Returns the difference of regs_output - regs_input;
*/
-static int get_reg_difference(trace_env_t *env, ir_node *irn) {
+static int get_reg_difference(trace_env_t *env, ir_node *irn)
+{
int num_out = 0;
int num_in = 0;
int i;
/**
* descent into a dag and create a pre-order list.
*/
-static void descent(ir_node *root, ir_node *block, ir_node **list, trace_env_t *env, unsigned path_len) {
+static void descent(ir_node *root, ir_node *block, ir_node **list, trace_env_t *env, unsigned path_len)
+{
int i;
if (! is_Phi(root)) {
/**
* Returns non-zero if root is a root in the block block.
*/
-static int is_root(ir_node *root, ir_node *block) {
+static int is_root(ir_node *root, ir_node *block)
+{
const ir_edge_t *edge;
foreach_out_edge(root, edge) {
/**
* Performs initial block calculations for trace scheduling.
*/
-static void trace_preprocess_block(trace_env_t *env, ir_node *block) {
+static void trace_preprocess_block(trace_env_t *env, ir_node *block)
+{
ir_node *root = NULL, *preord = NULL;
ir_node *curr, *irn;
int cur_pos;
foreach_out_edge(block, edge) {
ir_node *succ = get_edge_src_irn(edge);
- if (is_Block(succ)) {
- /* A Block-Block edge. This should be the MacroBlock
- * edge, ignore it. */
- assert(get_Block_MacroBlock(succ) == block && "Block-Block edge found");
- continue;
- }
if (is_Anchor(succ)) {
/* ignore a keep alive edge */
continue;
/**
* This functions gets called after a node finally has been made ready.
*/
-static void trace_node_ready(void *data, ir_node *irn, ir_node *pred) {
+static void trace_node_ready(void *data, ir_node *irn, ir_node *pred)
+{
trace_env_t *env = data;
sched_timestep_t etime_p, etime;
/**
* Update the current time after irn has been selected.
*/
-static void trace_update_time(void *data, ir_node *irn) {
+static void trace_update_time(void *data, ir_node *irn)
+{
trace_env_t *env = data;
- if (is_Phi(irn) || get_irn_opcode(irn) == iro_Start) {
+ if (is_Phi(irn) || get_irn_opcode(irn) == beo_Start) {
env->curr_time += get_irn_etime(env, irn);
}
else {
/**
* Allocates memory and initializes trace scheduling environment.
- * @param birg The backend irg object
+ * @param irg The backend irg object
* @return The environment
*/
-static trace_env_t *trace_init(const be_irg_t *birg) {
+static trace_env_t *trace_init(ir_graph *irg)
+{
trace_env_t *env = XMALLOCZ(trace_env_t);
- ir_graph *irg = be_get_birg_irg(birg);
int nn = get_irg_last_idx(irg);
env->curr_time = 0;
* Frees all memory allocated for trace scheduling environment.
* @param env The environment
*/
-static void trace_free(void *data) {
+static void trace_free(void *data)
+{
trace_env_t *env = data;
be_liveness_free(env->liveness);
DEL_ARR_F(env->sched_info);
return irn;
}
-static void *muchnik_init_graph(const list_sched_selector_t *vtab, const be_irg_t *birg)
+static void *muchnik_init_graph(const list_sched_selector_t *vtab, ir_graph *irg)
{
- trace_env_t *env = trace_init(birg);
+ trace_env_t *env = trace_init(irg);
env->selector = vtab;
- env->selector_env = (void*) be_get_birg_arch_env(birg);
+ env->selector_env = (void*) be_get_irg_arch_env(irg);
return (void *)env;
}
muchnik_init_graph,
muchnik_init_block,
muchnik_select,
- NULL, /* to_appear_in_schedule */
trace_node_ready, /* node_ready */
trace_update_time, /* node_selected */
NULL, /* exectime */
muchnik_init_graph,
muchnik_init_block,
heuristic_select,
- NULL, /* to_appear_in_schedule */
trace_node_ready, /* node_ready */
trace_update_time, /* node_selected */
NULL, /* exectime */