DBG((dbg, LEVEL_1, "\tinsn: %+F, pressure: %d\n", irn, pressure));
DBG((dbg, LEVEL_2, "\tlive: %B\n", live));
+#ifndef SCHEDULE_PROJS
+ if (get_irn_mode(irn) == mode_T) {
+ const ir_edge_t *edge;
+
+ foreach_out_edge(irn, edge) {
+ ir_node *proj = get_edge_src_irn(edge);
+
+ /*
+	 * If the node defines some value, which can be put into a
+ * register of the current class, make a border for it.
+ */
+ if(has_reg_class(env, proj)) {
+ int nr = get_irn_idx(proj);
+
+ bitset_clear(live, nr);
+ border_def(proj, step, 1);
+ }
+ }
+ }
+#endif
/*
 * If the node defines some value, which can be put into a
* register of the current class, make a border for it.
ir_node *proj = new_r_Proj(irg, bl, perm, mode, i);
arch_set_irn_register(arch_env, proj, reg);
+#ifdef SCHEDULE_PROJS
sched_add_after(curr, proj);
+#endif
curr = proj;
be_ssa_construction_init(&senv, birg);
/**
* Mark a node as already scheduled
*/
-static INLINE void mark_already_scheduled(block_sched_env_t *env, ir_node *n)
+static INLINE void set_already_scheduled(block_sched_env_t *env, ir_node *n)
{
int idx = get_irn_idx(n);
return 1;
}
+/**
+ * Returns non-zero if a node must be placed in the schedule.
+ */
+static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
+{
+ int res = -1;
+
+ /* if there are no uses, don't schedule */
+ if (get_irn_n_edges(irn) < 1)
+ return 0;
+
+ /* else ask the scheduler */
+ if (sel->to_appear_in_schedule)
+ res = sel->to_appear_in_schedule(block_env, irn);
+
+ return res >= 0 ? res : ((to_appear_in_schedule(irn) || BE_SCHED_NODE(irn)) && ! is_Unknown(irn));
+}
+
+/* forward */
+static void make_users_ready(block_sched_env_t *env, ir_node *irn);
+
+static void make_user_ready(block_sched_env_t *env, ir_node *pred, ir_node *user) {
+ if (! is_Phi(user)) {
+ if (! must_appear_in_schedule(env->selector, env, user)) {
+ /* notify the selector about the finally selected node. */
+ if (env->selector->node_selected)
+ env->selector->node_selected(env->selector_block_env, user);
+
+ /* Insert the node in the set of all available scheduled nodes. */
+ set_already_scheduled(env, user);
+
+ make_users_ready(env, user);
+ } else {
+ if (! ir_nodeset_contains(&env->cands, user)) {
+ /* work-around: this should NEVER be true, else we have a cycle in the basic block.
+ for now it's needed to compile bzip2.c */
+ if (sched_is_scheduled(user)) {
+ //assert(!"make an already scheduled user ready");
+ }
+ else {
+ make_ready(env, pred, user);
+ }
+ }
+ }
+ }
+}
+
+
/**
* Try, to make all users of a node ready.
* In fact, a usage node can only be made ready, if all its operands
* @param env The block schedule environment.
* @param irn The node, which usages (successors) are to be made ready.
*/
-static INLINE void make_users_ready(block_sched_env_t *env, ir_node *irn)
+static void make_users_ready(block_sched_env_t *env, ir_node *irn)
{
const ir_edge_t *edge;
+ /* make all data users ready */
foreach_out_edge(irn, edge) {
ir_node *user = get_edge_src_irn(edge);
- if (! is_Phi(user))
- make_ready(env, irn, user);
+
+ if (get_block(user) == env->block)
+ make_user_ready(env, irn, user);
}
+ /* and the dependent nodes as well */
foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
ir_node *user = get_edge_src_irn(edge);
- if (! is_Phi(user))
- make_ready(env, irn, user);
+
+ if (get_block(user) == env->block)
+ make_user_ready(env, irn, user);
}
}
}
}
-static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
-{
- int res = -1;
-
- if (get_irn_n_edges(irn) < 1)
- return 0;
-
- if (sel->to_appear_in_schedule)
- res = sel->to_appear_in_schedule(block_env, irn);
-
- return res >= 0 ? res : ((to_appear_in_schedule(irn) || BE_SCHED_NODE(irn)) && ! is_Unknown(irn));
-}
-
/**
* Append an instruction to a schedule.
* @param env The block scheduling environment.
if (env->selector->node_selected)
env->selector->node_selected(env->selector_block_env, irn);
- /* Insert the node in the set of all already scheduled nodes. */
- mark_already_scheduled(env, irn);
+ /* Insert the node in the set of all available scheduled nodes. */
+ set_already_scheduled(env, irn);
/* Remove the node from the ready set */
ir_nodeset_remove(&env->cands, irn);
return irn;
}
+#ifdef SCHEDULE_PROJS
/**
* Add the proj nodes of a tuple-mode irn to the schedule immediately
* after the tuple-moded irn. By pinning the projs after the irn, no
}
}
}
+#endif
/**
* Perform list scheduling on a block.
else if (irn == start_node) {
/* The start block will be scheduled as the first node */
add_to_sched(&be, irn);
+#ifdef SCHEDULE_PROJS
add_tuple_projs(&be, irn);
+#else
+ make_users_ready(&be, irn);
+#endif
}
else {
/* Other nodes must have all operands in other blocks to be made
/* collect statistics about amount of ready nodes */
be_do_stat_sched_ready(block, &be.cands);
- /* Keeps must be scheduled immediatly */
+ /* Keeps must be scheduled immediately */
foreach_ir_nodeset(&be.cands, irn, iter) {
if (be_is_Keep(irn) || be_is_CopyKeep(irn) || is_Sync(irn)) {
break;
/* Add the node to the schedule. */
add_to_sched(&be, irn);
+#ifdef SCHEDULE_PROJS
if (get_irn_mode(irn) == mode_T)
add_tuple_projs(&be, irn);
else
+#endif
+ {
make_users_ready(&be, irn);
+ }
/* remove the scheduled node from the ready list. */
ir_nodeset_remove(&be.cands, irn);
/* You should better break out of your loop when hitting the first phi function. */
assert(!is_Phi(irn) && "liveness_transfer produces invalid results for phi nodes");
- if(arch_irn_consider_in_reg_alloc(arch_env, cls, irn)) {
+#ifndef SCHEDULE_PROJS
+ /* kill all Proj's if a node is killed */
+ if (get_irn_mode(irn) == mode_T) {
+ const ir_edge_t *edge;
+
+ foreach_out_edge(irn, edge) {
+ ir_node *proj = get_edge_src_irn(edge);
+
+ if (arch_irn_consider_in_reg_alloc(arch_env, cls, proj)) {
+ ir_node *del = pset_remove_ptr(live, proj);
+ (void) del;
+ assert(proj == del);
+ }
+ }
+ }
+#endif
+
+ if (arch_irn_consider_in_reg_alloc(arch_env, cls, irn)) {
ir_node *del = pset_remove_ptr(live, irn);
(void) del;
assert(irn == del);
}
- for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
+ for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
ir_node *op = get_irn_n(irn, i);
- if(arch_irn_consider_in_reg_alloc(arch_env, cls, op))
+ if (arch_irn_consider_in_reg_alloc(arch_env, cls, op))
pset_insert_ptr(live, op);
}
* function. */
assert(!is_Phi(node) && "liveness_transfer produces invalid results for phi nodes");
- if(arch_irn_consider_in_reg_alloc(arch_env, cls, node)) {
+#ifndef SCHEDULE_PROJS
+ /* kill all Proj's if a node is killed */
+ if (get_irn_mode(node) == mode_T) {
+ const ir_edge_t *edge;
+
+ foreach_out_edge(node, edge) {
+ ir_node *proj = get_edge_src_irn(edge);
+
+ if (arch_irn_consider_in_reg_alloc(arch_env, cls, proj)) {
+ ir_nodeset_remove(nodeset, proj);
+ }
+ }
+ }
+#endif
+
+ if (arch_irn_consider_in_reg_alloc(arch_env, cls, node)) {
ir_nodeset_remove(nodeset, node);
}
arity = get_irn_arity(node);
- for(i = 0; i < arity; ++i) {
+ for (i = 0; i < arity; ++i) {
ir_node *op = get_irn_n(node, i);
- if(arch_irn_consider_in_reg_alloc(arch_env, cls, op))
+ if (arch_irn_consider_in_reg_alloc(arch_env, cls, op))
ir_nodeset_insert(nodeset, op);
}
}
set_Proj_proj(pairs[i].out_node, get_Proj_proj(pairs[i].in_node));
}
+#ifdef SCHEDULE_PROJS
/* remove the proj from the schedule */
sched_remove(pairs[i].out_node);
+#endif
/* reroute the edges from the proj to the argument */
exchange(pairs[i].out_node, pairs[i].in_node);
pairs[pidx].in_node = res1;
}
else {
+#ifdef SCHEDULE_PROJS
sched_remove(res1);
+#endif
}
+#ifdef SCHEDULE_PROJS
sched_remove(res2);
+#endif
set_Proj_pred(res2, cpyxchg);
set_Proj_proj(res2, 0);
set_Proj_pred(res1, cpyxchg);
set_Proj_proj(res1, 1);
+#ifdef SCHEDULE_PROJS
sched_add_after(sched_point, res1);
sched_add_after(sched_point, res2);
-
+#endif
arch_set_irn_register(arch_env, res2, cycle->elems[i + 1]);
arch_set_irn_register(arch_env, res1, cycle->elems[i]);
arch_set_irn_register(arch_env, cpyxchg, cycle->elems[i + 1]);
n_ops++;
+#ifdef SCHEDULE_PROJS
/* remove the proj from the schedule */
sched_remove(res2);
-
+#endif
/* exchange copy node and proj */
exchange(res2, cpyxchg);
/* reroute all users of the proj to the moved node. */
edges_reroute(proj, move, irg);
+#ifdef SCHEDULE_PROJS
/* remove the proj from the schedule. */
sched_remove(proj);
+#endif
	/* and link it to Bad so it is no longer in the use array of the perm */
set_Proj_pred(proj, get_irg_bad(irg));
FIRM_IMPL1(sched_is_scheduled, int, const ir_node *)
FIRM_IMPL1(sched_first, ir_node *, const ir_node *)
FIRM_IMPL1(sched_last, ir_node *, const ir_node *)
-FIRM_IMPL2(sched_add_after, ir_node *, ir_node *, ir_node *)
-FIRM_IMPL2(sched_add_before, ir_node *, ir_node *, ir_node *)
+FIRM_IMPL2_VOID(sched_add_after, ir_node *, ir_node *)
+FIRM_IMPL2_VOID(sched_add_before, ir_node *, ir_node *)
FIRM_IMPL1_VOID(sched_init_block, ir_node *)
FIRM_IMPL1_VOID(sched_reset, ir_node *)
FIRM_IMPL2(sched_comes_after, int, const ir_node *, const ir_node *)
ir_node *sched_prev(const ir_node *irn);
ir_node *sched_first(const ir_node *block);
ir_node *sched_last(const ir_node *block);
-ir_node *sched_add_before(ir_node *before, ir_node *irn);
-ir_node *sched_add_after(ir_node *before, ir_node *irn);
+void sched_add_before(ir_node *before, ir_node *irn);
+void sched_add_after(ir_node *before, ir_node *irn);
void sched_init_block(ir_node *block);
void sched_reset(ir_node *node);
void sched_remove(ir_node *irn);
#define _sched_entry(list_head) (list_entry(list_head, sched_info_t, list))
+#ifndef SCHEDULE_PROJS
+#define get_irn_sched_info(irn) get_irn_data(skip_Proj_const(irn), sched_info_t, sched_irn_data_offset)
+#else
#define get_irn_sched_info(irn) get_irn_data(irn, sched_info_t, sched_irn_data_offset)
+#endif
+
#define get_sched_info_irn(sched_info) get_irn_data_base(sched_info, sched_irn_data_offset)
/**
case iro_Jmp:
case iro_Break:
return 1;
+#ifndef SCHEDULE_PROJS
+ case iro_Proj:
+ return 0;
+#endif
default:
return is_data_node(irn);
}
* @param irn The node to add.
* @return The given node.
*/
-static INLINE ir_node *_sched_add_before(ir_node *before, ir_node *irn)
+static INLINE void _sched_add_before(ir_node *before, ir_node *irn)
{
sched_info_t *info = get_irn_sched_info(irn);
- assert(_sched_is_scheduled(before) && !_sched_is_scheduled(irn));
+ assert(_sched_is_scheduled(before));
+ assert(!_sched_is_scheduled(irn));
+#ifndef SCHEDULE_PROJS
+ assert(!is_Proj(irn));
+#endif
list_add_tail(&info->list, &get_irn_sched_info(before)->list);
_sched_set_time_stamp(irn);
info->scheduled = 1;
- return irn;
}
/**
* @param irn The node to add.
* @return The given node.
*/
-static INLINE ir_node *_sched_add_after(ir_node *after, ir_node *irn)
+static INLINE void _sched_add_after(ir_node *after, ir_node *irn)
{
sched_info_t *info = get_irn_sched_info(irn);
- assert(_sched_is_scheduled(after) && !_sched_is_scheduled(irn));
+ assert(_sched_is_scheduled(after));
+ assert(!_sched_is_scheduled(irn));
+#ifndef SCHEDULE_PROJS
+ assert(!is_Proj(irn));
+#endif
list_add(&info->list, &get_irn_sched_info(after)->list);
_sched_set_time_stamp(irn);
info->scheduled = 1;
- return irn;
}
static INLINE void _sched_init_block(ir_node *block)
*/
static INLINE void _sched_remove(ir_node *irn)
{
-	sched_info_t *info = get_irn_sched_info(irn);
-	list_del(&info->list);
+	sched_info_t *info;
+#ifndef SCHEDULE_PROJS
+	/* without SCHEDULE_PROJS, Proj nodes must never be in the schedule list */
+	assert(!is_Proj(irn));
+#endif
+	info = get_irn_sched_info(irn);
+	list_del(&info->list);
	INIT_LIST_HEAD(&info->list);
-	info->scheduled = 0;
+	info->scheduled = 0;
}
/**
*/
static INLINE int _sched_cmp(const ir_node *a, const ir_node *b)
{
- assert(_sched_is_scheduled(a) && _sched_is_scheduled(b));
- assert(get_nodes_block(a) == get_nodes_block(b));
+ assert(_sched_is_scheduled(a) && _sched_is_scheduled(b));
+ assert(get_nodes_block(a) == get_nodes_block(b));
- return get_irn_sched_info(a)->time_step - get_irn_sched_info(b)->time_step;
+ return get_irn_sched_info(a)->time_step - get_irn_sched_info(b)->time_step;
}
/**
get_irn_arity(spilled), ins);
copy_node_attr(spilled, res);
new_backedge_info(res);
- sched_reset(res);
DBG((dbg, LEVEL_1, "Insert remat %+F of %+F before reloader %+F\n", res, spilled, reloader));
+#ifdef SCHEDULE_PROJS
/* insert in schedule */
+ sched_reset(res);
sched_add_before(reloader, res);
#ifdef FIRM_STATISTICS
- env->remat_count++;
+ if (! is_Proj(res))
+ env->remat_count++;
#endif
+#else
+ if (! is_Proj(res)) {
+ /* insert in schedule */
+ sched_reset(res);
+ sched_add_before(reloader, res);
+#ifdef FIRM_STATISTICS
+ env->remat_count++;
+#endif
+ }
+#endif /* SCHEDULE_PROJS */
return res;
}
/* allocate all values _defined_ by this instruction */
workset_clear(new_vals);
if (get_irn_mode(irn) == mode_T) { /* special handling for tuples and projs */
- ir_node *proj;
- for(proj=sched_next(irn); is_Proj(proj); proj=sched_next(proj))
+ const ir_edge_t *edge;
+
+ foreach_out_edge(irn, edge) {
+ ir_node *proj = get_edge_src_irn(edge);
workset_insert(env, new_vals, proj);
+ }
} else {
workset_insert(env, new_vals, irn);
}
pp->proj = proj;
assert(get_reg(pp->arg));
set_reg(proj, get_reg(pp->arg));
+#ifdef SCHEDULE_PROJS
sched_add_after(insert_after, proj);
+#endif
insert_after = proj;
DBG((dbg, LEVEL_2, "Copy register assignment %s from %+F to %+F\n", get_reg(pp->arg)->name, pp->arg, pp->proj));
}
/* record state changes by the node */
if (get_irn_mode(node) == mode_T) {
- ir_node *proj;
- for(proj = sched_next(node); is_Proj(proj);
- proj = sched_next(proj)) {
+ const ir_edge_t *edge;
+
+ foreach_out_edge(node, edge) {
+ ir_node *proj = get_edge_src_irn(edge);
const arch_register_t *reg =
arch_get_irn_register(env->arch_env, proj);
if(reg == env->reg) {
{
ir_node *last = sched_last(block);
- /* skip projs and keepanies behind the jump... */
+ /* skip Projs and Keep-alikes behind the jump... */
while(is_Proj(last) || be_is_Keep(last)) {
last = sched_prev(last);
}
if(!is_cfop(last)) {
last = sched_next(last);
- // last node must be a cfop, only exception is the start block
+ /* last node must be a cfop, only exception is the start block */
assert(last == get_irg_start_block(get_irn_irg(block)));
}
*/
static int sched_edge_hook(FILE *F, ir_node *irn)
{
+#ifndef SCHEDULE_PROJS
+ if (is_Proj(irn))
+ return 1;
+#endif
if(sched_is_scheduled(irn) && sched_has_prev(irn)) {
ir_node *prev = sched_prev(irn);
fprintf(F, "edge:{sourcename:\"");
-//---------------------------------------------------------------------------
+/*--------------------------------------------------------------------------- */
ir_node *node;
int non_phi_found = 0;
int cfchange_found = 0;
- // TODO ask arch about delay branches
+ /* TODO ask arch about delay branches */
int delay_branches = 0;
int last_timestep = INT_MIN;
int i, arity;
int timestep;
- // this node is scheduled
+ /* this node is scheduled */
if(bitset_is_set(env->scheduled, get_irn_idx(node))) {
ir_fprintf(stderr, "Verify warning: %+F appears to be schedule twice\n");
env->problem_found = 1;
}
bitset_set(env->scheduled, get_irn_idx(node));
- // Check that scheduled nodes are in the correct block
+ /* Check that scheduled nodes are in the correct block */
if(get_nodes_block(node) != block) {
ir_fprintf(stderr, "Verify warning: %+F is in block %+F but scheduled in %+F\n", node, get_nodes_block(node), block);
env->problem_found = 1;
}
- // Check that timesteps are increasing
+ /* Check that timesteps are increasing */
timestep = sched_get_time_step(node);
if(timestep <= last_timestep) {
ir_fprintf(stderr, "Verify warning: Schedule timestep did not increase at node %+F\n",
}
last_timestep = timestep;
- // Check that phis come before any other node
+ /* Check that phis come before any other node */
if (is_Phi(node)) {
if (non_phi_found) {
ir_fprintf(stderr, "Verify Warning: Phi node %+F scheduled after non-Phi nodes in block %+F (%s)\n",
non_phi_found = 1;
}
- // Check for control flow changing nodes
+ /* Check for control flow changing nodes */
if (is_cfop(node) && get_irn_opcode(node) != iro_Start) {
/* check, that only one CF operation is scheduled */
if (cfchange_found == 1) {
}
cfchange_found = 1;
} else if (cfchange_found) {
- // proj and keepany aren't real instructions...
+ /* proj and keepany aren't real instructions... */
if(!is_Proj(node) && !be_is_Keep(node)) {
/* check for delay branches */
if (delay_branches == 0) {
}
}
- // Check that all uses come before their definitions
+ /* Check that all uses come before their definitions */
if(!is_Phi(node)) {
int nodetime = sched_get_time_step(node);
for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
}
}
- // Check that no dead nodes are scheduled
+ /* Check that no dead nodes are scheduled */
if(get_irn_n_edges(node) == 0) {
ir_fprintf(stderr, "Verify warning: Node %+F is dead but scheduled in block %+F (%s)\n",
node, block, get_irg_dump_name(env->irg));
env->problem_found = 1;
}
- // check that all projs/keeps are behind their nodes
+#ifdef SCHEDULE_PROJS
+ /* check that all projs/keeps are behind their nodes */
if(is_Proj(node)) {
ir_node *prev = sched_prev(node);
while(is_Proj(prev))
env->problem_found = 1;
}
}
+#endif
if(be_is_Keep(node)) {
int arity = get_irn_arity(node);
int problem = 0;
if(is_Phi(node) || is_Sync(node) || is_Pin(node))
return 0;
}
+#ifdef SCHEDULE_PROJS
if(is_Proj(node)) {
if(get_irn_mode(node) == mode_X)
return 0;
return should_be_scheduled(env, get_Proj_pred(node));
}
+#else
+ if(is_Proj(node))
+ return 0;
+#endif
if(be_is_Keep(node) && get_irn_opcode(get_nodes_block(node)) == iro_Bad)
return 0;
env.arch_env = birg->main_env->arch_env;
irg_block_walk_graph(env.irg, verify_schedule_walker, NULL, &env);
- // check if all nodes are scheduled
+ /* check if all nodes are scheduled */
irg_walk_graph(env.irg, check_schedule, NULL, &env);
return ! env.problem_found;
-//---------------------------------------------------------------------------
+/*--------------------------------------------------------------------------- */
spill.ent = ent;
res = set_insert(env->spills, &spill, sizeof(spill), hash);
- // is 1 of the arguments a spill?
+ /* is 1 of the arguments a spill? */
for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
ir_node* arg = get_irn_n(node, i);
collect(env, arg, reload, ent);
} else if(is_Phi(node) && get_irn_mode(node) == mode_M) {
collect_memphi(env, node, reload, ent);
} else {
- // Disabled for now, spills might get transformed by the backend
+ /* Disabled for now, spills might get transformed by the backend */
#if 0
ir_fprintf(stderr, "Verify warning: No spill, memperm or memphi attached to node %+F found from node %+F in block %+F(%s)\n",
node, reload, get_nodes_block(node), get_irg_dump_name(env->irg));
be_verify_spillslots_env_t *env = data;
const arch_env_t *arch_env = env->arch_env;
- // @@@ ia32_classify returns classification of Proj_pred :-/
+ /* @@@ ia32_classify returns classification of Proj_pred :-/ */
if(is_Proj(node))
return;
-//---------------------------------------------------------------------------
+/*--------------------------------------------------------------------------- */
if(get_irn_opcode(user) == iro_End)
continue;
- // in case of phi arguments we compare with the block the value comes from
+ /* in case of phi arguments we compare with the block the value comes from */
if(is_Phi(user)) {
ir_node *phiblock = get_nodes_block(user);
if(phiblock == bb)
-//---------------------------------------------------------------------------
+/*--------------------------------------------------------------------------- */
-//---------------------------------------------------------------------------
+/*--------------------------------------------------------------------------- */
if (sched_point) {
sched_add_after(sched_point, new_op);
+#ifdef SCHEDULE_PROJS
sched_add_after(new_op, proj);
-
+#endif
sched_remove(node);
}
sp = new_rd_Proj(dbg, irg, block, pred, spmode, pos);
arch_set_irn_register(cg->arch_env, sp, spreg);
+#ifdef SCHEDULE_PROJS
sched_add_before(schedpoint, sp);
+#endif
return sp;
}