return 0;
}
- if(arch_register_req_is(req, limited))
- return req->limited(irn, pos, bs);
+ if(arch_register_req_is(req, limited)) {
+ req->limited(irn, pos, bs);
+ return bitset_popcnt(bs);
+ }
arch_register_class_put(req->cls, bs);
return req->cls->n_regs;
#define arch_register_for_index(cls, idx) \
_arch_register_for_index(cls, idx)
-/**
- * Get the register set for a register class.
- * @param cls The register class.
- * @return The set containing all registers in the class.
- */
-#define arch_get_register_set_for_class(cls) ((cls)->set)
-
typedef enum _arch_operand_type_t {
arch_operand_type_invalid,
arch_operand_type_memory,
arch_register_req_type_t type; /**< The type of the constraint. */
const arch_register_class_t *cls; /**< The register class this constraint belongs to. */
- int (*limited)(const ir_node *irn, int pos, bitset_t *bs);
+ void (*limited)(const ir_node *irn, int pos, bitset_t *bs);
/**< In case of the 'limited'
constraint, this function
must put all allowable
#define NO_COLOR (-1)
-#undef DUMP_INTERVALS
+#define DUMP_INTERVALS
typedef struct _be_chordal_alloc_env_t {
be_chordal_env_t *chordal_env;
char buf[128];
plotter_t *plotter;
- ir_snprintf(buf, sizeof(buf), "ifg_%s_%F.eps", cls->name, irg);
+ ir_snprintf(buf, sizeof(buf), "ifg_%s_%F.eps", chordal_env->cls->name, irg);
plotter = new_plotter_ps(buf);
- draw_interval_tree(&draw_chordal_def_opts, chordal_env, plotter, env->arch_env, cls);
+ draw_interval_tree(&draw_chordal_def_opts, chordal_env, plotter);
plotter_free(plotter);
}
#endif
struct block_dims *dom_dims = pmap_get(env->block_dims, dom);
for(irn = pset_first(live_in); irn; irn = pset_next(live_in)) {
- if(arch_irn_has_reg_class(env->arch_env, irn, 0, env->cls)) {
+ if(arch_irn_has_reg_class(env->arch_env, irn, -1, env->cls)) {
const arch_register_t *reg = arch_get_irn_register(env->arch_env, irn);
int col = arch_register_get_index(reg);
int x = (col + 1) * opts->h_inter_gap;
chordal_env.cls = arch_isa_get_reg_class(isa, j);
be_liveness(irg);
+ dump(BE_CH_DUMP_LIVE, irg, chordal_env.cls, "-live", dump_ir_block_graph_sched);
/* spilling */
switch(options.spill_method) {
/* Dump flags */
BE_CH_DUMP_NONE = (1 << 0),
BE_CH_DUMP_SPILL = (1 << 1),
- BE_CH_DUMP_COLOR = (1 << 2),
- BE_CH_DUMP_COPYMIN = (1 << 3),
- BE_CH_DUMP_SSADESTR = (1 << 4),
- BE_CH_DUMP_TREE_INTV = (1 << 5),
- BE_CH_DUMP_CONSTR = (1 << 6),
- BE_CH_DUMP_LOWER = (1 << 7),
+ BE_CH_DUMP_LIVE = (1 << 2),
+ BE_CH_DUMP_COLOR = (1 << 3),
+ BE_CH_DUMP_COPYMIN = (1 << 4),
+ BE_CH_DUMP_SSADESTR = (1 << 5),
+ BE_CH_DUMP_TREE_INTV = (1 << 6),
+ BE_CH_DUMP_CONSTR = (1 << 7),
+ BE_CH_DUMP_LOWER = (1 << 8),
BE_CH_DUMP_ALL = 2 * BE_CH_DUMP_LOWER - 1,
/* copymin method */
#include <string.h>
#include <limits.h>
+#include "benode_t.h"
+
#include "obst.h"
#include "list.h"
#include "iterator.h"
return res;
}
-static int default_to_appear_in_schedule(void *env, const ir_node *irn)
+static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
{
- return to_appear_in_schedule(irn);
+ int res = 0;
+
+ if(sel->to_appear_in_schedule)
+ res = sel->to_appear_in_schedule(block_env, irn);
+
+ return res || to_appear_in_schedule(irn) || be_is_Keep(irn);
}
static const list_sched_selector_t trivial_selector_struct = {
NULL,
NULL,
trivial_select,
- default_to_appear_in_schedule,
+ NULL,
NULL,
NULL
};
* Collect usage statistics.
*/
sched_foreach(bl, irn) {
- if(env->vtab->to_appear_in_schedule(env, irn)) {
+ if(must_appear_in_schedule(env->vtab, env, irn)) {
int i, n;
for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
ir_node *op = get_irn_n(irn, i);
- if(env->vtab->to_appear_in_schedule(env, irn)) {
+ if(must_appear_in_schedule(env->vtab, env, irn)) {
usage_stats_t *us = get_or_set_usage_stats(env, irn);
if(is_live_end(bl, op))
us->uses_in_block = 99999;
for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
ir_node *op = get_irn_n(irn, i);
- if(env->vtab->to_appear_in_schedule(env, op))
+ if(must_appear_in_schedule(env->vtab, env, op))
sum += compute_max_hops(env, op);
}
reg_pressure_graph_init,
reg_pressure_block_init,
reg_pressure_select,
- default_to_appear_in_schedule,
+ NULL,
reg_pressure_block_free,
NULL
};
/**
* Append an instruction to a schedule.
- * @param env The block scheduleing environment.
+ * @param env The block scheduling environment.
* @param irn The node to add to the schedule.
- * @return The given node.
+ * @return The given node.
*/
static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
{
/* If the node consumes/produces data, it is appended to the schedule
* list, otherwise, it is not put into the list */
- if(to_appear_in_schedule(irn)) {
+ if(must_appear_in_schedule(env->selector, env->selector_block_env, irn)) {
sched_info_t *info = get_irn_sched_info(irn);
INIT_LIST_HEAD(&info->list);
info->scheduled = 1;
return irn;
}
-
/**
* Add the proj nodes of a tuple-mode irn to the schedule immediately
* after the tuple-moded irn. By pinning the projs after the irn, no
}
}
+static ir_node *select_node(block_sched_env_t *be)
+{
+ return be->selector->select(be->selector_block_env, be->ready_set);
+}
+
/**
* Perform list scheduling on a block.
*
be.already_scheduled = new_pset(node_cmp_func, get_irn_n_edges(block));
be.selector = selector;
- firm_dbg_set_mask(be.dbg, 0);
-
if(selector->init_block)
be.selector_block_env = selector->init_block(env->selector_env, block);
while(pset_count(be.ready_set) > 0) {
/* select a node to be scheduled and check if it was ready */
- irn = selector->select(be.selector_block_env, be.ready_set);
+ irn = select_node(&be);
DBG((be.dbg, LEVEL_3, "\tpicked node %+F\n", irn));
};
-/**
- * A default implementation of to_appear_in_schedule,
- * as required in list_sched_selector_t.
- */
-extern int be_default_to_appear_in_schedule(void *env, const ir_node *irn);
/**
* A trivial selector, that just selects the first ready node.
#include "belower.h"
#include "benode_t.h"
#include "bechordal_t.h"
-#include "besched.h"
+#include "besched_t.h"
#include "irgmod.h"
#include "iredges_t.h"
arch_isa_t *isa = arch_env_get_isa(arch_env);
const ir_node *proj_T = NULL;
ir_node **in_keep, *block = get_nodes_block(call);
+ ir_node *last_proj = NULL;
bitset_t *proj_set;
const ir_edge_t *edge;
const arch_register_t *reg;
/* set all used arguments */
if (proj_T) {
+
+
foreach_out_edge(proj_T, edge) {
+ ir_node *proj = get_edge_src_irn(edge);
+
+ assert(is_Proj(proj));
bitset_set(proj_set, get_Proj_proj(get_edge_src_irn(edge)));
+
+ /*
+ * Filter out the last proj in the schedule.
+ * After that one, we have to insert the Keep node.
+ */
+ if(!last_proj || sched_comes_after(last_proj, proj))
+ last_proj = proj;
+
}
}
else {
if (arch_register_type_is(reg, caller_saved)) {
pn = isa->impl->get_projnum_for_register(isa, reg);
if (!bitset_is_set(proj_set, pn)) {
- in_keep[keep_arity++] = new_r_Proj(current_ir_graph, block, (ir_node *)proj_T, mode_Is, pn);
+ ir_node *proj = new_r_Proj(current_ir_graph, block, (ir_node *)proj_T, mode_Is, pn);
+
+ in_keep[keep_arity++] = proj;
+ sched_add_after(last_proj, proj);
+ last_proj = proj;
}
}
}
/* ok, we found some caller save register which are not in use but must be saved */
if (keep_arity) {
- be_new_Keep(reg_class, current_ir_graph, block, keep_arity, in_keep);
+ ir_node *keep;
+
+ keep = be_new_Keep(reg_class, current_ir_graph, block, keep_arity, in_keep);
+ sched_add_after(last_proj, keep);
}
}
lower_env_t *env = walk_env;
const arch_env_t *arch_env = env->chord_env->main_env->arch_env;
- if (!is_Block(irn)) {
- if (!is_Proj(irn)) {
- if (is_Perm(arch_env, irn)) {
- lower_perm_node(irn, walk_env);
- }
- else if (is_Call(arch_env, irn)) {
- lower_call_node(irn, walk_env);
- }
- else if (be_is_Spill(irn) || be_is_Reload(irn)) {
- lower_spill_reload(irn, walk_env);
- }
+ if (!is_Block(irn) && !is_Proj(irn)) {
+ if (is_Perm(arch_env, irn)) {
+ lower_perm_node(irn, walk_env);
+ }
+ else if (is_Call(arch_env, irn)) {
+ lower_call_node(irn, walk_env);
+ }
+ else if (be_is_Spill(irn) || be_is_Reload(irn)) {
+ lower_spill_reload(irn, walk_env);
}
}
return res;
}
+int (sched_comes_after)(const ir_node *n1, const ir_node *n2)
+{
+ return _sched_comes_after(n1, n2);
+}
+
int sched_skip_cf_predicator(const ir_node *irn, void *data) {
arch_env_t *ae = data;
return arch_irn_classify(ae, irn) == arch_irn_class_branch;
*/
extern int sched_verify_irg(ir_graph *irg);
+/**
+ * Checks, if one node is scheduled before another.
+ * @param n1 A node.
+ * @param n2 Another node.
+ * @return 1, if n1 is in front of n2 in the schedule, 0 else.
+ * @note Both nodes must be in the same block.
+ */
+static INLINE int _sched_comes_after(const ir_node *n1, const ir_node *n2)
+{
+ assert(_sched_is_scheduled(n1));
+ assert(_sched_is_scheduled(n2));
+ assert(get_nodes_block(n1) == get_nodes_block(n2));
+ return _sched_get_time_step(n1) < _sched_get_time_step(n2);
+}
+
/**
* A predicate for a node.
* @param irn The node.
sched_predicator_t *predicator, void *data);
#define sched_get_time_step(irn) _sched_get_time_step(irn)
-#define sched_has_succ(irn) _sched_has_succ(irn)
-#define sched_has_prev(irn) _sched_has_prev(irn)
-#define sched_succ(irn) _sched_succ(irn)
-#define sched_prev(irn) _sched_prev(irn)
-#define sched_first(irn) _sched_first(irn)
-#define sched_last(irn) _sched_last(irn)
+#define sched_has_succ(irn) _sched_has_succ(irn)
+#define sched_has_prev(irn) _sched_has_prev(irn)
+#define sched_succ(irn) _sched_succ(irn)
+#define sched_prev(irn) _sched_prev(irn)
+#define sched_first(irn) _sched_first(irn)
+#define sched_last(irn) _sched_last(irn)
#define sched_add_before(before, irn) _sched_add_before(before, irn)
#define sched_add_after(after, irn) _sched_add_after(after, irn)
#define sched_remove(irn) _sched_remove(irn)
-#define sched_is_scheduled(irn) _sched_is_scheduled(irn)
-#define sched_cmp(a, b) _sched_cmp(a, b)
+#define sched_is_scheduled(irn) _sched_is_scheduled(irn)
+#define sched_comes_after(n1, n2) _sched_comes_after(n1, n2)
+#define sched_cmp(a, b) _sched_cmp(a, b)
#endif