return req->cls->n_regs;
}
+void arch_put_non_ignore_regs(const arch_env_t *env, const arch_register_class_t *cls, bitset_t *bs)
+{
+ int i;
+
+ /* Set a bit for every register of the class that does not carry the
+ * ignore type, i.e. every register the allocator may actually use. */
+ for(i = 0; i < cls->n_regs; ++i) {
+ if(!arch_register_type_is(&cls->regs[i], ignore))
+ bitset_set(bs, i);
+ }
+}
+
int arch_is_register_operand(const arch_env_t *env,
const ir_node *irn, int pos)
{
#include <libcore/lc_opts.h>
#endif
+#include "type.h"
+
#include "irnode.h"
#include "irmode.h"
#include "ident.h"
#include "belistsched.h"
+#include "beabi_t.h"
typedef struct _arch_register_class_t arch_register_class_t;
typedef struct _arch_register_t arch_register_t;
typedef enum _arch_register_type_t {
 arch_register_type_none = 0,
- arch_register_type_caller_saved, /**< The register must be saved by the caller
- upon a function call. It thus can be overwritten
- in the called function. */
- arch_register_type_callee_saved, /**< The register must be saved by the called function,
- it thus survives a function call. */
- arch_register_type_ignore /**< Do not consider this register when allocating. */
+ arch_register_type_caller_save = 1, /**< The register must be saved by the caller
+ upon a function call. It thus can be overwritten
+ in the called function. */
+ arch_register_type_callee_save = 2, /**< The register must be saved by the called
+ function, it thus survives a
+ function call. */
+ arch_register_type_ignore = 4, /**< Do not consider this register when allocating. */
+ arch_register_type_sp = 8, /**< This register is the stack pointer of the architecture. */
+ arch_register_type_bp = 16, /**< The register is the base pointer of the architecture. */
} arch_register_type_t;
/**
struct _arch_register_class_t {
const char *name; /**< The name of the register class. */
int n_regs; /**< Number of registers in this class. */
+ ir_mode *mode; /**< The mode of the register class. */
const arch_register_t *regs; /**< The array of registers. */
};
return the number of registers
in the bitset. */
- void *limited_env; /**< This is passed to limited. */
+ void *limited_env; /**< This must be passed to limited. */
+
+ ir_node *other_same; /**< The other node which shall have the same reg
+ as this one (for case should_be_same). */
- ir_node *other; /**< In case of "should be equal"
- or should be different, this gives
- the node to whose register this
- one's should be the same/different. */
+ ir_node *other_different; /**< The other node from which this one's register
+ must be different (case must_be_different). */
} arch_register_req_t;
/**
*/
extern int arch_get_allocatable_regs(const arch_env_t *env, const ir_node *irn, int pos, bitset_t *bs);
+/**
+ * Put all registers which shall not be ignored by the register
+ * allocator in a bit set.
+ * @param env The arch env.
+ * @param cls The register class to consider.
+ * @param bs The bit set to put the registers to.
+ */
+extern void arch_put_non_ignore_regs(const arch_env_t *env, const arch_register_class_t *cls, bitset_t *bs);
+
/**
* Check, if a register is assignable to an operand of a node.
* @param env The architecture environment.
*/
extern arch_irn_flags_t arch_irn_get_flags(const arch_env_t *env, const ir_node *irn);
-#define arch_irn_is_ignore(env, irn) 0
- // ((arch_irn_get_flags(env, irn) & arch_irn_flags_ignore) != 0)
+#define arch_irn_is_ignore(env, irn) ((arch_irn_get_flags(env, irn) & arch_irn_flags_ignore) != 0)
#define arch_irn_has_reg_class(env, irn, pos, cls) \
((cls) == arch_get_irn_reg_class(env, irn, pos))
*/
struct _arch_isa_t {
 const arch_isa_if_t *impl;
+ const arch_register_t *sp; /**< The stack pointer register. */
+ const arch_register_t *bp; /**< The base pointer register. */
+ const int stack_dir; /**< Stack growth direction: -1 for decreasing, 1 for increasing. */
};
+#define arch_isa_stack_dir(isa) ((isa)->stack_dir)
+#define arch_isa_sp(isa) ((isa)->sp)
+#define arch_isa_bp(isa) ((isa)->bp)
+
/**
* Architecture interface.
*/
*/
const arch_register_class_t *(*get_reg_class)(const void *self, int i);
+ /**
+ * Get the register class which shall be used to store a value of a given mode.
+ * @param self The this pointer.
+ * @param mode The mode in question.
+ * @return A register class which can hold values of the given mode.
+ */
+ const arch_register_class_t *(*get_reg_class_for_mode)(const void *self, const ir_mode *mode);
+
+ /**
+ * Get the ABI restrictions for procedure calls.
+ * @param self The this pointer.
+ * @param method_type The type of the method (procedure) in question.
+ * @param p The array of parameter locations to be filled.
+ */
+ void (*get_call_abi)(const void *self, ir_type *method_type, be_abi_call_t *abi);
+
/**
* The irn handler for this architecture.
* The irn handler is registered by the Firm back end
long (*handle_call_proj)(const void *self, ir_node *proj, int is_keep);
};
-#define arch_isa_get_n_reg_class(isa) ((isa)->impl->get_n_reg_class(isa))
-#define arch_isa_get_reg_class(isa,i) ((isa)->impl->get_reg_class(isa, i))
-#define arch_isa_get_irn_handler(isa) ((isa)->impl->get_irn_handler(isa))
-#define arch_isa_make_code_generator(isa,irg) ((isa)->impl->make_code_generator(isa, irg))
+#define arch_isa_get_n_reg_class(isa) ((isa)->impl->get_n_reg_class(isa))
+#define arch_isa_get_reg_class(isa,i) ((isa)->impl->get_reg_class(isa, i))
+#define arch_isa_get_irn_handler(isa) ((isa)->impl->get_irn_handler(isa))
+#define arch_isa_get_call_abi(isa,tp,abi) ((isa)->impl->get_call_abi((isa), (tp), (abi)))
+#define arch_isa_get_reg_class_for_mode(isa,mode) ((isa)->impl->get_reg_class_for_mode((isa), (mode)))
+#define arch_isa_make_code_generator(isa,irg) ((isa)->impl->make_code_generator(isa, irg))
#define ARCH_MAX_HANDLERS 8
* @param handler A node handler.
* @return The environment itself.
*/
-extern arch_env_t *arch_env_add_irn_handler(arch_env_t *env,
- const arch_irn_handler_t *handler);
+extern arch_env_t *arch_env_add_irn_handler(arch_env_t *env, const arch_irn_handler_t *handler);
#endif /* _FIRM_BEARCH_H */
pset *pre_colored; /**< Set of precolored nodes. */
bitset_t *live; /**< A liveness bitset. */
bitset_t *colors; /**< The color mask. */
- bitset_t *ignore_colors; /**< A mask of colors which shall be not used in allocation (ignored). */
+ bitset_t *valid_colors; /**< A mask of colors which shall be considered during allocation.
+ Registers with the ignore bit on, must not be considered. */
bitset_t *in_colors; /**< Colors used by live in values. */
int colors_n; /**< The number of colors. */
} be_chordal_alloc_env_t;
int has_constraint = arch_register_req_is(&op->req, limited);
if(!values_interfere(op->carrier, op->irn) && !op->partner && (!has_constraint || can_be_constrained)) {
- if(arch_register_req_is(&op->req, should_be_same) && op->req.other == op->carrier)
+ if(arch_register_req_is(&op->req, should_be_same) && op->req.other_same == op->carrier)
return op;
else
res = op;
bitset_clear_all(bs);
op->req.limited(op->req.limited_env, bs);
- bitset_andnot(bs, alloc_env->ignore_colors);
+ bitset_and(bs, alloc_env->valid_colors);
+
+ DBG((dbg, LEVEL_2, "\tallowed registers for %+F: %B\n", op->carrier, bs));
bitset_foreach(bs, col)
bipartite_add(bp, n_alloc, col);
bitset_clear_all(bs);
arch_get_allocatable_regs(aenv, proj, -1, bs);
- bitset_andnot(bs, alloc_env->ignore_colors);
+ bitset_and(bs, alloc_env->valid_colors);
bitset_foreach(bs, col)
bipartite_add(bp, n_alloc, col);
bipartite_matching(bp, assignment);
+
for(i = 0; i < n_alloc; ++i) {
int j;
ir_node *nodes[2];
- const arch_register_t *reg = arch_register_for_index(env->cls, assignment[i]);
+ const arch_register_t *reg;
+
+ assert(assignment[i] >= 0 && "there must have been a register assigned");
+ reg = arch_register_for_index(env->cls, assignment[i]);
nodes[0] = alloc_nodes[i];
nodes[1] = pmap_get(partners, alloc_nodes[i]);
pset *live_in = put_live_in(block, pset_new_ptr_default());
bitset_clear_all(live);
- bitset_clear_all(colors);
bitset_clear_all(in_colors);
+ bitset_copy(colors, alloc_env->valid_colors);
+ bitset_flip_all(colors);
+
DBG((dbg, LEVEL_4, "Assigning colors for block %+F\n", block));
DBG((dbg, LEVEL_4, "\tusedef chain for block\n"));
list_for_each_entry(border_t, b, head, list) {
del_pset(live_in);
}
-
-
void be_ra_chordal_color(be_chordal_env_t *chordal_env)
{
- int i;
+ be_chordal_alloc_env_t env;
+ char buf[256];
+
int colors_n = arch_register_class_n_regs(chordal_env->cls);
ir_graph *irg = chordal_env->irg;
- be_chordal_alloc_env_t env;
if(get_irg_dom_state(irg) != dom_consistent)
compute_doms(irg);
env.chordal_env = chordal_env;
env.colors_n = colors_n;
env.colors = bitset_malloc(colors_n);
- env.ignore_colors = bitset_malloc(colors_n);
+ env.valid_colors = bitset_malloc(colors_n);
env.in_colors = bitset_malloc(colors_n);
env.pre_colored = pset_new_ptr_default();
- bitset_clear_all(env.ignore_colors);
-#if 0
- for(i = 0; i < chordal_env->cls->n_regs; ++i) {
- const arch_register_t *reg = &chordal_env->cls->regs[i];
- if(arch_register_type_is(reg, ignore))
- bitset_set(env.ignore_colors, reg->index);
- }
-#endif
+ arch_put_non_ignore_regs(chordal_env->main_env->arch_env, chordal_env->cls, env.valid_colors);
/* Handle register targeting constraints */
dom_tree_walk_irg(irg, constraints, NULL, &env);
- {
- char buf[128];
+ if(chordal_env->opts->dump_flags & BE_CH_DUMP_CONSTR) {
snprintf(buf, sizeof(buf), "-%s-constr", chordal_env->cls->name);
dump_ir_block_graph_sched(chordal_env->irg, buf);
}
be_numbering_done(irg);
-#ifdef DUMP_INTERVALS
- {
- char buf[128];
+ if(chordal_env->opts->dump_flags & BE_CH_DUMP_TREE_INTV) {
plotter_t *plotter;
ir_snprintf(buf, sizeof(buf), "ifg_%s_%F.eps", chordal_env->cls->name, irg);
plotter = new_plotter_ps(buf);
-
draw_interval_tree(&draw_chordal_def_opts, chordal_env, plotter);
plotter_free(plotter);
}
-#endif
free(env.live);
free(env.colors);
free(env.in_colors);
- free(env.ignore_colors);
+ free(env.valid_colors);
del_pset(env.pre_colored);
}
FILE *f;
} ps_plotter_t;
+
+/*
+ ____ ____ ____ _ _ _
+ | _ \/ ___| | _ \| | ___ | |_| |_ ___ _ __
+ | |_) \___ \ | |_) | |/ _ \| __| __/ _ \ '__|
+ | __/ ___) | | __/| | (_) | |_| || __/ |
+ |_| |____/ |_| |_|\___/ \__|\__\___|_|
+
+*/
+
static void ps_begin(plotter_t *_self, const rect_t *vis)
{
FILE *f;
return p;
}
+/*
+ _____ _ _ _____ ____ _ _ _
+ |_ _(_) | __|__ / | _ \| | ___ | |_| |_ ___ _ __
+ | | | | |/ / / / | |_) | |/ _ \| __| __/ _ \ '__|
+ | | | | < / /_ | __/| | (_) | |_| || __/ |
+ |_| |_|_|\_\/____| |_| |_|\___/ \__|\__\___|_|
+
+*/
+
+typedef struct {
+ base_plotter_t inh; /**< Base plotter; must stay first so decl_self() casts work. */
+ const char *filename; /**< Name of the output file to write the TikZ code to. */
+ FILE *f; /**< The open output file (set in tikz_begin()). */
+} tikz_plotter_t;
+
+/** Open the output file and emit the TikZ environment header. */
+static void tikz_begin(plotter_t *_self, const rect_t *vis)
+{
+ FILE *f;
+ decl_self(tikz_plotter_t, _self);
+
+ /* NOTE(review): fopen() result is not checked; a failed open makes the
+ * fprintf() below write through a NULL FILE* — confirm callers guarantee
+ * a writable path or add a check. */
+ f = self->f = fopen(self->filename, "wt");
+ fprintf(f, "\\begin{tikzpicture}\n");
+}
+
+/** Record the current drawing color (delegates to the base plotter). */
+static void tikz_setcolor(plotter_t *_self, const color_t *color)
+{
+ decl_self(tikz_plotter_t, _self);
+ set_color(_self, color);
+}
+
+/** Emit a TikZ line segment from (x1,y1) to (x2,y2). */
+static void tikz_line(plotter_t *_self, int x1, int y1, int x2, int y2)
+{
+ decl_self(tikz_plotter_t, _self);
+ fprintf(self->f, "\t\\draw (%d,%d) -- (%d,%d);\n", x1, y1, x2, y2);
+}
+
+/** Emit a TikZ rectangle for @p rect. */
+static void tikz_box(plotter_t *_self, const rect_t *rect)
+{
+ decl_self(tikz_plotter_t, _self);
+
+ /* TikZ path statements must be terminated with a ';' (was missing). */
+ fprintf(self->f, "\t\\draw (%d,%d) rectangle (%d, %d);\n",
+ rect->x, rect->y, rect->x + rect->w, rect->y + rect->h);
+}
+
+/* Emit a TikZ text node with @p str at position (x,y).
+ * NOTE(review): all sibling vtable functions are static; this one is not —
+ * probably an oversight, confirm nothing references it externally. */
+void tikz_text(plotter_t *_self, int x, int y, const char *str)
+{
+ decl_self(tikz_plotter_t, _self);
+ fprintf(self->f, "\t\\draw (%d,%d) node {%s};\n", x, y, str);
+}
+
+/** Close the TikZ environment and the output file. */
+static void tikz_finish(plotter_t *_self)
+{
+ decl_self(tikz_plotter_t, _self);
+
+ /* tikz_begin() opened a \begin{tikzpicture}; it must be balanced,
+ * otherwise the produced LaTeX does not compile. */
+ fprintf(self->f, "\\end{tikzpicture}\n");
+ fclose(self->f);
+}
+
+
extern void plotter_free(plotter_t *self)
{
self->vtab->free(self);
static be_ra_chordal_opts_t options = {
- BE_CH_DUMP_ALL,
+ BE_CH_DUMP_NONE,
BE_CH_SPILL_BELADY,
BE_CH_COPYMIN_HEUR,
BE_CH_IFG_STD,
{ NULL, 0 }
};
+/* Command line option names mapped to the BE_CH_DUMP_* dump flags. */
+static const lc_opt_enum_int_items_t dump_items[] = {
+ { "spill", BE_CH_DUMP_SPILL },
+ { "live", BE_CH_DUMP_LIVE },
+ { "color", BE_CH_DUMP_COLOR },
+ { "copymin", BE_CH_DUMP_COPYMIN },
+ { "ssadestr", BE_CH_DUMP_SSADESTR },
+ { "tree", BE_CH_DUMP_TREE_INTV },
+ { "constr", BE_CH_DUMP_CONSTR },
+ { "lower", BE_CH_DUMP_LOWER },
+ { NULL, 0 }
+};
+
static lc_opt_enum_int_var_t spill_var = {
&options.spill_method, spill_items
};
&options.lower_perm_method, lower_perm_items
};
+static lc_opt_enum_int_var_t dump_var = {
+ &options.dump_flags, dump_items
+};
+
static void be_ra_chordal_register_options(lc_opt_entry_t *grp)
{
grp = lc_opt_get_grp(grp, "chordal");
for(j = 0, m = arch_isa_get_n_reg_class(isa); j < m; ++j) {
chordal_env.cls = arch_isa_get_reg_class(isa, j);
chordal_env.border_heads = pmap_create();
- chordal_env.constr_irn = pset_new_ptr(32);
be_liveness(irg);
dump(BE_CH_DUMP_LIVE, irg, chordal_env.cls, "-live", dump_ir_block_graph_sched);
be_liveness(irg);
be_check_pressure(&chordal_env);
-#if 0
- /* Insert perms before reg-constrained instructions */
- be_insert_constr_perms(&chordal_env);
- dump(BE_CH_DUMP_CONSTR, irg, chordal_env.cls, "-constr", dump_ir_block_graph_sched);
-#endif
-
be_liveness(irg);
be_check_pressure(&chordal_env);
be_ifg_free(chordal_env.ifg);
pmap_destroy(chordal_env.border_heads);
- del_pset(chordal_env.constr_irn);
}
be_compute_spill_offsets(&chordal_env);
#include "bechordal.h"
#include "beirgmod.h"
+typedef struct _be_ra_chordal_opts_t be_ra_chordal_opts_t;
+
/** Defines an invalid register index. */
#define NO_COLOR (-1)
* Environment for each of the chordal register allocator phases
*/
struct _be_chordal_env_t {
- struct obstack obst; /**< An obstack for temporary storage. */
- firm_dbg_module_t *dbg; /**< Debug module for the chordal register allocator. */
- const be_main_env_t *main_env; /**< Environment with back-end data. */
- dom_front_info_t *dom_front; /**< Dominance frontiers. */
- ir_graph *irg; /**< The graph under examination. */
- const arch_register_class_t *cls; /**< The current register class. */
- pmap *border_heads; /**< Maps blocks to border heads. */
- pset *constr_irn; /**< Nodes which deserve special constraint handling. */
- be_ifg_t *ifg; /**< The interference graph. */
- void *data; /**< Some pointer, to which different
- phases can attach data to. */
+ struct obstack obst; /**< An obstack for temporary storage. */
+ be_ra_chordal_opts_t *opts; /**< A pointer to the chordal ra options. */
+ firm_dbg_module_t *dbg; /**< Debug module for the chordal register allocator. */
+ const be_main_env_t *main_env; /**< Environment with back-end data. */
+ dom_front_info_t *dom_front; /**< Dominance frontiers. */
+ ir_graph *irg; /**< The graph under examination. */
+ const arch_register_class_t *cls; /**< The current register class. */
+ pmap *border_heads; /**< Maps blocks to border heads. */
+ be_ifg_t *ifg; /**< The interference graph. */
+ void *data; /**< Some pointer, to which different phases can attach data to. */
};
static INLINE struct list_head *_get_block_border_head(const be_chordal_env_t *inf, ir_node *bl) {
BE_CH_LOWER_PERM_COPY = 2
};
-typedef struct {
+struct _be_ra_chordal_opts_t {
unsigned dump_flags;
int spill_method;
int copymin_method;
char ilp_server[128];
char ilp_solver[128];
-} be_ra_chordal_opts_t;
+};
#endif /* _BECHORDAL_T_H */
/* Src == Tgt of a 2-addr-code instruction */
if (is_2addr_code(get_arch_env(co), irn, &req)) {
- ir_node *other = req.other;
+ ir_node *other = req.other_same;
if (!nodes_interfere(co->chordal_env, irn, other)) {
unit->nodes = xmalloc(2 * sizeof(*unit->nodes));
unit->costs = xmalloc(2 * sizeof(*unit->costs));
if(is_Reg_Phi(n) ||
is_Perm(aenv, n) ||
- (arch_register_req_is(&req, should_be_same) && req.other == irn)
+ (arch_register_req_is(&req, should_be_same) && req.other_same == irn)
)
return 1;
}
return pmap_get(info->df_map, block);
}
-static void determine_phi_blocks(ir_node *orig, pset *copies,
- pset *copy_blocks, pset *phi_blocks, dom_front_info_t *df_info)
+static void determine_phi_blocks(pset *copies, pset *copy_blocks, pset *phi_blocks, dom_front_info_t *df_info)
{
ir_node *bl;
- ir_node *orig_block = get_nodes_block(orig);
- pdeq *worklist = new_pdeq();
- firm_dbg_module_t *dbg = DBG_MODULE;
-
- /*
- * Fill the worklist queue and the rest of the orig blocks array.
- */
- for(bl = pset_first(copy_blocks); bl; bl = pset_next(copy_blocks)) {
- assert(block_dominates(orig_block, bl)
- && "The block of the copy must be dominated by the block of the value");
+ pdeq *worklist = new_pdeq();
+ firm_dbg_module_t *dbg = DBG_MODULE;
+
+ /*
+ * Fill the worklist queue and the rest of the orig blocks array.
+ */
+ for(bl = pset_first(copy_blocks); bl; bl = pset_next(copy_blocks)) {
+ pdeq_putr(worklist, bl);
+ }
- pdeq_putr(worklist, bl);
- }
+ while(!pdeq_empty(worklist)) {
+ ir_node *bl = pdeq_getl(worklist);
+ pset *df = be_get_dominance_frontier(df_info, bl);
- while(!pdeq_empty(worklist)) {
- ir_node *bl = pdeq_getl(worklist);
- ir_node *y;
- pset *df = be_get_dominance_frontier(df_info, bl);
+ ir_node *y;
- DBG((dbg, LEVEL_3, "dom front of %+F\n", bl));
- for(y = pset_first(df); y; y = pset_next(df))
- DBG((dbg, LEVEL_3, "\t%+F\n", y));
+ DBG((dbg, LEVEL_3, "dom front of %+F\n", bl));
+ for(y = pset_first(df); y; y = pset_next(df))
+ DBG((dbg, LEVEL_3, "\t%+F\n", y));
- for(y = pset_first(df); y; y = pset_next(df)) {
- if(!pset_find_ptr(phi_blocks, y)) {
- pset_insert_ptr(phi_blocks, y);
+ for(y = pset_first(df); y; y = pset_next(df)) {
+ if(!pset_find_ptr(phi_blocks, y)) {
+ pset_insert_ptr(phi_blocks, y);
/*
- * Clear the link field of a possible phi block, since
- * the possibly created phi will be stored there. See,
- * search_def()
- */
+ * Clear the link field of a possible phi block, since
+ * the possibly created phi will be stored there. See,
+ * search_def()
+ */
set_irn_link(y, NULL);
if(!pset_find_ptr(copy_blocks, y))
pdeq_putr(worklist, y);
- }
- }
- }
+ }
+ }
+ }
- del_pdeq(worklist);
+ del_pdeq(worklist);
}
/**
* original node.
* @return The valid copy for usage.
*/
-static ir_node *search_def(ir_node *usage, int pos, pset *copies,
- pset *copy_blocks, pset *phi_blocks, ir_mode *mode)
+static ir_node *search_def(ir_node *usage, int pos, pset *copies, pset *copy_blocks, pset *phi_blocks, ir_mode *mode)
{
- ir_node *curr_bl;
- ir_node *start_irn;
- firm_dbg_module_t *dbg = DBG_MODULE;
-
- curr_bl = get_nodes_block(usage);
-
-
- DBG((dbg, LEVEL_1, "Searching valid def for use %+F at pos %d\n", usage, pos));
- /*
- * If the usage is in a phi node, search the copy in the
- * predecessor denoted by pos.
- */
- if(is_Phi(usage)) {
- curr_bl = get_Block_cfgpred_block(curr_bl, pos);
- start_irn = sched_last(curr_bl);
- } else {
- start_irn = sched_prev(usage);
- }
+ ir_node *curr_bl;
+ ir_node *start_irn;
+ firm_dbg_module_t *dbg = DBG_MODULE;
+
+ curr_bl = get_nodes_block(usage);
+
+ DBG((dbg, LEVEL_1, "Searching valid def for use %+F at pos %d\n", usage, pos));
+ /*
+ * If the usage is in a phi node, search the copy in the
+ * predecessor denoted by pos.
+ */
+ if(is_Phi(usage)) {
+ curr_bl = get_Block_cfgpred_block(curr_bl, pos);
+ start_irn = sched_last(curr_bl);
+ } else {
+ start_irn = sched_prev(usage);
+ }
- /*
- * Traverse the dominance tree upwards from the
- * predecessor block of the usage.
- */
- while(curr_bl != NULL) {
+ /*
+ * Traverse the dominance tree upwards from the
+ * predecessor block of the usage.
+ */
+ while(curr_bl != NULL) {
- /*
- * If this block contains a copy, search the block
- * instruction by instruction.
- */
- if(pset_find_ptr(copy_blocks, curr_bl)) {
- ir_node *irn;
+ /*
+ * If this block contains a copy, search the block
+ * instruction by instruction.
+ */
+ if(pset_find_ptr(copy_blocks, curr_bl)) {
+ ir_node *irn;
- /* Look at each instruction from last to first. */
+ /* Look at each instruction from last to first. */
sched_foreach_reverse_from(start_irn, irn) {
- /* Take the first copy we find. */
- if(pset_find_ptr(copies, irn))
- return irn;
- }
- }
+ /* Take the first copy we find. */
+ if(pset_find_ptr(copies, irn))
+ return irn;
+ }
+ }
if(pset_find_ptr(phi_blocks, curr_bl)) {
ir_node *phi = get_irn_link(curr_bl);
return phi;
}
- /* If were not done yet, look in the immediate dominator */
- curr_bl = get_Block_idom(curr_bl);
- if(curr_bl)
- start_irn = sched_last(curr_bl);
- }
+ /* If were not done yet, look in the immediate dominator */
+ curr_bl = get_Block_idom(curr_bl);
+ if(curr_bl)
+ start_irn = sched_last(curr_bl);
+ }
- return NULL;
+ return NULL;
}
-static void fix_usages(ir_node *orig, pset *copies, pset *copy_blocks,
- pset *phi_blocks, pset *ignore_uses)
+static void fix_usages(int n_origs, ir_node *orig[], pset *copies,
+ pset *copy_blocks, pset *phi_blocks, pset *ignore_uses)
{
- int i = 0;
- int n_outs = 0;
- const ir_edge_t *edge;
- ir_mode *mode = get_irn_mode(orig);
-
- firm_dbg_module_t *dbg = DBG_MODULE;
-
- struct {
- ir_node *irn;
- int pos;
- } *outs;
-
- /* Count the number of outs. */
- foreach_out_edge(orig, edge)
- n_outs += !pset_find_ptr(ignore_uses, get_edge_src_irn(edge));
+ firm_dbg_module_t *dbg = DBG_MODULE;
+ int n_outs = 0;
+ ir_mode *mode = get_irn_mode(orig[0]);
+
+ int i, j;
+
+ struct {
+ ir_node *irn;
+ int pos;
+ } *outs;
+
+ /* Count the number of outs. */
+ for(i = 0; i < n_origs; ++i) {
+ const ir_edge_t *edge;
+ foreach_out_edge(orig[i], edge)
+ n_outs += !pset_find_ptr(ignore_uses, get_edge_src_irn(edge));
+ }
- /*
- * Put all outs into an array.
- * This is neccessary, since the outs would be modified while
- * interating on them what could bring the outs module in trouble.
- */
- outs = malloc(n_outs * sizeof(outs[0]));
- foreach_out_edge(orig, edge) {
- if(!pset_find_ptr(ignore_uses, get_edge_src_irn(edge))) {
- outs[i].irn = get_edge_src_irn(edge);
- outs[i].pos = get_edge_src_pos(edge);
- i += 1;
+ /*
+ * Put all outs into an array.
+ * This is necessary, since the outs would be modified while
+ * iterating on them what could bring the outs module in trouble.
+ */
+ outs = alloca(n_outs * sizeof(outs[0]));
+ for(i = 0, j = 0; i < n_origs; ++i) {
+ const ir_edge_t *edge;
+ foreach_out_edge(orig[i], edge) {
+ if(!pset_find_ptr(ignore_uses, get_edge_src_irn(edge))) {
+ outs[j].irn = get_edge_src_irn(edge);
+ outs[j].pos = get_edge_src_pos(edge);
+ j += 1;
+ }
}
- }
-
- /*
- * Search the valid def for each out and set it.
- */
- for(i = 0; i < n_outs; ++i) {
- ir_node *def;
- ir_node *irn = outs[i].irn;
- int pos = outs[i].pos;
+ }
- def = search_def(irn, pos, copies, copy_blocks, phi_blocks, mode);
- DBG((dbg, LEVEL_2, "\t%+F(%d) -> %+F\n", irn, pos, def));
+ /*
+ * Search the valid def for each out and set it.
+ */
+ for(i = 0; i < n_outs; ++i) {
+ ir_node *def;
+ ir_node *irn = outs[i].irn;
+ int pos = outs[i].pos;
- if(def != NULL)
- set_irn_n(irn, pos, def);
- }
+ def = search_def(irn, pos, copies, copy_blocks, phi_blocks, mode);
+ DBG((dbg, LEVEL_2, "\t%+F(%d) -> %+F\n", irn, pos, def));
- free(outs);
+ if(def != NULL)
+ set_irn_n(irn, pos, def);
+ }
}
/**
- * Remove phis which are not neccesary.
+ * Remove phis which are not necessary.
* During place_phi_functions() phi functions are put on the dominance
* frontiers blindly. However some of them will never be used (these
* have at least one predecessor which is NULL, see search_def() for
}
}
-void be_introduce_copies_ignore(dom_front_info_t *info, ir_node *orig,
- int n, ir_node *copy_nodes[], pset *ignore_uses)
+void be_ssa_constr_single_ignore(dom_front_info_t *info, ir_node *orig, int n, ir_node *copies[], pset *ignore_uses)
{
- pset *copies = pset_new_ptr(2 * n);
- pset *copy_blocks = pset_new_ptr(2 * n);
- pset *phi_blocks = pset_new_ptr(2 * n);
- int save_optimize = get_optimize();
- int save_normalize = get_opt_normalize();
- firm_dbg_module_t *dbg = DBG_MODULE;
- int i;
-
-#if 0
- {
- static int ser = 0;
- char buf[128];
-
- snprintf(buf, sizeof(buf), "-post-%d", ser++);
- dump_ir_block_graph_sched(get_irn_irg(orig), buf);
- }
-#endif
-
- firm_dbg_set_mask(dbg, DBG_LEVEL);
- DBG((dbg, LEVEL_1, "Introducing following copies of %+F\n", orig));
-
- /* Fill the sets. */
- pset_insert_ptr(copies, orig);
- pset_insert_ptr(copy_blocks, get_nodes_block(orig));
+ ir_node *origs[1];
+ origs[0] = orig;
+ be_ssa_constr_ignore(info, 1, origs, n, copies, ignore_uses);
+}
- /*
- * All phis using the original value are also copies of it
- * and must be present in the copies set.
- */
- for(i = 0; i < n; ++i) {
- DBG((dbg, LEVEL_1,
- " %+F in block %+F\n", copy_nodes[i], get_nodes_block(copy_nodes[i])));
- pset_insert_ptr(copies, copy_nodes[i]);
- pset_insert_ptr(copy_blocks, get_nodes_block(copy_nodes[i]));
- }
+void be_ssa_constr_ignore(dom_front_info_t *info, int n_origs, ir_node *orig_nodes[],
+ int n_copies, ir_node *copy_nodes[], pset *ignore_uses)
+{
+ int n_all = n_copies + n_origs;
+ pset *copies = pset_new_ptr(2 * n_all);
+ pset *copy_blocks = pset_new_ptr(2 * n_all);
+ pset *phi_blocks = pset_new_ptr(2 * n_all);
+ int save_optimize = get_optimize();
+ int save_normalize = get_opt_normalize();
+ firm_dbg_module_t *dbg = DBG_MODULE;
+
+ int i;
+
+ firm_dbg_set_mask(dbg, DBG_LEVEL);
+
+ /* Fill the sets. */
+ for(i = 0; i < n_origs; ++i) {
+ pset_insert_ptr(copies, orig_nodes[i]);
+ pset_insert_ptr(copy_blocks, get_nodes_block(orig_nodes[i]));
+ }
- /*
- * Disable optimization so that the phi functions do not
- * disappear.
- */
- set_optimize(0);
- set_opt_normalize(0);
+ /*
+ * All phis using the original values are also copies of it
+ * and must be present in the copies set.
+ */
+ for(i = 0; i < n_copies; ++i) {
+ ir_node *bl = get_nodes_block(copy_nodes[i]);
+ DBG((dbg, LEVEL_1, "\t%+F in block %+F\n", copy_nodes[i], bl));
+ pset_insert_ptr(copies, copy_nodes[i]);
+ /* bl is already the block of the copy; taking get_nodes_block(bl)
+ * again would insert the wrong node into the block set. */
+ pset_insert_ptr(copy_blocks, bl);
+ }
- /*
- * Place the phi functions and reroute the usages.
- */
- determine_phi_blocks(orig, copies, copy_blocks, phi_blocks, info);
- fix_usages(orig, copies, copy_blocks, phi_blocks, ignore_uses);
+ /*
+ * Disable optimization so that the phi functions do not
+ * disappear.
+ */
+ set_optimize(0);
+ set_opt_normalize(0);
- /* reset the optimizations */
- set_optimize(save_optimize);
- set_opt_normalize(save_normalize);
+ /*
+ * Place the phi functions and reroute the usages.
+ */
+ determine_phi_blocks(copies, copy_blocks, phi_blocks, info);
+ fix_usages(n_origs, orig_nodes, copies, copy_blocks, phi_blocks, ignore_uses);
- del_pset(copies);
- del_pset(phi_blocks);
- del_pset(copy_blocks);
+ /* reset the optimizations */
+ set_optimize(save_optimize);
+ set_opt_normalize(save_normalize);
+ del_pset(copies);
+ del_pset(phi_blocks);
+ del_pset(copy_blocks);
}
-void be_introduce_copies(dom_front_info_t *info, ir_node *orig, int n, ir_node *copy_nodes[])
-{
- static pset *empty_set = NULL;
- if(!empty_set)
- empty_set = pset_new_ptr_default();
+void be_ssa_constr_single(dom_front_info_t *info, ir_node *orig, int n, ir_node *copy_nodes[])
+{
+ pset *empty_set = be_empty_set();
- be_introduce_copies_ignore(info, orig, n, copy_nodes, empty_set);
+ assert(pset_count(empty_set) == 0);
+ be_ssa_constr_single_ignore(info, orig, n, copy_nodes, empty_set);
}
-void be_introduce_copies_for_set(dom_front_info_t *info, pset *origs, pset *copies) {
- /* TODO */
- assert(0 && "NYI");
- exit(0xDeadBeef);
-}
+void be_ssa_constr_sets(dom_front_info_t *info, pset *origs, pset *copies)
+{
+ int n_origs = pset_count(origs);
+ int n_copies = pset_count(copies);
+ ir_node **orig_nodes = alloca(n_origs * sizeof(orig_nodes[0]));
+ ir_node **copy_nodes = alloca(n_copies * sizeof(orig_nodes[0]));
-void be_introduce_copies_pset(dom_front_info_t *info, pset *nodes) {
- int i, n = pset_count(nodes);
- ir_node *orig, *irn, **copy_nodes;
- static pset *empty_set = NULL;
+ ir_node *irn;
+ int i;
- if (n<2)
- return;
+ for(i = 0, irn = pset_first(origs); irn; irn = pset_next(origs))
+ orig_nodes[i++] = irn;
- copy_nodes = alloca((n-1)*sizeof(*copy_nodes));
- irn = pset_first(nodes);
- orig = irn;
- for (i=0, irn = pset_next(nodes); irn; irn=pset_next(nodes))
+ for(i = 0, irn = pset_first(copies); irn; irn = pset_next(copies))
copy_nodes[i++] = irn;
-
- if(!empty_set)
- empty_set = pset_new_ptr_default();
-
- be_introduce_copies_ignore(info, orig, n-1, copy_nodes, empty_set);
+ be_ssa_constr(info, n_origs, orig_nodes, n_copies, copy_nodes);
}
* represent the same concrete value. This is the case if you
* - copy
* - spill and reload
- * - rematerialize
+ * - re-materialize
* a value.
*
* This function reroutes all uses of the original value to the copies in the
- * corresponding dominance subtrees and creates Phi functions if neccessary.
+ * corresponding dominance subtrees and creates Phi functions if necessary.
*
- * @param info Dominance frontier information.
- * @param orig The node for which you want to introduce copies.
- * @param n The number of copies ypu introduce.
- * @param copies An array of nodes which are copies of @p orig.
+ * @param info Dominance frontier information.
+ * @param n_origs The number of nodes for which the copies are introduced.
+ * @param orig_nodes The nodes for which you want to introduce copies.
+ * @param n_copies The number of copies you introduce.
+ * @param copy_nodes An array of nodes which are copies of @p orig.
+ * @param ignore_uses A set containing uses which shall not be rerouted.
*/
-void be_introduce_copies_ignore(dom_front_info_t *info, ir_node *orig,
- int n, ir_node *copies[], pset *irgore_uses);
+void be_ssa_constr_ignore(dom_front_info_t *info, int n_origs, ir_node *orig_nodes[],
+ int n_copies, ir_node *copy_nodes[], pset *ignore_uses);
-void be_introduce_copies(dom_front_info_t *info, ir_node *orig, int n, ir_node *copies[]);
+/**
+ * Same as be_ssa_constr_ignore() but with a single original node.
+ */
+void be_ssa_constr_single_ignore(dom_front_info_t *info, ir_node *orig, int n, ir_node *copies[], pset *ignore_uses);
-void be_introduce_copies_for_set(dom_front_info_t *info, pset *origs, pset *copies);
+/**
+ * Same as be_ssa_constr_single_ignore() but without ignoring nodes.
+ */
+void be_ssa_constr_single(dom_front_info_t *info, ir_node *orig, int n, ir_node *copy_nodes[]);
-/* obsolete
-void be_introduce_copies_pset(dom_front_info_t *info, pset *nodes);
-*/
+/**
+ * Same as be_ssa_constr_ignore() but without ignoring nodes.
+ */
+void be_ssa_constr(dom_front_info_t *info, int n_orig, ir_node *orig[], int n, ir_node *copy_nodes[]);
+
+/**
+ * Same as be_ssa_constr() but with psets.
+ */
+void be_ssa_constr_sets(dom_front_info_t *info, pset *origs, pset *copies);
#endif
DBG((mod, LEVEL_1, "replacing %+F with %+F, placed new node after %+F\n", irn, cpyxchg, sched_point));
}
- free(cycle->elems);
+ free((void *) cycle->elems);
free(cycle);
}
reg = arch_register_for_index(reg_class, j);
/* only check caller save registers */
- if (arch_register_type_is(reg, caller_saved)) {
+ if (arch_register_type_is(reg, caller_save)) {
/* Only create new proj, iff not already present */
if (!bitset_is_set(proj_set, bitset_idx)) {
curr = proj;
copies[0] = proj;
- be_introduce_copies(dom_front, perm_op, 1, copies);
+ be_ssa_constr_single(dom_front, perm_op, 1, copies);
}
return perm;
}
set_irn_n(irn, pos, cpy);
/* set an out constraint for the copy */
- arch_set_register_req(raenv->aenv, -1, &req);
+ /* TODO: Insert right code here. *
+ be_set_constr_single_reg(cpy, -1, &reg);
+ */
}
}
}
arch_get_register_req(raenv->aenv, &req, irn, pos);
if (arch_register_req_is(&req, should_be_same)) {
- vi2 = get_var_info(req.other);
+ vi2 = get_var_info(req.other_same);
fprintf(raenv->f, "(%d, %d)\n", vi1->var_nr, vi2->var_nr);
}
}
/* correct the reload->spill pointers... */
- be_introduce_copies_for_set(raenv->dom_info, spills, reloads);
+ be_ssa_constr_sets(raenv->dom_info, spills, reloads);
/****** correct the variable <--> values mapping: ******
assert(n_reloads > 0);
reloads = obstack_finish(&ob);
- be_introduce_copies_ignore(senv->chordal_env->dom_front, si->spilled_node,
- n_reloads, reloads, senv->mem_phis);
+ be_ssa_constr_single_ignore(senv->chordal_env->dom_front, si->spilled_node, n_reloads, reloads, senv->mem_phis);
obstack_free(&ob, reloads);
}
#include <stdio.h>
+#include "pset.h"
+
#include "irgraph.h"
#include "irgwalk.h"
#include "irdump_t.h"
#include "besched_t.h"
#include "bearch.h"
+/* Return a lazily created, shared, always-empty pointer set.
+ * The same pset instance is returned on every call; callers must treat it
+ * as read-only, since inserting into it would affect all other users. */
+pset *be_empty_set(void)
+{
+ static pset *empty_set = NULL;
+
+ if(!empty_set)
+ empty_set = pset_new_ptr_default();
+
+ return empty_set;
+}
+
struct dump_env {
FILE *f;
arch_env_t *env;
#include "irnode.h"
#include "bearch.h"
+/**
+ * Get an empty set.
+ * This function always returns the same set.
+ */
+pset *be_empty_set(void);
+
+
/** Undefine this to disable debugging mode. */
#define BE_DEBUG 1
#include "../bearch.h"
#include "../besched.h"
#include "../beutil.h"
+#include "../beabi.h"
#define N_REGS 3
static arch_register_t datab_regs[N_REGS];
static arch_register_class_t reg_classes[] = {
- { "datab", N_REGS, datab_regs },
+ { "datab", N_REGS, NULL, datab_regs },
};
static ir_op *op_push;
obstack_init(&obst);
for(k = 0; k < N_CLASSES; ++k) {
- const arch_register_class_t *cls = ®_classes[k];
+ arch_register_class_t *cls = ®_classes[k];
int i;
+ cls->mode = mode_Is;
for(i = 0; i < cls->n_regs; ++i) {
int n;
char buf[8];
return ®_classes[i];
}
+/** Return the register class used for values of mode @p irm, or NULL
+ * if values of that mode cannot be kept in registers of this ISA. */
+static const arch_register_class_t *firm_get_reg_class_for_mode(const void *self, const ir_mode *irm)
+{
+ /* fixed mis-encoded '&reg_classes' (was garbled to a Latin-1 character) */
+ return mode_is_datab(irm) ? &reg_classes[CLS_DATAB] : NULL;
+}
+
+/** Fill @p abi with the calling convention for @p method_type:
+ * primitive parameters/results live in datab registers, everything
+ * else is passed on the stack. */
+static void firm_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi)
+{
+ /* fixed mis-encoded '&reg_classes' (was garbled to a Latin-1 character) */
+ const arch_register_class_t *cls = &reg_classes[CLS_DATAB];
+ int i, n;
+
+ /* NOTE(review): the parameter index is used directly as register index,
+ * which overruns cls->regs once more than cls->n_regs primitive
+ * parameters occur — confirm callers never exceed that. */
+ for(i = 0, n = get_method_n_params(method_type); i < n; ++i) {
+ ir_type *t = get_method_param_type(method_type, i);
+ if(is_Primitive_type(t))
+ be_abi_call_param_reg(abi, i, &cls->regs[i]);
+ else
+ be_abi_call_param_stack(abi, i);
+ }
+
+ /* Primitive results are returned in registers as well. */
+ for(i = 0, n = get_method_n_ress(method_type); i < n; ++i) {
+ ir_type *t = get_method_res_type(method_type, i);
+ if(is_Primitive_type(t))
+ be_abi_call_res_reg(abi, i, &cls->regs[i]);
+ }
+
+ be_abi_call_set_flags(abi, BE_ABI_NONE);
+}
+
+
static const arch_register_req_t firm_std_reg_req = {
arch_register_req_type_normal,
®_classes[CLS_DATAB],
firm_done,
firm_get_n_reg_class,
firm_get_reg_class,
+ firm_get_reg_class_for_mode,
+ firm_get_call_abi,
firm_get_irn_handler,
firm_get_code_generator_if,
firm_get_list_sched_selector,