struct _be_options_t *options;
struct _arch_code_generator_t *cg;
firm_dbg_module_t *dbg;
-
- const arch_register_t **caller_save; /**< NULL-terminated list of caller save registers. */
- const arch_register_t **callee_save; /**< NULL-terminated list of callee save registers. */
};
struct _be_irg_t {
/* Let the isa fill out the abi description for that call node. */
arch_isa_get_call_abi(isa, mt, call);
+ assert(get_method_variadicity(mt) == variadicity_non_variadic);
+
/* Insert code to put the stack arguments on the stack. */
- for(i = get_irn_arity(irn); i >= 0; --i) {
- if(is_on_stack(call, i)) {
+	/* TODO: Varargs */
+ for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
+ be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
+ if(arg && !arg->in_reg) {
stack_size += get_type_size_bytes(get_method_param_type(mt, i));
obstack_int_grow(obst, i);
n_pos++;
pos = obstack_finish(obst);
/* Collect all arguments which are passed in registers. */
- for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
+ for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
if(arg && arg->in_reg) {
obstack_int_grow(obst, i);
}
low_args = obstack_finish(obst);
- /* If there are some parameters which shall be passed on the stack. */
+	/* If there are some parameters which shall be passed on the stack. */
if(n_pos > 0) {
int curr_ofs = 0;
int do_seq = (call->flags & BE_ABI_USE_PUSH);
assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
for(i = 0; i < n_pos; ++i) {
int p = pos[i];
- ir_node *param = get_irn_n(irn, p);
+ ir_node *param = get_Call_param(irn, p);
ir_node *addr = curr_sp;
ir_node *mem = NULL;
type *param_type = get_method_param_type(mt, p);
}
/* Collect caller save registers */
- for(i = 0; env->birg->main_env->caller_save[i]; ++i)
- pset_insert_ptr(caller_save, env->birg->main_env->caller_save[i]);
+ for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
+ int j;
+ const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
+ for(j = 0; j < cls->n_regs; ++j) {
+ const arch_register_t *reg = arch_register_for_index(cls, j);
+ if(arch_register_type_is(reg, caller_save))
+ pset_insert_ptr(caller_save, (void *) reg);
+ }
+ }
/* search the greatest result proj number */
foreach_out_edge(irn, edge) {
/* at last make the backend call node and set its register requirements. */
for(i = 0; i < n_low_args; ++i)
- obstack_ptr_grow(obst, get_irn_n(irn, low_args[i]));
+ obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));
+
in = obstack_finish(obst);
low_call = be_new_Call(irg, bl, curr_mem, curr_sp, get_Call_ptr(irn), curr_res_proj, n_low_args, in);
obstack_free(obst, in);
frame = be_new_Copy(bp->reg_class, irg, bl, stack);
+ be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);
if(env->dedicated_fp) {
be_set_constr_single_reg(frame, -1, bp);
be_node_set_flags(frame, -1, arch_irn_flags_ignore);
return frame;
}
-static ir_node *clearup_frame(be_abi_irg_t *env, ir_node *stack, ir_node *frame)
+static void clearup_frame(be_abi_irg_t *env, ir_node *bl, struct obstack *obst)
{
+ const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
+ const arch_register_t *sp = isa->sp;
+ const arch_register_t *bp = isa->bp;
+ ir_graph *irg = env->birg->irg;
+ ir_node *no_mem = get_irg_no_mem(irg);
+ ir_node *frame = get_irg_frame(irg);
+ ir_node *stack = env->init_sp;
+ int store_old_fp = 1;
+
+ pmap_entry *ent;
+
+
+ if(env->omit_fp) {
+ stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_against);
+ }
+
+ else {
+ stack = be_new_Copy(sp->reg_class, irg, bl, frame);
+ if(store_old_fp) {
+ ir_mode *mode = sp->reg_class->mode;
+ ir_node *irn;
+
+ stack = be_new_IncSP(sp, irg, bl, stack, no_mem, get_mode_size_bytes(mode), be_stack_dir_against);
+ irn = new_r_Load(irg, bl, no_mem, stack, mode);
+ irn = new_r_Proj(irg, bl, irn, mode, pn_Load_res);
+ frame = be_new_Copy(bp->reg_class, irg, bl, irn);
+ }
+
+ if(env->dedicated_fp) {
+ be_set_constr_single_reg(frame, -1, bp);
+ }
+
+ }
+
+ pmap_foreach(env->regs, ent) {
+ const arch_register_t *reg = ent->key;
+ ir_node *irn = ent->value;
+
+ if(reg == sp)
+ irn = stack;
+ else if(reg == bp)
+ irn = frame;
+
+ obstack_ptr_grow(obst, irn);
+ }
}
/**
if(inc_dir < 0)
arg_offset -= size;
- if(is_atomic_type(param_type)) {
+ /* For atomic parameters which are actually used, we create a StackParam node. */
+ if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
ir_mode *mode = get_type_mode(param_type);
const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
args_repl[i] = be_new_StackParam(cls, irg, reg_params_bl, mode, frame_pointer, arg_offset);
ir_node *irn = get_irn_n(end, i);
if(get_irn_opcode(irn) == iro_Return) {
- ir_node *bl = get_nodes_block(irn);
+ ir_node *bl = get_nodes_block(irn);
+ int n_res = get_Return_n_ress(irn);
+ pmap *reg_map = pmap_create_ex(n_res);
ir_node *ret;
int i, n;
ir_node **in;
/* collect all arguments of the return */
- for(i = 0, n = get_irn_arity(irn); i < n; ++i)
- obstack_ptr_grow(&env->obst, get_irn_n(irn, i));
-
- /* Add the Proj nodes representing the caller save registers. */
- for(ent = pmap_first(env->regs); ent; ent = pmap_next(env->regs), ++n) {
- const arch_register_t *reg = ent->key;
- ir_node *irn = ent->value;
+ for(i = 0; i < n_res; ++i) {
+ ir_node *res = get_Return_res(irn, i);
+ be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
- /*
- * If the register is the stack pointer,
- * add the fix up code. Either add the size of the stack
- * frame if we omitted the frame pointer or move the
- * frame pointer back to the stack register.
- */
- if(reg == sp) {
- irn = be_new_IncSP(sp, irg, bl, frame_pointer, no_mem, env->omit_fp ? BE_STACK_FRAME_SIZE : 0, be_stack_dir_against);
- }
- obstack_ptr_grow(&env->obst, irn);
+ assert(arg->in_reg && "return value must be passed in register");
+ pmap_insert(reg_map, res, (void *) arg->reg);
+ obstack_ptr_grow(&env->obst, res);
}
+ /* generate the clean up code and add additional parameters to the return. */
+ clearup_frame(env, bl, &env->obst);
+
/* The in array for the new back end return is now ready. */
+ n = obstack_object_size(&env->obst) / sizeof(in[0]);
in = obstack_finish(&env->obst);
ret = be_new_Return(irg, bl, n, in);
- edges_reroute(irn, ret, irg);
+
+ /* Set the constraints for some arguments of the return. */
+ for(i = 0; i < n; i++) {
+ const arch_register_t *reg = pmap_get(reg_map, in[i]);
+ if(reg != NULL)
+ be_set_constr_single_reg(ret, i, reg);
+ }
+ exchange(irn, ret);
obstack_free(&env->obst, in);
+ pmap_destroy(reg_map);
}
}
const arch_irn_ops_t *ops = get_irn_ops(env, irn);
return ops->impl->get_flags(ops, irn);
}
+
+extern const char *arch_irn_flag_str(arch_irn_flags_t fl)
+{
+ switch(fl) {
+#define XXX(x) case arch_irn_flags_ ## x: return #x;
+ XXX(dont_spill);
+ XXX(ignore);
+ XXX(rematerializable);
+ XXX(none);
+#undef XXX
+ }
+ return "n/a";
+}
+
+extern char *arch_register_req_format(char *buf, size_t len, const arch_register_req_t *req)
+{
+ char tmp[128];
+ snprintf(buf, len, "class: %s", req->cls->name);
+
+ if(arch_register_req_is(req, limited)) {
+ bitset_pos_t elm;
+ bitset_t *bs = bitset_alloca(req->cls->n_regs);
+ req->limited(req->limited_env, bs);
+ strncat(buf, " limited:", len);
+ bitset_foreach(bs, elm) {
+ strncat(buf, " ", len);
+ strncat(buf, req->cls->regs[elm].name, len);
+ }
+ }
+
+ if(arch_register_req_is(req, should_be_same)) {
+ snprintf(tmp, sizeof(tmp), " same to: %+F", req->other_different);
+ strncat(buf, tmp, len);
+ }
+
+ if(arch_register_req_is(req, should_be_different)) {
+ snprintf(tmp, sizeof(tmp), " different to: %+F", req->other_different);
+ strncat(buf, tmp, len);
+ }
+
+ return buf;
+}
#include "list.h"
#include "ident.h"
-#include "be.h"
#include "belistsched.h"
#include "beabi_t.h"
+#include "be_t.h"
typedef struct _arch_register_class_t arch_register_class_t;
typedef struct _arch_register_t arch_register_t;
upon a function call. It thus can be overwritten
in the called function. */
arch_register_type_ignore = 4, /**< Do not consider this register when allocating. */
- arch_register_type_sp = 8, /**< This register is the stack pointer of the architecture. */
- arch_register_type_bp = 16, /**< The register is the base pointer of the architecture. */
} arch_register_type_t;
/**
* @return 1, If register is of given kind, 0 if not.
*/
#define arch_register_type_is(reg, kind) \
- ((reg)->type == arch_register_type_ ## kind)
+ (((reg)->type & arch_register_type_ ## kind) != 0)
/**
* A register.
must be different (case must_be_different). */
} arch_register_req_t;
+/**
+ * Format a register requirements information into a string.
+ * @param buf The string where to put it to.
+ * @param len The size of @p buf.
+ * @param req The requirements structure to format.
+ * @return A pointer to buf.
+ */
+extern char *arch_register_req_format(char *buf, size_t len, const arch_register_req_t *req);
+
+
/**
* Certain node classes which are relevant for the register allocator.
*/
arch_irn_flags_none = 0, /**< Node flags. */
arch_irn_flags_dont_spill = 1, /**< This must not be spilled. */
arch_irn_flags_rematerializable = 2, /**< This should be replicated instead of spilled/reloaded. */
- arch_irn_flags_ignore = 4, /**< Do not consider the node during register allocation. */
+ arch_irn_flags_ignore = 4, /**< Ignore node during register allocation. */
+ arch_irn_flags_last = arch_irn_flags_ignore
} arch_irn_flags_t;
+/**
+ * Get the string representation of a flag.
+ * This function does not handle or'ed bitmasks of flags.
+ * @param flag The flag.
+ * @return The flag as a string.
+ */
+extern const char *arch_irn_flag_str(arch_irn_flags_t flag);
+
struct _arch_irn_ops_if_t {
/**
/**
* Initialize the code generator.
* @param file The file to dump to.
- * @param irg The function to generate code for.
- * @param env The architecture environment.
+ * @param birg A backend IRG session.
* @return A newly created code generator.
*/
void *(*init)(FILE *file, const be_irg_t *birg);
* @return The list scheduler selector.
*/
const list_sched_selector_t *(*get_list_sched_selector)(const void *self);
+
+ /**
+ * Take a proj from a call, set the correct register and projnum for this proj
+ * @param self The isa object.
+ * @param proj The proj
+ * @param is_keep Non-zero if proj is a Keep argument
+ * @return The backend proj number assigned to this proj
+ */
+ long (*handle_call_proj)(const void *self, ir_node *proj, int is_keep);
};
#define arch_isa_get_n_reg_class(isa) ((isa)->impl->get_n_reg_class(isa))
pset *pre_colored; /**< Set of precolored nodes. */
bitset_t *live; /**< A liveness bitset. */
bitset_t *colors; /**< The color mask. */
- bitset_t *valid_colors; /**< A mask of colors which shall be considered during allocation.
- Registers with the ignore bit on, must not be considered. */
bitset_t *in_colors; /**< Colors used by live in values. */
int colors_n; /**< The number of colors. */
} be_chordal_alloc_env_t;
static INLINE int has_reg_class(const be_chordal_env_t *env, const ir_node *irn)
{
// return arch_irn_has_reg_class(env->main_env->arch_env, irn, -1, env->cls);
- return arch_irn_consider_in_reg_alloc(env->main_env->arch_env, env->cls, irn);
+ return arch_irn_consider_in_reg_alloc(env->birg->main_env->arch_env, env->cls, irn);
}
#define has_limited_constr(req, irn) \
static insn_t *scan_insn(be_chordal_env_t *env, ir_node *irn, struct obstack *obst)
{
- const arch_env_t *arch_env = env->main_env->arch_env;
+ const arch_env_t *arch_env = env->birg->main_env->arch_env;
operand_t o;
insn_t *insn;
int i, n;
if(insn->has_constraints) {
firm_dbg_module_t *dbg = firm_dbg_register("firm.be.chordal.constr");
- const arch_env_t *aenv = env->main_env->arch_env;
+ const arch_env_t *aenv = env->birg->main_env->arch_env;
int n_regs = env->cls->n_regs;
bitset_t *bs = bitset_alloca(n_regs);
ir_node **alloc_nodes = alloca(n_regs * sizeof(alloc_nodes[0]));
bitset_clear_all(bs);
op->req.limited(op->req.limited_env, bs);
- bitset_and(bs, alloc_env->valid_colors);
DBG((dbg, LEVEL_2, "\tallowed registers for %+F: %B\n", op->carrier, bs));
bitset_clear_all(bs);
arch_get_allocatable_regs(aenv, proj, -1, bs);
- bitset_and(bs, alloc_env->valid_colors);
bitset_foreach(bs, col)
bipartite_add(bp, n_alloc, col);
bipartite_matching(bp, assignment);
-
for(i = 0; i < n_alloc; ++i) {
int j;
ir_node *nodes[2];
{
firm_dbg_module_t *dbg = firm_dbg_register("firm.be.chordal.constr");
be_chordal_alloc_env_t *env = data;
- arch_env_t *arch_env = env->chordal_env->main_env->arch_env;
+ arch_env_t *arch_env = env->chordal_env->birg->main_env->arch_env;
ir_node *irn;
for(irn = sched_first(bl); !sched_is_end(irn);) {
be_chordal_alloc_env_t *alloc_env = env_ptr;
be_chordal_env_t *env = alloc_env->chordal_env;
- const arch_env_t *arch_env = env->main_env->arch_env;
+ const arch_env_t *arch_env = env->birg->main_env->arch_env;
bitset_t *live = alloc_env->live;
firm_dbg_module_t *dbg = env->dbg;
ir_node *irn;
bitset_t *live = alloc_env->live;
bitset_t *colors = alloc_env->colors;
bitset_t *in_colors = alloc_env->in_colors;
- const arch_env_t *arch_env = env->main_env->arch_env;
+ const arch_env_t *arch_env = env->birg->main_env->arch_env;
const ir_node *irn;
border_t *b;
struct list_head *head = get_block_border_head(env, block);
pset *live_in = put_live_in(block, pset_new_ptr_default());
+ bitset_clear_all(colors);
bitset_clear_all(live);
bitset_clear_all(in_colors);
- bitset_copy(colors, alloc_env->valid_colors);
- bitset_flip_all(colors);
-
DBG((dbg, LEVEL_4, "Assigning colors for block %+F\n", block));
DBG((dbg, LEVEL_4, "\tusedef chain for block\n"));
list_for_each_entry(border_t, b, head, list) {
}
/*
- * Mind that the sequence of defs from back to front defines a perfect
+ * Mind that the sequence
+ * of defs from back to front defines a perfect
* elimination order. So, coloring the definitions from first to last
* will work.
*/
env.chordal_env = chordal_env;
env.colors_n = colors_n;
env.colors = bitset_malloc(colors_n);
- env.valid_colors = bitset_malloc(colors_n);
env.in_colors = bitset_malloc(colors_n);
env.pre_colored = pset_new_ptr_default();
- arch_put_non_ignore_regs(chordal_env->main_env->arch_env, chordal_env->cls, env.valid_colors);
-
/* Handle register targeting constraints */
dom_tree_walk_irg(irg, constraints, NULL, &env);
-#if 0
if(chordal_env->opts->dump_flags & BE_CH_DUMP_CONSTR) {
snprintf(buf, sizeof(buf), "-%s-constr", chordal_env->cls->name);
dump_ir_block_graph_sched(chordal_env->irg, buf);
}
-#endif
be_numbering(irg);
env.live = bitset_malloc(get_graph_node_count(chordal_env->irg));
be_numbering_done(irg);
-#if 0
if(chordal_env->opts->dump_flags & BE_CH_DUMP_TREE_INTV) {
plotter_t *plotter;
-
ir_snprintf(buf, sizeof(buf), "ifg_%s_%F.eps", chordal_env->cls->name, irg);
plotter = new_plotter_ps(buf);
draw_interval_tree(&draw_chordal_def_opts, chordal_env, plotter);
plotter_free(plotter);
}
-#endif
free(env.live);
free(env.colors);
free(env.in_colors);
- free(env.valid_colors);
-
del_pset(env.pre_colored);
}
struct block_dims *start_dims;
ir_node *start_block = get_irg_start_block(chordal_env->irg);
- env.arch_env = chordal_env->main_env->arch_env;
+ env.arch_env = chordal_env->birg->main_env->arch_env;
env.opts = opts;
env.block_dims = pmap_create();
env.plotter = plotter;
void be_ra_chordal_check(be_chordal_env_t *chordal_env) {
firm_dbg_module_t *dbg = chordal_env->dbg;
- const arch_env_t *arch_env = chordal_env->main_env->arch_env;
+ const arch_env_t *arch_env = chordal_env->birg->main_env->arch_env;
struct obstack ob;
pmap_entry *pme;
ir_node **nodes, *n1, *n2;
compute_doms(irg);
+ chordal_env.opts = &options;
chordal_env.irg = irg;
chordal_env.dbg = firm_dbg_register("firm.be.chordal");
- chordal_env.main_env = main_env;
+ chordal_env.birg = bi;
chordal_env.dom_front = be_compute_dominance_frontiers(irg);
obstack_init(&chordal_env.obst);
struct obstack obst; /**< An obstack for temporary storage. */
be_ra_chordal_opts_t *opts; /**< A pointer to the chordal ra options. */
firm_dbg_module_t *dbg; /**< Debug module for the chordal register allocator. */
- const be_main_env_t *main_env; /**< Environment with back-end data. */
+ be_irg_t *birg; /**< Backend IRG session. */
dom_front_info_t *dom_front; /**< Dominance frontiers. */
ir_graph *irg; /**< The graph under examination. */
const arch_register_class_t *cls; /**< The current register class. */
#define border_prev(b) (list_entry((b)->list.prev, border_t, list))
#define chordal_has_class(chordal_env, irn) \
- arch_irn_has_reg_class(chordal_env->main_env->arch_env, irn, -1, chordal_env->cls)
+ arch_irn_has_reg_class(chordal_env->birg->main_env->arch_env, irn, -1, chordal_env->cls)
int nodes_interfere(const be_chordal_env_t *env, const ir_node *a, const ir_node *b);
co = xcalloc(1, sizeof(*co));
co->cenv = chordal_env;
- co->aenv = chordal_env->main_env->arch_env;
+ co->aenv = chordal_env->birg->main_env->arch_env;
co->irg = chordal_env->irg;
co->cls = chordal_env->cls;
co->get_costs = get_costs;
for(i = 0; i < n_outs; ++i) {
ir_node *irn = outs[i].irn;
int pos = outs[i].pos;
- ir_mode *mode = get_irn_mode(irn);
+ ir_mode *mode = get_irn_mode(get_irn_n(irn, pos));
ir_node *def;
if(sel->to_appear_in_schedule)
res = sel->to_appear_in_schedule(block_env, irn);
- return res || to_appear_in_schedule(irn) || be_is_Keep(irn);
+ return res || to_appear_in_schedule(irn) || be_is_Keep(irn) || be_is_RegParams(irn);
}
static const list_sched_selector_t trivial_selector_struct = {
ir_node *arg1, *arg2, *res1, *res2;
ir_node *cpyxchg = NULL;
- arch_env = env->chord_env->main_env->arch_env;
+ arch_env = env->chord_env->birg->main_env->arch_env;
do_copy = env->do_copy;
mod = env->dbg_module;
block = get_nodes_block(irn);
*/
static void lower_spill_reload(ir_node *irn, void *walk_env) {
lower_env_t *env = walk_env;
- arch_code_generator_t *cg = env->chord_env->main_env->cg;
- const arch_env_t *aenv = env->chord_env->main_env->arch_env;
+ arch_code_generator_t *cg = env->chord_env->birg->cg;
+ const arch_env_t *aenv = env->chord_env->birg->main_env->arch_env;
ir_node *res = NULL;
ir_node *sched_point;
*/
static void lower_nodes_after_ra_walker(ir_node *irn, void *walk_env) {
lower_env_t *env = walk_env;
- const arch_env_t *arch_env = env->chord_env->main_env->arch_env;
+ const arch_env_t *arch_env = env->chord_env->birg->main_env->arch_env;
if (!is_Block(irn) && !is_Proj(irn)) {
if (is_Perm(arch_env, irn)) {
static unsigned dump_flags = 2 * DUMP_FINAL - 1;
/* register allocator to use. */
-static const be_ra_t *ra = &be_ra_external_allocator;
+static const be_ra_t *ra = &be_ra_chordal_allocator;
/* back end instruction set architecture to use */
static const arch_isa_if_t *isa_if = &ia32_isa_if;
static be_main_env_t *be_init_env(be_main_env_t *env)
{
- int i, j, n;
-
- memset(env, 0, sizeof(*env));
+ memset(env, 0, sizeof(*env));
obstack_init(&env->obst);
env->dbg = firm_dbg_register("be.main");
*/
arch_env_add_irn_handler(env->arch_env, &be_node_irn_handler);
- /*
- * Create the list of caller save registers.
- */
- for(i = 0, n = arch_isa_get_n_reg_class(env->arch_env->isa); i < n; ++i) {
- const arch_register_class_t *cls = arch_isa_get_reg_class(env->arch_env->isa, i);
- for(j = 0; j < cls->n_regs; ++j) {
- const arch_register_t *reg = arch_register_for_index(cls, j);
- if(arch_register_type_is(reg, caller_save))
- obstack_ptr_grow(&env->obst, reg);
- }
- }
- obstack_ptr_grow(&env->obst, NULL);
- env->caller_save = obstack_finish(&env->obst);
-
- /*
- * Create the list of callee save registers.
- */
- for(i = 0, n = arch_isa_get_n_reg_class(env->arch_env->isa); i < n; ++i) {
- const arch_register_class_t *cls = arch_isa_get_reg_class(env->arch_env->isa, i);
- for(j = 0; j < cls->n_regs; ++j) {
- const arch_register_t *reg = arch_register_for_index(cls, j);
- if(arch_register_type_is(reg, callee_save))
- obstack_ptr_grow(&env->obst, reg);
- }
- }
- obstack_ptr_grow(&env->obst, NULL);
- env->callee_save = obstack_finish(&env->obst);
-
return env;
}
/* Normalize proj nodes. */
normalize_proj_nodes(irg);
+ /* Make just one return node. */
+ // normalize_one_return(irg);
+
/* Remove critical edges */
remove_critical_cf_edges(irg);
dump(DUMP_SCHED, irg, "-sched", dump_ir_block_graph_sched);
/* connect all stack modifying nodes together (see beabi.c) */
- // be_abi_fix_stack(birg.abi);
+ be_abi_fix_stack_nodes(birg.abi);
/* Verify the schedule */
sched_verify_irg(irg);
/* Do register allocation */
arch_code_generator_before_ra(birg.cg);
ra->allocate(&birg);
-
dump(DUMP_RA, irg, "-ra", dump_ir_block_graph_sched);
+ be_abi_fix_stack_bias(birg.abi);
+
arch_code_generator_done(birg.cg);
dump(DUMP_FINAL, irg, "-end", dump_ir_block_graph_sched);
+ be_abi_free(birg.abi);
}
be_done_env(&env);
#include "util.h"
#include "debug.h"
#include "fourcc.h"
+#include "bitfiddle.h"
#include "irop_t.h"
#include "irmode_t.h"
static arch_irn_flags_t be_node_get_flags(const void *_self, const ir_node *irn)
{
+ int out_pos;
be_node_attr_t *a;
- redir_proj((const ir_node **) &irn, -1);
+ out_pos = redir_proj((const ir_node **) &irn, -1);
+ a = get_irn_attr(irn);
+
assert(is_be_node(irn));
- a = get_irn_attr(irn);
- return a->max_reg_data > 0 ? a->reg_data[0].req.flags : arch_irn_flags_none;
+ assert(out_pos < a->max_reg_data && "position too high");
+
+ return a->reg_data[out_pos].req.flags;
}
static const arch_irn_ops_if_t be_node_irn_ops_if = {
be_node_get_arch_ops
};
+
+static void dump_node_req(FILE *f, be_req_t *req)
+{
+ int i;
+ int did_something = 0;
+ const char *suffix = "";
+
+ if(req->flags != arch_irn_flags_none) {
+ fprintf(f, "flags: ");
+ for(i = arch_irn_flags_none; i <= log2_ceil(arch_irn_flags_last); ++i) {
+ if(req->flags & (1 << i)) {
+ fprintf(f, "%s%s", suffix, arch_irn_flag_str(1 << i));
+ suffix = "|";
+ }
+ }
+ suffix = ", ";
+ did_something = 1;
+ }
+
+ if(req->req.cls != 0) {
+ char tmp[256];
+ fprintf(f, suffix);
+ arch_register_req_format(tmp, sizeof(tmp), &req->req);
+ fprintf(f, "%s", tmp);
+ did_something = 1;
+ }
+
+ if(did_something)
+ fprintf(f, "\n");
+}
+
+static void dump_node_reqs(FILE *f, ir_node *irn)
+{
+ int i;
+ be_node_attr_t *a = get_irn_attr(irn);
+
+ fprintf(f, "registers: \n");
+ for(i = 0; i < a->max_reg_data; ++i) {
+ be_reg_data_t *rd = &a->reg_data[i];
+ if(rd->reg)
+ fprintf(f, "#%d: %s\n", i, rd->reg->name);
+ }
+
+ fprintf(f, "in requirements\n");
+ for(i = 0; i < a->max_reg_data; ++i) {
+ dump_node_req(f, &a->reg_data[i].in_req);
+ }
+
+ fprintf(f, "\nout requirements\n");
+ for(i = 0; i < a->max_reg_data; ++i) {
+ dump_node_req(f, &a->reg_data[i].req);
+ }
+}
+
static int dump_node(ir_node *irn, FILE *f, dump_reason_t reason)
{
be_node_attr_t *at = get_irn_attr(irn);
- int i;
assert(is_be_node(irn));
case dump_node_nodeattr_txt:
break;
case dump_node_info_txt:
- fprintf(f, "reg class: %s\n", at->cls ? at->cls->name : "n/a");
- for(i = 0; i < at->max_reg_data; ++i) {
- const arch_register_t *reg = at->reg_data[i].reg;
- fprintf(f, "reg #%d: %s\n", i, reg ? reg->name : "n/a");
- }
+ dump_node_reqs(f, irn);
switch(be_get_irn_opcode(irn)) {
case beo_Spill:
ctx = be_get_spill_ctx(senv->spill_ctxs, irn, ctx_irn);
if(!ctx->spill) {
- const be_main_env_t *env = senv->chordal_env->main_env;
+ const be_main_env_t *env = senv->chordal_env->birg->main_env;
ctx->spill = be_spill(env->arch_env, irn, ctx_irn);
}
static void phi_walker(ir_node *irn, void *env) {
spill_env_t *senv = env;
- const arch_env_t *arch = senv->chordal_env->main_env->arch_env;
+ const arch_env_t *arch = senv->chordal_env->birg->main_env->arch_env;
if (is_Phi(irn) && arch_irn_has_reg_class(arch, irn, 0, senv->cls)
&& senv->is_mem_phi(irn, senv->data)) {
assert(n_reloads > 0);
obstack_ptr_grow(&ob, si->spilled_node);
reloads = obstack_finish(&ob);
- be_ssa_constr_ignore(senv->chordal_env->dom_front, n_reloads, reloads, senv->mem_phis);
+ be_ssa_constr_ignore(senv->chordal_env->dom_front, n_reloads + 1, reloads, senv->mem_phis);
obstack_free(&ob, reloads);
}
loc_t vals[1]; /**< inlined array of the values/distances in this working set */
};
+void workset_print(const workset_t *w)
+{
+ int i;
+
+ for(i = 0; i < w->len; ++i) {
+ ir_printf("%+F %d\n", w->vals[i].irn, w->vals[i].time);
+ }
+}
+
/**
* Alloc a new workset on obstack @p ob with maximum size @p max
*/
* @return The distance to the next use
* Or 0 if irn is an ignore node
*/
-#define get_distance(bel, from, from_step, def, skip_from_uses) \
- ((arch_irn_is_ignore(bel->arch, def) ) ? 0 : be_get_next_use(bel->uses, from, from_step, def, skip_from_uses))
+static INLINE unsigned get_distance(belady_env_t *bel, const ir_node *from, unsigned from_step, const ir_node *def, int skip_from_uses)
+{
+ arch_irn_flags_t fl = arch_irn_get_flags(bel->arch, def);
+ if((fl & (arch_irn_flags_ignore | arch_irn_flags_dont_spill)) != 0)
+ return 0;
+ else
+ return be_get_next_use(bel->uses, from, from_step, def, skip_from_uses);
+}
/**
- * Performs the actions neccessary to grant the request that:
+ * Performs the actions necessary to grant the request that:
* - new_vals can be held in registers
* - as few as possible other values are disposed
* - the worst values get disposed
/* If we have only one predecessor, we want the start_set of blk to be the end_set of pred */
- if (get_irn_arity(blk) == 1 && blk != get_irg_start_block(get_irn_irg(blk))) {
+ if (get_Block_n_cfgpreds(blk) == 1 && blk != get_irg_start_block(get_irn_irg(blk))) {
ir_node *pred_blk = get_Block_cfgpred_block(blk, 0);
block_info_t *pred_info = get_block_info(pred_blk);
/* projs are handled with the tuple value.
- * Phis are no real instr (see insert_starters)
+ * Phis are no real instr (see insert_starters())
* instr_nr does not increase */
if (is_Proj(irn) || is_Phi(irn)) {
DBG((dbg, DBG_DECIDE, " ...%+F skipped\n", irn));
/* init belady env */
obstack_init(&bel.ob);
- bel.arch = chordal_env->main_env->arch_env;
+ bel.arch = chordal_env->birg->main_env->arch_env;
bel.cls = chordal_env->cls;
bel.n_regs = arch_register_class_n_regs(bel.cls);
bel.ws = new_workset(&bel.ob, &bel);
- bel.uses = be_begin_uses(chordal_env->irg, chordal_env->main_env->arch_env, bel.cls);
+ bel.uses = be_begin_uses(chordal_env->irg, chordal_env->birg->main_env->arch_env, bel.cls);
bel.senv = be_new_spill_env(dbg, chordal_env, is_mem_phi, NULL);
bel.reloads = pset_new_ptr_default();
bel.copies = pset_new_ptr_default();
static INLINE int can_remat(const spill_ilp_t *si, const ir_node *irn, pset *live)
{
int i, n;
- const arch_env_t *arch_env = si->chordal_env->main_env->arch_env;
+ const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
int remat = (arch_irn_get_flags(arch_env, irn) & arch_irn_flags_rematerializable) != 0;
for(i = 0, n = get_irn_arity(irn); i < n && remat; ++i) {
static firm_dbg_module_t *dbg = NULL;
#define DUMP_GRAPHS
-#define get_chordal_arch(ce) ((ce)->main_env->arch_env)
+#define get_chordal_arch(ce) ((ce)->birg->main_env->arch_env)
#define get_reg(irn) arch_get_irn_register(get_chordal_arch(chordal_env), irn)
#define set_reg(irn, reg) arch_set_irn_register(get_chordal_arch(chordal_env), irn, reg)
perm = be_new_Perm(chordal_env->cls, irg, pred_bl, n_projs, in);
free(in);
- insert_after = sched_skip(sched_last(pred_bl), 0, sched_skip_cf_predicator, chordal_env->main_env->arch_env);
+ insert_after = sched_skip(sched_last(pred_bl), 0, sched_skip_cf_predicator, chordal_env->birg->main_env->arch_env);
sched_add_after(insert_after, perm);
/*
assert(get_irn_mode(phi) == get_irn_mode(dupl));
set_irn_n(phi, i, dupl);
set_reg(dupl, phi_reg);
- sched_add_after(sched_skip(sched_last(arg_block), 0, sched_skip_cf_predicator, chordal_env->main_env->arch_env), dupl);
+ sched_add_after(sched_skip(sched_last(arg_block), 0, sched_skip_cf_predicator, chordal_env->birg->main_env->arch_env), dupl);
pin_irn(dupl, phi_block);
DBG((dbg, LEVEL_1, " they do interfere: insert %+F(%s)\n", dupl, get_reg(dupl)->name));
continue; /* with next argument */
void dump_ir_block_graph_sched(ir_graph *irg, const char *suffix) {
DUMP_NODE_EDGE_FUNC old = get_dump_node_edge_hook();
- dump_consts_local(0);
+ dump_consts_local(0);
set_dump_node_edge_hook(sched_edge_hook);
dump_ir_block_graph(irg, suffix);
set_dump_node_edge_hook(old);