/*
 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief       Spill module selection; Preparation steps
 * @author      Matthias Braun
 * @date        29.09.2005
 */
-#ifdef HAVE_CONFIG_H
#include "config.h"
-#endif
-
-#include <stdlib.h>
-#include "pset.h"
-#include "irnode_t.h"
-#include "ircons_t.h"
-#include "iredges_t.h"
-#include "ident_t.h"
-#include "type_t.h"
-#include "entity_t.h"
+#include "irtools.h"
#include "debug.h"
+#include "iredges_t.h"
+#include "raw_bitset.h"
+#include "statev.h"
#include "irgwalk.h"
-#include "besched.h"
#include "bespill.h"
-#include "benode_t.h"
-#include "bechordal_t.h"
-
-typedef struct _reloader_t reloader_t;
-typedef struct _spill_info_t spill_info_t;
-
-struct _reloader_t {
- reloader_t *next;
- ir_node *reloader;
-};
+#include "bemodule.h"
+#include "be.h"
+#include "belive_t.h"
+#include "beirg.h"
+#include "bearch.h"
+#include "benode.h"
+#include "besched.h"
+#include "bera.h"
+#include "beintlive_t.h"
-struct _spill_info_t {
- ir_node *spilled_node;
- reloader_t *reloaders;
-};
+#include "lc_opts.h"
+#include "lc_opts_enum.h"
-typedef struct _spill_ctx_t {
- ir_node *spilled; /**< The spilled node. */
- ir_node *user; /**< The node this spill is for. */
- ir_node *spill; /**< The spill itself. */
-} spill_ctx_t;
+DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
-struct _spill_env_t {
- firm_dbg_module_t *dbg;
+typedef struct be_pre_spill_env_t {
+ ir_graph *irg;
const arch_register_class_t *cls;
- const be_chordal_env_t *chordal_env;
- struct obstack obst;
- set *spill_ctxs;
- set *spills; /**< all spill_info_t's, which must be placed */
- pset *mem_phis; /**< set of all special spilled phis. allocated and freed seperately */
- decide_irn_t is_mem_phi; /**< callback func to decide if a phi needs special spilling */
- void *data; /**< data passed to all callbacks */
-};
-
-static int cmp_spillctx(const void *a, const void *b, size_t n) {
- const spill_ctx_t *p = a;
- const spill_ctx_t *q = b;
- return !(p->user == q->user && p->spilled == q->spilled);
-}
-
-static int cmp_spillinfo(const void *x, const void *y, size_t size) {
- const spill_info_t *xx = x;
- const spill_info_t *yy = y;
- return ! (xx->spilled_node == yy->spilled_node);
-}
-
-spill_env_t *be_new_spill_env(firm_dbg_module_t *dbg,
- const be_chordal_env_t *chordal_env,
- decide_irn_t is_mem_phi, void *data) {
-
- spill_env_t *env = malloc(sizeof(env[0]));
- env->spill_ctxs = new_set(cmp_spillctx, 1024);
- env->spills = new_set(cmp_spillinfo, 1024);
- env->cls = chordal_env->cls;
- env->dbg = dbg;
- env->is_mem_phi = is_mem_phi;
- env->data = data;
- env->chordal_env = chordal_env;
- obstack_init(&env->obst);
- return env;
-}
-
-void be_delete_spill_env(spill_env_t *senv) {
- del_set(senv->spill_ctxs);
- del_set(senv->spills);
- obstack_free(&senv->obst, NULL);
- free(senv);
-}
-
-static spill_ctx_t *be_get_spill_ctx(set *sc, ir_node *to_spill, ir_node *ctx_irn) {
- spill_ctx_t templ;
-
- templ.spilled = to_spill;
- templ.user = ctx_irn;
- templ.spill = NULL;
-
- return set_insert(sc, &templ, sizeof(templ), HASH_COMBINE(HASH_PTR(to_spill), HASH_PTR(ctx_irn)));
-}
-
-static ir_node *be_spill_irn(spill_env_t *senv, ir_node *irn, ir_node *ctx_irn) {
- spill_ctx_t *ctx;
- DBG((senv->dbg, LEVEL_1, "%+F in ctx %+F\n", irn, ctx_irn));
-
- ctx = be_get_spill_ctx(senv->spill_ctxs, irn, ctx_irn);
- if(!ctx->spill) {
- const be_main_env_t *env = senv->chordal_env->birg->main_env;
- ctx->spill = be_spill(env->arch_env, irn, ctx_irn);
- }
-
- return ctx->spill;
-}
-
-/**
- * If the first usage of a phi result would be out of memory
- * there is no sense in allocating a register for it.
- * Thus we spill it and all its operands to the same spill slot.
- * Therefore the phi/dataB becomes a phi/Memory
- */
-static ir_node *be_spill_phi(spill_env_t *senv, ir_node *phi, ir_node *ctx_irn) {
- int i, n = get_irn_arity(phi);
- ir_node **ins, *bl = get_nodes_block(phi);
- ir_graph *irg = senv->chordal_env->irg;
- spill_ctx_t *ctx;
-
- assert(is_Phi(phi));
- DBG((senv->dbg, LEVEL_1, "%+F in ctx %+F\n", phi, ctx_irn));
-
- /* search an existing spill for this context */
- ctx = be_get_spill_ctx(senv->spill_ctxs, phi, ctx_irn);
-
- /* if not found spill the phi */
- if(!ctx->spill) {
- /* build a new PhiM with dummy in-array */
- ins = malloc(n * sizeof(ins[0]));
- for(i=0; i<n; ++i)
- ins[i] = new_r_Unknown(irg, mode_M);
- ctx->spill = new_r_Phi(senv->chordal_env->irg, bl, n, ins, mode_M);
- free(ins);
-
- /* re-wire the phiM */
- for(i=0; i<n; ++i) {
- ir_node *arg = get_irn_n(phi, i);
- ir_node *sub_res;
-
- if(is_Phi(arg) && pset_find_ptr(senv->mem_phis, arg))
- sub_res = be_spill_phi(senv, arg, ctx_irn);
- else
- sub_res = be_spill_irn(senv, arg, ctx_irn);
-
- set_irn_n(ctx->spill, i, sub_res);
- }
- }
- return ctx->spill;
-}
-
-static ir_node *be_spill_node(spill_env_t *senv, ir_node *to_spill) {
- ir_node *res;
- if (pset_find_ptr(senv->mem_phis, to_spill))
- res = be_spill_phi(senv, to_spill, to_spill);
- else
- res = be_spill_irn(senv, to_spill, to_spill);
-
- return res;
-}
-
-static void phi_walker(ir_node *irn, void *env) {
- spill_env_t *senv = env;
- const arch_env_t *arch = senv->chordal_env->birg->main_env->arch_env;
-
- if (is_Phi(irn) && arch_irn_has_reg_class(arch, irn, 0, senv->cls)
- && senv->is_mem_phi(irn, senv->data)) {
- DBG((senv->dbg, LEVEL_1, " %+F\n", irn));
- pset_insert_ptr(senv->mem_phis, irn);
+} be_pre_spill_env_t;
+
/**
 * Insert Copy nodes in front of @p node so that register-constraint
 * conflicts on its operands become solvable for the allocator:
 *  1) an operand precolored with a fixed (non-allocatable, non-joker)
 *     register that does not satisfy the input constraint gets a Copy;
 *  2) a value used in several differently-constrained input positions of
 *     the same node gets a Copy for the additional positions;
 *  3) a constrained operand that lives through the node and whose
 *     constraint overlaps some output constraint gets a Copy, so input
 *     and output need not share a register.
 *
 * @param env   walker environment (graph and register class)
 * @param node  the instruction whose operands are checked
 */
static void prepare_constr_insn(be_pre_spill_env_t *env, ir_node *node)
{
	const arch_register_class_t *cls = env->cls;
	ir_node *block = get_nodes_block(node);
	const ir_graph *irg = env->irg;
	be_irg_t *birg = be_birg_from_irg(irg);
	be_lv_t *lv = be_get_irg_liveness(irg);
	unsigned *tmp = NULL;
	unsigned *def_constr = NULL;  /* union of all limited out constraints; lazily allocated */
	int arity = get_irn_arity(node);
	ir_node *def;

	int i, i2;

	/* Insert a copy for constraint inputs attached to a value which can't
	 * fulfill the constraint
	 * (typical example: stack pointer as input to copyb)
	 * TODO: This really just checks precolored registers at the moment and
	 * ignores the general case of not matching in/out constraints
	 */
	for (i = 0; i < arity; ++i) {
		ir_node *op = get_irn_n(node, i);
		const arch_register_req_t *req = arch_get_irn_register_req_in(node, i);
		const arch_register_t *reg;
		ir_node *copy;

		if (req->cls != cls)
			continue;
		reg = arch_get_irn_register(op);
		if (reg == NULL)
			continue;

		/* precolored with an ignore register (which is not a joker like
		   unknown/noreg) */
		if ((reg->type & arch_register_type_joker) ||
		    rbitset_is_set(birg->allocatable_regs, reg->global_index))
			continue;

		if (! (req->type & arch_register_req_type_limited))
			continue;
		if (rbitset_is_set(req->limited, reg->index))
			continue;

		copy = be_new_Copy(block, op);
		stat_ev_int("constr_copy", 1);
		sched_add_before(node, copy);
		set_irn_n(node, i, copy);
		DBG((dbg, LEVEL_3, "inserting ignore arg copy %+F for %+F pos %d\n",
		     copy, node, i));
	}

	/* insert copies for nodes that occur constrained more than once. */
	for (i = 0; i < arity; ++i) {
		ir_node *in;
		ir_node *copy;
		const arch_register_req_t *req;

		req = arch_get_irn_register_req_in(node, i);
		if (req->cls != cls)
			continue;

		if (! (req->type & arch_register_req_type_limited))
			continue;

		in = get_irn_n(node, i);
		if (!arch_irn_consider_in_reg_alloc(cls, in))
			continue;

		/* look for later uses of the same value with a different
		 * limited constraint */
		for (i2 = i + 1; i2 < arity; ++i2) {
			ir_node *in2;
			const arch_register_req_t *req2;

			req2 = arch_get_irn_register_req_in(node, i2);
			if (req2->cls != cls)
				continue;
			if (! (req2->type & arch_register_req_type_limited))
				continue;

			in2 = get_irn_n(node, i2);
			if (in2 != in)
				continue;

			/* if the constraint is the same, no copy is necessary
			 * TODO generalise unequal but overlapping constraints */
			if (rbitsets_equal(req->limited, req2->limited, cls->n_regs))
				continue;

			copy = be_new_Copy(block, in);
			stat_ev_int("constr_copy", 1);

			sched_add_before(node, copy);
			set_irn_n(node, i2, copy);
			DBG((dbg, LEVEL_3,
			     "inserting multiple constr copy %+F for %+F pos %d\n",
			     copy, node, i2));
		}
	}

	/* collect all registers occurring in out constraints. */
	be_foreach_definition(node, cls, def,
		if (! (req_->type & arch_register_req_type_limited))
			continue;
		if (def_constr == NULL) {
			rbitset_alloca(def_constr, cls->n_regs);
		}
		rbitset_or(def_constr, req_->limited, cls->n_regs);
	);

	/* no output constraints => we're good */
	if (def_constr == NULL) {
		return;
	}

	/*
	 * insert copies for all constrained arguments living through the node
	 * and being constrained to a register which also occurs in out constraints.
	 */
	rbitset_alloca(tmp, cls->n_regs);
	for (i = 0; i < arity; ++i) {
		const arch_register_req_t *req;
		ir_node *in;
		ir_node *copy;

		/*
		 * Check, if
		 * 1) the operand is constrained.
		 * 2) lives through the node.
		 * 3) is constrained to a register occurring in out constraints.
		 */
		req = arch_get_irn_register_req_in(node, i);
		if (req->cls != cls)
			continue;
		if (!(req->type & arch_register_req_type_limited))
			continue;

		in = get_irn_n(node, i);
		if (!arch_irn_consider_in_reg_alloc(cls, in))
			continue;
		if (!be_values_interfere(lv, node, in))
			continue;

		/* overlap of input constraint with the union of out constraints? */
		rbitset_copy(tmp, req->limited, cls->n_regs);
		rbitset_and(tmp, def_constr, cls->n_regs);

		if (rbitset_is_empty(tmp, cls->n_regs))
			continue;

		/*
		 * only create the copy if the operand is no copy.
		 * this is necessary since the assure constraints phase inserts
		 * Copies and Keeps for operands which must be different from the
		 * results. Additional copies here would destroy this.
		 */
		if (be_is_Copy(in))
			continue;

		copy = be_new_Copy(block, in);
		sched_add_before(node, copy);
		set_irn_n(node, i, copy);
		DBG((dbg, LEVEL_3, "inserting constr copy %+F for %+F pos %d\n",
		     copy, node, i));
		be_liveness_update(lv, in);
	}
}
-void be_add_reload_on_edge(spill_env_t *senv, ir_node *to_spill, ir_node *bl, int pos) {
- ir_node *insert_bl = get_irn_arity(bl) == 1 ? sched_first(bl) : get_Block_cfgpred_block(bl, pos);
- be_add_reload(senv, to_spill, insert_bl);
+static void pre_spill_prepare_constr_walker(ir_node *block, void *data)
+{
+ be_pre_spill_env_t *env = (be_pre_spill_env_t*)data;
+ sched_foreach(block, node) {
+ prepare_constr_insn(env, node);
+ }
}
+void be_pre_spill_prepare_constr(ir_graph *irg,
+ const arch_register_class_t *cls)
+{
+ be_pre_spill_env_t env;
+ memset(&env, 0, sizeof(env));
+ env.irg = irg;
+ env.cls = cls;
+ be_assure_live_sets(irg);
-/****************************************
-
- SPILL SLOT MANAGEMENT AND OPTS
-
-****************************************/
-
-typedef struct _spill_slot_t {
- unsigned size;
- unsigned align;
- pset *members;
- ir_mode *largest_mode; /* the mode of all members with largest size */
-} spill_slot_t;
-
-typedef struct _ss_env_t {
- firm_dbg_module_t *dbg;
- struct obstack ob;
- be_chordal_env_t *cenv;
- pmap *slots; /* maps spill_contexts to spill_slots */
- pmap *types; /* maps modes to types */
-} ss_env_t;
-
-
-static void compute_spill_slots_walker(ir_node *spill, void *env) {
- ss_env_t *ssenv = env;
- ir_node *ctx;
- pmap_entry *entry;
- spill_slot_t *ss;
-
- if (!be_is_Spill(spill))
- return;
-
- /* check, if this spill is for a context already known */
- ctx = be_get_Spill_context(spill);
- entry = pmap_find(ssenv->slots, ctx);
-
- if (!entry) {
- /* this is a new spill context */
- ss = obstack_alloc(&ssenv->ob, sizeof(*ss));
- ss->members = pset_new_ptr(8);
- ss->largest_mode = get_irn_mode(get_irn_n(spill, 0));
- ss->size = get_mode_size_bytes(ss->largest_mode);
- ss->align = ss->size; /* TODO Assumed for now */
- pmap_insert(ssenv->slots, ctx, ss);
- } else {
- ir_node *irn;
- /* values with the same spill_ctx must go into the same spill slot */
- ss = entry->value;
- assert(ss->size == (unsigned)get_mode_size_bytes(get_irn_mode(get_irn_n(spill, 0))) && "Different sizes for the same spill slot are not allowed yet.");
- for (irn = pset_first(ss->members); irn; irn = pset_next(ss->members)) {
- /* use values_interfere here, because it uses the dominance check,
- which does work for values in memory */
- assert(!values_interfere(spill, irn) && "Spills for the same spill slot must not interfere!");
- }
- }
-
- pset_insert_ptr(ss->members, spill);
+ irg_block_walk_graph(irg, pre_spill_prepare_constr_walker, NULL, &env);
}
-static int ss_sorter(const void *v1, const void *v2) {
- const spill_slot_t *ss1 = v1;
- const spill_slot_t *ss2 = v2;
- return ((int) ss2->size) - ((int) ss1->size);
-}
-/**
- * This function should optimize the spill slots.
- * - Coalescing of multiple slots
- * - Ordering the slots
- *
- * Input slots are in @p ssenv->slots
- * @p size The count of initial spill slots in @p ssenv->slots
- * This also is the size of the preallocated array @p ass
- *
- * @return An array of spill slots @p ass in specific order
- **/
-static void optimize_slots(ss_env_t *ssenv, int size, spill_slot_t **ass) {
- int i, o, used_slots;
- pmap_entry *entr;
-
- i=0;
- pmap_foreach(ssenv->slots, entr)
- ass[i++] = entr->value;
-
- /* Sort the array to minimize fragmentation and cache footprint.
- Large slots come first */
- qsort(ass, size, sizeof(ass[0]), ss_sorter);
-
- /* For each spill slot:
- - assign a new offset to this slot
- - xor find another slot to coalesce with */
- used_slots = 0;
- for (i=0; i<size; ++i) { /* for each spill slot */
- ir_node *n1;
- int tgt_slot = -1;
-
- DBG((ssenv->dbg, LEVEL_1, "Spill slot %d members:\n", i));
- for(n1 = pset_first(ass[i]->members); n1; n1 = pset_next(ass[i]->members))
- DBG((ssenv->dbg, LEVEL_1, " %+F\n", n1));
-
-
- for (o=0; o < used_slots && tgt_slot == -1; ++o) { /* for each offset-assigned spill slot */
- /* check inter-slot-pairs for interference */
- ir_node *n2;
- for(n1 = pset_first(ass[i]->members); n1; n1 = pset_next(ass[i]->members))
- for(n2 = pset_first(ass[o]->members); n2; n2 = pset_next(ass[o]->members))
- if(values_interfere(n1, n2)) {
- pset_break(ass[i]->members);
- pset_break(ass[o]->members);
- DBG((ssenv->dbg, LEVEL_1, " Interf %+F -- %+F\n", n1, n2));
- goto interf_detected;
- }
-
- /* if we are here, there is no interference between ass[i] and ass[o] */
- tgt_slot = o;
-
-interf_detected: /*nothing*/ ;
- }
+int be_coalesce_spill_slots = 1;
+int be_do_remats = 1;
- /* now the members of ass[i] join the members of ass[tgt_slot] */
+static const lc_opt_table_entry_t be_spill_options[] = {
+ LC_OPT_ENT_BOOL ("coalesce_slots", "coalesce the spill slots", &be_coalesce_spill_slots),
+ LC_OPT_ENT_BOOL ("remat", "try to rematerialize values instead of reloading", &be_do_remats),
+ LC_OPT_LAST
+};
- /* do we need a new slot? */
- if (tgt_slot == -1) {
- tgt_slot = used_slots;
- used_slots++;
+static be_module_list_entry_t *spillers = NULL;
+static const be_spiller_t *selected_spiller = NULL;
- /* init slot */
- if (tgt_slot != i) {
- ass[tgt_slot]->size = ass[i]->size;
- del_pset(ass[tgt_slot]->members);
- ass[tgt_slot]->members = pset_new_ptr(8);
- }
- }
-
- /* copy the members to the target pset */
- /* NOTE: If src and tgt pset are the same, inserting while iterating is not allowed */
- if (tgt_slot != i)
- for(n1 = pset_first(ass[i]->members); n1; n1 = pset_next(ass[i]->members))
- pset_insert_ptr(ass[tgt_slot]->members, n1);
- }
+void be_register_spiller(const char *name, be_spiller_t *spiller)
+{
+ if (selected_spiller == NULL)
+ selected_spiller = spiller;
+ be_add_module_to_list(&spillers, name, spiller);
}
-#define ALIGN_SPILL_AREA 16
-#define pset_foreach(pset, elm) for(elm=pset_first(pset); elm; elm=pset_next(pset))
+void be_do_spill(ir_graph *irg, const arch_register_class_t *cls)
+{
+ assert(selected_spiller != NULL);
-/**
- * Returns a spill type for a mode. Keep them in a map to reduce
- * the number of types.
- */
-static ir_type *get_spill_type(pmap *types, ir_mode *mode) {
- pmap_entry *e = pmap_find(types, mode);
- ir_type *res;
-
- if (! e) {
- char buf[64];
- snprintf(buf, sizeof(buf), "spill_slot_type_%s", get_mode_name(mode));
- res = new_type_primitive(new_id_from_str(buf), mode);
- pmap_insert(types, mode, res);
- }
- else
- res = e->value;
- return res;
+ selected_spiller->spill(irg, cls);
}
-static void assign_entities(ss_env_t *ssenv, int n, spill_slot_t **ss) {
- int i, offset;
- ir_type *frame = get_irg_frame_type(ssenv->cenv->irg);
-
- /* aligning by increasing frame size */
- offset = get_type_size_bits(frame) / 8;
- offset = round_up2(offset, ALIGN_SPILL_AREA);
- set_type_size_bytes(frame, -1);
-
- /* create entities and assign offsets according to size and alignment*/
- for (i=0; i<n; ++i) {
- char buf[64];
- ident *name;
- entity *spill_ent;
- ir_node *irn;
-
- /* build entity */
- snprintf(buf, sizeof(buf), "spill_slot_%d", i);
- name = new_id_from_str(buf);
-
- spill_ent = new_entity(frame, name, get_spill_type(ssenv->types, ss[i]->largest_mode));
-
- /* align */
- offset = round_up2(offset, ss[i]->align);
- /* set */
- set_entity_offset_bytes(spill_ent, offset);
- /* next possible offset */
- offset += ss[i]->size;
-
- pset_foreach(ss[i]->members, irn)
- be_set_Spill_entity(irn, spill_ent);
- }
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spilloptions)
+void be_init_spilloptions(void)
+{
+ lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
+ lc_opt_entry_t *spill_grp = lc_opt_get_grp(be_grp, "spill");
- /* set final size of stack frame */
- set_type_size_bytes(frame, offset);
-}
+ lc_opt_add_table(spill_grp, be_spill_options);
+ be_add_module_list_opt(be_grp, "spiller", "spill algorithm",
+ &spillers, (void**) &selected_spiller);
-void be_compute_spill_offsets(be_chordal_env_t *cenv) {
- ss_env_t ssenv;
- spill_slot_t **ss;
- int ss_size;
- pmap_entry *pme;
-
- obstack_init(&ssenv.ob);
- ssenv.cenv = cenv;
- ssenv.slots = pmap_create();
- ssenv.types = pmap_create();
- ssenv.dbg = firm_dbg_register("ir.be.spillslots");
-
- /* Get initial spill slots */
- irg_walk_graph(cenv->irg, NULL, compute_spill_slots_walker, &ssenv);
-
- /* Build an empty array for optimized spill slots */
- ss_size = pmap_count(ssenv.slots);
- ss = obstack_alloc(&ssenv.ob, ss_size * sizeof(*ss));
- optimize_slots(&ssenv, ss_size, ss);
-
- /* Integrate slots into the stack frame entity */
- assign_entities(&ssenv, ss_size, ss);
-
- /* Clean up */
- pmap_foreach(ssenv.slots, pme)
- del_pset(((spill_slot_t *)pme->value)->members);
- pmap_destroy(ssenv.slots);
- pmap_destroy(ssenv.types);
- obstack_free(&ssenv.ob, NULL);
-
- be_copy_entities_to_reloads(cenv->irg);
+ FIRM_DBG_REGISTER(dbg, "firm.be.spillprepare");
}