* @brief Spill module selection; Preparation steps
* @author Matthias Braun
* @date 29.09.2005
 */
#include "config.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
typedef struct be_pre_spill_env_t {
- be_irg_t *birg;
+ ir_graph *irg;
const arch_register_class_t *cls;
} be_pre_spill_env_t;
{
const arch_register_class_t *cls = env->cls;
ir_node *block = get_nodes_block(node);
- const be_irg_t *birg = env->birg;
- be_lv_t *lv = birg->lv;
+ const ir_graph *irg = env->irg;
+ be_irg_t *birg = be_birg_from_irg(irg);
+ be_lv_t *lv = be_get_irg_liveness(irg);
unsigned *tmp = NULL;
unsigned *def_constr = NULL;
int arity = get_irn_arity(node);
+ ir_node *def;
int i, i2;
/* Insert a copy for constraint inputs attached to a value which can't
	 * fulfill the constraint
* (typical example: stack pointer as input to copyb)
* TODO: This really just checks precolored registers at the moment and
	 * ignores the general case of not matching in/out constraints
*/
for (i = 0; i < arity; ++i) {
- ir_node *op = get_irn_n(node, i);
- ir_node *copy;
- const arch_register_t *reg;
- const arch_register_req_t *req;
+ ir_node *op = get_irn_n(node, i);
+ const arch_register_req_t *req = arch_get_irn_register_req_in(node, i);
+ const arch_register_t *reg;
+ ir_node *copy;
- req = arch_get_register_req(node, i);
if (req->cls != cls)
continue;
reg = arch_get_irn_register(op);
/* precolored with an ignore register (which is not a joker like
unknown/noreg) */
- if (arch_register_type_is(reg, joker)
- || !arch_register_type_is(reg, ignore))
+ if ((reg->type & arch_register_type_joker) ||
+ rbitset_is_set(birg->allocatable_regs, reg->global_index))
continue;
if (! (req->type & arch_register_req_type_limited))
if (rbitset_is_set(req->limited, reg->index))
continue;
- copy = be_new_Copy(cls, block, op);
+ copy = be_new_Copy(block, op);
stat_ev_int("constr_copy", 1);
sched_add_before(node, copy);
set_irn_n(node, i, copy);
- DBG((dbg, LEVEL_3, "inserting ignore arg copy %+F for %+F pos %d\n", copy, node, i));
+ DBG((dbg, LEVEL_3, "inserting ignore arg copy %+F for %+F pos %d\n",
+ copy, node, i));
}
/* insert copies for nodes that occur constrained more than once. */
ir_node *copy;
const arch_register_req_t *req;
- req = arch_get_register_req(node, i);
+ req = arch_get_irn_register_req_in(node, i);
if (req->cls != cls)
continue;
ir_node *in2;
const arch_register_req_t *req2;
- req2 = arch_get_register_req(node, i2);
+ req2 = arch_get_irn_register_req_in(node, i2);
if (req2->cls != cls)
continue;
if (! (req2->type & arch_register_req_type_limited))
/* if the constraint is the same, no copy is necessary
* TODO generalise unequal but overlapping constraints */
- if (rbitset_equal(req->limited, req2->limited, cls->n_regs))
+ if (rbitsets_equal(req->limited, req2->limited, cls->n_regs))
continue;
- copy = be_new_Copy(cls, block, in);
+ copy = be_new_Copy(block, in);
stat_ev_int("constr_copy", 1);
sched_add_before(node, copy);
}
/* collect all registers occurring in out constraints. */
- if (get_irn_mode(node) == mode_T) {
- const ir_edge_t *edge;
-
- foreach_out_edge(node, edge) {
- ir_node *proj = get_edge_src_irn(edge);
- const arch_register_req_t *req = arch_get_register_req_out(proj);
- if (! (req->type & arch_register_req_type_limited))
- continue;
-
- if (def_constr == NULL) {
- rbitset_alloca(def_constr, cls->n_regs);
- }
- rbitset_or(def_constr, req->limited, cls->n_regs);
- }
- } else {
- const arch_register_req_t *req = arch_get_register_req_out(node);
- if (req->type & arch_register_req_type_limited) {
+ be_foreach_definition(node, cls, def,
+ if (! (req_->type & arch_register_req_type_limited))
+ continue;
+ if (def_constr == NULL) {
rbitset_alloca(def_constr, cls->n_regs);
- rbitset_or(def_constr, req->limited, cls->n_regs);
}
- }
+ rbitset_or(def_constr, req_->limited, cls->n_regs);
+ );
/* no output constraints => we're good */
if (def_constr == NULL) {
* 2) lives through the node.
* 3) is constrained to a register occurring in out constraints.
*/
- req = arch_get_register_req(node, i);
+ req = arch_get_irn_register_req_in(node, i);
if (req->cls != cls)
continue;
if (!(req->type & arch_register_req_type_limited))
if (be_is_Copy(in))
continue;
- copy = be_new_Copy(cls, block, in);
+ copy = be_new_Copy(block, in);
sched_add_before(node, copy);
set_irn_n(node, i, copy);
DBG((dbg, LEVEL_3, "inserting constr copy %+F for %+F pos %d\n",
/**
 * Block walker: runs prepare_constr_insn() on every scheduled
 * instruction of the block.
 *
 * @param block  the block being visited
 * @param data   the be_pre_spill_env_t set up by the caller
 */
static void pre_spill_prepare_constr_walker(ir_node *block, void *data)
{
	be_pre_spill_env_t *env = (be_pre_spill_env_t*)data;
	/* node is declared by the sched_foreach iterator macro */
	sched_foreach(block, node) {
		prepare_constr_insn(env, node);
	}
}
-void be_pre_spill_prepare_constr(be_irg_t *birg,
+void be_pre_spill_prepare_constr(ir_graph *irg,
const arch_register_class_t *cls)
{
- ir_graph *irg = birg->irg;
be_pre_spill_env_t env;
memset(&env, 0, sizeof(env));
- env.birg = birg;
- env.cls = cls;
+ env.irg = irg;
+ env.cls = cls;
- be_assure_liveness(birg);
+ be_assure_live_sets(irg);
irg_block_walk_graph(irg, pre_spill_prepare_constr_walker, NULL, &env);
}
be_add_module_to_list(&spillers, name, spiller);
}
-void be_do_spill(be_irg_t *birg, const arch_register_class_t *cls)
+void be_do_spill(ir_graph *irg, const arch_register_class_t *cls)
{
assert(selected_spiller != NULL);
- selected_spiller->spill(birg, cls);
+ selected_spiller->spill(irg, cls);
}
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spilloptions);
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spilloptions)
void be_init_spilloptions(void)
{
lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
lc_opt_entry_t *spill_grp = lc_opt_get_grp(be_grp, "spill");
lc_opt_add_table(spill_grp, be_spill_options);
- be_add_module_list_opt(spill_grp, "spiller", "spill algorithm",
+ be_add_module_list_opt(be_grp, "spiller", "spill algorithm",
&spillers, (void**) &selected_spiller);
FIRM_DBG_REGISTER(dbg, "firm.be.spillprepare");