+static
+spill_info_t *get_spillinfo(const spill_env_t *env, ir_node *value)
+{
+	spill_info_t  query;
+	spill_info_t *found;
+	int           hash = nodeset_hash(value);
+
+	/* look up the spill info attached to this value; the set is keyed on
+	 * the to_spill node */
+	query.to_spill = value;
+	found = set_find(env->spills, &query, sizeof(query), hash);
+	if (found != NULL)
+		return found;
+
+	/* first time we see this value: insert a fresh, empty entry */
+	query.reloaders  = NULL;
+	query.spill      = NULL;
+	query.old_spill  = NULL;
+	query.reload_cls = NULL;
+	return set_insert(env->spills, &query, sizeof(query), hash);
+}
+
+spill_env_t *be_new_spill_env(be_irg_t *birg)
+{
+	const arch_env_t *arch_env = birg->main_env->arch_env;
+	spill_env_t      *env      = xmalloc(sizeof(env[0]));
+
+	/* allocate and initialise a fresh spill environment for this irg */
+	env->irg         = be_get_birg_irg(birg);
+	env->birg        = birg;
+	env->arch_env    = arch_env;
+	env->spills      = new_set(cmp_spillinfo, 1024);
+	env->spill_cost  = arch_env->isa->spill_cost;
+	env->reload_cost = arch_env->isa->reload_cost;
+	env->exec_freq   = be_get_birg_exec_freq(birg);
+	ir_nodeset_init(&env->mem_phis);
+	obstack_init(&env->obst);
+
+#ifdef FIRM_STATISTICS
+	/* statistics counters start at zero */
+	env->spill_count       = 0;
+	env->reload_count      = 0;
+	env->remat_count       = 0;
+	env->spilled_phi_count = 0;
+#endif
+
+	return env;
+}
+
+void be_delete_spill_env(spill_env_t *env)
+{
+	/* release every resource owned by the spill environment, then the
+	 * environment itself */
+	ir_nodeset_destroy(&env->mem_phis);
+	del_set(env->spills);
+	obstack_free(&env->obst, NULL);
+	free(env);
+}
+
+/*
+ * ____ _ ____ _ _
+ * | _ \| | __ _ ___ ___ | _ \ ___| | ___ __ _ __| |___
+ * | |_) | |/ _` |/ __/ _ \ | |_) / _ \ |/ _ \ / _` |/ _` / __|
+ * | __/| | (_| | (_| __/ | _ < __/ | (_) | (_| | (_| \__ \
+ * |_| |_|\__,_|\___\___| |_| \_\___|_|\___/ \__,_|\__,_|___/
+ *
+ */
+
+void be_add_remat(spill_env_t *env, ir_node *to_spill, ir_node *before,
+                  ir_node *rematted_node)
+{
+	spill_info_t *info = get_spillinfo(env, to_spill);
+	reloader_t   *remat;
+
+	/* record the rematerialisation as an entry in the reloader list */
+	remat = obstack_alloc(&env->obst, sizeof(remat[0]));
+	remat->reloader      = before;
+	remat->rematted_node = rematted_node;
+	/* We will never have a cost win over a reload since we're not even
+	 * allowed to create a reload */
+	remat->remat_cost_delta = 0;
+
+	/* push onto the front of the list */
+	remat->next     = info->reloaders;
+	info->reloaders = remat;
+
+	DBG((dbg, LEVEL_1, "creating spillinfo for %+F, will be rematerialized before %+F\n",
+		to_spill, before));
+}
+
+void be_add_reload(spill_env_t *env, ir_node *to_spill, ir_node *before,
+                   const arch_register_class_t *reload_cls, int allow_remat)
+{
+	spill_info_t *info;
+	reloader_t *rel;
+
+	info = get_spillinfo(env, to_spill);
+
+	if (is_Phi(to_spill)) {
+		int i, arity;
+
+		/* create spillinfos for the phi arguments */
+		for (i = 0, arity = get_irn_arity(to_spill); i < arity; ++i) {
+			ir_node *arg = get_irn_n(to_spill, i);
+			get_spillinfo(env, arg);
+		}
+
+#if 1
+		/* hackery... sometimes the morgan algo spilled the value of a phi,
+		 * the belady algo decides later to spill the whole phi, then sees the
+		 * spill node and adds a reload for that spill node, problem is the
+		 * reload gets attach to that same spill (and is totally unnecessary)
+		 */
+		if (info->old_spill != NULL &&
+			(before == info->old_spill || value_dominates(before, info->old_spill)))
+		{
+			/* use the module's debug facility instead of a stray printf to
+			 * stdout (every other message here goes through DBG) */
+			DBG((dbg, LEVEL_1, "spilledphi hack was needed...\n"));
+			before = sched_next(info->old_spill);
+		}
+#endif
+	}
+
+	/* reloads are inserted into the schedule; Proj and Keep nodes are not
+	 * valid insertion points */
+	assert(!is_Proj(before) && !be_is_Keep(before));
+
+	/* put reload into list */
+	rel = obstack_alloc(&env->obst, sizeof(rel[0]));
+	rel->next          = info->reloaders;
+	rel->reloader      = before;
+	rel->rematted_node = NULL;
+	if (!allow_remat) {
+		rel->remat_cost_delta = REMAT_COST_INFINITE;
+	} else {
+		rel->remat_cost_delta = 0;
+	}
+
+	info->reloaders = rel;
+	/* a value is only ever reloaded into one register class */
+	assert(info->reload_cls == NULL || info->reload_cls == reload_cls);
+	info->reload_cls = reload_cls;
+
+	DBG((dbg, LEVEL_1, "creating spillinfo for %+F, will be reloaded before %+F, may%s be rematerialized\n",
+		to_spill, before, allow_remat ? "" : " not"));
+}
+
+ir_node *be_get_end_of_block_insertion_point(const ir_node *block)
+{
+ ir_node *last = sched_last(block);
+
+ /* we might have projs and keepanys behind the jump... */
+ while(is_Proj(last) || be_is_Keep(last)) {
+ last = sched_prev(last);
+ assert(!sched_is_end(last));
+ }
+
+ if(!is_cfop(last)) {
+ last = sched_next(last);
+ /* last node must be a cfop, only exception is the start block */
+ assert(last == get_irg_start_block(get_irn_irg(block)));
+ }
+
+ /* add the reload before the (cond-)jump */
+ return last;