+/**
+ * The limited() callback installed into backend register requirements:
+ * fills @p bs with the admissible registers for the given be_req_t.
+ */
+static void be_limited(void *data, bitset_t *bs)
+{
+ be_req_t *req = data;
+
+ switch(req->kind) {
+ case be_req_kind_negate_old_limited:
+ case be_req_kind_old_limited:
+ req->x.old_limited.old_limited(req->x.old_limited.old_limited_env, bs);
+ if(req->kind == be_req_kind_negate_old_limited)
+ bitset_flip_all(bs);
+ break;
+ case be_req_kind_single_reg:
+ bitset_clear_all(bs);
+ bitset_set(bs, req->x.single_reg->index);
+ break;
+ }
+}
+
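+/**
+ * Retrieves the requirement record of an operand of a backend node.
+ * Non-negative positions select input requirements, negative positions
+ * select output requirements (pos == -1 denotes the first result).
+ */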
+static INLINE be_req_t *get_req(ir_node *irn, int pos)
+{
+	int idx = pos < 0 ? -(pos + 1) : pos;
+	be_node_attr_t *a = get_irn_attr(irn);
+	be_reg_data_t *rd;
+
+	assert(is_be_node(irn));
+	assert(!(pos >= 0) || pos < get_irn_arity(irn));
+	assert(!(pos < 0) || idx < a->max_reg_data);
+
+	rd = &a->reg_data[idx];
+	return pos < 0 ? &rd->req : &rd->in_req;
+}
+
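+/**
+ * Constrains an operand of a backend node to a single register.
+ */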
+void be_set_constr_single_reg(ir_node *irn, int pos, const arch_register_t *reg)
+{
+ be_req_t *r = get_req(irn, pos);
+
+ r->kind = be_req_kind_single_reg;
+ r->x.single_reg = reg;
+ r->req.limited = be_limited;
+ r->req.limited_env = r;
+ r->req.type = arch_register_req_type_limited;
+ r->req.cls = reg->reg_class;
+}
+
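+/**
+ * Constrains an operand of a backend node to the register set
+ * described by an existing limited requirement.
+ */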
+void be_set_constr_limited(ir_node *irn, int pos, const arch_register_req_t *req)
+{
+ be_req_t *r = get_req(irn, pos);
+
+ assert(arch_register_req_is(req, limited));
+
+ r->kind = be_req_kind_old_limited;
+ r->req.limited = be_limited;
+ r->req.limited_env = r;
+ r->req.type = arch_register_req_type_limited;
+ r->req.cls = req->cls;
+
+ r->x.old_limited.old_limited = req->limited;
+ r->x.old_limited.old_limited_env = req->limited_env;
+}
+
+void be_node_set_flags(ir_node *irn, int pos, arch_irn_flags_t flags)
+{
+ be_req_t *r = get_req(irn, pos);
+ r->flags = flags;
+}
+
+void be_node_set_reg_class(ir_node *irn, int pos, const arch_register_class_t *cls)
+{
+ be_req_t *r = get_req(irn, pos);
+ r->req.cls = cls;
+ if(r->req.type == arch_register_req_type_none)
+ r->req.type = arch_register_req_type_normal;
+}
+
+void be_node_set_req_type(ir_node *irn, int pos, arch_register_req_type_t type)
+{
+ be_req_t *r = get_req(irn, pos);
+ r->req.type = type;
+}
+
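+/* Accessors for the attributes of IncSP (stack pointer adjustment) nodes. */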
+void be_set_IncSP_offset(ir_node *irn, unsigned offset)
+{
+ be_stack_attr_t *a = get_irn_attr(irn);
+ assert(be_is_IncSP(irn));
+ a->offset = offset;
+}
+
+unsigned be_get_IncSP_offset(const ir_node *irn)
+{
+ be_stack_attr_t *a = get_irn_attr(irn);
+ assert(be_is_IncSP(irn));
+ return a->offset;
+}
+
+void be_set_IncSP_direction(ir_node *irn, be_stack_dir_t dir)
+{
+ be_stack_attr_t *a = get_irn_attr(irn);
+ assert(be_is_IncSP(irn));
+ a->dir = dir;
+}
+
+be_stack_dir_t be_get_IncSP_direction(const ir_node *irn)
+{
+ be_stack_attr_t *a = get_irn_attr(irn);
+ assert(be_is_IncSP(irn));
+ return a->dir;
+}
+
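+/**
+ * Sets the frame entity a Spill node spills to.
+ */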
+void be_set_Spill_entity(ir_node *irn, entity *ent)
+{
+ be_spill_attr_t *a = get_irn_attr(irn);
+ assert(be_is_Spill(irn));
+ a->frame_attr.ent = ent;
+}
+
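+/**
+ * Walker for find_a_spill(): performs a DFS over memory predecessors,
+ * descending through Phis, until a Spill node is found.
+ */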
+static ir_node *find_a_spill_walker(ir_node *irn, unsigned visited_nr)
+{
+	unsigned nr = get_irn_visited(irn);
+
+	set_irn_visited(irn, visited_nr);
+
+	if(is_Phi(irn)) {
+		int i, n;
+
+		/* Descend only into Phis not yet seen in this walk. */
+		if(nr < visited_nr) {
+			for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
+				ir_node *res = find_a_spill_walker(get_irn_n(irn, i), visited_nr);
+				if(res != NULL)
+					return res;
+			}
+		}
+	}
+	else if(be_get_irn_opcode(irn) == beo_Spill)
+		return irn;
+
+	return NULL;
+}
+
+ir_node *be_get_Spill_context(const ir_node *irn)
+{
+	const be_spill_attr_t *a = get_irn_attr(irn);
+	assert(be_is_Spill(irn));
+	return a->spill_ctx;
+}
+
+/**
+ * Finds the Spill node belonging to a Reload.
+ * If the Reload directly uses a Spill, this is trivial; otherwise we
+ * perform a DFS from the Reload's memory input (across memory Phis)
+ * and return the first Spill node found.
+ */
+static INLINE ir_node *find_a_spill(const ir_node *irn)
+{
+ ir_graph *irg = get_irn_irg(irn);
+ unsigned visited_nr = get_irg_visited(irg) + 1;
+
+ assert(be_is_Reload(irn));
+ set_irg_visited(irg, visited_nr);
+ return find_a_spill_walker(be_get_Reload_mem(irn), visited_nr);
+}
+
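+/**
+ * Returns the frame entity used by a Spill or Reload node.
+ * For a Reload, the entity is looked up at the corresponding Spill.
+ */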
+entity *be_get_spill_entity(const ir_node *irn)
+{
+	switch(be_get_irn_opcode(irn)) {
+	case beo_Reload:
+		{
+			ir_node *spill = find_a_spill(irn);
+			assert(spill != NULL && "no Spill attached to this Reload");
+			return be_get_spill_entity(spill);
+		}
+	case beo_Spill:
+		{
+			be_spill_attr_t *a = get_irn_attr(irn);
+			return a->frame_attr.ent;
+		}
+	default:
+		assert(0 && "Must give Spill/Reload node");
+		break;
+	}
+
+	return NULL;
+}
+
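+/**
+ * Walker: chains all Reload nodes of a graph into a list using their
+ * link fields.
+ */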
+static void link_reload_walker(ir_node *irn, void *data)
+{
+ ir_node **root = (ir_node **) data;
+ if(be_is_Reload(irn)) {
+ set_irn_link(irn, *root);
+ *root = irn;
+ }
+}
+
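+/**
+ * Collects all Reload nodes of the graph and copies the spill entity
+ * of the associated Spill into each Reload's frame attribute.
+ */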
+void be_copy_entities_to_reloads(ir_graph *irg)
+{
+ ir_node *irn = NULL;
+ irg_walk_graph(irg, link_reload_walker, NULL, (void *) &irn);
+
+ while(irn) {
+ be_frame_attr_t *a = get_irn_attr(irn);
+ entity *ent = be_get_spill_entity(irn);
+ a->ent = ent;
+ irn = get_irn_link(irn);
+ }
+}
+
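+/**
+ * Spills the value produced by @p irn: creates a Spill node in the
+ * block of @p irn, storing the value to the frame.
+ */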
+ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn, ir_node *ctx)
+{
+ ir_node *bl = get_nodes_block(irn);
+ ir_graph *irg = get_irn_irg(bl);
+ ir_node *frame = get_irg_frame(irg);
+ ir_node *insert = bl;
+ ir_node *spill;
+
+ const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, irn, -1);
+ const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
+
+ spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn, ctx);