+ir_entity *be_get_MemPerm_in_entity(const ir_node *irn, int n)
+{
+ be_memperm_attr_t *attr = get_irn_attr(irn);
+
+ assert(be_is_MemPerm(irn));
+ assert(0 <= n && n < be_get_MemPerm_entity_arity(irn));
+
+ return attr->in_entities[n];
+}
+
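+/* A MemPerm permutes values between stack entities: in_entities name the
+ * source slots, out_entities the destination slots of the permutation. */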
+void be_set_MemPerm_out_entity(ir_node *irn, int n, ir_entity *ent)
+{
+ be_memperm_attr_t *attr = get_irn_attr(irn);
+
+ assert(be_is_MemPerm(irn));
+ assert(0 <= n && n < be_get_MemPerm_entity_arity(irn));
+
+ attr->out_entities[n] = ent;
+}
+
+ir_entity *be_get_MemPerm_out_entity(const ir_node *irn, int n)
+{
+ be_memperm_attr_t *attr = get_irn_attr(irn);
+
+ assert(be_is_MemPerm(irn));
+ assert(0 <= n && n < be_get_MemPerm_entity_arity(irn));
+
+ return attr->out_entities[n];
+}
+
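+/* The first input of a MemPerm is the frame pointer; every further input
+ * corresponds to one permuted entity, hence arity minus one. */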
+int be_get_MemPerm_entity_arity(const ir_node *irn)
+{
+ assert(be_is_MemPerm(irn));
+ return get_irn_arity(irn) - 1;
+}
+
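+/* Constrain the operand at pos to exactly one register by installing a
+ * limited bitset with only that register's bit set. */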
+void be_set_constr_single_reg(ir_node *node, int pos, const arch_register_t *reg)
+{
+ arch_register_req_t *req = get_req(node, pos);
+ const arch_register_class_t *cls = arch_register_get_class(reg);
+ ir_graph *irg = get_irn_irg(node);
+ struct obstack *obst = get_irg_obstack(irg);
+ unsigned *limited_bitset;
+
+ assert(req->cls == NULL || req->cls == cls);
+ assert(! (req->type & arch_register_req_type_limited));
+ assert(req->limited == NULL);
+
+ limited_bitset = rbitset_obstack_alloc(obst, arch_register_class_n_regs(cls));
+ rbitset_set(limited_bitset, arch_register_get_index(reg));
+
+ req->cls = cls;
+ req->type |= arch_register_req_type_limited;
+ req->limited = limited_bitset;
+}
+
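+/* Copy a limited register requirement onto the operand at pos. The limited
+ * bitset is duplicated onto the graph's obstack so that it lives as long as
+ * the graph. */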
+void be_set_constr_limited(ir_node *node, int pos, const arch_register_req_t *req)
+{
+ ir_graph *irg = get_irn_irg(node);
+ struct obstack *obst = get_irg_obstack(irg);
+ arch_register_req_t *r = get_req(node, pos);
+
+ assert(arch_register_req_is(req, limited));
+ assert(! (req->type & (arch_register_req_type_should_be_same | arch_register_req_type_should_be_different)));
+ memcpy(r, req, sizeof(r[0]));
+ r->limited = rbitset_duplicate_obstack_alloc(obst, req->limited, req->cls->n_regs);
+}
+
+void be_node_set_flags(ir_node *irn, int pos, arch_irn_flags_t flags)
+{
+ be_req_t *bereq = get_be_req(irn, pos);
+ bereq->flags = flags;
+}
+
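+/* Set the register class of the operand at pos. A NULL class resets the
+ * requirement; otherwise a still-unset requirement type is promoted to
+ * "normal". */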
+void be_node_set_reg_class(ir_node *irn, int pos, const arch_register_class_t *cls)
+{
+ arch_register_req_t *req = get_req(irn, pos);
+
+ req->cls = cls;
+
+ if (cls == NULL) {
+ req->type = arch_register_req_type_none;
+ } else if (req->type == arch_register_req_type_none) {
+ req->type = arch_register_req_type_normal;
+ }
+}
+
+void be_node_set_req_type(ir_node *irn, int pos, arch_register_req_type_t type)
+{
+ arch_register_req_t *req = get_req(irn, pos);
+ req->type = type;
+}
+
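+/* IncSP accessors: input 0 is the previous stack pointer, input 1 the memory
+ * dependency. */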
+ir_node *be_get_IncSP_pred(ir_node *irn)
+{
+ assert(be_is_IncSP(irn));
+ return get_irn_n(irn, 0);
+}
+
+void be_set_IncSP_pred(ir_node *incsp, ir_node *pred)
+{
+ assert(be_is_IncSP(incsp));
+ set_irn_n(incsp, 0, pred);
+}
+
+ir_node *be_get_IncSP_mem(ir_node *irn)
+{
+ assert(be_is_IncSP(irn));
+ return get_irn_n(irn, 1);
+}
+
+void be_set_IncSP_offset(ir_node *irn, int offset)
+{
+ be_stack_attr_t *a = get_irn_attr(irn);
+ assert(be_is_IncSP(irn));
+ a->offset = offset;
+}
+
+int be_get_IncSP_offset(const ir_node *irn)
+{
+ be_stack_attr_t *a = get_irn_attr(irn);
+ assert(be_is_IncSP(irn));
+ return a->offset;
+}
+
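+/* Create a Spill node that stores irn into the stack frame of its graph.
+ * Scheduling the new node is left to the caller. */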
+ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn)
+{
+ ir_node *bl = get_nodes_block(irn);
+ ir_graph *irg = get_irn_irg(bl);
+ ir_node *frame = get_irg_frame(irg);
+ const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, irn, -1);
+ const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
+ ir_node *spill;
+
+ spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn);
+ return spill;
+}
+
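+/* Create a Reload of the given spill value and schedule it: before insert if
+ * insert is an ordinary node, at the end of insert if it is a block. */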
+ir_node *be_reload(const arch_env_t *arch_env, const arch_register_class_t *cls, ir_node *insert, ir_mode *mode, ir_node *spill)
+{
+ ir_node *reload;
+ ir_node *bl = is_Block(insert) ? insert : get_nodes_block(insert);
+ ir_graph *irg = get_irn_irg(bl);
+ ir_node *frame = get_irg_frame(irg);
+ const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
+
+ assert(be_is_Spill(spill) || (is_Phi(spill) && get_irn_mode(spill) == mode_M));
+
+ reload = be_new_Reload(cls, cls_frame, irg, bl, frame, spill, mode);
+
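+ /* When inserting into a block, place the reload at the end of the
+ * block, but in front of any control flow nodes scheduled there. */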
+ if (is_Block(insert)) {
+ insert = sched_skip(insert, 0, sched_skip_cf_predicator, (void *) arch_env);
+ sched_add_after(insert, reload);
+ } else {
+ sched_add_before(insert, reload);
+ }
+
+ return reload;
+}
+
+/*
+ ____ ____
+ | _ \ ___ __ _ | _ \ ___ __ _ ___
+ | |_) / _ \/ _` | | |_) / _ \/ _` / __|
+ | _ < __/ (_| | | _ < __/ (_| \__ \
+ |_| \_\___|\__, | |_| \_\___|\__, |___/
+ |___/ |_|
+
+*/
+
+
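+/* Fetch the requirement of result out_pos; positions beyond the node's
+ * results yield arch_no_register_req. */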
+static const arch_register_req_t *
+get_out_reg_req(const ir_node *irn, int out_pos)
+{
+ const be_node_attr_t *a = get_irn_attr(irn);
+
+ if (out_pos >= ARR_LEN(a->reg_data)) {
+ return arch_no_register_req;
+ }
+
+ return &a->reg_data[out_pos].req.req;
+}
+
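+/* Fetch the requirement of input pos; positions beyond the node's inputs
+ * yield arch_no_register_req. */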
+static const arch_register_req_t *
+get_in_reg_req(const ir_node *irn, int pos)
+{
+ const be_node_attr_t *a = get_irn_attr(irn);
+
+ if (pos >= get_irn_arity(irn) || pos >= ARR_LEN(a->reg_data))
+ return arch_no_register_req;
+
+ return &a->reg_data[pos].in_req.req;
+}
+
+static const arch_register_req_t *
+be_node_get_irn_reg_req(const void *self, const ir_node *irn, int pos)
+{
+ int out_pos = pos;
+
+ (void) self;
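+ /* A negative position requests an output requirement: skip any Proj to
+ * reach the node that actually produces the value. */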
+ if (pos < 0) {
+ if (get_irn_mode(irn) == mode_T)
+ return arch_no_register_req;
+
+ out_pos = redir_proj((const ir_node **)&irn);
+ assert(is_be_node(irn));
+ return get_out_reg_req(irn, out_pos);
+ } else if (is_be_node(irn)) {
+ /*
+ * For spills and reloads, we return "none" as requirement for frame
+ * pointer, so every input is ok. Some backends need this (e.g. STA).
+ */
+ if ((be_is_Spill(irn) && pos == be_pos_Spill_frame) ||
+ (be_is_Reload(irn) && pos == be_pos_Reload_frame))
+ return arch_no_register_req;
+
+ return get_in_reg_req(irn, pos);
+ }
+
+ return arch_no_register_req;
+}
+
+const arch_register_t *
+be_node_get_irn_reg(const void *self, const ir_node *irn)