+ be_frame_attr_t *a;
+ ir_node *irn;
+ ir_node *in[3];
+
+ in[0] = mem;
+ in[1] = frame;
+ in[2] = data;
+ irn = new_ir_node(NULL, irg, bl, op_be_FrameStore, mode_T, 3, in);
+ a = init_node_attr(irn, 3);
+ a->ent = ent;
+ a->offset = 0;
+ be_node_set_reg_class(irn, 1, cls_frame);
+ be_node_set_reg_class(irn, 2, cls_data);
+ return irn;
+}
+
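+/*
+ Create a FrameAddr node computing the address of entity ent relative to
+ frame. Input and output are constrained to cls_frame; the result is passed
+ through optimize_node(), so an equivalent existing node may be returned.
+*/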
+ir_node *be_new_FrameAddr(const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_node *frame, ir_entity *ent)
+{
+ be_frame_attr_t *a;
+ ir_node *irn;
+ ir_node *in[1];
+
+ in[0] = frame;
+ irn = new_ir_node(NULL, irg, bl, op_be_FrameAddr, get_irn_mode(frame), 1, in);
+ a = init_node_attr(irn, 1);
+ a->ent = ent;
+ a->offset = 0;
+ be_node_set_reg_class(irn, 0, cls_frame);
+ be_node_set_reg_class(irn, OUT_POS(0), cls_frame);
+
+ return optimize_node(irn);
+}
+
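+/*
+ Create a CopyKeep node: a copy of src in register class cls which
+ additionally keeps the n nodes in in_keep alive at this point.
+*/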
+ir_node *be_new_CopyKeep(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *src, int n, ir_node *in_keep[], ir_mode *mode)
+{
+ ir_node *irn;
+ ir_node **in = (ir_node **) alloca((n + 1) * sizeof(in[0]));
+
+ in[0] = src;
+ memcpy(&in[1], in_keep, n * sizeof(in[0]));
+ irn = new_ir_node(NULL, irg, bl, op_be_CopyKeep, mode, n + 1, in);
+ init_node_attr(irn, n + 1);
+ be_node_set_reg_class(irn, OUT_POS(0), cls);
+ be_node_set_reg_class(irn, 0, cls);
+
+ return irn;
+}
+
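+/* Convenience variant of be_new_CopyKeep for a single kept node. */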
+ir_node *be_new_CopyKeep_single(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *src, ir_node *keep, ir_mode *mode)
+{
+ ir_node *in[1];
+
+ in[0] = keep;
+ return be_new_CopyKeep(cls, irg, bl, src, 1, in, mode);
+}
+
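+/* Accessors for the copied operand of a CopyKeep. */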
+ir_node *be_get_CopyKeep_op(const ir_node *cpy) {
+ return get_irn_n(cpy, be_pos_CopyKeep_op);
+}
+
+void be_set_CopyKeep_op(ir_node *cpy, ir_node *op) {
+ set_irn_n(cpy, be_pos_CopyKeep_op, op);
+}
+
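+/*
+ Create a Barrier with n operands. The node is built with dynamic arity;
+ every operand gets a default register requirement.
+*/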
+ir_node *be_new_Barrier(ir_graph *irg, ir_node *bl, int n, ir_node *in[])
+{
+ ir_node *res;
+ int i;
+
+ res = new_ir_node(NULL, irg, bl, op_be_Barrier, mode_T, -1, NULL);
+ init_node_attr(res, -1);
+ for(i = 0; i < n; ++i) {
+ add_irn_n(res, in[i]);
+ add_register_req(res);
+ }
+
+ return res;
+}
+
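+/*
+ Append node as an additional operand to barrier and return a Proj for the
+ corresponding result.
+*/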
+ir_node *be_Barrier_append_node(ir_node *barrier, ir_node *node)
+{
+ ir_graph *irg = get_irn_irg(barrier);
+ ir_node *block = get_nodes_block(barrier);
+ ir_mode *mode = get_irn_mode(node);
+ int n = add_irn_n(barrier, node);
+ ir_node *proj = new_r_Proj(irg, block, barrier, mode, n);
+ add_register_req(barrier);
+
+ return proj;
+}
+
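+/* Opcode predicates for backend nodes. */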
+int be_is_Spill (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Spill ; }
+int be_is_Reload (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Reload ; }
+int be_is_Copy (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Copy ; }
+int be_is_CopyKeep (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_CopyKeep ; }
+int be_is_Perm (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Perm ; }
+int be_is_MemPerm (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_MemPerm ; }
+int be_is_Keep (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Keep ; }
+int be_is_Call (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Call ; }
+int be_is_Return (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Return ; }
+int be_is_IncSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_IncSP ; }
+int be_is_SetSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_SetSP ; }
+int be_is_AddSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_AddSP ; }
+int be_is_SubSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_SubSP ; }
+int be_is_RegParams (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_RegParams ; }
+int be_is_StackParam (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_StackParam ; }
+int be_is_FrameAddr (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameAddr ; }
+int be_is_FrameLoad (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameLoad ; }
+int be_is_FrameStore (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameStore ; }
+int be_is_Barrier (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Barrier ; }
+
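+/* Return non-zero if irn is a backend node kind carrying a frame entity. */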
+int be_has_frame_entity(const ir_node *irn)
+{
+ switch(be_get_irn_opcode(irn)) {
+ case beo_StackParam:
+ case beo_Spill:
+ case beo_Reload:
+ case beo_FrameStore:
+ case beo_FrameLoad:
+ case beo_FrameAddr:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+ir_entity *be_get_frame_entity(const ir_node *irn)
+{
+ if (be_has_frame_entity(irn)) {
+ be_frame_attr_t *a = get_irn_attr(irn);
+ return a->ent;
+ }
+ return NULL;
+}
+
+int be_get_frame_offset(const ir_node *irn)
+{
+ assert(is_be_node(irn));
+ if (be_has_frame_entity(irn)) {
+ be_frame_attr_t *a = get_irn_attr(irn);
+ return a->offset;
+ }
+ return 0;
+}
+
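+/*
+ MemPerm nodes carry one entity per transferred value on each side; the
+ following accessors read and write them.
+*/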
+void be_set_MemPerm_in_entity(const ir_node *irn, int n, ir_entity *ent)
+{
+ be_memperm_attr_t *attr = get_irn_attr(irn);
+
+ assert(be_is_MemPerm(irn));
+ assert(n < be_get_MemPerm_entity_arity(irn));
+
+ attr->in_entities[n] = ent;
+}
+
+ir_entity* be_get_MemPerm_in_entity(const ir_node* irn, int n)
+{
+ be_memperm_attr_t *attr = get_irn_attr(irn);
+
+ assert(be_is_MemPerm(irn));
+ assert(n < be_get_MemPerm_entity_arity(irn));
+
+ return attr->in_entities[n];
+}
+
+void be_set_MemPerm_out_entity(const ir_node *irn, int n, ir_entity *ent)
+{
+ be_memperm_attr_t *attr = get_irn_attr(irn);
+
+ assert(be_is_MemPerm(irn));
+ assert(n < be_get_MemPerm_entity_arity(irn));
+
+ attr->out_entities[n] = ent;
+}
+
+ir_entity* be_get_MemPerm_out_entity(const ir_node* irn, int n)
+{
+ be_memperm_attr_t *attr = get_irn_attr(irn);
+
+ assert(be_is_MemPerm(irn));
+ assert(n < be_get_MemPerm_entity_arity(irn));
+
+ return attr->out_entities[n];
+}
+
+int be_get_MemPerm_entity_arity(const ir_node *irn)
+{
+ return get_irn_arity(irn) - 1;
+}
+
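+/*
+ Limited-register callback for backend requirements: delegate to a wrapped
+ old limited function (flipping the bitset for the negated kind) or
+ restrict the bitset to a single register.
+*/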
+static void be_limited(void *data, bitset_t *bs)
+{
+ be_req_t *req = data;
+
+ switch(req->kind) {
+ case be_req_kind_negate_old_limited:
+ case be_req_kind_old_limited:
+ req->x.old_limited.old_limited(req->x.old_limited.old_limited_env, bs);
+ if(req->kind == be_req_kind_negate_old_limited)
+ bitset_flip_all(bs);
+ break;
+ case be_req_kind_single_reg:
+ bitset_clear_all(bs);
+ bitset_set(bs, req->x.single_reg->index);
+ break;
+ }
+}
+
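+/*
+ Fetch the requirement record at position pos of irn. Non-negative
+ positions denote input requirements; negative positions denote output
+ requirements, with pos == -(n + 1) addressing output n.
+*/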
+static INLINE be_req_t *get_req(ir_node *irn, int pos)
+{
+ int idx = pos < 0 ? -(pos + 1) : pos;
+ be_node_attr_t *a = get_irn_attr(irn);
+ be_reg_data_t *rd = &a->reg_data[idx];
+
+ assert(is_be_node(irn));
+ assert(pos < 0 || pos < get_irn_arity(irn));
+ assert(pos >= 0 || idx < ARR_LEN(a->reg_data));
+
+ return pos < 0 ? &rd->req : &rd->in_req;
+}
+
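+/* Constrain position pos of irn to the single register reg. */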
+void be_set_constr_single_reg(ir_node *irn, int pos, const arch_register_t *reg)
+{
+ be_req_t *r = get_req(irn, pos);
+
+ r->kind = be_req_kind_single_reg;
+ r->x.single_reg = reg;
+ r->req.limited = be_limited;
+ r->req.limited_env = r;
+ r->req.type = arch_register_req_type_limited;
+ r->req.cls = reg->reg_class;
+}
+
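+/*
+ Constrain position pos of irn by wrapping the existing limited
+ requirement req.
+*/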
+void be_set_constr_limited(ir_node *irn, int pos, const arch_register_req_t *req)
+{
+ be_req_t *r = get_req(irn, pos);
+
+ assert(arch_register_req_is(req, limited));
+
+ r->kind = be_req_kind_old_limited;
+ r->req.limited = be_limited;
+ r->req.limited_env = r;
+ r->req.type = arch_register_req_type_limited;
+ r->req.cls = req->cls;
+
+ r->x.old_limited.old_limited = req->limited;
+ r->x.old_limited.old_limited_env = req->limited_env;
+}
+
+void be_node_set_flags(ir_node *irn, int pos, arch_irn_flags_t flags)
+{
+ be_req_t *r = get_req(irn, pos);
+ r->flags = flags;
+}
+
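+/*
+ Set the register class for position pos. A NULL class resets the
+ requirement to "none"; otherwise a plain "none" requirement is upgraded
+ to "normal".
+*/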
+void be_node_set_reg_class(ir_node *irn, int pos, const arch_register_class_t *cls)
+{
+ be_req_t *r = get_req(irn, pos);
+
+ r->req.cls = cls;
+
+ if (cls == NULL) {
+ r->req.type = arch_register_req_type_none;
+ } else if (r->req.type == arch_register_req_type_none) {
+ r->req.type = arch_register_req_type_normal;
+ }
+}
+
+void be_node_set_req_type(ir_node *irn, int pos, arch_register_req_type_t type)
+{
+ be_req_t *r = get_req(irn, pos);
+ r->req.type = type;
+}
+
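+/* IncSP accessors: stack pointer predecessor, memory input and offset. */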
+ir_node *be_get_IncSP_pred(ir_node *irn) {
+ assert(be_is_IncSP(irn));
+ return get_irn_n(irn, 0);
+}
+
+void be_set_IncSP_pred(ir_node *incsp, ir_node *pred) {
+ assert(be_is_IncSP(incsp));
+ set_irn_n(incsp, 0, pred);
+}
+
+ir_node *be_get_IncSP_mem(ir_node *irn) {
+ assert(be_is_IncSP(irn));
+ return get_irn_n(irn, 1);
+}
+
+void be_set_IncSP_offset(ir_node *irn, int offset)
+{
+ be_stack_attr_t *a = get_irn_attr(irn);
+ assert(be_is_IncSP(irn));
+ a->offset = offset;
+}
+
+int be_get_IncSP_offset(const ir_node *irn)
+{
+ be_stack_attr_t *a = get_irn_attr(irn);
+ assert(be_is_IncSP(irn));
+ return a->offset;
+}
+
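+/*
+ Spill the value irn: create a Spill node storing it relative to the frame
+ of its graph.
+*/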
+ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn)
+{
+ ir_node *bl = get_nodes_block(irn);
+ ir_graph *irg = get_irn_irg(bl);
+ ir_node *frame = get_irg_frame(irg);
+ const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, irn, -1);
+ const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
+
+ return be_new_Spill(cls, cls_frame, irg, bl, frame, irn);
+}
+
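+/*
+ Create a Reload of spill in register class cls and schedule it: before
+ insert if that is a regular node, or at the end of the block (before its
+ control flow nodes) if insert is a block.
+*/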
+ir_node *be_reload(const arch_env_t *arch_env, const arch_register_class_t *cls, ir_node *insert, ir_mode *mode, ir_node *spill)
+{
+ ir_node *reload;
+ ir_node *bl = is_Block(insert) ? insert : get_nodes_block(insert);
+ ir_graph *irg = get_irn_irg(bl);
+ ir_node *frame = get_irg_frame(irg);
+ const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
+
+ assert(be_is_Spill(spill) || (is_Phi(spill) && get_irn_mode(spill) == mode_M));
+
+ reload = be_new_Reload(cls, cls_frame, irg, bl, frame, spill, mode);
+
+ if (is_Block(insert)) {
+ insert = sched_skip(insert, 0, sched_skip_cf_predicator, (void *) arch_env);
+ sched_add_after(insert, reload);
+ } else {
+ sched_add_before(insert, reload);
+ }
+
+ return reload;
+}
+
+/*
+ ____ ____
+ | _ \ ___ __ _ | _ \ ___ __ _ ___
+ | |_) / _ \/ _` | | |_) / _ \/ _` / __|
+ | _ < __/ (_| | | _ < __/ (_| \__ \
+ |_| \_\___|\__, | |_| \_\___|\__, |___/
+ |___/ |_|
+
+*/
+
+
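+/*
+ Copy the output requirement for out_pos of irn into req. Copy nodes
+ additionally get a should-be-same constraint on their operand.
+*/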
+static arch_register_req_t *put_out_reg_req(arch_register_req_t *req, const ir_node *irn, int out_pos)
+{
+ const be_node_attr_t *a = get_irn_attr(irn);
+
+ if(out_pos < ARR_LEN(a->reg_data)) {
+ memcpy(req, &a->reg_data[out_pos].req, sizeof(req[0]));
+
+ if(be_is_Copy(irn)) {
+ req->type |= arch_register_req_type_should_be_same;
+ req->other_same = be_get_Copy_op(irn);
+ }
+ } else {
+ req->type = arch_register_req_type_none;
+ req->cls = NULL;
+ }
+
+ return req;
+}
+
+static arch_register_req_t *put_in_reg_req(arch_register_req_t *req, const ir_node *irn, int pos)
+{
+ const be_node_attr_t *a = get_irn_attr(irn);
+
+ if(pos < get_irn_arity(irn) && pos < ARR_LEN(a->reg_data)) {
+ memcpy(req, &a->reg_data[pos].in_req, sizeof(req[0]));
+ } else {
+ req->type = arch_register_req_type_none;
+ req->cls = NULL;
+ }
+
+ return req;
+}
+
+static const arch_register_req_t *
+be_node_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
+{
+ if (pos < 0) {
+ int out_pos;
+
+ if (get_irn_mode(irn) == mode_T)
+ return NULL;
+
+ out_pos = redir_proj((const ir_node **)&irn);
+ assert(is_be_node(irn));
+ return put_out_reg_req(req, irn, out_pos);
+ }
+
+ if (is_be_node(irn)) {
+ /*
+ For Spills and Reloads we return "none" as the requirement for the
+ frame pointer input, so any input register is accepted. Some backends
+ need this (e.g. STA). Passing an arbitrarily large pos makes
+ put_in_reg_req return the "none" requirement.
+ */
+ if ((be_is_Spill(irn) && pos == be_pos_Spill_frame) ||
+ (be_is_Reload(irn) && pos == be_pos_Reload_frame))
+ return put_in_reg_req(req, irn, INT_MAX);
+
+ return put_in_reg_req(req, irn, pos);
+ }
+
+ return NULL;
+}
+
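+/* Return the register assigned to irn, or NULL if no register data exists. */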
+const arch_register_t *
+be_node_get_irn_reg(const void *_self, const ir_node *irn)
+{
+ be_reg_data_t *r = retrieve_reg_data(irn);
+ return r ? r->reg : NULL;
+}
+
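+/* Classify a backend node; Projs are redirected to their predecessor first. */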
+static arch_irn_class_t be_node_classify(const void *_self, const ir_node *irn)
+{
+ redir_proj((const ir_node **) &irn);
+
+ switch(be_get_irn_opcode(irn)) {
+#define XXX(a,b) case beo_ ## a: return arch_irn_class_ ## b
+ XXX(Spill, spill);
+ XXX(Reload, reload);
+ XXX(Perm, perm);
+ XXX(Copy, copy);
+ XXX(Return, branch);
+ XXX(StackParam, stackparam);
+#undef XXX
+ default:
+ return arch_irn_class_normal;
+ }
+