+ insert = sched_next(irn);
+ while((is_Phi(insert) || is_Proj(insert)) && !sched_is_end(insert))
+ insert = sched_next(insert);
+
+ /*
+ * Here's one special case:
+ * If the spill is in the start block, the spill must be after the frame
+ * pointer is set up. This is checked here and fixed.
+ * If the insertion point is already the block, everything is fine, since
+ * the Spill gets inserted at the end of the block.
+ */
+ if(bl == get_irg_start_block(irg) && insert != bl && sched_comes_after(insert, frame))
+ insert = sched_next(frame);
+
+ sched_add_before(insert, spill);
+ return spill;
+}
+
+ir_node *be_reload(const arch_env_t *arch_env, const arch_register_class_t *cls, ir_node *reloader, ir_mode *mode, ir_node *spill)
+{
+ ir_node *reload;
+
+ ir_node *bl = is_Block(reloader) ? reloader : get_nodes_block(reloader);
+ ir_graph *irg = get_irn_irg(bl);
+ ir_node *frame = get_irg_frame(irg);
+ const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
+
+ assert(be_is_Spill(spill) || (is_Phi(spill) && get_irn_mode(spill) == mode_M));
+
+ reload = be_new_Reload(cls, cls_frame, irg, bl, frame, spill, mode);
+
+ sched_add_before(reloader, reload);
+ return reload;
+}
+
+static void *put_out_reg_req(arch_register_req_t *req, const ir_node *irn, int out_pos)
+{
+ const be_node_attr_t *a = get_irn_attr(irn);
+
+ if(out_pos < a->max_reg_data)
+ memcpy(req, &a->reg_data[out_pos].req, sizeof(req[0]));
+ else {
+ req->type = arch_register_req_type_none;
+ req->cls = NULL;
+ }
+
+ return req;
+}
+
+static void *put_in_reg_req(arch_register_req_t *req, const ir_node *irn, int pos)
+{
+ const be_node_attr_t *a = get_irn_attr(irn);
+ int n = get_irn_arity(irn);
+
+ if(pos < get_irn_arity(irn) && pos < a->max_reg_data)
+ memcpy(req, &a->reg_data[pos].in_req, sizeof(req[0]));
+ else {
+ req->type = arch_register_req_type_none;
+ req->cls = NULL;
+ }
+
+ return req;
+}
+
+static const arch_register_req_t *
+be_node_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
+{
+ int out_pos = pos;
+
+ if(pos < 0) {
+ if(get_irn_mode(irn) == mode_T)
+ return NULL;
+
+ out_pos = redir_proj((const ir_node **) &irn, pos);
+ assert(is_be_node(irn));
+ return put_out_reg_req(req, irn, out_pos);
+ }
+
+ else {
+ return is_be_node(irn) ? put_in_reg_req(req, irn, pos) : NULL;
+ }
+
+ return req;
+}
+
+const arch_register_t *
+be_node_get_irn_reg(const void *_self, const ir_node *irn)
+{
+ int out_pos;
+ be_node_attr_t *a;
+
+ out_pos = redir_proj((const ir_node **) &irn, -1);
+ a = get_irn_attr(irn);
+
+ assert(is_be_node(irn));
+ assert(out_pos < a->max_reg_data && "position too high");
+
+ return a->reg_data[out_pos].reg;
+}
+
+static arch_irn_class_t be_node_classify(const void *_self, const ir_node *irn)
+{
+ redir_proj((const ir_node **) &irn, -1);
+
+ switch(be_get_irn_opcode(irn)) {
+#define XXX(a,b) case beo_ ## a: return arch_irn_class_ ## b;
+ XXX(Spill, spill)
+ XXX(Reload, reload)
+ XXX(Perm, perm)
+ XXX(Copy, copy)
+#undef XXX
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static arch_irn_flags_t be_node_get_flags(const void *_self, const ir_node *irn)
+{
+ int out_pos;
+ be_node_attr_t *a;
+
+ out_pos = redir_proj((const ir_node **) &irn, -1);
+ a = get_irn_attr(irn);
+
+ assert(is_be_node(irn));
+ assert(out_pos < a->max_reg_data && "position too high");
+
+ return a->reg_data[out_pos].req.flags;
+}
+
+static entity *be_node_get_frame_entity(const void *self, const ir_node *irn)
+{
+ return be_get_frame_entity(irn);
+}
+
+static void be_node_set_frame_offset(const void *self, ir_node *irn, int offset)
+{
+ if(be_has_frame_entity(irn)) {
+ be_frame_attr_t *a = get_irn_attr(irn);
+ a->offset = offset;
+ }
+}
+
+
/*
 * Callback table wiring the accessors above into the arch interface.
 * NOTE(review): be_node_set_irn_reg is not visible in this chunk —
 * presumably defined elsewhere in this file; confirm.
 */
static const arch_irn_ops_if_t be_node_irn_ops_if = {
	be_node_get_irn_reg_req,
	be_node_set_irn_reg,
	be_node_get_irn_reg,
	be_node_classify,
	be_node_get_flags,
	be_node_get_frame_entity,
	be_node_set_frame_offset
};
+
/* The single arch_irn_ops_t instance handed out for backend nodes;
 * it merely points at the callback table above. */
static const arch_irn_ops_t be_node_irn_ops = {
	&be_node_irn_ops_if
};
+
+const void *be_node_get_arch_ops(const arch_irn_handler_t *self, const ir_node *irn)
+{
+ redir_proj((const ir_node **) &irn, -1);
+ return is_be_node(irn) ? &be_node_irn_ops : NULL;
+}
+
/* Public handler object registered with the arch environment; it
 * resolves backend nodes to be_node_irn_ops via the lookup above. */
const arch_irn_handler_t be_node_irn_handler = {
	be_node_get_arch_ops
};
+
+
+static void dump_node_req(FILE *f, be_req_t *req)
+{
+ unsigned i;
+ int did_something = 0;
+ const char *suffix = "";
+
+ if(req->flags != arch_irn_flags_none) {
+ fprintf(f, "flags: ");
+ for(i = arch_irn_flags_none; i <= log2_ceil(arch_irn_flags_last); ++i) {
+ if(req->flags & (1 << i)) {
+ fprintf(f, "%s%s", suffix, arch_irn_flag_str(1 << i));
+ suffix = "|";
+ }
+ }
+ suffix = ", ";
+ did_something = 1;
+ }
+
+ if(req->req.cls != 0) {
+ char tmp[256];
+ fprintf(f, suffix);
+ arch_register_req_format(tmp, sizeof(tmp), &req->req);
+ fprintf(f, "%s", tmp);
+ did_something = 1;
+ }
+
+ if(did_something)
+ fprintf(f, "\n");
+}
+
+static void dump_node_reqs(FILE *f, ir_node *irn)
+{
+ int i;
+ be_node_attr_t *a = get_irn_attr(irn);
+
+ fprintf(f, "registers: \n");
+ for(i = 0; i < a->max_reg_data; ++i) {
+ be_reg_data_t *rd = &a->reg_data[i];
+ if(rd->reg)
+ fprintf(f, "#%d: %s\n", i, rd->reg->name);
+ }
+
+ fprintf(f, "in requirements\n");
+ for(i = 0; i < a->max_reg_data; ++i) {
+ dump_node_req(f, &a->reg_data[i].in_req);
+ }
+
+ fprintf(f, "\nout requirements\n");
+ for(i = 0; i < a->max_reg_data; ++i) {
+ dump_node_req(f, &a->reg_data[i].req);
+ }
+}
+
+static int dump_node(ir_node *irn, FILE *f, dump_reason_t reason)
+{
+ be_node_attr_t *at = get_irn_attr(irn);
+
+ assert(is_be_node(irn));
+
+ switch(reason) {
+ case dump_node_opcode_txt:
+ fprintf(f, get_op_name(get_irn_op(irn)));
+ break;
+ case dump_node_mode_txt:
+ fprintf(f, get_mode_name(get_irn_mode(irn)));
+ break;
+ case dump_node_nodeattr_txt:
+ break;
+ case dump_node_info_txt:
+ dump_node_reqs(f, irn);
+
+ if(be_has_frame_entity(irn)) {
+ be_frame_attr_t *a = (be_frame_attr_t *) at;
+ if (a->ent)
+ ir_fprintf(f, "frame entity: %+F offset %x (%d)\n", a->ent, a->offset, a->offset);
+
+ }
+
+ switch(be_get_irn_opcode(irn)) {
+ case beo_Spill:
+ {
+ be_spill_attr_t *a = (be_spill_attr_t *) at;
+ ir_fprintf(f, "spill context: %+F\n", a->spill_ctx);
+ }
+ break;
+
+ case beo_IncSP:
+ {
+ be_stack_attr_t *a = (be_stack_attr_t *) at;
+ fprintf(f, "offset: %u\n", a->offset);
+ fprintf(f, "direction: %s\n", a->dir == be_stack_dir_along ? "along" : "against");
+ }
+ break;
+ }
+
+ }
+
+ return 0;
+}
+
+/**
+ * Copies the backend specific attributes from old node to new node.
+ */
+static void copy_attr(const ir_node *old_node, ir_node *new_node)
+{
+ be_node_attr_t *old_attr = get_irn_attr(old_node);
+ be_node_attr_t *new_attr = get_irn_attr(new_node);
+ int i;
+
+ assert(is_be_node(old_node));
+ assert(is_be_node(new_node));
+
+ memcpy(new_attr, old_attr, get_op_attr_size(get_irn_op(old_node)));
+ new_attr->reg_data = NULL;
+
+ if(new_attr->max_reg_data > 0) {
+ new_attr->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(get_irn_irg(new_node)), new_attr->max_reg_data);
+ memcpy(new_attr->reg_data, old_attr->reg_data, new_attr->max_reg_data * sizeof(be_reg_data_t));
+
+ for(i = 0; i < old_attr->max_reg_data; ++i) {
+ be_req_t *r;
+
+ r = &new_attr->reg_data[i].req;
+ r->req.limited_env = r;
+
+ r = &new_attr->reg_data[i].in_req;
+ r->req.limited_env = r;
+ }
+ }
+}
+
/*
 * ir_op callback table for backend nodes. Only two hooks are provided:
 * copy_attr (attribute duplication, above) and dump_node (debug dump,
 * above); all other slots are NULL and fall back to firm's defaults.
 * NOTE(review): slot positions must match the ir_op_ops declaration in
 * the firm headers — verify against the headers when changing.
 */
static const ir_op_ops be_node_op_ops = {
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	copy_attr,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	dump_node,
	NULL
};
+
+pset *nodes_live_at(const arch_env_t *arch_env, const arch_register_class_t *cls, const ir_node *pos, pset *live)
+{
+ firm_dbg_module_t *dbg = firm_dbg_register("firm.be.node");
+ const ir_node *bl = is_Block(pos) ? pos : get_nodes_block(pos);
+ ir_node *irn;
+ irn_live_t *li;
+
+ live_foreach(bl, li) {
+ ir_node *irn = (ir_node *) li->irn;
+ if(live_is_end(li) && arch_irn_consider_in_reg_alloc(arch_env, cls, irn))
+ pset_insert_ptr(live, irn);
+ }
+
+ sched_foreach_reverse(bl, irn) {
+ int i, n;
+ ir_node *x;
+
+ /*
+ * If we encounter the node we want to insert the Perm after,
+ * exit immediately, so that this node is still live
+ */
+ if(irn == pos)
+ return live;
+
+ DBG((dbg, LEVEL_1, "%+F\n", irn));
+ for(x = pset_first(live); x; x = pset_next(live))
+ DBG((dbg, LEVEL_1, "\tlive: %+F\n", x));
+
+ if(arch_irn_consider_in_reg_alloc(arch_env, cls, irn))
+ pset_remove_ptr(live, irn);
+
+ for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
+ ir_node *op = get_irn_n(irn, i);
+
+ if(arch_irn_consider_in_reg_alloc(arch_env, cls, op))
+ pset_insert_ptr(live, op);
+ }
+ }
+
+ return live;
+}
+
+ir_node *insert_Perm_after(const arch_env_t *arch_env,
+ const arch_register_class_t *cls,
+ dom_front_info_t *dom_front,
+ ir_node *pos)
+{
+ ir_node *bl = is_Block(pos) ? pos : get_nodes_block(pos);
+ ir_graph *irg = get_irn_irg(bl);
+ pset *live = pset_new_ptr_default();
+ firm_dbg_module_t *dbg = firm_dbg_register("be.node");
+
+ ir_node *curr, *irn, *perm, **nodes;
+ int i, n;
+
+ DBG((dbg, LEVEL_1, "Insert Perm after: %+F\n", pos));
+
+ if(!nodes_live_at(arch_env, cls, pos, live));
+
+ n = pset_count(live);
+
+ if(n == 0)
+ return NULL;
+
+ nodes = malloc(n * sizeof(nodes[0]));
+
+ DBG((dbg, LEVEL_1, "live:\n"));
+ for(irn = pset_first(live), i = 0; irn; irn = pset_next(live), i++) {
+ DBG((dbg, LEVEL_1, "\t%+F\n", irn));
+ nodes[i] = irn;
+ }
+
+ perm = be_new_Perm(cls, irg, bl, n, nodes);
+ sched_add_after(pos, perm);
+ free(nodes);
+
+ curr = perm;
+ for(i = 0; i < n; ++i) {
+ ir_node *copies[2];
+ ir_node *perm_op = get_irn_n(perm, i);
+ const arch_register_t *reg = arch_get_irn_register(arch_env, perm_op);
+
+ ir_mode *mode = get_irn_mode(perm_op);
+ ir_node *proj = new_r_Proj(irg, bl, perm, mode, i);
+ arch_set_irn_register(arch_env, proj, reg);
+
+ sched_add_after(curr, proj);
+ curr = proj;
+
+ copies[0] = perm_op;
+ copies[1] = proj;
+ be_ssa_constr(dom_front, 2, copies);
+ }
+ return perm;