+/** Attributes common to all backend (be) nodes. */
+typedef struct {
+ int n_outs; /**< Number of output values this node produces. */
+ const arch_register_class_t *cls; /**< Register class of the output values. */
+ be_reg_data_t *reg_data; /**< Per-output register data; NULL when n_outs == 0 (see init_node_attr()). */
+} be_node_attr_t;
+
+/** Attributes of a Spill node; extends the common be_node_attr_t. */
+typedef struct {
+ be_node_attr_t node_attr; /**< Common backend node attributes (must stay first). */
+ ir_node *spill_ctx; /**< The node in whose context this spill was introduced. */
+ entity *ent; /**< The entity in the stack frame the spill writes to. */
+} be_spill_attr_t;
+
+/* Opcodes of the backend-specific node kinds; created in be_node_init(). */
+static ir_op *op_Spill;
+static ir_op *op_Reload;
+static ir_op *op_Perm;
+static ir_op *op_Copy;
+static ir_op *op_Keep;
+
+/* First firm opcode reserved for backend ops: the opcode of a backend op
+ * is beo_base + its beo_* value. -1 until be_node_init() has run. */
+static int beo_base = -1;
+
+/* Shared ir_op callbacks for all backend ops.
+ * NOTE(review): initializer not visible in this chunk — presumably defined below. */
+static const ir_op_ops be_node_op_ops;
+
+/* One-letter shorthands for the irop_flag_* constants, used to keep the
+ * new_ir_op() calls in be_node_init() readable. */
+#define N irop_flag_none
+#define L irop_flag_labeled
+#define C irop_flag_commutative
+#define X irop_flag_cfopcode
+#define I irop_flag_ip_cfopcode
+#define F irop_flag_fragile
+#define Y irop_flag_forking
+#define H irop_flag_highlevel
+#define c irop_flag_constlike
+#define K irop_flag_keep
+
+/**
+ * Register the backend node opcodes with the firm opcode table and tag
+ * them as backend ops. Idempotent: only the first call has an effect.
+ */
+void be_node_init(void) {
+ static int inited = 0;
+
+ if(inited)
+ return;
+
+ inited = 1;
+
+ /* Acquire all needed opcodes. */
+ /* NOTE(review): reserves beo_Last - 1 opcode slots, yet the ops below use
+  * offsets beo_Spill .. beo_Keep. This is only correct if exactly one beo_*
+  * enum member (e.g. a beo_NoBeOp sentinel) gets no ir_op and the offsets
+  * stay within the reserved range — verify against the beo_* enum. */
+ beo_base = get_next_ir_opcodes(beo_Last - 1);
+
+ /* NOTE(review): Reload is declared oparity_zero although be_new_Reload()
+  * below appears to construct it with one in[] operand — confirm the
+  * intended arity. */
+ op_Spill = new_ir_op(beo_base + beo_Spill, "Spill", op_pin_state_mem_pinned, N, oparity_unary, 0, sizeof(be_spill_attr_t), &be_node_op_ops);
+ op_Reload = new_ir_op(beo_base + beo_Reload, "Reload", op_pin_state_mem_pinned, N, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops);
+ op_Perm = new_ir_op(beo_base + beo_Perm, "Perm", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
+ op_Copy = new_ir_op(beo_base + beo_Copy, "Copy", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
+ op_Keep = new_ir_op(beo_base + beo_Keep, "Keep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
+
+ /* Mark the ops so is_be_node() can recognize them. */
+ set_op_tag(op_Spill, &be_node_tag);
+ set_op_tag(op_Reload, &be_node_tag);
+ set_op_tag(op_Perm, &be_node_tag);
+ set_op_tag(op_Copy, &be_node_tag);
+ set_op_tag(op_Keep, &be_node_tag);
+}
+
+/**
+ * Initialize the common backend attributes of a freshly created node.
+ *
+ * Sets the register class and output count, and — when the node has
+ * outputs — allocates a zeroed reg_data array on the graph's obstack,
+ * giving every output a "normal" register requirement of class @p cls.
+ *
+ * @return The node's attribute record (a be_node_attr_t *).
+ */
+static void *init_node_attr(ir_node* irn, const arch_register_class_t *cls, ir_graph *irg, int n_outs)
+{
+ be_node_attr_t *attr = get_irn_attr(irn);
+
+ attr->cls = cls;
+ attr->n_outs = n_outs;
+ attr->reg_data = NULL;
+
+ if(n_outs > 0) {
+ int i;
+
+ attr->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(irg), n_outs);
+ memset(attr->reg_data, 0, n_outs * sizeof(attr->reg_data[0]));
+ for(i = 0; i < n_outs; ++i) {
+ be_reg_data_t *rd = &attr->reg_data[i];
+ rd->req.req.type = arch_register_req_type_normal;
+ rd->req.req.cls = cls;
+ }
+ }
+
+ return attr;
+}
+
+/** Tell whether @p irn is one of the backend nodes registered in be_node_init(). */
+static INLINE int is_be_node(const ir_node *irn)
+{
+ const ir_op *op = get_irn_op(irn);
+
+ return get_op_tag(op) == &be_node_tag;
+}
+
+/**
+ * Get the backend opcode of a node.
+ * @return The node's beo_* opcode, or beo_NoBeOp if it is not a backend node.
+ */
+be_opcode_t get_irn_be_opcode(const ir_node *irn)
+{
+ if(!is_be_node(irn))
+ return beo_NoBeOp;
+
+ return get_irn_opcode(irn) - beo_base;
+}
+
+/**
+ * Create a new Spill node spilling @p to_spill.
+ *
+ * The node has mode_M, no register outputs, and records @p ctx as the
+ * context in which the spill was introduced. The stack-frame entity is
+ * left NULL (to be set later).
+ */
+ir_node *be_new_Spill(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *to_spill, ir_node *ctx)
+{
+ ir_node *in[1] = { to_spill };
+ ir_node *res = new_ir_node(NULL, irg, bl, op_Spill, mode_M, 1, in);
+ be_spill_attr_t *attr = init_node_attr(res, cls, irg, 0);
+
+ attr->spill_ctx = ctx;
+ attr->ent = NULL;
+
+ return res;
+}
+
+ir_node *be_new_Reload(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_mode *mode, ir_node *mem)
+{
+ ir_node *in[1];
+ ir_node *res;