added get_next_ir_opcodes() to allow allocation of consecutive opcodes
[libfirm] / ir / be / benode.c
index b1c6f59..a60e01a 100644 (file)
@@ -59,9 +59,16 @@ typedef struct {
        int *pos;
 } be_op_t;
 
+typedef struct {
+       arch_register_req_t req;
+       unsigned            negate_limited : 1;
+       void                (*old_limited)(void *ptr, bitset_t *bs);
+       void                *old_limited_env;
+} be_req_t;
+
 typedef struct {
        const arch_register_t *reg;
-       arch_register_req_t   req;
+       be_req_t              req;
 } be_reg_data_t;
 
 typedef struct {
@@ -73,8 +80,7 @@ typedef struct {
 typedef struct {
        be_node_attr_t node_attr;
        ir_node *spill_ctx;  /**< The node in whose context this spill was introduced. */
-       unsigned offset;     /**< The offset of the memory location the spill writes to
-                                                  in the spill area. */
+       entity *ent;     /**< The entity in the stack frame the spill writes to. */
 } be_spill_attr_t;
 
 static ir_op *op_Spill;
@@ -87,26 +93,33 @@ static int beo_base = -1;
 
 static const ir_op_ops be_node_op_ops;
 
+#define N   irop_flag_none
+#define L   irop_flag_labeled
+#define C   irop_flag_commutative
+#define X   irop_flag_cfopcode
+#define I   irop_flag_ip_cfopcode
+#define F   irop_flag_fragile
+#define Y   irop_flag_forking
+#define H   irop_flag_highlevel
+#define c   irop_flag_constlike
+#define K   irop_flag_keep
+
 void be_node_init(void) {
        static int inited = 0;
-       int i;
 
        if(inited)
                return;
 
        inited = 1;
 
-       beo_base = get_next_ir_opcode();
+       /* Acquire all needed opcodes. */
+       beo_base = get_next_ir_opcodes(beo_Last - 1);
 
-       /* Acquire all needed opcodes. We assume that they are consecutive! */
-       for(i = beo_Spill; i < beo_Last; ++i)
-               get_next_ir_opcode();
-
-       op_Spill  = new_ir_op(beo_base + beo_Spill,  "Spill",  op_pin_state_mem_pinned, 0, oparity_unary,    0, sizeof(be_spill_attr_t), &be_node_op_ops);
-       op_Reload = new_ir_op(beo_base + beo_Reload, "Reload", op_pin_state_mem_pinned, 0, oparity_zero,     0, sizeof(be_node_attr_t),  &be_node_op_ops);
-       op_Perm   = new_ir_op(beo_base + beo_Perm,   "Perm",   op_pin_state_pinned,     0, oparity_variable, 0, sizeof(be_node_attr_t),  &be_node_op_ops);
-       op_Copy   = new_ir_op(beo_base + beo_Copy,   "Copy",   op_pin_state_pinned,     0, oparity_unary,    0, sizeof(be_node_attr_t),  &be_node_op_ops);
-       op_Keep   = new_ir_op(beo_base + beo_Keep,   "Keep",   op_pin_state_pinned,     0, oparity_variable, 0, sizeof(be_node_attr_t),  &be_node_op_ops);
+       op_Spill  = new_ir_op(beo_base + beo_Spill,  "Spill",  op_pin_state_mem_pinned, N, oparity_unary,    0, sizeof(be_spill_attr_t), &be_node_op_ops);
+       op_Reload = new_ir_op(beo_base + beo_Reload, "Reload", op_pin_state_mem_pinned, N, oparity_zero,     0, sizeof(be_node_attr_t),  &be_node_op_ops);
+       op_Perm   = new_ir_op(beo_base + beo_Perm,   "Perm",   op_pin_state_pinned,     N, oparity_variable, 0, sizeof(be_node_attr_t),  &be_node_op_ops);
+       op_Copy   = new_ir_op(beo_base + beo_Copy,   "Copy",   op_pin_state_pinned,     N, oparity_unary,    0, sizeof(be_node_attr_t),  &be_node_op_ops);
+       op_Keep   = new_ir_op(beo_base + beo_Keep,   "Keep",   op_pin_state_pinned,     K, oparity_variable, 0, sizeof(be_node_attr_t),  &be_node_op_ops);
 
        set_op_tag(op_Spill,  &be_node_tag);
        set_op_tag(op_Reload, &be_node_tag);
@@ -129,8 +142,8 @@ static void *init_node_attr(ir_node* irn, const arch_register_class_t *cls, ir_g
                a->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(irg), n_outs);
                memset(a->reg_data, 0, n_outs * sizeof(a->reg_data[0]));
                for(i = 0; i < n_outs; ++i) {
-                       a->reg_data[i].req.cls  = cls;
-                       a->reg_data[i].req.type = arch_register_req_type_normal;
+                       a->reg_data[i].req.req.cls  = cls;
+                       a->reg_data[i].req.req.type = arch_register_req_type_normal;
                }
        }
 
@@ -156,7 +169,7 @@ ir_node *be_new_Spill(const arch_register_class_t *cls, ir_graph *irg, ir_node *
        in[0] = to_spill;
        res   = new_ir_node(NULL, irg, bl, op_Spill, mode_M, 1, in);
        a     = init_node_attr(res, cls, irg, 0);
-       a->offset    = BE_SPILL_NO_OFFSET;
+       a->ent       = NULL;
        a->spill_ctx = ctx;
        return res;
 }
@@ -225,20 +238,38 @@ int be_is_Keep(const ir_node *irn)
        return get_irn_be_opcode(irn) == beo_Keep;
 }
 
-void be_set_Perm_out_req(ir_node *irn, int pos, const arch_register_req_t *req)
+static void be_limited(void *data, bitset_t *bs)
 {
-       be_node_attr_t *a = get_irn_attr(irn);
+       be_req_t *req = data;
+
+       req->old_limited(req->old_limited_env, bs);
+       if(req->negate_limited)
+               bitset_flip_all(bs);
+}
+
+void be_set_Perm_out_req(ir_node *irn, int pos, const arch_register_req_t *req, unsigned negate_limited)
+{
+       be_node_attr_t *a   = get_irn_attr(irn);
+       be_req_t       *r = &a->reg_data[pos].req;
 
        assert(be_is_Perm(irn));
        assert(pos >= 0 && pos < get_irn_arity(irn));
-       memcpy(&a->reg_data[pos].req, req, sizeof(req[0]));
+       memcpy(&r->req, req, sizeof(req[0]));
+
+       if(arch_register_req_is(req, limited)) {
+               r->old_limited     = r->req.limited;
+               r->old_limited_env = r->req.limited_env;
+               r->req.limited     = be_limited;
+               r->req.limited_env = r;
+               r->negate_limited  = negate_limited;
+       }
 }
 
-void be_set_Spill_offset(ir_node *irn, unsigned offset)
+void be_set_Spill_entity(ir_node *irn, entity *ent)
 {
        be_spill_attr_t *a = get_irn_attr(irn);
        assert(be_is_Spill(irn));
-       a->offset = offset;
+       a->ent = ent;
 }
 
 static ir_node *find_a_spill_walker(ir_node *irn, unsigned visited_nr)
@@ -284,24 +315,23 @@ static INLINE ir_node *find_a_spill(ir_node *irn)
        return find_a_spill_walker(irn, visited_nr);
 }
 
-
-unsigned be_get_spill_offset(ir_node *irn)
+entity *be_get_spill_entity(ir_node *irn)
 {
        int opc           = get_irn_opcode(irn);
 
        switch(get_irn_be_opcode(irn)) {
        case beo_Reload:
-               return be_get_spill_offset(find_a_spill(irn));
+               return be_get_spill_entity(find_a_spill(irn));
        case beo_Spill:
                {
                        be_spill_attr_t *a = get_irn_attr(irn);
-                       return a->offset;
+                       return a->ent;
                }
        default:
                assert(0 && "Must give spill/reload node");
        }
 
-       return (unsigned) -1;
+       return NULL;
 }
 
 ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn, ir_node *ctx)
@@ -518,8 +548,9 @@ static int dump_node(ir_node *irn, FILE *f, dump_reason_t reason)
 
                        if(get_irn_be_opcode(irn) == beo_Spill) {
                                be_spill_attr_t *a = (be_spill_attr_t *) at;
+                               unsigned ofs = get_entity_offset_bytes(a->ent);
                                ir_fprintf(f, "spill context: %+F\n", a->spill_ctx);
-                               ir_fprintf(f, "spill offset: %04x (%u)\n", a->offset, a->offset);
+                               ir_fprintf(f, "spill entity: %+F offset %x (%d)\n", a->ent, ofs, ofs);
                        }
                        break;
        }