X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbenode.c;h=a60e01a18c79f084f29734f082fff5ff4550e9dc;hb=f804d333c7b5459c3c1a6bfc188ecdc54346be73;hp=b4a0dcdf54f10f05b3559878a6aa7da5df8fb872;hpb=891cdb488b4bf5224867aa8ef6b28007c432057a;p=libfirm

diff --git a/ir/be/benode.c b/ir/be/benode.c
index b4a0dcdf5..a60e01a18 100644
--- a/ir/be/benode.c
+++ b/ir/be/benode.c
@@ -59,9 +59,16 @@ typedef struct {
 	int *pos;
 } be_op_t;
 
+typedef struct {
+	arch_register_req_t req;
+	unsigned negate_limited : 1;
+	void (*old_limited)(void *ptr, bitset_t *bs);
+	void *old_limited_env;
+} be_req_t;
+
 typedef struct {
 	const arch_register_t *reg;
-	arch_register_req_t req;
+	be_req_t req;
 } be_reg_data_t;
 
 typedef struct {
@@ -86,26 +93,33 @@ static int beo_base = -1;
 
 static const ir_op_ops be_node_op_ops;
 
+#define N irop_flag_none
+#define L irop_flag_labeled
+#define C irop_flag_commutative
+#define X irop_flag_cfopcode
+#define I irop_flag_ip_cfopcode
+#define F irop_flag_fragile
+#define Y irop_flag_forking
+#define H irop_flag_highlevel
+#define c irop_flag_constlike
+#define K irop_flag_keep
+
 void be_node_init(void) {
 	static int inited = 0;
-	int i;
 
 	if(inited)
 		return;
 
 	inited = 1;
 
-	beo_base = get_next_ir_opcode();
-
-	/* Acquire all needed opcodes. We assume that they are consecutive! */
-	for(i = beo_Spill; i < beo_Last; ++i)
-		get_next_ir_opcode();
+	/* Acquire all needed opcodes. */
+	beo_base = get_next_ir_opcodes(beo_Last - 1);
 
-	op_Spill  = new_ir_op(beo_base + beo_Spill,  "Spill",  op_pin_state_mem_pinned, 0, oparity_unary,    0, sizeof(be_spill_attr_t), &be_node_op_ops);
-	op_Reload = new_ir_op(beo_base + beo_Reload, "Reload", op_pin_state_mem_pinned, 0, oparity_zero,     0, sizeof(be_node_attr_t),  &be_node_op_ops);
-	op_Perm   = new_ir_op(beo_base + beo_Perm,   "Perm",   op_pin_state_pinned,     0, oparity_variable, 0, sizeof(be_node_attr_t),  &be_node_op_ops);
-	op_Copy   = new_ir_op(beo_base + beo_Copy,   "Copy",   op_pin_state_pinned,     0, oparity_unary,    0, sizeof(be_node_attr_t),  &be_node_op_ops);
-	op_Keep   = new_ir_op(beo_base + beo_Keep,   "Keep",   op_pin_state_pinned,     0, oparity_variable, 0, sizeof(be_node_attr_t),  &be_node_op_ops);
+	op_Spill  = new_ir_op(beo_base + beo_Spill,  "Spill",  op_pin_state_mem_pinned, N, oparity_unary,    0, sizeof(be_spill_attr_t), &be_node_op_ops);
+	op_Reload = new_ir_op(beo_base + beo_Reload, "Reload", op_pin_state_mem_pinned, N, oparity_zero,     0, sizeof(be_node_attr_t),  &be_node_op_ops);
+	op_Perm   = new_ir_op(beo_base + beo_Perm,   "Perm",   op_pin_state_pinned,     N, oparity_variable, 0, sizeof(be_node_attr_t),  &be_node_op_ops);
+	op_Copy   = new_ir_op(beo_base + beo_Copy,   "Copy",   op_pin_state_pinned,     N, oparity_unary,    0, sizeof(be_node_attr_t),  &be_node_op_ops);
+	op_Keep   = new_ir_op(beo_base + beo_Keep,   "Keep",   op_pin_state_pinned,     K, oparity_variable, 0, sizeof(be_node_attr_t),  &be_node_op_ops);
 
 	set_op_tag(op_Spill, &be_node_tag);
 	set_op_tag(op_Reload, &be_node_tag);
@@ -128,8 +142,8 @@ static void *init_node_attr(ir_node* irn, const arch_register_class_t *cls, ir_g
 		a->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(irg), n_outs);
 		memset(a->reg_data, 0, n_outs * sizeof(a->reg_data[0]));
 		for(i = 0; i < n_outs; ++i) {
-			a->reg_data[i].req.cls  = cls;
-			a->reg_data[i].req.type = arch_register_req_type_normal;
+			a->reg_data[i].req.req.cls  = cls;
+			a->reg_data[i].req.req.type = arch_register_req_type_normal;
 		}
 	}
 
@@ -155,7 +169,7 @@ ir_node *be_new_Spill(const arch_register_class_t *cls, ir_graph *irg, ir_node *
 	in[0] = to_spill;
 	res   = new_ir_node(NULL, irg, bl, op_Spill, mode_M, 1, in);
 	a     = init_node_attr(res, cls, irg, 0);
-	a->ent = NULL;
+	a->ent       = NULL;
 	a->spill_ctx = ctx;
 	return res;
 }
@@ -224,13 +238,31 @@ int be_is_Keep(const ir_node *irn)
 	return get_irn_be_opcode(irn) == beo_Keep;
 }
 
-void be_set_Perm_out_req(ir_node *irn, int pos, const arch_register_req_t *req)
+static void be_limited(void *data, bitset_t *bs)
 {
-	be_node_attr_t *a = get_irn_attr(irn);
+	be_req_t *req = data;
+
+	req->old_limited(req->old_limited_env, bs);
+	if(req->negate_limited)
+		bitset_flip_all(bs);
+}
+
+void be_set_Perm_out_req(ir_node *irn, int pos, const arch_register_req_t *req, unsigned negate_limited)
+{
+	be_node_attr_t *a = get_irn_attr(irn);
+	be_req_t *r = &a->reg_data[pos].req;
 
 	assert(be_is_Perm(irn));
 	assert(pos >= 0 && pos < get_irn_arity(irn));
-	memcpy(&a->reg_data[pos].req, req, sizeof(req[0]));
+	memcpy(&r->req, req, sizeof(req[0]));
+
+	if(arch_register_req_is(req, limited)) {
+		r->old_limited     = r->req.limited;
+		r->old_limited_env = r->req.limited_env;
+		r->req.limited     = be_limited;
+		r->req.limited_env = r;
+		r->negate_limited  = negate_limited;
+	}
 }
 
 void be_set_Spill_entity(ir_node *irn, entity *ent)
@@ -283,7 +315,6 @@ static INLINE ir_node *find_a_spill(ir_node *irn)
 	return find_a_spill_walker(irn, visited_nr);
 }
 
-
 entity *be_get_spill_entity(ir_node *irn)
 {
 	int opc = get_irn_opcode(irn);
@@ -517,8 +548,9 @@ static int dump_node(ir_node *irn, FILE *f, dump_reason_t reason)
 
 			if(get_irn_be_opcode(irn) == beo_Spill) {
 				be_spill_attr_t *a = (be_spill_attr_t *) at;
+				unsigned ofs = get_entity_offset_bytes(a->ent);
 				ir_fprintf(f, "spill context: %+F\n", a->spill_ctx);
-//TODO				ir_fprintf(f, "spill offset: %04x (%u)\n", a->offset, a->offset);
+				ir_fprintf(f, "spill entity: %+F offset %x (%d)\n", a->ent, ofs, ofs);
 			}
 			break;
 		}
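
Note on the change above: be_set_Perm_out_req() now copies the incoming requirement into a be_req_t and, for limited requirements, interposes be_limited() in front of the original limited callback so that the admissible register set can optionally be inverted. The following self-contained sketch is illustrative only: it models that wrapper pattern with a plain unsigned register mask and hypothetical demo_* names rather than libfirm's bitset_t API.

/* Standalone sketch, not part of the commit above: it models the
 * be_limited() wrapper idea with a plain unsigned register mask
 * instead of libfirm's bitset_t. All demo_* names are hypothetical. */
#include <stdio.h>

typedef void (*limited_fn)(void *env, unsigned *mask);

typedef struct {
	unsigned   negate_limited : 1;  /* invert the allowed set? */
	limited_fn old_limited;         /* the original constraint callback */
	void      *old_limited_env;     /* its environment pointer */
} demo_req_t;

/* Original constraint: only registers 0 and 1 are admissible. */
static void demo_limited(void *env, unsigned *mask)
{
	(void) env;
	*mask = 0x3;
}

/* Analogue of be_limited(): run the old callback, then optionally
 * flip every bit, turning "must be one of these registers" into
 * "may be any register except these". */
static void demo_wrapper(void *data, unsigned *mask)
{
	demo_req_t *req = data;

	req->old_limited(req->old_limited_env, mask);
	if (req->negate_limited)
		*mask = ~*mask;
}

int main(void)
{
	demo_req_t req  = { 1, demo_limited, NULL };
	unsigned   mask = 0;

	demo_wrapper(&req, &mask);
	printf("allowed register mask: %#x\n", mask);  /* complement of 0x3 */
	return 0;
}

In the real code the same effect is obtained by saving the old limited callback and its environment in be_req_t and pointing req.limited at be_limited, which flips the bitset with bitset_flip_all() when negate_limited is set.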