X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbenode.c;h=b87dcfd6ca50a9b56e06958bae345148c3bc960f;hb=93587aaef8b19becb0c79f6c332238bcae3db1da;hp=b1c6f593424686798e5cd77cca25ecce82966d09;hpb=cdf28710bfb9bc07406af412d6780c7cdb9283d2;p=libfirm

diff --git a/ir/be/benode.c b/ir/be/benode.c
index b1c6f5934..b87dcfd6c 100644
--- a/ir/be/benode.c
+++ b/ir/be/benode.c
@@ -59,13 +59,34 @@ typedef struct {
 	int *pos;
 } be_op_t;
 
+typedef enum {
+	be_req_kind_old_limited,
+	be_req_kind_negate_old_limited,
+	be_req_kind_single_reg
+} be_req_kind_t;
+
+typedef struct {
+	arch_register_req_t req;
+	be_req_kind_t kind;
+	union {
+		struct {
+			void (*old_limited)(void *ptr, bitset_t *bs);
+			void *old_limited_env;
+		} old_limited;
+
+		const arch_register_t *single_reg;
+	} x;
+} be_req_t;
+
 typedef struct {
 	const arch_register_t *reg;
-	arch_register_req_t req;
+	be_req_t req;
+	be_req_t in_req;
 } be_reg_data_t;
 
 typedef struct {
-	int n_outs;
+	int max_reg_data;
+	arch_irn_flags_t flags;
 	const arch_register_class_t *cls;
 	be_reg_data_t *reg_data;
 } be_node_attr_t;
@@ -73,64 +94,95 @@ typedef struct {
 typedef struct {
 	be_node_attr_t node_attr;
 	ir_node *spill_ctx;  /**< The node in whose context this spill was introduced. */
-	unsigned offset;     /**< The offset of the memory location the spill writes to
-	                          in the spill area. */
+	entity *ent;         /**< The entity in the stack frame the spill writes to. */
 } be_spill_attr_t;
 
+typedef struct {
+	be_node_attr_t node_attr;
+	int offset;          /**< The offset by which the stack shall be increased/decreased. */
+	be_stack_dir_t dir;  /**< The direction in which the stack shall be modified (along or in the other direction). */
+} be_stack_attr_t;
+
 static ir_op *op_Spill;
 static ir_op *op_Reload;
 static ir_op *op_Perm;
 static ir_op *op_Copy;
 static ir_op *op_Keep;
+static ir_op *op_Call;
+static ir_op *op_IncSP;
+static ir_op *op_AddSP;
+static ir_op *op_RegParams;
+static ir_op *op_StackParam;
+static ir_op *op_NoReg;
 
 static int beo_base = -1;
 
 static const ir_op_ops be_node_op_ops;
 
+#define N irop_flag_none
+#define L irop_flag_labeled
+#define C irop_flag_commutative
+#define X irop_flag_cfopcode
+#define I irop_flag_ip_cfopcode
+#define F irop_flag_fragile
+#define Y irop_flag_forking
+#define H irop_flag_highlevel
+#define c irop_flag_constlike
+#define K irop_flag_keep
+
 void be_node_init(void) {
 	static int inited = 0;
-	int i;
 
 	if(inited)
 		return;
 
 	inited = 1;
 
-	beo_base = get_next_ir_opcode();
-
-	/* Acquire all needed opcodes. We assume that they are consecutive! */
-	for(i = beo_Spill; i < beo_Last; ++i)
-		get_next_ir_opcode();
-
-	op_Spill  = new_ir_op(beo_base + beo_Spill,  "Spill",  op_pin_state_mem_pinned, 0, oparity_unary, 0, sizeof(be_spill_attr_t), &be_node_op_ops);
-	op_Reload = new_ir_op(beo_base + beo_Reload, "Reload", op_pin_state_mem_pinned, 0, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops);
-	op_Perm   = new_ir_op(beo_base + beo_Perm,   "Perm",   op_pin_state_pinned, 0, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
-	op_Copy   = new_ir_op(beo_base + beo_Copy,   "Copy",   op_pin_state_pinned, 0, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
-	op_Keep   = new_ir_op(beo_base + beo_Keep,   "Keep",   op_pin_state_pinned, 0, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
-
-	set_op_tag(op_Spill, &be_node_tag);
-	set_op_tag(op_Reload, &be_node_tag);
-	set_op_tag(op_Perm, &be_node_tag);
-	set_op_tag(op_Copy, &be_node_tag);
-	set_op_tag(op_Keep, &be_node_tag);
-}
-
-static void *init_node_attr(ir_node* irn, const arch_register_class_t *cls, ir_graph *irg, int n_outs)
+	/* Acquire all needed opcodes. */
+	beo_base = get_next_ir_opcodes(beo_Last - 1);
+
+	op_Spill      = new_ir_op(beo_base + beo_Spill,      "Spill",      op_pin_state_mem_pinned, N, oparity_unary,    0, sizeof(be_spill_attr_t), &be_node_op_ops);
+	op_Reload     = new_ir_op(beo_base + beo_Reload,     "Reload",     op_pin_state_mem_pinned, N, oparity_zero,     0, sizeof(be_node_attr_t),  &be_node_op_ops);
+	op_Perm       = new_ir_op(beo_base + beo_Perm,       "Perm",       op_pin_state_pinned,     N, oparity_variable, 0, sizeof(be_node_attr_t),  &be_node_op_ops);
+	op_Copy       = new_ir_op(beo_base + beo_Copy,       "Copy",       op_pin_state_pinned,     N, oparity_unary,    0, sizeof(be_node_attr_t),  &be_node_op_ops);
+	op_Keep       = new_ir_op(beo_base + beo_Keep,       "Keep",       op_pin_state_pinned,     K, oparity_variable, 0, sizeof(be_node_attr_t),  &be_node_op_ops);
+	op_NoReg      = new_ir_op(beo_base + beo_NoReg,      "NoReg",      op_pin_state_pinned,     K, oparity_variable, 0, sizeof(be_node_attr_t),  &be_node_op_ops);
+	op_Call       = new_ir_op(beo_base + beo_Call,       "Call",       op_pin_state_pinned,     K, oparity_variable, 0, sizeof(be_node_attr_t),  &be_node_op_ops);
+	op_AddSP      = new_ir_op(beo_base + beo_AddSP,      "AddSP",      op_pin_state_pinned,     K, oparity_variable, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
+	op_IncSP      = new_ir_op(beo_base + beo_IncSP,      "IncSP",      op_pin_state_pinned,     K, oparity_variable, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
+	op_RegParams  = new_ir_op(beo_base + beo_RegParams,  "RegParams",  op_pin_state_pinned,     K, oparity_variable, 0, sizeof(be_node_attr_t),  &be_node_op_ops);
+	op_StackParam = new_ir_op(beo_base + beo_StackParam, "StackParam", op_pin_state_pinned,     K, oparity_variable, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
+
+	set_op_tag(op_Spill, &be_node_tag);
+	set_op_tag(op_Reload, &be_node_tag);
+	set_op_tag(op_Perm, &be_node_tag);
+	set_op_tag(op_Copy, &be_node_tag);
+	set_op_tag(op_Keep, &be_node_tag);
+	set_op_tag(op_NoReg, &be_node_tag);
+	set_op_tag(op_Call, &be_node_tag);
+	set_op_tag(op_AddSP, &be_node_tag);
+	set_op_tag(op_IncSP, &be_node_tag);
+	set_op_tag(op_RegParams, &be_node_tag);
+	set_op_tag(op_StackParam, &be_node_tag);
+}
+
+static void *init_node_attr(ir_node* irn, const arch_register_class_t *cls, ir_graph *irg, int max_reg_data)
 {
 	be_node_attr_t *a = get_irn_attr(irn);
 
-	a->n_outs   = n_outs;
-	a->cls      = cls;
-	a->reg_data = NULL;
+	a->max_reg_data = max_reg_data;
+	a->flags        = arch_irn_flags_none;
+	a->cls          = cls;
+	a->reg_data     = NULL;
 
-	if(n_outs > 0) {
+	if(max_reg_data > 0) {
 		int i;
 
-		a->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(irg), n_outs);
-		memset(a->reg_data, 0, n_outs * sizeof(a->reg_data[0]));
-		for(i = 0; i < n_outs; ++i) {
-			a->reg_data[i].req.cls  = cls;
-			a->reg_data[i].req.type = arch_register_req_type_normal;
+		a->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(irg), max_reg_data);
+		memset(a->reg_data, 0, max_reg_data * sizeof(a->reg_data[0]));
+		for(i = 0; i < max_reg_data; ++i) {
+			a->reg_data[i].req.req.cls  = cls;
+			a->reg_data[i].req.req.type = arch_register_req_type_normal;
 		}
 	}
@@ -147,6 +199,34 @@ be_opcode_t get_irn_be_opcode(const ir_node *irn)
 	return is_be_node(irn) ? get_irn_opcode(irn) - beo_base : beo_NoBeOp;
 }
 
+static int redir_proj(const ir_node **node, int pos)
+{
+	const ir_node *n = *node;
+
+	if(is_Proj(n)) {
+		assert(pos == -1 && "Illegal pos for a Proj");
+		*node = get_Proj_pred(n);
+		return get_Proj_proj(n);
+	}
+
+	return 0;
+}
+
+static void
+be_node_set_irn_reg(const void *_self, ir_node *irn, const arch_register_t *reg)
+{
+	int out_pos;
+	be_node_attr_t *a;
+
+	out_pos = redir_proj((const ir_node **) &irn, -1);
+	a = get_irn_attr(irn);
+
+	assert(is_be_node(irn));
+	assert(out_pos < a->max_reg_data && "position too high");
+	a->reg_data[out_pos].reg = reg;
+}
+
+
 ir_node *be_new_Spill(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *to_spill, ir_node *ctx)
 {
 	be_spill_attr_t *a;
@@ -156,7 +236,7 @@ ir_node *be_new_Spill(const arch_register_class_t *cls, ir_graph *irg, ir_node *
 	in[0] = to_spill;
 	res = new_ir_node(NULL, irg, bl, op_Spill, mode_M, 1, in);
 	a = init_node_attr(res, cls, irg, 0);
-	a->offset = BE_SPILL_NO_OFFSET;
+	a->ent = NULL;
 	a->spill_ctx = ctx;
 	return res;
 }
@@ -200,45 +280,195 @@ ir_node *be_new_Keep(const arch_register_class_t *cls, ir_graph *irg, ir_node *b
 	return irn;
 }
 
-int be_is_Spill(const ir_node *irn)
+ir_node *be_new_Call(ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *sp, ir_node *ptr, int n_outs, int n, ir_node *in[])
 {
-	return get_irn_be_opcode(irn) == beo_Spill;
+	int real_n = 3 + n;
+	ir_node *irn;
+	ir_node **real_in;
+
+	real_in = malloc(sizeof(real_in[0]) * (real_n));
+
+	real_in[0] = mem;
+	real_in[1] = sp;
+	real_in[2] = ptr;
+	memcpy(&real_in[3], in, n * sizeof(in[0]));
+
+	irn = new_ir_node(NULL, irg, bl, op_Call, mode_T, real_n, real_in);
+	init_node_attr(irn, NULL, irg, (n_outs > real_n ? n_outs : real_n));
+	return irn;
 }
 
-int be_is_Reload(const ir_node *irn)
+ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, unsigned offset, be_stack_dir_t dir)
 {
-	return get_irn_be_opcode(irn) == beo_Reload;
+	be_stack_attr_t *a;
+	ir_node *irn;
+	ir_node *in[1];
+
+	in[0] = old_sp;
+	irn = new_ir_node(NULL, irg, bl, op_IncSP, sp->reg_class->mode, 1, in);
+	a = init_node_attr(irn, sp->reg_class, irg, 1);
+	a->dir = dir;
+	a->offset = offset;
+
+	a->node_attr.flags |= arch_irn_flags_ignore;
+
+	/* Set output constraint to stack register. */
+	be_set_constr_single_reg(irn, -1, sp);
+	be_node_set_irn_reg(NULL, irn, sp);
+
+	return irn;
 }
 
-int be_is_Copy(const ir_node *irn)
+ir_node *be_new_AddSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *op)
 {
-	return get_irn_be_opcode(irn) == beo_Copy;
+	be_node_attr_t *a;
+	ir_node *irn;
+	ir_node *in[2];
+
+	in[0] = old_sp;
+	in[1] = op;
+	irn = new_ir_node(NULL, irg, bl, op_AddSP, sp->reg_class->mode, 2, in);
+	a = init_node_attr(irn, sp->reg_class, irg, 1);
+	a->flags |= arch_irn_flags_ignore;
+
+	/* Set output constraint to stack register. */
+	be_set_constr_single_reg(irn, -1, sp);
+	be_node_set_irn_reg(NULL, irn, sp);
+
+	return irn;
 }
 
-int be_is_Perm(const ir_node *irn)
+ir_node *be_new_NoReg(const arch_register_t *reg, ir_graph *irg, ir_node *bl)
 {
-	return get_irn_be_opcode(irn) == beo_Perm;
+	be_node_attr_t *a;
+	ir_node *irn;
+	ir_node *in[1];
+
+	irn = new_ir_node(NULL, irg, bl, op_NoReg, reg->reg_class->mode, 0, in);
+	a = init_node_attr(irn, reg->reg_class, irg, 1);
+	a->flags |= arch_irn_flags_ignore;
+	be_set_constr_single_reg(irn, -1, reg);
+	be_node_set_irn_reg(NULL, irn, reg);
+	return irn;
 }
 
-int be_is_Keep(const ir_node *irn)
+ir_node *be_new_StackParam(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_mode *mode, ir_node *frame_pointer, unsigned offset)
 {
-	return get_irn_be_opcode(irn) == beo_Keep;
+	be_stack_attr_t *a;
+	ir_node *irn;
+	ir_node *in[1];
+
+	in[0] = frame_pointer;
+	irn = new_ir_node(NULL, irg, bl, op_StackParam, mode, 1, in);
+	a = init_node_attr(irn, cls, irg, 1);
+	a->offset = offset;
+	return irn;
 }
 
-void be_set_Perm_out_req(ir_node *irn, int pos, const arch_register_req_t *req)
+int be_is_Spill      (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_Spill;      }
+int be_is_Reload     (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_Reload;     }
+int be_is_Copy       (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_Copy;       }
+int be_is_Perm       (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_Perm;       }
+int be_is_Keep       (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_Keep;       }
+int be_is_Call       (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_Call;       }
+int be_is_IncSP      (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_IncSP;      }
+int be_is_AddSP      (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_AddSP;      }
+int be_is_RegParams  (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_RegParams;  }
+int be_is_StackParam (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_StackParam; }
+int be_is_NoReg      (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_NoReg;      }
+
+static void be_limited(void *data, bitset_t *bs)
+{
+	be_req_t *req = data;
+
+	switch(req->kind) {
+	case be_req_kind_negate_old_limited:
+	case be_req_kind_old_limited:
+		req->x.old_limited.old_limited(req->x.old_limited.old_limited_env, bs);
+		if(req->kind == be_req_kind_negate_old_limited)
+			bitset_flip_all(bs);
+		break;
+	case be_req_kind_single_reg:
+		bitset_clear_all(bs);
+		bitset_set(bs, req->x.single_reg->index);
+		break;
+	}
+}
+
+void be_set_constr_single_reg(ir_node *irn, int pos, const arch_register_t *reg)
 {
+	int idx = pos < 0 ? -(pos + 1) : pos;
 	be_node_attr_t *a = get_irn_attr(irn);
+	be_reg_data_t *rd = &a->reg_data[idx];
+	be_req_t *r = pos < 0 ? &rd->req : &rd->in_req;
 
-	assert(be_is_Perm(irn));
-	assert(pos >= 0 && pos < get_irn_arity(irn));
-	memcpy(&a->reg_data[pos].req, req, sizeof(req[0]));
+	assert(is_be_node(irn));
+	assert(!(pos >= 0) || pos < get_irn_arity(irn));
+	assert(!(pos < 0) || -(pos + 1) <= a->max_reg_data);
+
+	r->kind = be_req_kind_single_reg;
+	r->x.single_reg = reg;
+	r->req.limited = be_limited;
+	r->req.limited_env = r;
+	r->req.type = arch_register_req_type_limited;
+	r->req.cls = reg->reg_class;
+}
+
+void be_set_constr_limited(ir_node *irn, int pos, const arch_register_req_t *req)
+{
+	int idx = pos < 0 ? -(pos + 1) : pos;
+	be_node_attr_t *a = get_irn_attr(irn);
+	be_reg_data_t *rd = &a->reg_data[idx];
+	be_req_t *r = pos < 0 ? &rd->req : &rd->in_req;
+
+	assert(is_be_node(irn));
+	assert(!(pos >= 0) || pos < get_irn_arity(irn));
+	assert(!(pos < 0) || -(pos + 1) <= a->max_reg_data);
+	assert(arch_register_req_is(req, limited));
+
+	r->kind = be_req_kind_old_limited;
+	r->req.limited = be_limited;
+	r->req.limited_env = r;
+	r->req.type = arch_register_req_type_limited;
+	r->req.cls = req->cls;
+
+	r->x.old_limited.old_limited = req->limited;
+	r->x.old_limited.old_limited_env = req->limited_env;
+}
+
+void be_set_IncSP_offset(ir_node *irn, unsigned offset)
+{
+	be_stack_attr_t *a = get_irn_attr(irn);
+	assert(be_is_IncSP(irn));
+	a->offset = offset;
+}
+
+unsigned be_get_IncSP_offset(ir_node *irn)
+{
+	be_stack_attr_t *a = get_irn_attr(irn);
+	assert(be_is_IncSP(irn));
+	return a->offset;
+}
+
+void be_set_IncSP_direction(ir_node *irn, be_stack_dir_t dir)
+{
+	be_stack_attr_t *a = get_irn_attr(irn);
+	assert(be_is_IncSP(irn));
+	a->dir = dir;
 }
 
-void be_set_Spill_offset(ir_node *irn, unsigned offset)
+be_stack_dir_t be_get_IncSP_direction(ir_node *irn)
+{
+	be_stack_attr_t *a = get_irn_attr(irn);
+	assert(be_is_IncSP(irn));
+	return a->dir;
+}
+
+void be_set_Spill_entity(ir_node *irn, entity *ent)
 {
 	be_spill_attr_t *a = get_irn_attr(irn);
 	assert(be_is_Spill(irn));
-	a->offset = offset;
+	a->ent = ent;
 }
 
 static ir_node *find_a_spill_walker(ir_node *irn, unsigned visited_nr)
@@ -284,24 +514,23 @@ static INLINE ir_node *find_a_spill(ir_node *irn)
 	return find_a_spill_walker(irn, visited_nr);
 }
 
-
-unsigned be_get_spill_offset(ir_node *irn)
+entity *be_get_spill_entity(ir_node *irn)
 {
 	int opc = get_irn_opcode(irn);
 	switch(get_irn_be_opcode(irn)) {
 	case beo_Reload:
-		return be_get_spill_offset(find_a_spill(irn));
+		return be_get_spill_entity(find_a_spill(irn));
 	case beo_Spill:
 	{
 		be_spill_attr_t *a = get_irn_attr(irn);
-		return a->offset;
+		return a->ent;
 	}
 	default:
 		assert(0 && "Must give spill/reload node");
 	}
 
-	return (unsigned) -1;
+	return NULL;
 }
 
 ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn, ir_node *ctx)
@@ -344,25 +573,11 @@ ir_node *be_reload(const arch_env_t *arch_env,
 	return reload;
 }
 
-static int redir_proj(const ir_node **node, int pos)
-{
-	const ir_node *n = *node;
-
-	if(is_Proj(n)) {
-		assert(pos == -1 && "Illegal pos for a Proj");
-		*node = get_Proj_pred(n);
-		return get_Proj_proj(n);
-	}
-
-	return 0;
-}
-
 static void *put_out_reg_req(arch_register_req_t *req, const ir_node *irn, int out_pos)
 {
 	const be_node_attr_t *a = get_irn_attr(irn);
-
-	if(out_pos < a->n_outs)
+	if(out_pos < a->max_reg_data)
 		memcpy(req, &a->reg_data[out_pos].req, sizeof(req[0]));
 	else {
 		req->type = arch_register_req_type_none;
@@ -377,22 +592,11 @@ static void *put_in_reg_req(arch_register_req_t *req, const ir_node *irn, int po
 	const be_node_attr_t *a = get_irn_attr(irn);
 	int n = get_irn_arity(irn);
 
-	req->type = arch_register_req_type_none;
-	req->cls = NULL;
-
-	switch(get_irn_be_opcode(irn)) {
-	case beo_Spill:
-	case beo_Copy:
-	case beo_Keep:
-	case beo_Perm:
-		if(pos < n) {
-			req->type = arch_register_req_type_normal;
-			req->cls = a->cls;
-		}
-		break;
-	case beo_Reload:
-	default:
-		req = NULL;
+	if(pos < get_irn_arity(irn))
+		memcpy(req, &a->reg_data[pos].in_req, sizeof(req[0]));
+	else {
+		req->type = arch_register_req_type_none;
+		req->cls = NULL;
 	}
 
 	return req;
@@ -419,20 +623,6 @@ be_node_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_nod
 	return req;
 }
 
-static void
-be_node_set_irn_reg(const void *_self, ir_node *irn, const arch_register_t *reg)
-{
-	int out_pos;
-	be_node_attr_t *a;
-
-	out_pos = redir_proj((const ir_node **) &irn, -1);
-	a = get_irn_attr(irn);
-
-	assert(is_be_node(irn));
-	assert(out_pos < a->n_outs && "position too high");
-	a->reg_data[out_pos].reg = reg;
-}
-
 const arch_register_t *
 be_node_get_irn_reg(const void *_self, const ir_node *irn)
 {
@@ -443,7 +633,7 @@ be_node_get_irn_reg(const void *_self, const ir_node *irn)
 	a = get_irn_attr(irn);
 
 	assert(is_be_node(irn));
-	assert(out_pos < a->n_outs && "position too high");
+	assert(out_pos < a->max_reg_data && "position too high");
 
 	return a->reg_data[out_pos].reg;
 }
@@ -466,9 +656,10 @@ arch_irn_class_t be_node_classify(const void *_self, const ir_node *irn)
 	return 0;
 }
 
-arch_irn_class_t be_node_get_flags(const void *_self, const ir_node *irn)
+arch_irn_flags_t be_node_get_flags(const void *_self, const ir_node *irn)
 {
-	return 0;
+	be_node_attr_t *a = get_irn_attr(irn);
+	return a->flags;
 }
 
 static const arch_irn_ops_if_t be_node_irn_ops_if = {
@@ -511,29 +702,62 @@ static int dump_node(ir_node *irn, FILE *f, dump_reason_t reason)
 		break;
 	case dump_node_info_txt:
 		fprintf(f, "reg class: %s\n", at->cls->name);
-		for(i = 0; i < at->n_outs; ++i) {
+		for(i = 0; i < at->max_reg_data; ++i) {
 			const arch_register_t *reg = at->reg_data[i].reg;
 			fprintf(f, "reg #%d: %s\n", i, reg ? reg->name : "n/a");
 		}
 
-		if(get_irn_be_opcode(irn) == beo_Spill) {
-			be_spill_attr_t *a = (be_spill_attr_t *) at;
-			ir_fprintf(f, "spill context: %+F\n", a->spill_ctx);
-			ir_fprintf(f, "spill offset: %04x (%u)\n", a->offset, a->offset);
+		switch(get_irn_be_opcode(irn)) {
+		case beo_Spill:
+			{
+				be_spill_attr_t *a = (be_spill_attr_t *) at;
+
+				ir_fprintf(f, "spill context: %+F\n", a->spill_ctx);
+				if (a->ent) {
+					unsigned ofs = get_entity_offset_bytes(a->ent);
+					ir_fprintf(f, "spill entity: %+F offset %x (%d)\n", a->ent, ofs, ofs);
+				}
+				else {
+					ir_fprintf(f, "spill entity: n/a\n");
+				}
+			}
+			break;
+
+		case beo_IncSP:
+			{
+				be_stack_attr_t *a = (be_stack_attr_t *) at;
+				fprintf(f, "offset: %u\n", a->offset);
+				fprintf(f, "direction: %s\n", a->dir == be_stack_dir_along ? "along" : "against");
+			}
+			break;
 		}
-		break;
+
 	}
 
 	return 0;
 }
 
+void copy_attr(const ir_node *old_node, ir_node *new_node)
+{
+	be_node_attr_t *old_attr = get_irn_attr(old_node);
+	be_node_attr_t *new_attr = get_irn_attr(new_node);
+
+	assert(is_be_node(old_node));
+	assert(is_be_node(new_node));
+
+	memcpy(new_attr, old_attr, old_node->op->attr_size);
+
+	new_attr->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(get_irn_irg(new_node)), new_attr->max_reg_data);
+	memcpy(new_attr->reg_data, old_attr->reg_data, new_attr->max_reg_data * sizeof(be_reg_data_t));
+}
+
 static const ir_op_ops be_node_op_ops = {
 	NULL,
 	NULL,
 	NULL,
 	NULL,
 	NULL,
-	NULL,
+	copy_attr,
 	NULL,
 	NULL,
 	NULL,
@@ -546,7 +770,7 @@ static const ir_op_ops be_node_op_ops = {
 pset *nodes_live_at(const arch_env_t *arch_env, const arch_register_class_t *cls, const ir_node *pos, pset *live)
 {
 	firm_dbg_module_t *dbg = firm_dbg_register("firm.be.node");
-	ir_node *bl = get_nodes_block(pos);
+	const ir_node *bl = is_Block(pos) ? pos : get_nodes_block(pos);
 	ir_node *irn;
 	irn_live_t *li;
 
@@ -582,7 +806,7 @@ pset *nodes_live_at(const arch_env_t *arch_env, const arch_register_class_t *cls
 		}
 	}
 
-	return NULL;
+	return live;
 }
 
 ir_node *insert_Perm_after(const arch_env_t *arch_env,
@@ -600,8 +824,7 @@ ir_node *insert_Perm_after(const arch_env_t *arch_env,
 
 	DBG((dbg, LEVEL_1, "Insert Perm after: %+F\n", pos));
 
-	if(!nodes_live_at(arch_env, cls, pos, live))
-		assert(0 && "position not found");
+	nodes_live_at(arch_env, cls, pos, live);
 
 	n = pset_count(live);
 
@@ -634,7 +857,7 @@ ir_node *insert_Perm_after(const arch_env_t *arch_env,
 	curr = proj;
 	copies[0] = proj;
 
-	be_introduce_copies(dom_front, perm_op, 1, copies);
+	be_ssa_constr_single(dom_front, perm_op, 1, copies);
 	}
 	return perm;
 }
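
The constraint mechanism introduced by this patch works as follows: every output (and now also every input) of a backend node carries a be_req_t instead of a bare arch_register_req_t. be_limited() is installed as the request's limited callback, with limited_env pointing back at the be_req_t, and when the callback is queried it either forwards to the wrapped old limited function (optionally flipping the mask) or narrows the allowed set down to a single register index. The stand-alone sketch below mirrors that pattern; all names in it (reg_mask_t, req_t, sketch_limited, set_constr_single_reg) are simplified stand-ins invented for illustration and are not libfirm API — in particular, a plain 32-bit mask plays the role of bitset_t. It is a sketch of the design, not part of the patch.

/* Minimal, self-contained sketch of the single-register constraint pattern,
 * assuming a register file of at most 32 registers so that an unsigned
 * integer can stand in for libfirm's bitset_t. */
#include <assert.h>
#include <stdio.h>

typedef unsigned reg_mask_t;                      /* stand-in for bitset_t */
typedef void (*limited_fn)(void *env, reg_mask_t *mask);

typedef enum {                                    /* mirrors be_req_kind_t */
	req_kind_old_limited,
	req_kind_negate_old_limited,
	req_kind_single_reg
} req_kind_t;

typedef struct {                                  /* mirrors be_req_t */
	limited_fn  limited;                          /* callback that is queried */
	void       *limited_env;                      /* points back at this struct */
	req_kind_t  kind;
	union {
		struct { limited_fn fn; void *env; } old_limited;
		unsigned single_reg_index;
	} x;
} req_t;

/* Counterpart of be_limited(): narrow the allowed-register mask according
 * to the kind of constraint stored in the request. */
static void sketch_limited(void *data, reg_mask_t *mask)
{
	req_t *req = data;

	switch (req->kind) {
	case req_kind_negate_old_limited:
	case req_kind_old_limited:
		req->x.old_limited.fn(req->x.old_limited.env, mask);
		if (req->kind == req_kind_negate_old_limited)
			*mask = ~*mask;                       /* like bitset_flip_all() */
		break;
	case req_kind_single_reg:
		*mask = 1u << req->x.single_reg_index;    /* clear all, set exactly one */
		break;
	}
}

/* Counterpart of be_set_constr_single_reg(): pin a request to one register. */
static void set_constr_single_reg(req_t *r, unsigned reg_index)
{
	r->kind               = req_kind_single_reg;
	r->x.single_reg_index = reg_index;
	r->limited            = sketch_limited;
	r->limited_env        = r;
}

int main(void)
{
	req_t r;
	reg_mask_t allowed = 0xffffffffu;             /* initially: any register */

	set_constr_single_reg(&r, 4);                 /* e.g. pin to the stack pointer */
	r.limited(r.limited_env, &allowed);

	assert(allowed == (1u << 4));
	printf("allowed mask after constraint: %#x\n", allowed);
	return 0;
}

Pointing limited_env back at the wrapper struct is what lets the generic register-allocation code keep calling a plain callback while the backend node itself still knows which kind of constraint it stores.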