X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbenode.c;h=2f32ea549495db6133b9703c144857efb7ce30f4;hb=d14c6378674f36728eacaf5dc7e4bb045ff9fbab;hp=3cc8b42395609de62a56fca1e01c0e7c85ccd38c;hpb=a51b19f667731104d1277df71b26daa2d3816189;p=libfirm diff --git a/ir/be/benode.c b/ir/be/benode.c index 3cc8b4239..2f32ea549 100644 --- a/ir/be/benode.c +++ b/ir/be/benode.c @@ -7,7 +7,7 @@ * * This file provides Perm, Copy, Spill and Reload nodes. * - * Copyright (C) 2005 Universitaet Karlsruhe + * Copyright (C) 2005-2006 Universitaet Karlsruhe * Released under the GPL */ @@ -48,17 +48,6 @@ static unsigned be_node_tag = FOURCC('B', 'E', 'N', 'O'); -#if 0 -typedef enum _node_kind_t { - node_kind_spill, - node_kind_reload, - node_kind_perm, - node_kind_copy, - node_kind_kill, - node_kind_last -} node_kind_t; -#endif - typedef enum { be_req_kind_old_limited, be_req_kind_negate_old_limited, @@ -100,33 +89,33 @@ typedef struct { /** The be_Stack attribute type. */ typedef struct { be_node_attr_t node_attr; - int offset; /**< The offset by which the stack shall be increased/decreased. */ - be_stack_dir_t dir; /**< The direction in which the stack shall be modified (expand or shrink). */ + int offset; /**< The offset by which the stack shall be expanded/shrinked. */ } be_stack_attr_t; /** The be_Frame attribute type. */ typedef struct { be_node_attr_t node_attr; - entity *ent; + ir_entity *ent; int offset; } be_frame_attr_t; /** The be_Call attribute type. */ typedef struct { be_node_attr_t node_attr; - entity *ent; /**< The called entity if this is a static call. */ + ir_entity *ent; /**< The called entity if this is a static call. */ ir_type *call_tp; /**< The call type, copied from the original Call node. */ } be_call_attr_t; -/** The be_Spill attribute type. */ typedef struct { - be_frame_attr_t frame_attr; - ir_node *spill_ctx; /**< The node in whose context this spill was introduced. */ -} be_spill_attr_t; + be_node_attr_t node_attr; + ir_entity **in_entities; + ir_entity **out_entities; +} be_memperm_attr_t; ir_op *op_be_Spill; ir_op *op_be_Reload; ir_op *op_be_Perm; +ir_op *op_be_MemPerm; ir_op *op_be_Copy; ir_op *op_be_Keep; ir_op *op_be_CopyKeep; @@ -134,6 +123,7 @@ ir_op *op_be_Call; ir_op *op_be_Return; ir_op *op_be_IncSP; ir_op *op_be_AddSP; +ir_op *op_be_SubSP; ir_op *op_be_SetSP; ir_op *op_be_RegParams; ir_op *op_be_StackParam; @@ -204,33 +194,37 @@ void be_node_init(void) { /* Acquire all needed opcodes. 
*/ beo_base = get_next_ir_opcodes(beo_Last - 1); - op_be_Spill = new_ir_op(beo_base + beo_Spill, "be_Spill", op_pin_state_mem_pinned, N, oparity_unary, 0, sizeof(be_spill_attr_t), &be_node_op_ops); - op_be_Reload = new_ir_op(beo_base + beo_Reload, "be_Reload", op_pin_state_mem_pinned, N, oparity_zero, 0, sizeof(be_frame_attr_t), &be_node_op_ops); - op_be_Perm = new_ir_op(beo_base + beo_Perm, "be_Perm", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops); - op_be_Copy = new_ir_op(beo_base + beo_Copy, "be_Copy", op_pin_state_floats, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops); - op_be_Keep = new_ir_op(beo_base + beo_Keep, "be_Keep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops); - op_be_CopyKeep = new_ir_op(beo_base + beo_CopyKeep, "be_CopyKeep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops); - op_be_Call = new_ir_op(beo_base + beo_Call, "be_Call", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_call_attr_t), &be_node_op_ops); - op_be_Return = new_ir_op(beo_base + beo_Return, "be_Return", op_pin_state_pinned, X, oparity_variable, 0, sizeof(be_return_attr_t), &be_node_op_ops); - op_be_AddSP = new_ir_op(beo_base + beo_AddSP, "be_AddSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops); - op_be_SetSP = new_ir_op(beo_base + beo_SetSP, "be_SetSP", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_stack_attr_t), &be_node_op_ops); - op_be_IncSP = new_ir_op(beo_base + beo_IncSP, "be_IncSP", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_stack_attr_t), &be_node_op_ops); - op_be_RegParams = new_ir_op(beo_base + beo_RegParams, "be_RegParams", op_pin_state_pinned, N, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops); - op_be_StackParam = new_ir_op(beo_base + beo_StackParam, "be_StackParam", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops); - op_be_FrameAddr = new_ir_op(beo_base + beo_FrameAddr, "be_FrameAddr", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops); - op_be_FrameLoad = new_ir_op(beo_base + beo_FrameLoad, "be_FrameLoad", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops); - op_be_FrameStore = new_ir_op(beo_base + beo_FrameStore, "be_FrameStore", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops); - op_be_Barrier = new_ir_op(beo_base + beo_Barrier, "be_Barrier", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_node_attr_t), &be_node_op_ops); + op_be_Spill = new_ir_op(beo_base + beo_Spill, "be_Spill", op_pin_state_mem_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops); + op_be_Reload = new_ir_op(beo_base + beo_Reload, "be_Reload", op_pin_state_mem_pinned, N, oparity_zero, 0, sizeof(be_frame_attr_t), &be_node_op_ops); + op_be_Perm = new_ir_op(beo_base + beo_Perm, "be_Perm", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops); + op_be_MemPerm = new_ir_op(beo_base + beo_MemPerm, "be_MemPerm", op_pin_state_mem_pinned, N, oparity_variable, 0, sizeof(be_memperm_attr_t), &be_node_op_ops); + op_be_Copy = new_ir_op(beo_base + beo_Copy, "be_Copy", op_pin_state_floats, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops); + op_be_Keep = new_ir_op(beo_base + beo_Keep, "be_Keep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops); + op_be_CopyKeep = new_ir_op(beo_base + beo_CopyKeep, 
"be_CopyKeep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops); + op_be_Call = new_ir_op(beo_base + beo_Call, "be_Call", op_pin_state_pinned, F, oparity_variable, 0, sizeof(be_call_attr_t), &be_node_op_ops); + op_be_Return = new_ir_op(beo_base + beo_Return, "be_Return", op_pin_state_pinned, X, oparity_variable, 0, sizeof(be_return_attr_t), &be_node_op_ops); + op_be_AddSP = new_ir_op(beo_base + beo_AddSP, "be_AddSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops); + op_be_SubSP = new_ir_op(beo_base + beo_SubSP, "be_SubSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops); + op_be_SetSP = new_ir_op(beo_base + beo_SetSP, "be_SetSP", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_stack_attr_t), &be_node_op_ops); + op_be_IncSP = new_ir_op(beo_base + beo_IncSP, "be_IncSP", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_stack_attr_t), &be_node_op_ops); + op_be_RegParams = new_ir_op(beo_base + beo_RegParams, "be_RegParams", op_pin_state_pinned, N, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops); + op_be_StackParam = new_ir_op(beo_base + beo_StackParam, "be_StackParam", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops); + op_be_FrameAddr = new_ir_op(beo_base + beo_FrameAddr, "be_FrameAddr", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops); + op_be_FrameLoad = new_ir_op(beo_base + beo_FrameLoad, "be_FrameLoad", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops); + op_be_FrameStore = new_ir_op(beo_base + beo_FrameStore, "be_FrameStore", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops); + op_be_Barrier = new_ir_op(beo_base + beo_Barrier, "be_Barrier", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_node_attr_t), &be_node_op_ops); set_op_tag(op_be_Spill, &be_node_tag); set_op_tag(op_be_Reload, &be_node_tag); set_op_tag(op_be_Perm, &be_node_tag); + set_op_tag(op_be_MemPerm, &be_node_tag); set_op_tag(op_be_Copy, &be_node_tag); set_op_tag(op_be_Keep, &be_node_tag); set_op_tag(op_be_CopyKeep, &be_node_tag); set_op_tag(op_be_Call, &be_node_tag); set_op_tag(op_be_Return, &be_node_tag); set_op_tag(op_be_AddSP, &be_node_tag); + set_op_tag(op_be_SubSP, &be_node_tag); set_op_tag(op_be_SetSP, &be_node_tag); set_op_tag(op_be_IncSP, &be_node_tag); set_op_tag(op_be_RegParams, &be_node_tag); @@ -286,7 +280,6 @@ static int redir_proj(const ir_node **node, int pos) if(is_Proj(n)) { ir_node *irn; - assert(pos == -1 && "Illegal pos for a Proj"); *node = irn = get_Proj_pred(n); if(is_Proj(irn)) { assert(get_irn_mode(irn) == mode_T); @@ -298,51 +291,73 @@ static int redir_proj(const ir_node **node, int pos) return 0; } +static be_node_attr_t *retrieve_irn_attr(const ir_node *irn, int *the_pos) +{ + int dummy; + be_node_attr_t *res = NULL; + int *pos = the_pos ? 
the_pos : &dummy; + + *pos = -1; + if(is_Proj(irn)) { + ir_node *pred = get_Proj_pred(irn); + int p = get_Proj_proj(irn); + + if(is_be_node(pred)) { + assert(get_irn_mode(pred) == mode_T); + *pos = p; + res = get_irn_attr(pred); + assert(p >= 0 && p < res->max_reg_data && "illegal proj number"); + } + } + + else if(is_be_node(irn) && get_irn_mode(irn) != mode_T) { + be_node_attr_t *a = get_irn_attr(irn); + if(a->max_reg_data > 0) { + res = a; + *pos = 0; + } + } + + return res; +} + +static be_reg_data_t *retrieve_reg_data(const ir_node *irn) +{ + int pos; + be_node_attr_t *a = retrieve_irn_attr(irn, &pos); + return a ? &a->reg_data[pos] : NULL; +} + static void be_node_set_irn_reg(const void *_self, ir_node *irn, const arch_register_t *reg) { - int out_pos; - be_node_attr_t *a; - - out_pos = redir_proj((const ir_node **) &irn, -1); - a = get_irn_attr(irn); + be_reg_data_t *r = retrieve_reg_data(irn); - assert(is_be_node(irn)); - assert(out_pos < a->max_reg_data && "position too high"); - a->reg_data[out_pos].reg = reg; + if(r) + r->reg = reg; } -ir_node *be_new_Spill(const arch_register_class_t *cls, const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *to_spill, ir_node *ctx) +ir_node *be_new_Spill(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *to_spill) { - be_spill_attr_t *a; - ir_node *in[2]; - ir_node *res; + be_frame_attr_t *a; + ir_node *res; - in[0] = frame; - in[1] = to_spill; - res = new_ir_node(NULL, irg, bl, op_be_Spill, mode_M, 2, in); - a = init_node_attr(res, 2); - a->frame_attr.ent = NULL; - a->frame_attr.offset = 0; - a->spill_ctx = ctx; + res = new_ir_node(NULL, irg, bl, op_be_Spill, mode_M, 1, &to_spill); + a = init_node_attr(res, 2); + a->ent = NULL; + a->offset = 0; - be_node_set_reg_class(res, 0, cls_frame); - be_node_set_reg_class(res, 1, cls); + be_node_set_reg_class(res, be_pos_Spill_val, cls); return res; } -ir_node *be_new_Reload(const arch_register_class_t *cls, const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *mem, ir_mode *mode) +ir_node *be_new_Reload(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *mem, ir_mode *mode) { - ir_node *in[2]; - ir_node *res; - - in[0] = frame; - in[1] = mem; - res = new_ir_node(NULL, irg, bl, op_be_Reload, mode, 2, in); + ir_node *res = new_ir_node(NULL, irg, bl, op_be_Reload, mode, 1, &mem); init_node_attr(res, 2); - be_node_set_reg_class(res, 0, cls_frame); be_node_set_reg_class(res, -1, cls); + be_node_set_flags(res, -1, arch_irn_flags_rematerializable); return res; } @@ -352,10 +367,10 @@ ir_node *be_get_Reload_mem(const ir_node *irn) return get_irn_n(irn, be_pos_Reload_mem); } -ir_node *be_get_Reload_frame(const ir_node *irn) +ir_node *be_get_Spill_val(const ir_node *irn) { - assert(be_is_Reload(irn)); - return get_irn_n(irn, be_pos_Reload_frame); + assert(be_is_Spill(irn)); + return get_irn_n(irn, be_pos_Spill_val); } ir_node *be_new_Perm(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[]) @@ -371,6 +386,40 @@ ir_node *be_new_Perm(const arch_register_class_t *cls, ir_graph *irg, ir_node *b return irn; } +ir_node *be_new_MemPerm(const arch_env_t *arch_env, ir_graph *irg, ir_node *bl, int n, ir_node *in[]) +{ + int i; + ir_node *frame = get_irg_frame(irg); + const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1); + ir_node *irn; + const arch_register_t *sp = arch_env->isa->sp; + be_memperm_attr_t *attr; + ir_node **real_in; + + real_in = 
alloca((n+1) * sizeof(real_in[0])); + real_in[0] = frame; + memcpy(&real_in[1], in, n * sizeof(real_in[0])); + + irn = new_ir_node(NULL, irg, bl, op_be_MemPerm, mode_T, n+1, real_in); + + init_node_attr(irn, n + 1); + be_node_set_reg_class(irn, 0, sp->reg_class); + for(i = 0; i < n; ++i) { + be_node_set_reg_class(irn, i + 1, cls_frame); + be_node_set_reg_class(irn, OUT_POS(i), cls_frame); + } + + attr = get_irn_attr(irn); + + attr->in_entities = obstack_alloc(irg->obst, n * sizeof(attr->in_entities[0])); + memset(attr->in_entities, 0, n * sizeof(attr->in_entities[0])); + attr->out_entities = obstack_alloc(irg->obst, n*sizeof(attr->out_entities[0])); + memset(attr->out_entities, 0, n*sizeof(attr->out_entities[0])); + + return irn; +} + + ir_node *be_new_Copy(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *op) { ir_node *in[1]; @@ -420,7 +469,7 @@ ir_node *be_new_Call(dbg_info *dbg, ir_graph *irg, ir_node *bl, ir_node *mem, ir real_in[be_pos_Call_ptr] = ptr; memcpy(&real_in[be_pos_Call_first_arg], in, n * sizeof(in[0])); - irn = new_ir_node(NULL, irg, bl, op_be_Call, mode_T, real_n, real_in); + irn = new_ir_node(dbg, irg, bl, op_be_Call, mode_T, real_n, real_in); a = init_node_attr(irn, (n_outs > real_n ? n_outs : real_n)); a->ent = NULL; a->call_tp = call_tp; @@ -428,14 +477,14 @@ ir_node *be_new_Call(dbg_info *dbg, ir_graph *irg, ir_node *bl, ir_node *mem, ir } /* Gets the call entity or NULL if this is no static call. */ -entity *be_Call_get_entity(const ir_node *call) { +ir_entity *be_Call_get_entity(const ir_node *call) { be_call_attr_t *a = get_irn_attr(call); assert(be_is_Call(call)); return a->ent; } /* Sets the call entity. */ -void be_Call_set_entity(ir_node *call, entity *ent) { +void be_Call_set_entity(ir_node *call, ir_entity *ent) { be_call_attr_t *a = get_irn_attr(call); assert(be_is_Call(call)); a->ent = ent; @@ -474,24 +523,22 @@ int be_Return_get_n_rets(ir_node *ret) return a->num_ret_vals; } -ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *mem, unsigned offset, be_stack_dir_t dir) +ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, int offset) { be_stack_attr_t *a; ir_node *irn; - ir_node *in[2]; + ir_node *in[1]; in[0] = old_sp; - in[1] = mem; - irn = new_ir_node(NULL, irg, bl, op_be_IncSP, sp->reg_class->mode, 2, in); + irn = new_ir_node(NULL, irg, bl, op_be_IncSP, sp->reg_class->mode, sizeof(in) / sizeof(in[0]), in); a = init_node_attr(irn, 1); - a->dir = dir; a->offset = offset; be_node_set_flags(irn, -1, arch_irn_flags_ignore | arch_irn_flags_modify_sp); /* Set output constraint to stack register. */ be_node_set_reg_class(irn, 0, sp->reg_class); - be_set_constr_single_reg(irn, -1, sp); + be_set_constr_single_reg(irn, BE_OUT_POS(0), sp); be_node_set_irn_reg(NULL, irn, sp); return irn; @@ -506,13 +553,39 @@ ir_node *be_new_AddSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_ in[be_pos_AddSP_old_sp] = old_sp; in[be_pos_AddSP_size] = sz; - irn = new_ir_node(NULL, irg, bl, op_be_AddSP, mode_T, be_pos_AddSP_last, in); - a = init_node_attr(irn, be_pos_AddSP_last); + irn = new_ir_node(NULL, irg, bl, op_be_AddSP, mode_T, be_pos_AddSP_last, in); + a = init_node_attr(irn, be_pos_AddSP_last); - be_node_set_flags(irn, OUT_POS(0), arch_irn_flags_ignore | arch_irn_flags_modify_sp); + be_node_set_flags(irn, OUT_POS(pn_be_AddSP_res), arch_irn_flags_ignore | arch_irn_flags_modify_sp); /* Set output constraint to stack register. 
*/ - be_set_constr_single_reg(irn, OUT_POS(0), sp); + be_set_constr_single_reg(irn, be_pos_AddSP_old_sp, sp); + be_node_set_reg_class(irn, be_pos_AddSP_size, arch_register_get_class(sp)); + be_set_constr_single_reg(irn, OUT_POS(pn_be_AddSP_res), sp); + a->reg_data[pn_be_AddSP_res].reg = sp; + + return irn; +} + +ir_node *be_new_SubSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *sz) +{ + be_node_attr_t *a; + ir_node *irn; + ir_node *in[be_pos_SubSP_last]; + + in[be_pos_SubSP_old_sp] = old_sp; + in[be_pos_SubSP_size] = sz; + + irn = new_ir_node(NULL, irg, bl, op_be_SubSP, mode_T, be_pos_SubSP_last, in); + a = init_node_attr(irn, be_pos_SubSP_last); + + be_node_set_flags(irn, OUT_POS(pn_be_SubSP_res), arch_irn_flags_ignore | arch_irn_flags_modify_sp); + + /* Set output constraint to stack register. */ + be_set_constr_single_reg(irn, be_pos_SubSP_old_sp, sp); + be_node_set_reg_class(irn, be_pos_SubSP_size, arch_register_get_class(sp)); + be_set_constr_single_reg(irn, OUT_POS(pn_be_SubSP_res), sp); + a->reg_data[pn_be_SubSP_res].reg = sp; return irn; } @@ -533,14 +606,13 @@ ir_node *be_new_SetSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_ /* Set output constraint to stack register. */ be_set_constr_single_reg(irn, OUT_POS(0), sp); - be_node_set_reg_class(irn, 1, sp->reg_class); - be_node_set_reg_class(irn, 2, sp->reg_class); - be_node_set_irn_reg(NULL, irn, sp); + be_node_set_reg_class(irn, be_pos_AddSP_size, sp->reg_class); + be_node_set_reg_class(irn, be_pos_AddSP_old_sp, sp->reg_class); return irn; } -ir_node *be_new_StackParam(const arch_register_class_t *cls, const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_mode *mode, ir_node *frame_pointer, entity *ent) +ir_node *be_new_StackParam(const arch_register_class_t *cls, const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_mode *mode, ir_node *frame_pointer, ir_entity *ent) { be_frame_attr_t *a; ir_node *irn; @@ -567,7 +639,7 @@ ir_node *be_new_RegParams(ir_graph *irg, ir_node *bl, int n_outs) } ir_node *be_new_FrameLoad(const arch_register_class_t *cls_frame, const arch_register_class_t *cls_data, - ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *frame, entity *ent) + ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *frame, ir_entity *ent) { be_frame_attr_t *a; ir_node *irn; @@ -585,7 +657,7 @@ ir_node *be_new_FrameLoad(const arch_register_class_t *cls_frame, const arch_reg } ir_node *be_new_FrameStore(const arch_register_class_t *cls_frame, const arch_register_class_t *cls_data, - ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *frame, ir_node *data, entity *ent) + ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *frame, ir_node *data, ir_entity *ent) { be_frame_attr_t *a; ir_node *irn; @@ -603,7 +675,7 @@ ir_node *be_new_FrameStore(const arch_register_class_t *cls_frame, const arch_re return irn; } -ir_node *be_new_FrameAddr(const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_node *frame, entity *ent) +ir_node *be_new_FrameAddr(const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_node *frame, ir_entity *ent) { be_frame_attr_t *a; ir_node *irn; @@ -665,6 +737,7 @@ int be_is_Reload (const ir_node *irn) { return be_get_irn_opcode(irn) == int be_is_Copy (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Copy ; } int be_is_CopyKeep (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_CopyKeep ; } int be_is_Perm (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Perm ; } +int be_is_MemPerm 
(const ir_node *irn) { return be_get_irn_opcode(irn) == beo_MemPerm ; } int be_is_Keep (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Keep ; } int be_is_Call (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Call ; } int be_is_Return (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Return ; } @@ -693,15 +766,70 @@ int be_has_frame_entity(const ir_node *irn) } } -entity *be_get_frame_entity(const ir_node *irn) +ir_entity *be_get_frame_entity(const ir_node *irn) { - if(be_has_frame_entity(irn)) { + if (be_has_frame_entity(irn)) { be_frame_attr_t *a = get_irn_attr(irn); return a->ent; } return NULL; } +int be_get_frame_offset(const ir_node *irn) +{ + assert(is_be_node(irn)); + if (be_has_frame_entity(irn)) { + be_frame_attr_t *a = get_irn_attr(irn); + return a->offset; + } + return 0; +} + +void be_set_MemPerm_in_entity(const ir_node *irn, int n, ir_entity *ent) +{ + be_memperm_attr_t *attr = get_irn_attr(irn); + + assert(be_is_MemPerm(irn)); + assert(n < be_get_MemPerm_entity_arity(irn)); + + attr->in_entities[n] = ent; +} + +ir_entity* be_get_MemPerm_in_entity(const ir_node* irn, int n) +{ + be_memperm_attr_t *attr = get_irn_attr(irn); + + assert(be_is_MemPerm(irn)); + assert(n < be_get_MemPerm_entity_arity(irn)); + + return attr->in_entities[n]; +} + +void be_set_MemPerm_out_entity(const ir_node *irn, int n, ir_entity *ent) +{ + be_memperm_attr_t *attr = get_irn_attr(irn); + + assert(be_is_MemPerm(irn)); + assert(n < be_get_MemPerm_entity_arity(irn)); + + attr->out_entities[n] = ent; +} + +ir_entity* be_get_MemPerm_out_entity(const ir_node* irn, int n) +{ + be_memperm_attr_t *attr = get_irn_attr(irn); + + assert(be_is_MemPerm(irn)); + assert(n < be_get_MemPerm_entity_arity(irn)); + + return attr->out_entities[n]; +} + +int be_get_MemPerm_entity_arity(const ir_node *irn) +{ + return get_irn_arity(irn) - 1; +} + static void be_limited(void *data, bitset_t *bs) { be_req_t *req = data; @@ -771,8 +899,12 @@ void be_node_set_flags(ir_node *irn, int pos, arch_irn_flags_t flags) void be_node_set_reg_class(ir_node *irn, int pos, const arch_register_class_t *cls) { be_req_t *r = get_req(irn, pos); + r->req.cls = cls; - if(r->req.type == arch_register_req_type_none) + + if (cls == NULL) + r->req.type = arch_register_req_type_none; + else if (r->req.type == arch_register_req_type_none) r->req.type = arch_register_req_type_normal; } @@ -797,180 +929,41 @@ ir_node *be_get_IncSP_mem(ir_node *irn) { return get_irn_n(irn, 1); } -void be_set_IncSP_offset(ir_node *irn, unsigned offset) +void be_set_IncSP_offset(ir_node *irn, int offset) { be_stack_attr_t *a = get_irn_attr(irn); assert(be_is_IncSP(irn)); a->offset = offset; } -unsigned be_get_IncSP_offset(const ir_node *irn) +int be_get_IncSP_offset(const ir_node *irn) { be_stack_attr_t *a = get_irn_attr(irn); assert(be_is_IncSP(irn)); return a->offset; } -void be_set_IncSP_direction(ir_node *irn, be_stack_dir_t dir) +ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn) { - be_stack_attr_t *a = get_irn_attr(irn); - assert(be_is_IncSP(irn)); - a->dir = dir; -} + ir_node *bl = get_nodes_block(irn); + ir_graph *irg = get_irn_irg(bl); + const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, irn, -1); + ir_node *spill; -be_stack_dir_t be_get_IncSP_direction(const ir_node *irn) -{ - be_stack_attr_t *a = get_irn_attr(irn); - assert(be_is_IncSP(irn)); - return a->dir; -} - -void be_set_Spill_entity(ir_node *irn, entity *ent) -{ - be_spill_attr_t *a = get_irn_attr(irn); - assert(be_is_Spill(irn)); - 
a->frame_attr.ent = ent; -} - -static ir_node *find_a_spill_walker(ir_node *irn, unsigned visited_nr) -{ - unsigned nr = get_irn_visited(irn); - - set_irn_visited(irn, visited_nr); - - if(is_Phi(irn)) { - int i, n; - if(nr < visited_nr) { - for(i = 0, n = get_irn_arity(irn); i < n; ++i) { - ir_node *n = find_a_spill_walker(get_irn_n(irn, i), visited_nr); - if(n != NULL) - return n; - } - } - } - - else if(be_get_irn_opcode(irn) == beo_Spill) - return irn; - - return NULL; -} - -ir_node *be_get_Spill_context(const ir_node *irn) { - const be_spill_attr_t *a = get_irn_attr(irn); - assert(be_is_Spill(irn)); - return a->spill_ctx; -} - -/** - * Finds a spill for a reload. - * If the reload is directly using the spill, this is simple, - * else we perform DFS from the reload (over all PhiMs) and return - * the first spill node we find. - */ -static INLINE ir_node *find_a_spill(const ir_node *irn) -{ - ir_graph *irg = get_irn_irg(irn); - unsigned visited_nr = get_irg_visited(irg) + 1; - - assert(be_is_Reload(irn)); - set_irg_visited(irg, visited_nr); - return find_a_spill_walker(be_get_Reload_mem(irn), visited_nr); -} - -entity *be_get_spill_entity(const ir_node *irn) -{ - switch(be_get_irn_opcode(irn)) { - case beo_Reload: - { - ir_node *spill = find_a_spill(irn); - return be_get_spill_entity(spill); - } - case beo_Spill: - { - be_spill_attr_t *a = get_irn_attr(irn); - return a->frame_attr.ent; - } - default: - assert(0 && "Must give spill/reload node"); - break; - } - - return NULL; -} - -static void link_reload_walker(ir_node *irn, void *data) -{ - ir_node **root = (ir_node **) data; - if(be_is_Reload(irn)) { - set_irn_link(irn, *root); - *root = irn; - } -} - -void be_copy_entities_to_reloads(ir_graph *irg) -{ - ir_node *irn = NULL; - irg_walk_graph(irg, link_reload_walker, NULL, (void *) &irn); - - while(irn) { - be_frame_attr_t *a = get_irn_attr(irn); - entity *ent = be_get_spill_entity(irn); - a->ent = ent; - irn = get_irn_link(irn); - } -} - -ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn, ir_node *ctx) -{ - ir_node *bl = get_nodes_block(irn); - ir_graph *irg = get_irn_irg(bl); - ir_node *frame = get_irg_frame(irg); - ir_node *insert = bl; - ir_node *spill; - const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, irn, -1); - const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1); - - spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn, ctx); + spill = be_new_Spill(cls, irg, bl, irn); return spill; - -#if 0 - /* - * search the right insertion point. a spill of a phi cannot be put - * directly after the phi, if there are some phis behind the one which - * is spilled. Also, a spill of a Proj must be after all Projs of the - * same tuple node. - * - * Here's one special case: - * If the spill is in the start block, the spill must be after the frame - * pointer is set up. This is done by setting insert to the end of the block - * which is its default initialization (see above). - */ - - insert = sched_next(irn); - if(insert != bl && bl == get_irg_start_block(irg) && sched_get_time_step(frame) >= sched_get_time_step(insert)) - insert = sched_next(frame); - - while((is_Phi(insert) || is_Proj(insert)) && !sched_is_end(insert)) - insert = sched_next(insert); - - sched_add_before(insert, spill); - return spill; -#endif } ir_node *be_reload(const arch_env_t *arch_env, const arch_register_class_t *cls, ir_node *insert, ir_mode *mode, ir_node *spill) { - ir_node *reload; - - ir_node *bl = is_Block(insert) ? 
insert : get_nodes_block(insert); - ir_graph *irg = get_irn_irg(bl); - ir_node *frame = get_irg_frame(irg); - const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1); + ir_node *reload; + ir_node *bl = is_Block(insert) ? insert : get_nodes_block(insert); + ir_graph *irg = get_irn_irg(bl); assert(be_is_Spill(spill) || (is_Phi(spill) && get_irn_mode(spill) == mode_M)); - reload = be_new_Reload(cls, cls_frame, irg, bl, frame, spill, mode); + reload = be_new_Reload(cls, irg, bl, spill, mode); if(is_Block(insert)) { insert = sched_skip(insert, 0, sched_skip_cf_predicator, (void *) arch_env); @@ -1052,16 +1045,8 @@ be_node_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_nod const arch_register_t * be_node_get_irn_reg(const void *_self, const ir_node *irn) { - int out_pos; - be_node_attr_t *a; - - out_pos = redir_proj((const ir_node **) &irn, -1); - a = get_irn_attr(irn); - - assert(is_be_node(irn)); - assert(out_pos < a->max_reg_data && "position too high"); - - return a->reg_data[out_pos].reg; + be_reg_data_t *r = retrieve_reg_data(irn); + return r ? r->reg : NULL; } static arch_irn_class_t be_node_classify(const void *_self, const ir_node *irn) @@ -1078,7 +1063,7 @@ static arch_irn_class_t be_node_classify(const void *_self, const ir_node *irn) XXX(StackParam, stackparam); #undef XXX default: - return 0; + return arch_irn_class_normal; } return 0; @@ -1086,23 +1071,25 @@ static arch_irn_class_t be_node_classify(const void *_self, const ir_node *irn) static arch_irn_flags_t be_node_get_flags(const void *_self, const ir_node *irn) { - int out_pos; - be_node_attr_t *a; - - out_pos = redir_proj((const ir_node **) &irn, -1); - a = get_irn_attr(irn); - - assert(is_be_node(irn)); - assert(out_pos < a->max_reg_data && "position too high"); - - return a->reg_data[out_pos].req.flags; + be_reg_data_t *r = retrieve_reg_data(irn); + return r ? r->req.flags : 0; } -static entity *be_node_get_frame_entity(const void *self, const ir_node *irn) +static ir_entity *be_node_get_frame_entity(const void *self, const ir_node *irn) { return be_get_frame_entity(irn); } +static void be_node_set_frame_entity(const void *self, ir_node *irn, ir_entity *ent) +{ + be_frame_attr_t *a; + + assert(be_has_frame_entity(irn)); + + a = get_irn_attr(irn); + a->ent = ent; +} + static void be_node_set_frame_offset(const void *self, ir_node *irn, int offset) { if(be_has_frame_entity(irn)) { @@ -1111,6 +1098,11 @@ static void be_node_set_frame_offset(const void *self, ir_node *irn, int offset) } } +static int be_node_get_sp_bias(const void *self, const ir_node *irn) +{ + return be_is_IncSP(irn) ? 
be_get_IncSP_offset(irn) : 0; +} + /* ___ ____ _ _ _ _ _ _ |_ _| _ \| \ | | | | | | __ _ _ __ __| | | ___ _ __ @@ -1127,7 +1119,13 @@ static const arch_irn_ops_if_t be_node_irn_ops_if = { be_node_classify, be_node_get_flags, be_node_get_frame_entity, - be_node_set_frame_offset + be_node_set_frame_entity, + be_node_set_frame_offset, + be_node_get_sp_bias, + NULL, /* get_inverse */ + NULL, /* get_op_estimated_cost */ + NULL, /* possible_memory_operand */ + NULL, /* perform_memory_operand */ }; static const arch_irn_ops_t be_node_irn_ops = { @@ -1242,15 +1240,24 @@ static arch_irn_flags_t phi_get_flags(const void *_self, const ir_node *irn) return arch_irn_flags_none; } -static entity *phi_get_frame_entity(const void *_self, const ir_node *irn) +static ir_entity *phi_get_frame_entity(const void *_self, const ir_node *irn) { return NULL; } +static void phi_set_frame_entity(const void *_self, ir_node *irn, ir_entity *ent) +{ +} + static void phi_set_frame_offset(const void *_self, ir_node *irn, int bias) { } +static int phi_get_sp_bias(const void* self, const ir_node *irn) +{ + return 0; +} + static const arch_irn_ops_if_t phi_irn_ops = { phi_get_irn_reg_req, phi_set_irn_reg, @@ -1258,7 +1265,13 @@ static const arch_irn_ops_if_t phi_irn_ops = { phi_classify, phi_get_flags, phi_get_frame_entity, - phi_set_frame_offset + phi_set_frame_entity, + phi_set_frame_offset, + phi_get_sp_bias, + NULL, /* get_inverse */ + NULL, /* get_op_estimated_cost */ + NULL, /* possible_memory_operand */ + NULL, /* perform_memory_operand */ }; static const arch_irn_handler_t phi_irn_handler = { @@ -1296,7 +1309,6 @@ void be_phi_handler_reset(arch_irn_handler_t *handler) h->regs = pmap_create(); } - /* _ _ _ ____ _ | \ | | ___ __| | ___ | _ \ _ _ _ __ ___ _ __ (_)_ __ __ _ @@ -1402,21 +1414,15 @@ static int dump_node(ir_node *irn, FILE *f, dump_reason_t reason) } switch(be_get_irn_opcode(irn)) { - case beo_Spill: - { - be_spill_attr_t *a = (be_spill_attr_t *) at; - ir_fprintf(f, "spill context: %+F\n", a->spill_ctx); - } - break; - case beo_IncSP: { be_stack_attr_t *a = (be_stack_attr_t *) at; - if (a->offset == BE_STACK_FRAME_SIZE) + if (a->offset == BE_STACK_FRAME_SIZE_EXPAND) fprintf(f, "offset: FRAME_SIZE\n"); + else if(a->offset == BE_STACK_FRAME_SIZE_SHRINK) + fprintf(f, "offset: -FRAME SIZE\n"); else fprintf(f, "offset: %u\n", a->offset); - fprintf(f, "direction: %s\n", a->dir == be_stack_dir_expand ? "expand" : "shrink"); } break; case beo_Call: @@ -1426,6 +1432,24 @@ static int dump_node(ir_node *irn, FILE *f, dump_reason_t reason) if (a->ent) fprintf(f, "\ncalling: %s\n", get_entity_name(a->ent)); } + break; + case beo_MemPerm: + { + int i; + for(i = 0; i < be_get_MemPerm_entity_arity(irn); ++i) { + ir_entity *in, *out; + in = be_get_MemPerm_in_entity(irn, i); + out = be_get_MemPerm_out_entity(irn, i); + if(in) { + fprintf(f, "\nin[%d]: %s\n", i, get_entity_name(in)); + } + if(out) { + fprintf(f, "\nout[%d]: %s\n", i, get_entity_name(out)); + } + } + } + break; + default: break; }
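
The hunks above replace the old IncSP offset/direction pair with a single signed offset and add the MemPerm node, whose permuted spill slots are described by per-position entities. A minimal sketch of how backend code might use these revised interfaces follows; it assumes the declarations from this file's interface (benode_t.h) are in scope, and arch_env, irg, bl, old_sp, val0, val1 and slot_ent are illustrative placeholders supplied by the surrounding backend, not names taken from the patch.

/*
 * Illustrative sketch only, not part of the patch: exercising the revised
 * IncSP and the new MemPerm interfaces.  All be_* calls and the
 * BE_STACK_FRAME_SIZE_* sentinels are taken from the code above; the
 * function name and parameters are placeholders.
 */
static void example_use_of_new_interfaces(const arch_env_t *arch_env,
                                          ir_graph *irg, ir_node *bl,
                                          ir_node *old_sp,
                                          ir_node *val0, ir_node *val1,
                                          ir_entity *slot_ent)
{
	const arch_register_t *sp = arch_env->isa->sp;
	ir_node *in[2];
	ir_node *incsp, *memperm;

	/* IncSP now carries one signed offset instead of an unsigned offset
	   plus an expand/shrink direction; a negative offset shrinks the stack
	   again (cf. BE_STACK_FRAME_SIZE_SHRINK printing as "-FRAME SIZE" in
	   dump_node above), and the BE_STACK_FRAME_SIZE_* values act as
	   placeholders until the final frame size is known. */
	incsp = be_new_IncSP(sp, irg, bl, old_sp, 16);
	be_set_IncSP_offset(incsp, -16);       /* later adjustment of the node */

	/* MemPerm permutes values between spill slots; be_new_MemPerm adds the
	   frame pointer as a hidden first operand, and the in/out entities are
	   attached per position, typically by the spill-slot allocator. */
	in[0] = val0;
	in[1] = val1;
	memperm = be_new_MemPerm(arch_env, irg, bl, 2, in);
	be_set_MemPerm_in_entity(memperm, 0, slot_ent);
	(void) be_get_MemPerm_entity_arity(memperm);   /* yields 2 here */
}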