X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbeabi.c;h=5944a736255167dd80d2bb1eb2fb8058c87b81a1;hb=2ce8008369b1dced0ccd07574d91bff82ab773ef;hp=2adfa3af03c2fcce2d36e92e8e1febde5212a5a1;hpb=e88385016800d3c56c3fa09770e9f7995c42e106;p=libfirm

diff --git a/ir/be/beabi.c b/ir/be/beabi.c
index 2adfa3af0..5944a7362 100644
--- a/ir/be/beabi.c
+++ b/ir/be/beabi.c
@@ -55,6 +55,7 @@
 #include "beirg.h"
 #include "bessaconstr.h"
 #include "bemodule.h"
+#include "betranshlp.h"

 DEBUG_ONLY(static firm_dbg_module_t *dbg;)

@@ -86,8 +87,6 @@ struct be_abi_call_t {
  * The ABI information for the current graph.
  */
 struct be_abi_irg_t {
-	survive_dce_t *dce_survivor;
-
 	be_abi_call_t *call;     /**< The ABI call information. */

 	ir_node *init_sp;        /**< The node representing the stack pointer
@@ -96,11 +95,8 @@ struct be_abi_irg_t {
 	ir_node *start;          /**< The be_Start params node. */
 	pmap *regs;              /**< A map of all callee-save and ignore regs
	                               to their Projs to the RegParams node. */
-	int start_block_bias;    /**< The stack bias at the end of the start block. */

-	void *cb;                /**< ABI Callback self pointer. */
-
 	pmap *keep_map;          /**< mapping blocks to keep nodes. */

 	ir_node **calls;         /**< flexible array containing all be_Call nodes */
@@ -111,6 +107,17 @@ static ir_heights_t *ir_heights;
 /** Flag: if set, try to omit the frame pointer in all routines. */
 static int be_omit_fp = 1;

+static ir_node *be_abi_reg_map_get(pmap *map, const arch_register_t *reg)
+{
+	return (ir_node*)pmap_get(map, reg);
+}
+
+static void be_abi_reg_map_set(pmap *map, const arch_register_t* reg,
+                               ir_node *node)
+{
+	pmap_insert(map, reg, node);
+}
+
 /*
  _    ____ ___   ____      _ _ _                _
 / \  | __ )_ _| / ___|__ _| | | |__   __ _  ___| | _____
@@ -281,13 +288,12 @@ static void be_abi_call_free(be_abi_call_t *call)
  * @param args      the stack argument layout type
  * @param between   the between layout type
  * @param locals    the method frame type
- * @param stack_dir the stack direction: < 0 decreasing, > 0 increasing addresses
 * @param param_map an array mapping method argument positions to the stack argument type
 *
 * @return the initialized stack layout
 */
 static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *args,
-                                           ir_type *between, ir_type *locals, int stack_dir,
+                                           ir_type *between, ir_type *locals,
                                            ir_entity *param_map[])
 {
 	frame->arg_type = args;
@@ -295,19 +301,13 @@ static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *ar
 	frame->frame_type = locals;
 	frame->initial_offset = 0;
 	frame->initial_bias = 0;
-	frame->stack_dir = stack_dir;
 	frame->order[1] = between;
 	frame->param_map = param_map;

-	if (stack_dir > 0) {
-		frame->order[0] = args;
-		frame->order[2] = locals;
-	} else {
-		/* typical decreasing stack: locals have the
-		 * lowest addresses, arguments the highest */
-		frame->order[0] = locals;
-		frame->order[2] = args;
-	}
+	/* typical decreasing stack: locals have the
+	 * lowest addresses, arguments the highest */
+	frame->order[0] = locals;
+	frame->order[2] = args;

 	return frame;
 }
@@ -336,11 +336,10 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
 	const arch_env_t *arch_env = be_get_irg_arch_env(irg);
 	ir_type *call_tp = get_Call_type(irn);
 	ir_node *call_ptr = get_Call_ptr(irn);
-	int n_params = get_method_n_params(call_tp);
+	size_t n_params = get_method_n_params(call_tp);
 	ir_node *curr_mem = get_Call_mem(irn);
 	ir_node *bl = get_nodes_block(irn);
 	int stack_size = 0;
-	int stack_dir = arch_env->stack_dir;
 	const arch_register_t *sp = arch_env->sp;
 	be_abi_call_t *call = be_abi_call_new(sp->reg_class);
 	ir_mode *mach_mode = sp->reg_class->mode;
@@ -363,6 +362,8 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
 	int *reg_param_idxs;
 	int *stack_param_idx;
 	int i, n, destroy_all_regs;
+	size_t s;
+	size_t p;
 	dbg_info *dbgi;

 	/* Let the isa fill out the abi description for that call node. */
@@ -371,26 +372,26 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
 	/* Insert code to put the stack arguments on the stack. */
 	assert(get_Call_n_params(irn) == n_params);
 	stack_param_idx = ALLOCAN(int, n_params);
-	for (i = 0; i < n_params; ++i) {
-		be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 0);
+	for (p = 0; p < n_params; ++p) {
+		be_abi_call_arg_t *arg = get_call_arg(call, 0, p, 0);

 		assert(arg);
 		if (arg->on_stack) {
-			int arg_size = get_type_size_bytes(get_method_param_type(call_tp, i));
+			int arg_size = get_type_size_bytes(get_method_param_type(call_tp, p));

 			stack_size += round_up2(arg->space_before, arg->alignment);
 			stack_size += round_up2(arg_size, arg->alignment);
 			stack_size += round_up2(arg->space_after, arg->alignment);
-			stack_param_idx[n_stack_params++] = i;
+			stack_param_idx[n_stack_params++] = p;
 		}
 	}

 	/* Collect all arguments which are passed in registers. */
 	reg_param_idxs = ALLOCAN(int, n_params);
-	for (i = 0; i < n_params; ++i) {
-		be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 0);
+	for (p = 0; p < n_params; ++p) {
+		be_abi_call_arg_t *arg = get_call_arg(call, 0, p, 0);

 		if (arg && arg->in_reg) {
-			reg_param_idxs[n_reg_params++] = i;
+			reg_param_idxs[n_reg_params++] = p;
 		}
 	}

@@ -403,7 +404,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
 	 * Note: we also have to do this for stack_size == 0, because we may have
 	 * to adjust stack alignment for the call.
 	 */
-	if (stack_dir < 0 && !do_seq && !no_alloc) {
+	if (!do_seq && !no_alloc) {
 		curr_sp = be_new_IncSP(sp, bl, curr_sp, stack_size, 1);
 	}

@@ -419,7 +420,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
 	 * We must them reverse again if they are pushed (not stored) and the stack
 	 * direction is downwards.
 	 */
-	if (call->flags.bits.left_to_right ^ (do_seq && stack_dir < 0)) {
+	if (call->flags.bits.left_to_right ^ do_seq) {
 		for (i = 0; i < n_stack_params >> 1; ++i) {
 			int other = n_stack_params - i - 1;
 			int tmp = stack_param_idx[i];
@@ -469,7 +470,8 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)

 			/* Insert a store for primitive arguments. */
 			if (is_atomic_type(param_type)) {
-				ir_node *mem_input = do_seq ? curr_mem : new_r_NoMem(irg);
+				ir_node *nomem     = get_irg_no_mem(irg);
+				ir_node *mem_input = do_seq ? curr_mem : nomem;
 				ir_node *store = new_rd_Store(dbgi, bl, mem_input, addr, param, cons_none);
 				mem   = new_r_Proj(store, mode_M, pn_Store_M);
 			} else {
@@ -580,13 +582,9 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
 	}

 	/* add state registers ins */
-	for (i = 0; i < ARR_LEN(states); ++i) {
-		const arch_register_t *reg = states[i];
+	for (s = 0; s < ARR_LEN(states); ++s) {
+		const arch_register_t *reg = states[s];
 		const arch_register_class_t *cls = arch_register_get_class(reg);
-#if 0
-		ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
-		ir_fprintf(stderr, "Adding %+F\n", regnode);
-#endif
 		ir_node *regnode = new_r_Unknown(irg, arch_register_class_mode(cls));
 		in[n_ins++] = regnode;
 	}
@@ -660,7 +658,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
 	   Set the register class of the call address to
 	   the backend provided class (default: stack pointer class)
 	 */
-	be_node_set_reg_class_in(low_call, be_pos_Call_ptr, call->cls_addr);
+	be_node_set_reg_class_in(low_call, n_be_Call_ptr, call->cls_addr);

 	DBG((dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));

@@ -670,7 +668,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
 		be_abi_call_arg_t *arg = get_call_arg(call, 0, index, 0);
 		assert(arg->reg != NULL);

-		be_set_constr_single_reg_in(low_call, be_pos_Call_first_arg + i,
+		be_set_constr_single_reg_in(low_call, n_be_Call_first_arg + i,
 		                            arg->reg, arch_register_req_type_none);
 	}

@@ -697,6 +695,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
 	{
 		ir_node **in, *keep;
 		int i;
+		size_t d;
 		int n = 0;
 		int curr_res_proj = pn_be_Call_first_res + n_reg_results;
 		int n_ins;
@@ -708,8 +707,8 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
 		set_irn_link(curr_sp, (void*) sp);
 		in[n++] = curr_sp;

-		for (i = 0; i < ARR_LEN(destroyed_regs); ++i) {
-			const arch_register_t *reg = destroyed_regs[i];
+		for (d = 0; d < ARR_LEN(destroyed_regs); ++d) {
+			const arch_register_t *reg = destroyed_regs[d];
 			ir_node *proj = new_r_Proj(low_call, reg->reg_class->mode, curr_res_proj);

 			/* memorize the register in the link field. we need afterwards to
 			   set the register class of the keep correctly. */
@@ -976,6 +975,7 @@ static int cmp_call_dependency(const void *c1, const void *c2)
 {
 	ir_node *n1 = *(ir_node **) c1;
 	ir_node *n2 = *(ir_node **) c2;
+	unsigned h1, h2;

 	/*
 	   Classical qsort() comparison function behavior:
@@ -990,7 +990,16 @@ static int cmp_call_dependency(const void *c1, const void *c2)
 		return 1;

 	/* The nodes have no depth order, but we need a total order because qsort()
-	 * is not stable. */
+	 * is not stable.
+	 *
+	 * Additionally, we need to respect transitive dependencies. Consider a
+	 * Call a depending on Call b and an independent Call c.
+	 * We MUST NOT order c > a and b > c. */
+	h1 = get_irn_height(ir_heights, n1);
+	h2 = get_irn_height(ir_heights, n2);
+	if (h1 < h2) return -1;
+	if (h1 > h2) return 1;
+	/* Same height, so use a random (but stable) order */
 	return get_irn_idx(n1) - get_irn_idx(n2);
 }

@@ -1134,9 +1143,8 @@ static ir_type *compute_arg_type(be_abi_irg_t *env, ir_graph *irg,
                                  ir_type *method_type, ir_type *val_param_tp,
                                  ir_entity ***param_map)
 {
-	const arch_env_t *arch_env = be_get_irg_arch_env(irg);
 	int dir = env->call->flags.bits.left_to_right ? 1 : -1;
-	int inc = arch_env->stack_dir * dir;
+	int inc = -dir;
 	int n = get_method_n_params(method_type);
 	int curr = inc > 0 ? 0 : n - 1;
 	struct obstack *obst = be_get_be_obst(irg);
@@ -1211,76 +1219,14 @@ static void reg_map_to_arr(reg_node_map_t *res, pmap *reg_map)
 	qsort(res, n, sizeof(res[0]), cmp_regs);
 }

-/**
- * Creates a barrier.
- */
-static ir_node *create_barrier(ir_node *bl, ir_node **mem, pmap *regs,
-                               int in_req)
-{
-	int n_regs = pmap_count(regs);
-	int n;
-	ir_node *irn;
-	ir_node **in;
-	reg_node_map_t *rm;
-
-	in = ALLOCAN(ir_node*, n_regs+1);
-	rm = ALLOCAN(reg_node_map_t, n_regs);
-	reg_map_to_arr(rm, regs);
-	for (n = 0; n < n_regs; ++n) {
-		in[n] = rm[n].irn;
-	}
-
-	if (mem) {
-		in[n++] = *mem;
-	}
-
-	irn = be_new_Barrier(bl, n, in);
-
-	for (n = 0; n < n_regs; ++n) {
-		ir_node *pred = rm[n].irn;
-		const arch_register_t *reg = rm[n].reg;
-		arch_register_req_type_t add_type = arch_register_req_type_none;
-		ir_node *proj;
-		const backend_info_t *info;
-
-		/* stupid workaround for now... as not all nodes report register
-		 * requirements. */
-		info = be_get_info(skip_Proj(pred));
-		if (info != NULL && info->out_infos != NULL) {
-			const arch_register_req_t *ireq = arch_get_register_req_out(pred);
-			if (ireq->type & arch_register_req_type_ignore)
-				add_type |= arch_register_req_type_ignore;
-			if (ireq->type & arch_register_req_type_produces_sp)
-				add_type |= arch_register_req_type_produces_sp;
-		}
-
-		proj = new_r_Proj(irn, get_irn_mode(pred), n);
-		be_node_set_reg_class_in(irn, n, reg->reg_class);
-		if (in_req) {
-			be_set_constr_single_reg_in(irn, n, reg,
-			                            arch_register_req_type_none);
-		}
-		be_set_constr_single_reg_out(irn, n, reg, add_type);
-		arch_set_irn_register(proj, reg);
-
-		pmap_insert(regs, (void *) reg, proj);
-	}
-
-	if (mem) {
-		*mem = new_r_Proj(irn, mode_M, n);
-	}
-
-	return irn;
-}
-
 /**
  * Creates a be_Return for a Return node.
 *
- * @param @env the abi environment
- * @param irn the Return node or NULL if there was none
- * @param bl the block where the be_Retun should be placed
- * @param mem the current memory
- * @param n_res number of return results
+ * @param @env    the abi environment
+ * @param irn     the Return node or NULL if there was none
+ * @param bl      the block where the be_Retun should be placed
+ * @param mem     the current memory
+ * @param n_res   number of return results
 */
 static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl,
 		ir_node *mem, int n_res)
@@ -1331,10 +1277,6 @@ static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl,

 		be_abi_reg_map_set(reg_map, arch_env->sp, stack);

-	/* Make the Epilogue node and call the arch's epilogue maker. */
-	create_barrier(bl, &mem, reg_map, 1);
-	call->cb->epilogue(env->cb, bl, &mem, reg_map);
-
 	/*
 	    Maximum size of the in array for Return nodes is
 	    return args + callee save/ignore registers + memory + stack pointer
@@ -1385,7 +1327,7 @@ static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl,
 		if (regs[i] == NULL)
 			continue;

-		be_node_set_reg_class_in(ret, i, regs[i]->reg_class);
+		be_set_constr_single_reg_in(ret, i, regs[i], arch_register_req_type_none);
 	}

 	/* Free the space of the Epilog's in array and the register <-> proj map. */
@@ -1544,7 +1486,7 @@ static void fix_address_of_parameter_access(be_abi_irg_t *env, ir_graph *irg,
 		restore_optimization_state(&state);

 		/* reroute all edges to the new memory source */
-		edges_reroute(imem, nmem, irg);
+		edges_reroute(imem, nmem);

 		store = NULL;
 		mem = imem;
@@ -1568,6 +1510,7 @@ static void fix_address_of_parameter_access(be_abi_irg_t *env, ir_graph *irg,
 		/* the new memory Proj gets the last Proj from store */
 		set_Proj_pred(nmem, store);
 		set_Proj_proj(nmem, pn_Store_M);
+		set_nodes_block(nmem, get_nodes_block(store));

 	/* move all entities to the frame type */
 	frame_tp = get_irg_frame_type(irg);
@@ -1608,25 +1551,13 @@ static void fix_address_of_parameter_access(be_abi_irg_t *env, ir_graph *irg,
 */
 static void fix_start_block(ir_graph *irg)
 {
-	ir_node *initial_X = get_irg_initial_exec(irg);
-	ir_node *start_block = get_irg_start_block(irg);
-	const ir_edge_t *edge;
+	ir_node *initial_X   = get_irg_initial_exec(irg);
+	ir_node *start_block = get_irg_start_block(irg);
+	ir_node *jmp         = new_r_Jmp(start_block);

 	assert(is_Proj(initial_X));
-
-	foreach_out_edge(initial_X, edge) {
-		ir_node *block = get_edge_src_irn(edge);
-
-		if (is_Anchor(block))
-			continue;
-		if (block != start_block) {
-			ir_node *jmp = new_r_Jmp(start_block);
-			set_Block_cfgpred(block, get_edge_src_pos(edge), jmp);
-			set_irg_initial_exec(irg, jmp);
-			return;
-		}
-	}
-	panic("Initial exec has no follow block in %+F", irg);
+	exchange(initial_X, jmp);
+	set_irg_initial_exec(irg, new_r_Bad(irg, mode_X));
 }

@@ -1734,8 +1665,6 @@ static void modify_irg(ir_graph *irg)

 	DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));

-	/* Must fetch memory here, otherwise the start Barrier gets the wrong
-	 * memory, which leads to loops in the DAG. */
 	old_mem = get_irg_initial_mem(irg);

 	irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
@@ -1817,10 +1746,10 @@ static void modify_irg(ir_graph *irg)
 		}
 	}

-	bet_type = call->cb->get_between_type(env->cb);
-	stack_frame_init(stack_layout, arg_type, bet_type,
-	                 get_irg_frame_type(irg), arch_env->stack_dir, param_map);
 	stack_layout->sp_relative = call->flags.bits.try_omit_fp;
+	bet_type = call->cb->get_between_type(irg);
+	stack_frame_init(stack_layout, arg_type, bet_type,
+	                 get_irg_frame_type(irg), param_map);

 	/* Count the register params and add them to the number of Projs for the RegParams node */
 	for (i = 0; i < n_params; ++i) {
@@ -1846,6 +1775,9 @@ static void modify_irg(ir_graph *irg)
 		}
 	}

+	fp_reg = call->flags.bits.try_omit_fp ? arch_env->sp : arch_env->bp;
+	rbitset_clear(birg->allocatable_regs, fp_reg->global_index);
+
 	/* handle start block here (place a jump in the block) */
 	fix_start_block(irg);

@@ -1853,15 +1785,15 @@ static void modify_irg(ir_graph *irg)
 	pmap_insert(env->regs, (void *) arch_env->bp, NULL);
 	start_bl   = get_irg_start_block(irg);
 	env->start = be_new_Start(NULL, start_bl, pmap_count(env->regs) + 1);
+	set_irg_start(irg, env->start);

 	/*
 	 * make proj nodes for the callee save registers.
 	 * memorize them, since Return nodes get those as inputs.
 	 *
-	 * Note, that if a register corresponds to an argument, the regs map contains
-	 * the old Proj from start for that argument.
+	 * Note, that if a register corresponds to an argument, the regs map
+	 * contains the old Proj from start for that argument.
 	 */
-
 	rm = ALLOCAN(reg_node_map_t, pmap_count(env->regs));
 	reg_map_to_arr(rm, env->regs);
 	for (i = 0, n = pmap_count(env->regs); i < n; ++i) {
@@ -1872,7 +1804,10 @@ static void modify_irg(ir_graph *irg)
 		ir_node *proj;

 		if (reg == sp)
-			add_type |= arch_register_req_type_produces_sp | arch_register_req_type_ignore;
+			add_type |= arch_register_req_type_produces_sp;
+		if (!rbitset_is_set(birg->allocatable_regs, reg->global_index)) {
+			add_type |= arch_register_req_type_ignore;
+		}

 		assert(nr >= 0);
 		proj = new_r_Proj(env->start, mode, nr + 1);
@@ -1890,23 +1825,11 @@ static void modify_irg(ir_graph *irg)
 	mem = new_mem_proj;
 	set_irg_initial_mem(irg, mem);

-	/* Generate the Prologue */
-	fp_reg = call->cb->prologue(env->cb, &mem, env->regs, &stack_layout->initial_bias);
-
-	/* do the stack allocation BEFORE the barrier, or spill code
-	   might be added before it */
-	env->init_sp = be_abi_reg_map_get(env->regs, sp);
-	env->init_sp = be_new_IncSP(sp, start_bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND, 0);
-	be_abi_reg_map_set(env->regs, sp, env->init_sp);
-
-	create_barrier(start_bl, &mem, env->regs, 0);
-	env->init_sp = be_abi_reg_map_get(env->regs, sp);
-	arch_set_irn_register(env->init_sp, sp);
+	/* set new frame_pointer */
 	frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
 	set_irg_frame(irg, frame_pointer);
-	rbitset_clear(birg->allocatable_regs, fp_reg->global_index);

 	/* rewire old mem users to new mem */
 	exchange(old_mem, mem);
@@ -1940,8 +1863,9 @@ static void modify_irg(ir_graph *irg)
 		if (is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
 			ir_mode *mode      = get_type_mode(param_type);
 			ir_mode *load_mode = arg->load_mode;
+			ir_node *nomem     = get_irg_no_mem(irg);

-			ir_node *load = new_r_Load(start_bl, new_r_NoMem(irg), addr, load_mode, cons_floats);
+			ir_node *load = new_r_Load(start_bl, nomem, addr, load_mode, cons_floats);
 			repl = new_r_Proj(load, load_mode, pn_Load_res);

 			if (mode != load_mode) {
@@ -1970,7 +1894,7 @@ static void modify_irg(ir_graph *irg)
 	/* the arg proj is not needed anymore now and should be only used by the anchor */
 	assert(get_irn_n_edges(arg_tuple) == 1);
 	kill_node(arg_tuple);
-	set_irg_args(irg, new_r_Bad(irg));
+	set_irg_args(irg, new_r_Bad(irg, mode_T));

 	/* All Return nodes hang on the End node, so look for them there. */
 	end = get_irg_end_block(irg);
@@ -2175,14 +2099,14 @@ static void fix_pic_symconsts(ir_node *node, void *data)

 		/* we need an extra indirection for global data outside our current module.
 		   The loads are always safe and can therefore float and need no memory input */
-		load     = new_r_Load(block, new_r_NoMem(irg), add, mode, cons_floats);
+		load     = new_r_Load(block, get_irg_no_mem(irg), add, mode, cons_floats);
 		load_res = new_r_Proj(load, mode, pn_Load_res);

 		set_irn_n(node, i, load_res);
 	}
 }

-be_abi_irg_t *be_abi_introduce(ir_graph *irg)
+void be_abi_introduce(ir_graph *irg)
 {
 	be_abi_irg_t *env       = XMALLOCZ(be_abi_irg_t);
 	ir_node *old_frame      = get_irg_frame(irg);
@@ -2192,11 +2116,10 @@ be_abi_irg_t *be_abi_introduce(ir_graph *irg)
 	ir_type *method_type    = get_entity_type(entity);
 	be_irg_t *birg          = be_birg_from_irg(irg);
 	struct obstack *obst    = &birg->obst;
+	ir_node *dummy          = new_r_Dummy(irg,
+	                                      arch_env->sp->reg_class->mode);
 	unsigned r;

-	pmap_entry *ent;
-	ir_node *dummy;
-
 	/* determine allocatable registers */
 	assert(birg->allocatable_regs == NULL);
 	birg->allocatable_regs = rbitset_obstack_alloc(obst, arch_env->n_registers);
@@ -2215,14 +2138,15 @@ be_abi_irg_t *be_abi_introduce(ir_graph *irg)

 	be_omit_fp = options->omit_fp;

-	env->dce_survivor = new_survive_dce();
 	env->keep_map     = pmap_create();
 	env->call         = be_abi_call_new(arch_env->sp->reg_class);
 	arch_env_get_call_abi(arch_env, method_type, env->call);

-	env->init_sp = dummy = new_r_Dummy(irg, arch_env->sp->reg_class->mode);
+	env->init_sp = dummy;
 	env->calls   = NEW_ARR_F(ir_node*, 0);

+	edges_assure(irg);
+
 	if (options->pic) {
 		irg_walk_graph(irg, fix_pic_symconsts, NULL, env);
 	}
@@ -2230,12 +2154,6 @@ be_abi_irg_t *be_abi_introduce(ir_graph *irg)
 	/* Lower all call nodes in the IRG. */
 	process_calls(irg);

-	/*
-	   Beware: init backend abi call object after processing calls,
-	   otherwise some information might be not yet available.
-	 */
-	env->cb = env->call->cb->init(env->call, irg);
-
 	/* Process the IRG */
 	modify_irg(irg);

@@ -2254,15 +2172,8 @@ be_abi_irg_t *be_abi_introduce(ir_graph *irg)

 	exchange(dummy, env->init_sp);
 	exchange(old_frame, get_irg_frame(irg));

-	/* Make some important node pointers survive the dead node elimination. */
-	survive_dce_register_irn(env->dce_survivor, &env->init_sp);
-	foreach_pmap(env->regs, ent) {
-		survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
-	}
-
-	env->call->cb->done(env->cb);
-	env->cb = NULL;
-	return env;
+	pmap_destroy(env->regs);
+	env->regs = NULL;
 }

@@ -2271,10 +2182,7 @@ void be_abi_free(ir_graph *irg)

 	if (env->call != NULL)
 		be_abi_call_free(env->call);
-	if (env->dce_survivor != NULL)
-		free_survive_dce(env->dce_survivor);
-	if (env->regs != NULL)
-		pmap_destroy(env->regs);
+	assert(env->regs == NULL);
 	free(env);

 	be_set_irg_abi(irg, NULL);
@@ -2320,21 +2228,7 @@ void be_set_allocatable_regs(const ir_graph *irg,
 	}
 }

-ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
-{
-	assert(reg->type & arch_register_type_callee_save);
-	assert(pmap_contains(abi->regs, (void *) reg));
-	return (ir_node*)pmap_get(abi->regs, (void *) reg);
-}
-
-ir_node *be_abi_get_ignore_irn(be_abi_irg_t *abi, const arch_register_t *reg)
-{
-	assert(reg->type & arch_register_type_ignore);
-	assert(pmap_contains(abi->regs, (void *) reg));
-	return (ir_node*)pmap_get(abi->regs, (void *) reg);
-}
-
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_abi);
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_abi)
 void be_init_abi(void)
 {
 	FIRM_DBG_REGISTER(dbg, "firm.be.abi");