X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fsparc%2Fsparc_transform.c;h=c54bab704dcad70848b706d1eaf06e97db9998b7;hb=792661421a71bcfa9b64da8c6b655e826e94d2c1;hp=c8f6302de5a7f4b77beec7c1b8a184aab8632364;hpb=ee5425ca1c172c4cf244de9864f8fd44ad2cb645;p=libfirm diff --git a/ir/be/sparc/sparc_transform.c b/ir/be/sparc/sparc_transform.c index c8f6302de..c54bab704 100644 --- a/ir/be/sparc/sparc_transform.c +++ b/ir/be/sparc/sparc_transform.c @@ -60,6 +60,11 @@ DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;) +typedef struct reg_info_t { + size_t offset; + ir_node *irn; +} reg_info_t; + static const arch_register_t *sp_reg = &sparc_registers[REG_SP]; static const arch_register_t *fp_reg = &sparc_registers[REG_FRAME_POINTER]; static calling_convention_t *current_cconv = NULL; @@ -70,16 +75,11 @@ static ir_mode *mode_fp; static ir_mode *mode_fp2; //static ir_mode *mode_fp4; static pmap *node_to_stack; -static size_t start_mem_offset; -static ir_node *start_mem; -static size_t start_g0_offset; -static ir_node *start_g0; -static size_t start_g7_offset; -static ir_node *start_g7; -static size_t start_sp_offset; -static ir_node *start_sp; -static size_t start_fp_offset; -static ir_node *start_fp; +static reg_info_t start_mem; +static reg_info_t start_g0; +static reg_info_t start_g7; +static reg_info_t start_sp; +static reg_info_t start_fp; static ir_node *frame_base; static size_t start_params_offset; static size_t start_callee_saves_offset; @@ -150,95 +150,6 @@ static ir_node *gen_sign_extension(dbg_info *dbgi, ir_node *block, ir_node *op, return rshift_node; } -/** - * returns true if it is assured, that the upper bits of a node are "clean" - * which means for a 16 or 8 bit value, that the upper bits in the register - * are 0 for unsigned and a copy of the last significant bit for signed - * numbers. 
- */ -static bool upper_bits_clean(ir_node *node, ir_mode *mode) -{ - switch ((ir_opcode)get_irn_opcode(node)) { - case iro_And: - if (!mode_is_signed(mode)) { - return upper_bits_clean(get_And_left(node), mode) - || upper_bits_clean(get_And_right(node), mode); - } - /* FALLTHROUGH */ - case iro_Or: - case iro_Eor: - return upper_bits_clean(get_binop_left(node), mode) - && upper_bits_clean(get_binop_right(node), mode); - - case iro_Shr: - if (mode_is_signed(mode)) { - return false; /* TODO */ - } else { - ir_node *right = get_Shr_right(node); - if (is_Const(right)) { - ir_tarval *tv = get_Const_tarval(right); - long val = get_tarval_long(tv); - if (val >= 32 - (long)get_mode_size_bits(mode)) - return true; - } - return upper_bits_clean(get_Shr_left(node), mode); - } - - case iro_Shrs: - return upper_bits_clean(get_Shrs_left(node), mode); - - case iro_Const: { - ir_tarval *tv = get_Const_tarval(node); - long val = get_tarval_long(tv); - if (mode_is_signed(mode)) { - long shifted = val >> (get_mode_size_bits(mode)-1); - return shifted == 0 || shifted == -1; - } else { - unsigned long shifted = (unsigned long)val; - shifted >>= get_mode_size_bits(mode)-1; - shifted >>= 1; - return shifted == 0; - } - } - - case iro_Conv: { - ir_mode *dest_mode = get_irn_mode(node); - ir_node *op = get_Conv_op(node); - ir_mode *src_mode = get_irn_mode(op); - unsigned src_bits = get_mode_size_bits(src_mode); - unsigned dest_bits = get_mode_size_bits(dest_mode); - /* downconvs are a nop */ - if (src_bits <= dest_bits) - return upper_bits_clean(op, mode); - if (dest_bits <= get_mode_size_bits(mode) - && mode_is_signed(dest_mode) == mode_is_signed(mode)) - return true; - return false; - } - - case iro_Proj: { - ir_node *pred = get_Proj_pred(node); - switch (get_irn_opcode(pred)) { - case iro_Load: { - ir_mode *load_mode = get_Load_mode(pred); - unsigned load_bits = get_mode_size_bits(load_mode); - unsigned bits = get_mode_size_bits(mode); - if (load_bits > bits) - return false; - if (mode_is_signed(mode) != mode_is_signed(load_mode)) - return false; - return true; - } - default: - break; - } - } - default: - break; - } - return false; -} - /** * Extend a value to 32 bit signed/unsigned depending on its mode. * @@ -290,9 +201,10 @@ static bool is_imm_encodeable(const ir_node *node) static bool needs_extension(ir_node *op) { ir_mode *mode = get_irn_mode(op); - if (get_mode_size_bits(mode) >= get_mode_size_bits(mode_gp)) + unsigned gp_bits = get_mode_size_bits(mode_gp); + if (get_mode_size_bits(mode) >= gp_bits) return false; - return !upper_bits_clean(op, mode); + return !be_upper_bits_clean(op, mode); } /** @@ -482,25 +394,26 @@ static ir_node *gen_helper_binopx(ir_node *node, match_flags_t match_flags, } -static ir_node *get_g0(ir_graph *irg) +static ir_node *get_reg(ir_graph *const irg, reg_info_t *const reg) { - if (start_g0 == NULL) { + if (!reg->irn) { /* this is already the transformed start node */ - ir_node *start = get_irg_start(irg); + ir_node *const start = get_irg_start(irg); assert(is_sparc_Start(start)); - start_g0 = new_r_Proj(start, mode_gp, start_g0_offset); + arch_register_class_t const *const cls = arch_get_irn_register_req_out(start, reg->offset)->cls; + reg->irn = new_r_Proj(start, cls ? 
cls->mode : mode_M, reg->offset); } - return start_g0; + return reg->irn; +} + +static ir_node *get_g0(ir_graph *irg) +{ + return get_reg(irg, &start_g0); } static ir_node *get_g7(ir_graph *irg) { - if (start_g7 == NULL) { - ir_node *start = get_irg_start(irg); - assert(is_sparc_Start(start)); - start_g7 = new_r_Proj(start, mode_gp, start_g7_offset); - } - return start_g7; + return get_reg(irg, &start_g7); } static ir_node *make_tls_offset(dbg_info *dbgi, ir_node *block, @@ -663,7 +576,7 @@ static ir_node *gen_Proj_AddCC_t(ir_node *node) case pn_sparc_AddCC_t_flags: return new_r_Proj(new_pred, mode_flags, pn_sparc_AddCC_flags); default: - panic("Invalid AddCC_t proj found"); + panic("Invalid proj found"); } } @@ -710,7 +623,7 @@ static ir_node *gen_Proj_SubCC_t(ir_node *node) case pn_sparc_SubCC_t_flags: return new_r_Proj(new_pred, mode_flags, pn_sparc_SubCC_flags); default: - panic("Invalid SubCC_t proj found"); + panic("Invalid proj found"); } } @@ -777,7 +690,7 @@ static ir_node *gen_Load(ir_node *node) address_t address; if (get_Load_unaligned(node) == align_non_aligned) { - panic("sparc: transformation of unaligned Loads not implemented yet"); + panic("transformation of unaligned Loads not implemented yet"); } if (mode_is_float(mode)) { @@ -820,7 +733,7 @@ static ir_node *gen_Store(ir_node *node) address_t address; if (get_Store_unaligned(node) == align_non_aligned) { - panic("sparc: transformation of unaligned Stores not implemented yet"); + panic("transformation of unaligned Stores not implemented yet"); } if (mode_is_float(mode)) { @@ -1054,7 +967,7 @@ static ir_node *gen_Shl(ir_node *node) { ir_mode *mode = get_irn_mode(node); if (get_mode_modulo_shift(mode) != 32) - panic("modulo_shift!=32 not supported by sparc backend"); + panic("modulo_shift!=32 not supported"); return gen_helper_binop(node, MATCH_NONE, new_bd_sparc_Sll_reg, new_bd_sparc_Sll_imm); } @@ -1062,7 +975,7 @@ static ir_node *gen_Shr(ir_node *node) { ir_mode *mode = get_irn_mode(node); if (get_mode_modulo_shift(mode) != 32) - panic("modulo_shift!=32 not supported by sparc backend"); + panic("modulo_shift!=32 not supported"); return gen_helper_binop(node, MATCH_NONE, new_bd_sparc_Srl_reg, new_bd_sparc_Srl_imm); } @@ -1070,7 +983,7 @@ static ir_node *gen_Shrs(ir_node *node) { ir_mode *mode = get_irn_mode(node); if (get_mode_modulo_shift(mode) != 32) - panic("modulo_shift!=32 not supported by sparc backend"); + panic("modulo_shift!=32 not supported"); return gen_helper_binop(node, MATCH_NONE, new_bd_sparc_Sra_reg, new_bd_sparc_Sra_imm); } @@ -1105,7 +1018,7 @@ static ir_entity *create_float_const_entity(ir_tarval *tv) { const arch_env_t *arch_env = be_get_irg_arch_env(current_ir_graph); sparc_isa_t *isa = (sparc_isa_t*) arch_env; - ir_entity *entity = (ir_entity*) pmap_get(isa->constants, tv); + ir_entity *entity = pmap_get(ir_entity, isa->constants, tv); ir_initializer_t *initializer; ir_mode *mode; ir_type *type; @@ -1171,8 +1084,9 @@ static ir_node *gen_Const(ir_node *node) if (mode_is_float(mode)) { return gen_float_const(dbgi, block, tv); } + + assert(get_mode_size_bits(get_tarval_mode(tv)) <= 32); val = (int32_t)get_tarval_long(tv); - assert((long)val == get_tarval_long(tv)); return create_int_const(block, val); } @@ -1371,7 +1285,7 @@ static ir_node *create_ftoi(dbg_info *dbgi, ir_node *block, ir_node *op, ir_graph *irg = get_irn_irg(block); ir_node *sp = get_irg_frame(irg); ir_node *nomem = get_irg_no_mem(irg); - ir_node *stf = create_stf(dbgi, block, ftoi, sp, nomem, src_mode, + ir_node *stf = create_stf(dbgi, 
block, ftoi, sp, nomem, mode_fp, NULL, 0, true); ir_node *ld = new_bd_sparc_Ld_imm(dbgi, block, sp, stf, mode_gp, NULL, 0, true); @@ -1436,7 +1350,7 @@ static ir_node *gen_Conv(ir_node *node) } else { /* float -> int conv */ if (!mode_is_signed(dst_mode)) - panic("float to unsigned not implemented yet"); + panic("float to unsigned not lowered"); return create_ftoi(dbgi, block, new_op, src_mode); } } else { @@ -1449,31 +1363,20 @@ static ir_node *gen_Conv(ir_node *node) return create_itof(dbgi, block, new_op, dst_mode); } } else { /* complete in gp registers */ - int min_bits; - ir_mode *min_mode; - - if (src_bits == dst_bits || dst_mode == mode_b) { + if (src_bits >= dst_bits || dst_mode == mode_b) { /* kill unnecessary conv */ return be_transform_node(op); } - if (src_bits < dst_bits) { - min_bits = src_bits; - min_mode = src_mode; - } else { - min_bits = dst_bits; - min_mode = dst_mode; - } - - if (upper_bits_clean(op, min_mode)) { + if (be_upper_bits_clean(op, src_mode)) { return be_transform_node(op); } new_op = be_transform_node(op); - if (mode_is_signed(min_mode)) { - return gen_sign_extension(dbgi, block, new_op, min_bits); + if (mode_is_signed(src_mode)) { + return gen_sign_extension(dbgi, block, new_op, src_bits); } else { - return gen_zero_extension(dbgi, block, new_op, min_bits); + return gen_zero_extension(dbgi, block, new_op, src_bits); } } } @@ -1493,6 +1396,15 @@ static ir_node *gen_Unknown(ir_node *node) panic("Unexpected Unknown mode"); } +static void make_start_out(reg_info_t *const info, struct obstack *const obst, ir_node *const start, size_t const offset, arch_register_t const *const reg, arch_register_req_type_t const flags) +{ + info->offset = offset; + info->irn = NULL; + arch_register_req_t const *const req = be_create_reg_req(obst, reg, arch_register_req_type_ignore | flags); + arch_set_irn_register_req_out(start, offset, req); + arch_set_irn_register_out(start, offset, reg); +} + /** * transform the start node to the prolog code */ @@ -1505,14 +1417,11 @@ static ir_node *gen_Start(ir_node *node) ir_node *new_block = be_transform_node(block); dbg_info *dbgi = get_irn_dbg_info(node); struct obstack *obst = be_get_be_obst(irg); - const arch_register_req_t *req; size_t n_outs; ir_node *start; size_t i; - size_t o; /* start building list of start constraints */ - assert(obstack_object_size(obst) == 0); /* calculate number of outputs */ n_outs = 4; /* memory, g0, g7, sp */ @@ -1527,43 +1436,25 @@ static ir_node *gen_Start(ir_node *node) start = new_bd_sparc_Start(dbgi, new_block, n_outs); - o = 0; + size_t o = 0; /* first output is memory */ - start_mem_offset = o; + start_mem.offset = o; + start_mem.irn = NULL; arch_set_irn_register_req_out(start, o, arch_no_register_req); ++o; /* the zero register */ - start_g0_offset = o; - req = be_create_reg_req(obst, &sparc_registers[REG_G0], - arch_register_req_type_ignore); - arch_set_irn_register_req_out(start, o, req); - arch_set_irn_register_out(start, o, &sparc_registers[REG_G0]); - ++o; + make_start_out(&start_g0, obst, start, o++, &sparc_registers[REG_G0], arch_register_req_type_none); /* g7 is used for TLS data */ - start_g7_offset = o; - req = be_create_reg_req(obst, &sparc_registers[REG_G7], - arch_register_req_type_ignore); - arch_set_irn_register_req_out(start, o, req); - arch_set_irn_register_out(start, o, &sparc_registers[REG_G7]); - ++o; + make_start_out(&start_g7, obst, start, o++, &sparc_registers[REG_G7], arch_register_req_type_none); /* we need an output for the stackpointer */ - start_sp_offset = o; - req = 
be_create_reg_req(obst, sp_reg, - arch_register_req_type_produces_sp | arch_register_req_type_ignore); - arch_set_irn_register_req_out(start, o, req); - arch_set_irn_register_out(start, o, sp_reg); - ++o; + make_start_out(&start_sp, obst, start, o++, sp_reg, arch_register_req_type_produces_sp); if (!current_cconv->omit_fp) { - start_fp_offset = o; - req = be_create_reg_req(obst, fp_reg, arch_register_req_type_ignore); - arch_set_irn_register_req_out(start, o, req); - arch_set_irn_register_out(start, o, fp_reg); - ++o; + make_start_out(&start_fp, obst, start, o++, fp_reg, arch_register_req_type_none); } /* function parameters in registers */ @@ -1603,29 +1494,17 @@ static ir_node *gen_Start(ir_node *node) static ir_node *get_initial_sp(ir_graph *irg) { - if (start_sp == NULL) { - ir_node *start = get_irg_start(irg); - start_sp = new_r_Proj(start, mode_gp, start_sp_offset); - } - return start_sp; + return get_reg(irg, &start_sp); } static ir_node *get_initial_fp(ir_graph *irg) { - if (start_fp == NULL) { - ir_node *start = get_irg_start(irg); - start_fp = new_r_Proj(start, mode_gp, start_fp_offset); - } - return start_fp; + return get_reg(irg, &start_fp); } static ir_node *get_initial_mem(ir_graph *irg) { - if (start_mem == NULL) { - ir_node *start = get_irg_start(irg); - start_mem = new_r_Proj(start, mode_M, start_mem_offset); - } - return start_mem; + return get_reg(irg, &start_mem); } static ir_node *get_stack_pointer_for(ir_node *node) @@ -1642,7 +1521,7 @@ static ir_node *get_stack_pointer_for(ir_node *node) } be_transform_node(stack_pred); - stack = (ir_node*)pmap_get(node_to_stack, stack_pred); + stack = pmap_get(ir_node, node_to_stack, stack_pred); if (stack == NULL) { return get_stack_pointer_for(stack_pred); } @@ -1763,15 +1642,18 @@ static void bitcast_float_to_int(dbg_info *dbgi, ir_node *block, (get_tarval_sub_bits(tv, 1) << 8) | (get_tarval_sub_bits(tv, 2) << 16) | (get_tarval_sub_bits(tv, 3) << 24); - result[0] = create_int_const(block, val); + ir_node *valc = create_int_const(block, val); if (bits == 64) { - int32_t val = get_tarval_sub_bits(tv, 4) | + int32_t val2 = get_tarval_sub_bits(tv, 4) | (get_tarval_sub_bits(tv, 5) << 8) | (get_tarval_sub_bits(tv, 6) << 16) | (get_tarval_sub_bits(tv, 7) << 24); - result[1] = create_int_const(block, val); + ir_node *valc2 = create_int_const(block, val2); + result[0] = valc2; + result[1] = valc; } else { assert(bits == 32); + result[0] = valc; result[1] = NULL; } } else { @@ -1968,12 +1850,16 @@ static ir_node *gen_Call(ir_node *node) } assert(result_info->req1 == NULL); } + const unsigned *allocatable_regs = be_birg_from_irg(irg)->allocatable_regs; for (i = 0; i < N_SPARC_REGISTERS; ++i) { const arch_register_t *reg; if (!rbitset_is_set(cconv->caller_saves, i)) continue; reg = &sparc_registers[i]; - arch_set_irn_register_req_out(res, o++, reg->single_req); + arch_set_irn_register_req_out(res, o, reg->single_req); + if (!rbitset_is_set(allocatable_regs, reg->global_index)) + arch_set_irn_register_out(res, o, reg); + ++o; } assert(o == out_arity); @@ -2018,54 +1904,58 @@ static ir_node *gen_Alloc(ir_node *node) ir_type *type = get_Alloc_type(node); ir_node *size = get_Alloc_count(node); ir_node *stack_pred = get_stack_pointer_for(node); + ir_node *mem = get_Alloc_mem(node); + ir_node *new_mem = be_transform_node(mem); ir_node *subsp; + if (get_Alloc_where(node) != stack_alloc) panic("only stack-alloc supported in sparc backend (at %+F)", node); /* lowerer should have transformed all allocas to byte size */ - if (type != 
get_unknown_type() && get_type_size_bytes(type) != 1) + if (!is_unknown_type(type) && get_type_size_bytes(type) != 1) panic("Found non-byte alloc in sparc backend (at %+F)", node); if (is_Const(size)) { ir_tarval *tv = get_Const_tarval(size); long sizel = get_tarval_long(tv); - subsp = be_new_IncSP(sp_reg, new_block, stack_pred, sizel, 0); - set_irn_dbg_info(subsp, dbgi); + + assert((sizel & (SPARC_STACK_ALIGNMENT - 1)) == 0 && "Found Alloc with misaligned constant"); + subsp = new_bd_sparc_SubSP_imm(dbgi, new_block, stack_pred, new_mem, NULL, sizel); } else { ir_node *new_size = be_transform_node(size); - subsp = new_bd_sparc_SubSP(dbgi, new_block, stack_pred, new_size); - arch_set_irn_register(subsp, sp_reg); + subsp = new_bd_sparc_SubSP_reg(dbgi, new_block, stack_pred, new_size, new_mem); } - /* if we are the last IncSP producer in a block then we have to keep - * the stack value. - * Note: This here keeps all producers which is more than necessary */ - keep_alive(subsp); + ir_node *stack_proj = new_r_Proj(subsp, mode_gp, pn_sparc_SubSP_stack); + arch_set_irn_register(stack_proj, sp_reg); + /* If we are the last stack producer in a block, we have to keep the + * stack value. This keeps all producers, which is more than necessary. */ + keep_alive(stack_proj); - pmap_insert(node_to_stack, node, subsp); - /* the "result" is the unmodified sp value */ - return stack_pred; + pmap_insert(node_to_stack, node, stack_proj); + + return subsp; } static ir_node *gen_Proj_Alloc(ir_node *node) { - ir_node *alloc = get_Proj_pred(node); - long pn = get_Proj_proj(node); + ir_node *alloc = get_Proj_pred(node); + ir_node *new_alloc = be_transform_node(alloc); + long pn = get_Proj_proj(node); switch ((pn_Alloc)pn) { - case pn_Alloc_M: { - ir_node *alloc_mem = get_Alloc_mem(alloc); - return be_transform_node(alloc_mem); - } + case pn_Alloc_M: + return new_r_Proj(new_alloc, mode_M, pn_sparc_SubSP_M); case pn_Alloc_res: { - ir_node *new_alloc = be_transform_node(alloc); - return new_alloc; + ir_node *addr_proj = new_r_Proj(new_alloc, mode_gp, pn_sparc_SubSP_addr); + arch_set_irn_register(addr_proj, arch_get_irn_register(node)); + return addr_proj; } case pn_Alloc_X_regular: case pn_Alloc_X_except: - panic("sparc backend: exception output of alloc not supported (at %+F)", + panic("exception output of alloc not supported (at %+F)", node); } - panic("sparc backend: invalid Proj->Alloc"); + panic("invalid Proj->Alloc"); } static ir_node *gen_Free(ir_node *node) @@ -2082,7 +1972,7 @@ static ir_node *gen_Free(ir_node *node) if (get_Alloc_where(node) != stack_alloc) panic("only stack-alloc supported in sparc backend (at %+F)", node); /* lowerer should have transformed all allocas to byte size */ - if (type != get_unknown_type() && get_type_size_bytes(type) != 1) + if (!is_unknown_type(type) && get_type_size_bytes(type) != 1) panic("Found non-byte alloc in sparc backend (at %+F)", node); if (is_Const(size)) { @@ -2143,18 +2033,10 @@ static const arch_register_req_t *get_float_req(ir_mode *mode) } } -/** - * Transform some Phi nodes - */ static ir_node *gen_Phi(ir_node *node) { + ir_mode *mode = get_irn_mode(node); const arch_register_req_t *req; - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_graph *irg = current_ir_graph; - dbg_info *dbgi = get_irn_dbg_info(node); - ir_mode *mode = get_irn_mode(node); - ir_node *phi; - if (mode_needs_gp_reg(mode)) { /* we shouldn't have any 64bit stuff around anymore */ assert(get_mode_size_bits(mode) <= 32); @@ -2167,14 +2049,7 @@ static ir_node *gen_Phi(ir_node 
*node) req = arch_no_register_req; } - /* phi nodes allow loops, so we use the old arguments for now - * and fix this later */ - phi = new_ir_node(dbgi, irg, block, op_Phi, mode, get_irn_arity(node), get_irn_in(node) + 1); - copy_node_attr(irg, node, phi); - be_duplicate_deps(node, phi); - arch_set_irn_register_req_out(phi, 0, req); - be_enqueue_preds(node); - return phi; + return be_transform_phi(node, req); } /** @@ -2237,15 +2112,6 @@ static ir_node *gen_Proj_Store(ir_node *node) panic("Unsupported Proj from Store"); } -/** - * Transform the Projs from a Cmp. - */ -static ir_node *gen_Proj_Cmp(ir_node *node) -{ - (void) node; - panic("not implemented"); -} - /** * transform Projs from a Div */ @@ -2261,7 +2127,7 @@ static ir_node *gen_Proj_Div(ir_node *node) } else if (is_sparc_fdiv(new_pred)) { res_mode = get_Div_resmode(pred); } else { - panic("sparc backend: Div transformed to something unexpected: %+F", + panic("Div transformed to something unexpected: %+F", new_pred); } assert((int)pn_sparc_SDiv_res == (int)pn_sparc_UDiv_res); @@ -2272,7 +2138,7 @@ static ir_node *gen_Proj_Div(ir_node *node) case pn_Div_res: return new_r_Proj(new_pred, res_mode, pn_sparc_SDiv_res); case pn_Div_M: - return new_r_Proj(new_pred, mode_gp, pn_sparc_SDiv_M); + return new_r_Proj(new_pred, mode_M, pn_sparc_SDiv_M); default: break; } @@ -2446,8 +2312,6 @@ static ir_node *gen_Proj(ir_node *node) return gen_Proj_Load(node); case iro_Call: return gen_Proj_Call(node); - case iro_Cmp: - return gen_Proj_Cmp(node); case iro_Switch: case iro_Cond: return be_duplicate_node(node); @@ -2551,11 +2415,6 @@ void sparc_transform_graph(ir_graph *irg) mode_flags = sparc_reg_classes[CLASS_sparc_flags_class].mode; assert(sparc_reg_classes[CLASS_sparc_fpflags_class].mode == mode_flags); - start_mem = NULL; - start_g0 = NULL; - start_g7 = NULL; - start_sp = NULL; - start_fp = NULL; frame_base = NULL; stackorder = be_collect_stacknodes(irg); @@ -2586,7 +2445,7 @@ void sparc_transform_graph(ir_graph *irg) /* do code placement, to optimize the position of constants */ place_code(irg); /* backend expects outedges to be always on */ - edges_assure(irg); + assure_edges(irg); } void sparc_init_transform(void)
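
For readers skimming the patch: the central refactoring replaces the five start_*_offset/start_* variable pairs with one reg_info_t per Start output and a single lazy Proj helper. The block below is only a condensed restatement of the hunks above (diff markers stripped, declarations regrouped), not code beyond what the patch itself adds; the cls == NULL branch covers the memory output, whose requirement is set to arch_no_register_req in gen_Start and therefore has no register class:

    typedef struct reg_info_t {
        size_t   offset; /* Proj number on the transformed Start node */
        ir_node *irn;    /* cached Proj, created on first use */
    } reg_info_t;

    static reg_info_t start_mem, start_g0, start_g7, start_sp, start_fp;

    static ir_node *get_reg(ir_graph *const irg, reg_info_t *const reg)
    {
        if (!reg->irn) {
            /* this is already the transformed start node */
            ir_node *const start = get_irg_start(irg);
            assert(is_sparc_Start(start));
            arch_register_class_t const *const cls
                = arch_get_irn_register_req_out(start, reg->offset)->cls;
            reg->irn = new_r_Proj(start, cls ? cls->mode : mode_M, reg->offset);
        }
        return reg->irn;
    }

    /* get_g0/get_g7/get_initial_sp/fp/mem become one-line wrappers, e.g.: */
    static ir_node *get_g0(ir_graph *irg)          { return get_reg(irg, &start_g0);  }
    static ir_node *get_initial_mem(ir_graph *irg) { return get_reg(irg, &start_mem); }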