X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fsparc%2Fsparc_transform.c;h=83d9f4574cfce04eb9cef8b39d674a811f6a9e84;hb=7cf5d1acea5684e12bd09537b60324acb1c0b163;hp=666d291f9a680d50e8f5507c87915efcc9bf9f26;hpb=d1cb53b02702f0ea401e511b4825cf83bc89610a;p=libfirm diff --git a/ir/be/sparc/sparc_transform.c b/ir/be/sparc/sparc_transform.c index 666d291f9..83d9f4574 100644 --- a/ir/be/sparc/sparc_transform.c +++ b/ir/be/sparc/sparc_transform.c @@ -21,7 +21,6 @@ * @file * @brief code selection (transform FIRM into SPARC FIRM) * @author Hannes Rapp, Matthias Braun - * @version $Id$ */ #include "config.h" @@ -42,11 +41,11 @@ #include "error.h" #include "util.h" -#include "../benode.h" -#include "../beirg.h" -#include "../beutil.h" -#include "../betranshlp.h" -#include "../beabihelper.h" +#include "benode.h" +#include "beirg.h" +#include "beutil.h" +#include "betranshlp.h" +#include "beabihelper.h" #include "bearch_sparc_t.h" #include "sparc_nodes_attr.h" @@ -61,6 +60,11 @@ DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;) +typedef struct reg_info_t { + size_t offset; + ir_node *irn; +} reg_info_t; + static const arch_register_t *sp_reg = &sparc_registers[REG_SP]; static const arch_register_t *fp_reg = &sparc_registers[REG_FRAME_POINTER]; static calling_convention_t *current_cconv = NULL; @@ -71,14 +75,11 @@ static ir_mode *mode_fp; static ir_mode *mode_fp2; //static ir_mode *mode_fp4; static pmap *node_to_stack; -static size_t start_mem_offset; -static ir_node *start_mem; -static size_t start_g0_offset; -static ir_node *start_g0; -static size_t start_sp_offset; -static ir_node *start_sp; -static size_t start_fp_offset; -static ir_node *start_fp; +static reg_info_t start_mem; +static reg_info_t start_g0; +static reg_info_t start_g7; +static reg_info_t start_sp; +static reg_info_t start_fp; static ir_node *frame_base; static size_t start_params_offset; static size_t start_callee_saves_offset; @@ -149,20 +150,6 @@ static ir_node *gen_sign_extension(dbg_info *dbgi, ir_node *block, ir_node *op, return rshift_node; } -/** - * returns true if it is assured, that the upper bits of a node are "clean" - * which means for a 16 or 8 bit value, that the upper bits in the register - * are 0 for unsigned and a copy of the last significant bit for signed - * numbers. - */ -static bool upper_bits_clean(ir_node *transformed_node, ir_mode *mode) -{ - (void) transformed_node; - (void) mode; - /* TODO */ - return false; -} - /** * Extend a value to 32 bit signed/unsigned depending on its mode. * @@ -175,8 +162,7 @@ static ir_node *gen_extension(dbg_info *dbgi, ir_node *block, ir_node *op, ir_mode *orig_mode) { int bits = get_mode_size_bits(orig_mode); - if (bits == 32) - return op; + assert(bits < 32); if (mode_is_signed(orig_mode)) { return gen_sign_extension(dbgi, block, op, bits); @@ -212,13 +198,17 @@ static bool is_imm_encodeable(const ir_node *node) return sparc_is_value_imm_encodeable(value); } -static bool needs_extension(ir_mode *mode) +static bool needs_extension(ir_node *op) { - return get_mode_size_bits(mode) < get_mode_size_bits(mode_gp); + ir_mode *mode = get_irn_mode(op); + unsigned gp_bits = get_mode_size_bits(mode_gp); + if (get_mode_size_bits(mode) >= gp_bits) + return false; + return !be_upper_bits_clean(op, mode); } /** - * Check, if a given node is a Down-Conv, ie. a integer Conv + * Check, if a given node is a Down-Conv, i.e. a integer Conv * from a mode with a mode with more bits to a mode with lesser bits. * Moreover, we return only true if the node has not more than 1 user. 
* @@ -241,7 +231,7 @@ static bool is_downconv(const ir_node *node) get_mode_size_bits(dest_mode) <= get_mode_size_bits(src_mode); } -static ir_node *sparc_skip_downconv(ir_node *node) +static ir_node *skip_downconv(ir_node *node) { while (is_downconv(node)) { node = get_Conv_op(node); @@ -269,8 +259,8 @@ static ir_node *gen_helper_binop_args(ir_node *node, ir_mode *mode2; if (flags & MATCH_MODE_NEUTRAL) { - op1 = sparc_skip_downconv(op1); - op2 = sparc_skip_downconv(op2); + op1 = skip_downconv(op1); + op2 = skip_downconv(op2); } mode1 = get_irn_mode(op1); mode2 = get_irn_mode(op2); @@ -281,13 +271,13 @@ static ir_node *gen_helper_binop_args(ir_node *node, if (is_imm_encodeable(op2)) { int32_t immediate = get_tarval_long(get_Const_tarval(op2)); new_op1 = be_transform_node(op1); - if (! (flags & MATCH_MODE_NEUTRAL) && needs_extension(mode1)) { + if (! (flags & MATCH_MODE_NEUTRAL) && needs_extension(op1)) { new_op1 = gen_extension(dbgi, block, new_op1, mode1); } return new_imm(dbgi, block, new_op1, NULL, immediate); } new_op2 = be_transform_node(op2); - if (! (flags & MATCH_MODE_NEUTRAL) && needs_extension(mode2)) { + if (! (flags & MATCH_MODE_NEUTRAL) && needs_extension(op2)) { new_op2 = gen_extension(dbgi, block, new_op2, mode2); } @@ -297,7 +287,7 @@ static ir_node *gen_helper_binop_args(ir_node *node, } new_op1 = be_transform_node(op1); - if (! (flags & MATCH_MODE_NEUTRAL) && needs_extension(mode1)) { + if (! (flags & MATCH_MODE_NEUTRAL) && needs_extension(op1)) { new_op1 = gen_extension(dbgi, block, new_op1, mode1); } return new_reg(dbgi, block, new_op1, new_op2); @@ -404,15 +394,50 @@ static ir_node *gen_helper_binopx(ir_node *node, match_flags_t match_flags, } -static ir_node *get_g0(ir_graph *irg) +static ir_node *get_reg(ir_graph *const irg, reg_info_t *const reg) { - if (start_g0 == NULL) { + if (!reg->irn) { /* this is already the transformed start node */ - ir_node *start = get_irg_start(irg); + ir_node *const start = get_irg_start(irg); assert(is_sparc_Start(start)); - start_g0 = new_r_Proj(start, mode_gp, start_g0_offset); + arch_register_class_t const *const cls = arch_get_irn_register_req_out(start, reg->offset)->cls; + reg->irn = new_r_Proj(start, cls ? 
cls->mode : mode_M, reg->offset); + } + return reg->irn; +} + +static ir_node *get_g0(ir_graph *irg) +{ + return get_reg(irg, &start_g0); +} + +static ir_node *get_g7(ir_graph *irg) +{ + return get_reg(irg, &start_g7); +} + +static ir_node *make_tls_offset(dbg_info *dbgi, ir_node *block, + ir_entity *entity, int32_t offset) +{ + ir_node *hi = new_bd_sparc_SetHi(dbgi, block, entity, offset); + ir_node *low = new_bd_sparc_Xor_imm(dbgi, block, hi, entity, offset); + return low; +} + +static ir_node *make_address(dbg_info *dbgi, ir_node *block, ir_entity *entity, + int32_t offset) +{ + if (get_entity_owner(entity) == get_tls_type()) { + ir_graph *irg = get_irn_irg(block); + ir_node *g7 = get_g7(irg); + ir_node *offsetn = make_tls_offset(dbgi, block, entity, offset); + ir_node *add = new_bd_sparc_Add_reg(dbgi, block, g7, offsetn); + return add; + } else { + ir_node *hi = new_bd_sparc_SetHi(dbgi, block, entity, offset); + ir_node *low = new_bd_sparc_Or_imm(dbgi, block, hi, entity, offset); + return low; } - return start_g0; } typedef struct address_t { @@ -446,15 +471,28 @@ static void match_address(ir_node *ptr, address_t *address, bool use_ptr2) * won't save anything but produce multiple sethi+or combinations with * just different offsets */ if (is_SymConst(base) && get_irn_n_edges(base) == 1) { - dbg_info *dbgi = get_irn_dbg_info(ptr); - ir_node *block = get_nodes_block(ptr); - ir_node *new_block = be_transform_node(block); - entity = get_SymConst_entity(base); - base = new_bd_sparc_SetHi(dbgi, new_block, entity, offset); - } else if (use_ptr2 && is_Add(base) && entity == NULL && offset == 0) { + ir_entity *sc_entity = get_SymConst_entity(base); + dbg_info *dbgi = get_irn_dbg_info(ptr); + ir_node *block = get_nodes_block(ptr); + ir_node *new_block = be_transform_node(block); + + if (get_entity_owner(sc_entity) == get_tls_type()) { + if (!use_ptr2) { + goto only_offset; + } else { + ptr2 = make_tls_offset(dbgi, new_block, sc_entity, offset); + offset = 0; + base = get_g7(get_irn_irg(base)); + } + } else { + entity = sc_entity; + base = new_bd_sparc_SetHi(dbgi, new_block, entity, offset); + } + } else if (use_ptr2 && is_Add(base) && offset == 0) { ptr2 = be_transform_node(get_Add_right(base)); base = be_transform_node(get_Add_left(base)); } else { +only_offset: if (sparc_is_value_imm_encodeable(offset)) { base = be_transform_node(base); } else { @@ -538,7 +576,7 @@ static ir_node *gen_Proj_AddCC_t(ir_node *node) case pn_sparc_AddCC_t_flags: return new_r_Proj(new_pred, mode_flags, pn_sparc_AddCC_flags); default: - panic("Invalid AddCC_t proj found"); + panic("Invalid proj found"); } } @@ -585,7 +623,7 @@ static ir_node *gen_Proj_SubCC_t(ir_node *node) case pn_sparc_SubCC_t_flags: return new_r_Proj(new_pred, mode_flags, pn_sparc_SubCC_flags); default: - panic("Invalid SubCC_t proj found"); + panic("Invalid proj found"); } } @@ -652,7 +690,7 @@ static ir_node *gen_Load(ir_node *node) address_t address; if (get_Load_unaligned(node) == align_non_aligned) { - panic("sparc: transformation of unaligned Loads not implemented yet"); + panic("transformation of unaligned Loads not implemented yet"); } if (mode_is_float(mode)) { @@ -689,23 +727,31 @@ static ir_node *gen_Store(ir_node *node) ir_node *mem = get_Store_mem(node); ir_node *new_mem = be_transform_node(mem); ir_node *val = get_Store_value(node); - ir_node *new_val = be_transform_node(val); ir_mode *mode = get_irn_mode(val); dbg_info *dbgi = get_irn_dbg_info(node); ir_node *new_store = NULL; address_t address; if (get_Store_unaligned(node) == 
align_non_aligned) { - panic("sparc: transformation of unaligned Stores not implemented yet"); + panic("transformation of unaligned Stores not implemented yet"); } if (mode_is_float(mode)) { + ir_node *new_val = be_transform_node(val); /* TODO: variants with reg+reg address mode */ match_address(ptr, &address, false); new_store = create_stf(dbgi, block, new_val, address.ptr, new_mem, mode, address.entity, address.offset, false); } else { - assert(get_mode_size_bits(mode) <= 32); + ir_node *new_val; + unsigned dest_bits = get_mode_size_bits(mode); + while (is_downconv(node) + && get_mode_size_bits(get_irn_mode(node)) >= dest_bits) { + val = get_Conv_op(val); + } + new_val = be_transform_node(val); + + assert(dest_bits <= 32); match_address(ptr, &address, true); if (address.ptr2 != NULL) { assert(address.entity == NULL && address.offset == 0); @@ -849,22 +895,41 @@ static ir_node *gen_helper_bitop(ir_node *node, new_binop_reg_func new_reg, new_binop_imm_func new_imm, new_binop_reg_func new_not_reg, - new_binop_imm_func new_not_imm) + new_binop_imm_func new_not_imm, + match_flags_t flags) { ir_node *op1 = get_binop_left(node); ir_node *op2 = get_binop_right(node); if (is_Not(op1)) { return gen_helper_binop_args(node, op2, get_Not_op(op1), - MATCH_MODE_NEUTRAL, + flags, new_not_reg, new_not_imm); } if (is_Not(op2)) { return gen_helper_binop_args(node, op1, get_Not_op(op2), - MATCH_MODE_NEUTRAL, + flags, new_not_reg, new_not_imm); } + if (is_Const(op2) && get_irn_n_edges(op2) == 1) { + ir_tarval *tv = get_Const_tarval(op2); + long value = get_tarval_long(tv); + if (!sparc_is_value_imm_encodeable(value)) { + long notvalue = ~value; + if ((notvalue & 0x3ff) == 0) { + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *new_op2 + = new_bd_sparc_SetHi(NULL, new_block, NULL, notvalue); + ir_node *new_op1 = be_transform_node(op1); + ir_node *result + = new_not_reg(dbgi, new_block, new_op1, new_op2); + return result; + } + } + } return gen_helper_binop_args(node, op1, op2, - MATCH_MODE_NEUTRAL | MATCH_COMMUTATIVE, + flags | MATCH_COMMUTATIVE, new_reg, new_imm); } @@ -874,7 +939,8 @@ static ir_node *gen_And(ir_node *node) new_bd_sparc_And_reg, new_bd_sparc_And_imm, new_bd_sparc_AndN_reg, - new_bd_sparc_AndN_imm); + new_bd_sparc_AndN_imm, + MATCH_MODE_NEUTRAL); } static ir_node *gen_Or(ir_node *node) @@ -883,7 +949,8 @@ static ir_node *gen_Or(ir_node *node) new_bd_sparc_Or_reg, new_bd_sparc_Or_imm, new_bd_sparc_OrN_reg, - new_bd_sparc_OrN_imm); + new_bd_sparc_OrN_imm, + MATCH_MODE_NEUTRAL); } static ir_node *gen_Eor(ir_node *node) @@ -892,14 +959,15 @@ static ir_node *gen_Eor(ir_node *node) new_bd_sparc_Xor_reg, new_bd_sparc_Xor_imm, new_bd_sparc_XNor_reg, - new_bd_sparc_XNor_imm); + new_bd_sparc_XNor_imm, + MATCH_MODE_NEUTRAL); } static ir_node *gen_Shl(ir_node *node) { ir_mode *mode = get_irn_mode(node); if (get_mode_modulo_shift(mode) != 32) - panic("modulo_shift!=32 not supported by sparc backend"); + panic("modulo_shift!=32 not supported"); return gen_helper_binop(node, MATCH_NONE, new_bd_sparc_Sll_reg, new_bd_sparc_Sll_imm); } @@ -907,7 +975,7 @@ static ir_node *gen_Shr(ir_node *node) { ir_mode *mode = get_irn_mode(node); if (get_mode_modulo_shift(mode) != 32) - panic("modulo_shift!=32 not supported by sparc backend"); + panic("modulo_shift!=32 not supported"); return gen_helper_binop(node, MATCH_NONE, new_bd_sparc_Srl_reg, new_bd_sparc_Srl_imm); } @@ -915,7 +983,7 @@ static ir_node *gen_Shrs(ir_node *node) { 
ir_mode *mode = get_irn_mode(node); if (get_mode_modulo_shift(mode) != 32) - panic("modulo_shift!=32 not supported by sparc backend"); + panic("modulo_shift!=32 not supported"); return gen_helper_binop(node, MATCH_NONE, new_bd_sparc_Sra_reg, new_bd_sparc_Sra_imm); } @@ -946,11 +1014,11 @@ static ir_node *gen_Minus(ir_node *node) /** * Create an entity for a given (floating point) tarval */ -static ir_entity *create_float_const_entity(ir_tarval *tv) +static ir_entity *create_float_const_entity(ir_graph *const irg, ir_tarval *const tv) { - const arch_env_t *arch_env = be_get_irg_arch_env(current_ir_graph); + const arch_env_t *arch_env = be_get_irg_arch_env(irg); sparc_isa_t *isa = (sparc_isa_t*) arch_env; - ir_entity *entity = (ir_entity*) pmap_get(isa->constants, tv); + ir_entity *entity = pmap_get(ir_entity, isa->constants, tv); ir_initializer_t *initializer; ir_mode *mode; ir_type *type; @@ -975,9 +1043,10 @@ static ir_entity *create_float_const_entity(ir_tarval *tv) static ir_node *gen_float_const(dbg_info *dbgi, ir_node *block, ir_tarval *tv) { - ir_entity *entity = create_float_const_entity(tv); + ir_graph *irg = get_Block_irg(block); + ir_entity *entity = create_float_const_entity(irg, tv); ir_node *hi = new_bd_sparc_SetHi(dbgi, block, entity, 0); - ir_node *mem = get_irg_no_mem(current_ir_graph); + ir_node *mem = get_irg_no_mem(irg); ir_mode *mode = get_tarval_mode(tv); ir_node *new_op = create_ldf(dbgi, block, hi, mem, mode, entity, 0, false); @@ -987,67 +1056,58 @@ static ir_node *gen_float_const(dbg_info *dbgi, ir_node *block, ir_tarval *tv) return proj; } -static ir_node *gen_Const(ir_node *node) +static ir_node *create_int_const(ir_node *block, int32_t value) { - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_mode *mode = get_irn_mode(node); - dbg_info *dbgi = get_irn_dbg_info(node); - ir_tarval *tv = get_Const_tarval(node); - long value; - - if (mode_is_float(mode)) { - return gen_float_const(dbgi, block, tv); - } - - value = get_tarval_long(tv); if (value == 0) { - return get_g0(get_irn_irg(node)); + ir_graph *irg = get_irn_irg(block); + return get_g0(irg); } else if (sparc_is_value_imm_encodeable(value)) { - ir_graph *irg = get_irn_irg(node); - return new_bd_sparc_Or_imm(dbgi, block, get_g0(irg), NULL, value); + ir_graph *irg = get_irn_irg(block); + return new_bd_sparc_Or_imm(NULL, block, get_g0(irg), NULL, value); } else { - ir_node *hi = new_bd_sparc_SetHi(dbgi, block, NULL, value); + ir_node *hi = new_bd_sparc_SetHi(NULL, block, NULL, value); if ((value & 0x3ff) != 0) { - return new_bd_sparc_Or_imm(dbgi, block, hi, NULL, value & 0x3ff); + return new_bd_sparc_Or_imm(NULL, block, hi, NULL, value & 0x3ff); } else { return hi; } } } -static ir_mode *get_cmp_mode(ir_node *b_value) +static ir_node *gen_Const(ir_node *node) { - ir_node *op; - - if (!is_Cmp(b_value)) - panic("can't determine cond signednes (no cmp)"); - op = get_Cmp_left(b_value); - return get_irn_mode(op); -} + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_mode *mode = get_irn_mode(node); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_tarval *tv = get_Const_tarval(node); + int32_t val; -static ir_node *make_address(dbg_info *dbgi, ir_node *block, ir_entity *entity, - int32_t offset) -{ - ir_node *hi = new_bd_sparc_SetHi(dbgi, block, entity, offset); - ir_node *low = new_bd_sparc_Or_imm(dbgi, block, hi, entity, offset); + if (mode_is_float(mode)) { + return gen_float_const(dbgi, block, tv); + } - if (get_entity_owner(entity) == get_tls_type()) - panic("thread local storage not 
supported yet in sparc backend"); - return low; + assert(get_mode_size_bits(get_tarval_mode(tv)) <= 32); + val = (int32_t)get_tarval_long(tv); + return create_int_const(block, val); } -static ir_node *gen_SwitchJmp(ir_node *node) +static ir_node *gen_Switch(ir_node *node) { - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *selector = get_Cond_selector(node); - ir_node *new_selector = be_transform_node(selector); - long default_pn = get_Cond_default_proj(node); - ir_entity *entity; - ir_node *table_address; - ir_node *idx; - ir_node *load; - ir_node *address; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + ir_graph *irg = get_irn_irg(block); + ir_node *selector = get_Switch_selector(node); + ir_node *new_selector = be_transform_node(selector); + const ir_switch_table *table = get_Switch_table(node); + unsigned n_outs = get_Switch_n_outs(node); + ir_entity *entity; + ir_node *table_address; + ir_node *idx; + ir_node *load; + ir_node *address; + + table = ir_switch_table_duplicate(irg, table); /* switch with smaller mode not implemented yet */ assert(get_mode_size_bits(get_irn_mode(selector)) == 32); @@ -1057,65 +1117,40 @@ static ir_node *gen_SwitchJmp(ir_node *node) add_entity_linkage(entity, IR_LINKAGE_CONSTANT); /* construct base address */ - table_address = make_address(dbgi, block, entity, 0); + table_address = make_address(dbgi, new_block, entity, 0); /* scale index */ - idx = new_bd_sparc_Sll_imm(dbgi, block, new_selector, NULL, 2); + idx = new_bd_sparc_Sll_imm(dbgi, new_block, new_selector, NULL, 2); /* load from jumptable */ - load = new_bd_sparc_Ld_reg(dbgi, block, table_address, idx, - get_irg_no_mem(current_ir_graph), + load = new_bd_sparc_Ld_reg(dbgi, new_block, table_address, idx, + get_irg_no_mem(irg), mode_gp); address = new_r_Proj(load, mode_gp, pn_sparc_Ld_res); - return new_bd_sparc_SwitchJmp(dbgi, block, address, default_pn, entity); + return new_bd_sparc_SwitchJmp(dbgi, new_block, address, n_outs, table, entity); } static ir_node *gen_Cond(ir_node *node) { ir_node *selector = get_Cond_selector(node); - ir_mode *mode = get_irn_mode(selector); + ir_node *cmp_left; + ir_mode *cmp_mode; ir_node *block; ir_node *flag_node; - bool is_unsigned; ir_relation relation; dbg_info *dbgi; - /* switch/case jumps */ - if (mode != mode_b) { - return gen_SwitchJmp(node); - } - - block = be_transform_node(get_nodes_block(node)); - dbgi = get_irn_dbg_info(node); - - /* regular if/else jumps */ - if (is_Cmp(selector)) { - ir_mode *cmp_mode; - - cmp_mode = get_cmp_mode(selector); - flag_node = be_transform_node(selector); - relation = get_Cmp_relation(selector); - is_unsigned = !mode_is_signed(cmp_mode); - if (mode_is_float(cmp_mode)) { - assert(!is_unsigned); - return new_bd_sparc_fbfcc(dbgi, block, flag_node, relation); - } else { - return new_bd_sparc_Bicc(dbgi, block, flag_node, relation, is_unsigned); - } + /* note: after lower_mode_b we are guaranteed to have a Cmp input */ + block = be_transform_node(get_nodes_block(node)); + dbgi = get_irn_dbg_info(node); + cmp_left = get_Cmp_left(selector); + cmp_mode = get_irn_mode(cmp_left); + flag_node = be_transform_node(selector); + relation = get_Cmp_relation(selector); + if (mode_is_float(cmp_mode)) { + return new_bd_sparc_fbfcc(dbgi, block, flag_node, relation); } else { - /* in this case, the selector must already deliver a mode_b value. 
- * this happens, for example, when the Cond is connected to a Conv - * which converts its argument to mode_b. */ - ir_node *new_op; - ir_graph *irg; - assert(mode == mode_b); - - block = be_transform_node(get_nodes_block(node)); - irg = get_irn_irg(block); - dbgi = get_irn_dbg_info(node); - new_op = be_transform_node(selector); - /* follow the SPARC architecture manual and use orcc for tst */ - flag_node = new_bd_sparc_OrCCZero_reg(dbgi, block, new_op, get_g0(irg)); - return new_bd_sparc_Bicc(dbgi, block, flag_node, ir_relation_less_greater, true); + bool is_unsigned = !mode_is_signed(cmp_mode); + return new_bd_sparc_Bicc(dbgi, block, flag_node, relation, is_unsigned); } } @@ -1155,19 +1190,34 @@ static ir_node *gen_Cmp(ir_node *node) new_bd_sparc_AndCCZero_reg, new_bd_sparc_AndCCZero_imm, new_bd_sparc_AndNCCZero_reg, - new_bd_sparc_AndNCCZero_imm); + new_bd_sparc_AndNCCZero_imm, + MATCH_NONE); } else if (is_Or(op1)) { return gen_helper_bitop(op1, new_bd_sparc_OrCCZero_reg, new_bd_sparc_OrCCZero_imm, new_bd_sparc_OrNCCZero_reg, - new_bd_sparc_OrNCCZero_imm); + new_bd_sparc_OrNCCZero_imm, + MATCH_NONE); } else if (is_Eor(op1)) { return gen_helper_bitop(op1, new_bd_sparc_XorCCZero_reg, new_bd_sparc_XorCCZero_imm, new_bd_sparc_XNorCCZero_reg, - new_bd_sparc_XNorCCZero_imm); + new_bd_sparc_XNorCCZero_imm, + MATCH_NONE); + } else if (is_Add(op1)) { + return gen_helper_binop(op1, MATCH_COMMUTATIVE, + new_bd_sparc_AddCCZero_reg, + new_bd_sparc_AddCCZero_imm); + } else if (is_Sub(op1)) { + return gen_helper_binop(op1, MATCH_NONE, + new_bd_sparc_SubCCZero_reg, + new_bd_sparc_SubCCZero_imm); + } else if (is_Mul(op1)) { + return gen_helper_binop(op1, MATCH_COMMUTATIVE, + new_bd_sparc_MulCCZero_reg, + new_bd_sparc_MulCCZero_imm); } } @@ -1236,8 +1286,9 @@ static ir_node *create_ftoi(dbg_info *dbgi, ir_node *block, ir_node *op, ir_graph *irg = get_irn_irg(block); ir_node *sp = get_irg_frame(irg); ir_node *nomem = get_irg_no_mem(irg); - ir_node *stf = create_stf(dbgi, block, ftoi, sp, nomem, src_mode, + ir_node *stf = create_stf(dbgi, block, ftoi, sp, nomem, mode_fp, NULL, 0, true); + arch_add_irn_flags(stf, arch_irn_flags_spill); ir_node *ld = new_bd_sparc_Ld_imm(dbgi, block, sp, stf, mode_gp, NULL, 0, true); ir_node *res = new_r_Proj(ld, mode_gp, pn_sparc_Ld_res); @@ -1255,6 +1306,7 @@ static ir_node *create_itof(dbg_info *dbgi, ir_node *block, ir_node *op, ir_node *nomem = get_irg_no_mem(irg); ir_node *st = new_bd_sparc_St_imm(dbgi, block, op, sp, nomem, mode_gp, NULL, 0, true); + arch_add_irn_flags(st, arch_irn_flags_spill); ir_node *ldf = new_bd_sparc_Ldf_s(dbgi, block, sp, st, mode_fp, NULL, 0, true); ir_node *res = new_r_Proj(ldf, mode_fp, pn_sparc_Ldf_res); @@ -1287,13 +1339,13 @@ static ir_node *gen_Conv(ir_node *node) if (src_mode == mode_b) panic("ConvB not lowered %+F", node); - new_op = be_transform_node(op); if (src_mode == dst_mode) - return new_op; + return be_transform_node(op); if (mode_is_float(src_mode) || mode_is_float(dst_mode)) { assert((src_bits <= 64 && dst_bits <= 64) && "quad FP not implemented"); + new_op = be_transform_node(op); if (mode_is_float(src_mode)) { if (mode_is_float(dst_mode)) { /* float -> float conv */ @@ -1301,7 +1353,7 @@ static ir_node *gen_Conv(ir_node *node) } else { /* float -> int conv */ if (!mode_is_signed(dst_mode)) - panic("float to unsigned not implemented yet"); + panic("float to unsigned not lowered"); return create_ftoi(dbgi, block, new_op, src_mode); } } else { @@ -1313,38 +1365,21 @@ static ir_node *gen_Conv(ir_node *node) } return 
create_itof(dbgi, block, new_op, dst_mode); } - } else if (src_mode == mode_b) { - panic("ConvB not lowered %+F", node); } else { /* complete in gp registers */ - int min_bits; - ir_mode *min_mode; - - if (src_bits == dst_bits) { + if (src_bits >= dst_bits || dst_mode == mode_b) { /* kill unnecessary conv */ - return new_op; + return be_transform_node(op); } - if (dst_mode == mode_b) { - /* mode_b lowering already took care that we only have 0/1 values */ - return new_op; + if (be_upper_bits_clean(op, src_mode)) { + return be_transform_node(op); } + new_op = be_transform_node(op); - if (src_bits < dst_bits) { - min_bits = src_bits; - min_mode = src_mode; + if (mode_is_signed(src_mode)) { + return gen_sign_extension(dbgi, block, new_op, src_bits); } else { - min_bits = dst_bits; - min_mode = dst_mode; - } - - if (upper_bits_clean(new_op, min_mode)) { - return new_op; - } - - if (mode_is_signed(min_mode)) { - return gen_sign_extension(dbgi, block, new_op, min_bits); - } else { - return gen_zero_extension(dbgi, block, new_op, min_bits); + return gen_zero_extension(dbgi, block, new_op, src_bits); } } } @@ -1364,6 +1399,15 @@ static ir_node *gen_Unknown(ir_node *node) panic("Unexpected Unknown mode"); } +static void make_start_out(reg_info_t *const info, struct obstack *const obst, ir_node *const start, size_t const offset, arch_register_t const *const reg, arch_register_req_type_t const flags) +{ + info->offset = offset; + info->irn = NULL; + arch_register_req_t const *const req = be_create_reg_req(obst, reg, arch_register_req_type_ignore | flags); + arch_set_irn_register_req_out(start, offset, req); + arch_set_irn_register_out(start, offset, reg); +} + /** * transform the start node to the prolog code */ @@ -1376,17 +1420,14 @@ static ir_node *gen_Start(ir_node *node) ir_node *new_block = be_transform_node(block); dbg_info *dbgi = get_irn_dbg_info(node); struct obstack *obst = be_get_be_obst(irg); - const arch_register_req_t *req; size_t n_outs; ir_node *start; size_t i; - size_t o; /* start building list of start constraints */ - assert(obstack_object_size(obst) == 0); /* calculate number of outputs */ - n_outs = 3; /* memory, zero, sp */ + n_outs = 4; /* memory, g0, g7, sp */ if (!current_cconv->omit_fp) ++n_outs; /* framepointer */ /* function parameters */ @@ -1398,35 +1439,25 @@ static ir_node *gen_Start(ir_node *node) start = new_bd_sparc_Start(dbgi, new_block, n_outs); - o = 0; + size_t o = 0; /* first output is memory */ - start_mem_offset = o; + start_mem.offset = o; + start_mem.irn = NULL; arch_set_irn_register_req_out(start, o, arch_no_register_req); ++o; /* the zero register */ - start_g0_offset = o; - req = be_create_reg_req(obst, &sparc_registers[REG_G0], - arch_register_req_type_ignore); - arch_set_irn_register_req_out(start, o, req); - arch_set_irn_register_out(start, o, &sparc_registers[REG_G0]); - ++o; + make_start_out(&start_g0, obst, start, o++, &sparc_registers[REG_G0], arch_register_req_type_none); + + /* g7 is used for TLS data */ + make_start_out(&start_g7, obst, start, o++, &sparc_registers[REG_G7], arch_register_req_type_none); /* we need an output for the stackpointer */ - start_sp_offset = o; - req = be_create_reg_req(obst, sp_reg, - arch_register_req_type_produces_sp | arch_register_req_type_ignore); - arch_set_irn_register_req_out(start, o, req); - arch_set_irn_register_out(start, o, sp_reg); - ++o; + make_start_out(&start_sp, obst, start, o++, sp_reg, arch_register_req_type_produces_sp); if (!current_cconv->omit_fp) { - start_fp_offset = o; - req = 
be_create_reg_req(obst, fp_reg, arch_register_req_type_ignore); - arch_set_irn_register_req_out(start, o, req); - arch_set_irn_register_out(start, o, fp_reg); - ++o; + make_start_out(&start_fp, obst, start, o++, fp_reg, arch_register_req_type_none); } /* function parameters in registers */ @@ -1466,29 +1497,17 @@ static ir_node *gen_Start(ir_node *node) static ir_node *get_initial_sp(ir_graph *irg) { - if (start_sp == NULL) { - ir_node *start = get_irg_start(irg); - start_sp = new_r_Proj(start, mode_gp, start_sp_offset); - } - return start_sp; + return get_reg(irg, &start_sp); } static ir_node *get_initial_fp(ir_graph *irg) { - if (start_fp == NULL) { - ir_node *start = get_irg_start(irg); - start_fp = new_r_Proj(start, mode_gp, start_fp_offset); - } - return start_fp; + return get_reg(irg, &start_fp); } static ir_node *get_initial_mem(ir_graph *irg) { - if (start_mem == NULL) { - ir_node *start = get_irg_start(irg); - start_mem = new_r_Proj(start, mode_M, start_mem_offset); - } - return start_mem; + return get_reg(irg, &start_mem); } static ir_node *get_stack_pointer_for(ir_node *node) @@ -1505,7 +1524,7 @@ static ir_node *get_stack_pointer_for(ir_node *node) } be_transform_node(stack_pred); - stack = (ir_node*)pmap_get(node_to_stack, stack_pred); + stack = pmap_get(ir_node, node_to_stack, stack_pred); if (stack == NULL) { return get_stack_pointer_for(stack_pred); } @@ -1586,7 +1605,7 @@ static ir_node *gen_Return(ir_node *node) static ir_node *bitcast_int_to_float(dbg_info *dbgi, ir_node *block, ir_node *value0, ir_node *value1) { - ir_graph *irg = current_ir_graph; + ir_graph *irg = get_Block_irg(block); ir_node *sp = get_irg_frame(irg); ir_node *nomem = get_irg_no_mem(irg); ir_node *st = new_bd_sparc_St_imm(dbgi, block, value0, sp, nomem, @@ -1594,11 +1613,13 @@ static ir_node *bitcast_int_to_float(dbg_info *dbgi, ir_node *block, ir_mode *mode; ir_node *ldf; ir_node *mem; + arch_add_irn_flags(st, arch_irn_flags_spill); set_irn_pinned(st, op_pin_state_floats); if (value1 != NULL) { ir_node *st1 = new_bd_sparc_St_imm(dbgi, block, value1, sp, nomem, mode_gp, NULL, 4, true); + arch_add_irn_flags(st1, arch_irn_flags_spill); ir_node *in[2] = { st, st1 }; ir_node *sync = new_r_Sync(block, 2, in); set_irn_pinned(st1, op_pin_state_floats); @@ -1616,33 +1637,57 @@ static ir_node *bitcast_int_to_float(dbg_info *dbgi, ir_node *block, } static void bitcast_float_to_int(dbg_info *dbgi, ir_node *block, - ir_node *node, ir_mode *float_mode, + ir_node *value, ir_mode *float_mode, ir_node **result) { - ir_graph *irg = current_ir_graph; - ir_node *stack = get_irg_frame(irg); - ir_node *nomem = get_irg_no_mem(irg); - ir_node *stf = create_stf(dbgi, block, node, stack, nomem, float_mode, - NULL, 0, true); - int bits = get_mode_size_bits(float_mode); - ir_node *ld; - set_irn_pinned(stf, op_pin_state_floats); - - ld = new_bd_sparc_Ld_imm(dbgi, block, stack, stf, mode_gp, NULL, 0, true); - set_irn_pinned(ld, op_pin_state_floats); - result[0] = new_r_Proj(ld, mode_gp, pn_sparc_Ld_res); - - if (bits == 64) { - ir_node *ld2 = new_bd_sparc_Ld_imm(dbgi, block, stack, stf, mode_gp, - NULL, 4, true); + int bits = get_mode_size_bits(float_mode); + if (is_Const(value)) { + ir_tarval *tv = get_Const_tarval(value); + int32_t val = get_tarval_sub_bits(tv, 0) | + (get_tarval_sub_bits(tv, 1) << 8) | + (get_tarval_sub_bits(tv, 2) << 16) | + (get_tarval_sub_bits(tv, 3) << 24); + ir_node *valc = create_int_const(block, val); + if (bits == 64) { + int32_t val2 = get_tarval_sub_bits(tv, 4) | + (get_tarval_sub_bits(tv, 5) << 8) 
| + (get_tarval_sub_bits(tv, 6) << 16) | + (get_tarval_sub_bits(tv, 7) << 24); + ir_node *valc2 = create_int_const(block, val2); + result[0] = valc2; + result[1] = valc; + } else { + assert(bits == 32); + result[0] = valc; + result[1] = NULL; + } + } else { + ir_graph *irg = get_Block_irg(block); + ir_node *stack = get_irg_frame(irg); + ir_node *nomem = get_irg_no_mem(irg); + ir_node *new_value = be_transform_node(value); + ir_node *stf = create_stf(dbgi, block, new_value, stack, nomem, + float_mode, NULL, 0, true); + ir_node *ld; + arch_add_irn_flags(stf, arch_irn_flags_spill); + set_irn_pinned(stf, op_pin_state_floats); + + ld = new_bd_sparc_Ld_imm(dbgi, block, stack, stf, mode_gp, NULL, 0, true); set_irn_pinned(ld, op_pin_state_floats); - result[1] = new_r_Proj(ld2, mode_gp, pn_sparc_Ld_res); + result[0] = new_r_Proj(ld, mode_gp, pn_sparc_Ld_res); - arch_add_irn_flags(ld, (arch_irn_flags_t)sparc_arch_irn_flag_needs_64bit_spillslot); - arch_add_irn_flags(ld2, (arch_irn_flags_t)sparc_arch_irn_flag_needs_64bit_spillslot); - } else { - assert(bits == 32); - result[1] = NULL; + if (bits == 64) { + ir_node *ld2 = new_bd_sparc_Ld_imm(dbgi, block, stack, stf, mode_gp, + NULL, 4, true); + set_irn_pinned(ld, op_pin_state_floats); + result[1] = new_r_Proj(ld2, mode_gp, pn_sparc_Ld_res); + + arch_add_irn_flags(ld, (arch_irn_flags_t)sparc_arch_irn_flag_needs_64bit_spillslot); + arch_add_irn_flags(ld2, (arch_irn_flags_t)sparc_arch_irn_flag_needs_64bit_spillslot); + } else { + assert(bits == 32); + result[1] = NULL; + } } } @@ -1676,7 +1721,7 @@ static ir_node *gen_Call(ir_node *node) ir_entity *entity = NULL; ir_node *new_frame = get_stack_pointer_for(node); bool aggregate_return - = type->attr.ma.has_compound_ret_parameter; + = get_method_calling_convention(type) & cc_compound_ret; ir_node *incsp; int mem_pos; ir_node *res; @@ -1707,10 +1752,10 @@ static ir_node *gen_Call(ir_node *node) /* parameters */ for (p = 0; p < n_params; ++p) { ir_node *value = get_Call_param(node, p); - ir_node *new_value = be_transform_node(value); const reg_or_stackslot_t *param = &cconv->parameters[p]; ir_type *param_type = get_method_param_type(type, p); ir_mode *mode = get_type_mode(param_type); + ir_node *partial_value; ir_node *new_values[2]; ir_node *str; int offset; @@ -1718,8 +1763,9 @@ static ir_node *gen_Call(ir_node *node) if (mode_is_float(mode) && param->reg0 != NULL) { unsigned size_bits = get_mode_size_bits(mode); assert(size_bits <= 64); - bitcast_float_to_int(dbgi, new_block, new_value, mode, new_values); + bitcast_float_to_int(dbgi, new_block, value, mode, new_values); } else { + ir_node *new_value = be_transform_node(value); new_values[0] = new_value; new_values[1] = NULL; } @@ -1742,8 +1788,10 @@ static ir_node *gen_Call(ir_node *node) /* we need a store if we're here */ if (new_values[1] != NULL) { - new_value = new_values[1]; - mode = mode_gp; + partial_value = new_values[1]; + mode = mode_gp; + } else { + partial_value = new_values[0]; } /* we need to skip over our save area when constructing the call @@ -1751,10 +1799,10 @@ static ir_node *gen_Call(ir_node *node) offset = param->offset + SPARC_MIN_STACKSIZE; if (mode_is_float(mode)) { - str = create_stf(dbgi, new_block, new_value, incsp, new_mem, + str = create_stf(dbgi, new_block, partial_value, incsp, new_mem, mode, NULL, offset, true); } else { - str = new_bd_sparc_St_imm(dbgi, new_block, new_value, incsp, + str = new_bd_sparc_St_imm(dbgi, new_block, partial_value, incsp, new_mem, mode, NULL, offset, true); } set_irn_pinned(str, 
op_pin_state_floats); @@ -1808,12 +1856,16 @@ static ir_node *gen_Call(ir_node *node) } assert(result_info->req1 == NULL); } + const unsigned *allocatable_regs = be_birg_from_irg(irg)->allocatable_regs; for (i = 0; i < N_SPARC_REGISTERS; ++i) { const arch_register_t *reg; if (!rbitset_is_set(cconv->caller_saves, i)) continue; reg = &sparc_registers[i]; - arch_set_irn_register_req_out(res, o++, reg->single_req); + arch_set_irn_register_req_out(res, o, reg->single_req); + if (!rbitset_is_set(allocatable_regs, reg->global_index)) + arch_set_irn_register_out(res, o, reg); + ++o; } assert(o == out_arity); @@ -1858,54 +1910,54 @@ static ir_node *gen_Alloc(ir_node *node) ir_type *type = get_Alloc_type(node); ir_node *size = get_Alloc_count(node); ir_node *stack_pred = get_stack_pointer_for(node); + ir_node *mem = get_Alloc_mem(node); + ir_node *new_mem = be_transform_node(mem); ir_node *subsp; + if (get_Alloc_where(node) != stack_alloc) panic("only stack-alloc supported in sparc backend (at %+F)", node); /* lowerer should have transformed all allocas to byte size */ - if (type != get_unknown_type() && get_type_size_bytes(type) != 1) + if (!is_unknown_type(type) && get_type_size_bytes(type) != 1) panic("Found non-byte alloc in sparc backend (at %+F)", node); if (is_Const(size)) { ir_tarval *tv = get_Const_tarval(size); long sizel = get_tarval_long(tv); - subsp = be_new_IncSP(sp_reg, new_block, stack_pred, sizel, 0); - set_irn_dbg_info(subsp, dbgi); + + assert((sizel & (SPARC_STACK_ALIGNMENT - 1)) == 0 && "Found Alloc with misaligned constant"); + subsp = new_bd_sparc_SubSP_imm(dbgi, new_block, stack_pred, new_mem, NULL, sizel); } else { ir_node *new_size = be_transform_node(size); - subsp = new_bd_sparc_SubSP(dbgi, new_block, stack_pred, new_size); - arch_set_irn_register(subsp, sp_reg); + subsp = new_bd_sparc_SubSP_reg(dbgi, new_block, stack_pred, new_size, new_mem); } - /* if we are the last IncSP producer in a block then we have to keep - * the stack value. - * Note: This here keeps all producers which is more than necessary */ - keep_alive(subsp); + ir_node *stack_proj = new_r_Proj(subsp, mode_gp, pn_sparc_SubSP_stack); + arch_set_irn_register(stack_proj, sp_reg); + /* If we are the last stack producer in a block, we have to keep the + * stack value. This keeps all producers, which is more than necessary. 
*/ + keep_alive(stack_proj); - pmap_insert(node_to_stack, node, subsp); - /* the "result" is the unmodified sp value */ - return stack_pred; + pmap_insert(node_to_stack, node, stack_proj); + + return subsp; } static ir_node *gen_Proj_Alloc(ir_node *node) { - ir_node *alloc = get_Proj_pred(node); - long pn = get_Proj_proj(node); + ir_node *alloc = get_Proj_pred(node); + ir_node *new_alloc = be_transform_node(alloc); + long pn = get_Proj_proj(node); switch ((pn_Alloc)pn) { - case pn_Alloc_M: { - ir_node *alloc_mem = get_Alloc_mem(alloc); - return be_transform_node(alloc_mem); - } - case pn_Alloc_res: { - ir_node *new_alloc = be_transform_node(alloc); - return new_alloc; - } + case pn_Alloc_M: return new_r_Proj(new_alloc, mode_M, pn_sparc_SubSP_M); + case pn_Alloc_res: return new_r_Proj(new_alloc, mode_gp, pn_sparc_SubSP_addr); + case pn_Alloc_X_regular: case pn_Alloc_X_except: - panic("sparc backend: exception output of alloc not supported (at %+F)", + panic("exception output of alloc not supported (at %+F)", node); } - panic("sparc backend: invalid Proj->Alloc"); + panic("invalid Proj->Alloc"); } static ir_node *gen_Free(ir_node *node) @@ -1922,7 +1974,7 @@ static ir_node *gen_Free(ir_node *node) if (get_Alloc_where(node) != stack_alloc) panic("only stack-alloc supported in sparc backend (at %+F)", node); /* lowerer should have transformed all allocas to byte size */ - if (type != get_unknown_type() && get_type_size_bytes(type) != 1) + if (!is_unknown_type(type) && get_type_size_bytes(type) != 1) panic("Found non-byte alloc in sparc backend (at %+F)", node); if (is_Const(size)) { @@ -1974,31 +2026,19 @@ static const arch_register_req_t float4_req = { static const arch_register_req_t *get_float_req(ir_mode *mode) { - unsigned bits = get_mode_size_bits(mode); - assert(mode_is_float(mode)); - if (bits == 32) { - return &float1_req; - } else if (bits == 64) { - return &float2_req; - } else { - assert(bits == 128); - return &float4_req; + switch (get_mode_size_bits(mode)) { + case 32: return &float1_req; + case 64: return &float2_req; + case 128: return &float4_req; + default: panic("invalid float mode"); } } -/** - * Transform some Phi nodes - */ static ir_node *gen_Phi(ir_node *node) { + ir_mode *mode = get_irn_mode(node); const arch_register_req_t *req; - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_graph *irg = current_ir_graph; - dbg_info *dbgi = get_irn_dbg_info(node); - ir_mode *mode = get_irn_mode(node); - ir_node *phi; - if (mode_needs_gp_reg(mode)) { /* we shouldn't have any 64bit stuff around anymore */ assert(get_mode_size_bits(mode) <= 32); @@ -2006,20 +2046,12 @@ static ir_node *gen_Phi(ir_node *node) mode = mode_gp; req = sparc_reg_classes[CLASS_sparc_gp].class_req; } else if (mode_is_float(mode)) { - mode = mode; req = get_float_req(mode); } else { req = arch_no_register_req; } - /* phi nodes allow loops, so we use the old arguments for now - * and fix this later */ - phi = new_ir_node(dbgi, irg, block, op_Phi, mode, get_irn_arity(node), get_irn_in(node) + 1); - copy_node_attr(irg, node, phi); - be_duplicate_deps(node, phi); - arch_set_irn_register_req_out(phi, 0, req); - be_enqueue_preds(node); - return phi; + return be_transform_phi(node, req); } /** @@ -2082,15 +2114,6 @@ static ir_node *gen_Proj_Store(ir_node *node) panic("Unsupported Proj from Store"); } -/** - * Transform the Projs from a Cmp. 
- */ -static ir_node *gen_Proj_Cmp(ir_node *node) -{ - (void) node; - panic("not implemented"); -} - /** * transform Projs from a Div */ @@ -2106,7 +2129,7 @@ static ir_node *gen_Proj_Div(ir_node *node) } else if (is_sparc_fdiv(new_pred)) { res_mode = get_Div_resmode(pred); } else { - panic("sparc backend: Div transformed to something unexpected: %+F", + panic("Div transformed to something unexpected: %+F", new_pred); } assert((int)pn_sparc_SDiv_res == (int)pn_sparc_UDiv_res); @@ -2117,7 +2140,7 @@ static ir_node *gen_Proj_Div(ir_node *node) case pn_Div_res: return new_r_Proj(new_pred, res_mode, pn_sparc_SDiv_res); case pn_Div_M: - return new_r_Proj(new_pred, mode_gp, pn_sparc_SDiv_M); + return new_r_Proj(new_pred, mode_M, pn_sparc_SDiv_M); default: break; } @@ -2291,8 +2314,7 @@ static ir_node *gen_Proj(ir_node *node) return gen_Proj_Load(node); case iro_Call: return gen_Proj_Call(node); - case iro_Cmp: - return gen_Proj_Cmp(node); + case iro_Switch: case iro_Cond: return be_duplicate_node(node); case iro_Div: @@ -2365,6 +2387,7 @@ static void sparc_register_transformers(void) be_set_transform_function(op_Start, gen_Start); be_set_transform_function(op_Store, gen_Store); be_set_transform_function(op_Sub, gen_Sub); + be_set_transform_function(op_Switch, gen_Switch); be_set_transform_function(op_SymConst, gen_SymConst); be_set_transform_function(op_Unknown, gen_Unknown); @@ -2387,16 +2410,13 @@ void sparc_transform_graph(ir_graph *irg) node_to_stack = pmap_create(); - mode_gp = mode_Iu; - mode_fp = mode_F; + mode_gp = sparc_reg_classes[CLASS_sparc_gp].mode; + mode_fp = sparc_reg_classes[CLASS_sparc_fp].mode; mode_fp2 = mode_D; - mode_flags = mode_Bu; //mode_fp4 = ? + mode_flags = sparc_reg_classes[CLASS_sparc_flags_class].mode; + assert(sparc_reg_classes[CLASS_sparc_fpflags_class].mode == mode_flags); - start_mem = NULL; - start_g0 = NULL; - start_sp = NULL; - start_fp = NULL; frame_base = NULL; stackorder = be_collect_stacknodes(irg); @@ -2426,6 +2446,8 @@ void sparc_transform_graph(ir_graph *irg) /* do code placement, to optimize the position of constants */ place_code(irg); + /* backend expects outedges to be always on */ + assure_edges(irg); } void sparc_init_transform(void)