X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fsparc%2Fsparc_transform.c;h=fffa4ceae21c29a7785a18eacd2be9bfe83aae90;hb=274626e2d58cfa247b88ee05adaca8906b025d93;hp=28ae9912331d048279f9f857d3588658bab17146;hpb=30e0e01a7abee881b1684ee93750e886cf2d6d66;p=libfirm diff --git a/ir/be/sparc/sparc_transform.c b/ir/be/sparc/sparc_transform.c index 28ae99123..fffa4ceae 100644 --- a/ir/be/sparc/sparc_transform.c +++ b/ir/be/sparc/sparc_transform.c @@ -20,10 +20,13 @@ /** * @file * @brief code selection (transform FIRM into SPARC FIRM) - * @version $Id$ + * @author Hannes Rapp, Matthias Braun */ #include "config.h" +#include +#include + #include "irnode_t.h" #include "irgraph_t.h" #include "irmode_t.h" @@ -31,16 +34,18 @@ #include "iredges.h" #include "ircons.h" #include "irprintf.h" +#include "iroptimize.h" #include "dbginfo.h" #include "iropt_t.h" #include "debug.h" #include "error.h" +#include "util.h" -#include "../benode.h" -#include "../beirg.h" -#include "../beutil.h" -#include "../betranshlp.h" -#include "../beabihelper.h" +#include "benode.h" +#include "beirg.h" +#include "beutil.h" +#include "betranshlp.h" +#include "beabihelper.h" #include "bearch_sparc_t.h" #include "sparc_nodes_attr.h" @@ -55,23 +60,55 @@ DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;) -static sparc_code_gen_t *env_cg; -static beabi_helper_env_t *abihelper; -static const arch_register_t *sp_reg = &sparc_gp_regs[REG_SP]; -static const arch_register_t *fp_reg = &sparc_gp_regs[REG_FRAME_POINTER]; -static calling_convention_t *cconv = NULL; +static const arch_register_t *sp_reg = &sparc_registers[REG_SP]; +static const arch_register_t *fp_reg = &sparc_registers[REG_FRAME_POINTER]; +static calling_convention_t *current_cconv = NULL; +static be_stackorder_t *stackorder; static ir_mode *mode_gp; +static ir_mode *mode_flags; static ir_mode *mode_fp; static ir_mode *mode_fp2; //static ir_mode *mode_fp4; static pmap *node_to_stack; +static size_t start_mem_offset; +static ir_node *start_mem; +static size_t start_g0_offset; +static ir_node *start_g0; +static size_t start_g7_offset; +static ir_node *start_g7; +static size_t start_sp_offset; +static ir_node *start_sp; +static size_t start_fp_offset; +static ir_node *start_fp; +static ir_node *frame_base; +static size_t start_params_offset; +static size_t start_callee_saves_offset; + +static const arch_register_t *const omit_fp_callee_saves[] = { + &sparc_registers[REG_L0], + &sparc_registers[REG_L1], + &sparc_registers[REG_L2], + &sparc_registers[REG_L3], + &sparc_registers[REG_L4], + &sparc_registers[REG_L5], + &sparc_registers[REG_L6], + &sparc_registers[REG_L7], + &sparc_registers[REG_I0], + &sparc_registers[REG_I1], + &sparc_registers[REG_I2], + &sparc_registers[REG_I3], + &sparc_registers[REG_I4], + &sparc_registers[REG_I5], +}; -static ir_node *gen_SymConst(ir_node *node); - - -static inline int mode_needs_gp_reg(ir_mode *mode) +static inline bool mode_needs_gp_reg(ir_mode *mode) { - return mode_is_int(mode) || mode_is_reference(mode); + if (mode_is_int(mode) || mode_is_reference(mode)) { + /* we should only see 32bit code */ + assert(get_mode_size_bits(mode) <= 32); + return true; + } + return false; } /** @@ -86,10 +123,10 @@ static ir_node *gen_zero_extension(dbg_info *dbgi, ir_node *block, ir_node *op, int src_bits) { if (src_bits == 8) { - return new_bd_sparc_And_imm(dbgi, block, op, 0xFF); + return new_bd_sparc_And_imm(dbgi, block, op, NULL, 0xFF); } else if (src_bits == 16) { - ir_node *lshift = new_bd_sparc_Sll_imm(dbgi, block, op, 16); - ir_node 
*rshift = new_bd_sparc_Slr_imm(dbgi, block, lshift, 16); + ir_node *lshift = new_bd_sparc_Sll_imm(dbgi, block, op, NULL, 16); + ir_node *rshift = new_bd_sparc_Srl_imm(dbgi, block, lshift, NULL, 16); return rshift; } else { panic("zero extension only supported for 8 and 16 bits"); @@ -108,8 +145,8 @@ static ir_node *gen_sign_extension(dbg_info *dbgi, ir_node *block, ir_node *op, int src_bits) { int shift_width = 32 - src_bits; - ir_node *lshift_node = new_bd_sparc_Sll_imm(dbgi, block, op, shift_width); - ir_node *rshift_node = new_bd_sparc_Sra_imm(dbgi, block, lshift_node, shift_width); + ir_node *lshift_node = new_bd_sparc_Sll_imm(dbgi, block, op, NULL, shift_width); + ir_node *rshift_node = new_bd_sparc_Sra_imm(dbgi, block, lshift_node, NULL, shift_width); return rshift_node; } @@ -119,11 +156,86 @@ static ir_node *gen_sign_extension(dbg_info *dbgi, ir_node *block, ir_node *op, * are 0 for unsigned and a copy of the last significant bit for signed * numbers. */ -static bool upper_bits_clean(ir_node *transformed_node, ir_mode *mode) +static bool upper_bits_clean(ir_node *node, ir_mode *mode) { - (void) transformed_node; - (void) mode; - /* TODO */ + switch ((ir_opcode)get_irn_opcode(node)) { + case iro_And: + if (!mode_is_signed(mode)) { + return upper_bits_clean(get_And_left(node), mode) + || upper_bits_clean(get_And_right(node), mode); + } + /* FALLTHROUGH */ + case iro_Or: + case iro_Eor: + return upper_bits_clean(get_binop_left(node), mode) + && upper_bits_clean(get_binop_right(node), mode); + + case iro_Shr: + if (mode_is_signed(mode)) { + return false; /* TODO */ + } else { + ir_node *right = get_Shr_right(node); + if (is_Const(right)) { + ir_tarval *tv = get_Const_tarval(right); + long val = get_tarval_long(tv); + if (val >= 32 - (long)get_mode_size_bits(mode)) + return true; + } + return upper_bits_clean(get_Shr_left(node), mode); + } + + case iro_Shrs: + return upper_bits_clean(get_Shrs_left(node), mode); + + case iro_Const: { + ir_tarval *tv = get_Const_tarval(node); + long val = get_tarval_long(tv); + if (mode_is_signed(mode)) { + long shifted = val >> (get_mode_size_bits(mode)-1); + return shifted == 0 || shifted == -1; + } else { + unsigned long shifted = (unsigned long)val; + shifted >>= get_mode_size_bits(mode)-1; + shifted >>= 1; + return shifted == 0; + } + } + + case iro_Conv: { + ir_mode *dest_mode = get_irn_mode(node); + ir_node *op = get_Conv_op(node); + ir_mode *src_mode = get_irn_mode(op); + unsigned src_bits = get_mode_size_bits(src_mode); + unsigned dest_bits = get_mode_size_bits(dest_mode); + /* downconvs are a nop */ + if (src_bits <= dest_bits) + return upper_bits_clean(op, mode); + if (dest_bits <= get_mode_size_bits(mode) + && mode_is_signed(dest_mode) == mode_is_signed(mode)) + return true; + return false; + } + + case iro_Proj: { + ir_node *pred = get_Proj_pred(node); + switch (get_irn_opcode(pred)) { + case iro_Load: { + ir_mode *load_mode = get_Load_mode(pred); + unsigned load_bits = get_mode_size_bits(load_mode); + unsigned bits = get_mode_size_bits(mode); + if (load_bits > bits) + return false; + if (mode_is_signed(mode) != mode_is_signed(load_mode)) + return false; + return true; + } + default: + break; + } + } + default: + break; + } return false; } @@ -139,8 +251,7 @@ static ir_node *gen_extension(dbg_info *dbgi, ir_node *block, ir_node *op, ir_mode *orig_mode) { int bits = get_mode_size_bits(orig_mode); - if (bits == 32) - return op; + assert(bits < 32); if (mode_is_signed(orig_mode)) { return gen_sign_extension(dbgi, block, op, bits); @@ -149,74 
+260,71 @@ static ir_node *gen_extension(dbg_info *dbgi, ir_node *block, ir_node *op, } } -/** - * Creates a possible DAG for a constant. - */ -static ir_node *create_const_graph_value(dbg_info *dbgi, ir_node *block, - long value) -{ - ir_node *result; - - /* we need to load hi & lo separately */ - if (value < -4096 || value > 4095) { - ir_node *hi = new_bd_sparc_HiImm(dbgi, block, (int) value); - result = new_bd_sparc_LoImm(dbgi, block, hi, value); - be_dep_on_frame(hi); - } else { - result = new_bd_sparc_Mov_imm(dbgi, block, (int) value); - be_dep_on_frame(result); - } +typedef enum { + MATCH_NONE = 0, + MATCH_COMMUTATIVE = 1U << 0, /**< commutative operation. */ + MATCH_MODE_NEUTRAL = 1U << 1, /**< the higher bits of the inputs don't + influence the significant lower bit at + all (for cases where mode < 32bit) */ +} match_flags_t; +ENUM_BITSET(match_flags_t) - return result; -} +typedef ir_node* (*new_binop_reg_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, ir_node *op2); +typedef ir_node* (*new_binop_fp_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, ir_node *op2, ir_mode *mode); +typedef ir_node* (*new_binop_imm_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, ir_entity *entity, int32_t immediate); +typedef ir_node* (*new_unop_fp_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, ir_mode *mode); /** - * Create a DAG constructing a given Const. - * - * @param irn a Firm const + * checks if a node's value can be encoded as a immediate */ -static ir_node *create_const_graph(ir_node *irn, ir_node *block) +static bool is_imm_encodeable(const ir_node *node) { - tarval *tv = get_Const_tarval(irn); - ir_mode *mode = get_tarval_mode(tv); - dbg_info *dbgi = get_irn_dbg_info(irn); long value; + if (!is_Const(node)) + return false; - - if (mode_is_reference(mode)) { - /* SPARC V8 is 32bit, so we can safely convert a reference tarval into Iu */ - assert(get_mode_size_bits(mode) == get_mode_size_bits(mode_gp)); - tv = tarval_convert_to(tv, mode_gp); - } - - value = get_tarval_long(tv); - return create_const_graph_value(dbgi, block, value); + value = get_tarval_long(get_Const_tarval(node)); + return sparc_is_value_imm_encodeable(value); } -typedef enum { - MATCH_NONE = 0, - MATCH_COMMUTATIVE = 1 << 0, /**< commutative operation. */ -} match_flags_t; - -typedef ir_node* (*new_binop_reg_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, ir_node *op2); -typedef ir_node* (*new_binop_fp_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, ir_node *op2, ir_mode *mode); -typedef ir_node* (*new_binop_imm_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, int simm13); -typedef ir_node* (*new_unop_fp_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, ir_mode *mode); +static bool needs_extension(ir_node *op) +{ + ir_mode *mode = get_irn_mode(op); + if (get_mode_size_bits(mode) >= get_mode_size_bits(mode_gp)) + return false; + return !upper_bits_clean(op, mode); +} /** - * checks if a node's value can be encoded as a immediate + * Check, if a given node is a Down-Conv, ie. a integer Conv + * from a mode with a mode with more bits to a mode with lesser bits. + * Moreover, we return only true if the node has not more than 1 user. 
* + * @param node the node + * @return non-zero if node is a Down-Conv */ -static bool is_imm_encodeable(const ir_node *node) +static bool is_downconv(const ir_node *node) { - long val; + ir_mode *src_mode; + ir_mode *dest_mode; - if (!is_Const(node)) + if (!is_Conv(node)) return false; - val = get_tarval_long(get_Const_tarval(node)); + src_mode = get_irn_mode(get_Conv_op(node)); + dest_mode = get_irn_mode(node); + return + mode_needs_gp_reg(src_mode) && + mode_needs_gp_reg(dest_mode) && + get_mode_size_bits(dest_mode) <= get_mode_size_bits(src_mode); +} - return -4096 <= val && val <= 4095; +static ir_node *skip_downconv(ir_node *node) +{ + while (is_downconv(node)) { + node = get_Conv_op(node); + } + return node; } /** @@ -225,32 +333,63 @@ static bool is_imm_encodeable(const ir_node *node) * @param new_reg register generation function ptr * @param new_imm immediate generation function ptr */ -static ir_node *gen_helper_binop(ir_node *node, match_flags_t flags, - new_binop_reg_func new_reg, new_binop_imm_func new_imm) +static ir_node *gen_helper_binop_args(ir_node *node, + ir_node *op1, ir_node *op2, + match_flags_t flags, + new_binop_reg_func new_reg, + new_binop_imm_func new_imm) { - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *op1 = get_binop_left(node); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *new_op1; - ir_node *op2 = get_binop_right(node); ir_node *new_op2; - dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode1; + ir_mode *mode2; - if (is_imm_encodeable(op2)) { - ir_node *new_op1 = be_transform_node(op1); - return new_imm(dbgi, block, new_op1, get_tarval_long(get_Const_tarval(op2))); + if (flags & MATCH_MODE_NEUTRAL) { + op1 = skip_downconv(op1); + op2 = skip_downconv(op2); } + mode1 = get_irn_mode(op1); + mode2 = get_irn_mode(op2); + /* we shouldn't see 64bit code */ + assert(get_mode_size_bits(mode1) <= 32); + assert(get_mode_size_bits(mode2) <= 32); + if (is_imm_encodeable(op2)) { + int32_t immediate = get_tarval_long(get_Const_tarval(op2)); + new_op1 = be_transform_node(op1); + if (! (flags & MATCH_MODE_NEUTRAL) && needs_extension(op1)) { + new_op1 = gen_extension(dbgi, block, new_op1, mode1); + } + return new_imm(dbgi, block, new_op1, NULL, immediate); + } new_op2 = be_transform_node(op2); + if (! (flags & MATCH_MODE_NEUTRAL) && needs_extension(op2)) { + new_op2 = gen_extension(dbgi, block, new_op2, mode2); + } if ((flags & MATCH_COMMUTATIVE) && is_imm_encodeable(op1)) { - return new_imm(dbgi, block, new_op2, get_tarval_long(get_Const_tarval(op1)) ); + int32_t immediate = get_tarval_long(get_Const_tarval(op1)); + return new_imm(dbgi, block, new_op2, NULL, immediate); } new_op1 = be_transform_node(op1); - + if (! 
(flags & MATCH_MODE_NEUTRAL) && needs_extension(op1)) { + new_op1 = gen_extension(dbgi, block, new_op1, mode1); + } return new_reg(dbgi, block, new_op1, new_op2); } +static ir_node *gen_helper_binop(ir_node *node, match_flags_t flags, + new_binop_reg_func new_reg, + new_binop_imm_func new_imm) +{ + ir_node *op1 = get_binop_left(node); + ir_node *op2 = get_binop_right(node); + return gen_helper_binop_args(node, op1, op2, flags, new_reg, new_imm); +} + /** * helper function for FP binop operations */ @@ -285,25 +424,176 @@ static ir_node *gen_helper_unfpop(ir_node *node, ir_mode *mode, new_unop_fp_func new_func_double, new_unop_fp_func new_func_quad) { - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *op1 = get_binop_left(node); - ir_node *new_op1 = be_transform_node(op1); - dbg_info *dbgi = get_irn_dbg_info(node); - unsigned bits = get_mode_size_bits(mode); + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *op = get_unop_op(node); + ir_node *new_op = be_transform_node(op); + dbg_info *dbgi = get_irn_dbg_info(node); + unsigned bits = get_mode_size_bits(mode); switch (bits) { case 32: - return new_func_single(dbgi, block, new_op1, mode); + return new_func_single(dbgi, block, new_op, mode); case 64: - return new_func_double(dbgi, block, new_op1, mode); + return new_func_double(dbgi, block, new_op, mode); case 128: - return new_func_quad(dbgi, block, new_op1, mode); + return new_func_quad(dbgi, block, new_op, mode); default: break; } panic("unsupported mode %+F for float op", mode); } +typedef ir_node* (*new_binopx_imm_func)(dbg_info *dbgi, ir_node *block, + ir_node *op1, ir_node *flags, + ir_entity *imm_entity, int32_t imm); + +typedef ir_node* (*new_binopx_reg_func)(dbg_info *dbgi, ir_node *block, + ir_node *op1, ir_node *op2, + ir_node *flags); + +static ir_node *gen_helper_binopx(ir_node *node, match_flags_t match_flags, + new_binopx_reg_func new_binopx_reg, + new_binopx_imm_func new_binopx_imm) +{ + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *op1 = get_irn_n(node, 0); + ir_node *op2 = get_irn_n(node, 1); + ir_node *flags = get_irn_n(node, 2); + ir_node *new_flags = be_transform_node(flags); + ir_node *new_op1; + ir_node *new_op2; + + /* only support for mode-neutral implemented so far */ + assert(match_flags & MATCH_MODE_NEUTRAL); + + if (is_imm_encodeable(op2)) { + int32_t immediate = get_tarval_long(get_Const_tarval(op2)); + new_op1 = be_transform_node(op1); + return new_binopx_imm(dbgi, block, new_op1, new_flags, NULL, immediate); + } + new_op2 = be_transform_node(op2); + if ((match_flags & MATCH_COMMUTATIVE) && is_imm_encodeable(op1)) { + int32_t immediate = get_tarval_long(get_Const_tarval(op1)); + return new_binopx_imm(dbgi, block, new_op2, new_flags, NULL, immediate); + } + new_op1 = be_transform_node(op1); + return new_binopx_reg(dbgi, block, new_op1, new_op2, new_flags); + +} + +static ir_node *get_g0(ir_graph *irg) +{ + if (start_g0 == NULL) { + /* this is already the transformed start node */ + ir_node *start = get_irg_start(irg); + assert(is_sparc_Start(start)); + start_g0 = new_r_Proj(start, mode_gp, start_g0_offset); + } + return start_g0; +} + +static ir_node *get_g7(ir_graph *irg) +{ + if (start_g7 == NULL) { + ir_node *start = get_irg_start(irg); + assert(is_sparc_Start(start)); + start_g7 = new_r_Proj(start, mode_gp, start_g7_offset); + } + return start_g7; +} + +static ir_node *make_tls_offset(dbg_info *dbgi, ir_node *block, + ir_entity *entity, int32_t offset) 
+{ + ir_node *hi = new_bd_sparc_SetHi(dbgi, block, entity, offset); + ir_node *low = new_bd_sparc_Xor_imm(dbgi, block, hi, entity, offset); + return low; +} + +static ir_node *make_address(dbg_info *dbgi, ir_node *block, ir_entity *entity, + int32_t offset) +{ + if (get_entity_owner(entity) == get_tls_type()) { + ir_graph *irg = get_irn_irg(block); + ir_node *g7 = get_g7(irg); + ir_node *offsetn = make_tls_offset(dbgi, block, entity, offset); + ir_node *add = new_bd_sparc_Add_reg(dbgi, block, g7, offsetn); + return add; + } else { + ir_node *hi = new_bd_sparc_SetHi(dbgi, block, entity, offset); + ir_node *low = new_bd_sparc_Or_imm(dbgi, block, hi, entity, offset); + return low; + } +} + +typedef struct address_t { + ir_node *ptr; + ir_node *ptr2; + ir_entity *entity; + int32_t offset; +} address_t; + +/** + * Match a load/store address + */ +static void match_address(ir_node *ptr, address_t *address, bool use_ptr2) +{ + ir_node *base = ptr; + ir_node *ptr2 = NULL; + int32_t offset = 0; + ir_entity *entity = NULL; + + if (is_Add(base)) { + ir_node *add_right = get_Add_right(base); + if (is_Const(add_right)) { + base = get_Add_left(base); + offset += get_tarval_long(get_Const_tarval(add_right)); + } + } + /* Note that we don't match sub(x, Const) or chains of adds/subs + * because this should all be normalized by now */ + + /* we only use the symconst if we're the only user otherwise we probably + * won't save anything but produce multiple sethi+or combinations with + * just different offsets */ + if (is_SymConst(base) && get_irn_n_edges(base) == 1) { + ir_entity *sc_entity = get_SymConst_entity(base); + dbg_info *dbgi = get_irn_dbg_info(ptr); + ir_node *block = get_nodes_block(ptr); + ir_node *new_block = be_transform_node(block); + + if (get_entity_owner(sc_entity) == get_tls_type()) { + if (!use_ptr2) { + goto only_offset; + } else { + ptr2 = make_tls_offset(dbgi, new_block, sc_entity, offset); + offset = 0; + base = get_g7(get_irn_irg(base)); + } + } else { + entity = sc_entity; + base = new_bd_sparc_SetHi(dbgi, new_block, entity, offset); + } + } else if (use_ptr2 && is_Add(base) && offset == 0) { + ptr2 = be_transform_node(get_Add_right(base)); + base = be_transform_node(get_Add_left(base)); + } else { +only_offset: + if (sparc_is_value_imm_encodeable(offset)) { + base = be_transform_node(base); + } else { + base = be_transform_node(ptr); + offset = 0; + } + } + + address->ptr = base; + address->ptr2 = ptr2; + address->entity = entity; + address->offset = offset; +} + /** * Creates an sparc Add. * @@ -313,13 +603,74 @@ static ir_node *gen_helper_unfpop(ir_node *node, ir_mode *mode, static ir_node *gen_Add(ir_node *node) { ir_mode *mode = get_irn_mode(node); + ir_node *right; if (mode_is_float(mode)) { return gen_helper_binfpop(node, mode, new_bd_sparc_fadd_s, new_bd_sparc_fadd_d, new_bd_sparc_fadd_q); } - return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_Add_reg, new_bd_sparc_Add_imm); + /* special case: + 0x1000 can be represented as - 0x1000 */ + right = get_Add_right(node); + if (is_Const(right)) { + ir_node *left = get_Add_left(node); + ir_tarval *tv; + uint32_t val; + /* is this simple address arithmetic? then we can let the linker do + * the calculation. 
*/ + if (is_SymConst(left) && get_irn_n_edges(left) == 1) { + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = be_transform_node(get_nodes_block(node)); + address_t address; + + /* the value of use_ptr2 shouldn't matter here */ + match_address(node, &address, false); + assert(is_sparc_SetHi(address.ptr)); + return new_bd_sparc_Or_imm(dbgi, block, address.ptr, + address.entity, address.offset); + } + + tv = get_Const_tarval(right); + val = get_tarval_long(tv); + if (val == 0x1000) { + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *op = get_Add_left(node); + ir_node *new_op = be_transform_node(op); + return new_bd_sparc_Sub_imm(dbgi, block, new_op, NULL, -0x1000); + } + } + + return gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_MODE_NEUTRAL, + new_bd_sparc_Add_reg, new_bd_sparc_Add_imm); +} + +static ir_node *gen_AddCC_t(ir_node *node) +{ + return gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_MODE_NEUTRAL, + new_bd_sparc_AddCC_reg, new_bd_sparc_AddCC_imm); +} + +static ir_node *gen_Proj_AddCC_t(ir_node *node) +{ + long pn = get_Proj_proj(node); + ir_node *pred = get_Proj_pred(node); + ir_node *new_pred = be_transform_node(pred); + + switch (pn) { + case pn_sparc_AddCC_t_res: + return new_r_Proj(new_pred, mode_gp, pn_sparc_AddCC_res); + case pn_sparc_AddCC_t_flags: + return new_r_Proj(new_pred, mode_flags, pn_sparc_AddCC_flags); + default: + panic("Invalid AddCC_t proj found"); + } +} + +static ir_node *gen_AddX_t(ir_node *node) +{ + return gen_helper_binopx(node, MATCH_COMMUTATIVE | MATCH_MODE_NEUTRAL, + new_bd_sparc_AddX_reg, new_bd_sparc_AddX_imm); } /** @@ -337,45 +688,74 @@ static ir_node *gen_Sub(ir_node *node) new_bd_sparc_fsub_d, new_bd_sparc_fsub_q); } - return gen_helper_binop(node, MATCH_NONE, new_bd_sparc_Sub_reg, new_bd_sparc_Sub_imm); + return gen_helper_binop(node, MATCH_MODE_NEUTRAL, + new_bd_sparc_Sub_reg, new_bd_sparc_Sub_imm); +} + +static ir_node *gen_SubCC_t(ir_node *node) +{ + return gen_helper_binop(node, MATCH_MODE_NEUTRAL, + new_bd_sparc_SubCC_reg, new_bd_sparc_SubCC_imm); +} + +static ir_node *gen_Proj_SubCC_t(ir_node *node) +{ + long pn = get_Proj_proj(node); + ir_node *pred = get_Proj_pred(node); + ir_node *new_pred = be_transform_node(pred); + + switch (pn) { + case pn_sparc_SubCC_t_res: + return new_r_Proj(new_pred, mode_gp, pn_sparc_SubCC_res); + case pn_sparc_SubCC_t_flags: + return new_r_Proj(new_pred, mode_flags, pn_sparc_SubCC_flags); + default: + panic("Invalid SubCC_t proj found"); + } +} + +static ir_node *gen_SubX_t(ir_node *node) +{ + return gen_helper_binopx(node, MATCH_MODE_NEUTRAL, + new_bd_sparc_SubX_reg, new_bd_sparc_SubX_imm); } -static ir_node *create_ldf(dbg_info *dbgi, ir_node *block, ir_node *ptr, - ir_node *mem, ir_mode *mode, ir_entity *entity, - int entity_sign, long offset, bool is_frame_entity) +ir_node *create_ldf(dbg_info *dbgi, ir_node *block, ir_node *ptr, + ir_node *mem, ir_mode *mode, ir_entity *entity, + long offset, bool is_frame_entity) { unsigned bits = get_mode_size_bits(mode); assert(mode_is_float(mode)); if (bits == 32) { return new_bd_sparc_Ldf_s(dbgi, block, ptr, mem, mode, entity, - entity_sign, offset, is_frame_entity); + offset, is_frame_entity); } else if (bits == 64) { return new_bd_sparc_Ldf_d(dbgi, block, ptr, mem, mode, entity, - entity_sign, offset, is_frame_entity); + offset, is_frame_entity); } else { assert(bits == 128); return new_bd_sparc_Ldf_q(dbgi, block, ptr, mem, mode, entity, - entity_sign, offset, is_frame_entity); + 
offset, is_frame_entity); } } -static ir_node *create_stf(dbg_info *dbgi, ir_node *block, ir_node *ptr, - ir_node *value, ir_node *mem, ir_mode *mode, - ir_entity *entity, int entity_sign, long offset, - bool is_frame_entity) +ir_node *create_stf(dbg_info *dbgi, ir_node *block, ir_node *value, + ir_node *ptr, ir_node *mem, ir_mode *mode, + ir_entity *entity, long offset, + bool is_frame_entity) { unsigned bits = get_mode_size_bits(mode); assert(mode_is_float(mode)); if (bits == 32) { - return new_bd_sparc_Stf_s(dbgi, block, ptr, value, mem, mode, entity, - entity_sign, offset, is_frame_entity); + return new_bd_sparc_Stf_s(dbgi, block, value, ptr, mem, mode, entity, + offset, is_frame_entity); } else if (bits == 64) { - return new_bd_sparc_Stf_d(dbgi, block, ptr, value, mem, mode, entity, - entity_sign, offset, is_frame_entity); + return new_bd_sparc_Stf_d(dbgi, block, value, ptr, mem, mode, entity, + offset, is_frame_entity); } else { assert(bits == 128); - return new_bd_sparc_Stf_q(dbgi, block, ptr, value, mem, mode, entity, - entity_sign, offset, is_frame_entity); + return new_bd_sparc_Stf_q(dbgi, block, value, ptr, mem, mode, entity, + offset, is_frame_entity); } } @@ -387,19 +767,34 @@ static ir_node *create_stf(dbg_info *dbgi, ir_node *block, ir_node *ptr, */ static ir_node *gen_Load(ir_node *node) { + dbg_info *dbgi = get_irn_dbg_info(node); ir_mode *mode = get_Load_mode(node); ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *ptr = get_Load_ptr(node); - ir_node *new_ptr = be_transform_node(ptr); ir_node *mem = get_Load_mem(node); ir_node *new_mem = be_transform_node(mem); - dbg_info *dbgi = get_irn_dbg_info(node); ir_node *new_load = NULL; + address_t address; + + if (get_Load_unaligned(node) == align_non_aligned) { + panic("sparc: transformation of unaligned Loads not implemented yet"); + } if (mode_is_float(mode)) { - new_load = create_ldf(dbgi, block, new_ptr, new_mem, mode, NULL, 0, 0, false); + match_address(ptr, &address, false); + new_load = create_ldf(dbgi, block, address.ptr, new_mem, mode, + address.entity, address.offset, false); } else { - new_load = new_bd_sparc_Ld(dbgi, block, new_ptr, new_mem, mode, NULL, 0, 0, false); + match_address(ptr, &address, true); + if (address.ptr2 != NULL) { + assert(address.entity == NULL && address.offset == 0); + new_load = new_bd_sparc_Ld_reg(dbgi, block, address.ptr, + address.ptr2, new_mem, mode); + } else { + new_load = new_bd_sparc_Ld_imm(dbgi, block, address.ptr, new_mem, + mode, address.entity, address.offset, + false); + } } set_irn_pinned(new_load, get_irn_pinned(node)); @@ -416,19 +811,44 @@ static ir_node *gen_Store(ir_node *node) { ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *ptr = get_Store_ptr(node); - ir_node *new_ptr = be_transform_node(ptr); ir_node *mem = get_Store_mem(node); ir_node *new_mem = be_transform_node(mem); ir_node *val = get_Store_value(node); - ir_node *new_val = be_transform_node(val); ir_mode *mode = get_irn_mode(val); dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *new_store = NULL; + ir_node *new_store = NULL; + address_t address; + + if (get_Store_unaligned(node) == align_non_aligned) { + panic("sparc: transformation of unaligned Stores not implemented yet"); + } if (mode_is_float(mode)) { - new_store = create_stf(dbgi, block, new_ptr, new_val, new_mem, mode, NULL, 0, 0, false); + ir_node *new_val = be_transform_node(val); + /* TODO: variants with reg+reg address mode */ + match_address(ptr, &address, false); + new_store = create_stf(dbgi, block, new_val, 
address.ptr, new_mem, + mode, address.entity, address.offset, false); } else { - new_store = new_bd_sparc_St(dbgi, block, new_ptr, new_val, new_mem, mode, NULL, 0, 0, false); + ir_node *new_val; + unsigned dest_bits = get_mode_size_bits(mode); + while (is_downconv(node) + && get_mode_size_bits(get_irn_mode(node)) >= dest_bits) { + val = get_Conv_op(val); + } + new_val = be_transform_node(val); + + assert(dest_bits <= 32); + match_address(ptr, &address, true); + if (address.ptr2 != NULL) { + assert(address.entity == NULL && address.offset == 0); + new_store = new_bd_sparc_St_reg(dbgi, block, new_val, address.ptr, + address.ptr2, new_mem, mode); + } else { + new_store = new_bd_sparc_St_imm(dbgi, block, new_val, address.ptr, + new_mem, mode, address.entity, + address.offset, false); + } } set_irn_pinned(new_store, get_irn_pinned(node)); @@ -449,8 +869,7 @@ static ir_node *gen_Mul(ir_node *node) new_bd_sparc_fmul_d, new_bd_sparc_fmul_q); } - assert(mode_is_data(mode)); - return gen_helper_binop(node, MATCH_COMMUTATIVE, + return gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_MODE_NEUTRAL, new_bd_sparc_Mul_reg, new_bd_sparc_Mul_imm); } @@ -464,17 +883,28 @@ static ir_node *gen_Mulh(ir_node *node) { ir_mode *mode = get_irn_mode(node); ir_node *mul; - ir_node *proj_res_hi; if (mode_is_float(mode)) panic("FP not supported yet"); + if (mode_is_signed(mode)) { + mul = gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_SMulh_reg, new_bd_sparc_SMulh_imm); + return new_r_Proj(mul, mode_gp, pn_sparc_SMulh_low); + } else { + mul = gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_UMulh_reg, new_bd_sparc_UMulh_imm); + return new_r_Proj(mul, mode_gp, pn_sparc_UMulh_low); + } +} - assert(mode_is_data(mode)); - mul = gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_Mulh_reg, new_bd_sparc_Mulh_imm); - //arch_irn_add_flags(mul, arch_irn_flags_modify_flags); - proj_res_hi = new_r_Proj(mul, mode_gp, pn_sparc_Mulh_low); - return proj_res_hi; +static ir_node *gen_sign_extension_value(ir_node *node) +{ + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + ir_node *new_node = be_transform_node(node); + /* TODO: we could do some shortcuts for some value types probably. 
+ * (For constants or other cases where we know the sign bit in + * advance) */ + return new_bd_sparc_Sra_imm(NULL, new_block, new_node, NULL, 31); } /** @@ -484,50 +914,47 @@ static ir_node *gen_Mulh(ir_node *node) */ static ir_node *gen_Div(ir_node *node) { - ir_mode *mode = get_Div_resmode(node); - ir_node *res; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + ir_mode *mode = get_Div_resmode(node); + ir_node *left = get_Div_left(node); + ir_node *left_low = be_transform_node(left); + ir_node *right = get_Div_right(node); + ir_node *res; - assert(!mode_is_float(mode)); - if (mode_is_signed(mode)) { - res = gen_helper_binop(node, 0, new_bd_sparc_SDiv_reg, - new_bd_sparc_SDiv_imm); - } else { - res = gen_helper_binop(node, 0, new_bd_sparc_UDiv_reg, - new_bd_sparc_UDiv_imm); + if (mode_is_float(mode)) { + return gen_helper_binfpop(node, mode, new_bd_sparc_fdiv_s, + new_bd_sparc_fdiv_d, new_bd_sparc_fdiv_q); } - return res; -} -static ir_node *gen_Quot(ir_node *node) -{ - ir_mode *mode = get_Quot_resmode(node); - assert(mode_is_float(mode)); - return gen_helper_binfpop(node, mode, new_bd_sparc_fdiv_s, - new_bd_sparc_fdiv_d, new_bd_sparc_fdiv_q); -} - -static ir_node *gen_Abs(ir_node *node) -{ - ir_mode *const mode = get_irn_mode(node); + if (mode_is_signed(mode)) { + ir_node *left_high = gen_sign_extension_value(left); - if (mode_is_float(mode)) { - return gen_helper_unfpop(node, mode, new_bd_sparc_fabs_s, - new_bd_sparc_fabs_d, new_bd_sparc_fabs_q); + if (is_imm_encodeable(right)) { + int32_t immediate = get_tarval_long(get_Const_tarval(right)); + res = new_bd_sparc_SDiv_imm(dbgi, new_block, left_high, left_low, + NULL, immediate); + } else { + ir_node *new_right = be_transform_node(right); + res = new_bd_sparc_SDiv_reg(dbgi, new_block, left_high, left_low, + new_right); + } } else { - ir_node *const block = be_transform_node(get_nodes_block(node)); - dbg_info *const dbgi = get_irn_dbg_info(node); - ir_node *const op = get_Abs_op(node); - ir_node *const new_op = be_transform_node(op); - ir_node *const sra = new_bd_sparc_Sra_imm(dbgi, block, new_op, 31); - ir_node *const xor = new_bd_sparc_Xor_reg(dbgi, block, new_op, sra); - ir_node *const sub = new_bd_sparc_Sub_reg(dbgi, block, xor, sra); - return sub; + ir_graph *irg = get_irn_irg(node); + ir_node *left_high = get_g0(irg); + if (is_imm_encodeable(right)) { + int32_t immediate = get_tarval_long(get_Const_tarval(right)); + res = new_bd_sparc_UDiv_imm(dbgi, new_block, left_high, left_low, + NULL, immediate); + } else { + ir_node *new_right = be_transform_node(right); + res = new_bd_sparc_UDiv_reg(dbgi, new_block, left_high, left_low, + new_right); + } } -} -static ir_node *get_g0(void) -{ - return be_prolog_get_reg_value(abihelper, &sparc_gp_regs[REG_G0]); + return res; } /** @@ -537,42 +964,113 @@ static ir_node *get_g0(void) */ static ir_node *gen_Not(ir_node *node) { + ir_node *op = get_Not_op(node); + ir_graph *irg = get_irn_irg(node); + ir_node *zero = get_g0(irg); dbg_info *dbgi = get_irn_dbg_info(node); ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *op = get_Not_op(node); ir_node *new_op = be_transform_node(op); - ir_node *zero = get_g0(); + /* Note: Not(Eor()) is normalize in firm localopts already so + * we don't match it for xnor here */ + + /* Not can be represented with xnor 0, n */ return new_bd_sparc_XNor_reg(dbgi, block, zero, new_op); } +static ir_node *gen_helper_bitop(ir_node *node, + new_binop_reg_func new_reg, + 
new_binop_imm_func new_imm, + new_binop_reg_func new_not_reg, + new_binop_imm_func new_not_imm, + match_flags_t flags) +{ + ir_node *op1 = get_binop_left(node); + ir_node *op2 = get_binop_right(node); + if (is_Not(op1)) { + return gen_helper_binop_args(node, op2, get_Not_op(op1), + flags, + new_not_reg, new_not_imm); + } + if (is_Not(op2)) { + return gen_helper_binop_args(node, op1, get_Not_op(op2), + flags, + new_not_reg, new_not_imm); + } + if (is_Const(op2) && get_irn_n_edges(op2) == 1) { + ir_tarval *tv = get_Const_tarval(op2); + long value = get_tarval_long(tv); + if (!sparc_is_value_imm_encodeable(value)) { + long notvalue = ~value; + if ((notvalue & 0x3ff) == 0) { + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *new_op2 + = new_bd_sparc_SetHi(NULL, new_block, NULL, notvalue); + ir_node *new_op1 = be_transform_node(op1); + ir_node *result + = new_not_reg(dbgi, new_block, new_op1, new_op2); + return result; + } + } + } + return gen_helper_binop_args(node, op1, op2, + flags | MATCH_COMMUTATIVE, + new_reg, new_imm); +} + static ir_node *gen_And(ir_node *node) { - return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_And_reg, new_bd_sparc_And_imm); + return gen_helper_bitop(node, + new_bd_sparc_And_reg, + new_bd_sparc_And_imm, + new_bd_sparc_AndN_reg, + new_bd_sparc_AndN_imm, + MATCH_MODE_NEUTRAL); } static ir_node *gen_Or(ir_node *node) { - return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_Or_reg, new_bd_sparc_Or_imm); + return gen_helper_bitop(node, + new_bd_sparc_Or_reg, + new_bd_sparc_Or_imm, + new_bd_sparc_OrN_reg, + new_bd_sparc_OrN_imm, + MATCH_MODE_NEUTRAL); } static ir_node *gen_Eor(ir_node *node) { - return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_Xor_reg, new_bd_sparc_Xor_imm); + return gen_helper_bitop(node, + new_bd_sparc_Xor_reg, + new_bd_sparc_Xor_imm, + new_bd_sparc_XNor_reg, + new_bd_sparc_XNor_imm, + MATCH_MODE_NEUTRAL); } static ir_node *gen_Shl(ir_node *node) { + ir_mode *mode = get_irn_mode(node); + if (get_mode_modulo_shift(mode) != 32) + panic("modulo_shift!=32 not supported by sparc backend"); return gen_helper_binop(node, MATCH_NONE, new_bd_sparc_Sll_reg, new_bd_sparc_Sll_imm); } static ir_node *gen_Shr(ir_node *node) { - return gen_helper_binop(node, MATCH_NONE, new_bd_sparc_Slr_reg, new_bd_sparc_Slr_imm); + ir_mode *mode = get_irn_mode(node); + if (get_mode_modulo_shift(mode) != 32) + panic("modulo_shift!=32 not supported by sparc backend"); + return gen_helper_binop(node, MATCH_NONE, new_bd_sparc_Srl_reg, new_bd_sparc_Srl_imm); } static ir_node *gen_Shrs(ir_node *node) { + ir_mode *mode = get_irn_mode(node); + if (get_mode_modulo_shift(mode) != 32) + panic("modulo_shift!=32 not supported by sparc backend"); return gen_helper_binop(node, MATCH_NONE, new_bd_sparc_Sra_reg, new_bd_sparc_Sra_imm); } @@ -596,24 +1094,18 @@ static ir_node *gen_Minus(ir_node *node) dbgi = get_irn_dbg_info(node); op = get_Minus_op(node); new_op = be_transform_node(op); - zero = get_g0(); + zero = get_g0(get_irn_irg(node)); return new_bd_sparc_Sub_reg(dbgi, block, zero, new_op); } -static ir_node *make_addr(dbg_info *dbgi, ir_entity *entity) -{ - ir_node *block = get_irg_start_block(current_ir_graph); - ir_node *node = new_bd_sparc_SymConst(dbgi, block, entity); - be_dep_on_frame(node); - return node; -} - /** * Create an entity for a given (floating point) tarval */ -static ir_entity *create_float_const_entity(tarval *tv) +static ir_entity 
*create_float_const_entity(ir_tarval *tv) { - ir_entity *entity = (ir_entity*) pmap_get(env_cg->constants, tv); + const arch_env_t *arch_env = be_get_irg_arch_env(current_ir_graph); + sparc_isa_t *isa = (sparc_isa_t*) arch_env; + ir_entity *entity = (ir_entity*) pmap_get(isa->constants, tv); ir_initializer_t *initializer; ir_mode *mode; ir_type *type; @@ -632,92 +1124,118 @@ static ir_entity *create_float_const_entity(tarval *tv) initializer = create_initializer_tarval(tv); set_entity_initializer(entity, initializer); - pmap_insert(env_cg->constants, tv, entity); + pmap_insert(isa->constants, tv, entity); return entity; } -/** - * Transforms a Const node. - * - * @param node the ir Const node - * @return The transformed sparc node. - */ -static ir_node *gen_Const(ir_node *node) +static ir_node *gen_float_const(dbg_info *dbgi, ir_node *block, ir_tarval *tv) { - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_mode *mode = get_irn_mode(node); - - if (mode_is_float(mode)) { - dbg_info *dbgi = get_irn_dbg_info(node); - tarval *tv = get_Const_tarval(node); - ir_entity *entity = create_float_const_entity(tv); - ir_node *addr = make_addr(dbgi, entity); - ir_node *mem = new_NoMem(); - ir_node *new_op - = create_ldf(dbgi, block, addr, mem, mode, NULL, 0, 0, false); - ir_node *proj = new_Proj(new_op, mode, pn_sparc_Ldf_res); + ir_entity *entity = create_float_const_entity(tv); + ir_node *hi = new_bd_sparc_SetHi(dbgi, block, entity, 0); + ir_node *mem = get_irg_no_mem(current_ir_graph); + ir_mode *mode = get_tarval_mode(tv); + ir_node *new_op + = create_ldf(dbgi, block, hi, mem, mode, entity, 0, false); + ir_node *proj = new_r_Proj(new_op, mode, pn_sparc_Ldf_res); - set_irn_pinned(new_op, op_pin_state_floats); - return proj; - } + set_irn_pinned(new_op, op_pin_state_floats); + return proj; +} - /* use the 0 register instead of a 0-constant */ - if (is_Const_null(node)) { - return get_g0(); +static ir_node *create_int_const(ir_node *block, int32_t value) +{ + if (value == 0) { + ir_graph *irg = get_irn_irg(block); + return get_g0(irg); + } else if (sparc_is_value_imm_encodeable(value)) { + ir_graph *irg = get_irn_irg(block); + return new_bd_sparc_Or_imm(NULL, block, get_g0(irg), NULL, value); + } else { + ir_node *hi = new_bd_sparc_SetHi(NULL, block, NULL, value); + if ((value & 0x3ff) != 0) { + return new_bd_sparc_Or_imm(NULL, block, hi, NULL, value & 0x3ff); + } else { + return hi; + } } - - return create_const_graph(node, block); } -static ir_mode *get_cmp_mode(ir_node *b_value) +static ir_node *gen_Const(ir_node *node) { - ir_node *pred; - ir_node *op; + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_mode *mode = get_irn_mode(node); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_tarval *tv = get_Const_tarval(node); + int32_t val; - if (!is_Proj(b_value)) - panic("can't determine cond signednes"); - pred = get_Proj_pred(b_value); - if (!is_Cmp(pred)) - panic("can't determine cond signednes (no cmp)"); - op = get_Cmp_left(pred); - return get_irn_mode(op); + if (mode_is_float(mode)) { + return gen_float_const(dbgi, block, tv); + } + val = (int32_t)get_tarval_long(tv); + assert((long)val == get_tarval_long(tv)); + return create_int_const(block, val); } -/** - * Transform Cond nodes - */ -static ir_node *gen_Cond(ir_node *node) +static ir_node *gen_Switch(ir_node *node) { - ir_node *selector = get_Cond_selector(node); - ir_mode *mode = get_irn_mode(selector); - ir_mode *cmp_mode; - ir_node *block; - ir_node *flag_node; - bool is_unsigned; - pn_Cmp pnc; - dbg_info *dbgi; 
+ dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + ir_graph *irg = get_irn_irg(block); + ir_node *selector = get_Switch_selector(node); + ir_node *new_selector = be_transform_node(selector); + const ir_switch_table *table = get_Switch_table(node); + unsigned n_outs = get_Switch_n_outs(node); + ir_entity *entity; + ir_node *table_address; + ir_node *idx; + ir_node *load; + ir_node *address; - // switch/case jumps - if (mode != mode_b) { - panic("SwitchJmp not supported yet"); - } + table = ir_switch_table_duplicate(irg, table); + + /* switch with smaller mode not implemented yet */ + assert(get_mode_size_bits(get_irn_mode(selector)) == 32); - // regular if/else jumps - assert(is_Proj(selector)); - assert(is_Cmp(get_Proj_pred(selector))); + entity = new_entity(NULL, id_unique("TBL%u"), get_unknown_type()); + set_entity_visibility(entity, ir_visibility_private); + add_entity_linkage(entity, IR_LINKAGE_CONSTANT); - cmp_mode = get_cmp_mode(selector); + /* construct base address */ + table_address = make_address(dbgi, new_block, entity, 0); + /* scale index */ + idx = new_bd_sparc_Sll_imm(dbgi, new_block, new_selector, NULL, 2); + /* load from jumptable */ + load = new_bd_sparc_Ld_reg(dbgi, new_block, table_address, idx, + get_irg_no_mem(current_ir_graph), + mode_gp); + address = new_r_Proj(load, mode_gp, pn_sparc_Ld_res); + + return new_bd_sparc_SwitchJmp(dbgi, new_block, address, n_outs, table, entity); +} + +static ir_node *gen_Cond(ir_node *node) +{ + ir_node *selector = get_Cond_selector(node); + ir_node *cmp_left; + ir_mode *cmp_mode; + ir_node *block; + ir_node *flag_node; + ir_relation relation; + dbg_info *dbgi; + /* note: after lower_mode_b we are guaranteed to have a Cmp input */ block = be_transform_node(get_nodes_block(node)); dbgi = get_irn_dbg_info(node); - flag_node = be_transform_node(get_Proj_pred(selector)); - pnc = get_Proj_proj(selector); - is_unsigned = !mode_is_signed(cmp_mode); + cmp_left = get_Cmp_left(selector); + cmp_mode = get_irn_mode(cmp_left); + flag_node = be_transform_node(selector); + relation = get_Cmp_relation(selector); if (mode_is_float(cmp_mode)) { - assert(!is_unsigned); - return new_bd_sparc_fbfcc(dbgi, block, flag_node, pnc); + return new_bd_sparc_fbfcc(dbgi, block, flag_node, relation); } else { - return new_bd_sparc_Bicc(dbgi, block, flag_node, pnc, is_unsigned); + bool is_unsigned = !mode_is_signed(cmp_mode); + return new_bd_sparc_Bicc(dbgi, block, flag_node, relation, is_unsigned); } } @@ -726,17 +1244,17 @@ static ir_node *gen_Cond(ir_node *node) */ static ir_node *gen_Cmp(ir_node *node) { - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *op1 = get_Cmp_left(node); - ir_node *op2 = get_Cmp_right(node); - ir_mode *cmp_mode = get_irn_mode(op1); - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *new_op1 = be_transform_node(op1); - ir_node *new_op2 = be_transform_node(op2); + ir_node *op1 = get_Cmp_left(node); + ir_node *op2 = get_Cmp_right(node); + ir_mode *cmp_mode = get_irn_mode(op1); assert(get_irn_mode(op2) == cmp_mode); if (mode_is_float(cmp_mode)) { - unsigned bits = get_mode_size_bits(cmp_mode); + ir_node *block = be_transform_node(get_nodes_block(node)); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *new_op1 = be_transform_node(op1); + ir_node *new_op2 = be_transform_node(op2); + unsigned bits = get_mode_size_bits(cmp_mode); if (bits == 32) { return new_bd_sparc_fcmp_s(dbgi, block, new_op1, new_op2, cmp_mode); } else if (bits == 
64) { @@ -747,10 +1265,50 @@ static ir_node *gen_Cmp(ir_node *node) } } + /* when we compare a bitop like and,or,... with 0 then we can directly use + * the bitopcc variant. + * Currently we only do this when we're the only user of the node... + */ + if (is_Const(op2) && is_Const_null(op2) && get_irn_n_edges(op1) == 1) { + if (is_And(op1)) { + return gen_helper_bitop(op1, + new_bd_sparc_AndCCZero_reg, + new_bd_sparc_AndCCZero_imm, + new_bd_sparc_AndNCCZero_reg, + new_bd_sparc_AndNCCZero_imm, + MATCH_NONE); + } else if (is_Or(op1)) { + return gen_helper_bitop(op1, + new_bd_sparc_OrCCZero_reg, + new_bd_sparc_OrCCZero_imm, + new_bd_sparc_OrNCCZero_reg, + new_bd_sparc_OrNCCZero_imm, + MATCH_NONE); + } else if (is_Eor(op1)) { + return gen_helper_bitop(op1, + new_bd_sparc_XorCCZero_reg, + new_bd_sparc_XorCCZero_imm, + new_bd_sparc_XNorCCZero_reg, + new_bd_sparc_XNorCCZero_imm, + MATCH_NONE); + } else if (is_Add(op1)) { + return gen_helper_binop(op1, MATCH_COMMUTATIVE, + new_bd_sparc_AddCCZero_reg, + new_bd_sparc_AddCCZero_imm); + } else if (is_Sub(op1)) { + return gen_helper_binop(op1, MATCH_NONE, + new_bd_sparc_SubCCZero_reg, + new_bd_sparc_SubCCZero_imm); + } else if (is_Mul(op1)) { + return gen_helper_binop(op1, MATCH_COMMUTATIVE, + new_bd_sparc_MulCCZero_reg, + new_bd_sparc_MulCCZero_imm); + } + } + /* integer compare */ - new_op1 = gen_extension(dbgi, block, new_op1, cmp_mode); - new_op2 = gen_extension(dbgi, block, new_op2, cmp_mode); - return new_bd_sparc_Cmp_reg(dbgi, block, new_op1, new_op2); + return gen_helper_binop_args(node, op1, op2, MATCH_NONE, + new_bd_sparc_Cmp_reg, new_bd_sparc_Cmp_imm); } /** @@ -758,10 +1316,11 @@ static ir_node *gen_Cmp(ir_node *node) */ static ir_node *gen_SymConst(ir_node *node) { - ir_entity *entity = get_SymConst_entity(node); - dbg_info *dbgi = get_irn_dbg_info(node); - - return make_addr(dbgi, entity); + ir_entity *entity = get_SymConst_entity(node); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + return make_address(dbgi, new_block, entity, 0); } static ir_node *create_fftof(dbg_info *dbgi, ir_node *block, ir_node *op, @@ -797,76 +1356,105 @@ static ir_node *create_fftof(dbg_info *dbgi, ir_node *block, ir_node *op, static ir_node *create_ftoi(dbg_info *dbgi, ir_node *block, ir_node *op, ir_mode *src_mode) { - unsigned bits = get_mode_size_bits(src_mode); + ir_node *ftoi; + unsigned bits = get_mode_size_bits(src_mode); if (bits == 32) { - return new_bd_sparc_fftoi_s(dbgi, block, op, src_mode); + ftoi = new_bd_sparc_fftoi_s(dbgi, block, op, src_mode); } else if (bits == 64) { - return new_bd_sparc_fftoi_d(dbgi, block, op, src_mode); + ftoi = new_bd_sparc_fftoi_d(dbgi, block, op, src_mode); } else { assert(bits == 128); - return new_bd_sparc_fftoi_q(dbgi, block, op, src_mode); + ftoi = new_bd_sparc_fftoi_q(dbgi, block, op, src_mode); + } + + { + ir_graph *irg = get_irn_irg(block); + ir_node *sp = get_irg_frame(irg); + ir_node *nomem = get_irg_no_mem(irg); + ir_node *stf = create_stf(dbgi, block, ftoi, sp, nomem, src_mode, + NULL, 0, true); + ir_node *ld = new_bd_sparc_Ld_imm(dbgi, block, sp, stf, mode_gp, + NULL, 0, true); + ir_node *res = new_r_Proj(ld, mode_gp, pn_sparc_Ld_res); + set_irn_pinned(stf, op_pin_state_floats); + set_irn_pinned(ld, op_pin_state_floats); + return res; } } static ir_node *create_itof(dbg_info *dbgi, ir_node *block, ir_node *op, ir_mode *dst_mode) { - unsigned bits = get_mode_size_bits(dst_mode); + ir_graph *irg = get_irn_irg(block); + 
ir_node *sp = get_irg_frame(irg); + ir_node *nomem = get_irg_no_mem(irg); + ir_node *st = new_bd_sparc_St_imm(dbgi, block, op, sp, nomem, + mode_gp, NULL, 0, true); + ir_node *ldf = new_bd_sparc_Ldf_s(dbgi, block, sp, st, mode_fp, + NULL, 0, true); + ir_node *res = new_r_Proj(ldf, mode_fp, pn_sparc_Ldf_res); + unsigned bits = get_mode_size_bits(dst_mode); + set_irn_pinned(st, op_pin_state_floats); + set_irn_pinned(ldf, op_pin_state_floats); + if (bits == 32) { - return new_bd_sparc_fitof_s(dbgi, block, op, dst_mode); + return new_bd_sparc_fitof_s(dbgi, block, res, dst_mode); } else if (bits == 64) { - return new_bd_sparc_fitof_d(dbgi, block, op, dst_mode); + return new_bd_sparc_fitof_d(dbgi, block, res, dst_mode); } else { assert(bits == 128); - return new_bd_sparc_fitof_q(dbgi, block, op, dst_mode); + return new_bd_sparc_fitof_q(dbgi, block, res, dst_mode); } } -/** - * Transforms a Conv node. - * - */ static ir_node *gen_Conv(ir_node *node) { ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *op = get_Conv_op(node); - ir_node *new_op = be_transform_node(op); ir_mode *src_mode = get_irn_mode(op); ir_mode *dst_mode = get_irn_mode(node); - dbg_info *dbg = get_irn_dbg_info(node); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *new_op; int src_bits = get_mode_size_bits(src_mode); int dst_bits = get_mode_size_bits(dst_mode); + if (src_mode == mode_b) + panic("ConvB not lowered %+F", node); + if (src_mode == dst_mode) - return new_op; + return be_transform_node(op); if (mode_is_float(src_mode) || mode_is_float(dst_mode)) { assert((src_bits <= 64 && dst_bits <= 64) && "quad FP not implemented"); + new_op = be_transform_node(op); if (mode_is_float(src_mode)) { if (mode_is_float(dst_mode)) { /* float -> float conv */ - return create_fftof(dbg, block, new_op, src_mode, dst_mode); + return create_fftof(dbgi, block, new_op, src_mode, dst_mode); } else { /* float -> int conv */ if (!mode_is_signed(dst_mode)) panic("float to unsigned not implemented yet"); - return create_ftoi(dbg, block, new_op, src_mode); + return create_ftoi(dbgi, block, new_op, src_mode); } } else { /* int -> float conv */ - if (!mode_is_signed(src_mode)) - panic("unsigned to float not implemented yet"); - return create_itof(dbg, block, new_op, dst_mode); + if (src_bits < 32) { + new_op = gen_extension(dbgi, block, new_op, src_mode); + } else if (src_bits == 32 && !mode_is_signed(src_mode)) { + panic("unsigned to float not lowered!"); + } + return create_itof(dbgi, block, new_op, dst_mode); } } else { /* complete in gp registers */ int min_bits; ir_mode *min_mode; - if (src_bits == dst_bits) { + if (src_bits == dst_bits || dst_mode == mode_b) { /* kill unnecessary conv */ - return new_op; + return be_transform_node(op); } if (src_bits < dst_bits) { @@ -877,100 +1465,36 @@ static ir_node *gen_Conv(ir_node *node) min_mode = dst_mode; } - if (upper_bits_clean(new_op, min_mode)) { - return new_op; + if (upper_bits_clean(op, min_mode)) { + return be_transform_node(op); } + new_op = be_transform_node(op); if (mode_is_signed(min_mode)) { - return gen_sign_extension(dbg, block, new_op, min_bits); + return gen_sign_extension(dbgi, block, new_op, min_bits); } else { - return gen_zero_extension(dbg, block, new_op, min_bits); + return gen_zero_extension(dbgi, block, new_op, min_bits); } } } static ir_node *gen_Unknown(ir_node *node) { - ir_node *block = get_nodes_block(node); - ir_node *new_block = be_transform_node(block); - dbg_info *dbgi = get_irn_dbg_info(node); - /* just produce a 0 */ ir_mode *mode = 
get_irn_mode(node); if (mode_is_float(mode)) { - panic("FP not implemented"); - be_dep_on_frame(node); - return node; + ir_node *block = be_transform_node(get_nodes_block(node)); + return gen_float_const(NULL, block, get_mode_null(mode)); } else if (mode_needs_gp_reg(mode)) { - return create_const_graph_value(dbgi, new_block, 0); + ir_graph *irg = get_irn_irg(node); + return get_g0(irg); } panic("Unexpected Unknown mode"); } /** - * Produces the type which sits between the stack args and the locals on the - * stack. - */ -static ir_type *sparc_get_between_type(void) -{ - static ir_type *between_type = NULL; - - if (between_type == NULL) { - between_type = new_type_class(new_id_from_str("sparc_between_type")); - set_type_size_bytes(between_type, SPARC_MIN_STACKSIZE); - } - - return between_type; -} - -static void create_stacklayout(ir_graph *irg) -{ - ir_entity *entity = get_irg_entity(irg); - ir_type *function_type = get_entity_type(entity); - be_stack_layout_t *layout = be_get_irg_stack_layout(irg); - ir_type *arg_type; - int p; - int n_params; - - /* calling conventions must be decided by now */ - assert(cconv != NULL); - - /* construct argument type */ - arg_type = new_type_struct(id_mangle_u(get_entity_ident(entity), new_id_from_chars("arg_type", 8))); - n_params = get_method_n_params(function_type); - for (p = 0; p < n_params; ++p) { - reg_or_stackslot_t *param = &cconv->parameters[p]; - char buf[128]; - ident *id; - - if (param->type == NULL) - continue; - - snprintf(buf, sizeof(buf), "param_%d", p); - id = new_id_from_str(buf); - param->entity = new_entity(arg_type, id, param->type); - set_entity_offset(param->entity, param->offset); - } - - memset(layout, 0, sizeof(*layout)); - - layout->frame_type = get_irg_frame_type(irg); - layout->between_type = sparc_get_between_type(); - layout->arg_type = arg_type; - layout->initial_offset = 0; - layout->initial_bias = 0; - layout->stack_dir = -1; - layout->sp_relative = false; - - assert(N_FRAME_TYPES == 3); - layout->order[0] = layout->frame_type; - layout->order[1] = layout->between_type; - layout->order[2] = layout->arg_type; -} - -/** - * transform the start node to the prolog code + initial barrier + * transform the start node to the prolog code */ static ir_node *gen_Start(ir_node *node) { @@ -980,67 +1504,145 @@ static ir_node *gen_Start(ir_node *node) ir_node *block = get_nodes_block(node); ir_node *new_block = be_transform_node(block); dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *mem; + struct obstack *obst = be_get_be_obst(irg); + const arch_register_req_t *req; + size_t n_outs; ir_node *start; - ir_node *sp; - ir_node *fp; - ir_node *barrier; - ir_node *save; - int i; - - /* stackpointer is important at function prolog */ - be_prolog_add_reg(abihelper, sp_reg, + size_t i; + size_t o; + + /* start building list of start constraints */ + assert(obstack_object_size(obst) == 0); + + /* calculate number of outputs */ + n_outs = 4; /* memory, g0, g7, sp */ + if (!current_cconv->omit_fp) + ++n_outs; /* framepointer */ + /* function parameters */ + n_outs += current_cconv->n_param_regs; + /* callee saves */ + if (current_cconv->omit_fp) { + n_outs += ARRAY_SIZE(omit_fp_callee_saves); + } + + start = new_bd_sparc_Start(dbgi, new_block, n_outs); + + o = 0; + + /* first output is memory */ + start_mem_offset = o; + arch_set_irn_register_req_out(start, o, arch_no_register_req); + ++o; + + /* the zero register */ + start_g0_offset = o; + req = be_create_reg_req(obst, &sparc_registers[REG_G0], + arch_register_req_type_ignore); + 
arch_set_irn_register_req_out(start, o, req); + arch_set_irn_register_out(start, o, &sparc_registers[REG_G0]); + ++o; + + /* g7 is used for TLS data */ + start_g7_offset = o; + req = be_create_reg_req(obst, &sparc_registers[REG_G7], + arch_register_req_type_ignore); + arch_set_irn_register_req_out(start, o, req); + arch_set_irn_register_out(start, o, &sparc_registers[REG_G7]); + ++o; + + /* we need an output for the stackpointer */ + start_sp_offset = o; + req = be_create_reg_req(obst, sp_reg, arch_register_req_type_produces_sp | arch_register_req_type_ignore); - be_prolog_add_reg(abihelper, &sparc_gp_regs[REG_G0], - arch_register_req_type_ignore); + arch_set_irn_register_req_out(start, o, req); + arch_set_irn_register_out(start, o, sp_reg); + ++o; + + if (!current_cconv->omit_fp) { + start_fp_offset = o; + req = be_create_reg_req(obst, fp_reg, arch_register_req_type_ignore); + arch_set_irn_register_req_out(start, o, req); + arch_set_irn_register_out(start, o, fp_reg); + ++o; + } + /* function parameters in registers */ + start_params_offset = o; for (i = 0; i < get_method_n_params(function_type); ++i) { - const reg_or_stackslot_t *param = &cconv->parameters[i]; - if (param->reg0 != NULL) - be_prolog_add_reg(abihelper, param->reg0, 0); - if (param->reg1 != NULL) - be_prolog_add_reg(abihelper, param->reg1, 0); + const reg_or_stackslot_t *param = ¤t_cconv->parameters[i]; + const arch_register_t *reg0 = param->reg0; + const arch_register_t *reg1 = param->reg1; + if (reg0 != NULL) { + arch_set_irn_register_req_out(start, o, reg0->single_req); + arch_set_irn_register_out(start, o, reg0); + ++o; + } + if (reg1 != NULL) { + arch_set_irn_register_req_out(start, o, reg1->single_req); + arch_set_irn_register_out(start, o, reg1); + ++o; + } } + /* we need the values of the callee saves (Note: non omit-fp mode has no + * callee saves) */ + start_callee_saves_offset = o; + if (current_cconv->omit_fp) { + size_t n_callee_saves = ARRAY_SIZE(omit_fp_callee_saves); + size_t c; + for (c = 0; c < n_callee_saves; ++c) { + const arch_register_t *reg = omit_fp_callee_saves[c]; + arch_set_irn_register_req_out(start, o, reg->single_req); + arch_set_irn_register_out(start, o, reg); + ++o; + } + } + assert(n_outs == o); - start = be_prolog_create_start(abihelper, dbgi, new_block); - - mem = be_prolog_get_memory(abihelper); - sp = be_prolog_get_reg_value(abihelper, sp_reg); - save = new_bd_sparc_Save(NULL, block, sp, mem, SPARC_MIN_STACKSIZE); - fp = new_r_Proj(save, mode_gp, pn_sparc_Save_frame); - sp = new_r_Proj(save, mode_gp, pn_sparc_Save_stack); - mem = new_r_Proj(save, mode_M, pn_sparc_Save_mem); - arch_set_irn_register(fp, fp_reg); - arch_set_irn_register(sp, sp_reg); - - be_prolog_add_reg(abihelper, fp_reg, arch_register_req_type_ignore); - be_prolog_set_reg_value(abihelper, fp_reg, fp); + return start; +} - sp = be_new_IncSP(sp_reg, new_block, sp, BE_STACK_FRAME_SIZE_EXPAND, 0); - be_prolog_set_reg_value(abihelper, sp_reg, sp); - be_prolog_set_memory(abihelper, mem); +static ir_node *get_initial_sp(ir_graph *irg) +{ + if (start_sp == NULL) { + ir_node *start = get_irg_start(irg); + start_sp = new_r_Proj(start, mode_gp, start_sp_offset); + } + return start_sp; +} - barrier = be_prolog_create_barrier(abihelper, new_block); +static ir_node *get_initial_fp(ir_graph *irg) +{ + if (start_fp == NULL) { + ir_node *start = get_irg_start(irg); + start_fp = new_r_Proj(start, mode_gp, start_fp_offset); + } + return start_fp; +} - return barrier; +static ir_node *get_initial_mem(ir_graph *irg) +{ + if (start_mem == 
NULL) { + ir_node *start = get_irg_start(irg); + start_mem = new_r_Proj(start, mode_M, start_mem_offset); + } + return start_mem; } static ir_node *get_stack_pointer_for(ir_node *node) { /* get predecessor in stack_order list */ - ir_node *stack_pred = be_get_stack_pred(abihelper, node); - ir_node *stack_pred_transformed; + ir_node *stack_pred = be_get_stack_pred(stackorder, node); ir_node *stack; if (stack_pred == NULL) { /* first stack user in the current block. We can simply use the * initial sp_proj for it */ - ir_node *sp_proj = be_prolog_get_reg_value(abihelper, sp_reg); - return sp_proj; + ir_graph *irg = get_irn_irg(node); + return get_initial_sp(irg); } - stack_pred_transformed = be_transform_node(stack_pred); - stack = pmap_get(node_to_stack, stack_pred); + be_transform_node(stack_pred); + stack = (ir_node*)pmap_get(node_to_stack, stack_pred); if (stack == NULL) { return get_stack_pointer_for(stack_pred); } @@ -1053,45 +1655,67 @@ static ir_node *get_stack_pointer_for(ir_node *node) */ static ir_node *gen_Return(ir_node *node) { - ir_node *block = get_nodes_block(node); - ir_node *new_block = be_transform_node(block); - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *mem = get_Return_mem(node); - ir_node *new_mem = be_transform_node(mem); - ir_node *sp_proj = get_stack_pointer_for(node); - int n_res = get_Return_n_ress(node); + ir_node *block = get_nodes_block(node); + ir_graph *irg = get_irn_irg(node); + ir_node *new_block = be_transform_node(block); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *mem = get_Return_mem(node); + ir_node *new_mem = be_transform_node(mem); + ir_node *sp = get_stack_pointer_for(node); + size_t n_res = get_Return_n_ress(node); + struct obstack *be_obst = be_get_be_obst(irg); ir_node *bereturn; - ir_node *incsp; - int i; + ir_node **in; + const arch_register_req_t **reqs; + size_t i; + size_t p; + size_t n_ins; + + /* estimate number of return values */ + n_ins = 2 + n_res; /* memory + stackpointer, return values */ + if (current_cconv->omit_fp) + n_ins += ARRAY_SIZE(omit_fp_callee_saves); + + in = ALLOCAN(ir_node*, n_ins); + reqs = OALLOCN(be_obst, const arch_register_req_t*, n_ins); + p = 0; + + in[p] = new_mem; + reqs[p] = arch_no_register_req; + ++p; - be_epilog_begin(abihelper); - be_epilog_set_memory(abihelper, new_mem); - /* connect stack pointer with initial stack pointer. 
fix_stack phase - will later serialize all stack pointer adjusting nodes */ - be_epilog_add_reg(abihelper, sp_reg, - arch_register_req_type_produces_sp | arch_register_req_type_ignore, - sp_proj); + in[p] = sp; + reqs[p] = sp_reg->single_req; + ++p; /* result values */ for (i = 0; i < n_res; ++i) { ir_node *res_value = get_Return_res(node, i); ir_node *new_res_value = be_transform_node(res_value); - const reg_or_stackslot_t *slot = &cconv->results[i]; - const arch_register_t *reg = slot->reg0; - assert(slot->reg1 == NULL); - be_epilog_add_reg(abihelper, reg, 0, new_res_value); + const reg_or_stackslot_t *slot = &current_cconv->results[i]; + assert(slot->req1 == NULL); + in[p] = new_res_value; + reqs[p] = slot->req0; + ++p; } + /* callee saves */ + if (current_cconv->omit_fp) { + ir_node *start = get_irg_start(irg); + size_t n_callee_saves = ARRAY_SIZE(omit_fp_callee_saves); + for (i = 0; i < n_callee_saves; ++i) { + const arch_register_t *reg = omit_fp_callee_saves[i]; + ir_mode *mode = reg->reg_class->mode; + ir_node *value + = new_r_Proj(start, mode, i + start_callee_saves_offset); + in[p] = value; + reqs[p] = reg->single_req; + ++p; + } + } + assert(p == n_ins); - /* create the barrier before the epilog code */ - be_epilog_create_barrier(abihelper, new_block); - - /* epilog code: an incsp */ - sp_proj = be_epilog_get_reg_value(abihelper, sp_reg); - incsp = be_new_IncSP(sp_reg, new_block, sp_proj, - BE_STACK_FRAME_SIZE_SHRINK, 0); - be_epilog_set_reg_value(abihelper, sp_reg, incsp); - - bereturn = be_epilog_create_return(abihelper, dbgi, new_block); + bereturn = new_bd_sparc_Return_reg(dbgi, new_block, n_ins, in); + arch_set_irn_register_reqs_in(bereturn, reqs); return bereturn; } @@ -1101,17 +1725,17 @@ static ir_node *bitcast_int_to_float(dbg_info *dbgi, ir_node *block, { ir_graph *irg = current_ir_graph; ir_node *sp = get_irg_frame(irg); - ir_node *nomem = new_NoMem(); - ir_node *st = new_bd_sparc_St(dbgi, block, sp, value0, nomem, mode_gp, - NULL, 0, 0, true); + ir_node *nomem = get_irg_no_mem(irg); + ir_node *st = new_bd_sparc_St_imm(dbgi, block, value0, sp, nomem, + mode_gp, NULL, 0, true); ir_mode *mode; ir_node *ldf; ir_node *mem; set_irn_pinned(st, op_pin_state_floats); if (value1 != NULL) { - ir_node *st1 = new_bd_sparc_St(dbgi, block, sp, value1, nomem, mode_gp, - NULL, 0, 4, true); + ir_node *st1 = new_bd_sparc_St_imm(dbgi, block, value1, sp, nomem, + mode_gp, NULL, 4, true); ir_node *in[2] = { st, st1 }; ir_node *sync = new_r_Sync(block, 2, in); set_irn_pinned(st1, op_pin_state_floats); @@ -1122,40 +1746,60 @@ static ir_node *bitcast_int_to_float(dbg_info *dbgi, ir_node *block, mode = mode_fp; } - ldf = create_ldf(dbgi, block, sp, mem, mode, NULL, 0, 0, true); + ldf = create_ldf(dbgi, block, sp, mem, mode, NULL, 0, true); set_irn_pinned(ldf, op_pin_state_floats); - return new_Proj(ldf, mode, pn_sparc_Ldf_res); + return new_r_Proj(ldf, mode, pn_sparc_Ldf_res); } static void bitcast_float_to_int(dbg_info *dbgi, ir_node *block, - ir_node *node, ir_mode *float_mode, + ir_node *value, ir_mode *float_mode, ir_node **result) { - ir_graph *irg = current_ir_graph; - ir_node *stack = get_irg_frame(irg); - ir_node *nomem = new_NoMem(); - ir_node *stf = create_stf(dbgi, block, stack, node, nomem, float_mode, - NULL, 0, 0, true); - int bits = get_mode_size_bits(float_mode); - ir_node *ld; - set_irn_pinned(stf, op_pin_state_floats); - - ld = new_bd_sparc_Ld(dbgi, block, stack, stf, mode_gp, NULL, 0, 0, true); - set_irn_pinned(ld, op_pin_state_floats); - result[0] = new_Proj(ld, mode_gp,
pn_sparc_Ld_res); - - if (bits == 64) { - ir_node *ld2 = new_bd_sparc_Ld(dbgi, block, stack, stf, mode_gp, - NULL, 0, 4, true); + int bits = get_mode_size_bits(float_mode); + if (is_Const(value)) { + ir_tarval *tv = get_Const_tarval(value); + int32_t val = get_tarval_sub_bits(tv, 0) | + (get_tarval_sub_bits(tv, 1) << 8) | + (get_tarval_sub_bits(tv, 2) << 16) | + (get_tarval_sub_bits(tv, 3) << 24); + result[0] = create_int_const(block, val); + if (bits == 64) { + int32_t val = get_tarval_sub_bits(tv, 4) | + (get_tarval_sub_bits(tv, 5) << 8) | + (get_tarval_sub_bits(tv, 6) << 16) | + (get_tarval_sub_bits(tv, 7) << 24); + result[1] = create_int_const(block, val); + } else { + assert(bits == 32); + result[1] = NULL; + } + } else { + ir_graph *irg = current_ir_graph; + ir_node *stack = get_irg_frame(irg); + ir_node *nomem = get_irg_no_mem(irg); + ir_node *new_value = be_transform_node(value); + ir_node *stf = create_stf(dbgi, block, new_value, stack, nomem, + float_mode, NULL, 0, true); + ir_node *ld; + set_irn_pinned(stf, op_pin_state_floats); + + ld = new_bd_sparc_Ld_imm(dbgi, block, stack, stf, mode_gp, NULL, 0, true); set_irn_pinned(ld, op_pin_state_floats); - result[1] = new_Proj(ld2, mode_gp, pn_sparc_Ld_res); + result[0] = new_r_Proj(ld, mode_gp, pn_sparc_Ld_res); - arch_irn_add_flags(ld, sparc_arch_irn_flag_needs_64bit_spillslot); - arch_irn_add_flags(ld2, sparc_arch_irn_flag_needs_64bit_spillslot); - } else { - assert(bits == 32); - result[1] = NULL; + if (bits == 64) { + ir_node *ld2 = new_bd_sparc_Ld_imm(dbgi, block, stack, stf, mode_gp, + NULL, 4, true); + set_irn_pinned(ld, op_pin_state_floats); + result[1] = new_r_Proj(ld2, mode_gp, pn_sparc_Ld_res); + + arch_add_irn_flags(ld, (arch_irn_flags_t)sparc_arch_irn_flag_needs_64bit_spillslot); + arch_add_irn_flags(ld2, (arch_irn_flags_t)sparc_arch_irn_flag_needs_64bit_spillslot); + } else { + assert(bits == 32); + result[1] = NULL; + } } } @@ -1169,27 +1813,32 @@ static ir_node *gen_Call(ir_node *node) ir_node *new_mem = be_transform_node(mem); dbg_info *dbgi = get_irn_dbg_info(node); ir_type *type = get_Call_type(node); - int n_params = get_Call_n_params(node); - int n_param_regs = sizeof(param_regs)/sizeof(param_regs[0]); + size_t n_params = get_Call_n_params(node); + size_t n_ress = get_method_n_ress(type); /* max inputs: memory, callee, register arguments */ - int max_inputs = 2 + n_param_regs; - ir_node **in = ALLOCAN(ir_node*, max_inputs); - ir_node **sync_ins = ALLOCAN(ir_node*, max_inputs); + ir_node **sync_ins = ALLOCAN(ir_node*, n_params); struct obstack *obst = be_get_be_obst(irg); + calling_convention_t *cconv + = sparc_decide_calling_convention(type, NULL); + size_t n_param_regs = cconv->n_param_regs; + /* param-regs + mem + stackpointer + callee */ + unsigned max_inputs = 3 + n_param_regs; + ir_node **in = ALLOCAN(ir_node*, max_inputs); const arch_register_req_t **in_req = OALLOCNZ(obst, const arch_register_req_t*, max_inputs); - calling_convention_t *cconv - = sparc_decide_calling_convention(type, true); int in_arity = 0; int sync_arity = 0; int n_caller_saves - = sizeof(caller_saves)/sizeof(caller_saves[0]); + = rbitset_popcount(cconv->caller_saves, N_SPARC_REGISTERS); ir_entity *entity = NULL; ir_node *new_frame = get_stack_pointer_for(node); + bool aggregate_return + = get_method_calling_convention(type) & cc_compound_ret; ir_node *incsp; int mem_pos; ir_node *res; - int p; + size_t p; + size_t r; int i; int o; int out_arity; @@ -1215,18 +1864,20 @@ static ir_node *gen_Call(ir_node *node) /* parameters */ for (p = 0; p 
< n_params; ++p) { ir_node *value = get_Call_param(node, p); - ir_node *new_value = be_transform_node(value); const reg_or_stackslot_t *param = &cconv->parameters[p]; ir_type *param_type = get_method_param_type(type, p); ir_mode *mode = get_type_mode(param_type); + ir_node *partial_value; ir_node *new_values[2]; ir_node *str; + int offset; if (mode_is_float(mode) && param->reg0 != NULL) { unsigned size_bits = get_mode_size_bits(mode); assert(size_bits <= 64); - bitcast_float_to_int(dbgi, new_block, new_value, mode, new_values); + bitcast_float_to_int(dbgi, new_block, value, mode, new_values); } else { + ir_node *new_value = be_transform_node(value); new_values[0] = new_value; new_values[1] = NULL; } @@ -1249,22 +1900,26 @@ static ir_node *gen_Call(ir_node *node) /* we need a store if we're here */ if (new_values[1] != NULL) { - new_value = new_values[1]; - mode = mode_gp; + partial_value = new_values[1]; + mode = mode_gp; + } else { + partial_value = new_values[0]; } - /* create a parameter frame if necessary */ + /* we need to skip over our save area when constructing the call + * arguments on stack */ + offset = param->offset + SPARC_MIN_STACKSIZE; + if (mode_is_float(mode)) { - str = create_stf(dbgi, new_block, incsp, new_value, new_mem, - mode, NULL, 0, param->offset, true); + str = create_stf(dbgi, new_block, partial_value, incsp, new_mem, + mode, NULL, offset, true); } else { - str = new_bd_sparc_St(dbgi, new_block, incsp, new_value, new_mem, - mode, NULL, 0, param->offset, true); + str = new_bd_sparc_St_imm(dbgi, new_block, partial_value, incsp, + new_mem, mode, NULL, offset, true); } set_irn_pinned(str, op_pin_state_floats); sync_ins[sync_arity++] = str; } - assert(in_arity <= max_inputs); /* construct memory input */ if (sync_arity == 0) { @@ -1282,28 +1937,43 @@ static ir_node *gen_Call(ir_node *node) in_req[in_arity] = sparc_reg_classes[CLASS_sparc_gp].class_req; ++in_arity; } + assert(in_arity <= (int)max_inputs); /* outputs: * - memory + * - results * - caller saves */ - out_arity = 1 + n_caller_saves; + out_arity = 1 + cconv->n_reg_results + n_caller_saves; /* create call node */ if (entity != NULL) { res = new_bd_sparc_Call_imm(dbgi, new_block, in_arity, in, out_arity, - entity, 0); + entity, 0, aggregate_return); } else { - res = new_bd_sparc_Call_reg(dbgi, new_block, in_arity, in, out_arity); + res = new_bd_sparc_Call_reg(dbgi, new_block, in_arity, in, out_arity, + aggregate_return); } - set_sparc_in_req_all(res, in_req); + arch_set_irn_register_reqs_in(res, in_req); /* create output register reqs */ o = 0; - arch_set_out_register_req(res, o++, arch_no_register_req); - for (i = 0; i < n_caller_saves; ++i) { - const arch_register_t *reg = caller_saves[i]; - arch_set_out_register_req(res, o++, reg->single_req); + arch_set_irn_register_req_out(res, o++, arch_no_register_req); + /* add register requirements for the result regs */ + for (r = 0; r < n_ress; ++r) { + const reg_or_stackslot_t *result_info = &cconv->results[r]; + const arch_register_req_t *req = result_info->req0; + if (req != NULL) { + arch_set_irn_register_req_out(res, o++, req); + } + assert(result_info->req1 == NULL); + } + for (i = 0; i < N_SPARC_REGISTERS; ++i) { + const arch_register_t *reg; + if (!rbitset_is_set(cconv->caller_saves, i)) + continue; + reg = &sparc_registers[i]; + arch_set_irn_register_req_out(res, o++, reg->single_req); } assert(o == out_arity); @@ -1336,12 +2006,104 @@ static ir_node *gen_Sel(ir_node *node) /* must be the frame pointer all other sels must have been lowered * already */ 
assert(is_Proj(ptr) && is_Start(get_Proj_pred(ptr))); - /* we should not have value types from parameters anymore - they should be - lowered */ - assert(get_entity_owner(entity) != - get_method_value_param_type(get_entity_type(get_irg_entity(get_irn_irg(node))))); - return new_bd_sparc_FrameAddr(dbgi, new_block, new_ptr, entity); + return new_bd_sparc_FrameAddr(dbgi, new_block, new_ptr, entity, 0); +} + +static ir_node *gen_Alloc(ir_node *node) +{ + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + ir_type *type = get_Alloc_type(node); + ir_node *size = get_Alloc_count(node); + ir_node *stack_pred = get_stack_pointer_for(node); + ir_node *subsp; + if (get_Alloc_where(node) != stack_alloc) + panic("only stack-alloc supported in sparc backend (at %+F)", node); + /* lowerer should have transformed all allocas to byte size */ + if (!is_unknown_type(type) && get_type_size_bytes(type) != 1) + panic("Found non-byte alloc in sparc backend (at %+F)", node); + + if (is_Const(size)) { + ir_tarval *tv = get_Const_tarval(size); + long sizel = get_tarval_long(tv); + subsp = be_new_IncSP(sp_reg, new_block, stack_pred, sizel, 0); + set_irn_dbg_info(subsp, dbgi); + } else { + ir_node *new_size = be_transform_node(size); + subsp = new_bd_sparc_SubSP(dbgi, new_block, stack_pred, new_size); + arch_set_irn_register(subsp, sp_reg); + } + + /* if we are the last IncSP producer in a block then we have to keep + * the stack value. + * Note: This here keeps all producers which is more than necessary */ + keep_alive(subsp); + + pmap_insert(node_to_stack, node, subsp); + /* the "result" is the unmodified sp value */ + return stack_pred; +} + +static ir_node *gen_Proj_Alloc(ir_node *node) +{ + ir_node *alloc = get_Proj_pred(node); + long pn = get_Proj_proj(node); + + switch ((pn_Alloc)pn) { + case pn_Alloc_M: { + ir_node *alloc_mem = get_Alloc_mem(alloc); + return be_transform_node(alloc_mem); + } + case pn_Alloc_res: { + ir_node *new_alloc = be_transform_node(alloc); + return new_alloc; + } + case pn_Alloc_X_regular: + case pn_Alloc_X_except: + panic("sparc backend: exception output of alloc not supported (at %+F)", + node); + } + panic("sparc backend: invalid Proj->Alloc"); +} + +static ir_node *gen_Free(ir_node *node) +{ + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + ir_type *type = get_Free_type(node); + ir_node *size = get_Free_count(node); + ir_node *mem = get_Free_mem(node); + ir_node *new_mem = be_transform_node(mem); + ir_node *stack_pred = get_stack_pointer_for(node); + ir_node *addsp; + if (get_Alloc_where(node) != stack_alloc) + panic("only stack-alloc supported in sparc backend (at %+F)", node); + /* lowerer should have transformed all allocas to byte size */ + if (!is_unknown_type(type) && get_type_size_bytes(type) != 1) + panic("Found non-byte alloc in sparc backend (at %+F)", node); + + if (is_Const(size)) { + ir_tarval *tv = get_Const_tarval(size); + long sizel = get_tarval_long(tv); + addsp = be_new_IncSP(sp_reg, new_block, stack_pred, -sizel, 0); + set_irn_dbg_info(addsp, dbgi); + } else { + ir_node *new_size = be_transform_node(size); + addsp = new_bd_sparc_AddSP(dbgi, new_block, stack_pred, new_size); + arch_set_irn_register(addsp, sp_reg); + } + + /* if we are the last IncSP producer in a block then we have to keep + * the stack value. 
+ * Note: This here keeps all producers which is more than necessary */ + keep_alive(addsp); + + pmap_insert(node_to_stack, node, addsp); + /* the "result" is the unmodified sp value */ + return new_mem; } static const arch_register_req_t float1_req = { @@ -1372,16 +2134,12 @@ static const arch_register_req_t float4_req = { static const arch_register_req_t *get_float_req(ir_mode *mode) { - unsigned bits = get_mode_size_bits(mode); - assert(mode_is_float(mode)); - if (bits == 32) { - return &float1_req; - } else if (bits == 64) { - return &float2_req; - } else { - assert(bits == 128); - return &float4_req; + switch (get_mode_size_bits(mode)) { + case 32: return &float1_req; + case 64: return &float2_req; + case 128: return &float4_req; + default: panic("invalid float mode"); } } @@ -1404,7 +2162,6 @@ static ir_node *gen_Phi(ir_node *node) mode = mode_gp; req = sparc_reg_classes[CLASS_sparc_gp].class_req; } else if (mode_is_float(mode)) { - mode = mode; req = get_float_req(mode); } else { req = arch_no_register_req; @@ -1415,7 +2172,7 @@ static ir_node *gen_Phi(ir_node *node) phi = new_ir_node(dbgi, irg, block, op_Phi, mode, get_irn_arity(node), get_irn_in(node) + 1); copy_node_attr(irg, node, phi); be_duplicate_deps(node, phi); - arch_set_out_register_req(phi, 0, req); + arch_set_irn_register_req_out(phi, 0, req); be_enqueue_preds(node); return phi; } @@ -1442,7 +2199,10 @@ static ir_node *gen_Proj_Load(ir_node *node) break; case iro_sparc_Ldf: if (pn == pn_Load_res) { - return new_rd_Proj(dbgi, new_load, mode_fp, pn_sparc_Ldf_res); + const sparc_load_store_attr_t *attr + = get_sparc_load_store_attr_const(new_load); + ir_mode *mode = attr->load_store_mode; + return new_rd_Proj(dbgi, new_load, mode, pn_sparc_Ldf_res); } else if (pn == pn_Load_M) { return new_rd_Proj(dbgi, new_load, mode_M, pn_sparc_Ld_M); } @@ -1494,13 +2254,23 @@ static ir_node *gen_Proj_Div(ir_node *node) ir_node *pred = get_Proj_pred(node); ir_node *new_pred = be_transform_node(pred); long pn = get_Proj_proj(node); + ir_mode *res_mode; - assert(is_sparc_SDiv(new_pred) || is_sparc_UDiv(new_pred)); - assert(pn_sparc_SDiv_res == pn_sparc_UDiv_res); - assert(pn_sparc_SDiv_M == pn_sparc_UDiv_M); + if (is_sparc_SDiv(new_pred) || is_sparc_UDiv(new_pred)) { + res_mode = mode_gp; + } else if (is_sparc_fdiv(new_pred)) { + res_mode = get_Div_resmode(pred); + } else { + panic("sparc backend: Div transformed to something unexpected: %+F", + new_pred); + } + assert((int)pn_sparc_SDiv_res == (int)pn_sparc_UDiv_res); + assert((int)pn_sparc_SDiv_M == (int)pn_sparc_UDiv_M); + assert((int)pn_sparc_SDiv_res == (int)pn_sparc_fdiv_res); + assert((int)pn_sparc_SDiv_M == (int)pn_sparc_fdiv_M); switch (pn) { case pn_Div_res: - return new_r_Proj(new_pred, mode_gp, pn_sparc_SDiv_res); + return new_r_Proj(new_pred, res_mode, pn_sparc_SDiv_res); case pn_Div_M: return new_r_Proj(new_pred, mode_gp, pn_sparc_SDiv_M); default: @@ -1509,82 +2279,90 @@ static ir_node *gen_Proj_Div(ir_node *node) panic("Unsupported Proj from Div"); } -static ir_node *gen_Proj_Quot(ir_node *node) +static ir_node *get_frame_base(ir_graph *irg) { - ir_node *pred = get_Proj_pred(node); - ir_node *new_pred = be_transform_node(pred); - long pn = get_Proj_proj(node); - - assert(is_sparc_fdiv(new_pred)); - switch (pn) { - case pn_Quot_res: - return new_r_Proj(new_pred, mode_gp, pn_sparc_fdiv_res); - case pn_Quot_M: - return new_r_Proj(new_pred, mode_gp, pn_sparc_fdiv_M); - default: - break; + if (frame_base == NULL) { + if (current_cconv->omit_fp) { + frame_base = 
get_initial_sp(irg); + } else { + frame_base = get_initial_fp(irg); + } } - panic("Unsupported Proj from Quot"); + return frame_base; } static ir_node *gen_Proj_Start(ir_node *node) { ir_node *block = get_nodes_block(node); ir_node *new_block = be_transform_node(block); - ir_node *barrier = be_transform_node(get_Proj_pred(node)); long pn = get_Proj_proj(node); + /* make sure prolog is constructed */ + be_transform_node(get_Proj_pred(node)); switch ((pn_Start) pn) { case pn_Start_X_initial_exec: /* exchange ProjX with a jump */ return new_bd_sparc_Ba(NULL, new_block); - case pn_Start_M: - return new_r_Proj(barrier, mode_M, 0); + case pn_Start_M: { + ir_graph *irg = get_irn_irg(node); + return get_initial_mem(irg); + } case pn_Start_T_args: - return barrier; + return new_r_Bad(get_irn_irg(block), mode_T); case pn_Start_P_frame_base: - return be_prolog_get_reg_value(abihelper, fp_reg); - case pn_Start_P_tls: - return new_Bad(); - case pn_Start_max: - break; + return get_frame_base(get_irn_irg(block)); } panic("Unexpected start proj: %ld\n", pn); } static ir_node *gen_Proj_Proj_Start(ir_node *node) { - long pn = get_Proj_proj(node); - ir_node *block = get_nodes_block(node); - ir_node *new_block = be_transform_node(block); - ir_entity *entity = get_irg_entity(current_ir_graph); - ir_type *method_type = get_entity_type(entity); - ir_type *param_type = get_method_param_type(method_type, pn); + long pn = get_Proj_proj(node); + ir_node *block = get_nodes_block(node); + ir_graph *irg = get_irn_irg(node); + ir_node *new_block = be_transform_node(block); + ir_node *args = get_Proj_pred(node); + ir_node *start = get_Proj_pred(args); + ir_node *new_start = be_transform_node(start); const reg_or_stackslot_t *param; /* Proj->Proj->Start must be a method argument */ assert(get_Proj_proj(get_Proj_pred(node)) == pn_Start_T_args); - param = &cconv->parameters[pn]; + param = &current_cconv->parameters[pn]; if (param->reg0 != NULL) { /* argument transmitted in register */ - ir_mode *mode = get_type_mode(param_type); - const arch_register_t *reg = param->reg0; - ir_node *value = be_prolog_get_reg_value(abihelper, reg); + const arch_register_t *reg = param->reg0; + ir_mode *reg_mode = reg->reg_class->mode; + long new_pn = param->reg_offset + start_params_offset; + ir_node *value = new_r_Proj(new_start, reg_mode, new_pn); + bool is_float = false; + + { + ir_entity *entity = get_irg_entity(irg); + ir_type *method_type = get_entity_type(entity); + if (pn < (long)get_method_n_params(method_type)) { + ir_type *param_type = get_method_param_type(method_type, pn); + ir_mode *mode = get_type_mode(param_type); + is_float = mode_is_float(mode); + } + } - if (mode_is_float(mode)) { + if (is_float) { + const arch_register_t *reg1 = param->reg1; ir_node *value1 = NULL; - if (param->reg1 != NULL) { - value1 = be_prolog_get_reg_value(abihelper, param->reg1); + if (reg1 != NULL) { + ir_mode *reg1_mode = reg1->reg_class->mode; + value1 = new_r_Proj(new_start, reg1_mode, new_pn+1); } else if (param->entity != NULL) { - ir_node *fp = be_prolog_get_reg_value(abihelper, fp_reg); - ir_node *mem = be_prolog_get_memory(abihelper); - ir_node *ld = new_bd_sparc_Ld(NULL, new_block, fp, mem, - mode_gp, param->entity, - 0, 0, true); - value1 = new_Proj(ld, mode_gp, pn_sparc_Ld_res); + ir_node *fp = get_initial_fp(irg); + ir_node *mem = get_initial_mem(irg); + ir_node *ld = new_bd_sparc_Ld_imm(NULL, new_block, fp, mem, + mode_gp, param->entity, + 0, true); + value1 = new_r_Proj(ld, mode_gp, pn_sparc_Ld_res); } /* convert integer value to float */
@@ -1593,19 +2371,19 @@ static ir_node *gen_Proj_Proj_Start(ir_node *node) return value; } else { /* argument transmitted on stack */ - ir_node *fp = be_prolog_get_reg_value(abihelper, fp_reg); - ir_node *mem = be_prolog_get_memory(abihelper); - ir_mode *mode = get_type_mode(param->type); - ir_node *load; - ir_node *value; + ir_node *mem = get_initial_mem(irg); + ir_mode *mode = get_type_mode(param->type); + ir_node *base = get_frame_base(irg); + ir_node *load; + ir_node *value; if (mode_is_float(mode)) { - load = create_ldf(NULL, new_block, fp, mem, mode, - param->entity, 0, 0, true); + load = create_ldf(NULL, new_block, base, mem, mode, + param->entity, 0, true); value = new_r_Proj(load, mode_fp, pn_sparc_Ldf_res); } else { - load = new_bd_sparc_Ld(NULL, new_block, fp, mem, mode, - param->entity, 0, 0, true); + load = new_bd_sparc_Ld_imm(NULL, new_block, base, mem, mode, + param->entity, 0, true); value = new_r_Proj(load, mode_gp, pn_sparc_Ld_res); } set_irn_pinned(load, op_pin_state_floats); @@ -1626,30 +2404,11 @@ static ir_node *gen_Proj_Call(ir_node *node) case pn_Call_X_regular: case pn_Call_X_except: case pn_Call_T_result: - case pn_Call_P_value_res_base: - case pn_Call_max: break; } panic("Unexpected Call proj %ld\n", pn); } -/** - * Finds number of output value of a mode_T node which is constrained to - * a single specific register. - */ -static int find_out_for_reg(ir_node *node, const arch_register_t *reg) -{ - int n_outs = arch_irn_get_n_outs(node); - int o; - - for (o = 0; o < n_outs; ++o) { - const arch_register_req_t *req = arch_get_out_register_req(node, o); - if (req == reg->single_req) - return o; - } - return -1; -} - static ir_node *gen_Proj_Proj_Call(ir_node *node) { long pn = get_Proj_proj(node); @@ -1657,22 +2416,18 @@ static ir_node *gen_Proj_Proj_Call(ir_node *node) ir_node *new_call = be_transform_node(call); ir_type *function_type = get_Call_type(call); calling_convention_t *cconv - = sparc_decide_calling_convention(function_type, true); - const reg_or_stackslot_t *res = &cconv->results[pn]; - const arch_register_t *reg = res->reg0; - ir_mode *mode; - int regn; + = sparc_decide_calling_convention(function_type, NULL); + const reg_or_stackslot_t *res = &cconv->results[pn]; + ir_mode *mode = get_irn_mode(node); + long new_pn = 1 + res->reg_offset; - assert(res->reg0 != NULL && res->reg1 == NULL); - regn = find_out_for_reg(new_call, reg); - if (regn < 0) { - panic("Internal error in calling convention for return %+F", node); + assert(res->req0 != NULL && res->req1 == NULL); + if (mode_needs_gp_reg(mode)) { + mode = mode_gp; } - mode = res->reg0->reg_class->mode; - sparc_free_calling_convention(cconv); - return new_r_Proj(new_call, mode, regn); + return new_r_Proj(new_call, mode, new_pn); } /** @@ -1683,6 +2438,8 @@ static ir_node *gen_Proj(ir_node *node) ir_node *pred = get_Proj_pred(node); switch (get_irn_opcode(pred)) { + case iro_Alloc: + return gen_Proj_Alloc(node); case iro_Store: return gen_Proj_Store(node); case iro_Load: @@ -1691,12 +2448,11 @@ static ir_node *gen_Proj(ir_node *node) return gen_Proj_Call(node); case iro_Cmp: return gen_Proj_Cmp(node); + case iro_Switch: case iro_Cond: return be_duplicate_node(node); case iro_Div: return gen_Proj_Div(node); - case iro_Quot: - return gen_Proj_Quot(node); case iro_Start: return gen_Proj_Start(node); case iro_Proj: { @@ -1709,6 +2465,11 @@ static ir_node *gen_Proj(ir_node *node) /* FALLTHROUGH */ } default: + if (is_sparc_AddCC_t(pred)) { + return gen_Proj_AddCC_t(node); + } else if (is_sparc_SubCC_t(pred)) { + 
return gen_Proj_SubCC_t(node); + } panic("code selection didn't expect Proj after %+F\n", pred); } } @@ -1728,12 +2489,12 @@ static ir_node *gen_Jmp(ir_node *node) /** * configure transformation callbacks */ -void sparc_register_transformers(void) +static void sparc_register_transformers(void) { be_start_transform_setup(); - be_set_transform_function(op_Abs, gen_Abs); be_set_transform_function(op_Add, gen_Add); + be_set_transform_function(op_Alloc, gen_Alloc); be_set_transform_function(op_And, gen_And); be_set_transform_function(op_Call, gen_Call); be_set_transform_function(op_Cmp, gen_Cmp); @@ -1742,6 +2503,7 @@ void sparc_register_transformers(void) be_set_transform_function(op_Conv, gen_Conv); be_set_transform_function(op_Div, gen_Div); be_set_transform_function(op_Eor, gen_Eor); + be_set_transform_function(op_Free, gen_Free); be_set_transform_function(op_Jmp, gen_Jmp); be_set_transform_function(op_Load, gen_Load); be_set_transform_function(op_Minus, gen_Minus); @@ -1751,7 +2513,6 @@ void sparc_register_transformers(void) be_set_transform_function(op_Or, gen_Or); be_set_transform_function(op_Phi, gen_Phi); be_set_transform_function(op_Proj, gen_Proj); - be_set_transform_function(op_Quot, gen_Quot); be_set_transform_function(op_Return, gen_Return); be_set_transform_function(op_Sel, gen_Sel); be_set_transform_function(op_Shl, gen_Shl); @@ -1760,62 +2521,58 @@ void sparc_register_transformers(void) be_set_transform_function(op_Start, gen_Start); be_set_transform_function(op_Store, gen_Store); be_set_transform_function(op_Sub, gen_Sub); + be_set_transform_function(op_Switch, gen_Switch); be_set_transform_function(op_SymConst, gen_SymConst); be_set_transform_function(op_Unknown, gen_Unknown); + be_set_transform_function(op_sparc_AddX_t, gen_AddX_t); + be_set_transform_function(op_sparc_AddCC_t,gen_AddCC_t); be_set_transform_function(op_sparc_Save, be_duplicate_node); -} - -/* hack to avoid unused fp proj at start barrier */ -static void assure_fp_keep(void) -{ - unsigned n_users = 0; - const ir_edge_t *edge; - ir_node *fp_proj = be_prolog_get_reg_value(abihelper, fp_reg); - - foreach_out_edge(fp_proj, edge) { - ir_node *succ = get_edge_src_irn(edge); - if (is_End(succ) || is_Anchor(succ)) - continue; - ++n_users; - } - - if (n_users == 0) { - ir_node *block = get_nodes_block(fp_proj); - ir_node *in[1] = { fp_proj }; - be_new_Keep(block, 1, in); - } + be_set_transform_function(op_sparc_SubX_t, gen_SubX_t); + be_set_transform_function(op_sparc_SubCC_t,gen_SubCC_t); } /** * Transform a Firm graph into a SPARC graph. */ -void sparc_transform_graph(sparc_code_gen_t *cg) +void sparc_transform_graph(ir_graph *irg) { - ir_graph *irg = cg->irg; ir_entity *entity = get_irg_entity(irg); ir_type *frame_type; sparc_register_transformers(); - env_cg = cg; node_to_stack = pmap_create(); - mode_gp = mode_Iu; - mode_fp = mode_F; - mode_fp2 = mode_D; + mode_gp = sparc_reg_classes[CLASS_sparc_gp].mode; + mode_fp = sparc_reg_classes[CLASS_sparc_fp].mode; + mode_fp2 = mode_D; //mode_fp4 = ? 
+ mode_flags = sparc_reg_classes[CLASS_sparc_flags_class].mode; + assert(sparc_reg_classes[CLASS_sparc_fpflags_class].mode == mode_flags); + + start_mem = NULL; + start_g0 = NULL; + start_g7 = NULL; + start_sp = NULL; + start_fp = NULL; + frame_base = NULL; + + stackorder = be_collect_stacknodes(irg); + current_cconv + = sparc_decide_calling_convention(get_entity_type(entity), irg); + if (sparc_variadic_fixups(irg, current_cconv)) { + sparc_free_calling_convention(current_cconv); + current_cconv + = sparc_decide_calling_convention(get_entity_type(entity), irg); + } + sparc_create_stacklayout(irg, current_cconv); + be_add_parameter_entity_stores(irg); - abihelper = be_abihelper_prepare(irg); - be_collect_stacknodes(abihelper); - cconv = sparc_decide_calling_convention(get_entity_type(entity), false); - create_stacklayout(irg); - - be_transform_graph(cg->irg, NULL); - assure_fp_keep(); + be_transform_graph(irg, NULL); - be_abihelper_finish(abihelper); - sparc_free_calling_convention(cconv); + be_free_stackorder(stackorder); + sparc_free_calling_convention(current_cconv); frame_type = get_irg_frame_type(irg); if (get_type_state(frame_type) == layout_undefined) @@ -1825,6 +2582,11 @@ void sparc_transform_graph(sparc_code_gen_t *cg) node_to_stack = NULL; be_add_missing_keeps(irg); + + /* do code placement, to optimize the position of constants */ + place_code(irg); + /* backend expects outedges to be always on */ + edges_assure(irg); } void sparc_init_transform(void)