+ unsigned bits = get_mode_size_bits(mode);
+
+ switch (bits) {
+ case 32:
+ return new_func_single(dbgi, block, new_op1, new_op2, mode);
+ case 64:
+ return new_func_double(dbgi, block, new_op1, new_op2, mode);
+ case 128:
+ return new_func_quad(dbgi, block, new_op1, new_op2, mode);
+ default:
+ break;
+ }
+ panic("unsupported mode %+F for float op", mode);
+}
+
+static ir_node *gen_helper_unfpop(ir_node *node, ir_mode *mode,
+ new_unop_fp_func new_func_single,
+ new_unop_fp_func new_func_double,
+ new_unop_fp_func new_func_quad)
+{
+ ir_node *block = be_transform_node(get_nodes_block(node));
+ ir_node *op1 = get_binop_left(node);
+ ir_node *new_op1 = be_transform_node(op1);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ unsigned bits = get_mode_size_bits(mode);
+
+ switch (bits) {
+ case 32:
+ return new_func_single(dbgi, block, new_op1, mode);
+ case 64:
+ return new_func_double(dbgi, block, new_op1, mode);
+ case 128:
+ return new_func_quad(dbgi, block, new_op1, mode);
+ default:
+ break;
+ }
+ panic("unsupported mode %+F for float op", mode);
+}
+
/** Constructor for a binary operation that additionally consumes a flags
 *  value and takes an immediate second operand (with an optional entity for
 *  symbolic immediates). */
typedef ir_node* (*new_binopx_imm_func)(dbg_info *dbgi, ir_node *block,
                                        ir_node *op1, ir_node *flags,
                                        ir_entity *imm_entity, int32_t imm);

/** Constructor for a binary operation that additionally consumes a flags
 *  value and takes two register operands. */
typedef ir_node* (*new_binopx_reg_func)(dbg_info *dbgi, ir_node *block,
                                        ir_node *op1, ir_node *op2,
                                        ir_node *flags);
+
+static ir_node *gen_helper_binopx(ir_node *node, match_flags_t match_flags,
+ new_binopx_reg_func new_binopx_reg,
+ new_binopx_imm_func new_binopx_imm)
+{
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *block = be_transform_node(get_nodes_block(node));
+ ir_node *op1 = get_irn_n(node, 0);
+ ir_node *op2 = get_irn_n(node, 1);
+ ir_node *flags = get_irn_n(node, 2);
+ ir_node *new_flags = be_transform_node(flags);
+ ir_node *new_op1;
+ ir_node *new_op2;
+
+ /* only support for mode-neutral implemented so far */
+ assert(match_flags & MATCH_MODE_NEUTRAL);
+
+ if (is_imm_encodeable(op2)) {
+ int32_t immediate = get_tarval_long(get_Const_tarval(op2));
+ new_op1 = be_transform_node(op1);
+ return new_binopx_imm(dbgi, block, new_op1, new_flags, NULL, immediate);
+ }
+ new_op2 = be_transform_node(op2);
+ if ((match_flags & MATCH_COMMUTATIVE) && is_imm_encodeable(op1)) {
+ int32_t immediate = get_tarval_long(get_Const_tarval(op1));
+ return new_binopx_imm(dbgi, block, new_op2, new_flags, NULL, immediate);
+ }
+ new_op1 = be_transform_node(op1);
+ return new_binopx_reg(dbgi, block, new_op1, new_op2, new_flags);
+
+}
+
+static ir_node *get_g0(void)
+{
+ return be_prolog_get_reg_value(abihelper, &sparc_registers[REG_G0]);
+}
+
/** Decomposed pieces of a load/store address as produced by
 *  match_address(). */
typedef struct address_t {
	ir_node   *ptr;    /**< (transformed) base pointer */
	ir_node   *ptr2;   /**< second pointer operand, or NULL
	                        (only set when reg+reg addressing is allowed) */
	ir_entity *entity; /**< entity of a matched SymConst, or NULL */
	int32_t   offset;  /**< constant offset (presumably bytes -- confirm) */
} address_t;
+
+/**
+ * Match a load/store address
+ */
+static void match_address(ir_node *ptr, address_t *address, bool use_ptr2)
+{
+ ir_node *base = ptr;
+ ir_node *ptr2 = NULL;
+ int32_t offset = 0;
+ ir_entity *entity = NULL;
+
+ if (is_Add(base)) {
+ ir_node *add_right = get_Add_right(base);
+ if (is_Const(add_right)) {
+ base = get_Add_left(base);
+ offset += get_tarval_long(get_Const_tarval(add_right));
+ }
+ }
+ /* Note that we don't match sub(x, Const) or chains of adds/subs
+ * because this should all be normalized by now */
+
+ /* we only use the symconst if we're the only user otherwise we probably
+ * won't save anything but produce multiple sethi+or combinations with
+ * just different offsets */
+ if (is_SymConst(base) && get_irn_n_edges(base) == 1) {
+ dbg_info *dbgi = get_irn_dbg_info(ptr);
+ ir_node *block = get_nodes_block(ptr);
+ ir_node *new_block = be_transform_node(block);
+ entity = get_SymConst_entity(base);
+ base = new_bd_sparc_SetHi(dbgi, new_block, entity, offset);
+ } else if (use_ptr2 && is_Add(base) && entity == NULL && offset == 0) {
+ ptr2 = be_transform_node(get_Add_right(base));
+ base = be_transform_node(get_Add_left(base));
+ } else {
+ if (sparc_is_value_imm_encodeable(offset)) {
+ base = be_transform_node(base);
+ } else {
+ base = be_transform_node(ptr);
+ offset = 0;
+ }
+ }