+ unsigned index = reg->global_index;
+ return (index >= REG_G0 && index <= REG_G7)
+ || (index >= REG_I0 && index <= REG_I7);
+}
+
+ /* Replace the RestoreZero @p node with a Restore whose built-in add
+  * computes op0 + op1, so the value of @p replaced is produced for free
+  * by the restore itself. The new Restore's stack Proj takes over for
+  * @p node and its result Proj takes over for @p replaced.
+  * NOTE(review): both Projs reuse the mode of @p node for the stack AND
+  * the result value — presumably the generic pointer/gp mode; confirm
+  * this also matches the mode of @p replaced. */
+ static void replace_with_restore_reg(ir_node *node, ir_node *replaced,
+ ir_node *op0, ir_node *op1)
+ {
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ /* reuse the RestoreZero's stack and frame-pointer inputs unchanged */
+ ir_node *stack_in = get_irn_n(node, n_sparc_RestoreZero_stack);
+ ir_node *fp = get_irn_n(node, n_sparc_RestoreZero_frame_pointer);
+ ir_node *block = get_nodes_block(node);
+ ir_mode *mode = get_irn_mode(node);
+ ir_node *new_node = new_bd_sparc_Restore_reg(dbgi, block, stack_in, fp,
+ op0, op1);
+ ir_node *stack = new_r_Proj(new_node, mode, pn_sparc_Restore_stack);
+ ir_node *res = new_r_Proj(new_node, mode, pn_sparc_Restore_res);
+ /* register allocation already happened: the result must land in the
+  * register assigned to @p replaced, the stack output stays in %sp */
+ const arch_register_t *reg = arch_get_irn_register(replaced);
+ const arch_register_t *sp = &sparc_registers[REG_SP];
+ arch_set_irn_register_out(new_node, pn_sparc_Restore_stack, sp);
+ arch_set_irn_register_out(new_node, pn_sparc_Restore_res, reg);
+
+ /* schedule the Restore in place of the RestoreZero, then rewire both
+  * the stack user(s) and the user(s) of the folded value */
+ sched_add_before(node, new_node);
+ be_peephole_exchange(node, stack);
+ be_peephole_exchange(replaced, res);
+ }
+
+ /* Immediate variant of replace_with_restore_reg(): replace the
+  * RestoreZero @p node with a Restore computing op + immediate (or an
+  * entity address via @p imm_entity), producing the value of
+  * @p replaced as a by-product. The Restore's stack Proj takes over for
+  * @p node and its result Proj for @p replaced.
+  * NOTE(review): as in the reg variant, the mode of @p node is reused
+  * for both Projs — confirm it matches the mode of @p replaced. */
+ static void replace_with_restore_imm(ir_node *node, ir_node *replaced,
+ ir_node *op, ir_entity *imm_entity,
+ int32_t immediate)
+ {
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ /* reuse the RestoreZero's stack and frame-pointer inputs unchanged */
+ ir_node *stack_in = get_irn_n(node, n_sparc_RestoreZero_stack);
+ ir_node *fp = get_irn_n(node, n_sparc_RestoreZero_frame_pointer);
+ ir_node *block = get_nodes_block(node);
+ ir_mode *mode = get_irn_mode(node);
+ ir_node *new_node = new_bd_sparc_Restore_imm(dbgi, block, stack_in, fp,
+ op, imm_entity, immediate);
+ ir_node *stack = new_r_Proj(new_node, mode, pn_sparc_Restore_stack);
+ ir_node *res = new_r_Proj(new_node, mode, pn_sparc_Restore_res);
+ /* register allocation already happened: the result must land in the
+  * register assigned to @p replaced, the stack output stays in %sp */
+ const arch_register_t *reg = arch_get_irn_register(replaced);
+ const arch_register_t *sp = &sparc_registers[REG_SP];
+ arch_set_irn_register_out(new_node, pn_sparc_Restore_stack, sp);
+ arch_set_irn_register_out(new_node, pn_sparc_Restore_res, reg);
+
+ /* schedule the Restore in place of the RestoreZero, then rewire both
+  * the stack user(s) and the user(s) of the folded value */
+ sched_add_before(node, new_node);
+ be_peephole_exchange(node, stack);
+ be_peephole_exchange(replaced, res);
+ }
+
+static void peephole_sparc_RestoreZero(ir_node *node)
+{
+ /* restore gives us a free "add" instruction, let's try to use that to fold
+ * an instruction in. We can do the following:
+ *
+ * - Copy values (g0 + reg)
+ * - Produce constants (g0 + immediate)
+ * - Perform an add (reg + reg)
+ * - Perform a sub with immediate (reg + (-immediate))
+ *
+ * Note: In an ideal world, this would not be a peephole optimization but
+ * already performed during code selection. Since about all foldable ops are
+ * arguments of the return node. However we have a hard time doing this
+ * since we construct epilogue code only after register allocation
+ * (and therefore after code selection).
+ */
+ int n_tries = 10; /* limit our search */
+
+ for (ir_node *schedpoint = node;;) {
+ const arch_register_t *reg;