}
}
+/**
+ * Creates a Push from Store(IncSP(gp_reg_size))
+ *
+ * Peephole: if a Store writes a GP value through a stack pointer that was
+ * just expanded by exactly one GP word, the IncSP+Store pair is replaced by
+ * a single ia32 Push node. Both original nodes are removed from the
+ * schedule and their users are rewired onto the new Push's Projs.
+ *
+ * @param irn  the candidate ia32 Store node
+ * @param cg   the ia32 code generator environment (arch_env, isa)
+ */
+static void ia32_create_Push(ir_node *irn, ia32_code_gen_t *cg) {
+ ir_node *sp = get_irn_n(irn, 0);
+ ir_node *val, *next, *push, *bl, *proj_M, *proj_res, *old_proj_M;
+ const ir_edge_t *edge;
+
+ /* the Store must have no address-mode offset and its base (input 0)
+  * must come directly from a be_IncSP */
+ if (get_ia32_am_offs(irn) || !be_is_IncSP(sp))
+ return;
+
+ /* input 1 is the index register: it must be unused (NOREG),
+  * i.e. the Store addresses the stack pointer alone */
+ if (arch_get_irn_register(cg->arch_env, get_irn_n(irn, 1)) !=
+ &ia32_gp_regs[REG_GP_NOREG])
+ return;
+
+ /* input 2 is the stored value; Push handles only GP values here,
+  * so skip float Stores */
+ val = get_irn_n(irn, 2);
+ if (mode_is_float(get_irn_mode(val)))
+ return;
+
+ /* the IncSP must expand the stack by exactly one GP register word,
+  * which is precisely what a Push does implicitly */
+ if (be_get_IncSP_direction(sp) != be_stack_dir_expand ||
+ be_get_IncSP_offset(sp) != get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode))
+ return;
+
+ /* ok, translate into Push */
+ /* NOTE(review): takes the first out edge only — assumes the Store's
+  * sole user is its memory Proj; verify a Store never has other users */
+ edge = get_irn_out_edge_first(irn);
+ old_proj_M = get_edge_src_irn(edge);
+
+ /* remember the schedule position before unlinking both nodes */
+ next = sched_next(irn);
+ sched_remove(irn);
+ sched_remove(sp);
+
+ /* build the Push from the IncSP's predecessor stack pointer, the
+  * stored value and the IncSP's memory; Proj 0 = new SP, Proj 1 = M */
+ bl = get_nodes_block(irn);
+ push = new_rd_ia32_Push(NULL, current_ir_graph, bl,
+ be_get_IncSP_pred(sp), val, be_get_IncSP_mem(sp), mode_T);
+ proj_res = new_r_Proj(current_ir_graph, bl, push, get_irn_mode(sp), 0);
+ proj_M = new_r_Proj(current_ir_graph, bl, push, mode_M, 1);
+
+ /* the push must have SP out register */
+ arch_set_irn_register(cg->arch_env, push, arch_get_irn_register(cg->arch_env, sp));
+
+ /* rewire all users of the old memory Proj and of the old IncSP result
+  * onto the Push's Projs, then splice the Push into the schedule */
+ exchange(old_proj_M, proj_M);
+ exchange(sp, proj_res);
+ sched_add_before(next, push);
+ sched_add_after(push, proj_res);
+}
+
+/**
+ * Creates a Pop from IncSP(Load(sp))
+ *
+ * Peephole: if a be_IncSP's memory input comes from a Load that reads
+ * through the stack pointer (no offset, no index register), the
+ * Load+IncSP pair is replaced by a single ia32 Pop node, which reads the
+ * value and adjusts the stack pointer in one instruction.
+ *
+ * @param irn  the candidate be_IncSP node
+ * @param cg   the ia32 code generator environment (arch_env, isa)
+ */
+static void ia32_create_Pop(ir_node *irn, ia32_code_gen_t *cg) {
+ ir_node *old_proj_M = be_get_IncSP_mem(irn);
+ ir_node *load = skip_Proj(old_proj_M);
+ ir_node *old_proj_res = NULL;
+ ir_node *bl, *pop, *next, *proj_res, *proj_sp, *proj_M;
+ const ir_edge_t *edge;
+ const arch_register_t *reg, *sp;
+
+ /* the memory must come from an ia32 Load with no address-mode offset */
+ if (! is_ia32_Load(load) || get_ia32_am_offs(load))
+ return;
+
+ /* Load input 1 is the index register: must be unused (NOREG) */
+ if (arch_get_irn_register(cg->arch_env, get_irn_n(load, 1)) !=
+ &ia32_gp_regs[REG_GP_NOREG])
+ return;
+ /* Load input 0 is the base: must be the stack pointer itself */
+ if (arch_get_irn_register(cg->arch_env, get_irn_n(load, 0)) != cg->isa->sp)
+ return;
+
+ /* ok, translate into pop */
+ /* find the Load's result Proj: the out edge that is not the memory Proj */
+ foreach_out_edge(load, edge) {
+ ir_node *succ = get_edge_src_irn(edge);
+ if (succ != old_proj_M) {
+ old_proj_res = succ;
+ break;
+ }
+ }
+ /* a Load whose result is never used should not reach this point */
+ if (! old_proj_res) {
+ assert(0);
+ return; /* should not happen */
+ }
+
+ bl = get_nodes_block(load);
+
+ /* IncSP is typically scheduled after the load, so remove it first */
+ sched_remove(irn);
+ /* remember the insertion point before unlinking the Load and its Proj */
+ next = sched_next(old_proj_res);
+ sched_remove(old_proj_res);
+ sched_remove(load);
+
+ /* save the register assignments so they can be moved onto the new Projs */
+ reg = arch_get_irn_register(cg->arch_env, load);
+ sp = arch_get_irn_register(cg->arch_env, irn);
+
+ /* build the Pop from the IncSP's stack-pointer input and the Load's
+  * memory; Proj 0 = loaded value, Proj 1 = new SP, Proj 2 = memory */
+ pop = new_rd_ia32_Pop(NULL, current_ir_graph, bl, get_irn_n(irn, 0), get_irn_n(load, 2), mode_T);
+ proj_res = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(old_proj_res), 0);
+ proj_sp = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(irn), 1);
+ proj_M = new_r_Proj(current_ir_graph, bl, pop, mode_M, 2);
+
+ /* rewire all users of the old Projs/IncSP onto the Pop's Projs */
+ exchange(old_proj_M, proj_M);
+ exchange(old_proj_res, proj_res);
+ exchange(irn, proj_sp);
+
+ /* the Pop's result keeps the Load's register, its SP Proj keeps
+  * the stack-pointer register */
+ arch_set_irn_register(cg->arch_env, proj_res, reg);
+ arch_set_irn_register(cg->arch_env, proj_sp, sp);
+
+ /* splice Pop, result Proj and SP Proj into the schedule in order */
+ sched_add_before(next, proj_sp);
+ sched_add_before(proj_sp, proj_res);
+ sched_add_before(proj_res,pop);
+}
+
/**
* Tries to optimize two following IncSP.
*/
be_set_IncSP_offset(irn, (unsigned)new_ofs);
be_set_IncSP_direction(irn, curr_dir);
}
+ else
+ ia32_create_Pop(irn, cg);
}
/**
ia32_optimize_CondJmp(irn, cg);
else if (be_is_IncSP(irn))
ia32_optimize_IncSP(irn, cg);
+ else if (is_ia32_Store(irn))
+ ia32_create_Push(irn, cg);
}