+/**
+ * Transforms a be node into a Load.
+ *
+ * Replaces the be Spill/Reload node env->irn with an ia32 Load from the
+ * node's frame entity, splices the new node into the schedule in place of
+ * the old one, copies the register assignment, and finally exchanges the
+ * old node with the result Proj of the new Load.
+ */
+static void transform_to_Load(ia32_transform_env_t *env) {
+ ir_node *irn = env->irn;
+ entity *ent = be_get_frame_entity(irn);
+ ir_mode *mode = env->mode;
+ ir_node *noreg = ia32_new_NoReg_gp(env->cg);
+ ir_node *nomem = new_rd_NoMem(env->irg);
+ ir_node *sched_point = NULL;
+ ir_node *ptr = get_irn_n(irn, 0);
+ /* a Reload carries an explicit memory dependency at input 1; other
+  * be nodes handled here have none, so use NoMem for them */
+ ir_node *mem = be_is_Reload(irn) ? get_irn_n(irn, 1) : nomem;
+ ir_node *new_op, *proj;
+ const arch_register_t *reg;
+
+ /* remember the predecessor so the new node can take the old one's
+  * place in the schedule */
+ if (sched_is_scheduled(irn)) {
+ sched_point = sched_prev(irn);
+ }
+
+ /* pick the load flavour: SSE2 load vs. virtual x87 load for floats,
+  * plain integer Load otherwise */
+ if (mode_is_float(mode)) {
+ if (USE_SSE2(env->cg))
+ new_op = new_rd_ia32_fLoad(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
+ else
+ new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
+ }
+ else {
+ new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
+ }
+
+ /* configure source address mode: load from the frame entity */
+ set_ia32_am_support(new_op, ia32_am_Source);
+ set_ia32_op_type(new_op, ia32_AddrModeS);
+ set_ia32_am_flavour(new_op, ia32_B);
+ set_ia32_ls_mode(new_op, mode);
+ set_ia32_frame_ent(new_op, ent);
+ set_ia32_use_frame(new_op);
+
+ proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, pn_Load_res);
+
+ /* splice Load and Proj into the schedule where the old node was */
+ if (sched_point) {
+ sched_add_after(sched_point, new_op);
+ sched_add_after(new_op, proj);
+
+ sched_remove(irn);
+ }
+
+ /* copy the register from the old node to the new Load */
+ reg = arch_get_irn_register(env->cg->arch_env, irn);
+ arch_set_irn_register(env->cg->arch_env, new_op, reg);
+
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, new_op));
+
+ /* redirect all users of the old node to the Load's result */
+ exchange(irn, proj);
+}
+
+/**
+ * Transforms a be node into a Store.
+ *
+ * Replaces the be Spill node env->irn with an ia32 Store to the node's
+ * frame entity, splices the new node into the schedule in place of the
+ * old one, and exchanges the old node with the memory Proj of the Store.
+ */
+static void transform_to_Store(ia32_transform_env_t *env) {
+ ir_node *irn = env->irn;
+ entity *ent = be_get_frame_entity(irn);
+ ir_mode *mode = env->mode;
+ ir_node *noreg = ia32_new_NoReg_gp(env->cg);
+ ir_node *nomem = new_rd_NoMem(env->irg);
+ ir_node *ptr = get_irn_n(irn, 0);
+ /* input 1 of the be node is the value to be spilled */
+ ir_node *val = get_irn_n(irn, 1);
+ ir_node *new_op, *proj;
+ ir_node *sched_point = NULL;
+
+ /* remember the predecessor so the new node can take the old one's
+  * place in the schedule */
+ if (sched_is_scheduled(irn)) {
+ sched_point = sched_prev(irn);
+ }
+
+ /* pick the store flavour: SSE2 store vs. virtual x87 store for floats,
+  * 8-bit store for byte modes, plain Store otherwise */
+ if (mode_is_float(mode)) {
+ if (USE_SSE2(env->cg))
+ new_op = new_rd_ia32_fStore(env->dbg, env->irg, env->block, ptr, noreg, val, nomem, mode_T);
+ else
+ new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, nomem, mode_T);
+ }
+ else if (get_mode_size_bits(mode) == 8) {
+ new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, ptr, noreg, val, nomem, mode_T);
+ }
+ else {
+ new_op = new_rd_ia32_Store(env->dbg, env->irg, env->block, ptr, noreg, val, nomem, mode_T);
+ }
+
+ /* configure destination address mode: store to the frame entity */
+ set_ia32_am_support(new_op, ia32_am_Dest);
+ set_ia32_op_type(new_op, ia32_AddrModeD);
+ set_ia32_am_flavour(new_op, ia32_B);
+ set_ia32_ls_mode(new_op, mode);
+ set_ia32_frame_ent(new_op, ent);
+ set_ia32_use_frame(new_op);
+
+ /* projection 0 is the memory result of the Store
+  * NOTE(review): looks like this should be a pn_*_M constant — confirm */
+ proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode_M, 0);
+
+ /* splice Store and Proj into the schedule where the old node was */
+ if (sched_point) {
+ sched_add_after(sched_point, new_op);
+ sched_add_after(new_op, proj);
+
+ sched_remove(irn);
+ }
+
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, new_op));
+
+ /* redirect all users of the old node to the Store's memory result */
+ exchange(irn, proj);
+}
+
+/**
+ * Fix the mode of Spill/Reload.
+ *
+ * Spills and reloads always transfer a whole register, so the given mode
+ * is widened to the canonical register-sized mode: mode_D (SSE2) or
+ * mode_E (x87) for floats, mode_Is for everything else.
+ */
+static ir_mode *fix_spill_mode(ia32_code_gen_t *cg, ir_mode *mode)
+{
+	if (! mode_is_float(mode))
+		return mode_Is;
+
+	return USE_SSE2(cg) ? mode_D : mode_E;
+}
+
+/**
+ * Block-Walker: Calls the transform functions Spill and Reload.
+ *
+ * Walks the block's schedule from back to front and rewrites every be
+ * Spill into an ia32 Store and every be Reload into an ia32 Load.
+ */
+static void ia32_after_ra_walker(ir_node *block, void *env) {
+ ir_node *node, *prev;
+ ia32_code_gen_t *cg = env;
+ ia32_transform_env_t tenv;
+
+ tenv.block = block;
+ tenv.irg = current_ir_graph;
+ tenv.cg = cg;
+ DEBUG_ONLY(tenv.mod = cg->mod;)
+
+ /* beware: the schedule is changed here */
+ /* prev is fetched before transforming, because the transform functions
+  * remove the current node from the schedule */
+ for (node = sched_last(block); !sched_is_begin(node); node = prev) {
+ prev = sched_prev(node);
+ if (be_is_Reload(node)) {
+ /* we always reload the whole register */
+ tenv.dbg = get_irn_dbg_info(node);
+ tenv.irn = node;
+ tenv.mode = fix_spill_mode(cg, get_irn_mode(node));
+ transform_to_Load(&tenv);
+ }
+ else if (be_is_Spill(node)) {
+ /* we always spill the whole register */
+ /* the Spill itself has mode M; take the mode from the spilled value
+  * via the Spill context */
+ tenv.dbg = get_irn_dbg_info(node);
+ tenv.irn = node;
+ tenv.mode = fix_spill_mode(cg, get_irn_mode(be_get_Spill_context(node)));
+ transform_to_Store(&tenv);
+ }
+ }
+}
+
+/**
+ * Post register allocation phase.
+ *
+ * First lowers all be Spill/Reload nodes to real ia32 memory operations.
+ * This needs to be done before stack biasing, otherwise the corrected
+ * offsets for these nodes would be missed.
+ *
+ * Afterwards, if x87 instructions should be emitted, runs the x87
+ * simulator to patch the virtual instructions — which obviously has to
+ * happen after register allocation.
+ */
+static void ia32_after_ra(void *self) {
+	ia32_code_gen_t *cg = self;
+
+	irg_block_walk_graph(cg->irg, NULL, ia32_after_ra_walker, cg);
+
+	/* doing x87 code generation: rewrite all virtual instructions and registers */
+	if (cg->used_x87)
+		x87_simulate_graph(cg->arch_env, cg->irg, cg->blk_sched);
+}
+