+
+/**************************************************
+ * _ _ __
+ * | | (_)/ _|
+ * ___ ___ __| | ___ __ _ ___ _ __ _| |_
+ * / __/ _ \ / _` |/ _ \/ _` |/ _ \ '_ \ | | _|
+ * | (_| (_) | (_| | __/ (_| | __/ | | | | | |
+ * \___\___/ \__,_|\___|\__, |\___|_| |_| |_|_|
+ * __/ |
+ * |___/
+ **************************************************/
+
+static void ia32_kill_convs(ia32_code_gen_t *cg) {
+ ir_node *irn;
+
+ /* BEWARE: the Projs are inserted in the set */
+ foreach_nodeset(cg->kill_conv, irn) {
+ ir_node *in = get_irn_n(get_Proj_pred(irn), 2);
+ edges_reroute(irn, in, cg->birg->irg);
+ }
+}
+
+/**
+ * Transforms the standard firm graph into
+ * an ia32 firm graph
+ */
+static void ia32_prepare_graph(void *self) {
+ ia32_code_gen_t *cg = self;
+ dom_front_info_t *dom;
+ DEBUG_ONLY(firm_dbg_module_t *old_mod = cg->mod;)
+
+ FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.transform");
+
+ /* 1st: transform constants and psi condition trees */
+ ia32_pre_transform_phase(cg);
+
+ /* 2nd: transform all remaining nodes */
+ ia32_register_transformers();
+ dom = be_compute_dominance_frontiers(cg->irg);
+
+ cg->kill_conv = new_nodeset(5);
+ irg_walk_blkwise_graph(cg->irg, NULL, ia32_transform_node, cg);
+ ia32_kill_convs(cg);
+ del_nodeset(cg->kill_conv);
+
+ be_free_dominance_frontiers(dom);
+
+ if (cg->dump)
+ be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);
+
+ /* 3rd: optimize address mode */
+ FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.am");
+ ia32_optimize_addressmode(cg);
+
+ if (cg->dump)
+ be_dump(cg->irg, "-am", dump_ir_block_graph_sched);
+
+ DEBUG_ONLY(cg->mod = old_mod;)
+}
+
/**
 * Dummy functions for hooks we don't need but which must be filled.
 */
static void ia32_before_sched(void *self) {
	/* intentionally empty: nothing to do before scheduling on ia32 */
}
+
/**
 * Removes the given node from the schedule and disconnects it from its
 * predecessors if it has become unused, recursing upwards: whenever a
 * predecessor loses its last user through the disconnect, it is removed
 * as well. Memory Projs are never followed, so stores stay alive.
 *
 * @param irn              the node to check (Blocks are ignored)
 * @param already_visited  per-irg bitset of nodes already handled
 */
static void remove_unused_nodes(ir_node *irn, bitset_t *already_visited) {
	int i;
	ir_mode *mode;
	ir_node *mem_proj;

	if (is_Block(irn))
		return;

	mode = get_irn_mode(irn);

	/* check if we already saw this node or the node has more than one user */
	if (bitset_contains_irn(already_visited, irn) || get_irn_n_edges(irn) > 1)
		return;

	/* mark irn visited */
	bitset_add_irn(already_visited, irn);

	/* non-Tuple nodes with one user: ok, return */
	if (get_irn_n_edges(irn) >= 1 && mode != mode_T)
		return;

	/* tuple node has one user which is not the mem proj -> ok */
	if (mode == mode_T && get_irn_n_edges(irn) == 1) {
		mem_proj = ia32_get_proj_for_mode(irn, mode_M);
		if (! mem_proj)
			return;
	}

	/* at this point irn is dead: detach all inputs, then recurse */
	for (i = get_irn_arity(irn) - 1; i >= 0; i--) {
		ir_node *pred = get_irn_n(irn, i);

		/* do not follow memory edges or we will accidentally remove stores */
		if (is_Proj(pred) && get_irn_mode(pred) == mode_M)
			continue;

		/* replace the input with Bad so pred's user count drops */
		set_irn_n(irn, i, new_Bad());

		/*
		   The current node is about to be removed: if the predecessor
		   has only this node as user, it needs to be removed as well.
		*/
		if (get_irn_n_edges(pred) <= 1)
			remove_unused_nodes(pred, already_visited);
	}

	if (sched_is_scheduled(irn))
		sched_remove(irn);
}
+
+static void remove_unused_loads_walker(ir_node *irn, void *env) {
+ bitset_t *already_visited = env;
+ if (is_ia32_Ld(irn) && ! bitset_contains_irn(already_visited, irn))
+ remove_unused_nodes(irn, env);
+}
+
+/**
+ * Called before the register allocator.
+ * Calculate a block schedule here. We need it for the x87
+ * simulator and the emitter.
+ */
+static void ia32_before_ra(void *self) {
+ ia32_code_gen_t *cg = self;
+ bitset_t *already_visited = bitset_irg_malloc(cg->irg);
+
+ cg->blk_sched = sched_create_block_schedule(cg->irg);
+
+ /*
+ Handle special case:
+ There are sometimes unused loads, only pinned by memory.
+ We need to remove those Loads and all other nodes which won't be used
+ after removing the Load from schedule.
+ */
+ irg_walk_graph(cg->irg, remove_unused_loads_walker, NULL, already_visited);
+ bitset_free(already_visited);
+}
+
+
/**
 * Transforms a be node (Reload or similar frame access) into an ia32 Load
 * from its frame entity, keeps the schedule position and register
 * assignment, and replaces the old node by the Load's result Proj.
 *
 * @param env  transform environment; env->irn is the node to replace,
 *             env->mode the mode of the loaded value
 */
static void transform_to_Load(ia32_transform_env_t *env) {
	ir_node *irn = env->irn;
	entity *ent = be_get_frame_entity(irn);
	ir_mode *mode = env->mode;
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *nomem = new_rd_NoMem(env->irg);
	ir_node *sched_point = NULL;
	ir_node *ptr = get_irn_n(irn, 0);
	/* Reloads carry an explicit memory operand in slot 1; others use NoMem */
	ir_node *mem = be_is_Reload(irn) ? get_irn_n(irn, 1) : nomem;
	ir_node *new_op, *proj;
	const arch_register_t *reg;

	/* remember the schedule predecessor so the Load can take irn's slot */
	if (sched_is_scheduled(irn)) {
		sched_point = sched_prev(irn);
	}

	/* pick the load flavour: SSE2 / x87 for floats, plain Load otherwise */
	if (mode_is_float(mode)) {
		if (USE_SSE2(env->cg))
			new_op = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, ptr, noreg, mem);
		else
			new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem);
	}
	else {
		new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem);
	}

	/* address the value through the frame entity (base + entity offset) */
	set_ia32_am_support(new_op, ia32_am_Source);
	set_ia32_op_type(new_op, ia32_AddrModeS);
	set_ia32_am_flavour(new_op, ia32_B);
	set_ia32_ls_mode(new_op, mode);
	set_ia32_frame_ent(new_op, ent);
	set_ia32_use_frame(new_op);

	DBG_OPT_RELOAD2LD(irn, new_op);

	/* NOTE(review): uses generic pn_Load_res while transform_to_Store uses
	   the ia32-specific pn_ia32_Store_M — confirm the proj numbers agree */
	proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, pn_Load_res);

	if (sched_point) {
		sched_add_after(sched_point, new_op);
		sched_add_after(new_op, proj);

		sched_remove(irn);
	}

	/* copy the register from the old node to the new Load */
	reg = arch_get_irn_register(env->cg->arch_env, irn);
	arch_set_irn_register(env->cg->arch_env, new_op, reg);

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, irn));

	exchange(irn, proj);
}
+
/**
 * Transforms a be node (Spill or similar frame access) into an ia32 Store
 * to its frame entity, keeps the schedule position, and replaces the old
 * node by the Store's memory Proj.
 *
 * @param env  transform environment; env->irn is the node to replace,
 *             env->mode the mode of the stored value
 */
static void transform_to_Store(ia32_transform_env_t *env) {
	ir_node *irn = env->irn;
	entity *ent = be_get_frame_entity(irn);
	ir_mode *mode = env->mode;
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *nomem = new_rd_NoMem(env->irg);
	ir_node *ptr = get_irn_n(irn, 0);
	ir_node *val = get_irn_n(irn, 1);
	ir_node *new_op, *proj;
	ir_node *sched_point = NULL;

	/* remember the schedule predecessor so the Store can take irn's slot */
	if (sched_is_scheduled(irn)) {
		sched_point = sched_prev(irn);
	}

	/* pick the store flavour: SSE2 / x87 for floats, 8-bit store for byte
	   values (needs byte-addressable registers), plain Store otherwise */
	if (mode_is_float(mode)) {
		if (USE_SSE2(env->cg))
			new_op = new_rd_ia32_xStore(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
		else
			new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
	}
	else if (get_mode_size_bits(mode) == 8) {
		new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
	}
	else {
		new_op = new_rd_ia32_Store(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
	}

	/* address the slot through the frame entity (base + entity offset) */
	set_ia32_am_support(new_op, ia32_am_Dest);
	set_ia32_op_type(new_op, ia32_AddrModeD);
	set_ia32_am_flavour(new_op, ia32_B);
	set_ia32_ls_mode(new_op, mode);
	set_ia32_frame_ent(new_op, ent);
	set_ia32_use_frame(new_op);

	DBG_OPT_SPILL2ST(irn, new_op);

	/* users of the old node expect a memory result */
	proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode_M, pn_ia32_Store_M);

	if (sched_point) {
		sched_add_after(sched_point, new_op);
		sched_remove(irn);
	}

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, irn));

	exchange(irn, proj);
}
+
+static ir_node *create_push(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, ir_node *mem, entity *ent, const char *offset) {
+ ir_node *noreg = ia32_new_NoReg_gp(env->cg);
+
+ ir_node *push = new_rd_ia32_Push(env->dbg, env->irg, env->block, sp, noreg, mem);
+
+ set_ia32_frame_ent(push, ent);
+ set_ia32_use_frame(push);
+ set_ia32_op_type(push, ia32_AddrModeS);
+ set_ia32_am_flavour(push, ia32_B);
+ set_ia32_ls_mode(push, mode_Is);
+ if(offset != NULL)
+ add_ia32_am_offs(push, offset);
+
+ sched_add_before(schedpoint, push);
+ return push;