+/**
+ * Post-walker: checks for every Reload whether its user can perform the
+ * load itself as a memory operand.  If so, the load is folded into the
+ * user via arch_perform_memory_operand() and the then-unused Reload is
+ * removed from the schedule.
+ *
+ * @param irn  the node currently visited by the walker
+ * @param env  the be_chordal_env_t of the current register class
+ */
+static void memory_operand_walker(ir_node *irn, void *env) {
+ be_chordal_env_t *cenv = env;
+ const arch_env_t *aenv = cenv->birg->main_env->arch_env;
+ const ir_edge_t *edge, *ne;
+ ir_node *block;
+ ir_node *spill;
+
+ if (! be_is_Reload(irn))
+ return;
+
+ /* only use memory operands if the reload is used by exactly 1 node */
+ if (get_irn_n_edges(irn) > 1)
+ return;
+
+ spill = be_get_Reload_mem(irn);
+ block = get_nodes_block(irn);
+
+ /* use the safe edge iterator: arch_perform_memory_operand() may
+ * rewire the out edges of irn while we iterate over them */
+ foreach_out_edge_safe(irn, edge, ne) {
+ ir_node *src = get_edge_src_irn(edge);
+ int pos = get_edge_src_pos(edge);
+
+ if (! src)
+ continue;
+
+ /* folding is only done inside the same block and only if the
+ * backend supports a memory operand at this input position */
+ if (get_nodes_block(src) == block && arch_possible_memory_operand(aenv, src, pos)) {
+ DBG((cenv->dbg, LEVEL_3, "performing memory operand %+F at %+F\n", irn, src));
+ arch_perform_memory_operand(aenv, src, spill, pos);
+ }
+ }
+
+ /* kill the Reload if all its users were folded away */
+ if (get_irn_n_edges(irn) == 0) {
+ sched_remove(irn);
+ set_irn_n(irn, 0, new_Bad());
+ set_irn_n(irn, 1, new_Bad());
+ }
+}
+
+/**
+ * Starts a graph walk that tries to fold Reloads into memory operands
+ * of their users (see memory_operand_walker(), run as post-walker).
+ *
+ * NOTE(review): no backend-capability check happens here despite the
+ * original comment; the walker queries arch_possible_memory_operand()
+ * per use instead — confirm that is the intended "supported" check.
+ *
+ * @param chordal_env  the chordal environment passed through to the walker
+ */
+static INLINE void check_for_memory_operands(be_chordal_env_t *chordal_env) {
+ irg_walk_graph(chordal_env->irg, NULL, memory_operand_walker, chordal_env);
+}
+
+/**
+ * Statistics about the nodes of one register class, collected by a
+ * graph walk after spilling/coloring.
+ */
+typedef struct _node_stat_t {
+ unsigned int n_phis; /**< Phis of the current register class. */
+ unsigned int n_mem_phis; /**< Memory Phis (Phis with spill operands). */
+ unsigned int n_copies; /**< Copy nodes. */
+ unsigned int n_perms; /**< Perm nodes. */
+ unsigned int n_spills; /**< Spill nodes. */
+ unsigned int n_reloads; /**< Reload nodes. */
+} node_stat_t;
+
+/** Walker environment used while collecting node statistics. */
+struct node_stat_walker {
+ node_stat_t *stat; /**< The statistics record being filled in. */
+ const be_chordal_env_t *cenv; /**< Chordal environment of the current class. */
+ bitset_t *mem_phis; /**< Presumably marks the memory Phis seen so far — confirm in the walker body. */
+};
+
+static void node_stat_walker(ir_node *irn, void *data)