+ int i;
+ int *unk_vnum; /**< An arraw, where are saved the value number, that
+ are synced from this sync node.*/
+ ent_leaves_t *value_ent;
+ value_arr_entry_t *val_arr_blk, *val_arr;
+ ir_node *pred, *leave, *sync, **in;
+ ir_node *sync_blk; /**< The block, where the sync node must be created.*/
+
+
+ val_arr_blk = get_irn_link(blk);
+
+ for(value_ent = set_first(env->set_ent); value_ent; value_ent = set_next(env->set_ent)) {
+
+
+ if(val_arr_blk[GET_ENT_VNUM(value_ent->ent)].access_type <= 3)
+ /* This entity is not stored in this block.*/
+ continue;
+
+ for(i = get_Block_n_cfgpreds(blk) - 1; i >= 0; i--) {
+
+ pred = get_Block_cfgpred(blk, i);
+ pred = get_nodes_block(pred);
+ val_arr = get_irn_link(pred);
+
+ if(val_arr[GET_ENT_VNUM(value_ent->ent)].access_type == SYNCED)
+ /* This entity was synced.*/
+ continue;
+
+ if(val_arr[GET_ENT_VNUM(value_ent->ent)].access_type <= 3) {
+
+ /* To avoid repeated sync of this entity in this block.*/
+ val_arr[GET_ENT_VNUM(value_ent->ent)].access_type = SYNCED;
+ /* In this predecessor block is this entity not acessed.
+ * We must sync in the end ot this block.*/
+ if(get_Block_n_cfgpreds(blk) > 1)
+ sync_blk = get_nodes_block(get_Block_cfgpred(blk, i));
+ else
+ sync_blk = blk;
+
+ val_arr = get_irn_link(sync_blk);
+ /* An array to save the memory edges, that must be
+ * synced.*/
+ in = NEW_ARR_F(ir_node *, 1);
+
+ /* An array to save the value numbers,
+ * that must be repaired.*/
+ unk_vnum = NEW_ARR_F(int, 0);
+ /* The global memory edge.*/
+ if(val_arr[env->gl_mem_vnum].mem_edge_state == NULL)
+ in[0] = new_Unknown(mode_M);
+ else
+ in[0] = val_arr[env->gl_mem_vnum].mem_edge_state;
+
+ for(leave = pset_first(value_ent->leaves); leave; leave = pset_next(value_ent->leaves))
+ /* All this memory edges must be synced.*/
+ add_mem_edge(val_arr, GET_IRN_VNUM(leave), &in, &unk_vnum);
+
+ /* We create the sync and set it in the global memory state.*/
+ sync = new_r_Sync(current_ir_graph, sync_blk, ARR_LEN(in), in);
+ /* We must check this, why it is possible to get a Bad node
+ * form new_r_Sync(), when the node can be optimized.
+ * In this case we must do nothing.*/
+ if(get_irn_op(sync) == op_Sync) {
+ val_arr[env->gl_mem_vnum].mem_edge_state = sync;
+ /* We add this sync node to the sync's fix list.*/
+ add_sync_to_fixlist(val_arr[env->gl_mem_vnum].mem_edge_state, unk_vnum, env);
+ }
+ DEL_ARR_F(in);
+ }
+ }
+ }
+}
+/**
+ * The function splits the memory edge of Load and Store nodes that have
+ * a scalar as predecessor.
+ *
+ * @param irn The node whose memory edge must be split.
+ * @param env The environment pointer.
+ */
+static void split_ls_mem_edge(ir_node *irn, env_t *env) {
+
+ ir_op *op;
+ ir_node *leave, *irn_blk, *mem_state, *new_mem_state;
+ unsigned ent_vnum, sel_vnum, i;
+ value_arr_entry_t *val_arr;
+ sels_t key_sels, *value_sels;
+ ent_leaves_t key_ent, *value_ent;
+
+ op = get_irn_op(irn);
+
+ /* The address operand (a Sel) is the key for the lookup in the sels set. */
+ if(op == op_Load)
+ key_sels.sel = get_Load_ptr(irn);
+ else
+ key_sels.sel = get_Store_ptr(irn);
+
+ value_sels = set_find(env->set_sels, &key_sels, sizeof(key_sels), HASH_PTR(key_sels.sel));
+
+ if(value_sels != NULL) {
+ /* We have found a Load or Store that uses a Sel of our set,
+ * and we must split the memory edge, or extend it if the memory
+ * edge has already been split for this Sel. */
+
+ key_ent.ent = value_sels->ent;
+ value_ent = set_find(env->set_ent, &key_ent, sizeof(key_ent), HASH_PTR(key_ent.ent));
+ /* Check that the entities set is filled correctly. */
+ assert(value_ent && " This sel's entity isn't int the entity set.");
+
+ leave = pset_find(value_ent->leaves, key_sels.sel, HASH_PTR(get_Sel_entity(key_sels.sel)));
+ /* Check that the leaves set is filled correctly. */
+ assert(leave && "Anything in data_flow_scalar_replacment algorithm is wrong.");
+
+ ent_vnum = GET_ENT_VNUM(value_ent->ent);
+ sel_vnum = GET_IRN_VNUM(leave);
+ irn_blk = get_nodes_block(irn);
+ val_arr = get_irn_link(irn_blk);
+
+ /* Select the value number whose memory edge this access uses:
+ * the leaf's own edge while the scalar's address was never stored,
+ * otherwise the global memory edge. */
+ if(val_arr[ent_vnum].access_type == 0)
+ /* We have found a scalar whose address is not stored as yet. */
+ i = sel_vnum;
+ else
+ /* This scalar's address has been stored. */
+ i = env->gl_mem_vnum;
+
+ if(val_arr[i].mem_edge_state == NULL) {
+ /* We now split the memory edge for this Sel in this block.
+ * The Unknown is a placeholder that is repaired later. */
+ mem_state = new_Unknown(mode_M);
+ /* We must mark this node to be fixed later. */
+ add_ls_to_fixlist(irn, i, env);
+ }
+ else
+ /* The memory edge was already split and the current state is saved. */
+ mem_state = val_arr[i].mem_edge_state;
+
+ /* Hang this Load or Store into the memory edge of this Sel. */
+ if(op == op_Load)
+ set_Load_mem(irn, mem_state);
+ else
+ set_Store_mem(irn, mem_state);
+
+ /* Having split or extended the memory edge, we must update the
+ * mem_edge_state of this Sel with the node's memory result
+ * (the out edge whose mode is mode_M). */
+ new_mem_state = get_irn_out(irn, 0);
+ if(get_irn_mode(new_mem_state) == mode_M)
+ val_arr[i].mem_edge_state = new_mem_state;
+ else
+ val_arr[i].mem_edge_state = get_irn_out(irn, 1);
+ }
+}
+
+/**
+ * The function splits the memory edge of Phi nodes that have
+ * a scalar as predecessor.
+ *
+ * @param irn The Phi node whose memory edge must be split.
+ * @param env The environment pointer.
+ */
+static void split_phi_mem_edge(ir_node *irn, env_t *env) {
+
+ ir_node *irn_blk, *unk, *leave, **in;
+ int n, j;
+ ent_leaves_t *value_ent;
+ value_arr_entry_t *val_arr;
+
+ irn_blk = get_nodes_block(irn);
+ val_arr = get_irn_link(irn_blk);
+
+ n = get_Block_n_cfgpreds(irn_blk);
+
+ /* One Phi input per control-flow predecessor of the block. */
+ in = alloca(sizeof(*in) * n);
+
+ for(value_ent = set_first(env->set_ent); value_ent; value_ent = set_next(env->set_ent))
+ if(val_arr[GET_ENT_VNUM(value_ent->ent)].access_type < 3)
+ /* This scalar has not been saved and we need to produce a Phi for it. */
+ for(leave = pset_first(value_ent->leaves); leave; leave = pset_next(value_ent->leaves)){
+
+ /* All inputs start as the same Unknown placeholder; the Phi is
+ * put on the fix list so its inputs can be repaired later. */
+ unk = new_Unknown(mode_M);
+ for (j = n - 1; j >= 0; --j)
+ in[j] = unk;
+
+ val_arr[GET_IRN_VNUM(leave)].mem_edge_state = new_r_Phi(current_ir_graph, irn_blk, n, in, mode_M);
+
+ add_ls_to_fixlist(val_arr[GET_IRN_VNUM(leave)].mem_edge_state, GET_IRN_VNUM(leave), env);
+ }
+
+ /* For the global memory we use the Phi node that is already available. */
+ val_arr[env->gl_mem_vnum].mem_edge_state = irn;
+}
+
+/**
+ * The function handles Call nodes that have
+ * a scalar as parameter.
+ *
+ * @param env The environment pointer.
+ * @param call The Call node that must be handled.
+ * @param accessed_entities A set with all entities that are accessed from this call node.*/
+static void split_call_mem_edge(env_t *env, ir_node *call, pset *accessed_entities) {
+
+ ent_leaves_t key_ent, *value_ent;
+ value_arr_entry_t *val_arr;
+ call_access_t key_call, *value_call;
+ ir_node *call_blk, *new_mem_state, *leave;
+ ir_node *sync, **in;
+ ir_entity *ent;
+ unsigned ent_vnum;
+ int fix_irn = 0; /**< Set to 1 if we must add this call to it fix list.*/
+ int *accessed_leaves_vnum = NULL; /**< An arraw, where are saved the value number, that
+ are synced from call's sync node, if we need it.*/