/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   parallelizing Load/Store optimisation
 * @author  Christoph Mallon
 */

#include <assert.h>
#include <stdlib.h>

#include "iroptimize.h"

#include "array.h"
#include "debug.h"
#include "ircons.h"
#include "irdump.h"
#include "irflag_t.h"
#include "irgmod.h"
#include "irgopt.h"
#include "irgraph.h"
#include "irgwalk.h"
#include "irmemory.h"
#include "irnode.h"
#include "irnodeset.h"
#include "obst.h"
#include "xmalloc.h"

#define OPTIMISE_LOAD_AFTER_LOAD

#define UNIMPLEMENTED abort();

DEBUG_ONLY(static firm_dbg_module_t *dbg);

static struct obstack obst;
static size_t         count_addrs;
static ir_node**      addrs;
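
/* Walker: if the node is a Load or Store, add its address to the address set
 * passed in the environment. */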
static void AddressCollector(ir_node* node, void* env)
{
	ir_nodeset_t* addrs_set = env;
	ir_node* addr;
	if (is_Load(node)) {
		addr = get_Load_ptr(node);
	} else if (is_Store(node)) {
		addr = get_Store_ptr(node);
	} else {
		return;
	}
	ir_nodeset_insert(addrs_set, addr);
}

/* Collects all unique addresses used by load and store nodes of a graph and
 * puts them into an array for later use */
static void CollectAddresses(ir_graph* irg)
{
	ir_nodeset_t addrs_set;

	ir_nodeset_init(&addrs_set);
	irg_walk_graph(irg, AddressCollector, NULL, &addrs_set);

	count_addrs = ir_nodeset_size(&addrs_set);
	DB((dbg, LEVEL_1, "===> %+F uses %u unique addresses\n", irg, (unsigned int)count_addrs));
	if (count_addrs != 0) {
		ir_nodeset_iterator_t addr_iter;
		size_t i;

		addrs = NEW_ARR_D(ir_node*, &obst, count_addrs);
		ir_nodeset_iterator_init(&addr_iter, &addrs_set);
		for (i = 0; i < count_addrs; i++) {
			ir_node* addr = ir_nodeset_iterator_next(&addr_iter);
			addrs[i] = addr;
			/* The link field of an address holds its index in the addrs array. */
			set_irn_link(addr, (void *)i);
			DB((dbg, LEVEL_2, "===> Collected unique symbolic address %+F\n", addr));
		}
	}

	ir_nodeset_destroy(&addrs_set);
}
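
/* Block walker: attach an array of alias sets, one per collected address, to
 * each block via its link field.  alias_set[i] holds the memory nodes a new
 * access to addrs[i] must be serialised after. */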
static void AliasSetAdder(ir_node* block, void* env)
{
	ir_nodeset_t* alias_set;
	size_t i;
	(void)env;

	alias_set = NEW_ARR_D(ir_nodeset_t, &obst, count_addrs);
	for (i = 0; i < count_addrs; i++) {
		ir_nodeset_init(&alias_set[i]);
	}
	set_irn_link(block, alias_set);
}
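
/* In the start block, let every address initially depend on the initial
 * memory of the graph. */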
static void SetStartAddressesTop(ir_graph* irg)
{
	ir_node* initial_mem;
	ir_node* start_block;
	ir_nodeset_t* start_addrs;
	size_t i;

	initial_mem = get_irg_initial_mem(irg);
	start_block = get_irg_start_block(irg);
	start_addrs = get_irn_link(start_block);
	for (i = 0; i < count_addrs; i++) {
		ir_nodeset_insert(&start_addrs[i], initial_mem);
	}
	mark_Block_block_visited(start_block);
}
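
/* Block walker: free the per-block alias sets again. */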
static void AliasSetDestroyer(ir_node* block, void* env)
{
	ir_nodeset_t* alias_set = get_irn_link(block);
	size_t i;
	(void)env;

	for (i = 0; i < count_addrs; i++) {
		ir_nodeset_destroy(&alias_set[i]);
	}
}
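
/* Determine how addr relates to the address used by the memory node other
 * (a Load or Store, possibly behind a Proj).  Anything else is conservatively
 * assumed to alias. */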
static ir_alias_relation AliasTest(ir_graph* irg, ir_node* addr, ir_mode* mode, ir_node* other)
{
	ir_node* other_addr;
	ir_mode* other_mode;

	if (is_Proj(other)) other = get_Proj_pred(other);

	if (is_Load(other)) {
		other_addr = get_Load_ptr(other);
	} else if (is_Store(other)) {
		other_addr = get_Store_ptr(other);
	} else {
		return ir_may_alias;
	}

	other_mode = get_irn_mode(other);
	return get_alias_relation(irg, addr, mode, other_addr, other_mode);
}
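
/* qsort comparator: order nodes deterministically by their node index. */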
static int in_cmp(void const* va, void const* vb)
{
	ir_node const* const a = *(ir_node const*const*)va;
	ir_node const* const b = *(ir_node const*const*)vb;
	return get_irn_idx(a) - get_irn_idx(b);
}
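
/* Create a memory dependency on all nodes in after_set: a single entry is
 * returned directly, multiple entries are merged by a Sync.  The inputs are
 * sorted to keep the result deterministic. */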
static ir_node* GenerateSync(ir_graph* irg, ir_node* block, ir_nodeset_t* after_set)
{
	size_t set_size = ir_nodeset_size(after_set);
	ir_nodeset_iterator_t iter;

	assert(set_size != 0);

	ir_nodeset_iterator_init(&iter, after_set);
	if (set_size == 1) {
		return ir_nodeset_iterator_next(&iter);
	} else {
		ir_node** in;
		size_t i;

		NEW_ARR_A(ir_node*, in, set_size);
		for (i = 0; i < set_size; i++) {
			in[i] = ir_nodeset_iterator_next(&iter);
		}
		qsort(in, set_size, sizeof(*in), in_cmp);
		return new_r_Sync(irg, block, set_size, in);
	}
}
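
/* Per address: head of a list (linked via the link field) of memory Phis
 * which still have Unknown inputs and must be repaired by FinalisePhis(). */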
static ir_node** unfinished_phis;
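
/* For every address build a memory Phi in block, merging the alias sets of
 * the predecessor blocks.  Predecessors whose sets are not finished yet get
 * an Unknown input; the Phi is then queued in unfinished_phis. */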
static void PlaceMemPhis(ir_graph* irg, ir_node* block, ir_node* phi)
{
	size_t block_n_preds = get_Block_n_cfgpreds(block);
	ir_nodeset_t* thissets;
	ir_node** in;
	size_t i;
	size_t j;

	thissets = get_irn_link(block);
	NEW_ARR_A(ir_node*, in, block_n_preds);
	for (j = 0; j < count_addrs; j++) {
		ir_node* new_phi;

		for (i = 0; i < block_n_preds; i++) {
			ir_node* pred_block = get_nodes_block(get_Phi_pred(phi, i)); // TODO get_Block_cfgpred_block(block, i);
			ir_nodeset_t* predsets = get_irn_link(pred_block);
			size_t predset_size = ir_nodeset_size(&predsets[j]);

			if (predset_size == 0) {
				in[i] = new_r_Unknown(irg, mode_M);
			} else {
				in[i] = GenerateSync(irg, pred_block, &predsets[j]);
			}
		}
		new_phi = new_r_Phi(irg, block, block_n_preds, in, mode_M);

		set_irn_link(new_phi, unfinished_phis[j]);
		unfinished_phis[j] = new_phi;

		ir_nodeset_insert(&thissets[j], new_phi);
	}
}
static int WalkMem(ir_graph* irg, ir_node* node, ir_node* last_block);
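
/* Walk all predecessors of a memory Phi, then replace it by per-address
 * memory Phis and remove the original node. */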
static void WalkMemPhi(ir_graph* irg, ir_node* block, ir_node* phi)
{
	size_t n = get_Phi_n_preds(phi);
	size_t i;

	for (i = 0; i < n; i++) {
		WalkMem(irg, get_Phi_pred(phi, i), block);
	}

	PlaceMemPhis(irg, block, phi);
	exchange(phi, new_Bad());
}
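
/* Hook a Load into the memory graph: it must be executed after everything in
 * the alias set of its own address, and it is entered into the alias sets of
 * all addresses it may alias. */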
static void PlaceLoad(ir_graph* irg, ir_node* block, ir_node* load, ir_node* memory)
{
	ir_node* addr = get_Load_ptr(load);
	size_t addr_idx = (size_t)get_irn_link(addr);
	ir_nodeset_t* interfere_sets = get_irn_link(block);
	ir_nodeset_t* interfere_set = &interfere_sets[addr_idx];
	size_t size = ir_nodeset_size(interfere_set);
	ir_nodeset_iterator_t interfere_iter;
	size_t i;

	assert(size != 0);
	ir_nodeset_iterator_init(&interfere_iter, interfere_set);
	if (size == 1) {
		ir_node* after = ir_nodeset_iterator_next(&interfere_iter);
		assert(!is_Proj(after) || !is_Load(get_Proj_pred(after)));
		DB((dbg, LEVEL_3, "===> %+F must be executed after %+F\n", load, after));
		set_Load_mem(load, after);
	} else {
		ir_node** after_set;
		ir_node* after;
		ir_node* mem;

		i = 0;
		NEW_ARR_A(ir_node*, after_set, size);
		while ((mem = ir_nodeset_iterator_next(&interfere_iter)) != NULL) {
			if (is_Proj(mem)) {
				ir_node* pred = get_Proj_pred(mem);
				if (is_Load(pred)) {
#ifdef OPTIMISE_LOAD_AFTER_LOAD
					/* A load from the same address with the same mode makes this
					 * load redundant. */
					if (get_Load_ptr(pred) == addr && get_Load_mode(pred) == get_Load_mode(load)) {
						exchange(load, pred);
						return;
					}
#endif
					/* Loads need not be ordered among each other. */
					continue;
				}
			}
			DB((dbg, LEVEL_3, "===> %+F must be executed after %+F\n", load, mem));
			after_set[i++] = mem;
		}
		assert(i != 0);
		if (i == 1) {
			after = after_set[0];
		} else {
			after = new_r_Sync(irg, block, i, after_set);
		}
		set_Load_mem(load, after);
	}

	for (i = 0; i < count_addrs; i++) {
		ir_mode* mode = get_Load_mode(load);
		ir_node* other_addr = addrs[i];
		ir_mode* other_mode = mode; // XXX second mode is nonsense
		ir_alias_relation rel = get_alias_relation(irg, addr, mode, other_addr, other_mode);

		DB((dbg, LEVEL_3, "===> Testing for alias between %+F and %+F. Relation is %d\n", addr, other_addr, rel));
		if (rel == ir_no_alias) continue;
		DB((dbg, LEVEL_3, "===> %+F potentially aliases address %+F\n", load, other_addr));

		ir_nodeset_insert(&interfere_sets[i], memory);
	}
}
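
/* Hook a Store into the memory graph: it must be executed after everything in
 * the alias set of its own address.  Memory nodes it supersedes are removed
 * from the alias sets of all aliasing addresses before the Store is entered
 * there. */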
static void PlaceStore(ir_graph* irg, ir_node* block, ir_node* store, ir_node* memory)
{
	ir_node* addr = get_Store_ptr(store);
	size_t addr_idx = (size_t)get_irn_link(addr);
	ir_nodeset_t* interfere_sets = get_irn_link(block);
	ir_nodeset_t* interfere_set = &interfere_sets[addr_idx];
	ir_node* after;
	size_t i;

	after = GenerateSync(irg, block, interfere_set);
	set_Store_mem(store, after);

	for (i = 0; i < count_addrs; i++) {
		ir_nodeset_iterator_t interfere_iter;
		ir_mode* mode = get_irn_mode(get_Store_value(store));
		ir_node* other_addr = addrs[i];
		ir_mode* other_mode = mode; // XXX second mode is nonsense
		ir_alias_relation rel = get_alias_relation(irg, addr, mode, other_addr, other_mode);
		ir_node* other_node;

		DB((dbg, LEVEL_3, "===> Testing for alias between %+F and %+F. Relation is %d\n", addr, other_addr, rel));
		if (rel == ir_no_alias) continue;
		DB((dbg, LEVEL_3, "===> %+F potentially aliases address %+F\n", store, other_addr));

		ir_nodeset_iterator_init(&interfere_iter, &interfere_sets[i]);
		while ((other_node = ir_nodeset_iterator_next(&interfere_iter)) != NULL) {
			if (AliasTest(irg, addr, mode, other_node) != ir_no_alias) {
				DB((dbg, LEVEL_3, "===> Removing %+F from execute-after set of %+F due to %+F\n", other_node, addrs[i], store));
				ir_nodeset_remove_iterator(&interfere_sets[i], &interfere_iter);
			}
		}

		ir_nodeset_insert(&interfere_sets[i], memory);
	}
}
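
/* Walk the memory chain upwards from node, rebuilding the memory dependencies
 * of every Load and Store on the way.  Returns whether a block change
 * happened below the current node. */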
static int WalkMem(ir_graph* irg, ir_node* node, ir_node* last_block)
{
	int block_change = 0;
	ir_node* block = get_nodes_block(node);
	ir_node* pred;
	ir_node* memory = node;
	ir_nodeset_t* addr_sets;

	if (block != last_block) {
		DB((dbg, LEVEL_3, "===> Changing block from %+F to %+F\n", last_block, block));
		block_change = 1;
		if (!Block_block_visited(block)) {
			mark_Block_block_visited(block);
		} else {
			DB((dbg, LEVEL_2, "===> Hit already visited block at %+F\n", node));
			return block_change;
		}
	}

	if (is_Proj(node)) node = get_Proj_pred(node);

	if (is_Phi(node)) {
		WalkMemPhi(irg, block, node);
		return block_change;
	} else if (is_Sync(node)) {
		UNIMPLEMENTED
	} else if (is_Return(node)) {
		pred = get_Return_mem(node);
	} else {
		pred = get_fragile_op_mem(node);
	}

	if (WalkMem(irg, pred, block)) {
		// There was a block change
		size_t block_arity = get_Block_n_cfgpreds(block);

		DB((dbg, LEVEL_3, "===> There is a block change before %+F\n", node));
		if (block_arity == 1) {
			// Just one predecessor, inherit its alias sets
			ir_node* pred_block = get_nodes_block(pred);
			ir_nodeset_t* predsets = get_irn_link(pred_block);
			ir_nodeset_t* thissets = get_irn_link(block);
			size_t i;

			DB((dbg, LEVEL_3, "===> Copying the only predecessor's address sets\n"));

			if (ir_nodeset_size(&predsets[0]) == 0) {
				ir_node* unknown;

				DB((dbg, LEVEL_3, "===> The predecessor was not finished yet\n"));
				assert(Block_block_visited(pred_block));

				unknown = new_r_Unknown(irg, mode_M);
				for (i = 0; i < count_addrs; i++) {
					ir_node* phi_unk = new_r_Phi(irg, block, 1, &unknown, mode_M);
					DB((dbg, LEVEL_3, "===> Placing unfinished %+F for %+F in %+F\n", phi_unk, addrs[i], block));
					set_irn_link(phi_unk, unfinished_phis[i]);
					unfinished_phis[i] = phi_unk;
					ir_nodeset_insert(&thissets[i], phi_unk);
				}
			} else {
				for (i = 0; i < count_addrs; i++) {
					ir_nodeset_iterator_t prediter;
					ir_node* addr;

					ir_nodeset_iterator_init(&prediter, &predsets[i]);
					while ((addr = ir_nodeset_iterator_next(&prediter)) != NULL) {
						ir_nodeset_insert(&thissets[i], addr);
					}
				}
			}
		}
	}

	DB((dbg, LEVEL_3, "===> Detotalising %+F\n", node));

	addr_sets = get_irn_link(block);

	if (is_Load(node)) {
		PlaceLoad(irg, block, node, memory);
	} else if (is_Store(node)) {
		PlaceStore(irg, block, node, memory);
	} else {
		ir_nodeset_t sync_set;
		ir_node* after;
		size_t i;

		DB((dbg, LEVEL_3, "===> Fallback: %+F aliases everything\n", node));

		ir_nodeset_init(&sync_set);
		for (i = 0; i < count_addrs; i++) {
			ir_nodeset_iterator_t iter;
			ir_node* mem;

			ir_nodeset_iterator_init(&iter, &addr_sets[i]);
			while ((mem = ir_nodeset_iterator_next(&iter)) != NULL) {
				ir_nodeset_insert(&sync_set, mem);
			}
		}

		after = GenerateSync(irg, block, &sync_set);
		set_irn_n(node, 0, after); // XXX unnice way to set the memory input
		ir_nodeset_destroy(&sync_set);

		for (i = 0; i < count_addrs; i++) {
			ir_nodeset_iterator_t iter;
			ir_nodeset_iterator_init(&iter, &addr_sets[i]);
			while (ir_nodeset_iterator_next(&iter) != NULL) {
				ir_nodeset_remove_iterator(&addr_sets[i], &iter);
			}
			ir_nodeset_insert(&addr_sets[i], memory);
		}
	}

	return block_change;
}
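
/* Replace the Unknown inputs of all queued unfinished Phis by Syncs over the
 * now complete alias sets of the corresponding predecessor blocks. */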
static void FinalisePhis(ir_graph* irg)
{
	size_t i;

	for (i = 0; i < count_addrs; i++) {
		ir_node* next_phi;
		ir_node* phi;

		for (phi = unfinished_phis[i]; phi != NULL; phi = next_phi) {
			ir_node* block = get_nodes_block(phi);
			size_t block_n_preds = get_Block_n_cfgpreds(block);

			next_phi = get_irn_link(phi);

			DB((dbg, LEVEL_4, "===> Finalising phi %+F in %+F\n", phi, block));

			if (block_n_preds == 1) {
				ir_node* pred_block = get_Block_cfgpred_block(block, 0);
				ir_nodeset_t* pred_sets = get_irn_link(pred_block);
				ir_node* after = GenerateSync(irg, pred_block, &pred_sets[i]);

				assert(is_Unknown(get_Phi_pred(phi, 0)));
				exchange(phi, after);
			} else {
				ir_node** in;
				size_t j;

				NEW_ARR_A(ir_node*, in, block_n_preds);
				for (j = 0; j < block_n_preds; j++) {
					ir_node* pred_block = get_Block_cfgpred_block(block, j);
					ir_nodeset_t* pred_sets = get_irn_link(pred_block);

					if (is_Unknown(get_Phi_pred(phi, j))) {
						set_Phi_pred(phi, j, GenerateSync(irg, pred_block, &pred_sets[i]));
					}
				}
			}
		}
	}
}
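
/* Split the total memory order of the graph: start a memory walk at every
 * Return and repair the Phis which could not be completed during the walk. */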
static void Detotalise(ir_graph* irg)
{
	ir_node* end_block = get_irg_end_block(irg);
	size_t npreds = get_Block_n_cfgpreds(end_block);
	size_t i;

	unfinished_phis = XMALLOCN(ir_node*, count_addrs);
	for (i = 0; i < count_addrs; i++) {
		unfinished_phis[i] = NULL;
	}

	for (i = 0; i < npreds; i++) {
		ir_node* pred = get_Block_cfgpred(end_block, i);
		assert(is_Return(pred));
		DB((dbg, LEVEL_2, "===> Starting memory walk at %+F\n", pred));
		WalkMem(irg, pred, NULL);
	}

	FinalisePhis(irg);
	xfree(unfinished_phis);
}
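
/* Collect the transitive non-Sync predecessors of a Sync into preds,
 * flattening nested Syncs. */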
static void AddSyncPreds(ir_nodeset_t* preds, ir_node* sync)
{
	size_t n = get_Sync_n_preds(sync);
	size_t i;

	for (i = 0; i < n; i++) {
		ir_node* pred = get_Sync_pred(sync, i);
		if (is_Sync(pred)) {
			AddSyncPreds(preds, pred);
		} else {
			ir_nodeset_insert(preds, pred);
		}
	}
}
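
/* Walker: flatten nested Syncs and remove duplicate inputs. */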
static void NormaliseSync(ir_node* node, void* env)
{
	ir_nodeset_t preds;
	ir_nodeset_iterator_t iter;
	ir_node** in;
	size_t count_preds;
	size_t i;
	(void)env;

	if (!is_Sync(node)) return;

	ir_nodeset_init(&preds);
	AddSyncPreds(&preds, node);

	count_preds = ir_nodeset_size(&preds);
	if (count_preds != (unsigned)get_Sync_n_preds(node)) {
		NEW_ARR_A(ir_node*, in, count_preds);
		ir_nodeset_iterator_init(&iter, &preds);
		for (i = 0; i < count_preds; i++) {
			ir_node* pred = ir_nodeset_iterator_next(&iter);
			assert(pred != NULL);
			in[i] = pred;
		}
		set_irn_in(node, count_preds, in);
	}

	ir_nodeset_destroy(&preds);
}
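
/* Driver of the detotalisation: make the memory dependencies of a graph as
 * parallel as possible and clean up the resulting Syncs. */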
void opt_ldst2(ir_graph* irg)
{
	FIRM_DBG_REGISTER(dbg, "firm.opt.ldst2");
	DB((dbg, LEVEL_1, "===> Performing load/store optimisation on %+F\n", irg));

	normalize_one_return(irg);
	dump_ir_block_graph(irg, "-prefluffig");

	obstack_init(&obst);

	if (1 /* XXX */ || get_opt_alias_analysis()) {
		assure_irg_address_taken_computed(irg);
		assure_irp_globals_address_taken_computed();
	}

	CollectAddresses(irg);
	if (count_addrs == 0) return;

	irg_block_walk_graph(irg, AliasSetAdder, NULL, NULL);
	inc_irg_block_visited(irg);
	SetStartAddressesTop(irg);
	Detotalise(irg);
	dump_ir_block_graph(irg, "-fluffig");

	irg_block_walk_graph(irg, AliasSetDestroyer, NULL, NULL);
	obstack_free(&obst, NULL);

	normalize_proj_nodes(irg);
	irg_walk_graph(irg, NormaliseSync, NULL, NULL);
	optimize_graph_df(irg);
	irg_walk_graph(irg, NormaliseSync, NULL, NULL);
	dump_ir_block_graph(irg, "-postfluffig");
}
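
/* State for the Sync-building walk below: block, address and mode of the
 * memory access being parallelised, the memory nodes it really depends on
 * (this_mem) and the memory users which may run in parallel with it
 * (user_mem). */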
typedef struct parallelise_info
{
	ir_node      *origin_block;
	ir_node      *origin_ptr;
	ir_mode      *origin_mode;
	ir_nodeset_t  this_mem;
	ir_nodeset_t  user_mem;
} parallelise_info;
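
/* Walk the memory chain above a Load and decide for every memory node whether
 * the Load really depends on it (this_mem) or can be executed in parallel
 * with it (user_mem). */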
static void parallelise_load(parallelise_info *pi, ir_node *irn)
{
	/* There is no point in investigating the same subgraph twice */
	if (ir_nodeset_contains(&pi->user_mem, irn))
		return;

	//ir_fprintf(stderr, "considering %+F\n", irn);
	if (get_nodes_block(irn) == pi->origin_block) {
		if (is_Proj(irn)) {
			ir_node *pred = get_Proj_pred(irn);
			if (is_Load(pred) &&
					get_Load_volatility(pred) == volatility_non_volatile) {
				ir_node *mem = get_Load_mem(pred);
				//ir_nodeset_insert(&pi->this_mem, mem);
				ir_nodeset_insert(&pi->user_mem, irn);
				//ir_fprintf(stderr, "adding %+F to user set\n", irn);
				parallelise_load(pi, mem);
				return;
			} else if (is_Store(pred) &&
					get_Store_volatility(pred) == volatility_non_volatile) {
				ir_mode *org_mode   = pi->origin_mode;
				ir_node *org_ptr    = pi->origin_ptr;
				ir_mode *store_mode = get_irn_mode(get_Store_value(pred));
				ir_node *store_ptr  = get_Store_ptr(pred);
				if (get_alias_relation(current_ir_graph, org_ptr, org_mode, store_ptr, store_mode) == ir_no_alias) {
					ir_node *mem = get_Store_mem(pred);
					//ir_fprintf(stderr, "Ld after St: %+F (%+F) does not alias %+F (%+F)\n", org_ptr, org_mode, store_ptr, store_mode);
					ir_nodeset_insert(&pi->user_mem, irn);
					//ir_fprintf(stderr, "adding %+F to user set\n", irn);
					parallelise_load(pi, mem);
					return;
				}
			}
		} else if (is_Sync(irn)) {
			int n = get_Sync_n_preds(irn);
			int i;

			for (i = 0; i < n; ++i) {
				ir_node *sync_pred = get_Sync_pred(irn, i);
				parallelise_load(pi, sync_pred);
			}
			return;
		}
	}
	ir_nodeset_insert(&pi->this_mem, irn);
	//ir_fprintf(stderr, "adding %+F to this set\n", irn);
}
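
/* Analogue of parallelise_load for Stores: only accesses which provably do
 * not alias the stored address may float past the Store. */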
static void parallelise_store(parallelise_info *pi, ir_node *irn)
{
	/* There is no point in investigating the same subgraph twice */
	if (ir_nodeset_contains(&pi->user_mem, irn))
		return;

	//ir_fprintf(stderr, "considering %+F\n", irn);
	if (get_nodes_block(irn) == pi->origin_block) {
		if (is_Proj(irn)) {
			ir_node *pred = get_Proj_pred(irn);
			if (is_Load(pred) &&
					get_Load_volatility(pred) == volatility_non_volatile) {
				ir_mode *org_mode  = pi->origin_mode;
				ir_node *org_ptr   = pi->origin_ptr;
				ir_mode *load_mode = get_Load_mode(pred);
				ir_node *load_ptr  = get_Load_ptr(pred);
				if (get_alias_relation(current_ir_graph, org_ptr, org_mode, load_ptr, load_mode) == ir_no_alias) {
					ir_node *mem = get_Load_mem(pred);
					//ir_fprintf(stderr, "St after Ld: %+F (%+F) does not alias %+F (%+F)\n", org_ptr, org_mode, load_ptr, load_mode);
					ir_nodeset_insert(&pi->user_mem, irn);
					//ir_fprintf(stderr, "adding %+F to user set\n", irn);
					parallelise_store(pi, mem);
					return;
				}
			} else if (is_Store(pred) &&
					get_Store_volatility(pred) == volatility_non_volatile) {
				ir_mode *org_mode   = pi->origin_mode;
				ir_node *org_ptr    = pi->origin_ptr;
				ir_mode *store_mode = get_irn_mode(get_Store_value(pred));
				ir_node *store_ptr  = get_Store_ptr(pred);
				if (get_alias_relation(current_ir_graph, org_ptr, org_mode, store_ptr, store_mode) == ir_no_alias) {
					ir_node *mem;
					//ir_fprintf(stderr, "St after St: %+F (%+F) does not alias %+F (%+F)\n", org_ptr, org_mode, store_ptr, store_mode);
					ir_nodeset_insert(&pi->user_mem, irn);
					//ir_fprintf(stderr, "adding %+F to user set\n", irn);
					mem = get_Store_mem(pred);
					parallelise_store(pi, mem);
					return;
				}
			}
		} else if (is_Sync(irn)) {
			int n = get_Sync_n_preds(irn);
			int i;

			for (i = 0; i < n; ++i) {
				ir_node *sync_pred = get_Sync_pred(irn, i);
				parallelise_store(pi, sync_pred);
			}
			return;
		}
	}
	ir_nodeset_insert(&pi->this_mem, irn);
	//ir_fprintf(stderr, "adding %+F to this set\n", irn);
}
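
/* Walker: for the memory Proj of every non-volatile Load or Store, split the
 * memory dependencies: non-conflicting users are attached to a Sync running
 * in parallel with the access, while the access itself only depends on the
 * memory nodes collected in this_mem. */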
static void walker(ir_node *proj, void *env)
{
	ir_node          *mem_op;
	ir_node          *pred;
	ir_node          *block;
	int               n;
	parallelise_info  pi;

	(void)env;

	if (!is_Proj(proj)) return;
	if (get_irn_mode(proj) != mode_M) return;

	mem_op = get_Proj_pred(proj);
	if (is_Load(mem_op)) {
		if (get_Load_volatility(mem_op) != volatility_non_volatile) return;

		block = get_nodes_block(mem_op);
		pred = get_Load_mem(mem_op);
		//ir_fprintf(stderr, "starting parallelise at %+F for %+F\n", pred, proj);

		pi.origin_block = block;
		pi.origin_ptr   = get_Load_ptr(mem_op);
		pi.origin_mode  = get_Load_mode(mem_op);
		ir_nodeset_init(&pi.this_mem);
		ir_nodeset_init(&pi.user_mem);

		parallelise_load(&pi, pred);
	} else if (is_Store(mem_op)) {
		if (get_Store_volatility(mem_op) != volatility_non_volatile) return;

		block = get_nodes_block(mem_op);
		pred = get_Store_mem(mem_op);
		//ir_fprintf(stderr, "starting parallelise at %+F for %+F\n", pred, proj);

		pi.origin_block = block;
		pi.origin_ptr   = get_Store_ptr(mem_op);
		pi.origin_mode  = get_irn_mode(get_Store_value(mem_op));
		ir_nodeset_init(&pi.this_mem);
		ir_nodeset_init(&pi.user_mem);

		parallelise_store(&pi, pred);
	} else {
		return;
	}

	n = ir_nodeset_size(&pi.user_mem);
	if (n != 0) { /* nothing happened otherwise */
		ir_graph              *irg = current_ir_graph;
		ir_node               *sync;
		ir_node              **in;
		ir_nodeset_iterator_t  iter;
		int                    i;

		++n;
		//ir_fprintf(stderr, "creating sync for users of %+F with %d inputs\n", proj, n);
		NEW_ARR_A(ir_node*, in, n);
		i = 0;
		in[i++] = new_r_Unknown(irg, mode_M);
		ir_nodeset_iterator_init(&iter, &pi.user_mem);
		for (;;) {
			ir_node* p = ir_nodeset_iterator_next(&iter);
			if (p == NULL) break;
			in[i++] = p;
		}
		assert(i == n);
		sync = new_r_Sync(irg, block, n, in);
		exchange(proj, sync);

		assert(pn_Load_M == pn_Store_M);
		proj = new_r_Proj(irg, block, mem_op, mode_M, pn_Load_M);
		set_Sync_pred(sync, 0, proj);

		n = ir_nodeset_size(&pi.this_mem);
		//ir_fprintf(stderr, "creating sync for %+F with %d inputs\n", mem_op, n);
		ir_nodeset_iterator_init(&iter, &pi.this_mem);
		if (n == 1) {
			sync = ir_nodeset_iterator_next(&iter);
		} else {
			NEW_ARR_A(ir_node*, in, n);
			i = 0;
			for (;;) {
				ir_node* p = ir_nodeset_iterator_next(&iter);
				if (p == NULL) break;
				in[i++] = p;
			}
			assert(i == n);
			sync = new_r_Sync(irg, block, n, in);
		}
		set_memop_mem(mem_op, sync);
	}

	ir_nodeset_destroy(&pi.this_mem);
	ir_nodeset_destroy(&pi.user_mem);
}
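
/* Make the memory graph more parallel by building Syncs: splits the overly
 * sequential memory chain at every non-volatile Load and Store. */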
void opt_sync(ir_graph *irg)
{
	//assure_irg_entity_usage_computed(irg);
	//assure_irp_globals_entity_usage_computed();

	irg_walk_graph(irg, NULL, walker, NULL);
	//optimize_graph_df(irg);
	//irg_walk_graph(irg, NormaliseSync, NULL, NULL);
}