fix a few warnings
[libfirm] / ir / opt / ldst2.c
1 /*
2  * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License.
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief   parallelizing Load/Store optimisation
23  * @author  Christoph Mallon
24  * @version $Id: $
25  */
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "iroptimize.h"
31
32 #include "array.h"
33 #include "debug.h"
34 #include "ircons.h"
35 #include "irgraph.h"
36 #include "irgmod.h"
37 #include "irgopt.h"
38 #include "irgwalk.h"
39 #include "irmemory.h"
40 #include "irnode.h"
41 #include "irnodeset.h"
42 #include "obst.h"
43 #include "irdump.h"
44 #include "irflag_t.h"
45 #include "irprintf.h"
46
47 #if +0
48 #define OPTIMISE_LOAD_AFTER_LOAD
49
50
51 #define UNIMPLEMENTED abort();
52
53
54 DEBUG_ONLY(static firm_dbg_module_t *dbg);
55
56
57 static struct obstack obst;
58 static size_t count_addrs;
59 static ir_node** addrs;
60
61
62 static void AddressCollector(ir_node* node, void* env)
63 {
64         ir_nodeset_t* addrs_set = env;
65         ir_node* addr;
66         if (is_Load(node)) {
67                 addr = get_Load_ptr(node);
68         } else if (is_Store(node)) {
69                 addr = get_Store_ptr(node);
70         } else {
71                 return;
72         }
73         ir_nodeset_insert(addrs_set, addr);
74 }
75
76
77 /* Collects all unique addresses used by load and store nodes of a graph and
78  * puts them into an array for later use */
79 static void CollectAddresses(ir_graph* irg)
80 {
81         ir_nodeset_t addrs_set;
82
83         ir_nodeset_init(&addrs_set);
84         irg_walk_graph(irg, AddressCollector, NULL, &addrs_set);
85
86         count_addrs = ir_nodeset_size(&addrs_set);
87         DB((dbg, LEVEL_1, "===> %+F uses %u unique addresses\n", irg, (unsigned int)count_addrs));
88         if (count_addrs != 0) {
89                 ir_nodeset_iterator_t addr_iter;
90                 size_t i;
91
92                 addrs = NEW_ARR_D(ir_node*, &obst, count_addrs);
93                 ir_nodeset_iterator_init(&addr_iter, &addrs_set);
94                 for (i = 0; i < count_addrs; i++) {
95                         ir_node* addr = ir_nodeset_iterator_next(&addr_iter);
96                         assert(addr != NULL);
97                         set_irn_link(addr, (void *)i);
98                         addrs[i] = addr;
99                         DB((dbg, LEVEL_2, "===> Collected unique symbolic address %+F\n", addr));
100                 }
101         }
102 }
103
104
105 static void AliasSetAdder(ir_node* block, void* env)
106 {
107         ir_nodeset_t* alias_set;
108         size_t i;
109         (void) env;
110
111         alias_set = NEW_ARR_D(ir_nodeset_t, &obst, count_addrs);
112         for (i = 0; i < count_addrs; i++) {
113                 ir_nodeset_init(&alias_set[i]);
114         }
115         set_irn_link(block, alias_set);
116 }
117
118
119 static void SetStartAddressesTop(ir_graph* irg)
120 {
121         ir_node* initial_mem;
122         ir_node* start_block;
123         ir_nodeset_t* start_addrs;
124         size_t i;
125
126         initial_mem = get_irg_initial_mem(irg);
127         start_block = get_irg_start_block(irg);
128         start_addrs = get_irn_link(start_block);
129         for (i = 0; i < count_addrs; i++) {
130                 ir_nodeset_insert(&start_addrs[i], initial_mem);
131         }
132         mark_Block_block_visited(start_block);
133 }
134
135
136 static void AliasSetDestroyer(ir_node* block, void* env)
137 {
138         ir_nodeset_t* alias_set = get_irn_link(block);
139         size_t i;
140         (void) env;
141
142         for (i = 0; i < count_addrs; i++) {
143                 ir_nodeset_destroy(&alias_set[i]);
144         }
145 }
146
147
148 static ir_alias_relation AliasTest(ir_graph* irg, ir_node* addr, ir_mode* mode, ir_node* other)
149 {
150         ir_node* other_addr;
151         ir_mode* other_mode;
152
153         if (is_Proj(other)) other = get_Proj_pred(other);
154
155         if (is_Load(other)) {
156                 other_addr = get_Load_ptr(other);
157         } else if (is_Store(other)) {
158                 other_addr = get_Store_ptr(other);
159         } else {
160                 return may_alias;
161         }
162
163         other_mode = get_irn_mode(other);
164         return get_alias_relation(irg, addr, mode, other_addr, other_mode);
165 }
166
167
/* Collapse the non-empty node set 'after_set' into a single memory node:
 * the sole member if the set is a singleton, otherwise a new Sync over all
 * members. */
static ir_node* GenerateSync(ir_graph* irg, ir_node* block, ir_nodeset_t* after_set)
{
	size_t set_size = ir_nodeset_size(after_set);
	ir_nodeset_iterator_t iter;

	assert(set_size != 0);

	ir_nodeset_iterator_init(&iter, after_set);
	if (set_size == 1) {
		/* no Sync needed for a single predecessor */
		return ir_nodeset_iterator_next(&iter);
	} else {
		ir_node** in;
		size_t i;

		/* NEW_ARR_A allocates on the stack - see array.h */
		NEW_ARR_A(ir_node*, in, set_size);
		for (i = 0; i < set_size; i++) {
			in[i] = ir_nodeset_iterator_next(&iter);
		}
		return new_r_Sync(irg, block, set_size, in);
	}
}
189
190
191 static ir_node** unfinished_phis;
192
193
194 static void PlaceMemPhis(ir_graph* irg, ir_node* block, ir_node* phi)
195 {
196         int unfinished = 0;
197         size_t block_n_preds = get_Block_n_cfgpreds(block);
198         ir_nodeset_t* thissets;
199         ir_node** in;
200         size_t i;
201         size_t j;
202
203         thissets = get_irn_link(block);
204         NEW_ARR_A(ir_node*, in, block_n_preds);
205         for (j = 0; j < count_addrs; j++) {
206                 ir_node* new_phi;
207
208                 for (i = 0; i < block_n_preds; i++) {
209                         ir_node* pred_block = get_nodes_block(get_Phi_pred(phi, i)); // TODO get_Block_cfgpred_block(block, i);
210                         ir_nodeset_t* predsets = get_irn_link(pred_block);
211                         size_t predset_size = ir_nodeset_size(&predsets[j]);
212
213                         if (predset_size == 0) {
214                                 in[i] = new_r_Unknown(irg, mode_M);
215                                 unfinished = 1;
216                         } else {
217                                 in[i] = GenerateSync(irg, pred_block, &predsets[j]);
218                         }
219                 }
220                 new_phi = new_r_Phi(irg, block, block_n_preds, in, mode_M);
221                 if (unfinished) {
222                         set_irn_link(new_phi, unfinished_phis[j]);
223                         unfinished_phis[j] = new_phi;
224                 }
225                 ir_nodeset_insert(&thissets[j], new_phi);
226         }
227 }
228
229
230 static int WalkMem(ir_graph* irg, ir_node* node, ir_node* last_block);
231
232
233 static void WalkMemPhi(ir_graph* irg, ir_node* block, ir_node* phi)
234 {
235         size_t n = get_Phi_n_preds(phi);
236         size_t i;
237
238         for (i = 0; i < n; i++) {
239                 WalkMem(irg, get_Phi_pred(phi, i), block);
240         }
241
242         PlaceMemPhis(irg, block, phi);
243         exchange(phi, new_Bad());
244 }
245
246
247 static void PlaceLoad(ir_graph* irg, ir_node* block, ir_node* load, ir_node* memory)
248 {
249         ir_node* addr = get_Load_ptr(load);
250         size_t addr_idx = (size_t)get_irn_link(addr);
251         ir_nodeset_t* interfere_sets = get_irn_link(block);
252         ir_nodeset_t* interfere_set = &interfere_sets[addr_idx];
253         size_t size = ir_nodeset_size(interfere_set);
254         ir_nodeset_iterator_t interfere_iter;
255         size_t i;
256
257         assert(size > 0);
258         ir_nodeset_iterator_init(&interfere_iter, interfere_set);
259         if (size == 1) {
260                 ir_node* after = ir_nodeset_iterator_next(&interfere_iter);
261                 assert(!is_Proj(after) || !is_Load(get_Proj_pred(after)));
262                 DB((dbg, LEVEL_3, "===> %+F must be executed after %+F\n", load, after));
263                 set_Load_mem(load, after);
264         } else {
265                 ir_node** after_set;
266                 ir_node* after;
267                 ir_node* mem;
268                 size_t i;
269
270                 NEW_ARR_A(ir_node*, after_set, size);
271                 i = 0;
272                 while ((mem = ir_nodeset_iterator_next(&interfere_iter)) != NULL) {
273                         if (is_Proj(mem)) {
274                                 ir_node* pred = get_Proj_pred(mem);
275                                 if (is_Load(pred)) {
276 #ifdef OPTIMISE_LOAD_AFTER_LOAD
277                                         if (get_Load_ptr(pred) == addr && get_Load_mode(pred) == get_Load_mode(load)) {
278                                                 exchange(load, pred);
279                                                 return;
280                                         }
281 #endif
282                                         continue;
283                                 }
284                         }
285                         DB((dbg, LEVEL_3, "===> %+F must be executed after %+F\n", load, mem));
286                         after_set[i++] = mem;
287                 }
288                 assert(i != 0);
289                 if (i == 1) {
290                         after = after_set[0];
291                 } else {
292                         after = new_r_Sync(irg, block, i, after_set);
293                 }
294                 set_Load_mem(load, after);
295         }
296
297         for (i = 0; i < count_addrs; i++) {
298                 ir_mode* mode = get_Load_mode(load);
299                 ir_node* other_addr = addrs[i];
300                 ir_mode* other_mode = mode; // XXX second mode is nonsense
301                 ir_alias_relation rel = get_alias_relation(irg, addr, mode, other_addr, other_mode);
302
303                 DB((dbg, LEVEL_3, "===> Testing for alias between %+F and %+F. Relation is %d\n", addr, other_addr, rel));
304                 if (rel == no_alias) {
305                         continue;
306                 }
307                 DB((dbg, LEVEL_3, "===> %+F potentially aliases address %+F\n", load, other_addr));
308
309                 ir_nodeset_insert(&interfere_sets[i], memory);
310         }
311 }
312
313
/* Rewire a Store's memory input to the interference set of its address and
 * update the per-address sets: nodes that the store overwrites (i.e. that
 * alias it) are dropped from the sets, and the store's own memory state is
 * added to every potentially aliasing address. */
static void PlaceStore(ir_graph* irg, ir_node* block, ir_node* store, ir_node* memory)
{
	ir_node* addr = get_Store_ptr(store);
	size_t addr_idx = (size_t)get_irn_link(addr); /* index set by CollectAddresses() */
	ir_nodeset_t* interfere_sets = get_irn_link(block);
	ir_nodeset_t* interfere_set = &interfere_sets[addr_idx];
	ir_node* after;
	size_t i;

	/* the store must execute after everything recorded for its address */
	after = GenerateSync(irg, block, interfere_set);
	set_Store_mem(store, after);

	for (i = 0; i < count_addrs; i++) {
		ir_nodeset_iterator_t interfere_iter;
		ir_mode* mode = get_irn_mode(get_Store_value(store));
		ir_node* other_addr = addrs[i];
		ir_mode* other_mode = mode; // XXX second mode is nonsense
		ir_alias_relation rel = get_alias_relation(irg, addr, mode, other_addr, other_mode);
		ir_node* other_node;

		DB((dbg, LEVEL_3, "===> Testing for alias between %+F and %+F. Relation is %d\n", addr, other_addr, rel));
		if (rel == no_alias) {
			continue;
		}
		DB((dbg, LEVEL_3, "===> %+F potentially aliases address %+F\n", store, other_addr));

		/* drop aliasing entries: they are now superseded by this store.
		 * Removal during iteration uses the dedicated remove_iterator API. */
		ir_nodeset_iterator_init(&interfere_iter, &interfere_sets[i]);
		while ((other_node = ir_nodeset_iterator_next(&interfere_iter)) != NULL) {
			if (AliasTest(irg, addr, mode, other_node) != no_alias) {
				DB((dbg, LEVEL_3, "===> Removing %+F from execute-after set of %+F due to %+F\n", other_node, addrs[i], store));
				ir_nodeset_remove_iterator(&interfere_sets[i], &interfere_iter);
			}
		}

		ir_nodeset_insert(&interfere_sets[i], memory);
	}
}
351
352
/* Recursive walk along the memory chain, bottom-up from 'node'.
 * Predecessor memory is processed first, then the node itself is rewired
 * (Load/Store via PlaceLoad/PlaceStore, anything else conservatively
 * serialised against all addresses).
 *
 * Returns nonzero iff the walk crossed a block boundary (node's block
 * differs from last_block); the caller uses this to merge or inherit the
 * predecessor block's per-address sets. */
static int WalkMem(ir_graph* irg, ir_node* node, ir_node* last_block)
{
	int block_change = 0;
	ir_node* block = get_nodes_block(node);
	ir_node* pred;
	ir_node* memory = node; /* the memory state this node produces (often a Proj) */
	ir_nodeset_t* addr_sets;

	if (block != last_block) {
		DB((dbg, LEVEL_3, "===> Changing block from %+F to %+F\n", last_block, block));
		block_change = 1;
		if (Block_not_block_visited(block)) {
			mark_Block_block_visited(block);
		} else {
			/* another walk already handled this block's memory chain */
			DB((dbg, LEVEL_2, "===> Hit already visited block at %+F\n", node));
			return block_change;
		}
	}

	// Skip projs
	if (is_Proj(node)) node = get_Proj_pred(node);

	if (is_Phi(node)) {
		/* phis recurse into all their predecessors themselves */
		WalkMemPhi(irg, block, node);
		return block_change;
	} else if (is_Sync(node)) {
		UNIMPLEMENTED
	} else if (is_Return(node)) {
		pred = get_Return_mem(node);
	} else {
		pred = get_fragile_op_mem(node);
	}

	/* process the memory predecessor first (bottom-up construction) */
	if (WalkMem(irg, pred, block)) {
		// There was a block change
		size_t block_arity = get_Block_n_cfgpreds(block);

		DB((dbg, LEVEL_3, "===> There is a block change before %+F\n", node));
		if (block_arity == 1) {
			// Just one predecessor, inherit its alias sets
			ir_node* pred_block = get_nodes_block(pred);
			ir_nodeset_t* predsets = get_irn_link(pred_block);
			ir_nodeset_t* thissets = get_irn_link(block);
			size_t i;

			DB((dbg, LEVEL_3, "===> Copying the only predecessor's address sets\n"));

			if (ir_nodeset_size(&predsets[0]) == 0) {
				/* predecessor block has no state yet (e.g. loop back edge):
				 * create one-input phis fed by Unknown and queue them for
				 * FinalisePhis() */
				ir_node* unknown;

				DB((dbg, LEVEL_3, "===> The predecessor was not finished yet\n"));
				assert(!Block_not_block_visited(pred_block));

				unknown = new_r_Unknown(irg, mode_M);
				for (i = 0; i < count_addrs; i++) {
					ir_node* phi_unk = new_r_Phi(irg, block, 1, &unknown, mode_M);
					DB((dbg, LEVEL_3, "===> Placing unfinished %+F for %+F in %+F\n", phi_unk, addrs[i], block));
					set_irn_link(phi_unk, unfinished_phis[i]);
					unfinished_phis[i] = phi_unk;
					ir_nodeset_insert(&thissets[i], phi_unk);
				}
			} else {
				/* copy the predecessor's per-address sets verbatim */
				for (i = 0; i < count_addrs; i++) {
					ir_nodeset_iterator_t prediter;
					ir_node* addr;

					ir_nodeset_iterator_init(&prediter, &predsets[i]);
					while ((addr = ir_nodeset_iterator_next(&prediter)) != NULL) {
						ir_nodeset_insert(&thissets[i], addr);
					}
				}
			}
		}
	}

	DB((dbg, LEVEL_3, "===> Detotalising %+F\n", node));

	addr_sets = get_irn_link(block);

	if (is_Load(node)) {
		PlaceLoad(irg, block, node, memory);
	} else if (is_Store(node)) {
		PlaceStore(irg, block, node, memory);
	} else {
		/* unknown memory operation: conservatively order it after every
		 * recorded memory state for every address */
		ir_nodeset_t sync_set;
		size_t i;
		ir_node* after;

		DB((dbg, LEVEL_3, "===> Fallback: %+F aliases everything\n", node));

		ir_nodeset_init(&sync_set);
		for (i = 0; i < count_addrs; i++) {
			ir_nodeset_iterator_t iter;
			ir_node* mem;

			ir_nodeset_iterator_init(&iter, &addr_sets[i]);
			while ((mem = ir_nodeset_iterator_next(&iter)) != NULL) {
				ir_nodeset_insert(&sync_set, mem);
			}
		}

		after = GenerateSync(irg, block, &sync_set);
		set_irn_n(node, 0, after); // XXX unnice way to set the memory input

		/* all per-address sets now contain only this node's memory state */
		for (i = 0; i < count_addrs; i++) {
			ir_nodeset_iterator_t iter;
			ir_nodeset_iterator_init(&iter, &addr_sets[i]);
			while (ir_nodeset_iterator_next(&iter) != NULL) {
				ir_nodeset_remove_iterator(&addr_sets[i], &iter);
			}
			ir_nodeset_insert(&addr_sets[i], memory);
		}
	}

	return block_change;
}
469
470
/* Patch all phis that were created with Unknown placeholder inputs during
 * the walk (queued via their link fields on unfinished_phis[]): replace
 * each Unknown input by a Sync over the - now complete - per-address set
 * of the corresponding predecessor block. */
static void FinalisePhis(ir_graph* irg)
{
	size_t i;

	for (i = 0; i < count_addrs; i++) {
		ir_node* next_phi;
		ir_node* phi;

		for (phi = unfinished_phis[i]; phi != NULL; phi = next_phi) {
			ir_node* block = get_nodes_block(phi);
			size_t block_n_preds = get_Block_n_cfgpreds(block);

			/* fetch the chain link before the phi may be exchanged */
			next_phi = get_irn_link(phi);

			DB((dbg, LEVEL_4, "===> Finialising phi %+F in %+F\n", phi, block));

			if (block_n_preds == 1) {
				/* degenerate one-input phi: replace it entirely */
				ir_node* pred_block = get_Block_cfgpred_block(block, 0);
				ir_nodeset_t* pred_sets = get_irn_link(pred_block);
				ir_node* after = GenerateSync(irg, pred_block, &pred_sets[i]);

				assert(is_Unknown(get_Phi_pred(phi, 0)));
				exchange(phi, after);
			} else {
				ir_node** in;
				size_t j;

				NEW_ARR_A(ir_node*, in, block_n_preds);
				for (j = 0; j < block_n_preds; j++) {
					ir_node* pred_block = get_Block_cfgpred_block(block, j);
					ir_nodeset_t* pred_sets = get_irn_link(pred_block);

					/* only placeholder inputs need patching */
					if (is_Unknown(get_Phi_pred(phi, j))) {
						set_Phi_pred(phi, j, GenerateSync(irg, pred_block, &pred_sets[i]));
					}
				}
			}
		}
	}
}
511
512
/* Driver of the rebuilding phase: start a memory walk at every Return
 * feeding the end block, then patch all phis left with placeholder
 * inputs.  Requires the graph to have been normalised so that every end
 * block predecessor is a Return (see opt_ldst2's normalize_one_return). */
static void Detotalise(ir_graph* irg)
{
	ir_node* end_block = get_irg_end_block(irg);
	size_t npreds = get_Block_n_cfgpreds(end_block);
	size_t i;

	/* one list head of unfinished phis per collected address */
	unfinished_phis = xmalloc(sizeof(*unfinished_phis) * count_addrs);
	for (i = 0; i < count_addrs; i++) {
		unfinished_phis[i] = NULL;
	}

	for (i = 0; i < npreds; i++) {
		ir_node* pred = get_Block_cfgpred(end_block, i);
		assert(is_Return(pred));
		DB((dbg, LEVEL_2, "===> Starting memory walk at %+F\n", pred));
		WalkMem(irg, pred, NULL);
	}

	FinalisePhis(irg);
	xfree(unfinished_phis);
}
534 #endif
535
536
537 static void AddSyncPreds(ir_nodeset_t* preds, ir_node* sync)
538 {
539         size_t n = get_Sync_n_preds(sync);
540         size_t i;
541
542         for (i = 0; i < n; i++) {
543                 ir_node* pred = get_Sync_pred(sync, i);
544                 if (is_Sync(pred)) {
545                         AddSyncPreds(preds, pred);
546                 } else {
547                         ir_nodeset_insert(preds, pred);
548                 }
549         }
550 }
551
552 #if 0
/* Walker callback: flatten nested Syncs and deduplicate their inputs.
 * If the flattened predecessor set differs in size from the current input
 * count, the Sync's inputs are rewritten in place. */
static void NormaliseSync(ir_node* node, void* env)
{
	ir_nodeset_t preds;
	ir_nodeset_iterator_t iter;
	ir_node** in;
	size_t count_preds;
	size_t i;
	(void) env;

	if (!is_Sync(node)) return;

	ir_nodeset_init(&preds);
	AddSyncPreds(&preds, node);

	count_preds = ir_nodeset_size(&preds);
	/* only rewrite when flattening/deduplication changed anything */
	if (count_preds != (unsigned)get_Sync_n_preds(node)) {
		NEW_ARR_A(ir_node*, in, count_preds);
		ir_nodeset_iterator_init(&iter, &preds);
		for (i = 0; i < count_preds; i++) {
			ir_node* pred = ir_nodeset_iterator_next(&iter);
			assert(pred != NULL);
			in[i] = pred;
		}
		set_irn_in(node, count_preds, in);
	}

	ir_nodeset_destroy(&preds);
}
581
/* Old entry point of the detotalisation-based variant (disabled by the
 * surrounding #if 0): collect all load/store addresses, attach per-address
 * sets to every block, rebuild the memory chains via Detotalise() and
 * clean up the resulting Syncs. */
void opt_ldst2(ir_graph* irg)
{
	FIRM_DBG_REGISTER(dbg, "firm.opt.ldst2");
	DB((dbg, LEVEL_1, "===> Performing load/store optimisation on %+F\n", irg));

	/* Detotalise() assumes exactly one Return per end block predecessor */
	normalize_one_return(irg);
	dump_ir_block_graph(irg, "-prefluffig");

	obstack_init(&obst);

	if (1 /* XXX */ || get_opt_alias_analysis()) {
		assure_irg_address_taken_computed(irg);
		assure_irp_globals_address_taken_computed();
	}


	CollectAddresses(irg);
	if (count_addrs == 0) return;

	irg_block_walk_graph(irg, AliasSetAdder, NULL, NULL);
	inc_irg_block_visited(irg);
	SetStartAddressesTop(irg);
	Detotalise(irg);
	dump_ir_block_graph(irg, "-fluffig");

	irg_block_walk_graph(irg, AliasSetDestroyer, NULL, NULL);
	obstack_free(&obst, NULL);

	normalize_proj_nodes(irg);
	irg_walk_graph(irg, NormaliseSync, NULL, NULL);
	optimize_graph_df(irg);
	irg_walk_graph(irg, NormaliseSync, NULL, NULL);
	dump_ir_block_graph(irg, "-postfluffig");
}
616 #endif
617
618
/* State for parallelising the memory chain above one Load/Store. */
typedef struct parallelise_info
{
	ir_node      *origin_block; /* block of the operation being parallelised */
	ir_node      *origin_ptr;   /* its address operand */
	ir_mode      *origin_mode;  /* mode of the loaded/stored value */
	ir_nodeset_t  this_mem;     /* memory nodes the operation must stay ordered after */
	ir_nodeset_t  user_mem;     /* memory users it may execute in parallel with */
} parallelise_info;
627
628
/* Walk the memory chain above a Load (within its own block) and partition
 * the nodes encountered:
 *  - Projs of non-volatile Loads, and of non-volatile Stores whose address
 *    provably does not alias ours, go into user_mem (we may run in
 *    parallel with them) and the walk continues above them;
 *  - Syncs are flattened by recursing into all predecessors;
 *  - everything else terminates the walk and goes into this_mem (a real
 *    ordering dependency). */
static void parallelise_load(parallelise_info *pi, ir_node *irn)
{
	//ir_fprintf(stderr, "considering %+F\n", irn);
	if (get_nodes_block(irn) == pi->origin_block) {
		if (is_Proj(irn)) {
			ir_node *pred = get_Proj_pred(irn);
			if (is_Load(pred) &&
					get_Load_volatility(pred) == volatility_non_volatile) {
				/* loads never conflict with each other */
				ir_node *mem = get_Load_mem(pred);
				//ir_nodeset_insert(&pi->this_mem, mem);
				ir_nodeset_insert(&pi->user_mem, irn);
				//ir_fprintf(stderr, "adding %+F to user set\n", irn);
				parallelise_load(pi, mem);
				return;
			} else if (is_Store(pred) &&
					get_Store_volatility(pred) == volatility_non_volatile) {
				ir_mode *org_mode   = pi->origin_mode;
				ir_node *org_ptr    = pi->origin_ptr;
				ir_mode *store_mode = get_irn_mode(get_Store_value(pred));
				ir_node *store_ptr  = get_Store_ptr(pred);
				if (get_alias_relation(current_ir_graph, org_ptr, org_mode, store_ptr, store_mode) == no_alias) {
					ir_node *mem = get_Store_mem(pred);
					/* NOTE(review): unconditional debug output to stderr -
					 * probably leftover; consider removing or gating */
					ir_fprintf(stderr, "Ld after St: %+F (%+F) does not alias %+F (%+F)\n", org_ptr, org_mode, store_ptr, store_mode);
					ir_nodeset_insert(&pi->user_mem, irn);
					//ir_fprintf(stderr, "adding %+F to user set\n", irn);
					parallelise_load(pi, mem);
					return;
				}
			}
		} else if (is_Sync(irn)) {
			/* flatten: treat each Sync input independently */
			int n = get_Sync_n_preds(irn);
			int i;

			for (i = 0; i < n; ++i) {
				ir_node *sync_pred = get_Sync_pred(irn, i);
				parallelise_load(pi, sync_pred);
			}
			return;
		}
	}
	/* end of the parallelisable region: a real dependency */
	ir_nodeset_insert(&pi->this_mem, irn);
	//ir_fprintf(stderr, "adding %+F to this set\n", irn);
}
672
673
/* Counterpart of parallelise_load() for Stores: the walk may only pass a
 * Load or Store if its address provably does not alias ours (a store
 * conflicts with both loads and stores to the same location). */
static void parallelise_store(parallelise_info *pi, ir_node *irn)
{
	//ir_fprintf(stderr, "considering %+F\n", irn);
	if (get_nodes_block(irn) == pi->origin_block) {
		if (is_Proj(irn)) {
			ir_node *pred = get_Proj_pred(irn);
			if (is_Load(pred) &&
					get_Load_volatility(pred) == volatility_non_volatile) {
				ir_mode *org_mode  = pi->origin_mode;
				ir_node *org_ptr   = pi->origin_ptr;
				ir_mode *load_mode = get_Load_mode(pred);
				ir_node *load_ptr  = get_Load_ptr(pred);
				if (get_alias_relation(current_ir_graph, org_ptr, org_mode, load_ptr, load_mode) == no_alias) {
					ir_node *mem = get_Load_mem(pred);
					/* NOTE(review): unconditional debug output to stderr -
					 * probably leftover; consider removing or gating */
					ir_fprintf(stderr, "St after Ld: %+F (%+F) does not alias %+F (%+F)\n", org_ptr, org_mode, load_ptr, load_mode);
					ir_nodeset_insert(&pi->user_mem, irn);
					//ir_fprintf(stderr, "adding %+F to user set\n", irn);
					parallelise_store(pi, mem);
					return;
				}
			} else if (is_Store(pred) &&
					get_Store_volatility(pred) == volatility_non_volatile) {
				ir_mode *org_mode   = pi->origin_mode;
				ir_node *org_ptr    = pi->origin_ptr;
				ir_mode *store_mode = get_irn_mode(get_Store_value(pred));
				ir_node *store_ptr  = get_Store_ptr(pred);
				if (get_alias_relation(current_ir_graph, org_ptr, org_mode, store_ptr, store_mode) == no_alias) {
					ir_node *mem;

					ir_fprintf(stderr, "St after St: %+F (%+F) does not alias %+F (%+F)\n", org_ptr, org_mode, store_ptr, store_mode);
					ir_nodeset_insert(&pi->user_mem, irn);
					//ir_fprintf(stderr, "adding %+F to user set\n", irn);
					mem = get_Store_mem(pred);
					parallelise_store(pi, mem);
					return;
				}
			}
		} else if (is_Sync(irn)) {
			/* flatten: treat each Sync input independently */
			int n = get_Sync_n_preds(irn);
			int i;

			for (i = 0; i < n; ++i) {
				ir_node *sync_pred = get_Sync_pred(irn, i);
				parallelise_store(pi, sync_pred);
			}
			return;
		}
	}
	/* end of the parallelisable region: a real dependency */
	ir_nodeset_insert(&pi->this_mem, irn);
	//ir_fprintf(stderr, "adding %+F to this set\n", irn);
}
725
726
727 static void walker(ir_node *proj, void *env)
728 {
729         ir_node          *mem_op;
730         ir_node          *pred;
731         ir_node          *block;
732         int               n;
733         parallelise_info  pi;
734
735         (void)env;
736
737         if (!is_Proj(proj)) return;
738         if (get_irn_mode(proj) != mode_M) return;
739
740         mem_op = get_Proj_pred(proj);
741         if (is_Load(mem_op)) {
742                 if (get_Load_volatility(mem_op) != volatility_non_volatile) return;
743
744                 block = get_nodes_block(mem_op);
745                 pred  = get_Load_mem(mem_op);
746                 //ir_fprintf(stderr, "starting parallelise at %+F for %+F\n", pred, proj);
747
748                 pi.origin_block = block,
749                 pi.origin_ptr   = get_Load_ptr(mem_op);
750                 pi.origin_mode  = get_Load_mode(mem_op);
751                 ir_nodeset_init(&pi.this_mem);
752                 ir_nodeset_init(&pi.user_mem);
753
754                 parallelise_load(&pi, pred);
755         } else if (is_Store(mem_op)) {
756                 if (get_Store_volatility(mem_op) != volatility_non_volatile) return;
757
758                 block = get_nodes_block(mem_op);
759                 pred  = get_Store_mem(mem_op);
760                 //ir_fprintf(stderr, "starting parallelise at %+F for %+F\n", pred, proj);
761
762                 pi.origin_block = block,
763                 pi.origin_ptr   = get_Store_ptr(mem_op);
764                 pi.origin_mode  = get_irn_mode(get_Store_value(mem_op));
765                 ir_nodeset_init(&pi.this_mem);
766                 ir_nodeset_init(&pi.user_mem);
767
768                 parallelise_store(&pi, pred);
769         } else {
770                 return;
771         }
772
773         n = ir_nodeset_size(&pi.user_mem);
774         if (n != 0) { /* nothing happend otherwise */
775                 ir_graph               *irg  = current_ir_graph;
776                 ir_node                *sync;
777                 ir_node               **in;
778                 ir_nodeset_iterator_t   iter;
779                 int                     i;
780
781                 ++n;
782                 //ir_fprintf(stderr, "creating sync for users of %+F with %d inputs\n", proj, n);
783                 NEW_ARR_A(ir_node*, in, n);
784                 i = 0;
785                 in[i++] = new_r_Unknown(irg, mode_M);
786                 ir_nodeset_iterator_init(&iter, &pi.user_mem);
787                 for (;;) {
788                         ir_node* p = ir_nodeset_iterator_next(&iter);
789                         if (p == NULL) break;
790                         in[i++] = p;
791                 }
792                 assert(i == n);
793                 sync = new_r_Sync(irg, block, n, in);
794                 exchange(proj, sync);
795
796                 assert(pn_Load_M == pn_Store_M);
797                 proj = new_r_Proj(irg, block, mem_op, mode_M, pn_Load_M);
798                 set_Sync_pred(sync, 0, proj);
799
800                 n = ir_nodeset_size(&pi.this_mem);
801                 //ir_fprintf(stderr, "creating sync for %+F with %d inputs\n", mem_op, n);
802                 ir_nodeset_iterator_init(&iter, &pi.this_mem);
803                 if (n == 1) {
804                         sync = ir_nodeset_iterator_next(&iter);
805                 } else {
806                         NEW_ARR_A(ir_node*, in, n);
807                         i = 0;
808                         for (;;) {
809                                 ir_node* p = ir_nodeset_iterator_next(&iter);
810                                 if (p == NULL) break;
811                                 in[i++] = p;
812                         }
813                         assert(i == n);
814                         sync = new_r_Sync(irg, block, n, in);
815                 }
816                 set_memop_mem(mem_op, sync);
817         }
818
819         ir_nodeset_destroy(&pi.this_mem);
820         ir_nodeset_destroy(&pi.user_mem);
821 }
822
823
/* Parallelise the memory chains of a graph: splits serial memory
 * dependency chains between provably non-aliasing Loads/Stores into
 * parallel edges joined by Sync nodes (see walker()).  Requires
 * address-taken information for the alias queries. */
void opt_ldst2(ir_graph *irg)
{
	assure_irg_address_taken_computed(irg);
	assure_irp_globals_address_taken_computed();

	irg_walk_graph(irg, NULL, walker, NULL);
	//optimize_graph_df(irg);
	//irg_walk_graph(irg, NormaliseSync, NULL, NULL);
}