Loads do not remove any nodes from the execute-after sets. Also fix a 'node leak'.
[libfirm] ir/opt/ldst2.c
/*
 * Copyright (C) 1995-2007 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   parallelizing Load/Store optimisation
 * @author  Christoph Mallon
 * @version $Id: $
 */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "array.h"
#include "debug.h"
#include "ircons.h"
#include "irgraph.h"
#include "irgmod.h"
#include "irgopt.h"
#include "irgwalk.h"
#include "irmemory.h"
#include "irnode.h"
#include "irnodeset.h"
#include "ldst2.h"
#include "obst.h"
#include "return.h"
#include "irdump.h"
#include "irflag_t.h"
#include "xmalloc.h" /* xmalloc(), xfree() */

#define OPTIMISE_LOAD_AFTER_LOAD


#define UNIMPLEMENTED abort();


DEBUG_ONLY(static firm_dbg_module_t *dbg);


/* Obstack holding the address array and the per-block execute-after sets. */
static struct obstack obst;
/* Number of distinct load/store addresses in the current graph. */
static size_t count_addrs;
/* The distinct addresses; each address node's link holds its index into this array. */
static ir_node** addrs;


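/* Walker callback: collect the address operand of every Load and Store into
 * the node set passed via env. */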
static void AddressCollector(ir_node* node, void* env)
{
	ir_nodeset_t* addrs_set = env;
	ir_node* addr;
	if (is_Load(node)) {
		addr = get_Load_ptr(node);
	} else if (is_Store(node)) {
		addr = get_Store_ptr(node);
	} else {
		return;
	}
	ir_nodeset_insert(addrs_set, addr);
}


/* Collects all unique addresses used by load and store nodes of a graph and
 * puts them into an array for later use */
static void CollectAddresses(ir_graph* irg)
{
	ir_nodeset_t addrs_set;

	ir_nodeset_init(&addrs_set);
	irg_walk_graph(irg, AddressCollector, NULL, &addrs_set);

	count_addrs = ir_nodeset_size(&addrs_set);
	DB((dbg, LEVEL_1, "===> %+F uses %u unique addresses\n", irg, (unsigned int)count_addrs));
	if (count_addrs != 0) {
		ir_nodeset_iterator_t addr_iter;
		size_t i;

		addrs = NEW_ARR_D(ir_node*, &obst, count_addrs);
		ir_nodeset_iterator_init(&addr_iter, &addrs_set);
		for (i = 0; i < count_addrs; i++) {
			ir_node* addr = ir_nodeset_iterator_next(&addr_iter);
			assert(addr != NULL);
			set_irn_link(addr, (void *)i);
			addrs[i] = addr;
			DB((dbg, LEVEL_2, "===> Collected unique symbolic address %+F\n", addr));
		}
	}
	ir_nodeset_destroy(&addrs_set); /* the addresses now live in addrs[], free the set */
}


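/* Block walker: attach an array with one (initially empty) node set per
 * collected address to every block.  Each set will hold the memory nodes the
 * next access to that address must be executed after. */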
static void AliasSetAdder(ir_node* block, void* env)
{
	ir_nodeset_t* alias_set;
	size_t i;

	alias_set = NEW_ARR_D(ir_nodeset_t, &obst, count_addrs);
	for (i = 0; i < count_addrs; i++) {
		ir_nodeset_init(&alias_set[i]);
	}
	set_irn_link(block, alias_set);
}


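/* Seed the start block: every address' execute-after set initially contains
 * only the graph's initial memory. */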
static void SetStartAddressesTop(ir_graph* irg)
{
	ir_node* initial_mem;
	ir_node* start_block;
	ir_nodeset_t* start_addrs;
	size_t i;

	initial_mem = get_irg_initial_mem(irg);
	start_block = get_irg_start_block(irg);
	start_addrs = get_irn_link(start_block);
	for (i = 0; i < count_addrs; i++) {
		ir_nodeset_insert(&start_addrs[i], initial_mem);
	}
	mark_Block_block_visited(start_block);
}


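/* Block walker: destroy the per-address node sets attached to a block.  The
 * array itself lives on the obstack and is freed together with it. */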
static void AliasSetDestroyer(ir_node* block, void* env)
{
	ir_nodeset_t* alias_set = get_irn_link(block);
	size_t i;

	for (i = 0; i < count_addrs; i++) {
		ir_nodeset_destroy(&alias_set[i]);
	}
}


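/* Return the alias relation between the access (addr, mode) and the memory
 * node other (a Load, Store or a Proj of one); every other kind of node is
 * assumed to alias. */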
static ir_alias_relation AliasTest(ir_graph* irg, ir_node* addr, ir_mode* mode, ir_node* other)
{
	ir_node* other_addr;
	ir_mode* other_mode;

	if (is_Proj(other)) other = get_Proj_pred(other);

	if (is_Load(other)) {
		other_addr = get_Load_ptr(other);
	} else if (is_Store(other)) {
		other_addr = get_Store_ptr(other);
	} else {
		return may_alias;
	}

	other_mode = get_irn_mode(other);
	return get_alias_relation(irg, addr, mode, other_addr, other_mode);
}


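/* Build a memory node from an execute-after set: the single element itself,
 * or a Sync of all elements if there is more than one. */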
static ir_node* GenerateSync(ir_graph* irg, ir_node* block, ir_nodeset_t* after_set)
{
	size_t set_size = ir_nodeset_size(after_set);
	ir_nodeset_iterator_t iter;

	assert(set_size != 0);

	ir_nodeset_iterator_init(&iter, after_set);
	if (set_size == 1) {
		return ir_nodeset_iterator_next(&iter);
	} else {
		ir_node** in;
		size_t i;

		NEW_ARR_A(ir_node*, in, set_size);
		for (i = 0; i < set_size; i++) {
			in[i] = ir_nodeset_iterator_next(&iter);
		}
		return new_r_Sync(irg, block, set_size, in);
	}
}


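/* Per address: singly linked list (chained via the irn link) of memory Phis
 * that still contain Unknown predecessors and must be patched later. */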
static ir_node** unfinished_phis;


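/* For a block with several control flow predecessors: create one memory Phi
 * per address from the predecessors' execute-after sets.  Predecessors which
 * have not been processed yet contribute an Unknown input, and such a Phi is
 * queued in unfinished_phis for later patching. */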
static void PlaceMemPhis(ir_graph* irg, ir_node* block, ir_node* phi)
{
	size_t block_n_preds = get_Block_n_cfgpreds(block);
	ir_nodeset_t* thissets;
	ir_node** in;
	size_t i;
	size_t j;

	thissets = get_irn_link(block);
	NEW_ARR_A(ir_node*, in, block_n_preds);
	for (j = 0; j < count_addrs; j++) {
		ir_node* new_phi;
		int unfinished = 0; /* reset per address, so one incomplete Phi does not taint the rest */

		for (i = 0; i < block_n_preds; i++) {
			ir_node* pred_block = get_nodes_block(get_Phi_pred(phi, i)); // TODO get_Block_cfgpred_block(block, i);
			ir_nodeset_t* predsets = get_irn_link(pred_block);
			size_t predset_size = ir_nodeset_size(&predsets[j]);

			if (predset_size == 0) {
				in[i] = new_r_Unknown(irg, mode_M);
				unfinished = 1;
			} else {
				in[i] = GenerateSync(irg, pred_block, &predsets[j]);
			}
		}
		new_phi = new_r_Phi(irg, block, block_n_preds, in, mode_M);
		if (unfinished) {
			set_irn_link(new_phi, unfinished_phis[j]);
			unfinished_phis[j] = new_phi;
		}
		ir_nodeset_insert(&thissets[j], new_phi);
	}
}


static int WalkMem(ir_graph* irg, ir_node* node, ir_node* last_block);


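/* Handle a memory Phi: first walk all its predecessors, then place the new
 * per-address Phis and discard the old one. */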
static void WalkMemPhi(ir_graph* irg, ir_node* block, ir_node* phi)
{
	size_t n = get_Phi_n_preds(phi);
	size_t i;

	for (i = 0; i < n; i++) {
		WalkMem(irg, get_Phi_pred(phi, i), block);
	}

	PlaceMemPhis(irg, block, phi);
	exchange(phi, new_Bad());
}


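/* Give a Load its new memory input, built from the execute-after set of its
 * address.  A load does not modify memory, so it is only added to the
 * execute-after sets of all potentially aliasing addresses and removes
 * nothing from them. */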
static void PlaceLoad(ir_graph* irg, ir_node* block, ir_node* load, ir_node* memory)
{
	ir_node* addr = get_Load_ptr(load);
	size_t addr_idx = (size_t)get_irn_link(addr);
	ir_nodeset_t* interfere_sets = get_irn_link(block);
	ir_nodeset_t* interfere_set = &interfere_sets[addr_idx];
	size_t size = ir_nodeset_size(interfere_set);
	ir_nodeset_iterator_t interfere_iter;
	size_t i;

	assert(size > 0);
	ir_nodeset_iterator_init(&interfere_iter, interfere_set);
	if (size == 1) {
		ir_node* after = ir_nodeset_iterator_next(&interfere_iter);
		assert(!is_Proj(after) || !is_Load(get_Proj_pred(after)));
		DB((dbg, LEVEL_3, "===> %+F must be executed after %+F\n", load, after));
		set_Load_mem(load, after);
	} else {
		ir_node** after_set;
		ir_node* after;
		ir_node* mem;
		size_t i;

		NEW_ARR_A(ir_node*, after_set, size);
		i = 0;
		while ((mem = ir_nodeset_iterator_next(&interfere_iter)) != NULL) {
			if (is_Proj(mem)) {
				ir_node* pred = get_Proj_pred(mem);
				if (is_Load(pred)) {
#ifdef OPTIMISE_LOAD_AFTER_LOAD
					if (get_Load_ptr(pred) == addr && get_Load_mode(pred) == get_Load_mode(load)) {
						exchange(load, pred);
						return;
					}
#endif
					continue;
				}
			}
			DB((dbg, LEVEL_3, "===> %+F must be executed after %+F\n", load, mem));
			after_set[i++] = mem;
		}
		assert(i != 0);
		if (i == 1) {
			after = after_set[0];
		} else {
			after = new_r_Sync(irg, block, i, after_set);
		}
		set_Load_mem(load, after);
	}

	for (i = 0; i < count_addrs; i++) {
		ir_mode* mode = get_Load_mode(load);
		ir_node* other_addr = addrs[i];
		ir_mode* other_mode = mode; // XXX second mode is nonsense
		ir_alias_relation rel = get_alias_relation(irg, addr, mode, other_addr, other_mode);

		DB((dbg, LEVEL_3, "===> Testing for alias between %+F and %+F. Relation is %d\n", addr, other_addr, rel));
		if (rel == no_alias) {
			continue;
		}
		DB((dbg, LEVEL_3, "===> %+F potentially aliases address %+F\n", load, other_addr));

		ir_nodeset_insert(&interfere_sets[i], memory);
	}
}


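/* Give a Store its new memory input from the execute-after set of its
 * address.  Since the store modifies memory, every node it may alias is
 * removed from the execute-after sets and the store's memory result is
 * inserted instead. */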
static void PlaceStore(ir_graph* irg, ir_node* block, ir_node* store, ir_node* memory)
{
	ir_node* addr = get_Store_ptr(store);
	size_t addr_idx = (size_t)get_irn_link(addr);
	ir_nodeset_t* interfere_sets = get_irn_link(block);
	ir_nodeset_t* interfere_set = &interfere_sets[addr_idx];
	ir_node* after;
	size_t i;

	after = GenerateSync(irg, block, interfere_set);
	set_Store_mem(store, after);

	for (i = 0; i < count_addrs; i++) {
		ir_nodeset_iterator_t interfere_iter;
		ir_mode* mode = get_irn_mode(get_Store_value(store));
		ir_node* other_addr = addrs[i];
		ir_mode* other_mode = mode; // XXX second mode is nonsense
		ir_alias_relation rel = get_alias_relation(irg, addr, mode, other_addr, other_mode);
		ir_node* other_node;

		DB((dbg, LEVEL_3, "===> Testing for alias between %+F and %+F. Relation is %d\n", addr, other_addr, rel));
		if (rel == no_alias) {
			continue;
		}
		DB((dbg, LEVEL_3, "===> %+F potentially aliases address %+F\n", store, other_addr));

		ir_nodeset_iterator_init(&interfere_iter, &interfere_sets[i]);
		while ((other_node = ir_nodeset_iterator_next(&interfere_iter)) != NULL) {
			if (AliasTest(irg, addr, mode, other_node) != no_alias) {
				DB((dbg, LEVEL_3, "===> Removing %+F from execute-after set of %+F due to %+F\n", other_node, addrs[i], store));
				ir_nodeset_remove_iterator(&interfere_sets[i], &interfere_iter);
			}
		}

		ir_nodeset_insert(&interfere_sets[i], memory);
	}
}


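/* Recursively walk the memory chain upwards starting at node, rewire the
 * memory inputs of the memory operations found and maintain the per-block
 * execute-after sets.  Returns non-zero if node lies in a different block
 * than last_block. */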
static int WalkMem(ir_graph* irg, ir_node* node, ir_node* last_block)
{
	int block_change = 0;
	ir_node* block = get_nodes_block(node);
	ir_node* pred;
	ir_node* memory = node;
	ir_nodeset_t* addr_sets;

	if (block != last_block) {
		DB((dbg, LEVEL_3, "===> Changing block from %+F to %+F\n", last_block, block));
		block_change = 1;
		if (Block_not_block_visited(block)) {
			mark_Block_block_visited(block);
		} else {
			DB((dbg, LEVEL_2, "===> Hit already visited block at %+F\n", node));
			return block_change;
		}
	}

	// Skip projs
	if (is_Proj(node)) node = get_Proj_pred(node);

	if (is_Phi(node)) {
		WalkMemPhi(irg, block, node);
		return block_change;
	} else if (is_Sync(node)) {
		UNIMPLEMENTED
	} else if (is_Return(node)) {
		pred = get_Return_mem(node);
	} else {
		pred = get_fragile_op_mem(node);
	}

	if (WalkMem(irg, pred, block)) {
		// There was a block change
		size_t block_arity = get_Block_n_cfgpreds(block);

		DB((dbg, LEVEL_3, "===> There is a block change before %+F\n", node));
		if (block_arity == 1) {
			// Just one predecessor, inherit its alias sets
			ir_node* pred_block = get_nodes_block(pred);
			ir_nodeset_t* predsets = get_irn_link(pred_block);
			ir_nodeset_t* thissets = get_irn_link(block);
			size_t i;

			DB((dbg, LEVEL_3, "===> Copying the only predecessor's address sets\n"));

			if (ir_nodeset_size(&predsets[0]) == 0) {
				ir_node* unknown;

				DB((dbg, LEVEL_3, "===> The predecessor was not finished yet\n"));
				assert(!Block_not_block_visited(pred_block));

				unknown = new_r_Unknown(irg, mode_M);
				for (i = 0; i < count_addrs; i++) {
					ir_node* phi_unk = new_r_Phi(irg, block, 1, &unknown, mode_M);
					DB((dbg, LEVEL_3, "===> Placing unfinished %+F for %+F in %+F\n", phi_unk, addrs[i], block));
					set_irn_link(phi_unk, unfinished_phis[i]);
					unfinished_phis[i] = phi_unk;
					ir_nodeset_insert(&thissets[i], phi_unk);
				}
			} else {
				for (i = 0; i < count_addrs; i++) {
					ir_nodeset_iterator_t prediter;
					ir_node* addr;

					ir_nodeset_iterator_init(&prediter, &predsets[i]);
					while ((addr = ir_nodeset_iterator_next(&prediter)) != NULL) {
						ir_nodeset_insert(&thissets[i], addr);
					}
				}
			}
		}
	}

	DB((dbg, LEVEL_3, "===> Detotalising %+F\n", node));

	addr_sets = get_irn_link(block);

	if (is_Load(node)) {
		PlaceLoad(irg, block, node, memory);
	} else if (is_Store(node)) {
		PlaceStore(irg, block, node, memory);
	} else {
		ir_nodeset_t sync_set;
		size_t i;
		ir_node* after;

		DB((dbg, LEVEL_3, "===> Fallback: %+F aliases everything\n", node));

		ir_nodeset_init(&sync_set);
		for (i = 0; i < count_addrs; i++) {
			ir_nodeset_iterator_t iter;
			ir_node* mem;

			ir_nodeset_iterator_init(&iter, &addr_sets[i]);
			while ((mem = ir_nodeset_iterator_next(&iter)) != NULL) {
				ir_nodeset_insert(&sync_set, mem);
			}
		}

		after = GenerateSync(irg, block, &sync_set);
		set_irn_n(node, 0, after); // XXX unnice way to set the memory input
		ir_nodeset_destroy(&sync_set); /* the set was only needed to build the Sync */

		for (i = 0; i < count_addrs; i++) {
			ir_nodeset_iterator_t iter;
			ir_nodeset_iterator_init(&iter, &addr_sets[i]);
			while (ir_nodeset_iterator_next(&iter) != NULL) {
				ir_nodeset_remove_iterator(&addr_sets[i], &iter);
			}
			ir_nodeset_insert(&addr_sets[i], memory);
		}
	}

	return block_change;
}


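/* Patch up all memory Phis that were queued in unfinished_phis: replace their
 * Unknown predecessors by the (now complete) execute-after sets of the
 * corresponding predecessor blocks. */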
static void FinalisePhis(ir_graph* irg)
{
	size_t i;

	for (i = 0; i < count_addrs; i++) {
		ir_node* next_phi;
		ir_node* phi;

		for (phi = unfinished_phis[i]; phi != NULL; phi = next_phi) {
			ir_node* block = get_nodes_block(phi);
			size_t block_n_preds = get_Block_n_cfgpreds(block);

			next_phi = get_irn_link(phi);

			DB((dbg, LEVEL_4, "===> Finalising phi %+F in %+F\n", phi, block));

			if (block_n_preds == 1) {
				ir_node* pred_block = get_Block_cfgpred_block(block, 0);
				ir_nodeset_t* pred_sets = get_irn_link(pred_block);
				ir_node* after = GenerateSync(irg, pred_block, &pred_sets[i]);

				assert(is_Unknown(get_Phi_pred(phi, 0)));
				exchange(phi, after);
			} else {
				size_t j;

				for (j = 0; j < block_n_preds; j++) {
					ir_node* pred_block = get_Block_cfgpred_block(block, j);
					ir_nodeset_t* pred_sets = get_irn_link(pred_block);

					if (is_Unknown(get_Phi_pred(phi, j))) {
						set_Phi_pred(phi, j, GenerateSync(irg, pred_block, &pred_sets[i]));
					}
				}
			}
		}
	}
}


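/* Rebuild the memory dependencies of the whole graph: start one memory walk
 * at every Return and afterwards patch the Phis that could not be completed
 * during the walk. */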
static void Detotalise(ir_graph* irg)
{
	ir_node* end_block = get_irg_end_block(irg);
	size_t npreds = get_Block_n_cfgpreds(end_block);
	size_t i;

	unfinished_phis = xmalloc(sizeof(*unfinished_phis) * count_addrs);
	for (i = 0; i < count_addrs; i++) {
		unfinished_phis[i] = NULL;
	}

	for (i = 0; i < npreds; i++) {
		ir_node* pred = get_Block_cfgpred(end_block, i);
		assert(is_Return(pred));
		DB((dbg, LEVEL_2, "===> Starting memory walk at %+F\n", pred));
		WalkMem(irg, pred, NULL);
	}

	FinalisePhis(irg);
	xfree(unfinished_phis);
}


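/* Collect the predecessors of a Sync into preds, flattening nested Syncs and
 * dropping duplicates on the way. */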
static void AddSyncPreds(ir_nodeset_t* preds, ir_node* sync)
{
	size_t n = get_Sync_n_preds(sync);
	size_t i;

	for (i = 0; i < n; i++) {
		ir_node* pred = get_Sync_pred(sync, i);
		if (is_Sync(pred)) {
			AddSyncPreds(preds, pred);
		} else {
			ir_nodeset_insert(preds, pred);
		}
	}
}


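/* Walker: replace the predecessors of a Sync by the flattened, duplicate-free
 * set collected with AddSyncPreds. */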
static void NormaliseSync(ir_node* node, void* env)
{
	ir_nodeset_t preds;
	ir_nodeset_iterator_t iter;
	ir_node** in;
	size_t count_preds;
	size_t i;

	if (!is_Sync(node)) return;

	ir_nodeset_init(&preds);
	AddSyncPreds(&preds, node);

	count_preds = ir_nodeset_size(&preds);
	if (count_preds != get_Sync_n_preds(node)) {
		NEW_ARR_A(ir_node*, in, count_preds);
		ir_nodeset_iterator_init(&iter, &preds);
		for (i = 0; i < count_preds; i++) {
			ir_node* pred = ir_nodeset_iterator_next(&iter);
			assert(pred != NULL);
			in[i] = pred;
		}
		set_irn_in(node, count_preds, in);
	}

	ir_nodeset_destroy(&preds);
}


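/* Entry point: rebuild the memory dependency graph of irg so that independent
 * loads and stores are no longer serialised on a single memory chain. */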
void opt_ldst2(ir_graph* irg)
{
	FIRM_DBG_REGISTER(dbg, "firm.opt.ldst2");
	DB((dbg, LEVEL_1, "===> Performing load/store optimisation on %+F\n", irg));

	normalize_one_return(irg);
	dump_ir_block_graph(irg, "-prefluffig");

	obstack_init(&obst);

	if (1 /* XXX */ || get_opt_alias_analysis()) {
		assure_irg_address_taken_computed(irg);
		assure_irp_globals_address_taken_computed();
	}


	CollectAddresses(irg);
	if (count_addrs == 0) {
		obstack_free(&obst, NULL); /* nothing to do, but do not leak the obstack */
		return;
	}

	irg_block_walk_graph(irg, AliasSetAdder, NULL, NULL);
	inc_irg_block_visited(irg);
	SetStartAddressesTop(irg);
	Detotalise(irg);
	dump_ir_block_graph(irg, "-fluffig");

	irg_block_walk_graph(irg, AliasSetDestroyer, NULL, NULL);
	obstack_free(&obst, NULL);

	normalize_proj_nodes(irg);
	irg_walk_graph(irg, NormaliseSync, NULL, NULL);
	optimize_graph_df(irg);
	irg_walk_graph(irg, NormaliseSync, NULL, NULL);
	dump_ir_block_graph(irg, "-postfluffig");
}