/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Load/Store optimizations.
 * @author  Michael Beck
 * @version $Id$
 */
#include "config.h"

#include <string.h>

#include "iroptimize.h"
#include "irnode_t.h"
#include "irgraph_t.h"
#include "irmode_t.h"
#include "iropt_t.h"
#include "ircons_t.h"
#include "irgmod.h"
#include "irgwalk.h"
#include "irvrfy.h"
#include "tv_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "irflag_t.h"
#include "array_t.h"
#include "irhooks.h"
#include "iredges.h"
#include "irtools.h"
#include "opt_polymorphy.h"
#include "irmemory.h"
#include "irphase_t.h"
#include "irgopt.h"
#include "debug.h"

/** The debug handle. */
DEBUG_ONLY(static firm_dbg_module_t *dbg;)

#ifdef DO_CACHEOPT
#include "cacheopt/cachesim.h"
#endif

#undef IMAX
#define IMAX(a,b)       ((a) > (b) ? (a) : (b))

#define MAX_PROJ        IMAX(IMAX(pn_Load_max, pn_Store_max), pn_Call_max)

enum changes_t {
        DF_CHANGED = 1,       /**< data flow changed */
        CF_CHANGED = 2,       /**< control flow changed */
};

/**
 * walker environment
 */
typedef struct _walk_env_t {
        struct obstack obst;          /**< list of all stores */
        unsigned changes;             /**< a bitmask of graph changes */
} walk_env_t;

/** A Load/Store info. */
typedef struct _ldst_info_t {
        ir_node  *projs[MAX_PROJ];    /**< list of Proj's of this node */
        ir_node  *exc_block;          /**< the exception block if available */
        int      exc_idx;             /**< predecessor index in the exception block */
        unsigned visited;             /**< visited counter for breaking loops */
} ldst_info_t;

/**
 * flags for control flow.
 */
enum block_flags_t {
        BLOCK_HAS_COND = 1,      /**< Block has conditional control flow */
        BLOCK_HAS_EXC  = 2       /**< Block has exceptional control flow */
};

/**
 * a Block info.
 */
typedef struct _block_info_t {
        unsigned flags;               /**< flags for the block */
} block_info_t;

/** the master visited flag for loop detection. */
static unsigned master_visited = 0;

#define INC_MASTER()       ++master_visited
#define MARK_NODE(info)    (info)->visited = master_visited
#define NODE_VISITED(info) (info)->visited >= master_visited

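/*
 * Usage sketch: a memory-chain walk first calls INC_MASTER(), then marks
 * each visited Load/Store with MARK_NODE(info); if NODE_VISITED(info)
 * already holds, the walk ran into a cycle and stops.  See
 * follow_Mem_chain() below.
 */
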
/**
 * get the Load/Store info of a node
 */
static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst) {
        ldst_info_t *info = get_irn_link(node);

        if (! info) {
                info = obstack_alloc(obst, sizeof(*info));
                memset(info, 0, sizeof(*info));
                set_irn_link(node, info);
        }
        return info;
}  /* get_ldst_info */

/**
 * get the Block info of a node
 */
static block_info_t *get_block_info(ir_node *node, struct obstack *obst) {
        block_info_t *info = get_irn_link(node);

        if (! info) {
                info = obstack_alloc(obst, sizeof(*info));
                memset(info, 0, sizeof(*info));
                set_irn_link(node, info);
        }
        return info;
}  /* get_block_info */

/**
 * update the projection info for a Load/Store
 */
static unsigned update_projs(ldst_info_t *info, ir_node *proj)
{
        long nr = get_Proj_proj(proj);

        assert(0 <= nr && nr < MAX_PROJ && "Wrong proj from LoadStore");

        if (info->projs[nr]) {
                /* there is already one, do CSE */
                exchange(proj, info->projs[nr]);
                return DF_CHANGED;
        }
        else {
                info->projs[nr] = proj;
                return 0;
        }
}  /* update_projs */

/**
 * update the exception block info for a Load/Store node.
 *
 * @param info   the load/store info struct
 * @param block  the exception handler block for this load/store
 * @param pos    the control flow input of the block
 */
static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
{
        assert(info->exc_block == NULL && "more than one exception block found");

        info->exc_block = block;
        info->exc_idx   = pos;
        return 0;
}  /* update_exc */

/** Return the number of uses of an address node */
#define get_irn_n_uses(adr)     get_irn_n_edges(adr)

/**
 * walker, collects all Load/Store/Proj nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
{
        ir_opcode   opcode = get_irn_opcode(node);
        ir_node     *pred, *blk, *pred_blk;
        ldst_info_t *ldst_info;
        walk_env_t  *wenv = env;

        if (opcode == iro_Proj) {
                pred   = get_Proj_pred(node);
                opcode = get_irn_opcode(pred);

                if (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call) {
                        ldst_info = get_ldst_info(pred, &wenv->obst);

                        wenv->changes |= update_projs(ldst_info, node);

                        /*
                         * Place the Proj's into the same block as their
                         * predecessor Load. This is always ok and prevents
                         * "non-SSA" form after optimizations if the Proj
                         * is in a wrong block.
                         */
                        blk      = get_nodes_block(node);
                        pred_blk = get_nodes_block(pred);
                        if (blk != pred_blk) {
                                wenv->changes |= DF_CHANGED;
                                set_nodes_block(node, pred_blk);
                        }
                }
        } else if (opcode == iro_Block) {
                int i;

                for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
                        ir_node      *pred_block, *proj;
                        block_info_t *bl_info;
                        int          is_exc = 0;

                        pred = proj = get_Block_cfgpred(node, i);

                        if (is_Proj(proj)) {
                                pred   = get_Proj_pred(proj);
                                is_exc = get_Proj_proj(proj) == pn_Generic_X_except;
                        }

                        /* ignore Bad predecessors, they will be removed later */
                        if (is_Bad(pred))
                                continue;

                        pred_block = get_nodes_block(pred);
                        bl_info    = get_block_info(pred_block, &wenv->obst);

                        if (is_fragile_op(pred) && is_exc)
                                bl_info->flags |= BLOCK_HAS_EXC;
                        else if (is_irn_forking(pred))
                                bl_info->flags |= BLOCK_HAS_COND;

                        opcode = get_irn_opcode(pred);
                        if (is_exc && (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call)) {
                                ldst_info = get_ldst_info(pred, &wenv->obst);

                                wenv->changes |= update_exc(ldst_info, node, i);
                        }
                }
        }
}  /* collect_nodes */

/**
 * Returns an entity if the address ptr points to a constant one.
 *
 * @param ptr  the address
 *
 * @return an entity or NULL
 */
static ir_entity *find_constant_entity(ir_node *ptr)
{
        for (;;) {
                if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
                        return get_SymConst_entity(ptr);
                } else if (is_Sel(ptr)) {
                        ir_entity *ent = get_Sel_entity(ptr);
                        ir_type   *tp  = get_entity_owner(ent);

                        /* Do not fiddle with polymorphism. */
                        if (is_Class_type(get_entity_owner(ent)) &&
                                ((get_entity_n_overwrites(ent)    != 0) ||
                                (get_entity_n_overwrittenby(ent) != 0)   ) )
                                return NULL;

                        if (is_Array_type(tp)) {
                                /* check bounds */
                                int i, n;

                                for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
                                        ir_node *bound;
                                        tarval *tlower, *tupper;
                                        ir_node *index = get_Sel_index(ptr, i);
                                        tarval *tv     = computed_value(index);

                                        /* check if the index is constant */
                                        if (tv == tarval_bad)
                                                return NULL;

                                        bound  = get_array_lower_bound(tp, i);
                                        tlower = computed_value(bound);
                                        bound  = get_array_upper_bound(tp, i);
                                        tupper = computed_value(bound);

                                        if (tlower == tarval_bad || tupper == tarval_bad)
                                                return NULL;

                                        if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
                                                return NULL;
                                        if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
                                                return NULL;

                                        /* ok, bounds check finished */
                                }
                        }

                        if (variability_constant == get_entity_variability(ent))
                                return ent;

                        /* try next */
                        ptr = get_Sel_ptr(ptr);
                } else if (is_Add(ptr)) {
                        ir_node *l = get_Add_left(ptr);
                        ir_node *r = get_Add_right(ptr);

                        if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
                                ptr = l;
                        else if (get_irn_mode(r) == get_irn_mode(ptr) && is_Const(l))
                                ptr = r;
                        else
                                return NULL;

                        /* for now, we support only one addition, reassoc should fold all others */
                        if (! is_SymConst(ptr) && !is_Sel(ptr))
                                return NULL;
                } else if (is_Sub(ptr)) {
                        ir_node *l = get_Sub_left(ptr);
                        ir_node *r = get_Sub_right(ptr);

                        if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
                                ptr = l;
                        else
                                return NULL;
                        /* for now, we support only one subtraction, reassoc should fold all others */
                        if (! is_SymConst(ptr) && !is_Sel(ptr))
                                return NULL;
                } else
                        return NULL;
        }
}  /* find_constant_entity */
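
/*
 * For example (a sketch, assuming a global "static const int tab[4]"):
 * the address of tab[2] appears as Sel(SymConst(tab), Const 2).  The Sel
 * branch above checks the index against the array bounds and, since tab
 * has constant variability, returns the entity tab.  An address such as
 * Add(SymConst(tab), Const 8) is likewise followed down to its SymConst
 * base.
 */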

/**
 * Return the Selection index of a Sel node from dimension n
 */
static long get_Sel_array_index_long(ir_node *n, int dim) {
        ir_node *index = get_Sel_index(n, dim);
        assert(is_Const(index));
        return get_tarval_long(get_Const_tarval(index));
}  /* get_Sel_array_index_long */

/**
 * Returns the accessed component graph path for a
 * node computing an address.
 *
 * @param ptr    the node computing the address
 * @param depth  current depth in steps upward from the root
 *               of the address
 */
static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
        compound_graph_path *res = NULL;
        ir_entity           *root, *field, *ent;
        int                 path_len, pos, idx;
        tarval              *tv;
        ir_type             *tp;

        if (is_SymConst(ptr)) {
                /* a SymConst. If the depth is 0, this is an access to a global
                 * entity and we don't need a component path, else we know
                 * at least its length.
                 */
                assert(get_SymConst_kind(ptr) == symconst_addr_ent);
                root = get_SymConst_entity(ptr);
                res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
        } else if (is_Sel(ptr)) {
                /* it's a Sel, go up until we find the root */
                res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);
                if (res == NULL)
                        return NULL;

                /* fill up the step in the path at the current position */
                field    = get_Sel_entity(ptr);
                path_len = get_compound_graph_path_length(res);
                pos      = path_len - depth - 1;
                set_compound_graph_path_node(res, pos, field);

                if (is_Array_type(get_entity_owner(field))) {
                        assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
                        set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
                }
        } else if (is_Add(ptr)) {
                ir_node *l    = get_Add_left(ptr);
                ir_node *r    = get_Add_right(ptr);
                ir_mode *mode = get_irn_mode(ptr);
                tarval  *tmp;

                if (is_Const(r) && get_irn_mode(l) == mode) {
                        ptr = l;
                        tv  = get_Const_tarval(r);
                } else {
                        ptr = r;
                        tv  = get_Const_tarval(l);
                }
ptr_arith:
                mode = get_tarval_mode(tv);
                tmp  = tv;

                /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
                if (is_Sel(ptr)) {
                        field = get_Sel_entity(ptr);
                } else {
                        field = get_SymConst_entity(ptr);
                }
                idx = 0;
                for (ent = field;;) {
                        unsigned size;
                        tarval   *sz, *tv_index, *tlower, *tupper;
                        ir_node  *bound;

                        tp = get_entity_type(ent);
                        if (! is_Array_type(tp))
                                break;
                        ent = get_array_element_entity(tp);
                        size = get_type_size_bytes(get_entity_type(ent));
                        sz   = new_tarval_from_long(size, mode);

                        tv_index = tarval_div(tmp, sz);
                        tmp      = tarval_mod(tmp, sz);

                        if (tv_index == tarval_bad || tmp == tarval_bad)
                                return NULL;

                        assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
                        bound  = get_array_lower_bound(tp, 0);
                        tlower = computed_value(bound);
                        bound  = get_array_upper_bound(tp, 0);
                        tupper = computed_value(bound);

                        if (tlower == tarval_bad || tupper == tarval_bad)
                                return NULL;

                        if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
                                return NULL;
                        if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
                                return NULL;

                        /* ok, bounds check finished */
                        ++idx;
                }
                if (! tarval_is_null(tmp)) {
                        /* access to some struct/union member */
                        return NULL;
                }

                /* should be at least ONE array */
                if (idx == 0)
                        return NULL;

                res = rec_get_accessed_path(ptr, depth + idx);
                if (res == NULL)
                        return NULL;

                path_len = get_compound_graph_path_length(res);
                pos      = path_len - depth - idx;

                for (ent = field;;) {
                        unsigned size;
                        tarval   *sz, *tv_index;
                        long     index;

                        tp = get_entity_type(ent);
                        if (! is_Array_type(tp))
                                break;
                        ent = get_array_element_entity(tp);
                        set_compound_graph_path_node(res, pos, ent);

                        size = get_type_size_bytes(get_entity_type(ent));
                        sz   = new_tarval_from_long(size, mode);

                        tv_index = tarval_div(tv, sz);
                        tv       = tarval_mod(tv, sz);

                        /* worked above, should work again */
                        assert(tv_index != tarval_bad && tv != tarval_bad);

                        /* bounds already checked above */
                        index = get_tarval_long(tv_index);
                        set_compound_graph_path_array_index(res, pos, index);
                        ++pos;
                }
        } else if (is_Sub(ptr)) {
                ir_node *l = get_Sub_left(ptr);
                ir_node *r = get_Sub_right(ptr);

                ptr = l;
                tv  = get_Const_tarval(r);
                tv  = tarval_neg(tv);
                goto ptr_arith;
        }
        return res;
}  /* rec_get_accessed_path */
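
/*
 * Worked example for the pointer-arithmetic branch above (a sketch,
 * assuming 4-byte int elements): for "static const int a[10]" and the
 * address Add(SymConst(a), Const 12), tv is 12 and sz is 4, so
 * tv_index = 12 / 4 = 3 with remainder 0.  One array level is found
 * (idx == 1) and, after the bounds checks, the resulting path is a[3].
 */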

/**
 * Returns an access path or NULL.  The access path is only
 * valid, if the graph is in phase_high and _no_ address computation is used.
 */
static compound_graph_path *get_accessed_path(ir_node *ptr) {
        compound_graph_path *gr = rec_get_accessed_path(ptr, 0);
        return gr;
}  /* get_accessed_path */

typedef struct path_entry {
        ir_entity         *ent;
        struct path_entry *next;
        long              index;
} path_entry;

static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) {
        path_entry       entry, *p;
        ir_entity        *ent, *field;
        ir_initializer_t *initializer;
        tarval           *tv;
        ir_type          *tp;
        unsigned         n;

        entry.next = next;
        if (is_SymConst(ptr)) {
                /* found the root */
                ent         = get_SymConst_entity(ptr);
                initializer = get_entity_initializer(ent);
                for (p = next; p != NULL;) {
                        if (initializer->kind != IR_INITIALIZER_COMPOUND)
                                return NULL;
                        n  = get_initializer_compound_n_entries(initializer);
                        tp = get_entity_type(ent);

                        if (is_Array_type(tp)) {
                                ent = get_array_element_entity(tp);
                                if (ent != p->ent) {
                                        /* a missing [0] */
                                        if (0 >= n)
                                                return NULL;
                                        initializer = get_initializer_compound_value(initializer, 0);
                                        continue;
                                }
                        }
                        if (p->index >= (int) n)
                                return NULL;
                        initializer = get_initializer_compound_value(initializer, p->index);

                        ent = p->ent;
                        p   = p->next;
                }
                tp = get_entity_type(ent);
                while (is_Array_type(tp)) {
                        ent = get_array_element_entity(tp);
                        tp  = get_entity_type(ent);
                        /* a missing [0] */
                        n  = get_initializer_compound_n_entries(initializer);
                        if (0 >= n)
                                return NULL;
                        initializer = get_initializer_compound_value(initializer, 0);
                }

                switch (initializer->kind) {
                case IR_INITIALIZER_CONST:
                        return get_initializer_const_value(initializer);
                case IR_INITIALIZER_TARVAL:
                case IR_INITIALIZER_NULL:
                default:
                        return NULL;
                }
        } else if (is_Sel(ptr)) {
                entry.ent = field = get_Sel_entity(ptr);
                tp = get_entity_owner(field);
                if (is_Array_type(tp)) {
                        assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
                        entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
                } else {
                        int i, n_members = get_compound_n_members(tp);
                        for (i = 0; i < n_members; ++i) {
                                if (get_compound_member(tp, i) == field)
                                        break;
                        }
                        if (i >= n_members) {
                                /* not found: should NOT happen */
                                return NULL;
                        }
                        entry.index = i;
                }
                return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
        } else if (is_Add(ptr)) {
                ir_node  *l = get_Add_left(ptr);
                ir_node  *r = get_Add_right(ptr);
                ir_mode  *mode;
                unsigned pos;

                if (is_Const(r)) {
                        ptr = l;
                        tv  = get_Const_tarval(r);
                } else {
                        ptr = r;
                        tv  = get_Const_tarval(l);
                }
ptr_arith:
                mode = get_tarval_mode(tv);

                /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
                if (is_Sel(ptr)) {
                        field = get_Sel_entity(ptr);
                } else {
                        field = get_SymConst_entity(ptr);
                }

                /* count needed entries */
                pos = 0;
                for (ent = field;;) {
                        tp = get_entity_type(ent);
                        if (! is_Array_type(tp))
                                break;
                        ent = get_array_element_entity(tp);
                        ++pos;
                }
                /* should be at least ONE entry */
                if (pos == 0)
                        return NULL;

                /* allocate the right number of entries */
                NEW_ARR_A(path_entry, p, pos);

                /* fill them up */
                pos = 0;
                for (ent = field;;) {
                        unsigned size;
                        tarval   *sz, *tv_index, *tlower, *tupper;
                        long     index;
                        ir_node  *bound;

                        tp = get_entity_type(ent);
                        if (! is_Array_type(tp))
                                break;
                        ent = get_array_element_entity(tp);
                        p[pos].ent  = ent;
                        p[pos].next = &p[pos + 1];

                        size = get_type_size_bytes(get_entity_type(ent));
                        sz   = new_tarval_from_long(size, mode);

                        tv_index = tarval_div(tv, sz);
                        tv       = tarval_mod(tv, sz);

                        if (tv_index == tarval_bad || tv == tarval_bad)
                                return NULL;

                        assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
                        bound  = get_array_lower_bound(tp, 0);
                        tlower = computed_value(bound);
                        bound  = get_array_upper_bound(tp, 0);
                        tupper = computed_value(bound);

                        if (tlower == tarval_bad || tupper == tarval_bad)
                                return NULL;

                        if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
                                return NULL;
                        if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
                                return NULL;

                        /* ok, bounds check finished */
                        index = get_tarval_long(tv_index);
                        p[pos].index = index;
                        ++pos;
                }
                if (! tarval_is_null(tv)) {
                        /* hmm, wrong access */
                        return NULL;
                }
                p[pos - 1].next = next;
                return rec_find_compound_ent_value(ptr, p);
        } else if (is_Sub(ptr)) {
                ir_node *l = get_Sub_left(ptr);
                ir_node *r = get_Sub_right(ptr);

                ptr = l;
                tv  = get_Const_tarval(r);
                tv  = tarval_neg(tv);
                goto ptr_arith;
        }
        return NULL;
}

static ir_node *find_compound_ent_value(ir_node *ptr) {
        return rec_find_compound_ent_value(ptr, NULL);
}
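
/*
 * For example (a sketch): given
 * "static const struct { int a[2]; } s = { { 1, 2 } };", the address of
 * s.a[1] is a Sel chain ending at SymConst(s).  The recursion above
 * collects the path entries for member a and array index 1, walks the
 * compound initializer of s down to the second array entry, and returns
 * the node for the constant 2.
 */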

/* forward */
static void reduce_adr_usage(ir_node *ptr);

/**
 * Update a Load that may have lost its users.
 */
static void handle_load_update(ir_node *load) {
        ldst_info_t *info = get_irn_link(load);

        /* do NOT touch volatile loads for now */
        if (get_Load_volatility(load) == volatility_is_volatile)
                return;

        if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
                ir_node *ptr = get_Load_ptr(load);
                ir_node *mem = get_Load_mem(load);

                /* a Load whose value is neither used nor exception checked, remove it */
                exchange(info->projs[pn_Load_M], mem);
                if (info->projs[pn_Load_X_regular])
                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
                kill_node(load);
                reduce_adr_usage(ptr);
        }
}  /* handle_load_update */

/**
 * A use of an address node has vanished. Check if this was a Proj
 * node and update the counters.
 */
static void reduce_adr_usage(ir_node *ptr) {
        if (is_Proj(ptr)) {
                if (get_irn_n_edges(ptr) <= 0) {
                        /* this Proj is dead now */
                        ir_node *pred = get_Proj_pred(ptr);

                        if (is_Load(pred)) {
                                ldst_info_t *info = get_irn_link(pred);
                                info->projs[get_Proj_proj(ptr)] = NULL;

                                /* this node lost its result proj, handle that */
                                handle_load_update(pred);
                        }
                }
        }
}  /* reduce_adr_usage */

/**
 * Check, if an already existing value of mode old_mode can be converted
 * into the needed one new_mode without loss.
 */
static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode) {
        if (old_mode == new_mode)
                return 1;

        /* if both modes are two's-complement ones, we can always convert the
           stored value into the needed one. */
        if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
                  get_mode_arithmetic(old_mode) == irma_twos_complement &&
                  get_mode_arithmetic(new_mode) == irma_twos_complement)
                return 1;
        return 0;
}  /* can_use_stored_value */
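
/*
 * For example: a value stored in 32-bit mode Iu can serve a later 16-bit
 * Hu load (smaller or equal size, both two's complement), while a value
 * stored in 16 bits cannot serve a 32-bit load, since the upper bits
 * would be unknown.
 */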

/**
 * Check whether a Call is at least pure, i.e. it only reads memory.
 */
static unsigned is_Call_pure(ir_node *call) {
        ir_type *call_tp = get_Call_type(call);
        unsigned prop = get_method_additional_properties(call_tp);

        /* check the call type first */
        if ((prop & (mtp_property_const|mtp_property_pure)) == 0) {
                /* try the called entity */
                ir_node *ptr = get_Call_ptr(call);

                if (is_Global(ptr)) {
                        ir_entity *ent = get_Global_entity(ptr);

                        prop = get_entity_additional_properties(ent);
                }
        }
        return (prop & (mtp_property_const|mtp_property_pure)) != 0;
}  /* is_Call_pure */

static ir_node *get_base_and_offset(ir_node *ptr, long *pOffset)
{
        ir_mode *mode  = get_irn_mode(ptr);
        long    offset = 0;

        /* TODO: long might not be enough, we should probably use some tarval thingy... */
        for (;;) {
                if (is_Add(ptr)) {
                        ir_node *l = get_Add_left(ptr);
                        ir_node *r = get_Add_right(ptr);

                        if (get_irn_mode(l) != mode || !is_Const(r))
                                break;

                        offset += get_tarval_long(get_Const_tarval(r));
                        ptr     = l;
                } else if (is_Sub(ptr)) {
                        ir_node *l = get_Sub_left(ptr);
                        ir_node *r = get_Sub_right(ptr);

                        if (get_irn_mode(l) != mode || !is_Const(r))
                                break;

                        offset -= get_tarval_long(get_Const_tarval(r));
                        ptr     = l;
                } else if (is_Sel(ptr)) {
                        ir_entity *ent = get_Sel_entity(ptr);
                        ir_type   *tp  = get_entity_owner(ent);

                        if (is_Array_type(tp)) {
                                int     size;
                                ir_node *index;

                                /* only one dimensional arrays yet */
                                if (get_Sel_n_indexs(ptr) != 1)
                                        break;
                                index = get_Sel_index(ptr, 0);
                                if (! is_Const(index))
                                        break;

                                tp = get_entity_type(ent);
                                if (get_type_state(tp) != layout_fixed)
                                        break;

                                size    = get_type_size_bytes(tp);
                                offset += size * get_tarval_long(get_Const_tarval(index));
                        } else {
                                if (get_type_state(tp) != layout_fixed)
                                        break;
                                offset += get_entity_offset(ent);
                        }
                        ptr = get_Sel_ptr(ptr);
                } else
                        break;
        }

        *pOffset = offset;
        return ptr;
}
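
/*
 * For example (a sketch, assuming a fixed layout with 4-byte int array
 * elements): for an address Add(Sel(p, a, [2]), Const 4) the loop above
 * accumulates 2 * 4 + 4 = 12, strips the Sel and the Add, and returns
 * the base p with *pOffset == 12.
 */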

static int try_load_after_store(ir_node *load,
                ir_node *load_base_ptr, long load_offset, ir_node *store)
{
        ldst_info_t *info;
        ir_node *store_ptr      = get_Store_ptr(store);
        long     store_offset;
        ir_node *store_base_ptr = get_base_and_offset(store_ptr, &store_offset);
        ir_node *store_value;
        ir_mode *store_mode;
        ir_node *load_ptr;
        ir_mode *load_mode;
        long     load_mode_len;
        long     store_mode_len;
        long     delta;
        int      res;

        if (load_base_ptr != store_base_ptr)
                return 0;

        load_mode      = get_Load_mode(load);
        load_mode_len  = get_mode_size_bytes(load_mode);
        store_mode     = get_irn_mode(get_Store_value(store));
        store_mode_len = get_mode_size_bytes(store_mode);
        delta          = load_offset - store_offset;
        store_value    = get_Store_value(store);

        if (delta != 0 || store_mode != load_mode) {
                if (delta < 0 || delta + load_mode_len > store_mode_len)
                        return 0;

                if (get_mode_arithmetic(store_mode) != irma_twos_complement ||
                        get_mode_arithmetic(load_mode)  != irma_twos_complement)
                        return 0;

                /* produce a shift to adjust the offset delta */
                if (delta > 0) {
                        ir_node *cnst;

                        /* FIXME: only true for little endian */
                        cnst        = new_Const_long(mode_Iu, delta * 8);
                        store_value = new_r_Shr(current_ir_graph, get_nodes_block(load),
                                                store_value, cnst, store_mode);
                }

                /* add a Conv if needed */
                if (store_mode != load_mode) {
                        store_value = new_r_Conv(current_ir_graph, get_nodes_block(load),
                                                 store_value, load_mode);
                }
        }
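
        /*
         * Worked example (little endian, as the FIXME above assumes): after a
         * 32-bit store of 0x11223344 at offset 0, an 8-bit load at offset 2
         * has delta == 2; the stored value is shifted right by 16 bits and
         * converted to the load mode, yielding 0x22.
         */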

        DBG_OPT_RAW(load, store_value);

        info = get_irn_link(load);
        if (info->projs[pn_Load_M])
                exchange(info->projs[pn_Load_M], get_Load_mem(load));

        res = 0;
        /* no exception */
        if (info->projs[pn_Load_X_except]) {
                exchange(info->projs[pn_Load_X_except], new_Bad());
                res |= CF_CHANGED;
        }
        if (info->projs[pn_Load_X_regular]) {
                exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
                res |= CF_CHANGED;
        }

        if (info->projs[pn_Load_res])
                exchange(info->projs[pn_Load_res], store_value);

        load_ptr = get_Load_ptr(load);
        kill_node(load);
        reduce_adr_usage(load_ptr);
        return res | DF_CHANGED;
}

/**
 * Follow the memory chain as long as there are only Loads,
 * alias free Stores, and constant Calls and try to replace the
 * current Load by a previous one.
 * Note that in unreachable loops it might happen that we reach
 * the load again, as well as we can fall into a cycle.
 * We break such cycles using a special visited flag.
 *
 * INC_MASTER() must be called before diving into the chain.
 */
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
        unsigned    res = 0;
        ldst_info_t *info = get_irn_link(load);
        ir_node     *pred;
        ir_node     *ptr       = get_Load_ptr(load);
        ir_node     *mem       = get_Load_mem(load);
        ir_mode     *load_mode = get_Load_mode(load);

        for (pred = curr; load != pred; ) {
                ldst_info_t *pred_info = get_irn_link(pred);

                /*
                 * a Load immediately after a Store -- a read after write.
                 * We may remove the Load, if both Load & Store do not have an
                 * exception handler OR they are in the same MacroBlock. In the latter
                 * case the Load cannot throw an exception when the previous Store was
                 * quiet.
                 *
                 * Why do we need to check for the Store exception? If the Store cannot
                 * be executed (ROM) the exception handler might simply jump into
                 * the load MacroBlock :-(
                 * We could make it a little bit better if we would know that the
                 * exception handler of the Store jumps directly to the end...
                 */
                if (is_Store(pred) && ((pred_info->projs[pn_Store_X_except] == NULL
                                && info->projs[pn_Load_X_except] == NULL)
                                || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)))
                {
                        long    load_offset;
                        ir_node *base_ptr = get_base_and_offset(ptr, &load_offset);
                        int     changes   = try_load_after_store(load, base_ptr, load_offset, pred);

                        if (changes != 0)
                                return res | changes;
                } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
                           can_use_stored_value(get_Load_mode(pred), load_mode)) {
                        /*
                         * a Load after a Load -- a read after read.
                         * We may remove the second Load, if it does not have an exception
                         * handler OR they are in the same MacroBlock. In the latter case
                         * the Load cannot throw an exception when the previous Load was
                         * quiet.
                         *
                         * Here, there is no need to check if the previous Load has an
                         * exception handler because they would have exactly the same
                         * exception...
                         */
                        if (info->projs[pn_Load_X_except] == NULL || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)) {
                                ir_node *value;

                                DBG_OPT_RAR(load, pred);

                                /* the result is used */
                                if (info->projs[pn_Load_res]) {
                                        if (pred_info->projs[pn_Load_res] == NULL) {
                                                /* create a new Proj again */
                                                pred_info->projs[pn_Load_res] = new_r_Proj(current_ir_graph, get_nodes_block(pred), pred, get_Load_mode(pred), pn_Load_res);
                                        }
                                        value = pred_info->projs[pn_Load_res];

                                        /* add a Conv if needed */
                                        if (get_Load_mode(pred) != load_mode) {
                                                value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
                                        }

                                        exchange(info->projs[pn_Load_res], value);
                                }

                                if (info->projs[pn_Load_M])
                                        exchange(info->projs[pn_Load_M], mem);

                                /* no exception */
                                if (info->projs[pn_Load_X_except]) {
                                        exchange(info->projs[pn_Load_X_except], new_Bad());
                                        res |= CF_CHANGED;
                                }
                                if (info->projs[pn_Load_X_regular]) {
                                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
                                        res |= CF_CHANGED;
                                }

                                kill_node(load);
                                reduce_adr_usage(ptr);
                                return res | DF_CHANGED;
                        }
                }

                if (is_Store(pred)) {
                        /* check if we can pass through this store */
                        ir_alias_relation rel = get_alias_relation(
                                current_ir_graph,
                                get_Store_ptr(pred),
                                get_irn_mode(get_Store_value(pred)),
                                ptr, load_mode);
                        /* if there might be an alias, we cannot pass this Store */
                        if (rel != ir_no_alias)
                                break;
                        pred = skip_Proj(get_Store_mem(pred));
                } else if (is_Load(pred)) {
                        pred = skip_Proj(get_Load_mem(pred));
                } else if (is_Call(pred)) {
                        if (is_Call_pure(pred)) {
                                /* The called graph is at least pure, so there are no Store's
                                   in it. We can handle it like a Load and skip it. */
                                pred = skip_Proj(get_Call_mem(pred));
                        } else {
                                /* there might be Store's in the graph, stop here */
                                break;
                        }
                } else {
                        /* follow only Load chains */
                        break;
                }

                /* check for cycles */
                if (NODE_VISITED(pred_info))
                        break;
                MARK_NODE(pred_info);
        }

        if (is_Sync(pred)) {
                int i;

                /* handle all Sync predecessors */
                for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
                        res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
                        if (res)
                                return res;
                }
        }

        return res;
}  /* follow_Mem_chain */
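
/*
 * For example (a sketch): in "x = *p; *q = v; y = *p;" with p and q known
 * not to alias, the walk for the second Load passes the Store (ir_no_alias)
 * and reaches the first Load at the same address; the read-after-read case
 * above then reuses its result Proj instead of loading again.
 */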

/*
 * Check if we can replace the load by a given const from
 * the const code irg.
 */
ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c) {
        ir_mode *c_mode = get_irn_mode(c);
        ir_mode *l_mode = get_Load_mode(load);
        ir_node *res    = NULL;

        if (c_mode != l_mode) {
                /* check, if the mode matches OR can be easily converted into it */
                if (is_reinterpret_cast(c_mode, l_mode)) {
                        /* we can safely cast */
                        dbg_info *dbg   = get_irn_dbg_info(load);
                        ir_node  *block = get_nodes_block(load);

                        /* copy the value from the const code irg and cast it */
                        res = copy_const_value(dbg, c);
                        res = new_rd_Conv(dbg, current_ir_graph, block, res, l_mode);
                }
        } else {
                /* copy the value from the const code irg */
                res = copy_const_value(get_irn_dbg_info(load), c);
        }
        return res;
}  /* can_replace_load_by_const */

/**
 * optimize a Load
 *
 * @param load  the Load node
 */
static unsigned optimize_load(ir_node *load)
{
        ldst_info_t *info = get_irn_link(load);
        ir_node     *mem, *ptr, *value;
        ir_entity   *ent;
        long        dummy;
        unsigned    res = 0;

        /* do NOT touch volatile loads for now */
        if (get_Load_volatility(load) == volatility_is_volatile)
                return 0;

        /* the address of the load to be optimized */
        ptr = get_Load_ptr(load);

        /*
         * Check if we can remove the exception from a Load:
         * This can be done, if the address is from a Sel(Alloc) and
         * the Sel type is a subtype of the allocated type.
         *
         * This optimizes some often used OO constructs,
         * like x = new O; x->t;
         */
        if (info->projs[pn_Load_X_except]) {
                ir_node *addr = ptr;

                /* find base address */
                while (is_Sel(addr))
                        addr = get_Sel_ptr(addr);
                if (is_Alloc(skip_Proj(skip_Cast(addr)))) {
                        /* simple case: a direct load after an Alloc. A Firm Alloc throws
                         * an exception in case of out-of-memory. So, there is no way for an
                         * exception in this load.
                         * This code is constructed by the "exception lowering" in the Jack compiler.
                         */
                        exchange(info->projs[pn_Load_X_except], new_Bad());
                        info->projs[pn_Load_X_except] = NULL;
                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
                        info->projs[pn_Load_X_regular] = NULL;
                        res |= CF_CHANGED;
                }
        }

        /* The mem of the Load. Must still be returned after optimization. */
        mem = get_Load_mem(load);

        if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
                /* a Load whose value is neither used nor exception checked, remove it */
                exchange(info->projs[pn_Load_M], mem);

                if (info->projs[pn_Load_X_regular]) {
                        /* should not happen, but if it does, remove it */
                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
                        res |= CF_CHANGED;
                }
                kill_node(load);
                reduce_adr_usage(ptr);
                return res | DF_CHANGED;
        }

        /* Load from a constant polymorphic field, where we can resolve
           polymorphism. */
        value = transform_polymorph_Load(load);
        if (value == load) {
                value = NULL;
                /* check if we can determine the entity that will be loaded */
                ent = find_constant_entity(ptr);
                if (ent != NULL                                     &&
                    allocation_static == get_entity_allocation(ent) &&
                    visibility_external_allocated != get_entity_visibility(ent)) {
                        /* a static allocation that is not external: there should be NO
                         * exception when loading even if we cannot replace the load itself. */

                        /* no exception, clear the info field as it might be checked later again */
                        if (info->projs[pn_Load_X_except]) {
                                exchange(info->projs[pn_Load_X_except], new_Bad());
                                info->projs[pn_Load_X_except] = NULL;
                                res |= CF_CHANGED;
                        }
                        if (info->projs[pn_Load_X_regular]) {
                                exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
                                info->projs[pn_Load_X_regular] = NULL;
                                res |= CF_CHANGED;
                        }

                        if (variability_constant == get_entity_variability(ent)) {
                                if (is_atomic_entity(ent)) {
                                        /* Might not be atomic after lowering of Sels.  In this case we
                                         * could also load, but it's more complicated. */
                                        /* simpler case: we load the content of a constant value:
                                         * replace it by the constant itself */
                                        value = get_atomic_ent_value(ent);
                                } else if (ent->has_initializer) {
                                        /* new style initializer */
                                        value = find_compound_ent_value(ptr);
                                } else {
                                        /* old style initializer */
                                        compound_graph_path *path = get_accessed_path(ptr);

                                        if (path != NULL) {
                                                assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));

                                                value = get_compound_ent_value_by_path(ent, path);
                                                DB((dbg, LEVEL_1, "  Constant access at %F%F resulted in %+F\n", ent, path, value));
                                                free_compound_graph_path(path);
                                        }
                                }
                                if (value != NULL)
                                        value = can_replace_load_by_const(load, value);
                        }
                }
        }
        if (value != NULL) {
                /* we completely replace the load by this value */
                if (info->projs[pn_Load_X_except]) {
                        exchange(info->projs[pn_Load_X_except], new_Bad());
                        info->projs[pn_Load_X_except] = NULL;
                        res |= CF_CHANGED;
                }
                if (info->projs[pn_Load_X_regular]) {
                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
                        info->projs[pn_Load_X_regular] = NULL;
                        res |= CF_CHANGED;
                }
                if (info->projs[pn_Load_M]) {
                        exchange(info->projs[pn_Load_M], mem);
                        res |= DF_CHANGED;
                }
                if (info->projs[pn_Load_res]) {
                        exchange(info->projs[pn_Load_res], value);
                        res |= DF_CHANGED;
                }
                kill_node(load);
                reduce_adr_usage(ptr);
                return res;
        }

        /* Check, if the address of this load is used more than once.
         * If not, this load cannot be removed in any case. */
        if (get_irn_n_uses(ptr) <= 1 && get_irn_n_uses(get_base_and_offset(ptr, &dummy)) <= 1)
                return res;

        /*
         * follow the memory chain as long as there are only Loads
         * and try to replace the current Load or Store by a previous one.
         * Note that in unreachable loops it might happen that we reach the
         * load again, as well as we can fall into a cycle.
         * We break such cycles using a special visited flag.
         */
        INC_MASTER();
        res = follow_Mem_chain(load, skip_Proj(mem));
        return res;
}  /* optimize_load */

/**
 * Check whether a value of mode new_mode would completely overwrite a value
 * of mode old_mode in memory.
 */
static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
{
        return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
}  /* is_completely_overwritten */

/**
 * Check whether small is a part of large (starting at the same address).
 */
static int is_partially_same(ir_node *small, ir_node *large)
{
        ir_mode *sm = get_irn_mode(small);
        ir_mode *lm = get_irn_mode(large);

        /* FIXME: Check endianness */
        return is_Conv(small) && get_Conv_op(small) == large
            && get_mode_size_bytes(sm) < get_mode_size_bytes(lm)
            && get_mode_arithmetic(sm) == irma_twos_complement
            && get_mode_arithmetic(lm) == irma_twos_complement;
}  /* is_partially_same */
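
/*
 * For example (little endian, per the FIXME above): after
 * "*(short*)p = (short)x; *(int*)p = x;" the first Store writes Conv(x)
 * in a smaller mode, so its value is part of the second store's value
 * and the write-after-write case below may remove the first Store.
 */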
1261
1262 /**
1263  * follow the memory chain as long as there are only Loads and alias free Stores.
1264  *
1265  * INC_MASTER() must be called before dive into
1266  */
1267 static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
1268         unsigned res = 0;
1269         ldst_info_t *info = get_irn_link(store);
1270         ir_node *pred;
1271         ir_node *ptr = get_Store_ptr(store);
1272         ir_node *mem = get_Store_mem(store);
1273         ir_node *value = get_Store_value(store);
1274         ir_mode *mode  = get_irn_mode(value);
1275         ir_node *block = get_nodes_block(store);
1276         ir_node *mblk  = get_Block_MacroBlock(block);
1277
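        /* walk backwards along the memory chain, starting at curr, until we
         * either return to the Store itself or hit a node we cannot pass */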
1278         for (pred = curr; pred != store;) {
1279                 ldst_info_t *pred_info = get_irn_link(pred);
1280
                /*
                 * BEWARE: one might think that checking the modes is useless,
                 * because if the pointers are identical, they refer to the
                 * same object.  This is only true in strongly typed languages,
                 * not in C, where the following is possible:
                 * *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
                 * However, if the size of the mode that is written is bigger
                 * than or equal to the size of the old one, the old value is
                 * completely overwritten and can be killed ...
                 */
                if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
                    get_nodes_MacroBlock(pred) == mblk) {
1292                         /*
1293                          * a Store after a Store in the same MacroBlock -- a write after write.
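                         * E.g. for  p->x = 1; p->x = 2;  the first Store is dead.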
1294                          */
1295
1296                         /*
1297                          * We may remove the first Store, if the old value is completely
1298                          * overwritten or the old value is a part of the new value,
1299                          * and if it does not have an exception handler.
1300                          *
                         * TODO: What if both have the same exception handler?
1302                          */
1303                         if (get_Store_volatility(pred) != volatility_is_volatile
1304                                 && !pred_info->projs[pn_Store_X_except]) {
1305                                 ir_node *predvalue = get_Store_value(pred);
1306                                 ir_mode *predmode  = get_irn_mode(predvalue);
1307
                                if (is_completely_overwritten(predmode, mode)
                                        || is_partially_same(predvalue, value)) {
1310                                         DBG_OPT_WAW(pred, store);
1311                                         exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
1312                                         kill_node(pred);
1313                                         reduce_adr_usage(ptr);
1314                                         return DF_CHANGED;
1315                                 }
1316                         }
1317
1318                         /*
1319                          * We may remove the Store, if the old value already contains
1320                          * the new value, and if it does not have an exception handler.
1321                          *
                         * TODO: What if both have the same exception handler?
1323                          */
1324                         if (get_Store_volatility(store) != volatility_is_volatile
1325                                 && !info->projs[pn_Store_X_except]) {
1326                                 ir_node *predvalue = get_Store_value(pred);
1327
                                if (is_partially_same(value, predvalue)) {
1329                                         DBG_OPT_WAW(pred, store);
1330                                         exchange(info->projs[pn_Store_M], mem);
1331                                         kill_node(store);
1332                                         reduce_adr_usage(ptr);
1333                                         return DF_CHANGED;
1334                                 }
1335                         }
1336                 } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
1337                            value == pred_info->projs[pn_Load_res]) {
1338                         /*
1339                          * a Store of a value just loaded from the same address
1340                          * -- a write after read.
1341                          * We may remove the Store, if it does not have an exception
1342                          * handler.
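                         * E.g. for  x = p->a; p->a = x;  the Store is redundant.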
1343                          */
1344                         if (! info->projs[pn_Store_X_except]) {
1345                                 DBG_OPT_WAR(store, pred);
1346                                 exchange(info->projs[pn_Store_M], mem);
1347                                 kill_node(store);
1348                                 reduce_adr_usage(ptr);
1349                                 return DF_CHANGED;
1350                         }
1351                 }
1352
1353                 if (is_Store(pred)) {
1354                         /* check if we can pass through this store */
1355                         ir_alias_relation rel = get_alias_relation(
1356                                 current_ir_graph,
1357                                 get_Store_ptr(pred),
1358                                 get_irn_mode(get_Store_value(pred)),
1359                                 ptr, mode);
                        /* if there might be an alias, we cannot pass this Store */
1361                         if (rel != ir_no_alias)
1362                                 break;
1363                         pred = skip_Proj(get_Store_mem(pred));
1364                 } else if (is_Load(pred)) {
1365                         ir_alias_relation rel = get_alias_relation(
1366                                 current_ir_graph, get_Load_ptr(pred), get_Load_mode(pred),
1367                                 ptr, mode);
1368                         if (rel != ir_no_alias)
1369                                 break;
1370
1371                         pred = skip_Proj(get_Load_mem(pred));
1372                 } else {
1373                         /* follow only Load chains */
1374                         break;
1375                 }
1376
1377                 /* check for cycles */
1378                 if (NODE_VISITED(pred_info))
1379                         break;
1380                 MARK_NODE(pred_info);
1381         }
1382
1383         if (is_Sync(pred)) {
1384                 int i;
1385
1386                 /* handle all Sync predecessors */
1387                 for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
1388                         res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
1389                         if (res)
1390                                 break;
1391                 }
1392         }
1393         return res;
1394 }  /* follow_Mem_chain_for_Store */
1395
/**
 * Find the entity used as base of an address calculation.
 * Looks through Sel, Add and Sub chains; returns NULL if no base entity
 * can be determined.
 */
1397 static ir_entity *find_entity(ir_node *ptr)
1398 {
        switch (get_irn_opcode(ptr)) {
1400         case iro_SymConst:
1401                 return get_SymConst_entity(ptr);
1402         case iro_Sel: {
1403                 ir_node *pred = get_Sel_ptr(ptr);
1404                 if (get_irg_frame(get_irn_irg(ptr)) == pred)
1405                         return get_Sel_entity(ptr);
1406
1407                 return find_entity(pred);
1408         }
1409         case iro_Sub:
1410         case iro_Add: {
1411                 ir_node *left = get_binop_left(ptr);
1412                 ir_node *right;
1413                 if (mode_is_reference(get_irn_mode(left)))
1414                         return find_entity(left);
1415                 right = get_binop_right(ptr);
1416                 if (mode_is_reference(get_irn_mode(right)))
1417                         return find_entity(right);
1418                 return NULL;
1419         }
1420         default:
1421                 return NULL;
1422         }
1423 }
1424
1425 /**
1426  * optimize a Store
1427  *
1428  * @param store  the Store node
1429  */
1430 static unsigned optimize_store(ir_node *store) {
1431         ir_node   *ptr;
1432         ir_node   *mem;
1433         ir_entity *entity;
1434
1435         if (get_Store_volatility(store) == volatility_is_volatile)
1436                 return 0;
1437
1438         ptr    = get_Store_ptr(store);
1439         entity = find_entity(ptr);
1440
        /* a Store to an entity which is never read is unnecessary:
         * its value can never be observed */
1442         if (entity != NULL && !(get_entity_usage(entity) & ir_usage_read)) {
1443                 ldst_info_t *info = get_irn_link(store);
1444                 if (info->projs[pn_Store_X_except] == NULL) {
1445                         exchange(info->projs[pn_Store_M], get_Store_mem(store));
1446                         kill_node(store);
1447                         reduce_adr_usage(ptr);
1448                         return DF_CHANGED;
1449                 }
1450         }
1451
        /* Check whether the address of this Store is used more than once.
         * If not, this Store cannot be removed in any case. */
1454         if (get_irn_n_uses(ptr) <= 1)
1455                 return 0;
1456
1457         mem = get_Store_mem(store);
1458
        /* follow the memory chain as long as there are only Loads and
         * alias-free Stores */
1460         INC_MASTER();
1461
1462         return follow_Mem_chain_for_Store(store, skip_Proj(mem));
1463 }  /* optimize_store */
1464
1465 /**
1466  * walker, optimizes Phi after Stores to identical places:
1467  * Does the following optimization:
1468  * @verbatim
1469  *
1470  *   val1   val2   val3          val1  val2  val3
1471  *    |      |      |               \    |    /
1472  *  Store  Store  Store              \   |   /
1473  *      \    |    /                   PhiData
1474  *       \   |   /                       |
1475  *        \  |  /                      Store
1476  *          PhiM
1477  *
1478  * @endverbatim
 * This reduces the number of Stores and allows for predicated execution.
 * However, it moves Stores towards the end of a function, which may be bad.
1481  *
1482  * This is only possible if the predecessor blocks have only one successor.
1483  */
1484 static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
1485 {
1486         int i, n;
1487         ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
1488         ir_mode *mode;
1489         ir_node **inM, **inD, **projMs;
1490         int *idx;
1491         dbg_info *db = NULL;
1492         ldst_info_t *info;
1493         block_info_t *bl_info;
1494         unsigned res = 0;
1495
1496         /* Must be a memory Phi */
1497         if (get_irn_mode(phi) != mode_M)
1498                 return 0;
1499
1500         n = get_Phi_n_preds(phi);
1501         if (n <= 0)
1502                 return 0;
1503
        /* the memory Proj must have the Phi as its only user */
1505         projM = get_Phi_pred(phi, 0);
1506         if (get_irn_n_edges(projM) != 1)
1507                 return 0;
1508
1509         store = skip_Proj(projM);
1510         old_store = store;
1511         if (!is_Store(store))
1512                 return 0;
1513
1514         block = get_nodes_block(store);
1515
1516         /* abort on dead blocks */
1517         if (is_Block_dead(block))
1518                 return 0;
1519
1520         /* check if the block is post dominated by Phi-block
1521            and has no exception exit */
1522         bl_info = get_irn_link(block);
1523         if (bl_info->flags & BLOCK_HAS_EXC)
1524                 return 0;
1525
1526         phi_block = get_nodes_block(phi);
1527         if (! block_strictly_postdominates(phi_block, block))
1528                 return 0;
1529
1530         /* this is the address of the store */
1531         ptr  = get_Store_ptr(store);
1532         mode = get_irn_mode(get_Store_value(store));
1533         info = get_irn_link(store);
1534         exc  = info->exc_block;
1535
1536         for (i = 1; i < n; ++i) {
1537                 ir_node *pred = get_Phi_pred(phi, i);
1538
1539                 if (get_irn_n_edges(pred) != 1)
1540                         return 0;
1541
1542                 pred = skip_Proj(pred);
1543                 if (!is_Store(pred))
1544                         return 0;
1545
1546                 if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
1547                         return 0;
1548
1549                 info = get_irn_link(pred);
1550
1551                 /* check, if all stores have the same exception flow */
1552                 if (exc != info->exc_block)
1553                         return 0;
1554
1555                 /* abort on dead blocks */
1556                 block = get_nodes_block(pred);
1557                 if (is_Block_dead(block))
1558                         return 0;
1559
                /* check if the block is post dominated by the Phi-block
                   and has no exception exit. Note that block must be different
                   from the Phi-block, else we would move a Store from the end
                   of a block to its start... */
1564                 bl_info = get_irn_link(block);
1565                 if (bl_info->flags & BLOCK_HAS_EXC)
1566                         return 0;
1567                 if (block == phi_block || ! block_postdominates(phi_block, block))
1568                         return 0;
1569         }
1570
1571         /*
         * When we reach this point, all predecessors of the Phi are Stores
         * to the same address with the same mode. That means whichever path
         * we take into the block of the Phi, we execute such a Store.
         * So, we can move the Store into the current block:
1576          *
1577          *   val1    val2    val3          val1  val2  val3
1578          *    |       |       |               \    |    /
1579          * | Str | | Str | | Str |             \   |   /
1580          *      \     |     /                   PhiData
1581          *       \    |    /                       |
1582          *        \   |   /                       Str
1583          *           PhiM
1584          *
1585          * Is only allowed if the predecessor blocks have only one successor.
1586          */
1587
1588         NEW_ARR_A(ir_node *, projMs, n);
1589         NEW_ARR_A(ir_node *, inM, n);
1590         NEW_ARR_A(ir_node *, inD, n);
1591         NEW_ARR_A(int, idx, n);
1592
        /* Prepare: Collect all Store nodes.  We must do this first
           because we may otherwise lose a Store when exchanging its
           memory Proj.
         */
1597         for (i = n - 1; i >= 0; --i) {
1598                 ir_node *store;
1599
1600                 projMs[i] = get_Phi_pred(phi, i);
1601                 assert(is_Proj(projMs[i]));
1602
1603                 store = get_Proj_pred(projMs[i]);
1604                 info  = get_irn_link(store);
1605
1606                 inM[i] = get_Store_mem(store);
1607                 inD[i] = get_Store_value(store);
1608                 idx[i] = info->exc_idx;
1609         }
1610         block = get_nodes_block(phi);
1611
1612         /* second step: create a new memory Phi */
1613         phiM = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inM, mode_M);
1614
1615         /* third step: create a new data Phi */
1616         phiD = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inD, mode);
1617
1618         /* rewire memory and kill the node */
1619         for (i = n - 1; i >= 0; --i) {
1620                 ir_node *proj  = projMs[i];
1621
                if (is_Proj(proj)) {
1623                         ir_node *store = get_Proj_pred(proj);
1624                         exchange(proj, inM[i]);
1625                         kill_node(store);
1626                 }
1627         }
1628
1629         /* fourth step: create the Store */
1630         store = new_rd_Store(db, current_ir_graph, block, phiM, ptr, phiD);
1631 #ifdef DO_CACHEOPT
1632         co_set_irn_name(store, co_get_irn_ident(old_store));
1633 #endif
1634
1635         projM = new_rd_Proj(NULL, current_ir_graph, block, store, mode_M, pn_Store_M);
1636
1637         info = get_ldst_info(store, &wenv->obst);
1638         info->projs[pn_Store_M] = projM;
1639
        /* fifth step: repair exception flow */
1641         if (exc) {
1642                 ir_node *projX = new_rd_Proj(NULL, current_ir_graph, block, store, mode_X, pn_Store_X_except);
1643
1644                 info->projs[pn_Store_X_except] = projX;
1645                 info->exc_block                = exc;
1646                 info->exc_idx                  = idx[0];
1647
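                /* let every former exception predecessor point to the new Proj */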
1648                 for (i = 0; i < n; ++i) {
1649                         set_Block_cfgpred(exc, idx[i], projX);
1650                 }
1651
1652                 if (n > 1) {
1653                         /* the exception block should be optimized as some inputs are identical now */
1654                 }
1655
1656                 res |= CF_CHANGED;
1657         }
1658
1659         /* sixth step: replace old Phi */
1660         exchange(phi, projM);
1661
1662         return res | DF_CHANGED;
1663 }  /* optimize_phi */
1664
1665 /**
1666  * walker, do the optimizations
1667  */
1668 static void do_load_store_optimize(ir_node *n, void *env) {
1669         walk_env_t *wenv = env;
1670
1671         switch (get_irn_opcode(n)) {
1672
1673         case iro_Load:
1674                 wenv->changes |= optimize_load(n);
1675                 break;
1676
1677         case iro_Store:
1678                 wenv->changes |= optimize_store(n);
1679                 break;
1680
1681         case iro_Phi:
1682                 wenv->changes |= optimize_phi(n, wenv);
1683                 break;
1684
1685         default:
                break;
1687         }
1688 }  /* do_load_store_optimize */
1689
1690 /** A scc. */
1691 typedef struct scc {
1692         ir_node *head;          /**< the head of the list */
1693 } scc;
1694
1695 /** A node entry. */
1696 typedef struct node_entry {
1697         unsigned DFSnum;    /**< the DFS number of this node */
1698         unsigned low;       /**< the low number of this node */
1699         ir_node  *header;   /**< the header of this node */
1700         int      in_stack;  /**< flag, set if the node is on the stack */
        ir_node  *next;     /**< link to the next node in the same scc */
1702         scc      *pscc;     /**< the scc of this node */
1703         unsigned POnum;     /**< the post order number for blocks */
1704 } node_entry;
1705
/** The loop environment. */
1707 typedef struct loop_env {
1708         ir_phase ph;           /**< the phase object */
1709         ir_node  **stack;      /**< the node stack */
1710         int      tos;          /**< tos index */
1711         unsigned nextDFSnum;   /**< the current DFS number */
1712         unsigned POnum;        /**< current post order number */
1713
1714         unsigned changes;      /**< a bitmask of graph changes */
1715 } loop_env;
1716
/**
 * Get the node_entry of a node.
 */
1720 static node_entry *get_irn_ne(ir_node *irn, loop_env *env) {
1721         ir_phase   *ph = &env->ph;
1722         node_entry *e  = phase_get_irn_data(&env->ph, irn);
1723
1724         if (! e) {
1725                 e = phase_alloc(ph, sizeof(*e));
1726                 memset(e, 0, sizeof(*e));
1727                 phase_set_irn_data(ph, irn, e);
1728         }
1729         return e;
1730 }  /* get_irn_ne */
1731
1732 /**
1733  * Push a node onto the stack.
1734  *
1735  * @param env   the loop environment
1736  * @param n     the node to push
1737  */
1738 static void push(loop_env *env, ir_node *n) {
1739         node_entry *e;
1740
1741         if (env->tos == ARR_LEN(env->stack)) {
1742                 int nlen = ARR_LEN(env->stack) * 2;
1743                 ARR_RESIZE(ir_node *, env->stack, nlen);
1744         }
1745         env->stack[env->tos++] = n;
1746         e = get_irn_ne(n, env);
1747         e->in_stack = 1;
1748 }  /* push */
1749
1750 /**
1751  * pop a node from the stack
1752  *
1753  * @param env   the loop environment
1754  *
1755  * @return  The topmost node
1756  */
1757 static ir_node *pop(loop_env *env) {
1758         ir_node *n = env->stack[--env->tos];
1759         node_entry *e = get_irn_ne(n, env);
1760
1761         e->in_stack = 0;
1762         return n;
1763 }  /* pop */
1764
1765 /**
1766  * Check if irn is a region constant.
 * The block of irn must strictly dominate the header block.
1768  *
1769  * @param irn           the node to check
1770  * @param header_block  the header block of the induction variable
1771  */
1772 static int is_rc(ir_node *irn, ir_node *header_block) {
1773         ir_node *block = get_nodes_block(irn);
1774
1775         return (block != header_block) && block_dominates(block, header_block);
1776 }  /* is_rc */
1777
1778 typedef struct phi_entry phi_entry;
1779 struct phi_entry {
1780         ir_node   *phi;    /**< A phi with a region const memory. */
1781         int       pos;     /**< The position of the region const memory */
1782         ir_node   *load;   /**< the newly created load for this phi */
1783         phi_entry *next;
1784 };
1785
1786 /**
 * Move Loads out of loops if possible.
1788  *
1789  * @param pscc   the loop described by an SCC
1790  * @param env    the loop environment
1791  */
1792 static void move_loads_out_of_loops(scc *pscc, loop_env *env) {
1793         ir_node   *phi, *load, *next, *other, *next_other;
1794         ir_entity *ent;
1795         int       j;
1796         phi_entry *phi_list = NULL;
1797
1798         /* collect all outer memories */
1799         for (phi = pscc->head; phi != NULL; phi = next) {
1800                 node_entry *ne = get_irn_ne(phi, env);
1801                 next = ne->next;
1802
1803                 /* check all memory Phi's */
1804                 if (! is_Phi(phi))
1805                         continue;
1806
                assert(get_irn_mode(phi) == mode_M && "DFS returned a non-memory Phi");
1808
1809                 for (j = get_irn_arity(phi) - 1; j >= 0; --j) {
1810                         ir_node    *pred = get_irn_n(phi, j);
1811                         node_entry *pe   = get_irn_ne(pred, env);
1812
                        if (pe->pscc != ne->pscc) {
                                /* not in the same SCC, so this input is a
                                 * region constant: remember it */
                                phi_entry *pent = phase_alloc(&env->ph, sizeof(*pent));

                                pent->phi  = phi;
                                pent->pos  = j;
                                pent->next = phi_list;
                                phi_list   = pent;
                        }
1822                 }
1823         }
1824         /* no Phis no fun */
1825         assert(phi_list != NULL && "DFS found a loop without Phi");
1826
1827         for (load = pscc->head; load; load = next) {
1828                 ir_mode *load_mode;
1829                 node_entry *ne = get_irn_ne(load, env);
1830                 next = ne->next;
1831
1832                 if (is_Load(load)) {
1833                         ldst_info_t *info = get_irn_link(load);
1834                         ir_node     *ptr = get_Load_ptr(load);
1835
1836                         /* for now, we cannot handle Loads with exceptions */
1837                         if (info->projs[pn_Load_res] == NULL || info->projs[pn_Load_X_regular] != NULL || info->projs[pn_Load_X_except] != NULL)
1838                                 continue;
1839
1840                         /* for now, we can only handle Load(Global) */
1841                         if (! is_Global(ptr))
1842                                 continue;
1843                         ent = get_Global_entity(ptr);
1844                         load_mode = get_Load_mode(load);
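                        /* check whether any Store in this SCC may alias the
                         * Load address; if so, the Load must stay in the loop */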
1845                         for (other = pscc->head; other != NULL; other = next_other) {
1846                                 node_entry *ne = get_irn_ne(other, env);
1847                                 next_other = ne->next;
1848
1849                                 if (is_Store(other)) {
1850                                         ir_alias_relation rel = get_alias_relation(
1851                                                 current_ir_graph,
1852                                                 get_Store_ptr(other),
1853                                                 get_irn_mode(get_Store_value(other)),
1854                                                 ptr, load_mode);
                                        /* if there might be an alias, we cannot pass this Store */
1856                                         if (rel != ir_no_alias)
1857                                                 break;
1858                                 }
1859                                 /* only pure Calls are allowed here, so ignore them */
1860                         }
1861                         if (other == NULL) {
1862                                 ldst_info_t *ninfo;
1863                                 phi_entry   *pe;
1864                                 dbg_info    *db;
1865
1866                                 /* for now, we cannot handle more than one input */
1867                                 if (phi_list->next != NULL)
1868                                         return;
1869
1870                                 /* yep, no aliasing Store found, Load can be moved */
1871                                 DB((dbg, LEVEL_1, "  Found a Load that could be moved: %+F\n", load));
1872
1873                                 db   = get_irn_dbg_info(load);
1874                                 for (pe = phi_list; pe != NULL; pe = pe->next) {
1875                                         int     pos   = pe->pos;
1876                                         ir_node *phi  = pe->phi;
1877                                         ir_node *blk  = get_nodes_block(phi);
1878                                         ir_node *pred = get_Block_cfgpred_block(blk, pos);
1879                                         ir_node *irn, *mem;
1880
1881                                         pe->load = irn = new_rd_Load(db, current_ir_graph, pred, get_Phi_pred(phi, pos), ptr, load_mode);
1882                                         ninfo = get_ldst_info(irn, phase_obst(&env->ph));
1883
1884                                         ninfo->projs[pn_Load_M] = mem = new_r_Proj(current_ir_graph, pred, irn, mode_M, pn_Load_M);
1885                                         set_Phi_pred(phi, pos, mem);
1886
1887                                         ninfo->projs[pn_Load_res] = new_r_Proj(current_ir_graph, pred, irn, load_mode, pn_Load_res);
1888
1889                                         DB((dbg, LEVEL_1, "  Created %+F in %+F\n", irn, pred));
1890                                 }
1891
1892                                 /* now kill the old Load */
1893                                 exchange(info->projs[pn_Load_M], get_Load_mem(load));
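                                /* ninfo is still valid here: the single-input
                                 * check above guarantees that exactly one new
                                 * Load was created */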
1894                                 exchange(info->projs[pn_Load_res], ninfo->projs[pn_Load_res]);
1895
1896                                 env->changes |= DF_CHANGED;
1897                         }
1898                 }
1899         }
1900 }  /* move_loads_out_of_loops */
1901
1902 /**
1903  * Process a loop SCC.
1904  *
1905  * @param pscc  the SCC
1906  * @param env   the loop environment
1907  */
1908 static void process_loop(scc *pscc, loop_env *env) {
1909         ir_node *irn, *next, *header = NULL;
1910         node_entry *b, *h = NULL;
1911         int j, only_phi, num_outside, process = 0;
1912         ir_node *out_rc;
1913
1914         /* find the header block for this scc */
1915         for (irn = pscc->head; irn; irn = next) {
1916                 node_entry *e = get_irn_ne(irn, env);
1917                 ir_node *block = get_nodes_block(irn);
1918
1919                 next = e->next;
1920                 b = get_irn_ne(block, env);
1921
1922                 if (header) {
1923                         if (h->POnum < b->POnum) {
1924                                 header = block;
1925                                 h      = b;
1926                         }
                } else {
1929                         header = block;
1930                         h      = b;
1931                 }
1932         }
1933
        /* check if this scc contains only Phi, Load or Store nodes */
1935         only_phi    = 1;
1936         num_outside = 0;
1937         out_rc      = NULL;
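        /* out_rc will cache the unique region-constant memory input of the
         * loop's Phis, if there is exactly one such input */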
1938         for (irn = pscc->head; irn; irn = next) {
1939                 node_entry *e = get_irn_ne(irn, env);
1940
1941                 next = e->next;
1942                 switch (get_irn_opcode(irn)) {
1943                 case iro_Call:
1944                         if (is_Call_pure(irn)) {
1945                                 /* pure calls can be treated like loads */
1946                                 only_phi = 0;
1947                                 break;
1948                         }
                        /* non-pure calls must be handled like may-alias Stores */
1950                         goto fail;
1951                 case iro_CopyB:
1952                         /* cannot handle CopyB yet */
1953                         goto fail;
1954                 case iro_Load:
1955                         process = 1;
1956                         if (get_Load_volatility(irn) == volatility_is_volatile) {
1957                                 /* cannot handle loops with volatile Loads */
1958                                 goto fail;
1959                         }
1960                         only_phi = 0;
1961                         break;
1962                 case iro_Store:
1963                         if (get_Store_volatility(irn) == volatility_is_volatile) {
1964                                 /* cannot handle loops with volatile Stores */
1965                                 goto fail;
1966                         }
1967                         only_phi = 0;
1968                         break;
                case iro_Phi:
                        for (j = get_irn_arity(irn) - 1; j >= 0; --j) {
                                ir_node *pred  = get_irn_n(irn, j);
                                node_entry *pe = get_irn_ne(pred, env);

                                if (pe->pscc != e->pscc) {
                                        /* not in the same SCC, must be a region const */
                                        if (! is_rc(pred, header)) {
                                                /* not a memory loop */
                                                goto fail;
                                        }
                                        if (! out_rc) {
                                                out_rc = pred;
                                                ++num_outside;
                                        } else if (out_rc != pred) {
                                                ++num_outside;
                                        }
                                }
                        }
                        break;
                default:
                        only_phi = 0;
                        break;
                }
1993         }
1994         if (! process)
1995                 goto fail;
1996
1997         /* found a memory loop */
1998         DB((dbg, LEVEL_2, "  Found a memory loop:\n  "));
1999         if (only_phi && num_outside == 1) {
2000                 /* a phi cycle with only one real predecessor can be collapsed */
                DB((dbg, LEVEL_2, "  Found a USELESS Phi cycle:\n  "));
2002
2003                 for (irn = pscc->head; irn; irn = next) {
2004                         node_entry *e = get_irn_ne(irn, env);
2005                         next = e->next;
2006                         e->header = NULL;
2007                         exchange(irn, out_rc);
2008                 }
2009                 env->changes |= DF_CHANGED;
2010                 return;
2011         }
2012
2013         /* set the header for every node in this scc */
2014         for (irn = pscc->head; irn; irn = next) {
2015                 node_entry *e = get_irn_ne(irn, env);
2016                 e->header = header;
2017                 next = e->next;
2018                 DB((dbg, LEVEL_2, " %+F,", irn));
2019         }
2020         DB((dbg, LEVEL_2, "\n"));
2021
2022         move_loads_out_of_loops(pscc, env);
2023
2024 fail:
2025         ;
2026 }  /* process_loop */
2027
2028 /**
2029  * Process a SCC.
2030  *
2031  * @param pscc  the SCC
2032  * @param env   the loop environment
2033  */
2034 static void process_scc(scc *pscc, loop_env *env) {
2035         ir_node *head = pscc->head;
2036         node_entry *e = get_irn_ne(head, env);
2037
2038 #ifdef DEBUG_libfirm
2039         {
2040                 ir_node *irn, *next;
2041
2042                 DB((dbg, LEVEL_4, " SCC at %p:\n ", pscc));
2043                 for (irn = pscc->head; irn; irn = next) {
2044                         node_entry *e = get_irn_ne(irn, env);
2045
2046                         next = e->next;
2047
2048                         DB((dbg, LEVEL_4, " %+F,", irn));
2049                 }
2050                 DB((dbg, LEVEL_4, "\n"));
2051         }
2052 #endif
2053
2054         if (e->next != NULL) {
2055                 /* this SCC has more than one member */
2056                 process_loop(pscc, env);
2057         }
2058 }  /* process_scc */
2059
2060 /**
2061  * Do Tarjan's SCC algorithm and drive load/store optimization.
2062  *
2063  * @param irn  start at this node
2064  * @param env  the loop environment
2065  */
2066 static void dfs(ir_node *irn, loop_env *env)
2067 {
2068         int i, n;
2069         node_entry *node = get_irn_ne(irn, env);
2070
2071         mark_irn_visited(irn);
2072
2073         node->DFSnum = env->nextDFSnum++;
2074         node->low    = node->DFSnum;
2075         push(env, irn);
2076
2077         /* handle preds */
2078         if (is_Phi(irn) || is_Sync(irn)) {
2079                 n = get_irn_arity(irn);
2080                 for (i = 0; i < n; ++i) {
2081                         ir_node *pred = get_irn_n(irn, i);
2082                         node_entry *o = get_irn_ne(pred, env);
2083
2084                         if (!irn_visited(pred)) {
2085                                 dfs(pred, env);
2086                                 node->low = MIN(node->low, o->low);
2087                         }
2088                         if (o->DFSnum < node->DFSnum && o->in_stack)
2089                                 node->low = MIN(o->DFSnum, node->low);
2090                 }
2091         } else if (is_fragile_op(irn)) {
2092                 ir_node *pred = get_fragile_op_mem(irn);
2093                 node_entry *o = get_irn_ne(pred, env);
2094
2095                 if (!irn_visited(pred)) {
2096                         dfs(pred, env);
2097                         node->low = MIN(node->low, o->low);
2098                 }
2099                 if (o->DFSnum < node->DFSnum && o->in_stack)
2100                         node->low = MIN(o->DFSnum, node->low);
2101         } else if (is_Proj(irn)) {
2102                 ir_node *pred = get_Proj_pred(irn);
2103                 node_entry *o = get_irn_ne(pred, env);
2104
2105                 if (!irn_visited(pred)) {
2106                         dfs(pred, env);
2107                         node->low = MIN(node->low, o->low);
2108                 }
2109                 if (o->DFSnum < node->DFSnum && o->in_stack)
2110                         node->low = MIN(o->DFSnum, node->low);
        } else {
2113                  /* IGNORE predecessors */
2114         }
2115
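        /* irn is the root of an SCC iff its low-link equals its own DFS
         * number: pop the complete SCC off the stack */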
2116         if (node->low == node->DFSnum) {
2117                 scc *pscc = phase_alloc(&env->ph, sizeof(*pscc));
2118                 ir_node *x;
2119
2120                 pscc->head = NULL;
2121                 do {
2122                         node_entry *e;
2123
2124                         x = pop(env);
2125                         e = get_irn_ne(x, env);
2126                         e->pscc    = pscc;
2127                         e->next    = pscc->head;
2128                         pscc->head = x;
2129                 } while (x != irn);
2130
2131                 process_scc(pscc, env);
2132         }
2133 }  /* dfs */
2134
2135 /**
 * Do the DFS on the memory edges of a graph.
2137  *
2138  * @param irg  the graph to process
2139  * @param env  the loop environment
2140  */
2141 static void do_dfs(ir_graph *irg, loop_env *env) {
2142         ir_graph *rem = current_ir_graph;
2143         ir_node  *endblk, *end;
2144         int      i;
2145
2146         current_ir_graph = irg;
2147         inc_irg_visited(irg);
2148
2149         /* visit all memory nodes */
2150         endblk = get_irg_end_block(irg);
2151         for (i = get_Block_n_cfgpreds(endblk) - 1; i >= 0; --i) {
2152                 ir_node *pred = get_Block_cfgpred(endblk, i);
2153
2154                 pred = skip_Proj(pred);
2155                 if (is_Return(pred))
2156                         dfs(get_Return_mem(pred), env);
2157                 else if (is_Raise(pred))
2158                         dfs(get_Raise_mem(pred), env);
2159                 else if (is_fragile_op(pred))
2160                         dfs(get_fragile_op_mem(pred), env);
2161                 else {
2162                         assert(0 && "Unknown EndBlock predecessor");
2163                 }
2164         }
2165
2166         /* visit the keep-alives */
2167         end = get_irg_end(irg);
2168         for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
2169                 ir_node *ka = get_End_keepalive(end, i);
2170
2171                 if (is_Phi(ka) && !irn_visited(ka))
2172                         dfs(ka, env);
2173         }
2174         current_ir_graph = rem;
2175 }  /* do_dfs */
2176
2177 /**
 * Initialize new phase data. We always do this explicitly, so return NULL here.
2179  */
2180 static void *init_loop_data(ir_phase *ph, const ir_node *irn, void *data) {
2181         (void)ph;
2182         (void)irn;
2183         (void)data;
2184         return NULL;
2185 }  /* init_loop_data */
2186
2187 /**
2188  * Optimize Loads/Stores in loops.
2189  *
2190  * @param irg  the graph
2191  */
2192 static int optimize_loops(ir_graph *irg) {
2193         loop_env env;
2194
2195         env.stack         = NEW_ARR_F(ir_node *, 128);
2196         env.tos           = 0;
2197         env.nextDFSnum    = 0;
2198         env.POnum         = 0;
2199         env.changes       = 0;
2200         phase_init(&env.ph, "ldstopt", irg, PHASE_DEFAULT_GROWTH, init_loop_data, NULL);
2201
2202         /* calculate the SCC's and drive loop optimization. */
2203         do_dfs(irg, &env);
2204
2205         DEL_ARR_F(env.stack);
2206         phase_free(&env.ph);
2207
2208         return env.changes;
2209 }  /* optimize_loops */
2210
2211 /*
2212  * do the load store optimization
2213  */
2214 int optimize_load_store(ir_graph *irg) {
2215         walk_env_t env;
2216
2217         FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");
2218
2219         assert(get_irg_phase_state(irg) != phase_building);
2220         assert(get_irg_pinned(irg) != op_pin_state_floats &&
2221                 "LoadStore optimization needs pinned graph");
2222
2223         /* we need landing pads */
2224         remove_critical_cf_edges(irg);
2225
2226         edges_assure(irg);
2227
2228         /* for Phi optimization post-dominators are needed ... */
2229         assure_postdoms(irg);
2230
2231         if (get_opt_alias_analysis()) {
2232                 assure_irg_entity_usage_computed(irg);
2233                 assure_irp_globals_entity_usage_computed();
2234         }
2235
2236         obstack_init(&env.obst);
2237         env.changes = 0;
2238
2239         /* init the links, then collect Loads/Stores/Proj's in lists */
2240         master_visited = 0;
2241         irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);
2242
2243         /* now we have collected enough information, optimize */
2244         irg_walk_graph(irg, NULL, do_load_store_optimize, &env);
2245
2246         env.changes |= optimize_loops(irg);
2247
2248         obstack_free(&env.obst, NULL);
2249
2250         /* Handle graph state */
2251         if (env.changes) {
2252                 set_irg_outs_inconsistent(irg);
2253                 set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
2254         }
2255
2256         if (env.changes & CF_CHANGED) {
                /* this is really needed: control flow changed, blocks might
                   have Bad() predecessors now */
2259                 set_irg_doms_inconsistent(irg);
2260         }
2261         return env.changes != 0;
2262 }  /* optimize_load_store */