/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Load/Store optimizations.
 * @author  Michael Beck
 * @version $Id$
 */
#include "config.h"

#include <string.h>

#include "iroptimize.h"
#include "irnode_t.h"
#include "irgraph_t.h"
#include "irmode_t.h"
#include "iropt_t.h"
#include "ircons_t.h"
#include "irgmod.h"
#include "irgwalk.h"
#include "irvrfy.h"
#include "tv_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "irflag_t.h"
#include "array_t.h"
#include "irhooks.h"
#include "iredges.h"
#include "irpass.h"
#include "opt_polymorphy.h"
#include "irmemory.h"
#include "irphase_t.h"
#include "irgopt.h"
#include "set.h"
#include "debug.h"

/** The debug handle. */
DEBUG_ONLY(static firm_dbg_module_t *dbg;)

#ifdef DO_CACHEOPT
#include "cacheopt/cachesim.h"
#endif

#undef IMAX
#define IMAX(a,b)       ((a) > (b) ? (a) : (b))

#define MAX_PROJ        IMAX(IMAX(pn_Load_max, pn_Store_max), pn_Call_max)

enum changes_t {
        DF_CHANGED = 1,       /**< data flow changed */
        CF_CHANGED = 2        /**< control flow changed */
};

/**
 * walker environment
 */
typedef struct _walk_env_t {
        struct obstack obst;          /**< obstack used to allocate the info structs */
        unsigned changes;             /**< a bitmask of graph changes */
} walk_env_t;

/** A Load/Store info. */
typedef struct _ldst_info_t {
        ir_node  *projs[MAX_PROJ];    /**< list of Proj's of this node */
        ir_node  *exc_block;          /**< the exception block if available */
        int      exc_idx;             /**< predecessor index in the exception block */
        unsigned visited;             /**< visited counter for breaking loops */
} ldst_info_t;

/**
 * flags for control flow.
 */
enum block_flags_t {
        BLOCK_HAS_COND = 1,      /**< Block has conditional control flow */
        BLOCK_HAS_EXC  = 2       /**< Block has exceptional control flow */
};

/**
 * a Block info.
 */
typedef struct _block_info_t {
        unsigned flags;               /**< flags for the block */
} block_info_t;

/** the master visited flag for loop detection. */
static unsigned master_visited = 0;

#define INC_MASTER()       ++master_visited
#define MARK_NODE(info)    ((info)->visited = master_visited)
#define NODE_VISITED(info) ((info)->visited >= master_visited)

/**
 * get the Load/Store info of a node
 */
static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst) {
        ldst_info_t *info = get_irn_link(node);

        if (! info) {
                info = OALLOCZ(obst, ldst_info_t);
                set_irn_link(node, info);
        }
        return info;
}  /* get_ldst_info */

/**
 * get the Block info of a node
 */
static block_info_t *get_block_info(ir_node *node, struct obstack *obst) {
        block_info_t *info = get_irn_link(node);

        if (! info) {
                info = OALLOCZ(obst, block_info_t);
                set_irn_link(node, info);
        }
        return info;
}  /* get_block_info */

/**
 * update the projection info for a Load/Store
 */
static unsigned update_projs(ldst_info_t *info, ir_node *proj)
{
        long nr = get_Proj_proj(proj);

        assert(0 <= nr && nr < MAX_PROJ && "Wrong proj from LoadStore");

        if (info->projs[nr]) {
                /* there is already one, do CSE */
                exchange(proj, info->projs[nr]);
                return DF_CHANGED;
        }
        else {
                info->projs[nr] = proj;
                return 0;
        }
}  /* update_projs */

/**
 * update the exception block info for a Load/Store node.
 *
 * @param info   the load/store info struct
 * @param block  the exception handler block for this load/store
 * @param pos    the control flow input of the block
 */
static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
{
        assert(info->exc_block == NULL && "more than one exception block found");

        info->exc_block = block;
        info->exc_idx   = pos;
        return 0;
}  /* update_exc */

/** Return the number of uses of an address node */
#define get_irn_n_uses(adr)     get_irn_n_edges(adr)

/**
 * walker, collects all Load/Store/Proj nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
{
        ir_opcode   opcode = get_irn_opcode(node);
        ir_node     *pred, *blk, *pred_blk;
        ldst_info_t *ldst_info;
        walk_env_t  *wenv = env;

        if (opcode == iro_Proj) {
                pred   = get_Proj_pred(node);
                opcode = get_irn_opcode(pred);

                if (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call) {
                        ldst_info = get_ldst_info(pred, &wenv->obst);

                        wenv->changes |= update_projs(ldst_info, node);

                        /*
                         * Place the Projs in the same block as their
                         * predecessor Load. This is always ok and prevents
                         * "non-SSA" form after optimizations if the Proj
                         * is in a wrong block.
                         */
                        blk      = get_nodes_block(node);
                        pred_blk = get_nodes_block(pred);
                        if (blk != pred_blk) {
                                wenv->changes |= DF_CHANGED;
                                set_nodes_block(node, pred_blk);
                        }
                }
        } else if (opcode == iro_Block) {
                int i;

                for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
                        ir_node      *pred_block, *proj;
                        block_info_t *bl_info;
                        int          is_exc = 0;

                        pred = proj = get_Block_cfgpred(node, i);

                        if (is_Proj(proj)) {
                                pred   = get_Proj_pred(proj);
                                is_exc = get_Proj_proj(proj) == pn_Generic_X_except;
                        }

                        /* ignore Bad predecessors, they will be removed later */
                        if (is_Bad(pred))
                                continue;

                        pred_block = get_nodes_block(pred);
                        bl_info    = get_block_info(pred_block, &wenv->obst);

                        if (is_fragile_op(pred) && is_exc)
                                bl_info->flags |= BLOCK_HAS_EXC;
                        else if (is_irn_forking(pred))
                                bl_info->flags |= BLOCK_HAS_COND;

                        opcode = get_irn_opcode(pred);
                        if (is_exc && (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call)) {
                                ldst_info = get_ldst_info(pred, &wenv->obst);

                                wenv->changes |= update_exc(ldst_info, node, i);
                        }
                }
        }
}  /* collect_nodes */

/**
 * Returns an entity if the address ptr points to a constant one.
 *
 * @param ptr  the address
 *
 * @return an entity or NULL
 */
static ir_entity *find_constant_entity(ir_node *ptr)
{
        for (;;) {
                if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
                        return get_SymConst_entity(ptr);
                } else if (is_Sel(ptr)) {
                        ir_entity *ent = get_Sel_entity(ptr);
                        ir_type   *tp  = get_entity_owner(ent);

                        /* Do not fiddle with polymorphism. */
                        if (is_Class_type(tp) &&
                                ((get_entity_n_overwrites(ent)    != 0) ||
                                (get_entity_n_overwrittenby(ent) != 0)   ) )
                                return NULL;

                        if (is_Array_type(tp)) {
                                /* check bounds */
                                int i, n;

                                for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
                                        ir_node *bound;
                                        tarval *tlower, *tupper;
                                        ir_node *index = get_Sel_index(ptr, i);
                                        tarval *tv     = computed_value(index);

                                        /* check if the index is constant */
                                        if (tv == tarval_bad)
                                                return NULL;

                                        bound  = get_array_lower_bound(tp, i);
                                        tlower = computed_value(bound);
                                        bound  = get_array_upper_bound(tp, i);
                                        tupper = computed_value(bound);

                                        if (tlower == tarval_bad || tupper == tarval_bad)
                                                return NULL;

                                        if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
                                                return NULL;
                                        if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
                                                return NULL;

                                        /* ok, bounds check finished */
                                }
                        }

                        if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT)
                                return ent;

                        /* try next */
                        ptr = get_Sel_ptr(ptr);
                } else if (is_Add(ptr)) {
                        ir_node *l = get_Add_left(ptr);
                        ir_node *r = get_Add_right(ptr);

                        if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
                                ptr = l;
                        else if (get_irn_mode(r) == get_irn_mode(ptr) && is_Const(l))
                                ptr = r;
                        else
                                return NULL;

                        /* for now, we support only one addition, reassoc should fold all others */
                        if (! is_SymConst(ptr) && !is_Sel(ptr))
                                return NULL;
                } else if (is_Sub(ptr)) {
                        ir_node *l = get_Sub_left(ptr);
                        ir_node *r = get_Sub_right(ptr);

                        if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
                                ptr = l;
                        else
                                return NULL;
                        /* for now, we support only one subtraction, reassoc should fold all others */
                        if (! is_SymConst(ptr) && !is_Sel(ptr))
                                return NULL;
                } else
                        return NULL;
        }
}  /* find_constant_entity */
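
/*
 * Illustrative example (not from the original source): for an access like
 * "tab[2]" to a global "static const int tab[8]", the address is typically
 * Sel(SymConst(tab), Const 2).  The walk above checks the constant index 2
 * against the array bounds, steps to the SymConst and returns the entity
 * of "tab"; a variable or out-of-bounds index makes it return NULL instead.
 */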

/**
 * Return the array index of a Sel node for dimension dim as a long.
 */
static long get_Sel_array_index_long(ir_node *n, int dim) {
        ir_node *index = get_Sel_index(n, dim);
        assert(is_Const(index));
        return get_tarval_long(get_Const_tarval(index));
}  /* get_Sel_array_index_long */

/**
 * Returns the accessed component graph path for a
 * node computing an address.
 *
 * @param ptr    the node computing the address
 * @param depth  current depth in steps upward from the root
 *               of the address
 */
static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
        compound_graph_path *res = NULL;
        ir_entity           *root, *field, *ent;
        int                 path_len, pos, idx;
        tarval              *tv;
        ir_type             *tp;

        if (is_SymConst(ptr)) {
                /* a SymConst. If the depth is 0, this is an access to a global
                 * entity and we don't need a component path, else we know
                 * at least its length.
                 */
                assert(get_SymConst_kind(ptr) == symconst_addr_ent);
                root = get_SymConst_entity(ptr);
                res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
        } else if (is_Sel(ptr)) {
                /* it's a Sel, go up until we find the root */
                res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);
                if (res == NULL)
                        return NULL;

                /* fill up the step in the path at the current position */
                field    = get_Sel_entity(ptr);
                path_len = get_compound_graph_path_length(res);
                pos      = path_len - depth - 1;
                set_compound_graph_path_node(res, pos, field);

                if (is_Array_type(get_entity_owner(field))) {
                        assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
                        set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
                }
        } else if (is_Add(ptr)) {
                ir_node *l    = get_Add_left(ptr);
                ir_node *r    = get_Add_right(ptr);
                ir_mode *mode = get_irn_mode(ptr);
                tarval  *tmp;

                if (is_Const(r) && get_irn_mode(l) == mode) {
                        ptr = l;
                        tv  = get_Const_tarval(r);
                } else {
                        ptr = r;
                        tv  = get_Const_tarval(l);
                }
ptr_arith:
                mode = get_tarval_mode(tv);
                tmp  = tv;

                /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
                if (is_Sel(ptr)) {
                        field = get_Sel_entity(ptr);
                } else {
                        field = get_SymConst_entity(ptr);
                }
                idx = 0;
                for (ent = field;;) {
                        unsigned size;
                        tarval   *sz, *tv_index, *tlower, *tupper;
                        ir_node  *bound;

                        tp = get_entity_type(ent);
                        if (! is_Array_type(tp))
                                break;
                        ent = get_array_element_entity(tp);
                        size = get_type_size_bytes(get_entity_type(ent));
                        sz   = new_tarval_from_long(size, mode);

                        tv_index = tarval_div(tmp, sz);
                        tmp      = tarval_mod(tmp, sz);

                        if (tv_index == tarval_bad || tmp == tarval_bad)
                                return NULL;

                        assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
                        bound  = get_array_lower_bound(tp, 0);
                        tlower = computed_value(bound);
                        bound  = get_array_upper_bound(tp, 0);
                        tupper = computed_value(bound);

                        if (tlower == tarval_bad || tupper == tarval_bad)
                                return NULL;

                        if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
                                return NULL;
                        if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
                                return NULL;

                        /* ok, bounds check finished */
                        ++idx;
                }
                if (! tarval_is_null(tmp)) {
                        /* access to some struct/union member */
                        return NULL;
                }

                /* should be at least ONE array */
                if (idx == 0)
                        return NULL;

                res = rec_get_accessed_path(ptr, depth + idx);
                if (res == NULL)
                        return NULL;

                path_len = get_compound_graph_path_length(res);
                pos      = path_len - depth - idx;

                for (ent = field;;) {
                        unsigned size;
                        tarval   *sz, *tv_index;
                        long     index;

                        tp = get_entity_type(ent);
                        if (! is_Array_type(tp))
                                break;
                        ent = get_array_element_entity(tp);
                        set_compound_graph_path_node(res, pos, ent);

                        size = get_type_size_bytes(get_entity_type(ent));
                        sz   = new_tarval_from_long(size, mode);

                        tv_index = tarval_div(tv, sz);
                        tv       = tarval_mod(tv, sz);

                        /* worked above, should work again */
                        assert(tv_index != tarval_bad && tv != tarval_bad);

                        /* bounds already checked above */
                        index = get_tarval_long(tv_index);
                        set_compound_graph_path_array_index(res, pos, index);
                        ++pos;
                }
        } else if (is_Sub(ptr)) {
                ir_node *l = get_Sub_left(ptr);
                ir_node *r = get_Sub_right(ptr);

                ptr = l;
                tv  = get_Const_tarval(r);
                tv  = tarval_neg(tv);
                goto ptr_arith;
        }
        return res;
}  /* rec_get_accessed_path */

/**
 * Returns an access path or NULL.  The access path is only
 * valid if the graph is in phase_high and _no_ address computation is used.
 */
static compound_graph_path *get_accessed_path(ir_node *ptr) {
        compound_graph_path *gr = rec_get_accessed_path(ptr, 0);
        return gr;
}  /* get_accessed_path */
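
/*
 * Sketch of the recursion above (illustrative): for an address
 * Sel(Sel(SymConst(S), f), Const 3), i.e. "S.f[3]", the recursion reaches
 * the SymConst at depth 2 and allocates a path of length 2 there; on the
 * way back, position 0 receives the field f and position 1 the array
 * element entity with array index 3.
 */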

typedef struct path_entry {
        ir_entity         *ent;
        struct path_entry *next;
        long              index;
} path_entry;

static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) {
        path_entry       entry, *p;
        ir_entity        *ent, *field;
        ir_initializer_t *initializer;
        tarval           *tv;
        ir_type          *tp;
        unsigned         n;

        entry.next = next;
        if (is_SymConst(ptr)) {
                /* found the root */
                ent         = get_SymConst_entity(ptr);
                initializer = get_entity_initializer(ent);
                for (p = next; p != NULL;) {
                        if (initializer->kind != IR_INITIALIZER_COMPOUND)
                                return NULL;
                        n  = get_initializer_compound_n_entries(initializer);
                        tp = get_entity_type(ent);

                        if (is_Array_type(tp)) {
                                ent = get_array_element_entity(tp);
                                if (ent != p->ent) {
                                        /* a missing [0] */
                                        if (0 >= n)
                                                return NULL;
                                        initializer = get_initializer_compound_value(initializer, 0);
                                        continue;
                                }
                        }
                        if (p->index >= (int) n)
                                return NULL;
                        initializer = get_initializer_compound_value(initializer, p->index);

                        ent = p->ent;
                        p   = p->next;
                }
                tp = get_entity_type(ent);
                while (is_Array_type(tp)) {
                        ent = get_array_element_entity(tp);
                        tp = get_entity_type(ent);
                        /* a missing [0] */
                        n  = get_initializer_compound_n_entries(initializer);
                        if (0 >= n)
                                return NULL;
                        initializer = get_initializer_compound_value(initializer, 0);
                }

                switch (initializer->kind) {
                case IR_INITIALIZER_CONST:
                        return get_initializer_const_value(initializer);
                case IR_INITIALIZER_TARVAL:
                case IR_INITIALIZER_NULL:
                default:
                        return NULL;
                }
        } else if (is_Sel(ptr)) {
                entry.ent = field = get_Sel_entity(ptr);
                tp = get_entity_owner(field);
                if (is_Array_type(tp)) {
                        assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
                        entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
                } else {
                        int i, n_members = get_compound_n_members(tp);
                        for (i = 0; i < n_members; ++i) {
                                if (get_compound_member(tp, i) == field)
                                        break;
                        }
                        if (i >= n_members) {
                                /* not found: should NOT happen */
                                return NULL;
                        }
                        entry.index = i;
                }
                return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
        } else if (is_Add(ptr)) {
                ir_node  *l = get_Add_left(ptr);
                ir_node  *r = get_Add_right(ptr);
                ir_mode  *mode;
                unsigned pos;

                if (is_Const(r)) {
                        ptr = l;
                        tv  = get_Const_tarval(r);
                } else {
                        ptr = r;
                        tv  = get_Const_tarval(l);
                }
ptr_arith:
                mode = get_tarval_mode(tv);

                /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
                if (is_Sel(ptr)) {
                        field = get_Sel_entity(ptr);
                } else {
                        field = get_SymConst_entity(ptr);
                }

                /* count needed entries */
                pos = 0;
                for (ent = field;;) {
                        tp = get_entity_type(ent);
                        if (! is_Array_type(tp))
                                break;
                        ent = get_array_element_entity(tp);
                        ++pos;
                }
                /* should be at least ONE entry */
                if (pos == 0)
                        return NULL;

                /* allocate the right number of entries */
                NEW_ARR_A(path_entry, p, pos);

                /* fill them up */
                pos = 0;
                for (ent = field;;) {
                        unsigned size;
                        tarval   *sz, *tv_index, *tlower, *tupper;
                        long     index;
                        ir_node  *bound;

                        tp = get_entity_type(ent);
                        if (! is_Array_type(tp))
                                break;
                        ent = get_array_element_entity(tp);
                        p[pos].ent  = ent;
                        p[pos].next = &p[pos + 1];

                        size = get_type_size_bytes(get_entity_type(ent));
                        sz   = new_tarval_from_long(size, mode);

                        tv_index = tarval_div(tv, sz);
                        tv       = tarval_mod(tv, sz);

                        if (tv_index == tarval_bad || tv == tarval_bad)
                                return NULL;

                        assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
                        bound  = get_array_lower_bound(tp, 0);
                        tlower = computed_value(bound);
                        bound  = get_array_upper_bound(tp, 0);
                        tupper = computed_value(bound);

                        if (tlower == tarval_bad || tupper == tarval_bad)
                                return NULL;

                        if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
                                return NULL;
                        if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
                                return NULL;

                        /* ok, bounds check finished */
                        index = get_tarval_long(tv_index);
                        p[pos].index = index;
                        ++pos;
                }
                if (! tarval_is_null(tv)) {
                        /* hmm, wrong access */
                        return NULL;
                }
                p[pos - 1].next = next;
                return rec_find_compound_ent_value(ptr, p);
        } else if (is_Sub(ptr)) {
                ir_node *l = get_Sub_left(ptr);
                ir_node *r = get_Sub_right(ptr);

                ptr = l;
                tv  = get_Const_tarval(r);
                tv  = tarval_neg(tv);
                goto ptr_arith;
        }
        return NULL;
}

static ir_node *find_compound_ent_value(ir_node *ptr) {
        return rec_find_compound_ent_value(ptr, NULL);
}

/* forward */
static void reduce_adr_usage(ir_node *ptr);

/**
 * Update a Load that may have lost its users.
 */
static void handle_load_update(ir_node *load) {
        ldst_info_t *info = get_irn_link(load);

        /* do NOT touch volatile loads for now */
        if (get_Load_volatility(load) == volatility_is_volatile)
                return;

        if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
                ir_node *ptr = get_Load_ptr(load);
                ir_node *mem = get_Load_mem(load);

                /* a Load whose value is neither used nor exception checked, remove it */
                exchange(info->projs[pn_Load_M], mem);
                if (info->projs[pn_Load_X_regular])
                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                kill_node(load);
                reduce_adr_usage(ptr);
        }
}  /* handle_load_update */

/**
 * A use of an address node has vanished. Check if this was a Proj
 * node and update the counters.
 */
static void reduce_adr_usage(ir_node *ptr) {
        if (is_Proj(ptr)) {
                if (get_irn_n_edges(ptr) <= 0) {
                        /* this Proj is dead now */
                        ir_node *pred = get_Proj_pred(ptr);

                        if (is_Load(pred)) {
                                ldst_info_t *info = get_irn_link(pred);
                                info->projs[get_Proj_proj(ptr)] = NULL;

                                /* this node lost its result proj, handle that */
                                handle_load_update(pred);
                        }
                }
        }
}  /* reduce_adr_usage */

/**
 * Check if an already existing value of mode old_mode can be converted
 * into the needed mode new_mode without loss.
 */
static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode) {
        if (old_mode == new_mode)
                return 1;

        /* if both modes are two's complement ones, we can always convert the
           stored value into the needed one. */
        if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
                  get_mode_arithmetic(old_mode) == irma_twos_complement &&
                  get_mode_arithmetic(new_mode) == irma_twos_complement)
                return 1;
        return 0;
}  /* can_use_stored_value */
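
/*
 * Worked example (illustrative, not from the original source): a value
 * stored in a 32-bit two's complement mode can also satisfy a later
 * 16-bit two's complement Load, since truncating the wider value yields
 * exactly the bits the narrower Load would have read.  The reverse
 * (old 16-bit value, new 32-bit Load) is rejected: the upper bits are
 * unknown.
 */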

/**
 * Check whether a Call is at least pure, i.e. it only reads memory.
 */
static unsigned is_Call_pure(ir_node *call) {
        ir_type *call_tp = get_Call_type(call);
        unsigned prop = get_method_additional_properties(call_tp);

        /* check first the call type */
        if ((prop & (mtp_property_const|mtp_property_pure)) == 0) {
                /* try the called entity */
                ir_node *ptr = get_Call_ptr(call);

                if (is_Global(ptr)) {
                        ir_entity *ent = get_Global_entity(ptr);

                        prop = get_entity_additional_properties(ent);
                }
        }
        return (prop & (mtp_property_const|mtp_property_pure)) != 0;
}  /* is_Call_pure */

static ir_node *get_base_and_offset(ir_node *ptr, long *pOffset)
{
        ir_mode *mode  = get_irn_mode(ptr);
        long    offset = 0;

        /* TODO: long might not be enough, we should probably use some tarval thingy... */
        for (;;) {
                if (is_Add(ptr)) {
                        ir_node *l = get_Add_left(ptr);
                        ir_node *r = get_Add_right(ptr);

                        if (get_irn_mode(l) != mode || !is_Const(r))
                                break;

                        offset += get_tarval_long(get_Const_tarval(r));
                        ptr     = l;
                } else if (is_Sub(ptr)) {
                        ir_node *l = get_Sub_left(ptr);
                        ir_node *r = get_Sub_right(ptr);

                        if (get_irn_mode(l) != mode || !is_Const(r))
                                break;

                        offset -= get_tarval_long(get_Const_tarval(r));
                        ptr     = l;
                } else if (is_Sel(ptr)) {
                        ir_entity *ent = get_Sel_entity(ptr);
                        ir_type   *tp  = get_entity_owner(ent);

                        if (is_Array_type(tp)) {
                                int     size;
                                ir_node *index;

                                /* only one-dimensional arrays yet */
                                if (get_Sel_n_indexs(ptr) != 1)
                                        break;
                                index = get_Sel_index(ptr, 0);
                                if (! is_Const(index))
                                        break;

                                tp = get_entity_type(ent);
                                if (get_type_state(tp) != layout_fixed)
                                        break;

                                size    = get_type_size_bytes(tp);
                                offset += size * get_tarval_long(get_Const_tarval(index));
                        } else {
                                if (get_type_state(tp) != layout_fixed)
                                        break;
                                offset += get_entity_offset(ent);
                        }
                        ptr = get_Sel_ptr(ptr);
                } else
                        break;
        }

        *pOffset = offset;
        return ptr;
}
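
/*
 * Illustrative decomposition (assuming a fixed type layout): for
 * Add(Sel(p, field), Const 4) with "field" placed at byte offset 8, the
 * loop above first folds the Add (offset = 4), then the Sel (offset = 12),
 * and returns the base pointer p.  A one-dimensional array Sel with a
 * constant index i contributes i * element_size instead; anything that
 * cannot be folded (variable index, unfixed layout) ends the walk.
 */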

static int try_load_after_store(ir_node *load,
                ir_node *load_base_ptr, long load_offset, ir_node *store)
{
        ldst_info_t *info;
        ir_node *store_ptr      = get_Store_ptr(store);
        long     store_offset;
        ir_node *store_base_ptr = get_base_and_offset(store_ptr, &store_offset);
        ir_node *store_value;
        ir_mode *store_mode;
        ir_node *load_ptr;
        ir_mode *load_mode;
        long     load_mode_len;
        long     store_mode_len;
        long     delta;
        int      res;

        if (load_base_ptr != store_base_ptr)
                return 0;

        load_mode      = get_Load_mode(load);
        load_mode_len  = get_mode_size_bytes(load_mode);
        store_mode     = get_irn_mode(get_Store_value(store));
        store_mode_len = get_mode_size_bytes(store_mode);
        delta          = load_offset - store_offset;
        store_value    = get_Store_value(store);

        if (delta != 0 || store_mode != load_mode) {
                if (delta < 0 || delta + load_mode_len > store_mode_len)
                        return 0;

                if (get_mode_arithmetic(store_mode) != irma_twos_complement ||
                        get_mode_arithmetic(load_mode)  != irma_twos_complement)
                        return 0;

                /* produce a shift to adjust offset delta */
                if (delta > 0) {
                        ir_node *cnst;

                        /* FIXME: only true for little endian */
                        cnst        = new_Const_long(mode_Iu, delta * 8);
                        store_value = new_r_Shr(get_nodes_block(load),
                                                store_value, cnst, store_mode);
                }

                /* add a Conv if needed */
                if (store_mode != load_mode) {
                        store_value = new_r_Conv(get_nodes_block(load), store_value, load_mode);
                }
        }

        DBG_OPT_RAW(load, store_value);

        info = get_irn_link(load);
        if (info->projs[pn_Load_M])
                exchange(info->projs[pn_Load_M], get_Load_mem(load));

        res = 0;
        /* no exception */
        if (info->projs[pn_Load_X_except]) {
                exchange(info->projs[pn_Load_X_except], new_Bad());
                res |= CF_CHANGED;
        }
        if (info->projs[pn_Load_X_regular]) {
                exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                res |= CF_CHANGED;
        }

        if (info->projs[pn_Load_res])
                exchange(info->projs[pn_Load_res], store_value);

        load_ptr = get_Load_ptr(load);
        kill_node(load);
        reduce_adr_usage(load_ptr);
        return res | DF_CHANGED;
}
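
/*
 * Worked example of the delta/shift logic above (illustrative, little
 * endian): a 32-bit Store of 0x11223344 at offset 0 followed by an 8-bit
 * Load at offset 2 gives delta = 2, which passes the range check
 * (2 + 1 <= 4).  The Load's value is rebuilt as Conv(Shr(0x11223344, 16))
 * = 0x22, exactly the byte at offset 2 in memory, so the Load itself can
 * be removed.
 */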

/**
 * Follow the memory chain as long as there are only Loads,
 * alias free Stores, and constant Calls and try to replace the
 * current Load by a previous one.
 * Note that in unreachable loops it might happen that we reach the
 * Load again, and we can run into a cycle.
 * We break such cycles using a special visited flag.
 *
 * INC_MASTER() must be called before diving in.
 */
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
        unsigned    res = 0;
        ldst_info_t *info = get_irn_link(load);
        ir_node     *pred;
        ir_node     *ptr       = get_Load_ptr(load);
        ir_node     *mem       = get_Load_mem(load);
        ir_mode     *load_mode = get_Load_mode(load);

        for (pred = curr; load != pred; ) {
                ldst_info_t *pred_info = get_irn_link(pred);

                /*
                 * a Load immediately after a Store -- a read after write.
                 * We may remove the Load if neither the Load nor the Store has an
                 * exception handler OR they are in the same MacroBlock. In the latter
                 * case the Load cannot throw an exception when the previous Store was
                 * quiet.
                 *
                 * Why do we need to check for a Store exception? If the Store cannot
                 * be executed (ROM) the exception handler might simply jump into
                 * the load MacroBlock :-(
                 * We could make it a little bit better if we would know that the
                 * exception handler of the Store jumps directly to the end...
                 */
                if (is_Store(pred) && ((pred_info->projs[pn_Store_X_except] == NULL
                                && info->projs[pn_Load_X_except] == NULL)
                                || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)))
                {
                        long    load_offset;
                        ir_node *base_ptr = get_base_and_offset(ptr, &load_offset);
                        int     changes   = try_load_after_store(load, base_ptr, load_offset, pred);

                        if (changes != 0)
                                return res | changes;
                } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
                           can_use_stored_value(get_Load_mode(pred), load_mode)) {
                        /*
                         * a Load after a Load -- a read after read.
                         * We may remove the second Load if it does not have an exception
                         * handler OR they are in the same MacroBlock. In the latter case
                         * the Load cannot throw an exception when the previous Load was
                         * quiet.
                         *
                         * Here, there is no need to check if the previous Load has an
                         * exception handler because they would have exactly the same
                         * exception...
                         */
                        if (info->projs[pn_Load_X_except] == NULL || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)) {
                                ir_node *value;

                                DBG_OPT_RAR(load, pred);

                                /* the result is used */
                                if (info->projs[pn_Load_res]) {
                                        if (pred_info->projs[pn_Load_res] == NULL) {
                                                /* create a new Proj again */
                                                pred_info->projs[pn_Load_res] = new_r_Proj(get_nodes_block(pred), pred, get_Load_mode(pred), pn_Load_res);
                                        }
                                        value = pred_info->projs[pn_Load_res];

                                        /* add a Conv if needed */
                                        if (get_Load_mode(pred) != load_mode) {
                                                value = new_r_Conv(get_nodes_block(load), value, load_mode);
                                        }

                                        exchange(info->projs[pn_Load_res], value);
                                }

                                if (info->projs[pn_Load_M])
                                        exchange(info->projs[pn_Load_M], mem);

                                /* no exception */
                                if (info->projs[pn_Load_X_except]) {
                                        exchange(info->projs[pn_Load_X_except], new_Bad());
                                        res |= CF_CHANGED;
                                }
                                if (info->projs[pn_Load_X_regular]) {
                                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                                        res |= CF_CHANGED;
                                }

                                kill_node(load);
                                reduce_adr_usage(ptr);
                                return res | DF_CHANGED;
                        }
                }

                if (is_Store(pred)) {
                        /* check if we can pass through this store */
                        ir_alias_relation rel = get_alias_relation(
                                current_ir_graph,
                                get_Store_ptr(pred),
                                get_irn_mode(get_Store_value(pred)),
                                ptr, load_mode);
                        /* if there might be an alias, we cannot pass this Store */
                        if (rel != ir_no_alias)
                                break;
                        pred = skip_Proj(get_Store_mem(pred));
                } else if (is_Load(pred)) {
                        pred = skip_Proj(get_Load_mem(pred));
                } else if (is_Call(pred)) {
                        if (is_Call_pure(pred)) {
                                /* The called graph is at least pure, so there are no Stores
                                   in it. We can handle it like a Load and skip it. */
                                pred = skip_Proj(get_Call_mem(pred));
                        } else {
                                /* there might be Stores in the graph, stop here */
                                break;
                        }
                } else {
                        /* follow only Load chains */
                        break;
                }

                /* check for cycles */
                if (NODE_VISITED(pred_info))
                        break;
                MARK_NODE(pred_info);
        }

        if (is_Sync(pred)) {
                int i;

                /* handle all Sync predecessors */
                for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
                        res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
                        if (res)
                                return res;
                }
        }

        return res;
}  /* follow_Mem_chain */
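
/*
 * Illustrative walk (not from the original source): for the memory chain
 * Store(p) -> Load(q) -> Load(p), following the chain from the last
 * Load(p) skips the unrelated Load(q) and reaches the Store(p), a
 * read-after-write that try_load_after_store() may fold.  A Store whose
 * address may alias p, or a Call that is not at least pure, stops the
 * walk instead.
 */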

/*
 * Check if we can replace the load by a given const from
 * the const code irg.
 */
ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c) {
        ir_mode *c_mode = get_irn_mode(c);
        ir_mode *l_mode = get_Load_mode(load);
        ir_node *res    = NULL;

        if (c_mode != l_mode) {
                /* check if the mode matches OR can be easily converted into it */
                if (is_reinterpret_cast(c_mode, l_mode)) {
                        /* we can safely cast */
                        dbg_info *dbg   = get_irn_dbg_info(load);
                        ir_node  *block = get_nodes_block(load);

                        /* copy the value from the const code irg and cast it */
                        res = copy_const_value(dbg, c);
                        res = new_rd_Conv(dbg, block, res, l_mode);
                }
        } else {
                /* copy the value from the const code irg */
                res = copy_const_value(get_irn_dbg_info(load), c);
        }
        return res;
}  /* can_replace_load_by_const */

/**
 * optimize a Load
 *
 * @param load  the Load node
 */
static unsigned optimize_load(ir_node *load)
{
        ldst_info_t *info = get_irn_link(load);
        ir_node     *mem, *ptr, *value;
        ir_entity   *ent;
        long        dummy;
        unsigned    res = 0;

        /* do NOT touch volatile loads for now */
        if (get_Load_volatility(load) == volatility_is_volatile)
                return 0;

        /* the address of the load to be optimized */
        ptr = get_Load_ptr(load);

        /*
         * Check if we can remove the exception from a Load:
         * This can be done if the address is from a Sel(Alloc) and
         * the Sel type is a subtype of the allocated type.
         *
         * This optimizes some often used OO constructs,
         * like x = new O; x->t;
         */
        if (info->projs[pn_Load_X_except]) {
                ir_node *addr = ptr;

                /* find base address */
                while (is_Sel(addr))
                        addr = get_Sel_ptr(addr);
                if (is_Alloc(skip_Proj(skip_Cast(addr)))) {
                        /* simple case: a direct load after an Alloc. A Firm Alloc throws
                         * an exception in case of out-of-memory, so there is no way for this
                         * Load to raise an exception.
                         * This code is constructed by the "exception lowering" in the Jack compiler.
                         */
                        exchange(info->projs[pn_Load_X_except], new_Bad());
                        info->projs[pn_Load_X_except] = NULL;
                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                        info->projs[pn_Load_X_regular] = NULL;
                        res |= CF_CHANGED;
                }
        }

        /* The mem of the Load. Must still be returned after optimization. */
        mem = get_Load_mem(load);

        if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
                /* a Load whose value is neither used nor exception checked, remove it */
                exchange(info->projs[pn_Load_M], mem);

                if (info->projs[pn_Load_X_regular]) {
                        /* should not happen, but if it does, remove it */
                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                        res |= CF_CHANGED;
                }
                kill_node(load);
                reduce_adr_usage(ptr);
                return res | DF_CHANGED;
        }

        /* Load from a constant polymorphic field, where we can resolve
           polymorphism. */
        value = transform_polymorph_Load(load);
        if (value == load) {
                value = NULL;
                /* check if we can determine the entity that will be loaded */
                ent = find_constant_entity(ptr);
                if (ent != NULL && !(get_entity_linkage(ent) & IR_LINKAGE_EXTERN)) {
                        /* a static allocation that is not external: there should be NO exception
                         * when loading even if we cannot replace the load itself. */

                        /* no exception, clear the info field as it might be checked later again */
                        if (info->projs[pn_Load_X_except]) {
                                exchange(info->projs[pn_Load_X_except], new_Bad());
                                info->projs[pn_Load_X_except] = NULL;
                                res |= CF_CHANGED;
                        }
                        if (info->projs[pn_Load_X_regular]) {
                                exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                                info->projs[pn_Load_X_regular] = NULL;
                                res |= CF_CHANGED;
                        }

                        if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT) {
                                if (ent->initializer != NULL) {
                                        /* new style initializer */
                                        value = find_compound_ent_value(ptr);
                                } else if (entity_has_compound_ent_values(ent)) {
                                        /* old style initializer */
                                        compound_graph_path *path = get_accessed_path(ptr);

                                        if (path != NULL) {
                                                assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));

                                                value = get_compound_ent_value_by_path(ent, path);
                                                DB((dbg, LEVEL_1, "  Constant access at %F%F resulted in %+F\n", ent, path, value));
                                                free_compound_graph_path(path);
                                        }
                                }
                                if (value != NULL)
                                        value = can_replace_load_by_const(load, value);
                        }
                }
        }
        if (value != NULL) {
                /* we completely replace the load by this value */
                if (info->projs[pn_Load_X_except]) {
                        exchange(info->projs[pn_Load_X_except], new_Bad());
                        info->projs[pn_Load_X_except] = NULL;
                        res |= CF_CHANGED;
                }
                if (info->projs[pn_Load_X_regular]) {
                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                        info->projs[pn_Load_X_regular] = NULL;
                        res |= CF_CHANGED;
                }
                if (info->projs[pn_Load_M]) {
                        exchange(info->projs[pn_Load_M], mem);
                        res |= DF_CHANGED;
                }
                if (info->projs[pn_Load_res]) {
                        exchange(info->projs[pn_Load_res], value);
                        res |= DF_CHANGED;
                }
                kill_node(load);
                reduce_adr_usage(ptr);
                return res;
        }

        /* Check if the address of this Load is used more than once.
         * If not, this Load cannot be removed in any case. */
        if (get_irn_n_uses(ptr) <= 1 && get_irn_n_uses(get_base_and_offset(ptr, &dummy)) <= 1)
                return res;

        /*
         * follow the memory chain as long as there are only Loads
         * and try to replace the current Load or Store by a previous one.
         * Note that in unreachable loops it might happen that we reach the
         * Load again, and we can run into a cycle.
         * We break such cycles using a special visited flag.
         */
        INC_MASTER();
        res = follow_Mem_chain(load, skip_Proj(mem));
        return res;
}  /* optimize_load */

/**
 * Check whether a value of mode new_mode would completely overwrite a value
 * of mode old_mode in memory.
 */
static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
{
        return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
}  /* is_completely_overwritten */

/**
 * Check whether small is a part of large (starting at the same address).
 */
static int is_partially_same(ir_node *small, ir_node *large)
{
        ir_mode *sm = get_irn_mode(small);
        ir_mode *lm = get_irn_mode(large);

        /* FIXME: Check endianness */
        return is_Conv(small) && get_Conv_op(small) == large
            && get_mode_size_bytes(sm) < get_mode_size_bytes(lm)
            && get_mode_arithmetic(sm) == irma_twos_complement
            && get_mode_arithmetic(lm) == irma_twos_complement;
}  /* is_partially_same */
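
/*
 * Example (illustrative): for "int x; ... *(int *)p = x; *(short *)p =
 * (short)x;" the second Store writes Conv(x), and
 * is_partially_same(Conv(x), x) holds: the bytes it would write are
 * already in memory from the wider Store, so the narrower Store can be
 * removed by the write-after-write logic below (modulo the endianness
 * FIXME above).
 */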
1251
1252 /**
1253  * follow the memory chain as long as there are only Loads and alias free Stores.
1254  *
1255  * INC_MASTER() must be called before dive into
1256  */
1257 static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
1258         unsigned res = 0;
1259         ldst_info_t *info = get_irn_link(store);
1260         ir_node *pred;
1261         ir_node *ptr = get_Store_ptr(store);
1262         ir_node *mem = get_Store_mem(store);
1263         ir_node *value = get_Store_value(store);
1264         ir_mode *mode  = get_irn_mode(value);
1265         ir_node *block = get_nodes_block(store);
1266         ir_node *mblk  = get_Block_MacroBlock(block);
1267
1268         for (pred = curr; pred != store;) {
1269                 ldst_info_t *pred_info = get_irn_link(pred);
1270
1271                 /*
1272                  * BEWARE: one might think that checking the modes is useless, because
1273                  * if the pointers are identical, they refer to the same object.
1274                  * This is only true in strong typed languages, not is C were the following
1275                  * is possible *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
1276                  * However, if the size of the mode that is written is bigger or equal the
1277                  * size of the old one, the old value is completely overwritten and can be
1278                  * killed ...
1279                  */
1280                 if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
1281             get_nodes_MacroBlock(pred) == mblk) {
1282                         /*
1283                          * a Store after a Store in the same MacroBlock -- a write after write.
1284                          */
1285
1286                         /*
1287                          * We may remove the first Store, if the old value is completely
1288                          * overwritten or the old value is a part of the new value,
1289                          * and if it does not have an exception handler.
1290                          *
1291                          * TODO: What if both have the same exception handler?
1292                          */
1293                         if (get_Store_volatility(pred) != volatility_is_volatile
1294                                 && !pred_info->projs[pn_Store_X_except]) {
1295                                 ir_node *predvalue = get_Store_value(pred);
1296                                 ir_mode *predmode  = get_irn_mode(predvalue);
1297
1298                                 if (is_completely_overwritten(predmode, mode)
1299                                         || is_partially_same(predvalue, value)) {
1300                                         DBG_OPT_WAW(pred, store);
1301                                         exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
1302                                         kill_node(pred);
1303                                         reduce_adr_usage(ptr);
1304                                         return DF_CHANGED;
1305                                 }
1306                         }
1307
1308                         /*
1309                          * We may remove the Store, if the old value already contains
1310                          * the new value, and if it does not have an exception handler.
1311                          *
1312                          * TODO: What if both have the same exception handler?
1313                          */
1314                         if (get_Store_volatility(store) != volatility_is_volatile
1315                                 && !info->projs[pn_Store_X_except]) {
1316                                 ir_node *predvalue = get_Store_value(pred);
1317
1318                                 if (is_partially_same(value, predvalue)) {
1319                                         DBG_OPT_WAW(pred, store);
1320                                         exchange(info->projs[pn_Store_M], mem);
1321                                         kill_node(store);
1322                                         reduce_adr_usage(ptr);
1323                                         return DF_CHANGED;
1324                                 }
1325                         }
1326                 } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
1327                            value == pred_info->projs[pn_Load_res]) {
1328                         /*
1329                          * a Store of a value just loaded from the same address
1330                          * -- a write after read.
1331                          * We may remove the Store, if it does not have an exception
1332                          * handler.
1333                          */
1334                         if (! info->projs[pn_Store_X_except]) {
1335                                 DBG_OPT_WAR(store, pred);
1336                                 exchange(info->projs[pn_Store_M], mem);
1337                                 kill_node(store);
1338                                 reduce_adr_usage(ptr);
1339                                 return DF_CHANGED;
1340                         }
1341                 }
1342
1343                 if (is_Store(pred)) {
1344                         /* check if we can pass through this store */
1345                         ir_alias_relation rel = get_alias_relation(
1346                                 current_ir_graph,
1347                                 get_Store_ptr(pred),
1348                                 get_irn_mode(get_Store_value(pred)),
1349                                 ptr, mode);
1350                         /* if there might be an alias, we cannot pass this Store */
1351                         if (rel != ir_no_alias)
1352                                 break;
1353                         pred = skip_Proj(get_Store_mem(pred));
1354                 } else if (is_Load(pred)) {
1355                         ir_alias_relation rel = get_alias_relation(
1356                                 current_ir_graph, get_Load_ptr(pred), get_Load_mode(pred),
1357                                 ptr, mode);
1358                         if (rel != ir_no_alias)
1359                                 break;
1360
1361                         pred = skip_Proj(get_Load_mem(pred));
1362                 } else {
1363                         /* follow only Load chains */
1364                         break;
1365                 }
1366
1367                 /* check for cycles */
1368                 if (NODE_VISITED(pred_info))
1369                         break;
1370                 MARK_NODE(pred_info);
1371         }
1372
1373         if (is_Sync(pred)) {
1374                 int i;
1375
1376                 /* handle all Sync predecessors */
1377                 for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
1378                         res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
1379                         if (res)
1380                                 break;
1381                 }
1382         }
1383         return res;
1384 }  /* follow_Mem_chain_for_Store */
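
/*
 * Editor's note -- the removals above, sketched as hypothetical C source
 * (assuming no aliasing, no volatility and no exception handlers):
 *
 *     *p = a; *p = b;     // write after write: the first Store dies if
 *                         // b's mode covers a's, or if a == Conv(b)
 *     x = *p; *p = x;     // write after read: the Store dies, the
 *                         // location already holds the loaded value
 */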
1385
1386 /** find entity used as base for an address calculation */
1387 static ir_entity *find_entity(ir_node *ptr)
1388 {
1389         switch (get_irn_opcode(ptr)) {
1390         case iro_SymConst:
1391                 return get_SymConst_entity(ptr);
1392         case iro_Sel: {
1393                 ir_node *pred = get_Sel_ptr(ptr);
1394                 if (get_irg_frame(get_irn_irg(ptr)) == pred)
1395                         return get_Sel_entity(ptr);
1396
1397                 return find_entity(pred);
1398         }
1399         case iro_Sub:
1400         case iro_Add: {
1401                 ir_node *left = get_binop_left(ptr);
1402                 ir_node *right;
1403                 if (mode_is_reference(get_irn_mode(left)))
1404                         return find_entity(left);
1405                 right = get_binop_right(ptr);
1406                 if (mode_is_reference(get_irn_mode(right)))
1407                         return find_entity(right);
1408                 return NULL;
1409         }
1410         default:
1411                 return NULL;
1412         }
1413 }
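
/*
 * Editor's note -- examples of what find_entity() recovers (hypothetical
 * names): Sel(Frame, local_var) yields local_var; Add(SymConst(&g), off)
 * follows the reference-mode operand and yields g; an address loaded from
 * memory yields NULL, so the never-read check below simply does not fire.
 */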
1414
1415 /**
1416  * optimize a Store
1417  *
1418  * @param store  the Store node
1419  */
1420 static unsigned optimize_store(ir_node *store) {
1421         ir_node   *ptr;
1422         ir_node   *mem;
1423         ir_entity *entity;
1424
1425         if (get_Store_volatility(store) == volatility_is_volatile)
1426                 return 0;
1427
1428         ptr    = get_Store_ptr(store);
1429         entity = find_entity(ptr);
1430
1431         /* a store to an entity which is never read is unnecessary */
1432         if (entity != NULL && !(get_entity_usage(entity) & ir_usage_read)) {
1433                 ldst_info_t *info = get_irn_link(store);
1434                 if (info->projs[pn_Store_X_except] == NULL) {
1435                         DB((dbg, LEVEL_1, "  Killing useless %+F to never read entity %+F\n", store, entity));
1436                         exchange(info->projs[pn_Store_M], get_Store_mem(store));
1437                         kill_node(store);
1438                         reduce_adr_usage(ptr);
1439                         return DF_CHANGED;
1440                 }
1441         }
1442
1443         /* Check if the address of this Store is used more than once.
1444          * If not, this Store cannot be removed in any case. */
1445         if (get_irn_n_uses(ptr) <= 1)
1446                 return 0;
1447
1448         mem = get_Store_mem(store);
1449
1450         /* follow the memory chain as long as there are only Loads and alias-free Stores */
1451         INC_MASTER();
1452
1453         return follow_Mem_chain_for_Store(store, skip_Proj(mem));
1454 }  /* optimize_store */
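
/*
 * Editor's note -- the never-read case above corresponds to hypothetical
 * source like
 *
 *     static int scratch;              // ir_usage_read never set
 *     void f(void) { scratch = 42; }   // Store is removable
 *
 * This relies on entity usage information; optimize_load_store() below
 * ensures it has been computed when alias analysis is enabled.
 */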
1455
1456 /**
1457  * walker, optimizes Phi after Stores to identical places:
1458  * Does the following optimization:
1459  * @verbatim
1460  *
1461  *   val1   val2   val3          val1  val2  val3
1462  *    |      |      |               \    |    /
1463  *  Store  Store  Store              \   |   /
1464  *      \    |    /                   PhiData
1465  *       \   |   /                       |
1466  *        \  |  /                      Store
1467  *          PhiM
1468  *
1469  * @endverbatim
1470  * This reduces the number of stores and allows for predicated execution.
1471  * However, it moves Stores towards the end of a function, which may be bad.
1472  *
1473  * This is only possible if the predecessor blocks have only one successor.
1474  */
1475 static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
1476 {
1477         int i, n;
1478         ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
1479         ir_mode *mode;
1480         ir_node **inM, **inD, **projMs;
1481         int *idx;
1482         dbg_info *db = NULL;
1483         ldst_info_t *info;
1484         block_info_t *bl_info;
1485         unsigned res = 0;
1486
1487         /* Must be a memory Phi */
1488         if (get_irn_mode(phi) != mode_M)
1489                 return 0;
1490
1491         n = get_Phi_n_preds(phi);
1492         if (n <= 0)
1493                 return 0;
1494
1495         /* the memory Proj must have only one user, the Phi */
1496         projM = get_Phi_pred(phi, 0);
1497         if (get_irn_n_edges(projM) != 1)
1498                 return 0;
1499
1500         store = skip_Proj(projM);
1501         old_store = store;
1502         if (!is_Store(store))
1503                 return 0;
1504
1505         block = get_nodes_block(store);
1506
1507         /* abort on dead blocks */
1508         if (is_Block_dead(block))
1509                 return 0;
1510
1511         /* check if the block is post dominated by Phi-block
1512            and has no exception exit */
1513         bl_info = get_irn_link(block);
1514         if (bl_info->flags & BLOCK_HAS_EXC)
1515                 return 0;
1516
1517         phi_block = get_nodes_block(phi);
1518         if (! block_strictly_postdominates(phi_block, block))
1519                 return 0;
1520
1521         /* this is the address of the store */
1522         ptr  = get_Store_ptr(store);
1523         mode = get_irn_mode(get_Store_value(store));
1524         info = get_irn_link(store);
1525         exc  = info->exc_block;
1526
1527         for (i = 1; i < n; ++i) {
1528                 ir_node *pred = get_Phi_pred(phi, i);
1529
1530                 if (get_irn_n_edges(pred) != 1)
1531                         return 0;
1532
1533                 pred = skip_Proj(pred);
1534                 if (!is_Store(pred))
1535                         return 0;
1536
1537                 if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
1538                         return 0;
1539
1540                 info = get_irn_link(pred);
1541
1542                 /* check, if all stores have the same exception flow */
1543                 if (exc != info->exc_block)
1544                         return 0;
1545
1546                 /* abort on dead blocks */
1547                 block = get_nodes_block(pred);
1548                 if (is_Block_dead(block))
1549                         return 0;
1550
1551                 /* check if the block is post dominated by Phi-block
1552                    and has no exception exit. Note that block must be different from
1553            Phi-block, else we would move a Store from the End of a block to its
1554                    Start... */
1555                 bl_info = get_irn_link(block);
1556                 if (bl_info->flags & BLOCK_HAS_EXC)
1557                         return 0;
1558                 if (block == phi_block || ! block_postdominates(phi_block, block))
1559                         return 0;
1560         }
1561
1562         /*
1563          * ok, when we get here, we have found all predecessors of a Phi that
1564          * are Stores to the same address and size. That means whatever
1565          * we do before we enter the block of the Phi, we do a Store.
1566          * So, we can move the Store to the current block:
1567          *
1568          *   val1    val2    val3          val1  val2  val3
1569          *    |       |       |               \    |    /
1570          * | Str | | Str | | Str |             \   |   /
1571          *      \     |     /                   PhiData
1572          *       \    |    /                       |
1573          *        \   |   /                       Str
1574          *           PhiM
1575          *
1576          * This is only allowed if the predecessor blocks have only one successor.
1577          */
1578
1579         NEW_ARR_A(ir_node *, projMs, n);
1580         NEW_ARR_A(ir_node *, inM, n);
1581         NEW_ARR_A(ir_node *, inD, n);
1582         NEW_ARR_A(int, idx, n);
1583
1584         /* Prepare: Collect all Store nodes.  We must do this
1585            first because we otherwise may lose a Store when exchanging its
1586            memory Proj.
1587          */
1588         for (i = n - 1; i >= 0; --i) {
1589                 ir_node *store;
1590
1591                 projMs[i] = get_Phi_pred(phi, i);
1592                 assert(is_Proj(projMs[i]));
1593
1594                 store = get_Proj_pred(projMs[i]);
1595                 info  = get_irn_link(store);
1596
1597                 inM[i] = get_Store_mem(store);
1598                 inD[i] = get_Store_value(store);
1599                 idx[i] = info->exc_idx;
1600         }
1601         block = get_nodes_block(phi);
1602
1603         /* second step: create a new memory Phi */
1604         phiM = new_rd_Phi(get_irn_dbg_info(phi), block, n, inM, mode_M);
1605
1606         /* third step: create a new data Phi */
1607         phiD = new_rd_Phi(get_irn_dbg_info(phi), block, n, inD, mode);
1608
1609         /* rewire memory and kill the node */
1610         for (i = n - 1; i >= 0; --i) {
1611                 ir_node *proj  = projMs[i];
1612
1613                 if (is_Proj(proj)) {
1614                         ir_node *store = get_Proj_pred(proj);
1615                         exchange(proj, inM[i]);
1616                         kill_node(store);
1617                 }
1618         }
1619
1620         /* fourth step: create the Store */
1621         store = new_rd_Store(db, block, phiM, ptr, phiD, 0);
1622 #ifdef DO_CACHEOPT
1623         co_set_irn_name(store, co_get_irn_ident(old_store));
1624 #endif
1625
1626         projM = new_rd_Proj(NULL, block, store, mode_M, pn_Store_M);
1627
1628         info = get_ldst_info(store, &wenv->obst);
1629         info->projs[pn_Store_M] = projM;
1630
1631         /* fifth step: repair exception flow */
1632         if (exc) {
1633                 ir_node *projX = new_rd_Proj(NULL, block, store, mode_X, pn_Store_X_except);
1634
1635                 info->projs[pn_Store_X_except] = projX;
1636                 info->exc_block                = exc;
1637                 info->exc_idx                  = idx[0];
1638
1639                 for (i = 0; i < n; ++i) {
1640                         set_Block_cfgpred(exc, idx[i], projX);
1641                 }
1642
1643                 if (n > 1) {
1644                         /* the exception block should be optimized as some inputs are identical now */
1645                 }
1646
1647                 res |= CF_CHANGED;
1648         }
1649
1650         /* sixth step: replace old Phi */
1651         exchange(phi, projM);
1652
1653         return res | DF_CHANGED;
1654 }  /* optimize_phi */
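
/*
 * Editor's note -- at the source level the transformation above roughly
 * turns the hypothetical code
 *
 *     if (c) *p = a; else *p = b;
 *
 * into
 *
 *     *p = c ? a : b;     // PhiData selects the value, one Store remains
 *
 * which is what enables the predicated execution mentioned above.
 */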
1655
1656 /**
1657  * walker, do the optimizations
1658  */
1659 static void do_load_store_optimize(ir_node *n, void *env) {
1660         walk_env_t *wenv = env;
1661
1662         switch (get_irn_opcode(n)) {
1663
1664         case iro_Load:
1665                 wenv->changes |= optimize_load(n);
1666                 break;
1667
1668         case iro_Store:
1669                 wenv->changes |= optimize_store(n);
1670                 break;
1671
1672         case iro_Phi:
1673                 wenv->changes |= optimize_phi(n, wenv);
1674                 break;
1675
1676         default:
1677                 ;
1678         }
1679 }  /* do_load_store_optimize */
1680
1681 /** A scc. */
1682 typedef struct scc {
1683         ir_node *head;          /**< the head of the list */
1684 } scc;
1685
1686 /** A node entry. */
1687 typedef struct node_entry {
1688         unsigned DFSnum;    /**< the DFS number of this node */
1689         unsigned low;       /**< the low number of this node */
1690         int      in_stack;  /**< flag, set if the node is on the stack */
1691         ir_node  *next;     /**< link to the next node in the same scc */
1692         scc      *pscc;     /**< the scc of this node */
1693         unsigned POnum;     /**< the post order number for blocks */
1694 } node_entry;
1695
1696 /** A loop entry. */
1697 typedef struct loop_env {
1698         ir_phase ph;           /**< the phase object */
1699         ir_node  **stack;      /**< the node stack */
1700         int      tos;          /**< tos index */
1701         unsigned nextDFSnum;   /**< the current DFS number */
1702         unsigned POnum;        /**< current post order number */
1703
1704         unsigned changes;      /**< a bitmask of graph changes */
1705 } loop_env;
1706
1707 /**
1708  * Gets the node_entry of a node.
1709  */
1710 static node_entry *get_irn_ne(ir_node *irn, loop_env *env) {
1711         ir_phase   *ph = &env->ph;
1712         node_entry *e  = phase_get_irn_data(&env->ph, irn);
1713
1714         if (! e) {
1715                 e = phase_alloc(ph, sizeof(*e));
1716                 memset(e, 0, sizeof(*e));
1717                 phase_set_irn_data(ph, irn, e);
1718         }
1719         return e;
1720 }  /* get_irn_ne */
1721
1722 /**
1723  * Push a node onto the stack.
1724  *
1725  * @param env   the loop environment
1726  * @param n     the node to push
1727  */
1728 static void push(loop_env *env, ir_node *n) {
1729         node_entry *e;
1730
1731         if (env->tos == ARR_LEN(env->stack)) {
1732                 int nlen = ARR_LEN(env->stack) * 2;
1733                 ARR_RESIZE(ir_node *, env->stack, nlen);
1734         }
1735         env->stack[env->tos++] = n;
1736         e = get_irn_ne(n, env);
1737         e->in_stack = 1;
1738 }  /* push */
1739
1740 /**
1741  * pop a node from the stack
1742  *
1743  * @param env   the loop environment
1744  *
1745  * @return  The topmost node
1746  */
1747 static ir_node *pop(loop_env *env) {
1748         ir_node *n = env->stack[--env->tos];
1749         node_entry *e = get_irn_ne(n, env);
1750
1751         e->in_stack = 0;
1752         return n;
1753 }  /* pop */
1754
1755 /**
1756  * Check if irn is a region constant.
1757  * The block of irn must strictly dominate the header block.
1758  *
1759  * @param irn           the node to check
1760  * @param header_block  the header block of the induction variable
1761  */
1762 static int is_rc(ir_node *irn, ir_node *header_block) {
1763         ir_node *block = get_nodes_block(irn);
1764
1765         return (block != header_block) && block_dominates(block, header_block);
1766 }  /* is_rc */
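
/*
 * Editor's note -- example: a memory state produced before a loop lives in
 * a block that strictly dominates the loop header, so is_rc() accepts it
 * as region constant; a state produced inside the loop body is rejected.
 */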
1767
1768 typedef struct phi_entry phi_entry;
1769 struct phi_entry {
1770         ir_node   *phi;    /**< A phi with a region const memory. */
1771         int       pos;     /**< The position of the region const memory */
1772         ir_node   *load;   /**< the newly created load for this phi */
1773         phi_entry *next;
1774 };
1775
1776 /**
1777  * An entry in the avail set.
1778  */
1779 typedef struct avail_entry_t {
1780         ir_node *ptr;   /**< the address pointer */
1781         ir_mode *mode;  /**< the load mode */
1782         ir_node *load;  /**< the associated Load */
1783 } avail_entry_t;
1784
1785 /**
1786  * Compare two avail entries.
1787  */
1788 static int cmp_avail_entry(const void *elt, const void *key, size_t size) {
1789         const avail_entry_t *a = elt;
1790         const avail_entry_t *b = key;
1791         (void) size;
1792
1793         return a->ptr != b->ptr || a->mode != b->mode;
1794 }  /* cmp_avail_entry */
1795
1796 /**
1797  * Calculate the hash value of an avail entry.
1798  */
1799 static unsigned hash_cache_entry(const avail_entry_t *entry) {
1800         return get_irn_idx(entry->ptr) * 9 + HASH_PTR(entry->mode);
1801 }  /* hash_cache_entry */
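
/*
 * Editor's note -- the avail set below caches, per (ptr, mode) pair, the
 * Load already materialized in a predecessor block, so several equivalent
 * Loads hoisted out of the same loop share one node; see the set_find()/
 * set_insert() pair in move_loads_out_of_loops().
 */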
1802
1803 /**
1804  * Move Loads out of loops if possible.
1805  *
1806  * @param pscc   the loop described by an SCC
1807  * @param env    the loop environment
1808  */
1809 static void move_loads_out_of_loops(scc *pscc, loop_env *env) {
1810         ir_node   *phi, *load, *next, *other, *next_other;
1811         ir_entity *ent;
1812         int       j;
1813         phi_entry *phi_list = NULL;
1814         set       *avail;
1815
1816         avail = new_set(cmp_avail_entry, 8);
1817
1818         /* collect all outer memories */
1819         for (phi = pscc->head; phi != NULL; phi = next) {
1820                 node_entry *ne = get_irn_ne(phi, env);
1821                 next = ne->next;
1822
1823                 /* check all memory Phi's */
1824                 if (! is_Phi(phi))
1825                         continue;
1826
1827         assert(get_irn_mode(phi) == mode_M && "DFS returned a non-memory Phi");
1828
1829                 for (j = get_irn_arity(phi) - 1; j >= 0; --j) {
1830                         ir_node    *pred = get_irn_n(phi, j);
1831                         node_entry *pe   = get_irn_ne(pred, env);
1832
1833                         if (pe->pscc != ne->pscc) {
1834                                 /* not in the same SCC, is region const */
1835                                 phi_entry *entry = phase_alloc(&env->ph, sizeof(*entry));
1836
1837                                 entry->phi  = phi;
1838                                 entry->pos  = j;
1839                                 entry->next = phi_list;
1840                                 phi_list    = entry;
1841                         }
1842                 }
1843         }
1844         /* no Phis no fun */
1845         assert(phi_list != NULL && "DFS found a loop without Phi");
1846
1847         /* for now, we cannot handle more than one input (only reducible cf) */
1848         if (phi_list->next != NULL) {
1849                 del_set(avail);         /* don't leak the avail set */
1850                 return;
1851         }
1850
1851         for (load = pscc->head; load; load = next) {
1852                 ir_mode *load_mode;
1853                 node_entry *ne = get_irn_ne(load, env);
1854                 next = ne->next;
1855
1856                 if (is_Load(load)) {
1857                         ldst_info_t *info = get_irn_link(load);
1858                         ir_node     *ptr = get_Load_ptr(load);
1859
1860                         /* for now, we cannot handle Loads with exceptions */
1861                         if (info->projs[pn_Load_res] == NULL || info->projs[pn_Load_X_regular] != NULL || info->projs[pn_Load_X_except] != NULL)
1862                                 continue;
1863
1864                         /* for now, we can only move Load(Global) */
1865                         if (! is_Global(ptr))
1866                                 continue;
1867                         ent       = get_Global_entity(ptr);
1868                         load_mode = get_Load_mode(load);
1869                         for (other = pscc->head; other != NULL; other = next_other) {
1870                                 node_entry *ne = get_irn_ne(other, env);
1871                                 next_other = ne->next;
1872
1873                                 if (is_Store(other)) {
1874                                         ir_alias_relation rel = get_alias_relation(
1875                                                 current_ir_graph,
1876                                                 get_Store_ptr(other),
1877                                                 get_irn_mode(get_Store_value(other)),
1878                                                 ptr, load_mode);
1879                                         /* if there might be an alias, we cannot pass this Store */
1880                                         if (rel != ir_no_alias)
1881                                                 break;
1882                                 }
1883                                 /* only Phis and pure Calls are allowed here, so ignore them */
1884                         }
1885                         if (other == NULL) {
1886                                 ldst_info_t *ninfo;
1887                                 phi_entry   *pe;
1888                                 dbg_info    *db;
1889
1890                                 /* yep, no aliasing Store found, Load can be moved */
1891                                 DB((dbg, LEVEL_1, "  Found a Load that could be moved: %+F\n", load));
1892
1893                                 db   = get_irn_dbg_info(load);
1894                                 for (pe = phi_list; pe != NULL; pe = pe->next) {
1895                                         int     pos   = pe->pos;
1896                                         ir_node *phi  = pe->phi;
1897                                         ir_node *blk  = get_nodes_block(phi);
1898                                         ir_node *pred = get_Block_cfgpred_block(blk, pos);
1899                                         ir_node *irn, *mem;
1900                                         avail_entry_t entry, *res;
1901
1902                                         entry.ptr  = ptr;
1903                                         entry.mode = load_mode;
1904                                         res = set_find(avail, &entry, sizeof(entry), hash_cache_entry(&entry));
1905                                         if (res != NULL) {
1906                                                 irn = res->load;
1907                                         } else {
1908                                                 irn = new_rd_Load(db, pred, get_Phi_pred(phi, pos), ptr, load_mode, 0);
1909                                                 entry.load = irn;
1910                                                 set_insert(avail, &entry, sizeof(entry), hash_cache_entry(&entry));
1911                                                 DB((dbg, LEVEL_1, "  Created %+F in %+F\n", irn, pred));
1912                                         }
1913                                         pe->load = irn;
1914                                         ninfo = get_ldst_info(irn, phase_obst(&env->ph));
1915
1916                                         ninfo->projs[pn_Load_M] = mem = new_r_Proj(pred, irn, mode_M, pn_Load_M);
1917                                         set_Phi_pred(phi, pos, mem);
1918
1919                                         ninfo->projs[pn_Load_res] = new_r_Proj(pred, irn, load_mode, pn_Load_res);
1920                                 }
1921
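                                /* editor's note: the single-input check above
                                 * guarantees that phi_list has exactly one
                                 * entry, so ninfo still refers to the Load
                                 * created in the loop just finished */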
1922                                 /* now kill the old Load */
1923                                 exchange(info->projs[pn_Load_M], get_Load_mem(load));
1924                                 exchange(info->projs[pn_Load_res], ninfo->projs[pn_Load_res]);
1925
1926                                 env->changes |= DF_CHANGED;
1927                         }
1928                 }
1929         }
1930         del_set(avail);
1931 }  /* move_loads_out_of_loops */
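
/*
 * Editor's note -- a sketch of the net effect on hypothetical source, for
 * a global g with no aliasing Store inside the loop:
 *
 *     while (c) { sum += g; }      =>      t = g; while (c) { sum += t; }
 *
 * The Load of g is materialized on the region-constant memory edge feeding
 * the header Phi, and all uses inside the loop see the hoisted value.
 */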
1932
1933 /**
1934  * Process a loop SCC.
1935  *
1936  * @param pscc  the SCC
1937  * @param env   the loop environment
1938  */
1939 static void process_loop(scc *pscc, loop_env *env) {
1940         ir_node *irn, *next, *header = NULL;
1941         node_entry *b, *h = NULL;
1942         int j, only_phi, num_outside, process = 0;
1943         ir_node *out_rc;
1944
1945         /* find the header block for this scc */
1946         for (irn = pscc->head; irn; irn = next) {
1947                 node_entry *e = get_irn_ne(irn, env);
1948                 ir_node *block = get_nodes_block(irn);
1949
1950                 next = e->next;
1951                 b = get_irn_ne(block, env);
1952
1953                 if (header != NULL) {
1954                         if (h->POnum < b->POnum) {
1955                                 header = block;
1956                                 h      = b;
1957                         }
1958                 } else {
1959                         header = block;
1960                         h      = b;
1961                 }
1962         }
1963
1964         /* check if this scc contains only Phi, Load or Store nodes */
1965         only_phi    = 1;
1966         num_outside = 0;
1967         out_rc      = NULL;
1968         for (irn = pscc->head; irn; irn = next) {
1969                 node_entry *e = get_irn_ne(irn, env);
1970
1971                 next = e->next;
1972                 switch (get_irn_opcode(irn)) {
1973                 case iro_Call:
1974                         if (is_Call_pure(irn)) {
1975                                 /* pure calls can be treated like loads */
1976                                 only_phi = 0;
1977                                 break;
1978                         }
1979                         /* non-pure calls must be handled like may-alias Stores */
1980                         goto fail;
1981                 case iro_CopyB:
1982                         /* cannot handle CopyB yet */
1983                         goto fail;
1984                 case iro_Load:
1985                         process = 1;
1986                         if (get_Load_volatility(irn) == volatility_is_volatile) {
1987                                 /* cannot handle loops with volatile Loads */
1988                                 goto fail;
1989                         }
1990                         only_phi = 0;
1991                         break;
1992                 case iro_Store:
1993                         if (get_Store_volatility(irn) == volatility_is_volatile) {
1994                                 /* cannot handle loops with volatile Stores */
1995                                 goto fail;
1996                         }
1997                         only_phi = 0;
1998                         break;
1999                 case iro_Phi:
2000                         for (j = get_irn_arity(irn) - 1; j >= 0; --j) {
2001                                 ir_node *pred  = get_irn_n(irn, j);
2002                                 node_entry *pe = get_irn_ne(pred, env);
2003
2004                                 if (pe->pscc != e->pscc) {
2005                                         /* not in the same SCC, must be a region const */
2006                                         if (! is_rc(pred, header)) {
2007                                                 /* not a memory loop */
2008                                                 goto fail;
2009                                         }
2010                                         if (out_rc == NULL) {
2011                                                 /* first region constant */
2012                                                 out_rc = pred;
2013                                                 ++num_outside;
2014                                         } else if (out_rc != pred) {
2015                                                 /* another region constant */
2016                                                 ++num_outside;
2017                                         }
2018                                 }
2019                         }
2020                         break;
2021                 default:
2022                         only_phi = 0;
2023                         break;
2024                 }
2025         }
2026         if (! process)
2027                 goto fail;
2028
2029         /* found a memory loop */
2030         DB((dbg, LEVEL_2, "  Found a memory loop:\n  "));
2031         if (only_phi && num_outside == 1) {
2032                 /* a phi cycle with only one real predecessor can be collapsed */
2033                 DB((dbg, LEVEL_2, "  Found a USELESS Phi cycle:\n  "));
2034
2035                 for (irn = pscc->head; irn; irn = next) {
2036                         node_entry *e = get_irn_ne(irn, env);
2037                         next = e->next;
2038                         exchange(irn, out_rc);
2039                 }
2040                 env->changes |= DF_CHANGED;
2041                 return;
2042         }
2043
2044 #ifdef DEBUG_libfirm
2045         for (irn = pscc->head; irn; irn = next) {
2046                 node_entry *e = get_irn_ne(irn, env);
2047                 next = e->next;
2048                 DB((dbg, LEVEL_2, " %+F,", irn));
2049         }
2050         DB((dbg, LEVEL_2, "\n"));
2051 #endif
2052         move_loads_out_of_loops(pscc, env);
2053
2054 fail:
2055         ;
2056 }  /* process_loop */
2057
2058 /**
2059  * Process a SCC.
2060  *
2061  * @param pscc  the SCC
2062  * @param env   the loop environment
2063  */
2064 static void process_scc(scc *pscc, loop_env *env) {
2065         ir_node *head = pscc->head;
2066         node_entry *e = get_irn_ne(head, env);
2067
2068 #ifdef DEBUG_libfirm
2069         {
2070                 ir_node *irn, *next;
2071
2072                 DB((dbg, LEVEL_4, " SCC at %p:\n ", pscc));
2073                 for (irn = pscc->head; irn; irn = next) {
2074                         node_entry *e = get_irn_ne(irn, env);
2075
2076                         next = e->next;
2077
2078                         DB((dbg, LEVEL_4, " %+F,", irn));
2079                 }
2080                 DB((dbg, LEVEL_4, "\n"));
2081         }
2082 #endif
2083
2084         if (e->next != NULL) {
2085                 /* this SCC has more than one member */
2086                 process_loop(pscc, env);
2087         }
2088 }  /* process_scc */
2089
2090 /**
2091  * Do Tarjan's SCC algorithm and drive load/store optimization.
2092  *
2093  * @param irn  start at this node
2094  * @param env  the loop environment
2095  */
2096 static void dfs(ir_node *irn, loop_env *env)
2097 {
2098         int i, n;
2099         node_entry *node = get_irn_ne(irn, env);
2100
2101         mark_irn_visited(irn);
2102
2103         node->DFSnum = env->nextDFSnum++;
2104         node->low    = node->DFSnum;
2105         push(env, irn);
2106
2107         /* handle preds */
2108         if (is_Phi(irn) || is_Sync(irn)) {
2109                 n = get_irn_arity(irn);
2110                 for (i = 0; i < n; ++i) {
2111                         ir_node *pred = get_irn_n(irn, i);
2112                         node_entry *o = get_irn_ne(pred, env);
2113
2114                         if (!irn_visited(pred)) {
2115                                 dfs(pred, env);
2116                                 node->low = MIN(node->low, o->low);
2117                         }
2118                         if (o->DFSnum < node->DFSnum && o->in_stack)
2119                                 node->low = MIN(o->DFSnum, node->low);
2120                 }
2121         } else if (is_fragile_op(irn)) {
2122                 ir_node *pred = get_fragile_op_mem(irn);
2123                 node_entry *o = get_irn_ne(pred, env);
2124
2125                 if (!irn_visited(pred)) {
2126                         dfs(pred, env);
2127                         node->low = MIN(node->low, o->low);
2128                 }
2129                 if (o->DFSnum < node->DFSnum && o->in_stack)
2130                         node->low = MIN(o->DFSnum, node->low);
2131         } else if (is_Proj(irn)) {
2132                 ir_node *pred = get_Proj_pred(irn);
2133                 node_entry *o = get_irn_ne(pred, env);
2134
2135                 if (!irn_visited(pred)) {
2136                         dfs(pred, env);
2137                         node->low = MIN(node->low, o->low);
2138                 }
2139                 if (o->DFSnum < node->DFSnum && o->in_stack)
2140                         node->low = MIN(o->DFSnum, node->low);
2141         }
2142         else {
2143                  /* IGNORE predecessors */
2144         }
2145
2146         if (node->low == node->DFSnum) {
2147                 scc *pscc = phase_alloc(&env->ph, sizeof(*pscc));
2148                 ir_node *x;
2149
2150                 pscc->head = NULL;
2151                 do {
2152                         node_entry *e;
2153
2154                         x = pop(env);
2155                         e = get_irn_ne(x, env);
2156                         e->pscc    = pscc;
2157                         e->next    = pscc->head;
2158                         pscc->head = x;
2159                 } while (x != irn);
2160
2161                 process_scc(pscc, env);
2162         }
2163 }  /* dfs */
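
/*
 * Editor's note -- this is the standard Tarjan scheme: node->low tracks the
 * smallest DFS number reachable from the node's sub-DFS; when low == DFSnum
 * the node is the root of an SCC, and the stack is popped down to it before
 * the SCC is handed to process_scc(). Only memory edges (Phi, Sync, fragile
 * ops, Proj) are followed; all other predecessors terminate the walk.
 */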
2164
2165 /**
2166  * Do the DFS on the memory edges of a graph.
2167  *
2168  * @param irg  the graph to process
2169  * @param env  the loop environment
2170  */
2171 static void do_dfs(ir_graph *irg, loop_env *env) {
2172         ir_graph *rem = current_ir_graph;
2173         ir_node  *endblk, *end;
2174         int      i;
2175
2176         current_ir_graph = irg;
2177         inc_irg_visited(irg);
2178
2179         /* visit all memory nodes */
2180         endblk = get_irg_end_block(irg);
2181         for (i = get_Block_n_cfgpreds(endblk) - 1; i >= 0; --i) {
2182                 ir_node *pred = get_Block_cfgpred(endblk, i);
2183
2184                 pred = skip_Proj(pred);
2185                 if (is_Return(pred))
2186                         dfs(get_Return_mem(pred), env);
2187                 else if (is_Raise(pred))
2188                         dfs(get_Raise_mem(pred), env);
2189                 else if (is_fragile_op(pred))
2190                         dfs(get_fragile_op_mem(pred), env);
2191                 else {
2192                         assert(0 && "Unknown EndBlock predecessor");
2193                 }
2194         }
2195
2196         /* visit the keep-alives */
2197         end = get_irg_end(irg);
2198         for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
2199                 ir_node *ka = get_End_keepalive(end, i);
2200
2201                 if (is_Phi(ka) && !irn_visited(ka))
2202                         dfs(ka, env);
2203         }
2204         current_ir_graph = rem;
2205 }  /* do_dfs */
2206
2207 /**
2208  * Initialize new phase data. We always do this explicitly, so return NULL here.
2209  */
2210 static void *init_loop_data(ir_phase *ph, const ir_node *irn, void *data) {
2211         (void)ph;
2212         (void)irn;
2213         (void)data;
2214         return NULL;
2215 }  /* init_loop_data */
2216
2217 /**
2218  * Optimize Loads/Stores in loops.
2219  *
2220  * @param irg  the graph
2221  */
2222 static int optimize_loops(ir_graph *irg) {
2223         loop_env env;
2224
2225         env.stack         = NEW_ARR_F(ir_node *, 128);
2226         env.tos           = 0;
2227         env.nextDFSnum    = 0;
2228         env.POnum         = 0;
2229         env.changes       = 0;
2230         phase_init(&env.ph, "ldstopt", irg, PHASE_DEFAULT_GROWTH, init_loop_data, NULL);
2231
2232         /* calculate the SCCs and drive the loop optimization. */
2233         do_dfs(irg, &env);
2234
2235         DEL_ARR_F(env.stack);
2236         phase_free(&env.ph);
2237
2238         return env.changes;
2239 }  /* optimize_loops */
2240
2241 /*
2242  * do the load store optimization
2243  */
2244 int optimize_load_store(ir_graph *irg) {
2245         walk_env_t env;
2246
2247         FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");
2248
2249         assert(get_irg_phase_state(irg) != phase_building);
2250         assert(get_irg_pinned(irg) != op_pin_state_floats &&
2251                 "LoadStore optimization needs pinned graph");
2252
2253         /* we need landing pads */
2254         remove_critical_cf_edges(irg);
2255
2256         edges_assure(irg);
2257
2258         /* for Phi optimization post-dominators are needed ... */
2259         assure_postdoms(irg);
2260
2261         if (get_opt_alias_analysis()) {
2262                 assure_irg_entity_usage_computed(irg);
2263                 assure_irp_globals_entity_usage_computed();
2264         }
2265
2266         obstack_init(&env.obst);
2267         env.changes = 0;
2268
2269         /* init the links, then collect Loads/Stores/Proj's in lists */
2270         master_visited = 0;
2271         irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);
2272
2273         /* now we have collected enough information, optimize */
2274         irg_walk_graph(irg, NULL, do_load_store_optimize, &env);
2275
2276         env.changes |= optimize_loops(irg);
2277
2278         obstack_free(&env.obst, NULL);
2279
2280         /* Handle graph state */
2281         if (env.changes) {
2282                 set_irg_outs_inconsistent(irg);
2283                 set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
2284         }
2285
2286         if (env.changes & CF_CHANGED) {
2287                 /* Is this really needed? Yes: control flow changed, blocks
2288                  * might have Bad() predecessors. */
2289                 set_irg_doms_inconsistent(irg);
2290         }
2291         return env.changes != 0;
2292 }  /* optimize_load_store */
2293
2294 ir_graph_pass_t *optimize_load_store_pass(const char *name)
2295 {
2296         return def_graph_pass_ret(name ? name : "ldst", optimize_load_store);
2297 }  /* optimize_load_store_pass */