experimental load-store improvement
[libfirm] / ir / opt / ldstopt.c
/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Load/Store optimizations.
 * @author  Michael Beck
 * @version $Id$
 */
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#include <string.h>
#include <stdbool.h>

#include "iroptimize.h"
#include "irnode_t.h"
#include "irgraph_t.h"
#include "irmode_t.h"
#include "iropt_t.h"
#include "ircons_t.h"
#include "irgmod.h"
#include "irgwalk.h"
#include "irvrfy.h"
#include "tv_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "irflag_t.h"
#include "array.h"
#include "irhooks.h"
#include "iredges.h"
#include "irtools.h"
#include "opt_polymorphy.h"
#include "irmemory.h"
#include "xmalloc.h"
#include "irphase_t.h"
#include "irgopt.h"
#include "debug.h"

/** The debug handle. */
DEBUG_ONLY(static firm_dbg_module_t *dbg;)

#ifdef DO_CACHEOPT
#include "cacheopt/cachesim.h"
#endif

#undef IMAX
#define IMAX(a,b)       ((a) > (b) ? (a) : (b))

#define MAX_PROJ        IMAX(IMAX(pn_Load_max, pn_Store_max), pn_Call_max)

enum changes_t {
	DF_CHANGED = 1,       /**< data flow changed */
	CF_CHANGED = 2,       /**< control flow changed */
};

/**
 * walker environment
 */
typedef struct _walk_env_t {
	struct obstack obst;          /**< obstack the Load/Store infos are allocated on */
	unsigned changes;             /**< a bitmask of graph changes */
} walk_env_t;

/** A Load/Store info. */
typedef struct _ldst_info_t {
	ir_node  *projs[MAX_PROJ+1];  /**< list of Proj's of this node */
	ir_node  *exc_block;          /**< the exception block if available */
	int      exc_idx;             /**< predecessor index in the exception block */
	unsigned visited;             /**< visited counter for breaking loops */
} ldst_info_t;

/**
 * flags for control flow.
 */
enum block_flags_t {
	BLOCK_HAS_COND = 1,      /**< Block has conditional control flow */
	BLOCK_HAS_EXC  = 2       /**< Block has exceptional control flow */
};

/**
 * a Block info.
 */
typedef struct _block_info_t {
	unsigned flags;               /**< flags for the block */
} block_info_t;

/** the master visited flag for loop detection. */
static unsigned master_visited = 0;

#define INC_MASTER()       ++master_visited
#define MARK_NODE(info)    ((info)->visited = master_visited)
#define NODE_VISITED(info) ((info)->visited >= master_visited)
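
/* A node counts as visited iff its counter has caught up with master_visited;
 * INC_MASTER() therefore invalidates all previous marks at once, without
 * touching the individual info structs. */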

/**
 * get the Load/Store info of a node
 */
static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst) {
	ldst_info_t *info = get_irn_link(node);

	if (! info) {
		info = obstack_alloc(obst, sizeof(*info));
		memset(info, 0, sizeof(*info));
		set_irn_link(node, info);
	}
	return info;
}  /* get_ldst_info */

/**
 * get the Block info of a node
 */
static block_info_t *get_block_info(ir_node *node, struct obstack *obst) {
	block_info_t *info = get_irn_link(node);

	if (! info) {
		info = obstack_alloc(obst, sizeof(*info));
		memset(info, 0, sizeof(*info));
		set_irn_link(node, info);
	}
	return info;
}  /* get_block_info */

/**
 * update the projection info for a Load/Store
 */
static unsigned update_projs(ldst_info_t *info, ir_node *proj)
{
	long nr = get_Proj_proj(proj);

	assert(0 <= nr && nr <= MAX_PROJ && "Wrong proj from LoadStore");

	if (info->projs[nr]) {
		/* there is already one, do CSE */
		exchange(proj, info->projs[nr]);
		return DF_CHANGED;
	} else {
		info->projs[nr] = proj;
		return 0;
	}
}  /* update_projs */

/**
 * update the exception block info for a Load/Store node.
 *
 * @param info   the load/store info struct
 * @param block  the exception handler block for this load/store
 * @param pos    the control flow input of the block
 */
static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
{
	assert(info->exc_block == NULL && "more than one exception block found");

	info->exc_block = block;
	info->exc_idx   = pos;
	return 0;
}  /* update_exc */

/** Return the number of uses of an address node */
#define get_irn_n_uses(adr)     get_irn_n_edges(adr)

/**
 * walker, collects all Load/Store/Proj nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
{
	ir_opcode   opcode = get_irn_opcode(node);
	ir_node     *pred, *blk, *pred_blk;
	ldst_info_t *ldst_info;
	walk_env_t  *wenv = env;

	if (opcode == iro_Proj) {
		pred   = get_Proj_pred(node);
		opcode = get_irn_opcode(pred);

		if (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call) {
			ldst_info = get_ldst_info(pred, &wenv->obst);

			wenv->changes |= update_projs(ldst_info, node);

			/*
			 * Place the Proj's in the same block as their
			 * predecessor Load. This is always ok and prevents
			 * "non-SSA" form after optimizations if the Proj
			 * is in a wrong block.
			 */
			blk      = get_nodes_block(node);
			pred_blk = get_nodes_block(pred);
			if (blk != pred_blk) {
				wenv->changes |= DF_CHANGED;
				set_nodes_block(node, pred_blk);
			}
		}
	} else if (opcode == iro_Block) {
		int i;

		for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
			ir_node      *pred_block, *proj;
			block_info_t *bl_info;
			int          is_exc = 0;

			pred = proj = get_Block_cfgpred(node, i);

			if (is_Proj(proj)) {
				pred   = get_Proj_pred(proj);
				is_exc = get_Proj_proj(proj) == pn_Generic_X_except;
			}

			/* ignore Bad predecessors, they will be removed later */
			if (is_Bad(pred))
				continue;

			pred_block = get_nodes_block(pred);
			bl_info    = get_block_info(pred_block, &wenv->obst);

			if (is_fragile_op(pred) && is_exc)
				bl_info->flags |= BLOCK_HAS_EXC;
			else if (is_irn_forking(pred))
				bl_info->flags |= BLOCK_HAS_COND;

			opcode = get_irn_opcode(pred);
			if (is_exc && (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call)) {
				ldst_info = get_ldst_info(pred, &wenv->obst);

				wenv->changes |= update_exc(ldst_info, node, i);
			}
		}
	}
}  /* collect_nodes */

/**
 * Returns an entity if the address ptr points to a constant one.
 *
 * @param ptr  the address
 *
 * @return an entity or NULL
 */
static ir_entity *find_constant_entity(ir_node *ptr)
{
	for (;;) {
		if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
			ir_entity *ent = get_SymConst_entity(ptr);
			if (variability_constant == get_entity_variability(ent))
				return ent;
			return NULL;
		} else if (is_Sel(ptr)) {
			ir_entity *ent = get_Sel_entity(ptr);
			ir_type   *tp  = get_entity_owner(ent);

			/* Do not fiddle with polymorphism. */
			if (is_Class_type(tp) &&
				((get_entity_n_overwrites(ent)    != 0) ||
				(get_entity_n_overwrittenby(ent) != 0)   ) )
				return NULL;

			if (is_Array_type(tp)) {
				/* check bounds */
				int i, n;

				for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
					ir_node *bound;
					tarval *tlower, *tupper;
					ir_node *index = get_Sel_index(ptr, i);
					tarval *tv     = computed_value(index);

					/* check if the index is constant */
					if (tv == tarval_bad)
						return NULL;

					bound  = get_array_lower_bound(tp, i);
					tlower = computed_value(bound);
					bound  = get_array_upper_bound(tp, i);
					tupper = computed_value(bound);

					if (tlower == tarval_bad || tupper == tarval_bad)
						return NULL;

					if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
						return NULL;
					if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
						return NULL;

					/* ok, bounds check finished */
				}
			}

			if (variability_constant == get_entity_variability(ent))
				return ent;

			/* try next */
			ptr = get_Sel_ptr(ptr);
		} else if (is_Add(ptr)) {
			ir_node *l = get_Add_left(ptr);
			ir_node *r = get_Add_right(ptr);

			if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
				ptr = l;
			else if (get_irn_mode(r) == get_irn_mode(ptr) && is_Const(l))
				ptr = r;
			else
				return NULL;

			/* for now, we support only one addition, reassoc should fold all others */
			if (! is_SymConst(ptr) && !is_Sel(ptr))
				return NULL;
		} else if (is_Sub(ptr)) {
			ir_node *l = get_Sub_left(ptr);
			ir_node *r = get_Sub_right(ptr);

			if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
				ptr = l;
			else
				return NULL;
			/* for now, we support only one subtraction, reassoc should fold all others */
			if (! is_SymConst(ptr) && !is_Sel(ptr))
				return NULL;
		} else
			return NULL;
	}
}  /* find_constant_entity */

/**
 * Return the Selection index of a Sel node from dimension dim
 */
static long get_Sel_array_index_long(ir_node *n, int dim) {
	ir_node *index = get_Sel_index(n, dim);
	assert(is_Const(index));
	return get_tarval_long(get_Const_tarval(index));
}  /* get_Sel_array_index_long */

/**
 * Returns the accessed component graph path for a
 * node computing an address.
 *
 * @param ptr    the node computing the address
 * @param depth  current depth in steps upward from the root
 *               of the address
 */
static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
	compound_graph_path *res = NULL;
	ir_entity           *root, *field, *ent;
	int                 path_len, pos, idx;
	tarval              *tv;
	ir_type             *tp;

	if (is_SymConst(ptr)) {
		/* a SymConst. If the depth is 0, this is an access to a global
		 * entity and we don't need a component path, else we know
		 * at least its length.
		 */
		assert(get_SymConst_kind(ptr) == symconst_addr_ent);
		root = get_SymConst_entity(ptr);
		res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
	} else if (is_Sel(ptr)) {
		/* it's a Sel, go up until we find the root */
		res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);
		if (res == NULL)
			return NULL;

		/* fill up the step in the path at the current position */
		field    = get_Sel_entity(ptr);
		path_len = get_compound_graph_path_length(res);
		pos      = path_len - depth - 1;
		set_compound_graph_path_node(res, pos, field);

		if (is_Array_type(get_entity_owner(field))) {
			assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
			set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
		}
	} else if (is_Add(ptr)) {
		ir_node *l = get_Add_left(ptr);
		ir_node *r = get_Add_right(ptr);
		ir_mode *mode;

		if (is_Const(r)) {
			ptr = l;
			tv  = get_Const_tarval(r);
		} else {
			ptr = r;
			tv  = get_Const_tarval(l);
		}
ptr_arith:
		mode = get_tarval_mode(tv);

		/* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
		if (is_Sel(ptr)) {
			field = get_Sel_entity(ptr);
		} else {
			field = get_SymConst_entity(ptr);
		}
		idx = 0;
		for (ent = field;;) {
			unsigned size;
			tarval   *sz, *tv_index, *tlower, *tupper;
			ir_node  *bound;

			tp = get_entity_type(ent);
			if (! is_Array_type(tp))
				break;
			ent = get_array_element_entity(tp);
			size = get_type_size_bytes(get_entity_type(ent));
			sz   = new_tarval_from_long(size, mode);

			tv_index = tarval_div(tv, sz);
			tv       = tarval_mod(tv, sz);

			if (tv_index == tarval_bad || tv == tarval_bad)
				return NULL;

			assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
			bound  = get_array_lower_bound(tp, 0);
			tlower = computed_value(bound);
			bound  = get_array_upper_bound(tp, 0);
			tupper = computed_value(bound);

			if (tlower == tarval_bad || tupper == tarval_bad)
				return NULL;

			if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
				return NULL;
			if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
				return NULL;

			/* ok, bounds check finished */
			++idx;
		}
		if (! tarval_is_null(tv)) {
			/* access to some struct/union member */
			return NULL;
		}

		/* should be at least ONE array */
		if (idx == 0)
			return NULL;

		res = rec_get_accessed_path(ptr, depth + idx);
		if (res == NULL)
			return NULL;

		path_len = get_compound_graph_path_length(res);
		pos      = path_len - depth - idx;

		for (ent = field;;) {
			unsigned size;
			tarval   *sz, *tv_index;
			long     index;

			tp = get_entity_type(ent);
			if (! is_Array_type(tp))
				break;
			ent = get_array_element_entity(tp);
			set_compound_graph_path_node(res, pos, ent);

			size = get_type_size_bytes(get_entity_type(ent));
			sz   = new_tarval_from_long(size, mode);

			tv_index = tarval_div(tv, sz);
			tv       = tarval_mod(tv, sz);

			/* worked above, should work again */
			assert(tv_index != tarval_bad && tv != tarval_bad);

			/* bounds already checked above */
			index = get_tarval_long(tv_index);
			set_compound_graph_path_array_index(res, pos, index);
			++pos;
		}
	} else if (is_Sub(ptr)) {
		ir_node *l = get_Sub_left(ptr);
		ir_node *r = get_Sub_right(ptr);

		ptr = l;
		tv  = get_Const_tarval(r);
		tv  = tarval_neg(tv);
		goto ptr_arith;
	}
	return res;
}  /* rec_get_accessed_path */

/**
 * Returns an access path or NULL.  The access path is only
 * valid if the graph is in phase_high and _no_ address computation is used.
 */
static compound_graph_path *get_accessed_path(ir_node *ptr) {
	return rec_get_accessed_path(ptr, 0);
}  /* get_accessed_path */

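/** One step of an access path: the entity selected at this level and,
 *  for array entities, the constant element index. */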
typedef struct path_entry {
	ir_entity         *ent;
	struct path_entry *next;
	long              index;
} path_entry;

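/*
 * Walk an address expression down to its SymConst root, collecting one
 * path_entry per selected component, then descend the root entity's
 * initializer along the collected path and return the constant value
 * found there, or NULL if this is not a constant access.
 */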
static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) {
	path_entry       entry, *p;
	ir_entity        *ent, *field;
	ir_initializer_t *initializer;
	tarval           *tv;
	ir_type          *tp;
	unsigned         n;

	entry.next = next;
	if (is_SymConst(ptr)) {
		/* found the root */
		ent         = get_SymConst_entity(ptr);
		initializer = get_entity_initializer(ent);
		for (p = next; p != NULL;) {
			if (initializer->kind != IR_INITIALIZER_COMPOUND)
				return NULL;
			n  = get_initializer_compound_n_entries(initializer);
			tp = get_entity_type(ent);

			if (is_Array_type(tp)) {
				ent = get_array_element_entity(tp);
				if (ent != p->ent) {
					/* a missing [0] */
					if (0 >= n)
						return NULL;
					initializer = get_initializer_compound_value(initializer, 0);
					continue;
				}
			}
			if (p->index >= n)
				return NULL;
			initializer = get_initializer_compound_value(initializer, p->index);

			ent = p->ent;
			p   = p->next;
		}
		tp = get_entity_type(ent);
		while (is_Array_type(tp)) {
			ent = get_array_element_entity(tp);
			tp = get_entity_type(ent);
			/* a missing [0] */
			n  = get_initializer_compound_n_entries(initializer);
			if (0 >= n)
				return NULL;
			initializer = get_initializer_compound_value(initializer, 0);
		}

		switch (initializer->kind) {
		case IR_INITIALIZER_CONST:
			return get_initializer_const_value(initializer);
		case IR_INITIALIZER_TARVAL:
		case IR_INITIALIZER_NULL:
		default:
			return NULL;
		}
	} else if (is_Sel(ptr)) {
		entry.ent = field = get_Sel_entity(ptr);
		tp = get_entity_owner(field);
		if (is_Array_type(tp)) {
			assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
			entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
		} else {
			int i, n_members = get_compound_n_members(tp);
			for (i = 0; i < n_members; ++i) {
				if (get_compound_member(tp, i) == field)
					break;
			}
			if (i >= n_members) {
				/* not found: should NOT happen */
				return NULL;
			}
			entry.index = i;
		}
		return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
	} else if (is_Add(ptr)) {
		ir_node  *l = get_Add_left(ptr);
		ir_node  *r = get_Add_right(ptr);
		ir_mode  *mode;
		unsigned pos;

		if (is_Const(r)) {
			ptr = l;
			tv  = get_Const_tarval(r);
		} else {
			ptr = r;
			tv  = get_Const_tarval(l);
		}
ptr_arith:
		mode = get_tarval_mode(tv);

		/* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
		if (is_Sel(ptr)) {
			field = get_Sel_entity(ptr);
		} else {
			field = get_SymConst_entity(ptr);
		}

		/* count needed entries */
		pos = 0;
		for (ent = field;;) {
			tp = get_entity_type(ent);
			if (! is_Array_type(tp))
				break;
			ent = get_array_element_entity(tp);
			++pos;
		}
		/* should be at least ONE entry */
		if (pos == 0)
			return NULL;

		/* allocate the right number of entries */
		NEW_ARR_A(path_entry, p, pos);

		/* fill them up */
		pos = 0;
		for (ent = field;;) {
			unsigned size;
			tarval   *sz, *tv_index, *tlower, *tupper;
			long     index;
			ir_node  *bound;

			tp = get_entity_type(ent);
			if (! is_Array_type(tp))
				break;
			ent = get_array_element_entity(tp);
			p[pos].ent  = ent;
			p[pos].next = &p[pos + 1];

			size = get_type_size_bytes(get_entity_type(ent));
			sz   = new_tarval_from_long(size, mode);

			tv_index = tarval_div(tv, sz);
			tv       = tarval_mod(tv, sz);

			if (tv_index == tarval_bad || tv == tarval_bad)
				return NULL;

			assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
			bound  = get_array_lower_bound(tp, 0);
			tlower = computed_value(bound);
			bound  = get_array_upper_bound(tp, 0);
			tupper = computed_value(bound);

			if (tlower == tarval_bad || tupper == tarval_bad)
				return NULL;

			if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
				return NULL;
			if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
				return NULL;

			/* ok, bounds check finished */
			index = get_tarval_long(tv_index);
			p[pos].index = index;
			++pos;
		}
		if (! tarval_is_null(tv)) {
			/* hmm, wrong access */
			return NULL;
		}
		p[pos - 1].next = next;
		return rec_find_compound_ent_value(ptr, p);
	} else if (is_Sub(ptr)) {
		ir_node *l = get_Sub_left(ptr);
		ir_node *r = get_Sub_right(ptr);

		ptr = l;
		tv  = get_Const_tarval(r);
		tv  = tarval_neg(tv);
		goto ptr_arith;
	}
	return NULL;
}

static ir_node *find_compound_ent_value(ir_node *ptr) {
	return rec_find_compound_ent_value(ptr, NULL);
}

/* forward */
static void reduce_adr_usage(ir_node *ptr);

/**
 * Update a Load that may have lost its uses.
 */
static void handle_load_update(ir_node *load) {
	ldst_info_t *info = get_irn_link(load);

	/* do NOT touch volatile loads for now */
	if (get_Load_volatility(load) == volatility_is_volatile)
		return;

	if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
		ir_node *ptr = get_Load_ptr(load);
		ir_node *mem = get_Load_mem(load);

		/* a Load whose value is neither used nor exception-checked, remove it */
		exchange(info->projs[pn_Load_M], mem);
		if (info->projs[pn_Load_X_regular])
			exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
		kill_node(load);
		reduce_adr_usage(ptr);
	}
}  /* handle_load_update */

/**
 * A use of an address node has vanished. Check if it was a Proj
 * node and update the counters.
 */
static void reduce_adr_usage(ir_node *ptr) {
	if (is_Proj(ptr)) {
		if (get_irn_n_edges(ptr) <= 0) {
			/* this Proj is dead now */
			ir_node *pred = get_Proj_pred(ptr);

			if (is_Load(pred)) {
				ldst_info_t *info = get_irn_link(pred);
				info->projs[get_Proj_proj(ptr)] = NULL;

				/* this node lost its result proj, handle that */
				handle_load_update(pred);
			}
		}
	}
}  /* reduce_adr_usage */

/**
 * Check if an already existing value of mode old_mode can be converted
 * into the needed one new_mode without loss.
 */
static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode) {
	if (old_mode == new_mode)
		return 1;

	/* if both modes are two's complement ones, we can always convert the
	   stored value into the needed one. */
	if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
		  get_mode_arithmetic(old_mode) == irma_twos_complement &&
		  get_mode_arithmetic(new_mode) == irma_twos_complement)
		return 1;
	return 0;
}  /* can_use_stored_value */
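
/* For example, a value stored in a 32-bit two's complement mode can serve a
 * later 8- or 16-bit two's complement load (after truncation via a Conv),
 * while a smaller stored value can never serve a wider load. */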

/**
 * Check whether a Call is at least pure, i.e. it only reads memory.
 */
static unsigned is_Call_pure(ir_node *call) {
	ir_type *call_tp = get_Call_type(call);
	unsigned prop = get_method_additional_properties(call_tp);

	/* check first the call type */
	if ((prop & (mtp_property_const|mtp_property_pure)) == 0) {
		/* try the called entity */
		ir_node *ptr = get_Call_ptr(call);

		if (is_Global(ptr)) {
			ir_entity *ent = get_Global_entity(ptr);

			prop = get_entity_additional_properties(ent);
		}
	}
	return (prop & (mtp_property_const|mtp_property_pure)) != 0;
}  /* is_Call_pure */

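/**
 * Skip constant offset additions: follow the left operands of
 * Add(x, Const) chains and return the underlying base address.
 */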
static ir_node *get_base_ptr(ir_node *ptr)
{
	while (is_Add(ptr) && is_Const(get_Add_right(ptr))) {
		ptr = get_Add_left(ptr);
	}

	return ptr;
}

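/**
 * Sum up the constant offsets along an Add(x, Const) chain: for
 * Add(Add(p, Const 8), Const 4) this returns 12, while get_base_ptr()
 * returns p.
 */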
static long get_base_offset(ir_node *ptr)
{
	/* TODO: long might not be enough, we should probably use some tarval thingy... */
	long offset = 0;
	while (is_Add(ptr)) {
		ir_node *right = get_Add_right(ptr);
		if (!is_Const(right))
			break;
		offset += get_tarval_long(get_Const_tarval(right));
		ptr = get_Add_left(ptr);
	}

	return offset;
}

static int try_load_store(ir_node *load,
		ir_node *load_base_ptr, long load_offset, ir_node *store)
{
	ldst_info_t *info;
	ir_node *store_ptr      = get_Store_ptr(store);
	ir_node *store_base_ptr = get_base_ptr(store_ptr);
	ir_node *store_value;
	ir_mode *store_mode;
	ir_node *load_ptr;
	ir_mode *load_mode;
	long     store_offset   = get_base_offset(store_ptr);
	long     load_mode_len;
	long     store_mode_len;
	long     delta;
	int      res;

	if (load_base_ptr != store_base_ptr)
		return 0;

	load_mode      = get_Load_mode(load);
	load_mode_len  = get_mode_size_bytes(load_mode);
	store_mode     = get_irn_mode(get_Store_value(store));
	store_mode_len = get_mode_size_bytes(store_mode);

	/* the Load must read only bytes written by the Store: it must not
	 * start before the Store's range nor extend past its end */
	delta = load_offset - store_offset;
	if (delta < 0 || delta + load_mode_len > store_mode_len)
		return 0;
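
	/* Example: a 4-byte store at offset 0 covers a 2-byte load at offset 2:
	 * delta = 2 and delta + load_mode_len = 4 <= store_mode_len, so the
	 * loaded value can be extracted from the stored one below. */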

	store_value = get_Store_value(store);
	DBG_OPT_RAW(load, store_value);

	/* produce a shift to adjust the offset delta */
	if (delta > 0) {
		ir_node *cnst = new_r_Const_long(current_ir_graph,
				get_irg_start_block(current_ir_graph), mode_Iu, delta * 8);
		store_value = new_r_Shr(current_ir_graph, get_nodes_block(load),
					store_value, cnst, store_mode);
	}
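
	/* Note: extracting the accessed bytes with a right shift by delta * 8
	 * bits presumably assumes a little-endian byte layout; a big-endian
	 * target would need a different shift amount. This experimental code
	 * does not distinguish the two cases. */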

	/* add a Conv if needed */
	if (store_mode != load_mode) {
		store_value = new_r_Conv(current_ir_graph, get_nodes_block(load),
					 store_value, load_mode);
	}

	info = get_irn_link(load);
	if (info->projs[pn_Load_M])
		exchange(info->projs[pn_Load_M], get_Load_mem(load));

	res = 0;
	/* no exception */
	if (info->projs[pn_Load_X_except]) {
		exchange(info->projs[pn_Load_X_except], new_Bad());
		res |= CF_CHANGED;
	}
	if (info->projs[pn_Load_X_regular]) {
		exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
		res |= CF_CHANGED;
	}

	if (info->projs[pn_Load_res])
		exchange(info->projs[pn_Load_res], store_value);

	load_ptr = get_Load_ptr(load);
	kill_node(load);
	reduce_adr_usage(load_ptr);
	return res | DF_CHANGED;
}

/**
 * Follow the memory chain as long as there are only Loads,
 * alias-free Stores, and constant Calls and try to replace the
 * current Load by a previous one.
 * Note that in unreachable loops it might happen that we reach the
 * load again, or that we fall into a cycle.
 * We break such cycles using a special visited flag.
 *
 * INC_MASTER() must be called before diving in.
 */
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
	unsigned res = 0;
	ldst_info_t *info = get_irn_link(load);
	ir_node *pred;
	ir_node *ptr       = get_Load_ptr(load);
	ir_node *mem       = get_Load_mem(load);
	ir_mode *load_mode = get_Load_mode(load);
	ir_node *base_ptr  = get_base_ptr(ptr);
	long     load_offset = get_base_offset(ptr);

	for (pred = curr; load != pred; ) {
		ldst_info_t *pred_info = get_irn_link(pred);

		/*
		 * A Load immediately after a Store -- a read after write.
		 * We may remove the Load if neither the Load nor the Store has an
		 * exception handler OR if both are in the same MacroBlock. In the
		 * latter case the Load cannot throw an exception when the previous
		 * Store was quiet.
		 *
		 * Why do we need to check for a Store exception? If the Store cannot
		 * be executed (ROM) the exception handler might simply jump into
		 * the load MacroBlock :-(
		 * We could make it a little bit better if we would know that the
		 * exception handler of the Store jumps directly to the end...
		 */
		if (is_Store(pred) && ((pred_info->projs[pn_Store_X_except] == NULL
				&& info->projs[pn_Load_X_except] == NULL)
				|| get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)))
		{
			int changes
				= try_load_store(load, base_ptr, load_offset, pred);
			if (changes != 0) {
				return res | changes;
			}
		} else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
		           can_use_stored_value(get_Load_mode(pred), load_mode)) {
			/*
			 * A Load after a Load -- a read after read.
			 * We may remove the second Load if it does not have an exception
			 * handler OR if both are in the same MacroBlock. In the latter
			 * case the Load cannot throw an exception when the previous Load
			 * was quiet.
			 *
			 * Here, there is no need to check if the previous Load has an
			 * exception handler because both would have exactly the same
			 * exception...
			 */
			if (info->projs[pn_Load_X_except] == NULL || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)) {
				ir_node *value;

				DBG_OPT_RAR(load, pred);

				/* the result is used */
				if (info->projs[pn_Load_res]) {
					if (pred_info->projs[pn_Load_res] == NULL) {
						/* create a new Proj again */
						pred_info->projs[pn_Load_res] = new_r_Proj(current_ir_graph, get_nodes_block(pred), pred, get_Load_mode(pred), pn_Load_res);
					}
					value = pred_info->projs[pn_Load_res];

					/* add a Conv if needed */
					if (get_Load_mode(pred) != load_mode) {
						value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
					}

					exchange(info->projs[pn_Load_res], value);
				}

				if (info->projs[pn_Load_M])
					exchange(info->projs[pn_Load_M], mem);

				/* no exception */
				if (info->projs[pn_Load_X_except]) {
					exchange(info->projs[pn_Load_X_except], new_Bad());
					res |= CF_CHANGED;
				}
				if (info->projs[pn_Load_X_regular]) {
					exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
					res |= CF_CHANGED;
				}

				kill_node(load);
				reduce_adr_usage(ptr);
				return res | DF_CHANGED;
			}
		}

		if (is_Store(pred)) {
			/* check if we can pass through this store */
			ir_alias_relation rel = get_alias_relation(
				current_ir_graph,
				get_Store_ptr(pred),
				get_irn_mode(get_Store_value(pred)),
				ptr, load_mode);
			/* if there might be an alias, we cannot pass this Store */
			if (rel != ir_no_alias)
				break;
			pred = skip_Proj(get_Store_mem(pred));
		} else if (is_Load(pred)) {
			pred = skip_Proj(get_Load_mem(pred));
		} else if (is_Call(pred)) {
			if (is_Call_pure(pred)) {
				/* The called graph is at least pure, so there are no Stores
				   in it. We can handle it like a Load and skip it. */
				pred = skip_Proj(get_Call_mem(pred));
			} else {
				/* there might be Stores in the called graph, stop here */
				break;
			}
		} else {
			/* follow only Load chains */
			break;
		}

		/* check for cycles */
		if (NODE_VISITED(pred_info))
			break;
		MARK_NODE(pred_info);
	}

	if (is_Sync(pred)) {
		int i;

		/* handle all Sync predecessors */
		for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
			res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
			if (res)
				return res;
		}
	}

	return res;
}  /* follow_Mem_chain */

/*
 * Check if we can replace the load by a given const from
 * the const code irg.
 */
ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c) {
	ir_mode *c_mode = get_irn_mode(c);
	ir_mode *l_mode = get_Load_mode(load);
	ir_node *res    = NULL;

	if (c_mode != l_mode) {
		/* check if the mode matches OR can be easily converted into it */
		if (is_reinterpret_cast(c_mode, l_mode)) {
			/* we can safely cast */
			dbg_info *dbg   = get_irn_dbg_info(load);
			ir_node  *block = get_nodes_block(load);

			/* copy the value from the const code irg and cast it */
			res = copy_const_value(dbg, c);
			res = new_rd_Conv(dbg, current_ir_graph, block, res, l_mode);
		}
	} else {
		/* copy the value from the const code irg */
		res = copy_const_value(get_irn_dbg_info(load), c);
	}
	return res;
}  /* can_replace_load_by_const */

/**
 * optimize a Load
 *
 * @param load  the Load node
 */
static unsigned optimize_load(ir_node *load)
{
	ldst_info_t *info = get_irn_link(load);
	ir_node *mem, *ptr, *value;
	ir_entity *ent;
	unsigned res = 0;

	/* do NOT touch volatile loads for now */
	if (get_Load_volatility(load) == volatility_is_volatile)
		return 0;

	/* the address of the load to be optimized */
	ptr = get_Load_ptr(load);

	/*
	 * Check if we can remove the exception from a Load:
	 * This can be done, if the address is from a Sel(Alloc) and
	 * the Sel type is a subtype of the allocated type.
	 *
	 * This optimizes some often used OO constructs,
	 * like x = new O; x->t;
	 */
	if (info->projs[pn_Load_X_except]) {
		ir_node *addr = ptr;

		/* find base address */
		while (is_Sel(addr))
			addr = get_Sel_ptr(addr);
		if (is_Alloc(skip_Proj(skip_Cast(addr)))) {
			/* simple case: a direct load after an Alloc. A Firm Alloc throws
			 * an exception in case of out-of-memory. So, there is no way for an
			 * exception in this load.
			 * This code is constructed by the "exception lowering" in the Jack compiler.
			 */
			exchange(info->projs[pn_Load_X_except], new_Bad());
			info->projs[pn_Load_X_except] = NULL;
			if (info->projs[pn_Load_X_regular]) {
				exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
				info->projs[pn_Load_X_regular] = NULL;
			}
			res |= CF_CHANGED;
		}
	}

	/* The mem of the Load. Must still be returned after optimization. */
	mem = get_Load_mem(load);

	if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
		/* a Load whose value is neither used nor exception-checked, remove it */
		exchange(info->projs[pn_Load_M], mem);

		if (info->projs[pn_Load_X_regular]) {
			/* should not happen, but if it does, remove it */
			exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
			res |= CF_CHANGED;
		}
		kill_node(load);
		reduce_adr_usage(ptr);
		return res | DF_CHANGED;
	}

	/* Load from a constant polymorphic field, where we can resolve
	   polymorphism. */
	value = transform_polymorph_Load(load);
	if (value == load) {
		value = NULL;
		/* check if we can determine the entity that will be loaded */
		ent = find_constant_entity(ptr);
		if (ent != NULL) {
			if ((allocation_static == get_entity_allocation(ent)) &&
				(visibility_external_allocated != get_entity_visibility(ent))) {
				/* a static allocation that is not external: there should be NO
				 * exception when loading even if we cannot replace the load itself. */

				/* no exception, clear the info field as it might be checked later again */
				if (info->projs[pn_Load_X_except]) {
					exchange(info->projs[pn_Load_X_except], new_Bad());
					info->projs[pn_Load_X_except] = NULL;
					res |= CF_CHANGED;
				}
				if (info->projs[pn_Load_X_regular]) {
					exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
					info->projs[pn_Load_X_regular] = NULL;
					res |= CF_CHANGED;
				}

				if (variability_constant == get_entity_variability(ent)) {
					if (is_atomic_entity(ent)) {
						/* Might not be atomic after lowering of Sels.  In this
						 * case we could also load, but it's more complicated. */
						/* simpler case: we load the content of a constant value:
						 * replace it by the constant itself */
						value = get_atomic_ent_value(ent);
					} else {
						if (ent->has_initializer) {
							/* new style initializer */
							value = find_compound_ent_value(ptr);
						} else {
							/* old style initializer */
							compound_graph_path *path = get_accessed_path(ptr);

							if (path != NULL) {
								assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));

								value = get_compound_ent_value_by_path(ent, path);
								free_compound_graph_path(path);
							}
						}
					}
					if (value != NULL)
						value = can_replace_load_by_const(load, value);
				}
			}
		}
	}
	if (value != NULL) {
		/* we completely replace the load by this value */
		if (info->projs[pn_Load_X_except]) {
			exchange(info->projs[pn_Load_X_except], new_Bad());
			info->projs[pn_Load_X_except] = NULL;
			res |= CF_CHANGED;
		}
		if (info->projs[pn_Load_X_regular]) {
			exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
			info->projs[pn_Load_X_regular] = NULL;
			res |= CF_CHANGED;
		}
		if (info->projs[pn_Load_M]) {
			exchange(info->projs[pn_Load_M], mem);
			res |= DF_CHANGED;
		}
		if (info->projs[pn_Load_res]) {
			exchange(info->projs[pn_Load_res], value);
			res |= DF_CHANGED;
		}
		kill_node(load);
		reduce_adr_usage(ptr);
		return res;
	}

	/* Check if the address of this load is used more than once.
	 * If not, this load cannot be removed in any case. */
	if (get_irn_n_uses(ptr) <= 1 && get_irn_n_uses(get_base_ptr(ptr)) <= 1)
		return res;

	/*
	 * follow the memory chain as long as there are only Loads
	 * and try to replace the current Load or Store by a previous one.
	 * Note that in unreachable loops it might happen that we reach the
	 * load again, or that we fall into a cycle.
	 * We break such cycles using a special visited flag.
	 */
	INC_MASTER();
	res = follow_Mem_chain(load, skip_Proj(mem));
	return res;
}  /* optimize_load */

/**
 * Check whether a value of mode new_mode would completely overwrite a value
 * of mode old_mode in memory.
 */
static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
{
	return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
}  /* is_completely_overwritten */
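
/* Note: this only compares bit sizes; it is meaningful only when both values
 * are written to the same address, which the caller below ensures. */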
1213
1214 /**
1215  * follow the memory chain as long as there are only Loads and alias free Stores.
1216  *
1217  * INC_MASTER() must be called before dive into
1218  */
1219 static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
1220         unsigned res = 0;
1221         ldst_info_t *info = get_irn_link(store);
1222         ir_node *pred;
1223         ir_node *ptr = get_Store_ptr(store);
1224         ir_node *mem = get_Store_mem(store);
1225         ir_node *value = get_Store_value(store);
1226         ir_mode *mode  = get_irn_mode(value);
1227         ir_node *block = get_nodes_block(store);
1228         ir_node *mblk  = get_Block_MacroBlock(block);
1229
1230         for (pred = curr; pred != store;) {
1231                 ldst_info_t *pred_info = get_irn_link(pred);
1232
1233                 /*
1234                  * BEWARE: one might think that checking the modes is useless, because
1235                  * if the pointers are identical, they refer to the same object.
1236                  * This is only true in strong typed languages, not is C were the following
1237                  * is possible *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
1238                  * However, if the mode that is written have a bigger  or equal size the the old
1239                  * one, the old value is completely overwritten and can be killed ...
1240                  */
1241                 if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
1242                     get_nodes_MacroBlock(pred) == mblk &&
1243                     is_completely_overwritten(get_irn_mode(get_Store_value(pred)), mode)) {
1244                         /*
1245                          * a Store after a Store in the same MacroBlock -- a write after write.
1246                          * We may remove the first Store, if it does not have an exception handler.
1247                          *
1248                          * TODO: What, if both have the same exception handler ???
1249                          */
1250                         if (get_Store_volatility(pred) != volatility_is_volatile && !pred_info->projs[pn_Store_X_except]) {
1251                                 DBG_OPT_WAW(pred, store);
1252                                 exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
1253                                 kill_node(pred);
1254                                 reduce_adr_usage(ptr);
1255                                 return DF_CHANGED;
1256                         }
1257                 } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
1258                            value == pred_info->projs[pn_Load_res]) {
1259                         /*
1260                          * a Store of a value just loaded from the same address
1261                          * -- a write after read.
1262                          * We may remove the Store, if it does not have an exception
1263                          * handler.
1264                          */
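                        /*
                         * Hypothetical source pattern for this write-after-read case:
                         *
                         *   t = p->f;    // pred Load
                         *   p->f = t;    // this Store writes the value back unchanged
                         *
                         * The Store is redundant; only its memory edge is kept.
                         */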
1265                         if (! info->projs[pn_Store_X_except]) {
1266                                 DBG_OPT_WAR(store, pred);
1267                                 exchange(info->projs[pn_Store_M], mem);
1268                                 kill_node(store);
1269                                 reduce_adr_usage(ptr);
1270                                 return DF_CHANGED;
1271                         }
1272                 }
1273
1274                 if (is_Store(pred)) {
1275                         /* check if we can pass through this store */
1276                         ir_alias_relation rel = get_alias_relation(
1277                                 current_ir_graph,
1278                                 get_Store_ptr(pred),
1279                                 get_irn_mode(get_Store_value(pred)),
1280                                 ptr, mode);
1281                         /* if there might be an alias, we cannot pass this Store */
1282                         if (rel != ir_no_alias)
1283                                 break;
1284                         pred = skip_Proj(get_Store_mem(pred));
1285                 } else if (is_Load(pred)) {
1286                         ir_alias_relation rel = get_alias_relation(
1287                                 current_ir_graph, get_Load_ptr(pred), get_Load_mode(pred),
1288                                 ptr, mode);
1289                         if (rel != ir_no_alias)
1290                                 break;
1291
1292                         pred = skip_Proj(get_Load_mem(pred));
1293                 } else {
1294                         /* follow only Load chains */
1295                         break;
1296                 }
1297
1298                 /* check for cycles */
1299                 if (NODE_VISITED(pred_info))
1300                         break;
1301                 MARK_NODE(pred_info);
1302         }
1303
1304         if (is_Sync(pred)) {
1305                 int i;
1306
1307                 /* handle all Sync predecessors */
1308                 for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
1309                         res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
1310                         if (res)
1311                                 break;
1312                 }
1313         }
1314         return res;
1315 }  /* follow_Mem_chain_for_Store */
1316
1317 /**
1318  * optimize a Store
1319  *
1320  * @param store  the Store node
1321  */
1322 static unsigned optimize_store(ir_node *store) {
1323         ir_node *ptr, *mem;
1324
1325         if (get_Store_volatility(store) == volatility_is_volatile)
1326                 return 0;
1327
1328         ptr = get_Store_ptr(store);
1329
1330         /* Check whether the address of this Store is used more than once.
1331          * If not, this Store cannot be removed in any case. */
1332         if (get_irn_n_uses(ptr) <= 1)
1333                 return 0;
1334
1335         mem = get_Store_mem(store);
1336
1337         /* follow the memory chain as long as there are only Loads and alias-free Stores */
1338         INC_MASTER();
1339
1340         return follow_Mem_chain_for_Store(store, skip_Proj(mem));
1341 }  /* optimize_store */
1342
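/*
 * Source-level sketch of the transformation below (illustrative only,
 * names are made up):
 *
 *   if (c)                  =>   g = c ? a : b;
 *           g = a;
 *   else
 *           g = b;
 *
 * The Stores in the two branches are replaced by a data Phi selecting
 * the value and a single Store in the join block.
 */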
1343 /**
1344  * walker, optimizes Phi after Stores to identical places:
1345  * Does the following optimization:
1346  * @verbatim
1347  *
1348  *   val1   val2   val3          val1  val2  val3
1349  *    |      |      |               \    |    /
1350  *  Store  Store  Store              \   |   /
1351  *      \    |    /                   PhiData
1352  *       \   |   /                       |
1353  *        \  |  /                      Store
1354  *          PhiM
1355  *
1356  * @endverbatim
1357  * This reduces the number of stores and allows for predicated execution.
1358  * However, it moves Stores back towards the end of a function, which may be bad.
1359  *
1360  * This is only possible if the predecessor blocks have only one successor.
1361  */
1362 static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
1363 {
1364         int i, n;
1365         ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
1366         ir_mode *mode;
1367         ir_node **inM, **inD, **projMs;
1368         int *idx;
1369         dbg_info *db = NULL;
1370         ldst_info_t *info;
1371         block_info_t *bl_info;
1372         unsigned res = 0;
1373
1374         /* Must be a memory Phi */
1375         if (get_irn_mode(phi) != mode_M)
1376                 return 0;
1377
1378         n = get_Phi_n_preds(phi);
1379         if (n <= 0)
1380                 return 0;
1381
1382         /* the memory Proj must have exactly one user (this Phi) */
1383         projM = get_Phi_pred(phi, 0);
1384         if (get_irn_n_edges(projM) != 1)
1385                 return 0;
1386
1387         store = skip_Proj(projM);
1388         old_store = store;
1389         if (!is_Store(store))
1390                 return 0;
1391
1392         block = get_nodes_block(store);
1393
1394         /* abort on dead blocks */
1395         if (is_Block_dead(block))
1396                 return 0;
1397
1398         /* check if the block is post dominated by Phi-block
1399            and has no exception exit */
1400         bl_info = get_irn_link(block);
1401         if (bl_info->flags & BLOCK_HAS_EXC)
1402                 return 0;
1403
1404         phi_block = get_nodes_block(phi);
1405         if (! block_strictly_postdominates(phi_block, block))
1406                 return 0;
1407
1408         /* this is the address of the store */
1409         ptr  = get_Store_ptr(store);
1410         mode = get_irn_mode(get_Store_value(store));
1411         info = get_irn_link(store);
1412         exc  = info->exc_block;
1413
1414         for (i = 1; i < n; ++i) {
1415                 ir_node *pred = get_Phi_pred(phi, i);
1416
1417                 if (get_irn_n_edges(pred) != 1)
1418                         return 0;
1419
1420                 pred = skip_Proj(pred);
1421                 if (!is_Store(pred))
1422                         return 0;
1423
1424                 if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
1425                         return 0;
1426
1427                 info = get_irn_link(pred);
1428
1429                 /* check, if all stores have the same exception flow */
1430                 if (exc != info->exc_block)
1431                         return 0;
1432
1433                 /* abort on dead blocks */
1434                 block = get_nodes_block(pred);
1435                 if (is_Block_dead(block))
1436                         return 0;
1437
1438                 /* check if the block is post dominated by Phi-block
1439                    and has no exception exit. Note that block must be different from
1440            Phi-block, else we would move a Store from the End of a block to its
1441                    Start... */
1442                 bl_info = get_irn_link(block);
1443                 if (bl_info->flags & BLOCK_HAS_EXC)
1444                         return 0;
1445                 if (block == phi_block || ! block_postdominates(phi_block, block))
1446                         return 0;
1447         }
1448
1449         /*
1450          * ok, when we are here, we found all predecessors of a Phi that
1451          * are Stores to the same address and size. That means whatever
1452          * we do before we enter the block of the Phi, we do a Store.
1453          * So, we can move the Store to the current block:
1454          *
1455          *   val1    val2    val3          val1  val2  val3
1456          *    |       |       |               \    |    /
1457          * | Str | | Str | | Str |             \   |   /
1458          *      \     |     /                   PhiData
1459          *       \    |    /                       |
1460          *        \   |   /                       Str
1461          *           PhiM
1462          *
1463          * Is only allowed if the predecessor blocks have only one successor.
1464          */
1465
1466         NEW_ARR_A(ir_node *, projMs, n);
1467         NEW_ARR_A(ir_node *, inM, n);
1468         NEW_ARR_A(ir_node *, inD, n);
1469         NEW_ARR_A(int, idx, n);
1470
1471         /* Prepare: Collect all Store nodes.  We must do this
1472            first because we otherwise may lose a store when exchanging its
1473            memory Proj.
1474          */
1475         for (i = n - 1; i >= 0; --i) {
1476                 ir_node *store;
1477
1478                 projMs[i] = get_Phi_pred(phi, i);
1479                 assert(is_Proj(projMs[i]));
1480
1481                 store = get_Proj_pred(projMs[i]);
1482                 info  = get_irn_link(store);
1483
1484                 inM[i] = get_Store_mem(store);
1485                 inD[i] = get_Store_value(store);
1486                 idx[i] = info->exc_idx;
1487         }
1488         block = get_nodes_block(phi);
1489
1490         /* second step: create a new memory Phi */
1491         phiM = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inM, mode_M);
1492
1493         /* third step: create a new data Phi */
1494         phiD = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inD, mode);
1495
1496         /* rewire memory and kill the node */
1497         for (i = n - 1; i >= 0; --i) {
1498                 ir_node *proj  = projMs[i];
1499
1500                 if (is_Proj(proj)) {
1501                         ir_node *store = get_Proj_pred(proj);
1502                         exchange(proj, inM[i]);
1503                         kill_node(store);
1504                 }
1505         }
1506
1507         /* fourth step: create the Store */
1508         store = new_rd_Store(db, current_ir_graph, block, phiM, ptr, phiD);
1509 #ifdef DO_CACHEOPT
1510         co_set_irn_name(store, co_get_irn_ident(old_store));
1511 #endif
1512
1513         projM = new_rd_Proj(NULL, current_ir_graph, block, store, mode_M, pn_Store_M);
1514
1515         info = get_ldst_info(store, &wenv->obst);
1516         info->projs[pn_Store_M] = projM;
1517
1518         /* fifth step: repair exception flow */
1519         if (exc) {
1520                 ir_node *projX = new_rd_Proj(NULL, current_ir_graph, block, store, mode_X, pn_Store_X_except);
1521
1522                 info->projs[pn_Store_X_except] = projX;
1523                 info->exc_block                = exc;
1524                 info->exc_idx                  = idx[0];
1525
1526                 for (i = 0; i < n; ++i) {
1527                         set_Block_cfgpred(exc, idx[i], projX);
1528                 }
1529
1530                 if (n > 1) {
1531                         /* TODO: the exception block should be optimized, as some inputs are identical now */
1532                 }
1533
1534                 res |= CF_CHANGED;
1535         }
1536
1537         /* sixth step: replace old Phi */
1538         exchange(phi, projM);
1539
1540         return res | DF_CHANGED;
1541 }  /* optimize_phi */
1542
1543 /**
1544  * walker, do the optimizations
1545  */
1546 static void do_load_store_optimize(ir_node *n, void *env) {
1547         walk_env_t *wenv = env;
1548
1549         switch (get_irn_opcode(n)) {
1550
1551         case iro_Load:
1552                 wenv->changes |= optimize_load(n);
1553                 break;
1554
1555         case iro_Store:
1556                 wenv->changes |= optimize_store(n);
1557                 break;
1558
1559         case iro_Phi:
1560                 wenv->changes |= optimize_phi(n, wenv);
1561                 break;
1562
1563         default:
1564                 ;
1565         }
1566 }  /* do_load_store_optimize */
1567
1568 /** A scc. */
1569 typedef struct scc {
1570         ir_node *head;          /**< the head of the list */
1571 } scc;
1572
1573 /** A node entry. */
1574 typedef struct node_entry {
1575         unsigned DFSnum;    /**< the DFS number of this node */
1576         unsigned low;       /**< the low number of this node */
1577         ir_node  *header;   /**< the header of this node */
1578         int      in_stack;  /**< flag, set if the node is on the stack */
1579         ir_node  *next;     /**< link to the next node in the same scc */
1580         scc      *pscc;     /**< the scc of this node */
1581         unsigned POnum;     /**< the post order number for blocks */
1582 } node_entry;
1583
1584 /** A loop entry. */
1585 typedef struct loop_env {
1586         ir_phase ph;           /**< the phase object */
1587         ir_node  **stack;      /**< the node stack */
1588         int      tos;          /**< top-of-stack index */
1589         unsigned nextDFSnum;   /**< the current DFS number */
1590         unsigned POnum;        /**< current post order number */
1591
1592         unsigned changes;      /**< a bitmask of graph changes */
1593 } loop_env;
1594
1595 /**
1596  * Gets the node_entry of a node.
1597  */
1598 static node_entry *get_irn_ne(ir_node *irn, loop_env *env) {
1599         ir_phase   *ph = &env->ph;
1600         node_entry *e  = phase_get_irn_data(&env->ph, irn);
1601
1602         if (! e) {
1603                 e = phase_alloc(ph, sizeof(*e));
1604                 memset(e, 0, sizeof(*e));
1605                 phase_set_irn_data(ph, irn, e);
1606         }
1607         return e;
1608 }  /* get_irn_ne */
1609
1610 /**
1611  * Push a node onto the stack.
1612  *
1613  * @param env   the loop environment
1614  * @param n     the node to push
1615  */
1616 static void push(loop_env *env, ir_node *n) {
1617         node_entry *e;
1618
1619         if (env->tos == ARR_LEN(env->stack)) {
1620                 int nlen = ARR_LEN(env->stack) * 2;
1621                 ARR_RESIZE(ir_node *, env->stack, nlen);
1622         }
1623         env->stack[env->tos++] = n;
1624         e = get_irn_ne(n, env);
1625         e->in_stack = 1;
1626 }  /* push */
1627
1628 /**
1629  * pop a node from the stack
1630  *
1631  * @param env   the loop environment
1632  *
1633  * @return  The topmost node
1634  */
1635 static ir_node *pop(loop_env *env) {
1636         ir_node *n = env->stack[--env->tos];
1637         node_entry *e = get_irn_ne(n, env);
1638
1639         e->in_stack = 0;
1640         return n;
1641 }  /* pop */
1642
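/*
 * Illustration (not from the original code): for a loop with header
 * block H, an address computed in a block that strictly dominates H
 * (e.g. the address of a global taken before the loop) is a region
 * constant, while a pointer computed inside the loop body is not.
 */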
1643 /**
1644  * Check if irn is a region constant.
1645  * The block or irn must strictly dominate the header block.
1646  *
1647  * @param irn           the node to check
1648  * @param header_block  the header block of the induction variable
1649  */
1650 static int is_rc(ir_node *irn, ir_node *header_block) {
1651         ir_node *block = get_nodes_block(irn);
1652
1653         return (block != header_block) && block_dominates(block, header_block);
1654 }  /* is_rc */
1655
1656 typedef struct phi_entry phi_entry;
1657 struct phi_entry {
1658         ir_node   *phi;    /**< A phi with a region const memory. */
1659         int       pos;     /**< The position of the region const memory */
1660         ir_node   *load;   /**< the newly created load for this phi */
1661         phi_entry *next;
1662 };
1663
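/*
 * Rough source-level sketch of the effect (hypothetical code; G is a
 * global that no Store inside the loop may alias):
 *
 *   while (c) {                     t = G;
 *           ... = G;        =>      while (c) {
 *   }                                       ... = t;
 *                                   }
 *
 * The Load is executed once per loop entry edge, in the predecessor
 * block of the loop's memory Phi, instead of on every iteration.
 */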
1664 /**
1665  * Move Loads out of loops if possible.
1666  *
1667  * @param pscc   the loop described by an SCC
1668  * @param env    the loop environment
1669  */
1670 static void move_loads_out_of_loops(scc *pscc, loop_env *env) {
1671         ir_node   *phi, *load, *next, *other, *next_other;
1672         ir_entity *ent;
1673         int       j;
1674         phi_entry *phi_list = NULL;
1675
1676         /* collect all outer memories */
1677         for (phi = pscc->head; phi != NULL; phi = next) {
1678                 node_entry *ne = get_irn_ne(phi, env);
1679                 next = ne->next;
1680
1681                 /* check all memory Phi's */
1682                 if (! is_Phi(phi))
1683                         continue;
1684
1685         assert(get_irn_mode(phi) == mode_M && "DFS returned a non-memory Phi");
1686
1687                 for (j = get_irn_arity(phi) - 1; j >= 0; --j) {
1688                         ir_node    *pred = get_irn_n(phi, j);
1689                         node_entry *pe   = get_irn_ne(pred, env);
1690
1691                         if (pe->pscc != ne->pscc) {
1692                                 /* not in the same SCC, so the input is a region constant */
1693                                 phi_entry *pent = phase_alloc(&env->ph, sizeof(*pent));
1694
1695                                 pent->phi  = phi;
1696                                 pent->pos  = j;
1697                                 pent->next = phi_list;
1698                                 phi_list   = pent;
1699                         }
1700                 }
1701         }
1702         /* no Phis no fun */
1703         assert(phi_list != NULL && "DFS found a loop without Phi");
1704
1705         for (load = pscc->head; load; load = next) {
1706                 ir_mode *load_mode;
1707                 node_entry *ne = get_irn_ne(load, env);
1708                 next = ne->next;
1709
1710                 if (is_Load(load)) {
1711                         ldst_info_t *info = get_irn_link(load);
1712                         ir_node     *ptr = get_Load_ptr(load);
1713
1714                         /* for now, we cannot handle Loads with exceptions */
1715                         if (info->projs[pn_Load_res] == NULL || info->projs[pn_Load_X_regular] != NULL || info->projs[pn_Load_X_except] != NULL)
1716                                 continue;
1717
1718                         /* for now, we can only handle Load(Global) */
1719                         if (! is_Global(ptr))
1720                                 continue;
1721                         ent = get_Global_entity(ptr);
1722                         load_mode = get_Load_mode(load);
1723                         for (other = pscc->head; other != NULL; other = next_other) {
1724                                 node_entry *ne = get_irn_ne(other, env);
1725                                 next_other = ne->next;
1726
1727                                 if (is_Store(other)) {
1728                                         ir_alias_relation rel = get_alias_relation(
1729                                                 current_ir_graph,
1730                                                 get_Store_ptr(other),
1731                                                 get_irn_mode(get_Store_value(other)),
1732                                                 ptr, load_mode);
1733                                         /* if there might be an alias, we cannot pass this Store */
1734                                         if (rel != ir_no_alias)
1735                                                 break;
1736                                 }
1737                                 /* only pure Calls are allowed here, so ignore them */
1738                         }
1739                         if (other == NULL) {
1740                                 ldst_info_t *ninfo;
1741                                 phi_entry   *pe;
1742                                 dbg_info    *db;
1743
1744                                 /* for now, we cannot handle more than one outer memory input */
1745                                 if (phi_list->next != NULL)
1746                                         return;
1747
1748                                 /* yep, no aliasing Store found, Load can be moved */
1749                                 DB((dbg, LEVEL_1, "  Found a Load that could be moved: %+F\n", load));
1750
1751                                 db   = get_irn_dbg_info(load);
1752                                 for (pe = phi_list; pe != NULL; pe = pe->next) {
1753                                         int     pos   = pe->pos;
1754                                         ir_node *phi  = pe->phi;
1755                                         ir_node *blk  = get_nodes_block(phi);
1756                                         ir_node *pred = get_Block_cfgpred_block(blk, pos);
1757                                         ir_node *irn, *mem;
1758
1759                                         pe->load = irn = new_rd_Load(db, current_ir_graph, pred, get_Phi_pred(phi, pos), ptr, load_mode);
1760                                         ninfo = get_ldst_info(irn, phase_obst(&env->ph));
1761
1762                                         ninfo->projs[pn_Load_M] = mem = new_r_Proj(current_ir_graph, pred, irn, mode_M, pn_Load_M);
1763                                         set_Phi_pred(phi, pos, mem);
1764
1765                                         ninfo->projs[pn_Load_res] = new_r_Proj(current_ir_graph, pred, irn, load_mode, pn_Load_res);
1766
1767                                         DB((dbg, LEVEL_1, "  Created %+F in %+F\n", irn, pred));
1768                                 }
1769
1770                                 /* now kill the old Load */
1771                                 exchange(info->projs[pn_Load_M], get_Load_mem(load));
1772                                 exchange(info->projs[pn_Load_res], ninfo->projs[pn_Load_res]);
1773
1774                                 env->changes |= DF_CHANGED;
1775                         }
1776                 }
1777         }
1778 }  /* move_loads_out_of_loops */
1779
1780 /**
1781  * Process a loop SCC.
1782  *
1783  * @param pscc  the SCC
1784  * @param env   the loop environment
1785  */
1786 static void process_loop(scc *pscc, loop_env *env) {
1787         ir_node *irn, *next, *header = NULL;
1788         node_entry *b, *h = NULL;
1789         int j, only_phi, num_outside, process = 0;
1790         ir_node *out_rc;
1791
1792         /* find the header block for this scc */
1793         for (irn = pscc->head; irn; irn = next) {
1794                 node_entry *e = get_irn_ne(irn, env);
1795                 ir_node *block = get_nodes_block(irn);
1796
1797                 next = e->next;
1798                 b = get_irn_ne(block, env);
1799
1800                 if (header) {
1801                         if (h->POnum < b->POnum) {
1802                                 header = block;
1803                                 h      = b;
1804                         }
1805                 }
1806                 else {
1807                         header = block;
1808                         h      = b;
1809                 }
1810         }
1811
1812         /* check if this scc contains only Phi, Load or Store nodes */
1813         only_phi    = 1;
1814         num_outside = 0;
1815         out_rc      = NULL;
1816         for (irn = pscc->head; irn; irn = next) {
1817                 node_entry *e = get_irn_ne(irn, env);
1818
1819                 next = e->next;
1820                 switch (get_irn_opcode(irn)) {
1821                 case iro_Call:
1822                         if (is_Call_pure(irn)) {
1823                                 /* pure calls can be treated like loads */
1824                                 only_phi = 0;
1825                                 break;
1826                         }
1827                         /* non-pure calls must be handled like may-alias Stores */
1828                         goto fail;
1829                 case iro_CopyB:
1830                         /* cannot handle CopyB yet */
1831                         goto fail;
1832                 case iro_Load:
1833                         process = 1;
1834                         if (get_Load_volatility(irn) == volatility_is_volatile) {
1835                                 /* cannot handle loops with volatile Loads */
1836                                 goto fail;
1837                         }
1838                         only_phi = 0;
1839                         break;
1840                 case iro_Store:
1841                         if (get_Store_volatility(irn) == volatility_is_volatile) {
1842                                 /* cannot handle loops with volatile Stores */
1843                                 goto fail;
1844                         }
1845                         only_phi = 0;
1846                         break;
1847                 default:
1848                         only_phi = 0;
1849                         break;
1850                 case iro_Phi:
1851                         for (j = get_irn_arity(irn) - 1; j >= 0; --j) {
1852                                 ir_node *pred  = get_irn_n(irn, j);
1853                                 node_entry *pe = get_irn_ne(pred, env);
1854
1855                                 if (pe->pscc != e->pscc) {
1856                                         /* not in the same SCC, must be a region const */
1857                                         if (! is_rc(pred, header)) {
1858                                                 /* not a memory loop */
1859                                                 goto fail;
1860                                         }
1861                                         if (! out_rc) {
1862                                                 out_rc = pred;
1863                                                 ++num_outside;
1864                                         } else if (out_rc != pred) {
1865                                                 ++num_outside;
1866                                         }
1867                                 }
1868                         }
1869                         break;
1870                 }
1871         }
1872         if (! process)
1873                 goto fail;
1874
1875         /* found a memory loop */
1876         DB((dbg, LEVEL_2, "  Found a memory loop:\n  "));
1877         if (only_phi && num_outside == 1) {
1878                 /* a phi cycle with only one real predecessor can be collapsed */
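                /* Illustrative case: a self-referential memory Phi such as
                 * m = Phi(out_rc, m) carries no information and collapses to
                 * out_rc; the cycle may also span several Phis. */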
1879                 DB((dbg, LEVEL_2, "  Found a USELESS Phi cycle:\n  "));
1880
1881                 for (irn = pscc->head; irn; irn = next) {
1882                         node_entry *e = get_irn_ne(irn, env);
1883                         next = e->next;
1884                         e->header = NULL;
1885                         exchange(irn, out_rc);
1886                 }
1887                 env->changes |= DF_CHANGED;
1888                 return;
1889         }
1890
1891         /* set the header for every node in this scc */
1892         for (irn = pscc->head; irn; irn = next) {
1893                 node_entry *e = get_irn_ne(irn, env);
1894                 e->header = header;
1895                 next = e->next;
1896                 DB((dbg, LEVEL_2, " %+F,", irn));
1897         }
1898         DB((dbg, LEVEL_2, "\n"));
1899
1900         move_loads_out_of_loops(pscc, env);
1901
1902 fail:
1903         ;
1904 }  /* process_loop */
1905
1906 /**
1907  * Process a SCC.
1908  *
1909  * @param pscc  the SCC
1910  * @param env   the loop environment
1911  */
1912 static void process_scc(scc *pscc, loop_env *env) {
1913         ir_node *head = pscc->head;
1914         node_entry *e = get_irn_ne(head, env);
1915
1916 #ifdef DEBUG_libfirm
1917         {
1918                 ir_node *irn, *next;
1919
1920                 DB((dbg, LEVEL_4, " SCC at %p:\n ", pscc));
1921                 for (irn = pscc->head; irn; irn = next) {
1922                         node_entry *e = get_irn_ne(irn, env);
1923
1924                         next = e->next;
1925
1926                         DB((dbg, LEVEL_4, " %+F,", irn));
1927                 }
1928                 DB((dbg, LEVEL_4, "\n"));
1929         }
1930 #endif
1931
1932         if (e->next != NULL) {
1933                 /* this SCC has more than one member */
1934                 process_loop(pscc, env);
1935         }
1936 }  /* process_scc */
1937
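/*
 * Reminder of the Tarjan invariant used below (textbook algorithm, not
 * specific to this file): a node is the root of an SCC iff low == DFSnum
 * after all its successors have been processed; the SCC then consists of
 * all nodes popped from the stack down to and including that root.
 */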
1938 /**
1939  * Do Tarjan's SCC algorithm and drive load/store optimization.
1940  *
1941  * @param irn  start at this node
1942  * @param env  the loop environment
1943  */
1944 static void dfs(ir_node *irn, loop_env *env)
1945 {
1946         int i, n;
1947         node_entry *node = get_irn_ne(irn, env);
1948
1949         mark_irn_visited(irn);
1950
1951         node->DFSnum = env->nextDFSnum++;
1952         node->low    = node->DFSnum;
1953         push(env, irn);
1954
1955         /* handle preds */
1956         if (is_Phi(irn) || is_Sync(irn)) {
1957                 n = get_irn_arity(irn);
1958                 for (i = 0; i < n; ++i) {
1959                         ir_node *pred = get_irn_n(irn, i);
1960                         node_entry *o = get_irn_ne(pred, env);
1961
1962                         if (irn_not_visited(pred)) {
1963                                 dfs(pred, env);
1964                                 node->low = MIN(node->low, o->low);
1965                         }
1966                         if (o->DFSnum < node->DFSnum && o->in_stack)
1967                                 node->low = MIN(o->DFSnum, node->low);
1968                 }
1969         } else if (is_fragile_op(irn)) {
1970                 ir_node *pred = get_fragile_op_mem(irn);
1971                 node_entry *o = get_irn_ne(pred, env);
1972
1973                 if (irn_not_visited(pred)) {
1974                         dfs(pred, env);
1975                         node->low = MIN(node->low, o->low);
1976                 }
1977                 if (o->DFSnum < node->DFSnum && o->in_stack)
1978                         node->low = MIN(o->DFSnum, node->low);
1979         } else if (is_Proj(irn)) {
1980                 ir_node *pred = get_Proj_pred(irn);
1981                 node_entry *o = get_irn_ne(pred, env);
1982
1983                 if (irn_not_visited(pred)) {
1984                         dfs(pred, env);
1985                         node->low = MIN(node->low, o->low);
1986                 }
1987                 if (o->DFSnum < node->DFSnum && o->in_stack)
1988                         node->low = MIN(o->DFSnum, node->low);
1989         } else {
1990                 /* IGNORE predecessors */
1991         }
1993
1994         if (node->low == node->DFSnum) {
1995                 scc *pscc = phase_alloc(&env->ph, sizeof(*pscc));
1996                 ir_node *x;
1997
1998                 pscc->head = NULL;
1999                 do {
2000                         node_entry *e;
2001
2002                         x = pop(env);
2003                         e = get_irn_ne(x, env);
2004                         e->pscc    = pscc;
2005                         e->next    = pscc->head;
2006                         pscc->head = x;
2007                 } while (x != irn);
2008
2009                 process_scc(pscc, env);
2010         }
2011 }  /* dfs */
2012
2013 /**
2014  * Do the DFS on the memory edges of a graph.
2015  *
2016  * @param irg  the graph to process
2017  * @param env  the loop environment
2018  */
2019 static void do_dfs(ir_graph *irg, loop_env *env) {
2020         ir_graph *rem = current_ir_graph;
2021         ir_node  *endblk, *end;
2022         int      i;
2023
2024         current_ir_graph = irg;
2025         inc_irg_visited(irg);
2026
2027         /* visit all memory nodes */
2028         endblk = get_irg_end_block(irg);
2029         for (i = get_Block_n_cfgpreds(endblk) - 1; i >= 0; --i) {
2030                 ir_node *pred = get_Block_cfgpred(endblk, i);
2031
2032                 pred = skip_Proj(pred);
2033                 if (is_Return(pred))
2034                         dfs(get_Return_mem(pred), env);
2035                 else if (is_Raise(pred))
2036                         dfs(get_Raise_mem(pred), env);
2037                 else if (is_fragile_op(pred))
2038                         dfs(get_fragile_op_mem(pred), env);
2039                 else {
2040                         assert(0 && "Unknown EndBlock predecessor");
2041                 }
2042         }
2043
2044         /* visit the keep-alives */
2045         end = get_irg_end(irg);
2046         for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
2047                 ir_node *ka = get_End_keepalive(end, i);
2048
2049                 if (is_Phi(ka) && irn_not_visited(ka))
2050                         dfs(ka, env);
2051         }
2052         current_ir_graph = rem;
2053 }  /* do_dfs */
2054
2055 /**
2056  * Initialize new phase data. We always do this explicitly, so return NULL here.
2057  */
2058 static void *init_loop_data(ir_phase *ph, const ir_node *irn, void *data) {
2059         (void)ph;
2060         (void)irn;
2061         (void)data;
2062         return NULL;
2063 }  /* init_loop_data */
2064
2065 /**
2066  * Optimize Loads/Stores in loops.
2067  *
2068  * @param irg  the graph
2069  */
2070 static int optimize_loops(ir_graph *irg) {
2071         loop_env env;
2072
2073         env.stack         = NEW_ARR_F(ir_node *, 128);
2074         env.tos           = 0;
2075         env.nextDFSnum    = 0;
2076         env.POnum         = 0;
2077         env.changes       = 0;
2078         phase_init(&env.ph, "ldstopt", irg, PHASE_DEFAULT_GROWTH, init_loop_data, NULL);
2079
2080         /* calculate the SCC's and drive loop optimization. */
2081         do_dfs(irg, &env);
2082
2083         DEL_ARR_F(env.stack);
2084         phase_free(&env.ph);
2085
2086         return env.changes;
2087 }  /* optimize_loops */
2088
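/*
 * Typical driver usage (a sketch; the surrounding pass-manager code is
 * assumed and not part of this file):
 *
 *   int i;
 *   for (i = get_irp_n_irgs() - 1; i >= 0; --i)
 *           optimize_load_store(get_irp_irg(i));
 */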
2089 /*
2090  * do the load store optimization
2091  */
2092 void optimize_load_store(ir_graph *irg) {
2093         walk_env_t env;
2094
2095         FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");
2096
2097         assert(get_irg_phase_state(irg) != phase_building);
2098         assert(get_irg_pinned(irg) != op_pin_state_floats &&
2099                 "LoadStore optimization needs pinned graph");
2100
2101         /* we need landing pads */
2102         remove_critical_cf_edges(irg);
2103
2104         edges_assure(irg);
2105
2106         /* for Phi optimization post-dominators are needed ... */
2107         assure_postdoms(irg);
2108
2109         if (get_opt_alias_analysis()) {
2110                 assure_irg_address_taken_computed(irg);
2111                 assure_irp_globals_address_taken_computed();
2112         }
2113
2114         obstack_init(&env.obst);
2115         env.changes = 0;
2116
2117         /* init the links, then collect Loads/Stores/Proj's in lists */
2118         master_visited = 0;
2119         irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);
2120
2121         /* now we have collected enough information, optimize */
2122         irg_walk_graph(irg, NULL, do_load_store_optimize, &env);
2123
2124         env.changes |= optimize_loops(irg);
2125
2126         obstack_free(&env.obst, NULL);
2127
2128         /* Handle graph state */
2129         if (env.changes) {
2130                 set_irg_outs_inconsistent(irg);
2131         }
2132
2133         if (env.changes & CF_CHANGED) {
2134                 /* control flow changed: blocks might have Bad() predecessors,
2135                    so the dominance information is no longer valid */
2136                 set_irg_doms_inconsistent(irg);
2137         }
2138 }  /* optimize_load_store */