/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Load/Store optimizations.
 * @author  Michael Beck
 * @version $Id$
 */
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#include <string.h>

#include "iroptimize.h"
#include "irnode_t.h"
#include "irgraph_t.h"
#include "irmode_t.h"
#include "iropt_t.h"
#include "ircons_t.h"
#include "irgmod.h"
#include "irgwalk.h"
#include "irvrfy.h"
#include "tv_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "irflag_t.h"
#include "array_t.h"
#include "irhooks.h"
#include "iredges.h"
#include "irtools.h"
#include "opt_polymorphy.h"
#include "irmemory.h"
#include "irphase_t.h"
#include "irgopt.h"
#include "debug.h"

/** The debug handle. */
DEBUG_ONLY(static firm_dbg_module_t *dbg;)

#ifdef DO_CACHEOPT
#include "cacheopt/cachesim.h"
#endif

#undef IMAX
#define IMAX(a,b)       ((a) > (b) ? (a) : (b))

#define MAX_PROJ        IMAX(IMAX(pn_Load_max, pn_Store_max), pn_Call_max)

enum changes_t {
	DF_CHANGED = 1,       /**< data flow changed */
	CF_CHANGED = 2,       /**< control flow changed */
};

/**
 * walker environment
 */
typedef struct _walk_env_t {
	struct obstack obst;          /**< obstack used to allocate the info records */
	unsigned changes;             /**< a bitmask of graph changes */
} walk_env_t;

/** A Load/Store info. */
typedef struct _ldst_info_t {
	ir_node  *projs[MAX_PROJ];    /**< list of Proj's of this node */
	ir_node  *exc_block;          /**< the exception block if available */
	int      exc_idx;             /**< predecessor index in the exception block */
	unsigned visited;             /**< visited counter for breaking loops */
} ldst_info_t;

/**
 * flags for control flow.
 */
enum block_flags_t {
	BLOCK_HAS_COND = 1,      /**< Block has conditional control flow */
	BLOCK_HAS_EXC  = 2       /**< Block has exceptional control flow */
};

/**
 * a Block info.
 */
typedef struct _block_info_t {
	unsigned flags;               /**< flags for the block */
} block_info_t;

/** the master visited flag for loop detection. */
static unsigned master_visited = 0;

#define INC_MASTER()       ++master_visited
#define MARK_NODE(info)    ((info)->visited = master_visited)
#define NODE_VISITED(info) ((info)->visited >= master_visited)

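/*
 * Sketch of the intended use of these macros (illustrative only; the
 * real users are follow_Mem_chain() and follow_Mem_chain_for_Store()
 * further down, the variable names here are hypothetical):
 */
#if 0
	INC_MASTER();                      /* open a new walk generation */
	for (pred = curr; pred != load; /* pred advances along the memory chain */) {
		ldst_info_t *pred_info = get_irn_link(pred);
		if (NODE_VISITED(pred_info))
			break;                     /* already seen in this generation: cycle */
		MARK_NODE(pred_info);
	}
#endif
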
/**
 * get the Load/Store info of a node
 */
static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst) {
	ldst_info_t *info = get_irn_link(node);

	if (! info) {
		info = obstack_alloc(obst, sizeof(*info));
		memset(info, 0, sizeof(*info));
		set_irn_link(node, info);
	}
	return info;
}  /* get_ldst_info */

/**
 * get the Block info of a node
 */
static block_info_t *get_block_info(ir_node *node, struct obstack *obst) {
	block_info_t *info = get_irn_link(node);

	if (! info) {
		info = obstack_alloc(obst, sizeof(*info));
		memset(info, 0, sizeof(*info));
		set_irn_link(node, info);
	}
	return info;
}  /* get_block_info */

/**
 * update the projection info for a Load/Store
 */
static unsigned update_projs(ldst_info_t *info, ir_node *proj)
{
	long nr = get_Proj_proj(proj);

	assert(0 <= nr && nr <= MAX_PROJ && "Wrong proj from LoadStore");

	if (info->projs[nr]) {
		/* there is already one, do CSE */
		exchange(proj, info->projs[nr]);
		return DF_CHANGED;
	} else {
		info->projs[nr] = proj;
		return 0;
	}
}  /* update_projs */

/**
 * update the exception block info for a Load/Store node.
 *
 * @param info   the load/store info struct
 * @param block  the exception handler block for this load/store
 * @param pos    the control flow input of the block
 */
static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
{
	assert(info->exc_block == NULL && "more than one exception block found");

	info->exc_block = block;
	info->exc_idx   = pos;
	return 0;
}  /* update_exc */

/** Return the number of uses of an address node */
#define get_irn_n_uses(adr)     get_irn_n_edges(adr)

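/*
 * Note (an assumption, not checked here): this counts out-edges, so
 * the pass entry point is expected to have the edge information
 * computed first, e.g. via edges_assure(irg).
 */
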
/**
 * walker, collects all Load/Store/Proj nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
{
	ir_opcode   opcode = get_irn_opcode(node);
	ir_node     *pred, *blk, *pred_blk;
	ldst_info_t *ldst_info;
	walk_env_t  *wenv = env;

	if (opcode == iro_Proj) {
		pred   = get_Proj_pred(node);
		opcode = get_irn_opcode(pred);

		if (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call) {
			ldst_info = get_ldst_info(pred, &wenv->obst);

			wenv->changes |= update_projs(ldst_info, node);

			/*
			 * Place the Proj's into the same block as the
			 * predecessor Load. This is always ok and prevents
			 * "non-SSA" form after optimizations if the Proj
			 * is in a wrong block.
			 */
			blk      = get_nodes_block(node);
			pred_blk = get_nodes_block(pred);
			if (blk != pred_blk) {
				wenv->changes |= DF_CHANGED;
				set_nodes_block(node, pred_blk);
			}
		}
	} else if (opcode == iro_Block) {
		int i;

		for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
			ir_node      *pred_block, *proj;
			block_info_t *bl_info;
			int          is_exc = 0;

			pred = proj = get_Block_cfgpred(node, i);

			if (is_Proj(proj)) {
				pred   = get_Proj_pred(proj);
				is_exc = get_Proj_proj(proj) == pn_Generic_X_except;
			}

			/* ignore Bad predecessors, they will be removed later */
			if (is_Bad(pred))
				continue;

			pred_block = get_nodes_block(pred);
			bl_info    = get_block_info(pred_block, &wenv->obst);

			if (is_fragile_op(pred) && is_exc)
				bl_info->flags |= BLOCK_HAS_EXC;
			else if (is_irn_forking(pred))
				bl_info->flags |= BLOCK_HAS_COND;

			opcode = get_irn_opcode(pred);
			if (is_exc && (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call)) {
				ldst_info = get_ldst_info(pred, &wenv->obst);

				wenv->changes |= update_exc(ldst_info, node, i);
			}
		}
	}
}  /* collect_nodes */

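/*
 * How this walker is typically driven (a sketch only; the concrete
 * driver is the pass entry point further down in this file, the
 * variable names here are hypothetical):
 */
#if 0
	walk_env_t env;

	obstack_init(&env.obst);
	env.changes = 0;
	/* walk from Start to End, attaching ldst_info_t/block_info_t
	 * records to the visited nodes via their link fields */
	irg_walk_graph(irg, NULL, collect_nodes, &env);
#endif
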
/**
 * Returns an entity if the address ptr points to a constant one.
 *
 * @param ptr  the address
 *
 * @return an entity or NULL
 */
static ir_entity *find_constant_entity(ir_node *ptr)
{
	for (;;) {
		if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
			return get_SymConst_entity(ptr);
		} else if (is_Sel(ptr)) {
			ir_entity *ent = get_Sel_entity(ptr);
			ir_type   *tp  = get_entity_owner(ent);

			/* Do not fiddle with polymorphism. */
			if (is_Class_type(get_entity_owner(ent)) &&
				((get_entity_n_overwrites(ent)    != 0) ||
				(get_entity_n_overwrittenby(ent) != 0)   ) )
				return NULL;

			if (is_Array_type(tp)) {
				/* check bounds */
				int i, n;

				for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
					ir_node *bound;
					tarval *tlower, *tupper;
					ir_node *index = get_Sel_index(ptr, i);
					tarval *tv     = computed_value(index);

					/* check if the index is constant */
					if (tv == tarval_bad)
						return NULL;

					bound  = get_array_lower_bound(tp, i);
					tlower = computed_value(bound);
					bound  = get_array_upper_bound(tp, i);
					tupper = computed_value(bound);

					if (tlower == tarval_bad || tupper == tarval_bad)
						return NULL;

					if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
						return NULL;
					if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
						return NULL;

					/* ok, bounds check finished */
				}
			}

			if (variability_constant == get_entity_variability(ent))
				return ent;

			/* try next */
			ptr = get_Sel_ptr(ptr);
		} else if (is_Add(ptr)) {
			ir_node *l = get_Add_left(ptr);
			ir_node *r = get_Add_right(ptr);

			if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
				ptr = l;
			else if (get_irn_mode(r) == get_irn_mode(ptr) && is_Const(l))
				ptr = r;
			else
				return NULL;

			/* for now, we support only one addition, reassoc should fold all others */
			if (! is_SymConst(ptr) && !is_Sel(ptr))
				return NULL;
		} else if (is_Sub(ptr)) {
			ir_node *l = get_Sub_left(ptr);
			ir_node *r = get_Sub_right(ptr);

			if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
				ptr = l;
			else
				return NULL;
			/* for now, we support only one subtraction, reassoc should fold all others */
			if (! is_SymConst(ptr) && !is_Sel(ptr))
				return NULL;
		} else
			return NULL;
	}
}  /* find_constant_entity */

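/*
 * Worked example (sketch): for C source like
 *
 *     static const int tab[4] = { 1, 2, 3, 4 };
 *     ... tab[2] ...
 *
 * the address is Sel(SymConst(&tab), Const 2). The index is constant
 * and within bounds, and tab has variability_constant, so the entity
 * of tab is returned and the Load can later be folded to Const 3.
 */
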
/**
 * Return the Selection index of a Sel node from dimension dim.
 */
static long get_Sel_array_index_long(ir_node *n, int dim) {
	ir_node *index = get_Sel_index(n, dim);
	assert(is_Const(index));
	return get_tarval_long(get_Const_tarval(index));
}  /* get_Sel_array_index_long */

/**
 * Returns the accessed component graph path for a
 * node computing an address.
 *
 * @param ptr    the node computing the address
 * @param depth  current depth in steps upward from the root
 *               of the address
 */
static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
	compound_graph_path *res = NULL;
	ir_entity           *root, *field, *ent;
	int                 path_len, pos, idx;
	tarval              *tv;
	ir_type             *tp;

	if (is_SymConst(ptr)) {
		/* a SymConst. If the depth is 0, this is an access to a global
		 * entity and we don't need a component path, else we know
		 * at least its length.
		 */
		assert(get_SymConst_kind(ptr) == symconst_addr_ent);
		root = get_SymConst_entity(ptr);
		res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
	} else if (is_Sel(ptr)) {
		/* it's a Sel, go up until we find the root */
		res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);
		if (res == NULL)
			return NULL;

		/* fill up the step in the path at the current position */
		field    = get_Sel_entity(ptr);
		path_len = get_compound_graph_path_length(res);
		pos      = path_len - depth - 1;
		set_compound_graph_path_node(res, pos, field);

		if (is_Array_type(get_entity_owner(field))) {
			assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
			set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
		}
	} else if (is_Add(ptr)) {
		ir_node *l    = get_Add_left(ptr);
		ir_node *r    = get_Add_right(ptr);
		ir_mode *mode = get_irn_mode(ptr);
		tarval  *tmp;

		if (is_Const(r) && get_irn_mode(l) == mode) {
			ptr = l;
			tv  = get_Const_tarval(r);
		} else {
			ptr = r;
			tv  = get_Const_tarval(l);
		}
ptr_arith:
		mode = get_tarval_mode(tv);
		tmp  = tv;

		/* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
		if (is_Sel(ptr)) {
			field = get_Sel_entity(ptr);
		} else {
			field = get_SymConst_entity(ptr);
		}
		idx = 0;
		for (ent = field;;) {
			unsigned size;
			tarval   *sz, *tv_index, *tlower, *tupper;
			ir_node  *bound;

			tp = get_entity_type(ent);
			if (! is_Array_type(tp))
				break;
			ent = get_array_element_entity(tp);
			size = get_type_size_bytes(get_entity_type(ent));
			sz   = new_tarval_from_long(size, mode);

			tv_index = tarval_div(tmp, sz);
			tmp      = tarval_mod(tmp, sz);

			if (tv_index == tarval_bad || tmp == tarval_bad)
				return NULL;

			assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
			bound  = get_array_lower_bound(tp, 0);
			tlower = computed_value(bound);
			bound  = get_array_upper_bound(tp, 0);
			tupper = computed_value(bound);

			if (tlower == tarval_bad || tupper == tarval_bad)
				return NULL;

			if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
				return NULL;
			if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
				return NULL;

			/* ok, bounds check finished */
			++idx;
		}
		if (! tarval_is_null(tmp)) {
			/* access to some struct/union member */
			return NULL;
		}

		/* should be at least ONE array */
		if (idx == 0)
			return NULL;

		res = rec_get_accessed_path(ptr, depth + idx);
		if (res == NULL)
			return NULL;

		path_len = get_compound_graph_path_length(res);
		pos      = path_len - depth - idx;

		for (ent = field;;) {
			unsigned size;
			tarval   *sz, *tv_index;
			long     index;

			tp = get_entity_type(ent);
			if (! is_Array_type(tp))
				break;
			ent = get_array_element_entity(tp);
			set_compound_graph_path_node(res, pos, ent);

			size = get_type_size_bytes(get_entity_type(ent));
			sz   = new_tarval_from_long(size, mode);

			tv_index = tarval_div(tv, sz);
			tv       = tarval_mod(tv, sz);

			/* worked above, should work again */
			assert(tv_index != tarval_bad && tv != tarval_bad);

			/* bounds already checked above */
			index = get_tarval_long(tv_index);
			set_compound_graph_path_array_index(res, pos, index);
			++pos;
		}
	} else if (is_Sub(ptr)) {
		ir_node *l = get_Sub_left(ptr);
		ir_node *r = get_Sub_right(ptr);

		ptr = l;
		tv  = get_Const_tarval(r);
		tv  = tarval_neg(tv);
		goto ptr_arith;
	}
	return res;
}  /* rec_get_accessed_path */

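/*
 * Worked example for the Add case above (sketch): with 4-byte array
 * elements, the address SymConst(&tab) + Const 12 gives tv = 12;
 * dividing by the element size yields tv_index = 3 with remainder 0,
 * so the recorded access path is tab[3].
 */
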
/**
 * Returns an access path or NULL.  The access path is only
 * valid if the graph is in phase_high and _no_ address computation is used.
 */
static compound_graph_path *get_accessed_path(ir_node *ptr) {
	compound_graph_path *gr = rec_get_accessed_path(ptr, 0);
	return gr;
}  /* get_accessed_path */

typedef struct path_entry {
	ir_entity         *ent;
	struct path_entry *next;
	long              index;
} path_entry;

static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) {
	path_entry       entry, *p;
	ir_entity        *ent, *field;
	ir_initializer_t *initializer;
	tarval           *tv;
	ir_type          *tp;
	unsigned         n;

	entry.next = next;
	if (is_SymConst(ptr)) {
		/* found the root */
		ent         = get_SymConst_entity(ptr);
		initializer = get_entity_initializer(ent);
		for (p = next; p != NULL;) {
			if (initializer->kind != IR_INITIALIZER_COMPOUND)
				return NULL;
			n  = get_initializer_compound_n_entries(initializer);
			tp = get_entity_type(ent);

			if (is_Array_type(tp)) {
				ent = get_array_element_entity(tp);
				if (ent != p->ent) {
					/* a missing [0] */
					if (0 >= n)
						return NULL;
					initializer = get_initializer_compound_value(initializer, 0);
					continue;
				}
			}
			if (p->index >= (int) n)
				return NULL;
			initializer = get_initializer_compound_value(initializer, p->index);

			ent = p->ent;
			p   = p->next;
		}
		tp = get_entity_type(ent);
		while (is_Array_type(tp)) {
			ent = get_array_element_entity(tp);
			tp = get_entity_type(ent);
			/* a missing [0] */
			n  = get_initializer_compound_n_entries(initializer);
			if (0 >= n)
				return NULL;
			initializer = get_initializer_compound_value(initializer, 0);
		}

		switch (initializer->kind) {
		case IR_INITIALIZER_CONST:
			return get_initializer_const_value(initializer);
		case IR_INITIALIZER_TARVAL:
		case IR_INITIALIZER_NULL:
		default:
			return NULL;
		}
	} else if (is_Sel(ptr)) {
		entry.ent = field = get_Sel_entity(ptr);
		tp = get_entity_owner(field);
		if (is_Array_type(tp)) {
			assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
			entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
		} else {
			int i, n_members = get_compound_n_members(tp);
			for (i = 0; i < n_members; ++i) {
				if (get_compound_member(tp, i) == field)
					break;
			}
			if (i >= n_members) {
				/* not found: should NOT happen */
				return NULL;
			}
			entry.index = i;
		}
		return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
	} else if (is_Add(ptr)) {
		ir_node  *l = get_Add_left(ptr);
		ir_node  *r = get_Add_right(ptr);
		ir_mode  *mode;
		unsigned pos;

		if (is_Const(r)) {
			ptr = l;
			tv  = get_Const_tarval(r);
		} else {
			ptr = r;
			tv  = get_Const_tarval(l);
		}
ptr_arith:
		mode = get_tarval_mode(tv);

		/* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
		if (is_Sel(ptr)) {
			field = get_Sel_entity(ptr);
		} else {
			field = get_SymConst_entity(ptr);
		}

		/* count needed entries */
		pos = 0;
		for (ent = field;;) {
			tp = get_entity_type(ent);
			if (! is_Array_type(tp))
				break;
			ent = get_array_element_entity(tp);
			++pos;
		}
		/* should be at least ONE entry */
		if (pos == 0)
			return NULL;

		/* allocate the right number of entries */
		NEW_ARR_A(path_entry, p, pos);

		/* fill them up */
		pos = 0;
		for (ent = field;;) {
			unsigned size;
			tarval   *sz, *tv_index, *tlower, *tupper;
			long     index;
			ir_node  *bound;

			tp = get_entity_type(ent);
			if (! is_Array_type(tp))
				break;
			ent = get_array_element_entity(tp);
			p[pos].ent  = ent;
			p[pos].next = &p[pos + 1];

			size = get_type_size_bytes(get_entity_type(ent));
			sz   = new_tarval_from_long(size, mode);

			tv_index = tarval_div(tv, sz);
			tv       = tarval_mod(tv, sz);

			if (tv_index == tarval_bad || tv == tarval_bad)
				return NULL;

			assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
			bound  = get_array_lower_bound(tp, 0);
			tlower = computed_value(bound);
			bound  = get_array_upper_bound(tp, 0);
			tupper = computed_value(bound);

			if (tlower == tarval_bad || tupper == tarval_bad)
				return NULL;

			if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
				return NULL;
			if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
				return NULL;

			/* ok, bounds check finished */
			index = get_tarval_long(tv_index);
			p[pos].index = index;
			++pos;
		}
		if (! tarval_is_null(tv)) {
			/* hmm, wrong access */
			return NULL;
		}
		p[pos - 1].next = next;
		return rec_find_compound_ent_value(ptr, p);
	} else if (is_Sub(ptr)) {
		ir_node *l = get_Sub_left(ptr);
		ir_node *r = get_Sub_right(ptr);

		ptr = l;
		tv  = get_Const_tarval(r);
		tv  = tarval_neg(tv);
		goto ptr_arith;
	}
	return NULL;
}

static ir_node *find_compound_ent_value(ir_node *ptr) {
	return rec_find_compound_ent_value(ptr, NULL);
}

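/*
 * This is the counterpart of get_accessed_path() for entities using
 * the new-style ir_initializer_t representation: e.g. (a sketch) for
 * Sel(SymConst(&tab), Const 2) it builds the path_entry chain for
 * tab[2] and returns the initializer value stored at that index.
 */
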
/* forward */
static void reduce_adr_usage(ir_node *ptr);

/**
 * Update a Load that may have lost its users.
 */
static void handle_load_update(ir_node *load) {
	ldst_info_t *info = get_irn_link(load);

	/* do NOT touch volatile loads for now */
	if (get_Load_volatility(load) == volatility_is_volatile)
		return;

	if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
		ir_node *ptr = get_Load_ptr(load);
		ir_node *mem = get_Load_mem(load);

		/* a Load whose value is neither used nor exception checked, remove it */
		exchange(info->projs[pn_Load_M], mem);
		if (info->projs[pn_Load_X_regular])
			exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
		kill_node(load);
		reduce_adr_usage(ptr);
	}
}  /* handle_load_update */

/**
 * A use of an address node has vanished. Check if this was a Proj
 * node and update the counters.
 */
static void reduce_adr_usage(ir_node *ptr) {
	if (is_Proj(ptr)) {
		if (get_irn_n_edges(ptr) <= 0) {
			/* this Proj is dead now */
			ir_node *pred = get_Proj_pred(ptr);

			if (is_Load(pred)) {
				ldst_info_t *info = get_irn_link(pred);
				info->projs[get_Proj_proj(ptr)] = NULL;

				/* this node lost its result proj, handle that */
				handle_load_update(pred);
			}
		}
	}
}  /* reduce_adr_usage */

/**
 * Check, if an already existing value of mode old_mode can be converted
 * into the needed one new_mode without loss.
 */
static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode) {
	if (old_mode == new_mode)
		return 1;

	/* if both modes are two's-complement ones, we can always convert the
	   stored value into the needed one. */
	if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
		  get_mode_arithmetic(old_mode) == irma_twos_complement &&
		  get_mode_arithmetic(new_mode) == irma_twos_complement)
		return 1;
	return 0;
}  /* can_use_stored_value */

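/*
 * Examples (sketch, using libFirm's standard modes):
 *   can_use_stored_value(mode_Is, mode_Hs) -> 1   32 bit -> 16 bit, both two's complement
 *   can_use_stored_value(mode_Hs, mode_Is) -> 0   would need to widen 16 -> 32 bit
 *   can_use_stored_value(mode_F,  mode_Is) -> 0   float is not two's complement
 */
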
/**
 * Check whether a Call is at least pure, i.e. does only read memory.
 */
static unsigned is_Call_pure(ir_node *call) {
	ir_type *call_tp = get_Call_type(call);
	unsigned prop = get_method_additional_properties(call_tp);

	/* check first the call type */
	if ((prop & (mtp_property_const|mtp_property_pure)) == 0) {
		/* try the called entity */
		ir_node *ptr = get_Call_ptr(call);

		if (is_Global(ptr)) {
			ir_entity *ent = get_Global_entity(ptr);

			prop = get_entity_additional_properties(ent);
		}
	}
	return (prop & (mtp_property_const|mtp_property_pure)) != 0;
}  /* is_Call_pure */

static ir_node *get_base_and_offset(ir_node *ptr, long *pOffset)
{
	ir_mode *mode  = get_irn_mode(ptr);
	long    offset = 0;

	/* TODO: long might not be enough, we should probably use some tarval thingy... */
	for (;;) {
		if (is_Add(ptr)) {
			ir_node *l = get_Add_left(ptr);
			ir_node *r = get_Add_right(ptr);

			if (get_irn_mode(l) != mode || !is_Const(r))
				break;

			offset += get_tarval_long(get_Const_tarval(r));
			ptr     = l;
		} else if (is_Sub(ptr)) {
			ir_node *l = get_Sub_left(ptr);
			ir_node *r = get_Sub_right(ptr);

			if (get_irn_mode(l) != mode || !is_Const(r))
				break;

			offset -= get_tarval_long(get_Const_tarval(r));
			ptr     = l;
		} else if (is_Sel(ptr)) {
			ir_entity *ent = get_Sel_entity(ptr);
			ir_type   *tp  = get_entity_owner(ent);

			if (is_Array_type(tp)) {
				int     size;
				ir_node *index;

				/* only one-dimensional arrays for now */
				if (get_Sel_n_indexs(ptr) != 1)
					break;
				index = get_Sel_index(ptr, 0);
				if (! is_Const(index))
					break;

				tp = get_entity_type(ent);
				if (get_type_state(tp) != layout_fixed)
					break;

				size    = get_type_size_bytes(tp);
				offset += size * get_tarval_long(get_Const_tarval(index));
			} else {
				if (get_type_state(tp) != layout_fixed)
					break;
				offset += get_entity_offset(ent);
			}
			ptr = get_Sel_ptr(ptr);
		} else
			break;
	}

	*pOffset = offset;
	return ptr;
}

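/*
 * Sketch of the decomposition: an address Add(Sub(p, Const 4), Const 12)
 * returns p with *pOffset == 8; a Sel into a struct with fixed layout
 * contributes get_entity_offset() of its entity, a constant array Sel
 * contributes index times element size.
 */
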
static int try_load_after_store(ir_node *load,
		ir_node *load_base_ptr, long load_offset, ir_node *store)
{
	ldst_info_t *info;
	ir_node *store_ptr      = get_Store_ptr(store);
	long     store_offset;
	ir_node *store_base_ptr = get_base_and_offset(store_ptr, &store_offset);
	ir_node *store_value;
	ir_mode *store_mode;
	ir_node *load_ptr;
	ir_mode *load_mode;
	long     load_mode_len;
	long     store_mode_len;
	long     delta;
	int      res;

	if (load_base_ptr != store_base_ptr)
		return 0;

	load_mode      = get_Load_mode(load);
	load_mode_len  = get_mode_size_bytes(load_mode);
	store_mode     = get_irn_mode(get_Store_value(store));
	store_mode_len = get_mode_size_bytes(store_mode);
	delta          = load_offset - store_offset;
	store_value    = get_Store_value(store);

	if (delta != 0 || store_mode != load_mode) {
		if (delta < 0 || delta + load_mode_len > store_mode_len)
			return 0;

		if (get_mode_arithmetic(store_mode) != irma_twos_complement ||
			get_mode_arithmetic(load_mode)  != irma_twos_complement)
			return 0;

		/* produce a shift to adjust the offset delta */
		if (delta > 0) {
			ir_node *cnst;

			/* FIXME: only true for little endian */
			cnst        = new_Const_long(mode_Iu, delta * 8);
			store_value = new_r_Shr(current_ir_graph, get_nodes_block(load),
			                        store_value, cnst, store_mode);
		}

		/* add a Conv if needed */
		if (store_mode != load_mode) {
			store_value = new_r_Conv(current_ir_graph, get_nodes_block(load),
			                         store_value, load_mode);
		}
	}

	DBG_OPT_RAW(load, store_value);

	info = get_irn_link(load);
	if (info->projs[pn_Load_M])
		exchange(info->projs[pn_Load_M], get_Load_mem(load));

	res = 0;
	/* no exception */
	if (info->projs[pn_Load_X_except]) {
		exchange(info->projs[pn_Load_X_except], new_Bad());
		res |= CF_CHANGED;
	}
	if (info->projs[pn_Load_X_regular]) {
		exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
		res |= CF_CHANGED;
	}

	if (info->projs[pn_Load_res])
		exchange(info->projs[pn_Load_res], store_value);

	load_ptr = get_Load_ptr(load);
	kill_node(load);
	reduce_adr_usage(load_ptr);
	return res | DF_CHANGED;
}

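/*
 * Worked example (sketch; little endian, as the FIXME above notes):
 *
 *     Store(p, v)       with v in a 32-bit mode
 *     Load(p + 2)       in a 16-bit mode
 *
 * gives delta == 2, so the Load result is rebuilt as
 * Conv_16(Shr(v, Const 16)).
 */
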
/**
 * Follow the memory chain as long as there are only Loads,
 * alias-free Stores, and constant Calls and try to replace the
 * current Load by a previous one.
 * Note that in unreachable loops it might happen that we reach
 * this Load again; we can also fall into a cycle.
 * We break such cycles using a special visited flag.
 *
 * INC_MASTER() must be called before diving in.
 */
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
	unsigned    res = 0;
	ldst_info_t *info = get_irn_link(load);
	ir_node     *pred;
	ir_node     *ptr       = get_Load_ptr(load);
	ir_node     *mem       = get_Load_mem(load);
	ir_mode     *load_mode = get_Load_mode(load);

	for (pred = curr; load != pred; ) {
		ldst_info_t *pred_info = get_irn_link(pred);

		/*
		 * a Load immediately after a Store -- a read after write.
		 * We may remove the Load, if both Load & Store do not have an
		 * exception handler OR they are in the same MacroBlock. In the latter
		 * case the Load cannot throw an exception when the previous Store was
		 * quiet.
		 *
		 * Why do we need to check for a Store exception? If the Store cannot
		 * be executed (ROM) the exception handler might simply jump into
		 * the load MacroBlock :-(
		 * We could make it a little bit better if we would know that the
		 * exception handler of the Store jumps directly to the end...
		 */
		if (is_Store(pred) && ((pred_info->projs[pn_Store_X_except] == NULL
				&& info->projs[pn_Load_X_except] == NULL)
				|| get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)))
		{
			long    load_offset;
			ir_node *base_ptr = get_base_and_offset(ptr, &load_offset);
			int     changes   = try_load_after_store(load, base_ptr, load_offset, pred);

			if (changes != 0)
				return res | changes;
		} else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
		           can_use_stored_value(get_Load_mode(pred), load_mode)) {
			/*
			 * a Load after a Load -- a read after read.
			 * We may remove the second Load, if it does not have an exception handler
			 * OR they are in the same MacroBlock. In the latter case the Load cannot
			 * throw an exception when the previous Load was quiet.
			 *
			 * Here, there is no need to check if the previous Load has an exception
			 * handler because they would have exactly the same exception...
			 */
			if (info->projs[pn_Load_X_except] == NULL || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)) {
				ir_node *value;

				DBG_OPT_RAR(load, pred);

				/* the result is used */
				if (info->projs[pn_Load_res]) {
					if (pred_info->projs[pn_Load_res] == NULL) {
						/* create a new Proj again */
						pred_info->projs[pn_Load_res] = new_r_Proj(current_ir_graph, get_nodes_block(pred), pred, get_Load_mode(pred), pn_Load_res);
					}
					value = pred_info->projs[pn_Load_res];

					/* add a Conv if needed */
					if (get_Load_mode(pred) != load_mode) {
						value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
					}

					exchange(info->projs[pn_Load_res], value);
				}

				if (info->projs[pn_Load_M])
					exchange(info->projs[pn_Load_M], mem);

				/* no exception */
				if (info->projs[pn_Load_X_except]) {
					exchange(info->projs[pn_Load_X_except], new_Bad());
					res |= CF_CHANGED;
				}
				if (info->projs[pn_Load_X_regular]) {
					exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
					res |= CF_CHANGED;
				}

				kill_node(load);
				reduce_adr_usage(ptr);
				return res | DF_CHANGED;
			}
		}

		if (is_Store(pred)) {
			/* check if we can pass through this store */
			ir_alias_relation rel = get_alias_relation(
				current_ir_graph,
				get_Store_ptr(pred),
				get_irn_mode(get_Store_value(pred)),
				ptr, load_mode);
			/* if there might be an alias, we cannot pass this Store */
			if (rel != ir_no_alias)
				break;
			pred = skip_Proj(get_Store_mem(pred));
		} else if (is_Load(pred)) {
			pred = skip_Proj(get_Load_mem(pred));
		} else if (is_Call(pred)) {
			if (is_Call_pure(pred)) {
				/* The called graph is at least pure, so there are no Store's
				   in it. We can handle it like a Load and skip it. */
				pred = skip_Proj(get_Call_mem(pred));
			} else {
				/* there might be Store's in the graph, stop here */
				break;
			}
		} else {
			/* follow only Load chains */
			break;
		}

		/* check for cycles */
		if (NODE_VISITED(pred_info))
			break;
		MARK_NODE(pred_info);
	}

	if (is_Sync(pred)) {
		int i;

		/* handle all Sync predecessors */
		for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
			res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
			if (res)
				return res;
		}
	}

	return res;
}  /* follow_Mem_chain */

/*
 * Check if we can replace the load by a given const from
 * the const code irg.
 */
ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c) {
	ir_mode *c_mode = get_irn_mode(c);
	ir_mode *l_mode = get_Load_mode(load);
	ir_node *res    = NULL;

	if (c_mode != l_mode) {
		/* check, if the mode matches OR can be easily converted into */
		if (is_reinterpret_cast(c_mode, l_mode)) {
			/* we can safely cast */
			dbg_info *dbg   = get_irn_dbg_info(load);
			ir_node  *block = get_nodes_block(load);

			/* copy the value from the const code irg and cast it */
			res = copy_const_value(dbg, c);
			res = new_rd_Conv(dbg, current_ir_graph, block, res, l_mode);
		}
	} else {
		/* copy the value from the const code irg */
		res = copy_const_value(get_irn_dbg_info(load), c);
	}
	return res;
}  /* can_replace_load_by_const */

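/*
 * Typical use (sketch, mirroring optimize_load() below): once a
 * constant candidate c has been found for the loaded address, do
 *
 *     value = can_replace_load_by_const(load, c);
 *     if (value != NULL)
 *         exchange(info->projs[pn_Load_res], value);
 *
 * A NULL result means the modes cannot be reinterpreted safely.
 */
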
/**
 * optimize a Load
 *
 * @param load  the Load node
 */
static unsigned optimize_load(ir_node *load)
{
	ldst_info_t *info = get_irn_link(load);
	ir_node     *mem, *ptr, *value;
	ir_entity   *ent;
	long        dummy;
	unsigned    res = 0;

	/* do NOT touch volatile loads for now */
	if (get_Load_volatility(load) == volatility_is_volatile)
		return 0;

	/* the address of the load to be optimized */
	ptr = get_Load_ptr(load);

	/*
	 * Check if we can remove the exception from a Load:
	 * This can be done, if the address is from a Sel(Alloc) and
	 * the Sel type is a subtype of the allocated type.
	 *
	 * This optimizes some often used OO constructs,
	 * like x = new O; x->t;
	 */
	if (info->projs[pn_Load_X_except]) {
		ir_node *addr = ptr;

		/* find base address */
		while (is_Sel(addr))
			addr = get_Sel_ptr(addr);
		if (is_Alloc(skip_Proj(skip_Cast(addr)))) {
			/* simple case: a direct load after an Alloc. Firm's Alloc throws
			 * an exception in case of out-of-memory. So, there is no way for an
			 * exception in this load.
			 * This code is constructed by the "exception lowering" in the Jack compiler.
			 */
			exchange(info->projs[pn_Load_X_except], new_Bad());
			info->projs[pn_Load_X_except] = NULL;
			exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
			info->projs[pn_Load_X_regular] = NULL;
			res |= CF_CHANGED;
		}
	}

	/* The mem of the Load. Must still be returned after optimization. */
	mem = get_Load_mem(load);

	if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
		/* a Load whose value is neither used nor exception checked, remove it */
		exchange(info->projs[pn_Load_M], mem);

		if (info->projs[pn_Load_X_regular]) {
			/* should not happen, but if it does, remove it */
			exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
			res |= CF_CHANGED;
		}
		kill_node(load);
		reduce_adr_usage(ptr);
		return res | DF_CHANGED;
	}

	/* Load from a constant polymorphic field, where we can resolve
	   polymorphism. */
	value = transform_polymorph_Load(load);
	if (value == load) {
		value = NULL;
		/* check if we can determine the entity that will be loaded */
		ent = find_constant_entity(ptr);
		if (ent != NULL                                     &&
		    allocation_static == get_entity_allocation(ent) &&
		    visibility_external_allocated != get_entity_visibility(ent)) {
			/* a static allocation that is not external: there should be NO exception
			 * when loading even if we cannot replace the load itself. */

			/* no exception, clear the info field as it might be checked later again */
			if (info->projs[pn_Load_X_except]) {
				exchange(info->projs[pn_Load_X_except], new_Bad());
				info->projs[pn_Load_X_except] = NULL;
				res |= CF_CHANGED;
			}
			if (info->projs[pn_Load_X_regular]) {
				exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
				info->projs[pn_Load_X_regular] = NULL;
				res |= CF_CHANGED;
			}

			if (variability_constant == get_entity_variability(ent)) {
				if (is_atomic_entity(ent)) {
					/* Might not be atomic after lowering of Sels.  In this case we
					 * could also load, but it's more complicated. */
					/* simpler case: we load the content of a constant value:
					 * replace it by the constant itself */
					value = get_atomic_ent_value(ent);
				} else if (ent->has_initializer) {
					/* new style initializer */
					value = find_compound_ent_value(ptr);
				} else {
					/* old style initializer */
					compound_graph_path *path = get_accessed_path(ptr);

					if (path != NULL) {
						assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));

						value = get_compound_ent_value_by_path(ent, path);
						DB((dbg, LEVEL_1, "  Constant access at %F%F resulted in %+F\n", ent, path, value));
						free_compound_graph_path(path);
					}
				}
				if (value != NULL)
					value = can_replace_load_by_const(load, value);
			}
		}
	}
	if (value != NULL) {
		/* we completely replace the load by this value */
		if (info->projs[pn_Load_X_except]) {
			exchange(info->projs[pn_Load_X_except], new_Bad());
			info->projs[pn_Load_X_except] = NULL;
			res |= CF_CHANGED;
		}
		if (info->projs[pn_Load_X_regular]) {
			exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
			info->projs[pn_Load_X_regular] = NULL;
			res |= CF_CHANGED;
		}
		if (info->projs[pn_Load_M]) {
			exchange(info->projs[pn_Load_M], mem);
			res |= DF_CHANGED;
		}
		if (info->projs[pn_Load_res]) {
			exchange(info->projs[pn_Load_res], value);
			res |= DF_CHANGED;
		}
		kill_node(load);
		reduce_adr_usage(ptr);
		return res;
	}

	/* Check if the address of this load is used more than once.
	 * If not, this load cannot be removed in any case. */
	if (get_irn_n_uses(ptr) <= 1 && get_irn_n_uses(get_base_and_offset(ptr, &dummy)) <= 1)
		return res;

	/*
	 * follow the memory chain as long as there are only Loads
	 * and try to replace the current Load or Store by a previous one.
	 * Note that in unreachable loops it might happen that we reach
	 * this Load again; we can also fall into a cycle.
	 * We break such cycles using a special visited flag.
	 */
	INC_MASTER();
	res = follow_Mem_chain(load, skip_Proj(mem));
	return res;
}  /* optimize_load */

/**
 * Check whether a value of mode new_mode would completely overwrite a value
 * of mode old_mode in memory.
 */
static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
{
	return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
}  /* is_completely_overwritten */

/**
 * Check whether small is a part of large (starting at the same address).
 */
static int is_partially_same(ir_node *small, ir_node *large)
{
	ir_mode *sm = get_irn_mode(small);
	ir_mode *lm = get_irn_mode(large);

	/* FIXME: Check endianness */
	return is_Conv(small) && get_Conv_op(small) == large
	    && get_mode_size_bytes(sm) < get_mode_size_bytes(lm)
	    && get_mode_arithmetic(sm) == irma_twos_complement
	    && get_mode_arithmetic(lm) == irma_twos_complement;
}  /* is_partially_same */

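/*
 * Example (sketch): with v a value in a 32-bit two's-complement mode,
 * is_partially_same(Conv_16(v), v) holds: the Conv keeps the low-order
 * part, so the narrow value is contained in the wide one (modulo the
 * endianness FIXME above).
 */
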
1264 /**
1265  * follow the memory chain as long as there are only Loads and alias free Stores.
1266  *
1267  * INC_MASTER() must be called before dive into
1268  */
static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
        unsigned res = 0;
        ldst_info_t *info = get_irn_link(store);
        ir_node *pred;
        ir_node *ptr = get_Store_ptr(store);
        ir_node *mem = get_Store_mem(store);
        ir_node *value = get_Store_value(store);
        ir_mode *mode  = get_irn_mode(value);
        ir_node *block = get_nodes_block(store);
        ir_node *mblk  = get_Block_MacroBlock(block);

        for (pred = curr; pred != store;) {
                ldst_info_t *pred_info = get_irn_link(pred);

                /*
                 * BEWARE: one might think that checking the modes is useless, because
                 * if the pointers are identical, they refer to the same object.
                 * This is only true in strongly typed languages, not in C where the
                 * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
                 * However, if the size of the mode that is written is bigger than or
                 * equal to the size of the old one, the old value is completely
                 * overwritten and can be killed ...
                 */
                if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
                    get_nodes_MacroBlock(pred) == mblk) {
                        /*
                         * a Store after a Store in the same MacroBlock -- a write after write.
                         */

                        /*
                         * We may remove the first Store, if the old value is completely
                         * overwritten or the old value is a part of the new value,
                         * and if it does not have an exception handler.
                         *
                         * TODO: What if both have the same exception handler?
                         */
                        if (get_Store_volatility(pred) != volatility_is_volatile
                                && !pred_info->projs[pn_Store_X_except]) {
                                ir_node *predvalue = get_Store_value(pred);
                                ir_mode *predmode  = get_irn_mode(predvalue);

                                if (is_completely_overwritten(predmode, mode)
                                        || is_partially_same(predvalue, value)) {
                                        DBG_OPT_WAW(pred, store);
                                        exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
                                        kill_node(pred);
                                        reduce_adr_usage(ptr);
                                        return DF_CHANGED;
                                }
                        }

                        /*
                         * We may remove the Store, if the old value already contains
                         * the new value, and if it does not have an exception handler.
                         *
                         * TODO: What if both have the same exception handler?
                         */
                        if (get_Store_volatility(store) != volatility_is_volatile
                                && !info->projs[pn_Store_X_except]) {
                                ir_node *predvalue = get_Store_value(pred);

                                if (is_partially_same(value, predvalue)) {
                                        DBG_OPT_WAW(pred, store);
                                        exchange(info->projs[pn_Store_M], mem);
                                        kill_node(store);
                                        reduce_adr_usage(ptr);
                                        return DF_CHANGED;
                                }
                        }
                } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
                           value == pred_info->projs[pn_Load_res]) {
                        /*
                         * a Store of a value just loaded from the same address
                         * -- a write after read.
                         * We may remove the Store, if it does not have an exception
                         * handler.
                         */
                        if (! info->projs[pn_Store_X_except]) {
                                DBG_OPT_WAR(store, pred);
                                exchange(info->projs[pn_Store_M], mem);
                                kill_node(store);
                                reduce_adr_usage(ptr);
                                return DF_CHANGED;
                        }
                }

                if (is_Store(pred)) {
                        /* check if we can pass through this store */
                        ir_alias_relation rel = get_alias_relation(
                                current_ir_graph,
                                get_Store_ptr(pred),
                                get_irn_mode(get_Store_value(pred)),
                                ptr, mode);
                        /* if there might be an alias, we cannot pass this Store */
                        if (rel != ir_no_alias)
                                break;
                        pred = skip_Proj(get_Store_mem(pred));
                } else if (is_Load(pred)) {
                        ir_alias_relation rel = get_alias_relation(
                                current_ir_graph, get_Load_ptr(pred), get_Load_mode(pred),
                                ptr, mode);
                        if (rel != ir_no_alias)
                                break;

                        pred = skip_Proj(get_Load_mem(pred));
                } else {
                        /* follow only Load chains */
                        break;
                }

                /* check for cycles */
                if (NODE_VISITED(pred_info))
                        break;
                MARK_NODE(pred_info);
        }

        if (is_Sync(pred)) {
                int i;

                /* handle all Sync predecessors */
                for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
                        res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
                        if (res)
                                break;
                }
        }
        return res;
}  /* follow_Mem_chain_for_Store */
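
/*
 * Illustration (hypothetical source) of the write-after-write case
 * handled above: the first Store is killed when a later Store to the
 * same address completely overwrites it and nothing in between may
 * alias it:
 *
 *     p->x = a;    -- killed, completely overwritten below
 *     q->y = b;    -- passed only if it provably does not alias p->x
 *     p->x = c;    -- the surviving Store
 */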

/** find entity used as base for an address calculation */
static ir_entity *find_entity(ir_node *ptr)
{
        switch (get_irn_opcode(ptr)) {
        case iro_SymConst:
                return get_SymConst_entity(ptr);
        case iro_Sel: {
                ir_node *pred = get_Sel_ptr(ptr);
                if (get_irg_frame(get_irn_irg(ptr)) == pred)
                        return get_Sel_entity(ptr);

                return find_entity(pred);
        }
        case iro_Sub:
        case iro_Add: {
                ir_node *left = get_binop_left(ptr);
                ir_node *right;
                if (mode_is_reference(get_irn_mode(left)))
                        return find_entity(left);
                right = get_binop_right(ptr);
                if (mode_is_reference(get_irn_mode(right)))
                        return find_entity(right);
                return NULL;
        }
        default:
                return NULL;
        }
}
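
/*
 * Illustration (hypothetical address chains): find_entity() looks through
 * address arithmetic down to the base, so each of the following resolves
 * to the entity ent:
 *
 *     SymConst(ent)
 *     Add(SymConst(ent), Const(8))    -- constant offset into a global
 *     Sel(frame, ent)                 -- a local on the stack frame
 *
 * Only reference-mode operands of Add/Sub are followed; if neither
 * operand has reference mode, no base entity is found.
 */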

/**
 * optimize a Store
 *
 * @param store  the Store node
 */
static unsigned optimize_store(ir_node *store) {
        ir_node   *ptr;
        ir_node   *mem;
        ir_entity *entity;

        if (get_Store_volatility(store) == volatility_is_volatile)
                return 0;

        ptr    = get_Store_ptr(store);
        entity = find_entity(ptr);

        /* a store to an entity which is never read is unnecessary */
        if (entity != NULL && !(get_entity_usage(entity) & ir_usage_read)) {
                ldst_info_t *info = get_irn_link(store);
                if (info->projs[pn_Store_X_except] == NULL) {
                        exchange(info->projs[pn_Store_M], get_Store_mem(store));
                        kill_node(store);
                        reduce_adr_usage(ptr);
                        return DF_CHANGED;
                }
        }

        /* Check, if the address of this Store is used more than once.
         * If not, this Store cannot be removed in any case. */
        if (get_irn_n_uses(ptr) <= 1)
                return 0;

        mem = get_Store_mem(store);

        /* follow the memory chain as long as there are only Loads */
        INC_MASTER();

        return follow_Mem_chain_for_Store(store, skip_Proj(mem));
}  /* optimize_store */
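
/*
 * Illustration (hypothetical source): the entity-usage check above deletes
 * Stores to variables that are provably never read, e.g.
 *
 *     static int counter;                -- ir_usage_read never set
 *     void f(void) { counter = 42; }     -- this Store is removed
 *
 * This relies on the entity usage information computed beforehand, see
 * optimize_load_store() at the end of this file.
 */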

/**
 * walker, optimizes Phi after Stores to identical places:
 * Does the following optimization:
 * @verbatim
 *
 *   val1   val2   val3          val1  val2  val3
 *    |      |      |               \    |    /
 *  Store  Store  Store              \   |   /
 *      \    |    /                   PhiData
 *       \   |   /                       |
 *        \  |  /                      Store
 *          PhiM
 *
 * @endverbatim
 * This reduces the number of Stores and allows for predicated execution.
 * It moves Stores back to the end of a function, which may be bad.
 *
 * This is only possible if the predecessor blocks have only one successor.
 */
static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
{
        int i, n;
        ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
        ir_mode *mode;
        ir_node **inM, **inD, **projMs;
        int *idx;
        dbg_info *db = NULL;
        ldst_info_t *info;
        block_info_t *bl_info;
        unsigned res = 0;

        /* Must be a memory Phi */
        if (get_irn_mode(phi) != mode_M)
                return 0;

        n = get_Phi_n_preds(phi);
        if (n <= 0)
                return 0;

        /* must be only one user */
        projM = get_Phi_pred(phi, 0);
        if (get_irn_n_edges(projM) != 1)
                return 0;

        store = skip_Proj(projM);
        old_store = store;
        if (!is_Store(store))
                return 0;

        block = get_nodes_block(store);

        /* abort on dead blocks */
        if (is_Block_dead(block))
                return 0;

        /* check if the block is post dominated by Phi-block
           and has no exception exit */
        bl_info = get_irn_link(block);
        if (bl_info->flags & BLOCK_HAS_EXC)
                return 0;

        phi_block = get_nodes_block(phi);
        if (! block_strictly_postdominates(phi_block, block))
                return 0;

        /* this is the address of the store */
        ptr  = get_Store_ptr(store);
        mode = get_irn_mode(get_Store_value(store));
        info = get_irn_link(store);
        exc  = info->exc_block;

        for (i = 1; i < n; ++i) {
                ir_node *pred = get_Phi_pred(phi, i);

                if (get_irn_n_edges(pred) != 1)
                        return 0;

                pred = skip_Proj(pred);
                if (!is_Store(pred))
                        return 0;

                if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
                        return 0;

                info = get_irn_link(pred);

                /* check, if all stores have the same exception flow */
                if (exc != info->exc_block)
                        return 0;

                /* abort on dead blocks */
                block = get_nodes_block(pred);
                if (is_Block_dead(block))
                        return 0;

                /* check if the block is post dominated by Phi-block
                   and has no exception exit. Note that block must be different from
                   Phi-block, else we would move a Store from the End of a block to its
                   Start... */
                bl_info = get_irn_link(block);
                if (bl_info->flags & BLOCK_HAS_EXC)
                        return 0;
                if (block == phi_block || ! block_postdominates(phi_block, block))
                        return 0;
        }

        /*
         * ok, when we are here, we found all predecessors of a Phi that
         * are Stores to the same address and size. That means whatever
         * we do before we enter the block of the Phi, we do a Store.
         * So, we can move the Store to the current block:
         *
         *   val1    val2    val3          val1  val2  val3
         *    |       |       |               \    |    /
         * | Str | | Str | | Str |             \   |   /
         *      \     |     /                   PhiData
         *       \    |    /                       |
         *        \   |   /                       Str
         *           PhiM
         *
         * Is only allowed if the predecessor blocks have only one successor.
         */

        NEW_ARR_A(ir_node *, projMs, n);
        NEW_ARR_A(ir_node *, inM, n);
        NEW_ARR_A(ir_node *, inD, n);
        NEW_ARR_A(int, idx, n);

        /* Prepare: Collect all Store nodes.  We must do this
           first because we otherwise may lose a Store when exchanging its
           memory Proj.
         */
        for (i = n - 1; i >= 0; --i) {
                ir_node *store;

                projMs[i] = get_Phi_pred(phi, i);
                assert(is_Proj(projMs[i]));

                store = get_Proj_pred(projMs[i]);
                info  = get_irn_link(store);

                inM[i] = get_Store_mem(store);
                inD[i] = get_Store_value(store);
                idx[i] = info->exc_idx;
        }
        block = get_nodes_block(phi);

        /* second step: create a new memory Phi */
        phiM = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inM, mode_M);

        /* third step: create a new data Phi */
        phiD = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inD, mode);

        /* rewire memory and kill the node */
        for (i = n - 1; i >= 0; --i) {
                ir_node *proj  = projMs[i];

                if (is_Proj(proj)) {
                        ir_node *store = get_Proj_pred(proj);
                        exchange(proj, inM[i]);
                        kill_node(store);
                }
        }

        /* fourth step: create the Store */
        store = new_rd_Store(db, current_ir_graph, block, phiM, ptr, phiD);
#ifdef DO_CACHEOPT
        co_set_irn_name(store, co_get_irn_ident(old_store));
#endif

        projM = new_rd_Proj(NULL, current_ir_graph, block, store, mode_M, pn_Store_M);

        info = get_ldst_info(store, &wenv->obst);
        info->projs[pn_Store_M] = projM;

        /* fifth step: repair exception flow */
        if (exc) {
                ir_node *projX = new_rd_Proj(NULL, current_ir_graph, block, store, mode_X, pn_Store_X_except);

                info->projs[pn_Store_X_except] = projX;
                info->exc_block                = exc;
                info->exc_idx                  = idx[0];

                for (i = 0; i < n; ++i) {
                        set_Block_cfgpred(exc, idx[i], projX);
                }

                if (n > 1) {
                        /* the exception block should be optimized as some inputs are identical now */
                }

                res |= CF_CHANGED;
        }

        /* sixth step: replace old Phi */
        exchange(phi, projM);

        return res | DF_CHANGED;
}  /* optimize_phi */
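
/*
 * Illustration (hypothetical source): the Phi-after-Stores transformation
 * above turns control-flow-dependent Stores into a single Store of a
 * selected value, e.g.
 *
 *     if (c) *p = a; else *p = b;    -- one Store per branch
 *
 * becomes, in graph terms,
 *
 *     *p = Phi(a, b);                -- one Store after the join
 *
 * which a backend may lower to a conditional move plus one store.
 */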

/**
 * walker, do the optimizations
 */
static void do_load_store_optimize(ir_node *n, void *env) {
        walk_env_t *wenv = env;

        switch (get_irn_opcode(n)) {

        case iro_Load:
                wenv->changes |= optimize_load(n);
                break;

        case iro_Store:
                wenv->changes |= optimize_store(n);
                break;

        case iro_Phi:
                wenv->changes |= optimize_phi(n, wenv);
                break;

        default:
                ;
        }
}  /* do_load_store_optimize */

/** A scc. */
typedef struct scc {
        ir_node *head;          /**< the head of the list */
} scc;

/** A node entry. */
typedef struct node_entry {
        unsigned DFSnum;    /**< the DFS number of this node */
        unsigned low;       /**< the low number of this node */
        ir_node  *header;   /**< the header of this node */
        int      in_stack;  /**< flag, set if the node is on the stack */
        ir_node  *next;     /**< link to the next node in the same scc */
        scc      *pscc;     /**< the scc of this node */
        unsigned POnum;     /**< the post order number for blocks */
} node_entry;

/** A loop entry. */
typedef struct loop_env {
        ir_phase ph;           /**< the phase object */
        ir_node  **stack;      /**< the node stack */
        int      tos;          /**< tos index */
        unsigned nextDFSnum;   /**< the current DFS number */
        unsigned POnum;        /**< current post order number */

        unsigned changes;      /**< a bitmask of graph changes */
} loop_env;

/**
 * Gets the node_entry of a node
 */
static node_entry *get_irn_ne(ir_node *irn, loop_env *env) {
        ir_phase   *ph = &env->ph;
        node_entry *e  = phase_get_irn_data(&env->ph, irn);

        if (! e) {
                e = phase_alloc(ph, sizeof(*e));
                memset(e, 0, sizeof(*e));
                phase_set_irn_data(ph, irn, e);
        }
        return e;
}  /* get_irn_ne */

/**
 * Push a node onto the stack.
 *
 * @param env   the loop environment
 * @param n     the node to push
 */
static void push(loop_env *env, ir_node *n) {
        node_entry *e;

        if (env->tos == ARR_LEN(env->stack)) {
                int nlen = ARR_LEN(env->stack) * 2;
                ARR_RESIZE(ir_node *, env->stack, nlen);
        }
        env->stack[env->tos++] = n;
        e = get_irn_ne(n, env);
        e->in_stack = 1;
}  /* push */

/**
 * pop a node from the stack
 *
 * @param env   the loop environment
 *
 * @return  The topmost node
 */
static ir_node *pop(loop_env *env) {
        ir_node *n = env->stack[--env->tos];
        node_entry *e = get_irn_ne(n, env);

        e->in_stack = 0;
        return n;
}  /* pop */

/**
 * Check if irn is a region constant.
 * The block of irn must strictly dominate the header block.
 *
 * @param irn           the node to check
 * @param header_block  the header block of the induction variable
 */
static int is_rc(ir_node *irn, ir_node *header_block) {
        ir_node *block = get_nodes_block(irn);

        return (block != header_block) && block_dominates(block, header_block);
}  /* is_rc */
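
/*
 * Illustration (graph sketch): a value created before the loop header,
 * e.g. the memory state on loop entry, is a region constant here:
 *
 *     entry:   M0 = ...              -- entry strictly dominates header
 *     header:  M  = Phi(M0, Mbody)   -- M0 is a region constant input
 *     body:    Mbody = Store(M, ...)
 */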

typedef struct phi_entry phi_entry;
struct phi_entry {
        ir_node   *phi;    /**< A phi with a region const memory. */
        int       pos;     /**< The position of the region const memory */
        ir_node   *load;   /**< the newly created load for this phi */
        phi_entry *next;
};

/**
 * Move Loads out of loops if possible.
 *
 * @param pscc   the loop described by an SCC
 * @param env    the loop environment
 */
static void move_loads_out_of_loops(scc *pscc, loop_env *env) {
        ir_node   *phi, *load, *next, *other, *next_other;
        ir_entity *ent;
        int       j;
        phi_entry *phi_list = NULL;

        /* collect all outer memories */
        for (phi = pscc->head; phi != NULL; phi = next) {
                node_entry *ne = get_irn_ne(phi, env);
                next = ne->next;

                /* check all memory Phi's */
                if (! is_Phi(phi))
                        continue;

                assert(get_irn_mode(phi) == mode_M && "DFS returned a non-memory Phi");

                for (j = get_irn_arity(phi) - 1; j >= 0; --j) {
                        ir_node    *pred = get_irn_n(phi, j);
                        node_entry *pe   = get_irn_ne(pred, env);

                        if (pe->pscc != ne->pscc) {
                                /* not in the same SCC, is region const */
                                phi_entry *pent = phase_alloc(&env->ph, sizeof(*pent));

                                pent->phi  = phi;
                                pent->pos  = j;
                                pent->next = phi_list;
                                phi_list = pent;
                        }
                }
        }
        /* no Phis no fun */
        assert(phi_list != NULL && "DFS found a loop without Phi");

        for (load = pscc->head; load; load = next) {
                ir_mode *load_mode;
                node_entry *ne = get_irn_ne(load, env);
                next = ne->next;

                if (is_Load(load)) {
                        ldst_info_t *info = get_irn_link(load);
                        ir_node     *ptr = get_Load_ptr(load);

                        /* for now, we cannot handle Loads with exceptions */
                        if (info->projs[pn_Load_res] == NULL || info->projs[pn_Load_X_regular] != NULL || info->projs[pn_Load_X_except] != NULL)
                                continue;

                        /* for now, we can only handle Load(Global) */
                        if (! is_Global(ptr))
                                continue;
                        ent = get_Global_entity(ptr);
                        load_mode = get_Load_mode(load);
                        for (other = pscc->head; other != NULL; other = next_other) {
                                node_entry *ne = get_irn_ne(other, env);
                                next_other = ne->next;

                                if (is_Store(other)) {
                                        ir_alias_relation rel = get_alias_relation(
                                                current_ir_graph,
                                                get_Store_ptr(other),
                                                get_irn_mode(get_Store_value(other)),
                                                ptr, load_mode);
                                        /* if there might be an alias, we cannot pass this Store */
                                        if (rel != ir_no_alias)
                                                break;
                                }
                                /* only pure Calls are allowed here, so ignore them */
                        }
                        if (other == NULL) {
                                ldst_info_t *ninfo;
                                phi_entry   *pe;
                                dbg_info    *db;

                                /* for now, we cannot handle more than one input */
                                if (phi_list->next != NULL)
                                        return;

                                /* yep, no aliasing Store found, Load can be moved */
                                DB((dbg, LEVEL_1, "  Found a Load that could be moved: %+F\n", load));

                                db   = get_irn_dbg_info(load);
                                for (pe = phi_list; pe != NULL; pe = pe->next) {
                                        int     pos   = pe->pos;
                                        ir_node *phi  = pe->phi;
                                        ir_node *blk  = get_nodes_block(phi);
                                        ir_node *pred = get_Block_cfgpred_block(blk, pos);
                                        ir_node *irn, *mem;

                                        pe->load = irn = new_rd_Load(db, current_ir_graph, pred, get_Phi_pred(phi, pos), ptr, load_mode);
                                        ninfo = get_ldst_info(irn, phase_obst(&env->ph));

                                        ninfo->projs[pn_Load_M] = mem = new_r_Proj(current_ir_graph, pred, irn, mode_M, pn_Load_M);
                                        set_Phi_pred(phi, pos, mem);

                                        ninfo->projs[pn_Load_res] = new_r_Proj(current_ir_graph, pred, irn, load_mode, pn_Load_res);

                                        DB((dbg, LEVEL_1, "  Created %+F in %+F\n", irn, pred));
                                }

                                /* now kill the old Load */
                                exchange(info->projs[pn_Load_M], get_Load_mem(load));
                                exchange(info->projs[pn_Load_res], ninfo->projs[pn_Load_res]);

                                env->changes |= DF_CHANGED;
                        }
                }
        }
}  /* move_loads_out_of_loops */
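
/*
 * Illustration (hypothetical source): a Load from a global that no Store
 * inside the loop may alias is re-created once in the loop's predecessor
 * block and the in-loop Load is removed, roughly:
 *
 *     while (cond)            int t = g;     -- hoisted Load
 *         use(g);      ==>    while (cond)
 *                                 use(t);
 */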

/**
 * Process a loop SCC.
 *
 * @param pscc  the SCC
 * @param env   the loop environment
 */
static void process_loop(scc *pscc, loop_env *env) {
        ir_node *irn, *next, *header = NULL;
        node_entry *b, *h = NULL;
        int j, only_phi, num_outside, process = 0;
        ir_node *out_rc;

        /* find the header block for this scc */
        for (irn = pscc->head; irn; irn = next) {
                node_entry *e = get_irn_ne(irn, env);
                ir_node *block = get_nodes_block(irn);

                next = e->next;
                b = get_irn_ne(block, env);

                if (header) {
                        if (h->POnum < b->POnum) {
                                header = block;
                                h      = b;
                        }
                } else {
                        header = block;
                        h      = b;
                }
        }

        /* check if this scc contains only Phi, Loads or Stores nodes */
        only_phi    = 1;
        num_outside = 0;
        out_rc      = NULL;
        for (irn = pscc->head; irn; irn = next) {
                node_entry *e = get_irn_ne(irn, env);

                next = e->next;
                switch (get_irn_opcode(irn)) {
                case iro_Call:
                        if (is_Call_pure(irn)) {
                                /* pure calls can be treated like loads */
                                only_phi = 0;
                                break;
                        }
                        /* non-pure calls must be handled like may-alias Stores */
                        goto fail;
                case iro_CopyB:
                        /* cannot handle CopyB yet */
                        goto fail;
                case iro_Load:
                        process = 1;
                        if (get_Load_volatility(irn) == volatility_is_volatile) {
                                /* cannot handle loops with volatile Loads */
                                goto fail;
                        }
                        only_phi = 0;
                        break;
                case iro_Store:
                        if (get_Store_volatility(irn) == volatility_is_volatile) {
                                /* cannot handle loops with volatile Stores */
                                goto fail;
                        }
                        only_phi = 0;
                        break;
                case iro_Phi:
                        for (j = get_irn_arity(irn) - 1; j >= 0; --j) {
                                ir_node *pred  = get_irn_n(irn, j);
                                node_entry *pe = get_irn_ne(pred, env);

                                if (pe->pscc != e->pscc) {
                                        /* not in the same SCC, must be a region const */
                                        if (! is_rc(pred, header)) {
                                                /* not a memory loop */
                                                goto fail;
                                        }
                                        if (! out_rc) {
                                                out_rc = pred;
                                                ++num_outside;
                                        } else if (out_rc != pred) {
                                                ++num_outside;
                                        }
                                }
                        }
                        break;
                default:
                        only_phi = 0;
                        break;
                }
        }
        if (! process)
                goto fail;

        /* found a memory loop */
        DB((dbg, LEVEL_2, "  Found a memory loop:\n  "));
        if (only_phi && num_outside == 1) {
                /* a Phi cycle with only one real predecessor can be collapsed */
                DB((dbg, LEVEL_2, "  Found a USELESS Phi cycle:\n  "));

                for (irn = pscc->head; irn; irn = next) {
                        node_entry *e = get_irn_ne(irn, env);
                        next = e->next;
                        e->header = NULL;
                        exchange(irn, out_rc);
                }
                env->changes |= DF_CHANGED;
                return;
        }

        /* set the header for every node in this scc */
        for (irn = pscc->head; irn; irn = next) {
                node_entry *e = get_irn_ne(irn, env);
                e->header = header;
                next = e->next;
                DB((dbg, LEVEL_2, " %+F,", irn));
        }
        DB((dbg, LEVEL_2, "\n"));

        move_loads_out_of_loops(pscc, env);

fail:
        ;
}  /* process_loop */
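
/*
 * Illustration (graph sketch): a "useless" Phi cycle as collapsed above --
 * memory Phis that only feed each other and have a single region constant
 * predecessor M0 carry no memory operation at all, so every node of the
 * cycle can simply be replaced by M0.
 */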

/**
 * Process a SCC.
 *
 * @param pscc  the SCC
 * @param env   the loop environment
 */
static void process_scc(scc *pscc, loop_env *env) {
        ir_node *head = pscc->head;
        node_entry *e = get_irn_ne(head, env);

#ifdef DEBUG_libfirm
        {
                ir_node *irn, *next;

                DB((dbg, LEVEL_4, " SCC at %p:\n ", pscc));
                for (irn = pscc->head; irn; irn = next) {
                        node_entry *e = get_irn_ne(irn, env);

                        next = e->next;

                        DB((dbg, LEVEL_4, " %+F,", irn));
                }
                DB((dbg, LEVEL_4, "\n"));
        }
#endif

        if (e->next != NULL) {
                /* this SCC has more than one member */
                process_loop(pscc, env);
        }
}  /* process_scc */

/**
 * Do Tarjan's SCC algorithm and drive load/store optimization.
 *
 * @param irn  start at this node
 * @param env  the loop environment
 */
static void dfs(ir_node *irn, loop_env *env)
{
        int i, n;
        node_entry *node = get_irn_ne(irn, env);

        mark_irn_visited(irn);

        node->DFSnum = env->nextDFSnum++;
        node->low    = node->DFSnum;
        push(env, irn);

        /* handle preds */
        if (is_Phi(irn) || is_Sync(irn)) {
                n = get_irn_arity(irn);
                for (i = 0; i < n; ++i) {
                        ir_node *pred = get_irn_n(irn, i);
                        node_entry *o = get_irn_ne(pred, env);

                        if (!irn_visited(pred)) {
                                dfs(pred, env);
                                node->low = MIN(node->low, o->low);
                        }
                        if (o->DFSnum < node->DFSnum && o->in_stack)
                                node->low = MIN(o->DFSnum, node->low);
                }
        } else if (is_fragile_op(irn)) {
                ir_node *pred = get_fragile_op_mem(irn);
                node_entry *o = get_irn_ne(pred, env);

                if (!irn_visited(pred)) {
                        dfs(pred, env);
                        node->low = MIN(node->low, o->low);
                }
                if (o->DFSnum < node->DFSnum && o->in_stack)
                        node->low = MIN(o->DFSnum, node->low);
        } else if (is_Proj(irn)) {
                ir_node *pred = get_Proj_pred(irn);
                node_entry *o = get_irn_ne(pred, env);

                if (!irn_visited(pred)) {
                        dfs(pred, env);
                        node->low = MIN(node->low, o->low);
                }
                if (o->DFSnum < node->DFSnum && o->in_stack)
                        node->low = MIN(o->DFSnum, node->low);
        } else {
                /* IGNORE predecessors */
        }

        if (node->low == node->DFSnum) {
                scc *pscc = phase_alloc(&env->ph, sizeof(*pscc));
                ir_node *x;

                pscc->head = NULL;
                do {
                        node_entry *e;

                        x = pop(env);
                        e = get_irn_ne(x, env);
                        e->pscc    = pscc;
                        e->next    = pscc->head;
                        pscc->head = x;
                } while (x != irn);

                process_scc(pscc, env);
        }
}  /* dfs */
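
/*
 * Note on the invariant used above (standard Tarjan, sketched here for
 * reference): low(n) is the smallest DFS number reachable from n via tree
 * edges plus at most one edge to a node still on the stack. Whenever
 * low(n) == DFSnum(n), n is the root of an SCC, and all nodes popped down
 * to and including n form that SCC.
 */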

/**
 * Do the DFS on the memory edges of a graph.
 *
 * @param irg  the graph to process
 * @param env  the loop environment
 */
static void do_dfs(ir_graph *irg, loop_env *env) {
        ir_graph *rem = current_ir_graph;
        ir_node  *endblk, *end;
        int      i;

        current_ir_graph = irg;
        inc_irg_visited(irg);

        /* visit all memory nodes */
        endblk = get_irg_end_block(irg);
        for (i = get_Block_n_cfgpreds(endblk) - 1; i >= 0; --i) {
                ir_node *pred = get_Block_cfgpred(endblk, i);

                pred = skip_Proj(pred);
                if (is_Return(pred))
                        dfs(get_Return_mem(pred), env);
                else if (is_Raise(pred))
                        dfs(get_Raise_mem(pred), env);
                else if (is_fragile_op(pred))
                        dfs(get_fragile_op_mem(pred), env);
                else {
                        assert(0 && "Unknown EndBlock predecessor");
                }
        }

        /* visit the keep-alives */
        end = get_irg_end(irg);
        for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
                ir_node *ka = get_End_keepalive(end, i);

                if (is_Phi(ka) && !irn_visited(ka))
                        dfs(ka, env);
        }
        current_ir_graph = rem;
}  /* do_dfs */

/**
 * Initialize new phase data. We always do this explicitly, so return NULL here.
 */
static void *init_loop_data(ir_phase *ph, const ir_node *irn, void *data) {
        (void)ph;
        (void)irn;
        (void)data;
        return NULL;
}  /* init_loop_data */

/**
 * Optimize Loads/Stores in loops.
 *
 * @param irg  the graph
 */
static int optimize_loops(ir_graph *irg) {
        loop_env env;

        env.stack         = NEW_ARR_F(ir_node *, 128);
        env.tos           = 0;
        env.nextDFSnum    = 0;
        env.POnum         = 0;
        env.changes       = 0;
        phase_init(&env.ph, "ldstopt", irg, PHASE_DEFAULT_GROWTH, init_loop_data, NULL);

        /* calculate the SCC's and drive loop optimization. */
        do_dfs(irg, &env);

        DEL_ARR_F(env.stack);
        phase_free(&env.ph);

        return env.changes;
}  /* optimize_loops */

/*
 * do the load store optimization
 */
int optimize_load_store(ir_graph *irg) {
        walk_env_t env;

        FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");

        assert(get_irg_phase_state(irg) != phase_building);
        assert(get_irg_pinned(irg) != op_pin_state_floats &&
                "LoadStore optimization needs pinned graph");

        /* we need landing pads */
        remove_critical_cf_edges(irg);

        edges_assure(irg);

        /* for Phi optimization post-dominators are needed ... */
        assure_postdoms(irg);

        if (get_opt_alias_analysis()) {
                assure_irg_entity_usage_computed(irg);
                assure_irp_globals_entity_usage_computed();
        }

        obstack_init(&env.obst);
        env.changes = 0;

        /* init the links, then collect Loads/Stores/Proj's in lists */
        master_visited = 0;
        irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);

        /* now we have collected enough information, optimize */
        irg_walk_graph(irg, NULL, do_load_store_optimize, &env);

        env.changes |= optimize_loops(irg);

        obstack_free(&env.obst, NULL);

        /* Handle graph state */
        if (env.changes) {
                set_irg_outs_inconsistent(irg);
                set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
        }

        if (env.changes & CF_CHANGED) {
                /* this is needed: control flow changed, blocks may have
                   Bad() predecessors now. */
                set_irg_doms_inconsistent(irg);
        }
        return (int) env.changes;
}  /* optimize_load_store */
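
/*
 * Usage sketch (hypothetical driver code, not part of this file): the pass
 * expects a pinned graph and reports through its return value whether the
 * graph changed, e.g.
 *
 *     ir_graph *irg = get_irp_irg(i);
 *     if (optimize_load_store(irg))
 *             local_optimize_graph(irg);    -- clean up after changes
 */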