reenable floating of const functions (don't care about obscure endless loop cases...
[libfirm] / ir / opt / ldstopt.c
/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Load/Store optimizations.
 * @author  Michael Beck
 * @version $Id$
 */
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#include <string.h>

#include "iroptimize.h"
#include "irnode_t.h"
#include "irgraph_t.h"
#include "irmode_t.h"
#include "iropt_t.h"
#include "ircons_t.h"
#include "irgmod.h"
#include "irgwalk.h"
#include "irvrfy.h"
#include "tv_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "irflag_t.h"
#include "array.h"
#include "irhooks.h"
#include "iredges.h"
#include "irtools.h"
#include "opt_polymorphy.h"
#include "irmemory.h"
#include "xmalloc.h"
#include "irphase_t.h"
#include "irgopt.h"
#include "debug.h"

/** The debug handle. */
DEBUG_ONLY(static firm_dbg_module_t *dbg;)

#ifdef DO_CACHEOPT
#include "cacheopt/cachesim.h"
#endif

#undef IMAX
#define IMAX(a,b)       ((a) > (b) ? (a) : (b))

#define MAX_PROJ        IMAX(IMAX(pn_Load_max, pn_Store_max), pn_Call_max)

enum changes_t {
        DF_CHANGED = 1,       /**< data flow changed */
        CF_CHANGED = 2,       /**< control flow changed */
};

/**
 * walker environment
 */
typedef struct _walk_env_t {
        struct obstack obst;          /**< obstack used to allocate the info structs */
        unsigned changes;             /**< a bitmask of graph changes */
} walk_env_t;

/** A Load/Store info. */
typedef struct _ldst_info_t {
        ir_node  *projs[MAX_PROJ+1];  /**< list of Proj's of this node; +1 because
                                           update_projs() allows nr == MAX_PROJ */
        ir_node  *exc_block;          /**< the exception block if available */
        int      exc_idx;             /**< predecessor index in the exception block */
        unsigned visited;             /**< visited counter for breaking loops */
} ldst_info_t;

/**
 * flags for control flow.
 */
enum block_flags_t {
        BLOCK_HAS_COND = 1,      /**< Block has conditional control flow */
        BLOCK_HAS_EXC  = 2       /**< Block has exceptional control flow */
};

/**
 * a Block info.
 */
typedef struct _block_info_t {
        unsigned flags;               /**< flags for the block */
} block_info_t;

/** the master visited flag for loop detection. */
static unsigned master_visited = 0;

#define INC_MASTER()       ++master_visited
#define MARK_NODE(info)    (info)->visited = master_visited
#define NODE_VISITED(info) (info)->visited >= master_visited

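/* Illustration of the visited-counter scheme above: INC_MASTER() starts a
 * fresh traversal round, so no per-node reset is ever needed.
 *
 *     INC_MASTER();            -- round n+1 begins
 *     NODE_VISITED(info);      -- false, info->visited still holds round n
 *     MARK_NODE(info);         -- info->visited = n+1
 *     NODE_VISITED(info);      -- true until the next INC_MASTER()
 */
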
/**
 * get the Load/Store info of a node
 */
static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst) {
        ldst_info_t *info = get_irn_link(node);

        if (! info) {
                info = obstack_alloc(obst, sizeof(*info));
                memset(info, 0, sizeof(*info));
                set_irn_link(node, info);
        }
        return info;
}  /* get_ldst_info */

/**
 * get the Block info of a node
 */
static block_info_t *get_block_info(ir_node *node, struct obstack *obst) {
        block_info_t *info = get_irn_link(node);

        if (! info) {
                info = obstack_alloc(obst, sizeof(*info));
                memset(info, 0, sizeof(*info));
                set_irn_link(node, info);
        }
        return info;
}  /* get_block_info */

/**
 * update the projection info for a Load/Store
 */
static unsigned update_projs(ldst_info_t *info, ir_node *proj)
{
        long nr = get_Proj_proj(proj);

        assert(0 <= nr && nr <= MAX_PROJ && "Wrong proj from LoadStore");

        if (info->projs[nr]) {
                /* there is already one, do CSE */
                exchange(proj, info->projs[nr]);
                return DF_CHANGED;
        }
        else {
                info->projs[nr] = proj;
                return 0;
        }
}  /* update_projs */

/**
 * update the exception block info for a Load/Store node.
 *
 * @param info   the load/store info struct
 * @param block  the exception handler block for this load/store
 * @param pos    the control flow input of the block
 */
static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
{
        assert(info->exc_block == NULL && "more than one exception block found");

        info->exc_block = block;
        info->exc_idx   = pos;
        return 0;
}  /* update_exc */

/** Return the number of uses of an address node */
#define get_irn_n_uses(adr)     get_irn_n_edges(adr)

/**
 * walker, collects all Load/Store/Proj nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
{
        ir_opcode   opcode = get_irn_opcode(node);
        ir_node     *pred, *blk, *pred_blk;
        ldst_info_t *ldst_info;
        walk_env_t  *wenv = env;

        if (opcode == iro_Proj) {
                pred   = get_Proj_pred(node);
                opcode = get_irn_opcode(pred);

                if (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call) {
                        ldst_info = get_ldst_info(pred, &wenv->obst);

                        wenv->changes |= update_projs(ldst_info, node);

                        /*
                         * Place the Proj's in the same block as the
                         * predecessor. This is always ok and prevents
                         * a "non-SSA" form after optimizations if the Proj
                         * is in the wrong block.
                         */
                        blk      = get_nodes_block(node);
                        pred_blk = get_nodes_block(pred);
                        if (blk != pred_blk) {
                                wenv->changes |= DF_CHANGED;
                                set_nodes_block(node, pred_blk);
                        }
                }
        } else if (opcode == iro_Block) {
                int i;

                for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
                        ir_node      *pred_block, *proj;
                        block_info_t *bl_info;
                        int          is_exc = 0;

                        pred = proj = get_Block_cfgpred(node, i);

                        if (is_Proj(proj)) {
                                pred   = get_Proj_pred(proj);
                                is_exc = get_Proj_proj(proj) == pn_Generic_X_except;
                        }

                        /* ignore Bad predecessors, they will be removed later */
                        if (is_Bad(pred))
                                continue;

                        pred_block = get_nodes_block(pred);
                        bl_info    = get_block_info(pred_block, &wenv->obst);

                        if (is_fragile_op(pred) && is_exc)
                                bl_info->flags |= BLOCK_HAS_EXC;
                        else if (is_irn_forking(pred))
                                bl_info->flags |= BLOCK_HAS_COND;

                        opcode = get_irn_opcode(pred);
                        if (is_exc && (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call)) {
                                ldst_info = get_ldst_info(pred, &wenv->obst);

                                wenv->changes |= update_exc(ldst_info, node, i);
                        }
                }
        }
}  /* collect_nodes */

/**
 * Returns an entity if the address ptr points to a constant one.
 *
 * @param ptr  the address
 *
 * @return an entity or NULL
 */
static ir_entity *find_constant_entity(ir_node *ptr)
{
        for (;;) {
                if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
                        ir_entity *ent = get_SymConst_entity(ptr);
                        if (variability_constant == get_entity_variability(ent))
                                return ent;
                        return NULL;
                } else if (is_Sel(ptr)) {
                        ir_entity *ent = get_Sel_entity(ptr);
                        ir_type   *tp  = get_entity_owner(ent);

                        /* Do not fiddle with polymorphism. */
                        if (is_Class_type(get_entity_owner(ent)) &&
                                ((get_entity_n_overwrites(ent)    != 0) ||
                                (get_entity_n_overwrittenby(ent) != 0)   ) )
                                return NULL;

                        if (is_Array_type(tp)) {
                                /* check bounds */
                                int i, n;

                                for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
                                        ir_node *bound;
                                        tarval *tlower, *tupper;
                                        ir_node *index = get_Sel_index(ptr, i);
                                        tarval *tv     = computed_value(index);

                                        /* check if the index is constant */
                                        if (tv == tarval_bad)
                                                return NULL;

                                        bound  = get_array_lower_bound(tp, i);
                                        tlower = computed_value(bound);
                                        bound  = get_array_upper_bound(tp, i);
                                        tupper = computed_value(bound);

                                        if (tlower == tarval_bad || tupper == tarval_bad)
                                                return NULL;

                                        if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
                                                return NULL;
                                        if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
                                                return NULL;

                                        /* ok, bounds check finished */
                                }
                        }

                        if (variability_constant == get_entity_variability(ent))
                                return ent;

                        /* try next */
                        ptr = get_Sel_ptr(ptr);
                } else if (is_Add(ptr)) {
                        ir_node *l = get_Add_left(ptr);
                        ir_node *r = get_Add_right(ptr);

                        if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
                                ptr = l;
                        else if (get_irn_mode(r) == get_irn_mode(ptr) && is_Const(l))
                                ptr = r;
                        else
                                return NULL;

                        /* for now, we support only one addition, reassoc should fold all others */
                        if (! is_SymConst(ptr) && !is_Sel(ptr))
                                return NULL;
                } else if (is_Sub(ptr)) {
                        ir_node *l = get_Sub_left(ptr);
                        ir_node *r = get_Sub_right(ptr);

                        if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
                                ptr = l;
                        else
                                return NULL;
                        /* for now, we support only one subtraction, reassoc should fold all others */
                        if (! is_SymConst(ptr) && !is_Sel(ptr))
                                return NULL;
                } else
                        return NULL;
        }
}  /* find_constant_entity */

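/* Illustrative address shapes accepted by find_constant_entity() (entity
 * names hypothetical, all assumed variability_constant):
 *
 *     SymConst(&tab)                       -> tab
 *     Sel(SymConst(&tab), i), i constant   -> tab, if i is within bounds
 *     Add(SymConst(&tab), Const 12)        -> tab
 *
 * A non-constant index, an out-of-bounds index, or pointer arithmetic on
 * anything but a SymConst/Sel base yields NULL. */
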
/**
 * Return the Selection index of a Sel node from dimension n
 */
static long get_Sel_array_index_long(ir_node *n, int dim) {
        ir_node *index = get_Sel_index(n, dim);
        assert(is_Const(index));
        return get_tarval_long(get_Const_tarval(index));
}  /* get_Sel_array_index_long */

/**
 * Returns the accessed component graph path for a
 * node computing an address.
 *
 * @param ptr    the node computing the address
 * @param depth  current depth in steps upward from the root
 *               of the address
 */
static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
        compound_graph_path *res = NULL;
        ir_entity           *root, *field, *ent;
        int                 path_len, pos, idx;
        tarval              *tv;
        ir_type             *tp;

        if (is_SymConst(ptr)) {
                /* a SymConst. If the depth is 0, this is an access to a global
                 * entity and we don't need a component path, else we know
                 * at least its length.
                 */
                assert(get_SymConst_kind(ptr) == symconst_addr_ent);
                root = get_SymConst_entity(ptr);
                res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
        } else if (is_Sel(ptr)) {
                /* it's a Sel, go up until we find the root */
                res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);
                if (res == NULL)
                        return NULL;

                /* fill up the step in the path at the current position */
                field    = get_Sel_entity(ptr);
                path_len = get_compound_graph_path_length(res);
                pos      = path_len - depth - 1;
                set_compound_graph_path_node(res, pos, field);

                if (is_Array_type(get_entity_owner(field))) {
                        assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
                        set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
                }
        } else if (is_Add(ptr)) {
                ir_node *l    = get_Add_left(ptr);
                ir_node *r    = get_Add_right(ptr);
                ir_mode *mode = get_irn_mode(ptr);
                tarval  *tmp;

                if (is_Const(r) && get_irn_mode(l) == mode) {
                        ptr = l;
                        tv  = get_Const_tarval(r);
                } else {
                        ptr = r;
                        tv  = get_Const_tarval(l);
                }
ptr_arith:
                mode = get_tarval_mode(tv);
                tmp  = tv;

                /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
                if (is_Sel(ptr)) {
                        field = get_Sel_entity(ptr);
                } else {
                        field = get_SymConst_entity(ptr);
                }
                idx = 0;
                for (ent = field;;) {
                        unsigned size;
                        tarval   *sz, *tv_index, *tlower, *tupper;
                        ir_node  *bound;

                        tp = get_entity_type(ent);
                        if (! is_Array_type(tp))
                                break;
                        ent = get_array_element_entity(tp);
                        size = get_type_size_bytes(get_entity_type(ent));
                        sz   = new_tarval_from_long(size, mode);

                        tv_index = tarval_div(tmp, sz);
                        tmp      = tarval_mod(tmp, sz);

                        if (tv_index == tarval_bad || tmp == tarval_bad)
                                return NULL;

                        assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
                        bound  = get_array_lower_bound(tp, 0);
                        tlower = computed_value(bound);
                        bound  = get_array_upper_bound(tp, 0);
                        tupper = computed_value(bound);

                        if (tlower == tarval_bad || tupper == tarval_bad)
                                return NULL;

                        if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
                                return NULL;
                        if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
                                return NULL;

                        /* ok, bounds check finished */
                        ++idx;
                }
                if (! tarval_is_null(tmp)) {
                        /* access to some struct/union member */
                        return NULL;
                }

                /* should be at least ONE array */
                if (idx == 0)
                        return NULL;

                res = rec_get_accessed_path(ptr, depth + idx);
                if (res == NULL)
                        return NULL;

                path_len = get_compound_graph_path_length(res);
                pos      = path_len - depth - idx;

                for (ent = field;;) {
                        unsigned size;
                        tarval   *sz, *tv_index;
                        long     index;

                        tp = get_entity_type(ent);
                        if (! is_Array_type(tp))
                                break;
                        ent = get_array_element_entity(tp);
                        set_compound_graph_path_node(res, pos, ent);

                        size = get_type_size_bytes(get_entity_type(ent));
                        sz   = new_tarval_from_long(size, mode);

                        tv_index = tarval_div(tv, sz);
                        tv       = tarval_mod(tv, sz);

                        /* worked above, should work again */
                        assert(tv_index != tarval_bad && tv != tarval_bad);

                        /* bounds already checked above */
                        index = get_tarval_long(tv_index);
                        set_compound_graph_path_array_index(res, pos, index);
                        ++pos;
                }
        } else if (is_Sub(ptr)) {
                ir_node *l = get_Sub_left(ptr);
                ir_node *r = get_Sub_right(ptr);

                ptr = l;
                tv  = get_Const_tarval(r);
                tv  = tarval_neg(tv);
                goto ptr_arith;
        }
        return res;
}  /* rec_get_accessed_path */

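/* Worked example for the ptr_arith part above (types hypothetical): given
 * "static const int a[10];" and the address Add(SymConst(&a), Const 12),
 * the element size is 4, so tv_index = 12/4 = 3 and tmp = 12%4 = 0.  The
 * remainder is null and one array level was consumed (idx == 1), hence the
 * resulting path is a[3].  An offset of 14 would leave tmp == 2, an access
 * into the middle of an element, and is rejected. */
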
/**
 * Returns an access path or NULL.  The access path is only
 * valid if the graph is in phase_high and _no_ address computation is used.
 */
static compound_graph_path *get_accessed_path(ir_node *ptr) {
        compound_graph_path *gr = rec_get_accessed_path(ptr, 0);
        return gr;
}  /* get_accessed_path */

typedef struct path_entry {
        ir_entity         *ent;
        struct path_entry *next;
        long              index;
} path_entry;

static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) {
        path_entry       entry, *p;
        ir_entity        *ent, *field;
        ir_initializer_t *initializer;
        tarval           *tv;
        ir_type          *tp;
        unsigned         n;

        entry.next = next;
        if (is_SymConst(ptr)) {
                /* found the root */
                ent         = get_SymConst_entity(ptr);
                initializer = get_entity_initializer(ent);
                for (p = next; p != NULL;) {
                        if (initializer->kind != IR_INITIALIZER_COMPOUND)
                                return NULL;
                        n  = get_initializer_compound_n_entries(initializer);
                        tp = get_entity_type(ent);

                        if (is_Array_type(tp)) {
                                ent = get_array_element_entity(tp);
                                if (ent != p->ent) {
                                        /* a missing [0] */
                                        if (0 >= n)
                                                return NULL;
                                        initializer = get_initializer_compound_value(initializer, 0);
                                        continue;
                                }
                        }
                        if (p->index >= (int) n)
                                return NULL;
                        initializer = get_initializer_compound_value(initializer, p->index);

                        ent = p->ent;
                        p   = p->next;
                }
                tp = get_entity_type(ent);
                while (is_Array_type(tp)) {
                        ent = get_array_element_entity(tp);
                        tp = get_entity_type(ent);
                        /* a missing [0] */
                        n  = get_initializer_compound_n_entries(initializer);
                        if (0 >= n)
                                return NULL;
                        initializer = get_initializer_compound_value(initializer, 0);
                }

                switch (initializer->kind) {
                case IR_INITIALIZER_CONST:
                        return get_initializer_const_value(initializer);
                case IR_INITIALIZER_TARVAL:
                case IR_INITIALIZER_NULL:
                default:
                        return NULL;
                }
        } else if (is_Sel(ptr)) {
                entry.ent = field = get_Sel_entity(ptr);
                tp = get_entity_owner(field);
                if (is_Array_type(tp)) {
                        assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
                        entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
                } else {
                        int i, n_members = get_compound_n_members(tp);
                        for (i = 0; i < n_members; ++i) {
                                if (get_compound_member(tp, i) == field)
                                        break;
                        }
                        if (i >= n_members) {
                                /* not found: should NOT happen */
                                return NULL;
                        }
                        entry.index = i;
                }
                return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
        } else if (is_Add(ptr)) {
                ir_node  *l = get_Add_left(ptr);
                ir_node  *r = get_Add_right(ptr);
                ir_mode  *mode;
                unsigned pos;

                if (is_Const(r)) {
                        ptr = l;
                        tv  = get_Const_tarval(r);
                } else {
                        ptr = r;
                        tv  = get_Const_tarval(l);
                }
ptr_arith:
                mode = get_tarval_mode(tv);

                /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
                if (is_Sel(ptr)) {
                        field = get_Sel_entity(ptr);
                } else {
                        field = get_SymConst_entity(ptr);
                }

                /* count needed entries */
                pos = 0;
                for (ent = field;;) {
                        tp = get_entity_type(ent);
                        if (! is_Array_type(tp))
                                break;
                        ent = get_array_element_entity(tp);
                        ++pos;
                }
                /* should be at least ONE entry */
                if (pos == 0)
                        return NULL;

                /* allocate the right number of entries */
                NEW_ARR_A(path_entry, p, pos);

                /* fill them up */
                pos = 0;
                for (ent = field;;) {
                        unsigned size;
                        tarval   *sz, *tv_index, *tlower, *tupper;
                        long     index;
                        ir_node  *bound;

                        tp = get_entity_type(ent);
                        if (! is_Array_type(tp))
                                break;
                        ent = get_array_element_entity(tp);
                        p[pos].ent  = ent;
                        p[pos].next = &p[pos + 1];

                        size = get_type_size_bytes(get_entity_type(ent));
                        sz   = new_tarval_from_long(size, mode);

                        tv_index = tarval_div(tv, sz);
                        tv       = tarval_mod(tv, sz);

                        if (tv_index == tarval_bad || tv == tarval_bad)
                                return NULL;

                        assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
                        bound  = get_array_lower_bound(tp, 0);
                        tlower = computed_value(bound);
                        bound  = get_array_upper_bound(tp, 0);
                        tupper = computed_value(bound);

                        if (tlower == tarval_bad || tupper == tarval_bad)
                                return NULL;

                        if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
                                return NULL;
                        if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
                                return NULL;

                        /* ok, bounds check finished */
                        index = get_tarval_long(tv_index);
                        p[pos].index = index;
                        ++pos;
                }
                if (! tarval_is_null(tv)) {
                        /* hmm, wrong access */
                        return NULL;
                }
                p[pos - 1].next = next;
                return rec_find_compound_ent_value(ptr, p);
        } else if (is_Sub(ptr)) {
                ir_node *l = get_Sub_left(ptr);
                ir_node *r = get_Sub_right(ptr);

                ptr = l;
                tv  = get_Const_tarval(r);
                tv  = tarval_neg(tv);
                goto ptr_arith;
        }
        return NULL;
}

static ir_node *find_compound_ent_value(ir_node *ptr) {
        return rec_find_compound_ent_value(ptr, NULL);
}

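/* Sketch of a lookup done by find_compound_ent_value() (source and names
 * hypothetical): for "static const struct { int x, y; } s = { 1, 2 };" and
 * the address Sel(SymConst(&s), y), the Sel case records entry.index = 1,
 * the SymConst case then descends one level into the compound initializer
 * and returns the Const node holding 2. */
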
/* forward */
static void reduce_adr_usage(ir_node *ptr);

/**
 * Update a Load that may have lost its usage.
 */
static void handle_load_update(ir_node *load) {
        ldst_info_t *info = get_irn_link(load);

        /* do NOT touch volatile loads for now */
        if (get_Load_volatility(load) == volatility_is_volatile)
                return;

        if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
                ir_node *ptr = get_Load_ptr(load);
                ir_node *mem = get_Load_mem(load);

                /* a Load whose value is neither used nor exception checked, remove it */
                exchange(info->projs[pn_Load_M], mem);
                if (info->projs[pn_Load_X_regular])
                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
                kill_node(load);
                reduce_adr_usage(ptr);
        }
}  /* handle_load_update */

/**
 * A use of an address node has vanished. Check if this was a Proj
 * node and update the counters.
 */
static void reduce_adr_usage(ir_node *ptr) {
        if (is_Proj(ptr)) {
                if (get_irn_n_edges(ptr) <= 0) {
                        /* this Proj is dead now */
                        ir_node *pred = get_Proj_pred(ptr);

                        if (is_Load(pred)) {
                                ldst_info_t *info = get_irn_link(pred);
                                info->projs[get_Proj_proj(ptr)] = NULL;

                                /* this node lost its result Proj, handle that */
                                handle_load_update(pred);
                        }
                }
        }
}  /* reduce_adr_usage */

/**
 * Check, if an already existing value of mode old_mode can be converted
 * into the needed one new_mode without loss.
 */
static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode) {
        if (old_mode == new_mode)
                return 1;

        /* if both modes are two's complement ones, we can always convert the
           stored value into the needed one. */
        if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
                  get_mode_arithmetic(old_mode) == irma_twos_complement &&
                  get_mode_arithmetic(new_mode) == irma_twos_complement)
                return 1;
        return 0;
}  /* can_use_stored_value */

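/* For example, a value stored in mode_Is (32-bit integer) can also serve a
 * later mode_Hs (16-bit) load: both are two's complement and the stored
 * value is at least as wide.  A mode_F load of the same bits cannot reuse
 * it, since floats do not use two's complement arithmetic. */
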
/**
 * Check whether a Call is at least pure, i.e. it only reads memory.
 */
static unsigned is_Call_pure(ir_node *call) {
        ir_type *call_tp = get_Call_type(call);
        unsigned prop = get_method_additional_properties(call_tp);

        /* check first the call type */
        if ((prop & (mtp_property_const|mtp_property_pure)) == 0) {
                /* try the called entity */
                ir_node *ptr = get_Call_ptr(call);

                if (is_Global(ptr)) {
                        ir_entity *ent = get_Global_entity(ptr);

                        prop = get_entity_additional_properties(ent);
                }
        }
        return (prop & (mtp_property_const|mtp_property_pure)) != 0;
}  /* is_Call_pure */

static ir_node *get_base_and_offset(ir_node *ptr, long *pOffset)
{
        ir_mode *mode  = get_irn_mode(ptr);
        long    offset = 0;

        /* TODO: long might not be enough, we should probably use some tarval thingy... */
        for (;;) {
                if (is_Add(ptr)) {
                        ir_node *l = get_Add_left(ptr);
                        ir_node *r = get_Add_right(ptr);

                        if (get_irn_mode(l) != mode || !is_Const(r))
                                break;

                        offset += get_tarval_long(get_Const_tarval(r));
                        ptr     = l;
                } else if (is_Sub(ptr)) {
                        ir_node *l = get_Sub_left(ptr);
                        ir_node *r = get_Sub_right(ptr);

                        if (get_irn_mode(l) != mode || !is_Const(r))
                                break;

                        offset -= get_tarval_long(get_Const_tarval(r));
                        ptr     = l;
                } else if (is_Sel(ptr)) {
                        ir_entity *ent = get_Sel_entity(ptr);
                        ir_type   *tp  = get_entity_owner(ent);

                        if (is_Array_type(tp)) {
                                int     size;
                                ir_node *index;

                                /* only one dimensional arrays yet */
                                if (get_Sel_n_indexs(ptr) != 1)
                                        break;
                                index = get_Sel_index(ptr, 0);
                                if (! is_Const(index))
                                        break;

                                tp = get_entity_type(ent);
                                if (get_type_state(tp) != layout_fixed)
                                        break;

                                size    = get_type_size_bytes(tp);
                                offset += size * get_tarval_long(get_Const_tarval(index));
                        } else {
                                if (get_type_state(tp) != layout_fixed)
                                        break;
                                offset += get_entity_offset(ent);
                        }
                        ptr = get_Sel_ptr(ptr);
                } else
                        break;
        }

        *pOffset = offset;
        return ptr;
}

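/* Worked example (entity offsets hypothetical): for the address
 * Add(Sel(p, member_at_offset_8), Const 4) the loop first peels the Add
 * (offset += 4), then the Sel (offset += 8), and stops at the base p,
 * storing *pOffset = 12.  try_load_after_store() below compares Loads and
 * Stores by exactly this (base, offset) pair. */
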
static int try_load_after_store(ir_node *load,
                ir_node *load_base_ptr, long load_offset, ir_node *store)
{
        ldst_info_t *info;
        ir_node *store_ptr      = get_Store_ptr(store);
        long     store_offset;
        ir_node *store_base_ptr = get_base_and_offset(store_ptr, &store_offset);
        ir_node *store_value;
        ir_mode *store_mode;
        ir_node *load_ptr;
        ir_mode *load_mode;
        long     load_mode_len;
        long     store_mode_len;
        long     delta;
        int      res;

        if (load_base_ptr != store_base_ptr)
                return 0;

        load_mode      = get_Load_mode(load);
        load_mode_len  = get_mode_size_bytes(load_mode);
        store_mode     = get_irn_mode(get_Store_value(store));
        store_mode_len = get_mode_size_bytes(store_mode);
        delta          = load_offset - store_offset;
        if (delta < 0 || delta + load_mode_len > store_mode_len)
                return 0;

        if (get_mode_arithmetic(store_mode) != irma_twos_complement ||
            get_mode_arithmetic(load_mode)  != irma_twos_complement)
                return 0;

        store_value = get_Store_value(store);

        /* produce a shift to adjust offset delta */
        if (delta > 0) {
                ir_node *cnst;

                /* FIXME: only true for little endian */
                cnst        = new_Const_long(mode_Iu, delta * 8);
                store_value = new_r_Shr(current_ir_graph, get_nodes_block(load),
                                        store_value, cnst, store_mode);
        }

        /* add a Conv if needed */
        if (store_mode != load_mode) {
                store_value = new_r_Conv(current_ir_graph, get_nodes_block(load),
                                         store_value, load_mode);
        }

        DBG_OPT_RAW(load, store_value);

        info = get_irn_link(load);
        if (info->projs[pn_Load_M])
                exchange(info->projs[pn_Load_M], get_Load_mem(load));

        res = 0;
        /* no exception */
        if (info->projs[pn_Load_X_except]) {
                exchange(info->projs[pn_Load_X_except], new_Bad());
                res |= CF_CHANGED;
        }
        if (info->projs[pn_Load_X_regular]) {
                exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
                res |= CF_CHANGED;
        }

        if (info->projs[pn_Load_res])
                exchange(info->projs[pn_Load_res], store_value);

        load_ptr = get_Load_ptr(load);
        kill_node(load);
        reduce_adr_usage(load_ptr);
        return res | DF_CHANGED;
}

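/* Example of the delta shift above (little endian assumed, see the FIXME):
 * after a 32-bit store of v at offset 0, an 8-bit load at offset 1 has
 * delta == 1 and becomes Conv(Shr(v, Const 8), load_mode) -- shift the
 * wanted byte down, then narrow to the load mode. */
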
/**
 * Follow the memory chain as long as there are only Loads,
 * alias free Stores, and constant Calls and try to replace the
 * current Load by a previous one.
 * Note that in unreachable loops it might happen that we reach the
 * same Load again, or fall into a cycle.
 * We break such cycles using a special visited flag.
 *
 * INC_MASTER() must be called before diving into the chain.
 */
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
        unsigned    res = 0;
        ldst_info_t *info = get_irn_link(load);
        ir_node     *pred;
        ir_node     *ptr       = get_Load_ptr(load);
        ir_node     *mem       = get_Load_mem(load);
        ir_mode     *load_mode = get_Load_mode(load);

        for (pred = curr; load != pred; ) {
                ldst_info_t *pred_info = get_irn_link(pred);

                /*
                 * a Load immediately after a Store -- a read after write.
                 * We may remove the Load, if both Load & Store do not have an
                 * exception handler OR they are in the same MacroBlock. In the latter
                 * case the Load cannot throw an exception when the previous Store was
                 * quiet.
                 *
                 * Why do we need to check for the Store exception? If the Store cannot
                 * be executed (ROM) the exception handler might simply jump into
                 * the load MacroBlock :-(
                 * We could make it a little bit better if we would know that the
                 * exception handler of the Store jumps directly to the end...
                 */
                if (is_Store(pred) && ((pred_info->projs[pn_Store_X_except] == NULL
                                && info->projs[pn_Load_X_except] == NULL)
                                || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)))
                {
                        long    load_offset;
                        ir_node *base_ptr = get_base_and_offset(ptr, &load_offset);
                        int     changes   = try_load_after_store(load, base_ptr, load_offset, pred);

                        if (changes != 0)
                                return res | changes;
                } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
                           can_use_stored_value(get_Load_mode(pred), load_mode)) {
                        /*
                         * a Load after a Load -- a read after read.
                         * We may remove the second Load, if it does not have an exception handler
                         * OR they are in the same MacroBlock. In the latter case the Load cannot
                         * throw an exception when the previous Load was quiet.
                         *
                         * Here, there is no need to check if the previous Load has an exception
                         * handler because they would have exactly the same exception...
                         */
                        if (info->projs[pn_Load_X_except] == NULL || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)) {
                                ir_node *value;

                                DBG_OPT_RAR(load, pred);

                                /* the result is used */
                                if (info->projs[pn_Load_res]) {
                                        if (pred_info->projs[pn_Load_res] == NULL) {
                                                /* create a new Proj again */
                                                pred_info->projs[pn_Load_res] = new_r_Proj(current_ir_graph, get_nodes_block(pred), pred, get_Load_mode(pred), pn_Load_res);
                                        }
                                        value = pred_info->projs[pn_Load_res];

                                        /* add a Conv if needed */
                                        if (get_Load_mode(pred) != load_mode) {
                                                value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
                                        }

                                        exchange(info->projs[pn_Load_res], value);
                                }

                                if (info->projs[pn_Load_M])
                                        exchange(info->projs[pn_Load_M], mem);

                                /* no exception */
                                if (info->projs[pn_Load_X_except]) {
                                        exchange(info->projs[pn_Load_X_except], new_Bad());
                                        res |= CF_CHANGED;
                                }
                                if (info->projs[pn_Load_X_regular]) {
                                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
                                        res |= CF_CHANGED;
                                }

                                kill_node(load);
                                reduce_adr_usage(ptr);
                                return res | DF_CHANGED;
                        }
                }

                if (is_Store(pred)) {
                        /* check if we can pass through this store */
                        ir_alias_relation rel = get_alias_relation(
                                current_ir_graph,
                                get_Store_ptr(pred),
                                get_irn_mode(get_Store_value(pred)),
                                ptr, load_mode);
                        /* if there might be an alias, we cannot pass this Store */
                        if (rel != ir_no_alias)
                                break;
                        pred = skip_Proj(get_Store_mem(pred));
                } else if (is_Load(pred)) {
                        pred = skip_Proj(get_Load_mem(pred));
                } else if (is_Call(pred)) {
                        if (is_Call_pure(pred)) {
                                /* The called graph is at least pure, so there are no Store's
                                   in it. We can handle it like a Load and skip it. */
                                pred = skip_Proj(get_Call_mem(pred));
                        } else {
                                /* there might be Store's in the graph, stop here */
                                break;
                        }
                } else {
                        /* follow only Load chains */
                        break;
                }

                /* check for cycles */
                if (NODE_VISITED(pred_info))
                        break;
                MARK_NODE(pred_info);
        }

        if (is_Sync(pred)) {
                int i;

                /* handle all Sync predecessors */
                for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
                        res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
                        if (res)
                                return res;
                }
        }

        return res;
}  /* follow_Mem_chain */

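/* When the walk above ends at a Sync, the memory state was split, e.g.
 *
 *     Load -> ... -> Sync(Store_mem, Call_mem)
 *
 * and the recursion retries the search along each incoming memory chain
 * separately; the first chain that yields a change wins. */
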
/*
 * Check if we can replace the load by a given const from
 * the const code irg.
 */
ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c) {
        ir_mode *c_mode = get_irn_mode(c);
        ir_mode *l_mode = get_Load_mode(load);
        ir_node *res    = NULL;

        if (c_mode != l_mode) {
                /* check, if the mode matches OR can be easily converted into it */
                if (is_reinterpret_cast(c_mode, l_mode)) {
                        /* we can safely cast */
                        dbg_info *dbg   = get_irn_dbg_info(load);
                        ir_node  *block = get_nodes_block(load);

                        /* copy the value from the const code irg and cast it */
                        res = copy_const_value(dbg, c);
                        res = new_rd_Conv(dbg, current_ir_graph, block, res, l_mode);
                }
        } else {
                /* copy the value from the const code irg */
                res = copy_const_value(get_irn_dbg_info(load), c);
        }
        return res;
}  /* can_replace_load_by_const */

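/* Note that the NULL result doubles as "not replaceable": if the modes
 * differ and are not reinterpret-castable (say, a mode_F constant for a
 * mode_Is load), res stays NULL and the caller keeps the Load. */
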
/**
 * optimize a Load
 *
 * @param load  the Load node
 */
static unsigned optimize_load(ir_node *load)
{
        ldst_info_t *info = get_irn_link(load);
        ir_node     *mem, *ptr, *value;
        ir_entity   *ent;
        long        dummy;
        unsigned    res = 0;

        /* do NOT touch volatile loads for now */
        if (get_Load_volatility(load) == volatility_is_volatile)
                return 0;

        /* the address of the load to be optimized */
        ptr = get_Load_ptr(load);

        /*
         * Check if we can remove the exception from a Load:
         * This can be done, if the address is from a Sel(Alloc) and
         * the Sel type is a subtype of the allocated type.
         *
         * This optimizes some often used OO constructs,
         * like x = new O; x->t;
         */
        if (info->projs[pn_Load_X_except]) {
                ir_node *addr = ptr;

                /* find base address */
                while (is_Sel(addr))
                        addr = get_Sel_ptr(addr);
                if (is_Alloc(skip_Proj(skip_Cast(addr)))) {
                        /* simple case: a direct load after an Alloc. A Firm Alloc throws
                         * an exception in case of out-of-memory. So, there is no way for an
                         * exception in this load.
                         * This code is constructed by the "exception lowering" in the Jack compiler.
                         */
                        exchange(info->projs[pn_Load_X_except], new_Bad());
                        info->projs[pn_Load_X_except] = NULL;
                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
                        info->projs[pn_Load_X_regular] = NULL;
                        res |= CF_CHANGED;
                }
        }

        /* The mem of the Load. Must still be returned after optimization. */
        mem  = get_Load_mem(load);

        if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
                /* a Load whose value is neither used nor exception checked, remove it */
                exchange(info->projs[pn_Load_M], mem);

                if (info->projs[pn_Load_X_regular]) {
                        /* should not happen, but if it does, remove it */
                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
                        res |= CF_CHANGED;
                }
                kill_node(load);
                reduce_adr_usage(ptr);
                return res | DF_CHANGED;
        }

        /* Load from a constant polymorphic field, where we can resolve
           polymorphism. */
        value = transform_polymorph_Load(load);
        if (value == load) {
                value = NULL;
                /* check if we can determine the entity that will be loaded */
                ent = find_constant_entity(ptr);
                if (ent != NULL) {
                        if ((allocation_static == get_entity_allocation(ent)) &&
                                (visibility_external_allocated != get_entity_visibility(ent))) {
                                /* a static allocation that is not external: there should be NO exception
                                 * when loading even if we cannot replace the load itself. */

                                /* no exception, clear the info field as it might be checked later again */
                                if (info->projs[pn_Load_X_except]) {
                                        exchange(info->projs[pn_Load_X_except], new_Bad());
                                        info->projs[pn_Load_X_except] = NULL;
                                        res |= CF_CHANGED;
                                }
                                if (info->projs[pn_Load_X_regular]) {
                                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
                                        info->projs[pn_Load_X_regular] = NULL;
                                        res |= CF_CHANGED;
                                }

                                if (variability_constant == get_entity_variability(ent)) {
                                        if (is_atomic_entity(ent)) {
                                                /* Might not be atomic after
                                                   lowering of Sels.  In this
                                                   case we could also load, but
                                                   it's more complicated. */
                                                /* simpler case: we load the content of a constant value:
                                                 * replace it by the constant itself
                                                 */
                                                value = get_atomic_ent_value(ent);
                                        } else {
                                                if (ent->has_initializer) {
                                                        /* new style initializer */
                                                        value = find_compound_ent_value(ptr);
                                                } else {
                                                        /* old style initializer */
                                                        compound_graph_path *path = get_accessed_path(ptr);

                                                        if (path != NULL) {
                                                                assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));

                                                                value = get_compound_ent_value_by_path(ent, path);
                                                                DB((dbg, LEVEL_1, "  Constant access at %F%F resulted in %+F\n", ent, path, value));
                                                                free_compound_graph_path(path);
                                                        }
                                                }
                                        }
                                        if (value != NULL)
                                                value = can_replace_load_by_const(load, value);
                                }
                        }
                }
        }
        if (value != NULL) {
                /* we completely replace the load by this value */
                if (info->projs[pn_Load_X_except]) {
                        exchange(info->projs[pn_Load_X_except], new_Bad());
                        info->projs[pn_Load_X_except] = NULL;
                        res |= CF_CHANGED;
                }
                if (info->projs[pn_Load_X_regular]) {
                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
                        info->projs[pn_Load_X_regular] = NULL;
                        res |= CF_CHANGED;
                }
                if (info->projs[pn_Load_M]) {
                        exchange(info->projs[pn_Load_M], mem);
                        res |= DF_CHANGED;
                }
                if (info->projs[pn_Load_res]) {
                        exchange(info->projs[pn_Load_res], value);
                        res |= DF_CHANGED;
                }
                kill_node(load);
                reduce_adr_usage(ptr);
                return res;
        }

        /* Check, if the address of this load is used more than once.
         * If not, this Load cannot be removed in any case. */
        if (get_irn_n_uses(ptr) <= 1 && get_irn_n_uses(get_base_and_offset(ptr, &dummy)) <= 1)
                return res;

        /*
         * follow the memory chain as long as there are only Loads
         * and try to replace the current Load or Store by a previous one.
         * Note that in unreachable loops it might happen that we reach the
         * same Load again, or fall into a cycle.
         * We break such cycles using a special visited flag.
         */
        INC_MASTER();
        res = follow_Mem_chain(load, skip_Proj(mem));
        return res;
}  /* optimize_load */

/**
 * Check whether a value of mode new_mode would completely overwrite a value
 * of mode old_mode in memory.
 */
static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
{
        return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
}  /* is_completely_overwritten */

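/* For example, a 32-bit store followed by a 64-bit store to the same
 * address makes the first store dead (64 >= 32), while a following 16-bit
 * store would leave the upper bytes of the 32-bit value live. */
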
1256 /**
1257  * follow the memory chain as long as there are only Loads and alias free Stores.
1258  *
1259  * INC_MASTER() must be called before dive into
1260  */
1261 static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
1262         unsigned res = 0;
1263         ldst_info_t *info = get_irn_link(store);
1264         ir_node *pred;
1265         ir_node *ptr = get_Store_ptr(store);
1266         ir_node *mem = get_Store_mem(store);
1267         ir_node *value = get_Store_value(store);
1268         ir_mode *mode  = get_irn_mode(value);
1269         ir_node *block = get_nodes_block(store);
1270         ir_node *mblk  = get_Block_MacroBlock(block);
1271
1272         for (pred = curr; pred != store;) {
1273                 ldst_info_t *pred_info = get_irn_link(pred);
1274
1275                 /*
1276                  * BEWARE: one might think that checking the modes is useless, because
1277                  * if the pointers are identical, they refer to the same object.
1278                  * This is only true in strongly typed languages, not in C, where the
1279                  * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b; ...
1280                  * However, if the mode that is written has a bigger or equal size than the
1281                  * old one, the old value is completely overwritten and can be killed ...
1282                  */
1283                 if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
1284                     get_nodes_MacroBlock(pred) == mblk &&
1285                     is_completely_overwritten(get_irn_mode(get_Store_value(pred)), mode)) {
1286                         /*
1287                          * a Store after a Store in the same MacroBlock -- a write after write.
1288                          * We may remove the first Store if it does not have an exception handler.
1289                          *
1290                          * TODO: what if both have the same exception handler?
1291                          */
1292                         if (get_Store_volatility(pred) != volatility_is_volatile && !pred_info->projs[pn_Store_X_except]) {
1293                                 DBG_OPT_WAW(pred, store);
1294                                 exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
1295                                 kill_node(pred);
1296                                 reduce_adr_usage(ptr);
1297                                 return DF_CHANGED;
1298                         }
1299                 } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
1300                            value == pred_info->projs[pn_Load_res]) {
1301                         /*
1302                          * a Store of a value just loaded from the same address
1303                          * -- a write after read.
1304                          * We may remove the Store, if it does not have an exception
1305                          * handler.
1306                          */
1307                         if (! info->projs[pn_Store_X_except]) {
1308                                 DBG_OPT_WAR(store, pred);
1309                                 exchange(info->projs[pn_Store_M], mem);
1310                                 kill_node(store);
1311                                 reduce_adr_usage(ptr);
1312                                 return DF_CHANGED;
1313                         }
1314                 }
1315
1316                 if (is_Store(pred)) {
1317                         /* check if we can pass through this store */
1318                         ir_alias_relation rel = get_alias_relation(
1319                                 current_ir_graph,
1320                                 get_Store_ptr(pred),
1321                                 get_irn_mode(get_Store_value(pred)),
1322                                 ptr, mode);
1323                         /* if there might be an alias, we cannot pass this Store */
1324                         if (rel != ir_no_alias)
1325                                 break;
1326                         pred = skip_Proj(get_Store_mem(pred));
1327                 } else if (is_Load(pred)) {
1328                         ir_alias_relation rel = get_alias_relation(
1329                                 current_ir_graph, get_Load_ptr(pred), get_Load_mode(pred),
1330                                 ptr, mode);
1331                         if (rel != ir_no_alias)
1332                                 break;
1333
1334                         pred = skip_Proj(get_Load_mem(pred));
1335                 } else {
1336                         /* we can only follow Load and Store chains */
1337                         break;
1338                 }
1339
1340                 /* check for cycles */
1341                 if (NODE_VISITED(pred_info))
1342                         break;
1343                 MARK_NODE(pred_info);
1344         }
1345
1346         if (is_Sync(pred)) {
1347                 int i;
1348
1349                 /* handle all Sync predecessors */
1350                 for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
1351                         res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
1352                         if (res)
1353                                 break;
1354                 }
1355         }
1356         return res;
1357 }  /* follow_Mem_chain_for_Store */
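/*
 * Source-level sketch of the two removals above (hypothetical code;
 * assumes the accesses in between provably do not alias with 'x'):
 *
 *   x = 7; ...; x = 8;     ==>   ...; x = 8;     (write after write)
 *   t = x; ...; x = t;     ==>   t = x; ...;     (write after read)
 */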
1358
1359 /** find entity used as base for an address calculation */
1360 static ir_entity *find_entity(ir_node *ptr)
1361 {
1362         switch (get_irn_opcode(ptr)) {
1363         case iro_SymConst:
1364                 return get_SymConst_entity(ptr);
1365         case iro_Sel: {
1366                 ir_node *pred = get_Sel_ptr(ptr);
1367                 if (get_irg_frame(get_irn_irg(ptr)) == pred)
1368                         return get_Sel_entity(ptr);
1369
1370                 return find_entity(pred);
1371         }
1372         case iro_Sub:
1373         case iro_Add: {
1374                 ir_node *left = get_binop_left(ptr);
1375                 ir_node *right;
1376                 if (mode_is_reference(get_irn_mode(left)))
1377                         return find_entity(left);
1378                 right = get_binop_right(ptr);
1379                 if (mode_is_reference(get_irn_mode(right)))
1380                         return find_entity(right);
1381                 return NULL;
1382         }
1383         default:
1384                 return NULL;
1385         }
1386 }
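/*
 * Address shapes recognized above (sketch): a direct SymConst, a Sel of
 * the current frame, and pointer arithmetic around a reference value:
 *
 *   &glob          SymConst(glob)
 *   &local         Sel(Frame, local)
 *   &glob + off    Add(SymConst(glob), Const(off))
 *
 * Anything else yields NULL and the caller skips the entity-based checks.
 */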
1387
1388 /**
1389  * optimize a Store
1390  *
1391  * @param store  the Store node
1392  */
1393 static unsigned optimize_store(ir_node *store) {
1394         ir_node   *ptr;
1395         ir_node   *mem;
1396         ir_entity *entity;
1397
1398         if (get_Store_volatility(store) == volatility_is_volatile)
1399                 return 0;
1400
1401         ptr    = get_Store_ptr(store);
1402         entity = find_entity(ptr);
1403
1404         /* a store to an entity which is never read is unnecessary */
1405         if (entity != NULL && !(get_entity_usage(entity) & ir_usage_read)) {
1406                 ldst_info_t *info = get_irn_link(store);
1407                 if (info->projs[pn_Store_X_except] == NULL) {
1408                         exchange(info->projs[pn_Store_M], get_Store_mem(store));
1409                         kill_node(store);
1410                         reduce_adr_usage(ptr);
1411                         return DF_CHANGED;
1412                 }
1413         }
1414
1415         /* Check if the address of this Store is used more than once.
1416          * If not, this Store cannot be removed in any case. */
1417         if (get_irn_n_uses(ptr) <= 1)
1418                 return 0;
1419
1420         mem = get_Store_mem(store);
1421
1422         /* follow the memory chain as long as there are only Loads */
1423         INC_MASTER();
1424
1425         return follow_Mem_chain_for_Store(store, skip_Proj(mem));
1426 }  /* optimize_store */
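/*
 * Sketch of the dead-store case above (hypothetical source; assumes the
 * entity usage analysis proved that 'cache' is never read):
 *
 *   static int cache;
 *   void f(int v) { cache = v; }     ==>   void f(int v) { }
 */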
1427
1428 /**
1429  * walker, optimizes Phi after Stores to identical places:
1430  * Does the following optimization:
1431  * @verbatim
1432  *
1433  *   val1   val2   val3          val1  val2  val3
1434  *    |      |      |               \    |    /
1435  *  Store  Store  Store              \   |   /
1436  *      \    |    /                   PhiData
1437  *       \   |   /                       |
1438  *        \  |  /                      Store
1439  *          PhiM
1440  *
1441  * @endverbatim
1442  * This reduces the number of stores and allows for predicated execution.
1443  * However, it moves Stores towards the end of a function, which may be bad.
1444  *
1445  * This is only possible if the predecessor blocks have only one successor.
1446  */
1447 static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
1448 {
1449         int i, n;
1450         ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
1451         ir_mode *mode;
1452         ir_node **inM, **inD, **projMs;
1453         int *idx;
1454         dbg_info *db = NULL;
1455         ldst_info_t *info;
1456         block_info_t *bl_info;
1457         unsigned res = 0;
1458
1459         /* Must be a memory Phi */
1460         if (get_irn_mode(phi) != mode_M)
1461                 return 0;
1462
1463         n = get_Phi_n_preds(phi);
1464         if (n <= 0)
1465                 return 0;
1466
1467         /* must be only one user */
1468         projM = get_Phi_pred(phi, 0);
1469         if (get_irn_n_edges(projM) != 1)
1470                 return 0;
1471
1472         store = skip_Proj(projM);
1473         old_store = store;
1474         if (!is_Store(store))
1475                 return 0;
1476
1477         block = get_nodes_block(store);
1478
1479         /* abort on dead blocks */
1480         if (is_Block_dead(block))
1481                 return 0;
1482
1483         /* check if the block is post dominated by Phi-block
1484            and has no exception exit */
1485         bl_info = get_irn_link(block);
1486         if (bl_info->flags & BLOCK_HAS_EXC)
1487                 return 0;
1488
1489         phi_block = get_nodes_block(phi);
1490         if (! block_strictly_postdominates(phi_block, block))
1491                 return 0;
1492
1493         /* this is the address of the store */
1494         ptr  = get_Store_ptr(store);
1495         mode = get_irn_mode(get_Store_value(store));
1496         info = get_irn_link(store);
1497         exc  = info->exc_block;
1498
1499         for (i = 1; i < n; ++i) {
1500                 ir_node *pred = get_Phi_pred(phi, i);
1501
1502                 if (get_irn_n_edges(pred) != 1)
1503                         return 0;
1504
1505                 pred = skip_Proj(pred);
1506                 if (!is_Store(pred))
1507                         return 0;
1508
1509                 if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
1510                         return 0;
1511
1512                 info = get_irn_link(pred);
1513
1514                 /* check, if all stores have the same exception flow */
1515                 if (exc != info->exc_block)
1516                         return 0;
1517
1518                 /* abort on dead blocks */
1519                 block = get_nodes_block(pred);
1520                 if (is_Block_dead(block))
1521                         return 0;
1522
1523                 /* check if the block is post dominated by Phi-block
1524                    and has no exception exit. Note that block must be different from
1525                    Phi-block, else we would move a Store from the End of a block to its
1526                    Start... */
1527                 bl_info = get_irn_link(block);
1528                 if (bl_info->flags & BLOCK_HAS_EXC)
1529                         return 0;
1530                 if (block == phi_block || ! block_postdominates(phi_block, block))
1531                         return 0;
1532         }
1533
1534         /*
1535          * ok, when we are here, we found that all predecessors of the Phi
1536          * are Stores to the same address and mode. That means whatever
1537          * we do before we enter the block of the Phi, we do a Store.
1538          * So, we can move the Store to the current block:
1539          *
1540          *   val1    val2    val3          val1  val2  val3
1541          *    |       |       |               \    |    /
1542          * | Str | | Str | | Str |             \   |   /
1543          *      \     |     /                   PhiData
1544          *       \    |    /                       |
1545          *        \   |   /                       Str
1546          *           PhiM
1547          *
1548          * Is only allowed if the predecessor blocks have only one successor.
1549          */
1550
1551         NEW_ARR_A(ir_node *, projMs, n);
1552         NEW_ARR_A(ir_node *, inM, n);
1553         NEW_ARR_A(ir_node *, inD, n);
1554         NEW_ARR_A(int, idx, n);
1555
1556         /* Prepare: Collect all Store nodes.  We must do this
1557            first because we otherwise may lose a Store when exchanging its
1558            memory Proj.
1559          */
1560         for (i = n - 1; i >= 0; --i) {
1561                 ir_node *store;
1562
1563                 projMs[i] = get_Phi_pred(phi, i);
1564                 assert(is_Proj(projMs[i]));
1565
1566                 store = get_Proj_pred(projMs[i]);
1567                 info  = get_irn_link(store);
1568
1569                 inM[i] = get_Store_mem(store);
1570                 inD[i] = get_Store_value(store);
1571                 idx[i] = info->exc_idx;
1572         }
1573         block = get_nodes_block(phi);
1574
1575         /* second step: create a new memory Phi */
1576         phiM = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inM, mode_M);
1577
1578         /* third step: create a new data Phi */
1579         phiD = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inD, mode);
1580
1581         /* rewire memory and kill the node */
1582         for (i = n - 1; i >= 0; --i) {
1583                 ir_node *proj  = projMs[i];
1584
1585                 if (is_Proj(proj)) {
1586                         ir_node *store = get_Proj_pred(proj);
1587                         exchange(proj, inM[i]);
1588                         kill_node(store);
1589                 }
1590         }
1591
1592         /* fourth step: create the Store */
1593         store = new_rd_Store(db, current_ir_graph, block, phiM, ptr, phiD);
1594 #ifdef DO_CACHEOPT
1595         co_set_irn_name(store, co_get_irn_ident(old_store));
1596 #endif
1597
1598         projM = new_rd_Proj(NULL, current_ir_graph, block, store, mode_M, pn_Store_M);
1599
1600         info = get_ldst_info(store, &wenv->obst);
1601         info->projs[pn_Store_M] = projM;
1602
1603         /* fifth step: repair exception flow */
1604         if (exc) {
1605                 ir_node *projX = new_rd_Proj(NULL, current_ir_graph, block, store, mode_X, pn_Store_X_except);
1606
1607                 info->projs[pn_Store_X_except] = projX;
1608                 info->exc_block                = exc;
1609                 info->exc_idx                  = idx[0];
1610
1611                 for (i = 0; i < n; ++i) {
1612                         set_Block_cfgpred(exc, idx[i], projX);
1613                 }
1614
1615                 if (n > 1) {
1616                         /* TODO: the exception block should be optimized, as some of its inputs are identical now */
1617                 }
1618
1619                 res |= CF_CHANGED;
1620         }
1621
1622         /* sixth step: replace old Phi */
1623         exchange(phi, projM);
1624
1625         return res | DF_CHANGED;
1626 }  /* optimize_phi */
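/*
 * Source-level sketch of the transformation (illustrative; assumes both
 * Stores hit the same address with the same mode and post-dominance
 * holds):
 *
 *   if (c) x = a; else x = b;      ==>   x = c ? a : b;
 *
 * Only one Store remains, fed by a data Phi, which enables predicated
 * execution on targets that support it.
 */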
1627
1628 /**
1629  * walker, do the optimizations
1630  */
1631 static void do_load_store_optimize(ir_node *n, void *env) {
1632         walk_env_t *wenv = env;
1633
1634         switch (get_irn_opcode(n)) {
1635
1636         case iro_Load:
1637                 wenv->changes |= optimize_load(n);
1638                 break;
1639
1640         case iro_Store:
1641                 wenv->changes |= optimize_store(n);
1642                 break;
1643
1644         case iro_Phi:
1645                 wenv->changes |= optimize_phi(n, wenv);
1646                 break;
1647
1648         default:
1649                 ;
1650         }
1651 }  /* do_load_store_optimize */
1652
1653 /** A scc. */
1654 typedef struct scc {
1655         ir_node *head;          /**< the head of the list */
1656 } scc;
1657
1658 /** A node entry. */
1659 typedef struct node_entry {
1660         unsigned DFSnum;    /**< the DFS number of this node */
1661         unsigned low;       /**< the low number of this node */
1662         ir_node  *header;   /**< the header of this node */
1663         int      in_stack;  /**< flag, set if the node is on the stack */
1664         ir_node  *next;     /**< link to the next node in the same scc */
1665         scc      *pscc;     /**< the scc of this node */
1666         unsigned POnum;     /**< the post order number for blocks */
1667 } node_entry;
1668
1669 /** A loop entry. */
1670 typedef struct loop_env {
1671         ir_phase ph;           /**< the phase object */
1672         ir_node  **stack;      /**< the node stack */
1673         int      tos;          /**< tos index */
1674         unsigned nextDFSnum;   /**< the current DFS number */
1675         unsigned POnum;        /**< current post order number */
1676
1677         unsigned changes;      /**< a bitmask of graph changes */
1678 } loop_env;
1679
1680 /**
1681  * Gets the node_entry of a node.
1682  */
1683 static node_entry *get_irn_ne(ir_node *irn, loop_env *env) {
1684         ir_phase   *ph = &env->ph;
1685         node_entry *e  = phase_get_irn_data(&env->ph, irn);
1686
1687         if (! e) {
1688                 e = phase_alloc(ph, sizeof(*e));
1689                 memset(e, 0, sizeof(*e));
1690                 phase_set_irn_data(ph, irn, e);
1691         }
1692         return e;
1693 }  /* get_irn_ne */
1694
1695 /**
1696  * Push a node onto the stack.
1697  *
1698  * @param env   the loop environment
1699  * @param n     the node to push
1700  */
1701 static void push(loop_env *env, ir_node *n) {
1702         node_entry *e;
1703
1704         if (env->tos == ARR_LEN(env->stack)) {
1705                 int nlen = ARR_LEN(env->stack) * 2;
1706                 ARR_RESIZE(ir_node *, env->stack, nlen);
1707         }
1708         env->stack[env->tos++] = n;
1709         e = get_irn_ne(n, env);
1710         e->in_stack = 1;
1711 }  /* push */
1712
1713 /**
1714  * pop a node from the stack
1715  *
1716  * @param env   the loop environment
1717  *
1718  * @return  The topmost node
1719  */
1720 static ir_node *pop(loop_env *env) {
1721         ir_node *n = env->stack[--env->tos];
1722         node_entry *e = get_irn_ne(n, env);
1723
1724         e->in_stack = 0;
1725         return n;
1726 }  /* pop */
1727
1728 /**
1729  * Check if irn is a region constant.
1730  * The block of irn must strictly dominate the header block.
1731  *
1732  * @param irn           the node to check
1733  * @param header_block  the header block of the induction variable
1734  */
1735 static int is_rc(ir_node *irn, ir_node *header_block) {
1736         ir_node *block = get_nodes_block(irn);
1737
1738         return (block != header_block) && block_dominates(block, header_block);
1739 }  /* is_rc */
1740
1741 typedef struct phi_entry phi_entry;
1742 struct phi_entry {
1743         ir_node   *phi;    /**< A phi with a region const memory. */
1744         int       pos;     /**< The position of the region const memory */
1745         ir_node   *load;   /**< the newly created load for this phi */
1746         phi_entry *next;
1747 };
1748
1749 /**
1750  * Move Loads out of loops if possible.
1751  *
1752  * @param pscc   the loop described by an SCC
1753  * @param env    the loop environment
1754  */
1755 static void move_loads_out_of_loops(scc *pscc, loop_env *env) {
1756         ir_node   *phi, *load, *next, *other, *next_other;
1757         ir_entity *ent;
1758         int       j;
1759         phi_entry *phi_list = NULL;
1760
1761         /* collect all outer memories */
1762         for (phi = pscc->head; phi != NULL; phi = next) {
1763                 node_entry *ne = get_irn_ne(phi, env);
1764                 next = ne->next;
1765
1766                 /* check all memory Phi's */
1767                 if (! is_Phi(phi))
1768                         continue;
1769
1770                 assert(get_irn_mode(phi) == mode_M && "DFS returned a non-memory Phi");
1771
1772                 for (j = get_irn_arity(phi) - 1; j >= 0; --j) {
1773                         ir_node    *pred = get_irn_n(phi, j);
1774                         node_entry *pe   = get_irn_ne(pred, env);
1775
1776                         if (pe->pscc != ne->pscc) {
1777                                 /* not in the same SCC, hence a region constant */
1778                                 phi_entry *npe = phase_alloc(&env->ph, sizeof(*npe));
1779
1780                                 npe->phi  = phi;
1781                                 npe->pos  = j;
1782                                 npe->next = phi_list;
1783                                 phi_list  = npe;
1784                         }
1785                 }
1786         }
1787         /* no Phis no fun */
1788         assert(phi_list != NULL && "DFS found a loop without Phi");
1789
1790         for (load = pscc->head; load; load = next) {
1791                 ir_mode *load_mode;
1792                 node_entry *ne = get_irn_ne(load, env);
1793                 next = ne->next;
1794
1795                 if (is_Load(load)) {
1796                         ldst_info_t *info = get_irn_link(load);
1797                         ir_node     *ptr = get_Load_ptr(load);
1798
1799                         /* for now, we cannot handle Loads with exceptions */
1800                         if (info->projs[pn_Load_res] == NULL || info->projs[pn_Load_X_regular] != NULL || info->projs[pn_Load_X_except] != NULL)
1801                                 continue;
1802
1803                         /* for now, we can only handle Load(Global) */
1804                         if (! is_Global(ptr))
1805                                 continue;
1806                         ent = get_Global_entity(ptr);
1807                         load_mode = get_Load_mode(load);
1808                         for (other = pscc->head; other != NULL; other = next_other) {
1809                                 node_entry *oe = get_irn_ne(other, env);
1810                                 next_other = oe->next;
1811
1812                                 if (is_Store(other)) {
1813                                         ir_alias_relation rel = get_alias_relation(
1814                                                 current_ir_graph,
1815                                                 get_Store_ptr(other),
1816                                                 get_irn_mode(get_Store_value(other)),
1817                                                 ptr, load_mode);
1818                                         /* if there might be an alias, we cannot pass this Store */
1819                                         if (rel != ir_no_alias)
1820                                                 break;
1821                                 }
1822                                 /* only pure Calls are allowed here, so ignore them */
1823                         }
1824                         if (other == NULL) {
1825                                 ldst_info_t *ninfo;
1826                                 phi_entry   *pe;
1827                                 dbg_info    *db;
1828
1829                                 /* for now, we cannot handle more than one input */
1830                                 if (phi_list->next != NULL)
1831                                         return;
1832
1833                                 /* yep, no aliasing Store found, Load can be moved */
1834                                 DB((dbg, LEVEL_1, "  Found a Load that could be moved: %+F\n", load));
1835
1836                                 db   = get_irn_dbg_info(load);
1837                                 for (pe = phi_list; pe != NULL; pe = pe->next) {
1838                                         int     pos   = pe->pos;
1839                                         ir_node *phi  = pe->phi;
1840                                         ir_node *blk  = get_nodes_block(phi);
1841                                         ir_node *pred = get_Block_cfgpred_block(blk, pos);
1842                                         ir_node *irn, *mem;
1843
1844                                         pe->load = irn = new_rd_Load(db, current_ir_graph, pred, get_Phi_pred(phi, pos), ptr, load_mode);
1845                                         ninfo = get_ldst_info(irn, phase_obst(&env->ph));
1846
1847                                         ninfo->projs[pn_Load_M] = mem = new_r_Proj(current_ir_graph, pred, irn, mode_M, pn_Load_M);
1848                                         set_Phi_pred(phi, pos, mem);
1849
1850                                         ninfo->projs[pn_Load_res] = new_r_Proj(current_ir_graph, pred, irn, load_mode, pn_Load_res);
1851
1852                                         DB((dbg, LEVEL_1, "  Created %+F in %+F\n", irn, pred));
1853                                 }
1854
1855                                 /* now kill the old Load */
1856                                 exchange(info->projs[pn_Load_M], get_Load_mem(load));
1857                                 exchange(info->projs[pn_Load_res], ninfo->projs[pn_Load_res]);
1858
1859                                 env->changes |= DF_CHANGED;
1860                         }
1861                 }
1862         }
1863 }  /* move_loads_out_of_loops */
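/*
 * Loop-invariant sketch (hypothetical source; assumes 'g' is a global
 * that is provably not stored to inside the loop and the loop's memory
 * Phi has a single region-constant input):
 *
 *   while (cond) use(g);     ==>   t = g; while (cond) use(t);
 *
 * The new Load is created in the predecessor block of the memory Phi,
 * i.e. effectively in the loop pre-header.
 */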
1864
1865 /**
1866  * Process a loop SCC.
1867  *
1868  * @param pscc  the SCC
1869  * @param env   the loop environment
1870  */
1871 static void process_loop(scc *pscc, loop_env *env) {
1872         ir_node *irn, *next, *header = NULL;
1873         node_entry *b, *h = NULL;
1874         int j, only_phi, num_outside, process = 0;
1875         ir_node *out_rc;
1876
1877         /* find the header block for this scc */
1878         for (irn = pscc->head; irn; irn = next) {
1879                 node_entry *e = get_irn_ne(irn, env);
1880                 ir_node *block = get_nodes_block(irn);
1881
1882                 next = e->next;
1883                 b = get_irn_ne(block, env);
1884
1885                 if (header) {
1886                         if (h->POnum < b->POnum) {
1887                                 header = block;
1888                                 h      = b;
1889                         }
1890                 }
1891                 else {
1892                         header = block;
1893                         h      = b;
1894                 }
1895         }
1896
1897         /* check if this scc contains only Phi, Load or Store nodes */
1898         only_phi    = 1;
1899         num_outside = 0;
1900         out_rc      = NULL;
1901         for (irn = pscc->head; irn; irn = next) {
1902                 node_entry *e = get_irn_ne(irn, env);
1903
1904                 next = e->next;
1905                 switch (get_irn_opcode(irn)) {
1906                 case iro_Call:
1907                         if (is_Call_pure(irn)) {
1908                                 /* pure calls can be treated like loads */
1909                                 only_phi = 0;
1910                                 break;
1911                         }
1912                         /* non-pure calls must be handled like may-alias Stores */
1913                         goto fail;
1914                 case iro_CopyB:
1915                         /* cannot handle CopyB yet */
1916                         goto fail;
1917                 case iro_Load:
1918                         process = 1;
1919                         if (get_Load_volatility(irn) == volatility_is_volatile) {
1920                                 /* cannot handle loops with volatile Loads */
1921                                 goto fail;
1922                         }
1923                         only_phi = 0;
1924                         break;
1925                 case iro_Store:
1926                         if (get_Store_volatility(irn) == volatility_is_volatile) {
1927                                 /* cannot handle loops with volatile Stores */
1928                                 goto fail;
1929                         }
1930                         only_phi = 0;
1931                         break;
1932                 case iro_Phi:
1933                         for (j = get_irn_arity(irn) - 1; j >= 0; --j) {
1934                                 ir_node *pred  = get_irn_n(irn, j);
1935                                 node_entry *pe = get_irn_ne(pred, env);
1936
1937                                 if (pe->pscc != e->pscc) {
1938                                         /* not in the same SCC, must be a region const */
1939                                         if (! is_rc(pred, header)) {
1940                                                 /* not a memory loop */
1941                                                 goto fail;
1942                                         }
1943                                         if (! out_rc) {
1944                                                 out_rc = pred;
1945                                                 ++num_outside;
1946                                         } else if (out_rc != pred) {
1947                                                 ++num_outside;
1948                                         }
1949                                 }
1950                         }
1951                         break;
1952                 default:
1953                         only_phi = 0;
1954                         break;
1955                 }
1956         }
1957         if (! process)
1958                 goto fail;
1959
1960         /* found a memory loop */
1961         DB((dbg, LEVEL_2, "  Found a memory loop:\n  "));
1962         if (only_phi && num_outside == 1) {
1963                 /* a phi cycle with only one real predecessor can be collapsed */
1964                 DB((dbg, LEVEL_2, "  Found a USELESS Phi cycle:\n  "));
1965
1966                 for (irn = pscc->head; irn; irn = next) {
1967                         node_entry *e = get_irn_ne(irn, env);
1968                         next = e->next;
1969                         e->header = NULL;
1970                         exchange(irn, out_rc);
1971                 }
1972                 env->changes |= DF_CHANGED;
1973                 return;
1974         }
1975
1976         /* set the header for every node in this scc */
1977         for (irn = pscc->head; irn; irn = next) {
1978                 node_entry *e = get_irn_ne(irn, env);
1979                 e->header = header;
1980                 next = e->next;
1981                 DB((dbg, LEVEL_2, " %+F,", irn));
1982         }
1983         DB((dbg, LEVEL_2, "\n"));
1984
1985         move_loads_out_of_loops(pscc, env);
1986
1987 fail:
1988         ;
1989 }  /* process_loop */
1990
1991 /**
1992  * Process a SCC.
1993  *
1994  * @param pscc  the SCC
1995  * @param env   the loop environment
1996  */
1997 static void process_scc(scc *pscc, loop_env *env) {
1998         ir_node *head = pscc->head;
1999         node_entry *e = get_irn_ne(head, env);
2000
2001 #ifdef DEBUG_libfirm
2002         {
2003                 ir_node *irn, *next;
2004
2005                 DB((dbg, LEVEL_4, " SCC at %p:\n ", pscc));
2006                 for (irn = pscc->head; irn; irn = next) {
2007                         node_entry *e = get_irn_ne(irn, env);
2008
2009                         next = e->next;
2010
2011                         DB((dbg, LEVEL_4, " %+F,", irn));
2012                 }
2013                 DB((dbg, LEVEL_4, "\n"));
2014         }
2015 #endif
2016
2017         if (e->next != NULL) {
2018                 /* this SCC has more than one member */
2019                 process_loop(pscc, env);
2020         }
2021 }  /* process_scc */
2022
2023 /**
2024  * Do Tarjan's SCC algorithm and drive load/store optimization.
2025  *
2026  * @param irn  start at this node
2027  * @param env  the loop environment
2028  */
2029 static void dfs(ir_node *irn, loop_env *env)
2030 {
2031         int i, n;
2032         node_entry *node = get_irn_ne(irn, env);
2033
2034         mark_irn_visited(irn);
2035
2036         node->DFSnum = env->nextDFSnum++;
2037         node->low    = node->DFSnum;
2038         push(env, irn);
2039
2040         /* handle preds */
2041         if (is_Phi(irn) || is_Sync(irn)) {
2042                 n = get_irn_arity(irn);
2043                 for (i = 0; i < n; ++i) {
2044                         ir_node *pred = get_irn_n(irn, i);
2045                         node_entry *o = get_irn_ne(pred, env);
2046
2047                         if (irn_not_visited(pred)) {
2048                                 dfs(pred, env);
2049                                 node->low = MIN(node->low, o->low);
2050                         }
2051                         if (o->DFSnum < node->DFSnum && o->in_stack)
2052                                 node->low = MIN(o->DFSnum, node->low);
2053                 }
2054         } else if (is_fragile_op(irn)) {
2055                 ir_node *pred = get_fragile_op_mem(irn);
2056                 node_entry *o = get_irn_ne(pred, env);
2057
2058                 if (irn_not_visited(pred)) {
2059                         dfs(pred, env);
2060                         node->low = MIN(node->low, o->low);
2061                 }
2062                 if (o->DFSnum < node->DFSnum && o->in_stack)
2063                         node->low = MIN(o->DFSnum, node->low);
2064         } else if (is_Proj(irn)) {
2065                 ir_node *pred = get_Proj_pred(irn);
2066                 node_entry *o = get_irn_ne(pred, env);
2067
2068                 if (irn_not_visited(pred)) {
2069                         dfs(pred, env);
2070                         node->low = MIN(node->low, o->low);
2071                 }
2072                 if (o->DFSnum < node->DFSnum && o->in_stack)
2073                         node->low = MIN(o->DFSnum, node->low);
2074         }
2075         else {
2076                  /* IGNORE predecessors */
2077         }
2078
2079         if (node->low == node->DFSnum) {
2080                 scc *pscc = phase_alloc(&env->ph, sizeof(*pscc));
2081                 ir_node *x;
2082
2083                 pscc->head = NULL;
2084                 do {
2085                         node_entry *e;
2086
2087                         x = pop(env);
2088                         e = get_irn_ne(x, env);
2089                         e->pscc    = pscc;
2090                         e->next    = pscc->head;
2091                         pscc->head = x;
2092                 } while (x != irn);
2093
2094                 process_scc(pscc, env);
2095         }
2096 }  /* dfs */
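/*
 * Note on the SCC test above: as in Tarjan's algorithm, a node whose
 * 'low' value still equals its own DFS number after all predecessors
 * have been visited is the root of an SCC; every node popped from the
 * stack down to (and including) it belongs to that SCC.
 */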
2097
2098 /**
2099  * Do the DFS on the memory edges of a graph.
2100  *
2101  * @param irg  the graph to process
2102  * @param env  the loop environment
2103  */
2104 static void do_dfs(ir_graph *irg, loop_env *env) {
2105         ir_graph *rem = current_ir_graph;
2106         ir_node  *endblk, *end;
2107         int      i;
2108
2109         current_ir_graph = irg;
2110         inc_irg_visited(irg);
2111
2112         /* visit all memory nodes */
2113         endblk = get_irg_end_block(irg);
2114         for (i = get_Block_n_cfgpreds(endblk) - 1; i >= 0; --i) {
2115                 ir_node *pred = get_Block_cfgpred(endblk, i);
2116
2117                 pred = skip_Proj(pred);
2118                 if (is_Return(pred))
2119                         dfs(get_Return_mem(pred), env);
2120                 else if (is_Raise(pred))
2121                         dfs(get_Raise_mem(pred), env);
2122                 else if (is_fragile_op(pred))
2123                         dfs(get_fragile_op_mem(pred), env);
2124                 else {
2125                         assert(0 && "Unknown EndBlock predecessor");
2126                 }
2127         }
2128
2129         /* visit the keep-alives */
2130         end = get_irg_end(irg);
2131         for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
2132                 ir_node *ka = get_End_keepalive(end, i);
2133
2134                 if (is_Phi(ka) && irn_not_visited(ka))
2135                         dfs(ka, env);
2136         }
2137         current_ir_graph = rem;
2138 }  /* do_dfs */
2139
2140 /**
2141  * Initialize new phase data. We always do this explicitly, so return NULL here.
2142  */
2143 static void *init_loop_data(ir_phase *ph, const ir_node *irn, void *data) {
2144         (void)ph;
2145         (void)irn;
2146         (void)data;
2147         return NULL;
2148 }  /* init_loop_data */
2149
2150 /**
2151  * Optimize Loads/Stores in loops.
2152  *
2153  * @param irg  the graph
2154  */
2155 static int optimize_loops(ir_graph *irg) {
2156         loop_env env;
2157
2158         env.stack         = NEW_ARR_F(ir_node *, 128);
2159         env.tos           = 0;
2160         env.nextDFSnum    = 0;
2161         env.POnum         = 0;
2162         env.changes       = 0;
2163         phase_init(&env.ph, "ldstopt", irg, PHASE_DEFAULT_GROWTH, init_loop_data, NULL);
2164
2165         /* calculate the SCC's and drive loop optimization. */
2166         do_dfs(irg, &env);
2167
2168         DEL_ARR_F(env.stack);
2169         phase_free(&env.ph);
2170
2171         return env.changes;
2172 }  /* optimize_loops */
2173
2174 /*
2175  * do the load store optimization
2176  */
2177 void optimize_load_store(ir_graph *irg) {
2178         walk_env_t env;
2179
2180         FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");
2181
2182         assert(get_irg_phase_state(irg) != phase_building);
2183         assert(get_irg_pinned(irg) != op_pin_state_floats &&
2184                 "LoadStore optimization needs pinned graph");
2185
2186         /* we need landing pads */
2187         remove_critical_cf_edges(irg);
2188
2189         edges_assure(irg);
2190
2191         /* for Phi optimization post-dominators are needed ... */
2192         assure_postdoms(irg);
2193
2194         if (get_opt_alias_analysis()) {
2195                 assure_irg_entity_usage_computed(irg);
2196                 assure_irp_globals_entity_usage_computed();
2197         }
2198
2199         obstack_init(&env.obst);
2200         env.changes = 0;
2201
2202         /* init the links, then collect Loads/Stores/Proj's in lists */
2203         master_visited = 0;
2204         irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);
2205
2206         /* now we have collected enough information, optimize */
2207         irg_walk_graph(irg, NULL, do_load_store_optimize, &env);
2208
2209         env.changes |= optimize_loops(irg);
2210
2211         obstack_free(&env.obst, NULL);
2212
2213         /* Handle graph state */
2214         if (env.changes) {
2215                 set_irg_outs_inconsistent(irg);
2216                 set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
2217         }
2218
2219         if (env.changes & CF_CHANGED) {
2220                 /* is this really needed? Yes: control flow changed, blocks might
2221                    have Bad() predecessors. */
2222                 set_irg_doms_inconsistent(irg);
2223         }
2224 }  /* optimize_load_store */
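/*
 * Typical usage (a sketch; driver code is not part of this file):
 *
 *   ir_graph *irg = get_irp_irg(i);
 *   optimize_load_store(irg);   // needs a pinned, completely built graph
 */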