/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Load/Store optimizations.
 * @author  Michael Beck
 * @version $Id$
 */
#include "config.h"

#include <string.h>

#include "iroptimize.h"
#include "irnode_t.h"
#include "irgraph_t.h"
#include "irmode_t.h"
#include "iropt_t.h"
#include "ircons_t.h"
#include "irgmod.h"
#include "irgwalk.h"
#include "tv_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "irflag_t.h"
#include "array_t.h"
#include "irhooks.h"
#include "iredges.h"
#include "irpass.h"
#include "opt_polymorphy.h"
#include "irmemory.h"
#include "irphase_t.h"
#include "irgopt.h"
#include "set.h"
#include "debug.h"

/** The debug handle. */
DEBUG_ONLY(static firm_dbg_module_t *dbg;)

#ifdef DO_CACHEOPT
#include "cacheopt/cachesim.h"
#endif

#undef IMAX
#define IMAX(a,b)   ((a) > (b) ? (a) : (b))

#define MAX_PROJ    IMAX(IMAX((long)pn_Load_max, (long)pn_Store_max), (long)pn_Call_max)

enum changes_t {
        DF_CHANGED = 1,       /**< data flow changed */
        CF_CHANGED = 2,       /**< control flow changed */
};

/**
 * walker environment
 */
typedef struct walk_env_t {
        struct obstack obst;          /**< obstack allocating the node infos */
        unsigned changes;             /**< a bitmask of graph changes */
} walk_env_t;

/** A Load/Store info. */
typedef struct ldst_info_t {
        ir_node  *projs[MAX_PROJ];    /**< list of Proj's of this node */
        ir_node  *exc_block;          /**< the exception block if available */
        int      exc_idx;             /**< predecessor index in the exception block */
        unsigned visited;             /**< visited counter for breaking loops */
} ldst_info_t;

/**
 * flags for control flow.
 */
enum block_flags_t {
        BLOCK_HAS_COND = 1,      /**< Block has conditional control flow */
        BLOCK_HAS_EXC  = 2       /**< Block has exceptional control flow */
};

/**
 * a Block info.
 */
typedef struct block_info_t {
        unsigned flags;               /**< flags for the block */
} block_info_t;

/** the master visited flag for loop detection. */
static unsigned master_visited = 0;

#define INC_MASTER()       ++master_visited
#define MARK_NODE(info)    (info)->visited = master_visited
#define NODE_VISITED(info) ((info)->visited >= master_visited)

/**
 * get the Load/Store info of a node
 */
static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst)
{
        ldst_info_t *info = (ldst_info_t*)get_irn_link(node);

        if (! info) {
                info = OALLOCZ(obst, ldst_info_t);
                set_irn_link(node, info);
        }
        return info;
}  /* get_ldst_info */

/**
 * get the Block info of a node
 */
static block_info_t *get_block_info(ir_node *node, struct obstack *obst)
{
        block_info_t *info = (block_info_t*)get_irn_link(node);

        if (! info) {
                info = OALLOCZ(obst, block_info_t);
                set_irn_link(node, info);
        }
        return info;
}  /* get_block_info */

/**
 * update the projection info for a Load/Store
 */
static unsigned update_projs(ldst_info_t *info, ir_node *proj)
{
        long nr = get_Proj_proj(proj);

        assert(0 <= nr && nr < MAX_PROJ && "Wrong proj from LoadStore");

        if (info->projs[nr]) {
                /* there is already one, do CSE */
                exchange(proj, info->projs[nr]);
                return DF_CHANGED;
        }
        else {
                info->projs[nr] = proj;
                return 0;
        }
}  /* update_projs */

/**
 * update the exception block info for a Load/Store node.
 *
 * @param info   the load/store info struct
 * @param block  the exception handler block for this load/store
 * @param pos    the control flow input of the block
 */
static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
{
        assert(info->exc_block == NULL && "more than one exception block found");

        info->exc_block = block;
        info->exc_idx   = pos;
        return 0;
}  /* update_exc */

/** Return the number of uses of an address node */
#define get_irn_n_uses(adr)     get_irn_n_edges(adr)

/**
 * walker, collects all Load/Store/Proj nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
{
        walk_env_t  *wenv   = (walk_env_t *)env;
        unsigned     opcode = get_irn_opcode(node);
        ir_node     *pred, *blk, *pred_blk;
        ldst_info_t *ldst_info;

        if (opcode == iro_Proj) {
                pred   = get_Proj_pred(node);
                opcode = get_irn_opcode(pred);

                if (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call) {
                        ldst_info = get_ldst_info(pred, &wenv->obst);

                        wenv->changes |= update_projs(ldst_info, node);

                        /*
                         * Place the Projs in the same block as their
                         * predecessor Load. This is always ok and prevents
                         * "non-SSA" form after optimizations if the Proj
                         * is in a wrong block.
                         */
                        blk      = get_nodes_block(node);
                        pred_blk = get_nodes_block(pred);
                        if (blk != pred_blk) {
                                wenv->changes |= DF_CHANGED;
                                set_nodes_block(node, pred_blk);
                        }
                }
        } else if (opcode == iro_Block) {
                int i;

                for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
                        ir_node      *pred_block, *proj;
                        block_info_t *bl_info;
                        int          is_exc = 0;

                        pred = proj = get_Block_cfgpred(node, i);

                        if (is_Proj(proj)) {
                                pred   = get_Proj_pred(proj);
                                is_exc = get_Proj_proj(proj) == pn_Generic_X_except;
                        }

                        /* ignore Bad predecessors, they will be removed later */
                        if (is_Bad(pred))
                                continue;

                        pred_block = get_nodes_block(pred);
                        bl_info    = get_block_info(pred_block, &wenv->obst);

                        if (is_fragile_op(pred) && is_exc)
                                bl_info->flags |= BLOCK_HAS_EXC;
                        else if (is_irn_forking(pred))
                                bl_info->flags |= BLOCK_HAS_COND;

                        opcode = get_irn_opcode(pred);
                        if (is_exc && (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call)) {
                                ldst_info = get_ldst_info(pred, &wenv->obst);

                                wenv->changes |= update_exc(ldst_info, node, i);
                        }
                }
        }
}  /* collect_nodes */

/**
 * Returns an entity if the address ptr points to a constant one.
 *
 * @param ptr  the address
 *
 * @return an entity or NULL
 */
static ir_entity *find_constant_entity(ir_node *ptr)
{
        for (;;) {
                if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
                        return get_SymConst_entity(ptr);
                } else if (is_Sel(ptr)) {
                        ir_entity *ent = get_Sel_entity(ptr);
                        ir_type   *tp  = get_entity_owner(ent);

                        /* Do not fiddle with polymorphism. */
                        if (is_Class_type(get_entity_owner(ent)) &&
                                ((get_entity_n_overwrites(ent)    != 0) ||
                                (get_entity_n_overwrittenby(ent) != 0)   ) )
                                return NULL;

                        if (is_Array_type(tp)) {
                                /* check bounds */
                                int i, n;

                                for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
                                        ir_node   *bound;
                                        ir_tarval *tlower, *tupper;
                                        ir_node   *index = get_Sel_index(ptr, i);
                                        ir_tarval *tv    = computed_value(index);

                                        /* check if the index is constant */
                                        if (tv == tarval_bad)
                                                return NULL;

                                        bound  = get_array_lower_bound(tp, i);
                                        tlower = computed_value(bound);
                                        bound  = get_array_upper_bound(tp, i);
                                        tupper = computed_value(bound);

                                        if (tlower == tarval_bad || tupper == tarval_bad)
                                                return NULL;

                                        if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
                                                return NULL;
                                        if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
                                                return NULL;

                                        /* ok, bounds check finished */
                                }
                        }

                        if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT)
                                return ent;

                        /* try next */
                        ptr = get_Sel_ptr(ptr);
                } else if (is_Add(ptr)) {
                        ir_node *l = get_Add_left(ptr);
                        ir_node *r = get_Add_right(ptr);

                        if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
                                ptr = l;
                        else if (get_irn_mode(r) == get_irn_mode(ptr) && is_Const(l))
                                ptr = r;
                        else
                                return NULL;

                        /* for now, we support only one addition, reassoc should fold all others */
                        if (! is_SymConst(ptr) && !is_Sel(ptr))
                                return NULL;
                } else if (is_Sub(ptr)) {
                        ir_node *l = get_Sub_left(ptr);
                        ir_node *r = get_Sub_right(ptr);

                        if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
                                ptr = l;
                        else
                                return NULL;
                        /* for now, we support only one subtraction, reassoc should fold all others */
                        if (! is_SymConst(ptr) && !is_Sel(ptr))
                                return NULL;
                } else
                        return NULL;
        }
}  /* find_constant_entity */

/**
 * Return the Selection index of a Sel node from dimension dim
 */
static long get_Sel_array_index_long(ir_node *n, int dim)
{
        ir_node *index = get_Sel_index(n, dim);
        assert(is_Const(index));
        return get_tarval_long(get_Const_tarval(index));
}  /* get_Sel_array_index_long */

/**
 * Returns the accessed component graph path for a
 * node computing an address.
 *
 * @param ptr    the node computing the address
 * @param depth  current depth in steps upward from the root
 *               of the address
 */
static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth)
{
        compound_graph_path *res = NULL;
        ir_entity           *root, *field, *ent;
        int                 path_len, pos, idx;
        ir_tarval           *tv;
        ir_type             *tp;

        if (is_SymConst(ptr)) {
                /* a SymConst. If the depth is 0, this is an access to a global
                 * entity and we don't need a component path, else we know
                 * at least its length.
                 */
                assert(get_SymConst_kind(ptr) == symconst_addr_ent);
                root = get_SymConst_entity(ptr);
                res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
        } else if (is_Sel(ptr)) {
                /* it's a Sel, go up until we find the root */
                res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);
                if (res == NULL)
                        return NULL;

                /* fill up the step in the path at the current position */
                field    = get_Sel_entity(ptr);
                path_len = get_compound_graph_path_length(res);
                pos      = path_len - depth - 1;
                set_compound_graph_path_node(res, pos, field);

                if (is_Array_type(get_entity_owner(field))) {
                        assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
                        set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
                }
        } else if (is_Add(ptr)) {
                ir_mode   *mode;
                ir_tarval *tmp;

                {
                        ir_node   *l    = get_Add_left(ptr);
                        ir_node   *r    = get_Add_right(ptr);
                        if (is_Const(r) && get_irn_mode(l) == get_irn_mode(ptr)) {
                                ptr = l;
                                tv  = get_Const_tarval(r);
                        } else {
                                ptr = r;
                                tv  = get_Const_tarval(l);
                        }
                }
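                /*
                 * Below the constant byte offset tv is translated back into
                 * array indices: for every array dimension tv is divided by
                 * the element size. For example (assuming 4-byte ints), in
                 * int a[4][4] an offset of 20 bytes yields 20/16 = 1 with
                 * remainder 4, then 4/4 = 1, i.e. the access a[1][1].
                 */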
ptr_arith:
                mode = get_tarval_mode(tv);
                tmp  = tv;

                /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
                if (is_Sel(ptr)) {
                        field = get_Sel_entity(ptr);
                } else {
                        field = get_SymConst_entity(ptr);
                }
                idx = 0;
                for (ent = field;;) {
                        unsigned   size;
                        ir_tarval *sz, *tv_index, *tlower, *tupper;
                        ir_node   *bound;

                        tp = get_entity_type(ent);
                        if (! is_Array_type(tp))
                                break;
                        ent = get_array_element_entity(tp);
                        size = get_type_size_bytes(get_entity_type(ent));
                        sz   = new_tarval_from_long(size, mode);

                        tv_index = tarval_div(tmp, sz);
                        tmp      = tarval_mod(tmp, sz);

                        if (tv_index == tarval_bad || tmp == tarval_bad)
                                return NULL;

                        assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
                        bound  = get_array_lower_bound(tp, 0);
                        tlower = computed_value(bound);
                        bound  = get_array_upper_bound(tp, 0);
                        tupper = computed_value(bound);

                        if (tlower == tarval_bad || tupper == tarval_bad)
                                return NULL;

                        if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
                                return NULL;
                        if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
                                return NULL;

                        /* ok, bounds check finished */
                        ++idx;
                }
                if (! tarval_is_null(tmp)) {
                        /* access to some struct/union member */
                        return NULL;
                }

                /* should be at least ONE array */
                if (idx == 0)
                        return NULL;

                res = rec_get_accessed_path(ptr, depth + idx);
                if (res == NULL)
                        return NULL;

                path_len = get_compound_graph_path_length(res);
                pos      = path_len - depth - idx;

                for (ent = field;;) {
                        unsigned   size;
                        ir_tarval *sz, *tv_index;
                        long       index;

                        tp = get_entity_type(ent);
                        if (! is_Array_type(tp))
                                break;
                        ent = get_array_element_entity(tp);
                        set_compound_graph_path_node(res, pos, ent);

                        size = get_type_size_bytes(get_entity_type(ent));
                        sz   = new_tarval_from_long(size, mode);

                        tv_index = tarval_div(tv, sz);
                        tv       = tarval_mod(tv, sz);

                        /* worked above, should work again */
                        assert(tv_index != tarval_bad && tv != tarval_bad);

                        /* bounds already checked above */
                        index = get_tarval_long(tv_index);
                        set_compound_graph_path_array_index(res, pos, index);
                        ++pos;
                }
        } else if (is_Sub(ptr)) {
                ir_node *l = get_Sub_left(ptr);
                ir_node *r = get_Sub_right(ptr);

                ptr = l;
                tv  = get_Const_tarval(r);
                tv  = tarval_neg(tv);
                goto ptr_arith;
        }
        return res;
}  /* rec_get_accessed_path */

/**
 * Returns an access path or NULL.  The access path is only
 * valid if the graph is in phase_high and _no_ address computation is used.
 */
static compound_graph_path *get_accessed_path(ir_node *ptr)
{
        compound_graph_path *gr = rec_get_accessed_path(ptr, 0);
        return gr;
}  /* get_accessed_path */

typedef struct path_entry {
        ir_entity         *ent;
        struct path_entry *next;
        long              index;
} path_entry;

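/**
 * Walk an address expression down to its SymConst root while collecting
 * the accessed entities and indices, then look the value up in the root
 * entity's (new style) initializer. Returns the initializer value for
 * the accessed member or NULL if it cannot be determined.
 */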
static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
{
        path_entry       entry, *p;
        ir_entity        *ent, *field;
        ir_initializer_t *initializer;
        ir_tarval        *tv;
        ir_type          *tp;
        unsigned         n;

        entry.next = next;
        if (is_SymConst(ptr)) {
                /* found the root */
                ent         = get_SymConst_entity(ptr);
                initializer = get_entity_initializer(ent);
                for (p = next; p != NULL;) {
                        if (initializer->kind != IR_INITIALIZER_COMPOUND)
                                return NULL;
                        n  = get_initializer_compound_n_entries(initializer);
                        tp = get_entity_type(ent);

                        if (is_Array_type(tp)) {
                                ent = get_array_element_entity(tp);
                                if (ent != p->ent) {
                                        /* a missing [0] */
                                        if (0 >= n)
                                                return NULL;
                                        initializer = get_initializer_compound_value(initializer, 0);
                                        continue;
                                }
                        }
                        if (p->index >= (int) n)
                                return NULL;
                        initializer = get_initializer_compound_value(initializer, p->index);

                        ent = p->ent;
                        p   = p->next;
                }
                tp = get_entity_type(ent);
                while (is_Array_type(tp)) {
                        ent = get_array_element_entity(tp);
                        tp = get_entity_type(ent);
                        /* a missing [0] */
                        n  = get_initializer_compound_n_entries(initializer);
                        if (0 >= n)
                                return NULL;
                        initializer = get_initializer_compound_value(initializer, 0);
                }

                switch (initializer->kind) {
                case IR_INITIALIZER_CONST:
                        return get_initializer_const_value(initializer);
                case IR_INITIALIZER_TARVAL:
                case IR_INITIALIZER_NULL:
                default:
                        return NULL;
                }
        } else if (is_Sel(ptr)) {
                entry.ent = field = get_Sel_entity(ptr);
                tp = get_entity_owner(field);
                if (is_Array_type(tp)) {
                        assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
                        entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
                } else {
                        int i, n_members = get_compound_n_members(tp);
                        for (i = 0; i < n_members; ++i) {
                                if (get_compound_member(tp, i) == field)
                                        break;
                        }
                        if (i >= n_members) {
                                /* not found: should NOT happen */
                                return NULL;
                        }
                        entry.index = i;
                }
                return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
        } else if (is_Add(ptr)) {
                ir_mode  *mode;
                unsigned pos;

                {
                        ir_node *l = get_Add_left(ptr);
                        ir_node *r = get_Add_right(ptr);
                        if (is_Const(r)) {
                                ptr = l;
                                tv  = get_Const_tarval(r);
                        } else {
                                ptr = r;
                                tv  = get_Const_tarval(l);
                        }
                }
ptr_arith:
                mode = get_tarval_mode(tv);

                /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
                if (is_Sel(ptr)) {
                        field = get_Sel_entity(ptr);
                } else {
                        field = get_SymConst_entity(ptr);
                }

                /* count needed entries */
                pos = 0;
                for (ent = field;;) {
                        tp = get_entity_type(ent);
                        if (! is_Array_type(tp))
                                break;
                        ent = get_array_element_entity(tp);
                        ++pos;
                }
                /* should be at least ONE entry */
                if (pos == 0)
                        return NULL;

                /* allocate the right number of entries */
                NEW_ARR_A(path_entry, p, pos);

                /* fill them up */
                pos = 0;
                for (ent = field;;) {
                        unsigned   size;
                        ir_tarval *sz, *tv_index, *tlower, *tupper;
                        long       index;
                        ir_node   *bound;

                        tp = get_entity_type(ent);
                        if (! is_Array_type(tp))
                                break;
                        ent = get_array_element_entity(tp);
                        p[pos].ent  = ent;
                        p[pos].next = &p[pos + 1];

                        size = get_type_size_bytes(get_entity_type(ent));
                        sz   = new_tarval_from_long(size, mode);

                        tv_index = tarval_div(tv, sz);
                        tv       = tarval_mod(tv, sz);

                        if (tv_index == tarval_bad || tv == tarval_bad)
                                return NULL;

                        assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
                        bound  = get_array_lower_bound(tp, 0);
                        tlower = computed_value(bound);
                        bound  = get_array_upper_bound(tp, 0);
                        tupper = computed_value(bound);

                        if (tlower == tarval_bad || tupper == tarval_bad)
                                return NULL;

                        if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
                                return NULL;
                        if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
                                return NULL;

                        /* ok, bounds check finished */
                        index = get_tarval_long(tv_index);
                        p[pos].index = index;
                        ++pos;
                }
                if (! tarval_is_null(tv)) {
                        /* hmm, wrong access */
                        return NULL;
                }
                p[pos - 1].next = next;
                return rec_find_compound_ent_value(ptr, p);
        } else if (is_Sub(ptr)) {
                ir_node *l = get_Sub_left(ptr);
                ir_node *r = get_Sub_right(ptr);

                ptr = l;
                tv  = get_Const_tarval(r);
                tv  = tarval_neg(tv);
                goto ptr_arith;
        }
        return NULL;
}

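/** Return the initializer value accessed through the address ptr, or NULL. */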
static ir_node *find_compound_ent_value(ir_node *ptr)
{
        return rec_find_compound_ent_value(ptr, NULL);
}

/* forward */
static void reduce_adr_usage(ir_node *ptr);

/**
 * Update a Load that may have lost its users.
 */
static void handle_load_update(ir_node *load)
{
        ldst_info_t *info = (ldst_info_t*)get_irn_link(load);

        /* do NOT touch volatile loads for now */
        if (get_Load_volatility(load) == volatility_is_volatile)
                return;

        if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
                ir_node *ptr = get_Load_ptr(load);
                ir_node *mem = get_Load_mem(load);

                /* a Load whose value is neither used nor exception checked, remove it */
                exchange(info->projs[pn_Load_M], mem);
                if (info->projs[pn_Load_X_regular])
                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                kill_node(load);
                reduce_adr_usage(ptr);
        }
}  /* handle_load_update */

/**
 * A use of an address node has vanished. Check if this was a Proj
 * node and update the counters.
 */
static void reduce_adr_usage(ir_node *ptr)
{
        ir_node *pred;
        if (!is_Proj(ptr))
                return;
        if (get_irn_n_edges(ptr) > 0)
                return;

        /* this Proj is dead now */
        pred = get_Proj_pred(ptr);
        if (is_Load(pred)) {
                ldst_info_t *info = (ldst_info_t*)get_irn_link(pred);
                info->projs[get_Proj_proj(ptr)] = NULL;

                /* this node lost its result proj, handle that */
                handle_load_update(pred);
        }
}  /* reduce_adr_usage */

/**
 * Check, if an already existing value of mode old_mode can be converted
 * into the needed one new_mode without loss.
 */
static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode)
{
        if (old_mode == new_mode)
                return 1;

        /* if both modes are two's complement ones, we can always convert the
           stored value into the needed one. */
        if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
                  get_mode_arithmetic(old_mode) == irma_twos_complement &&
                  get_mode_arithmetic(new_mode) == irma_twos_complement)
                return 1;
        return 0;
}  /* can_use_stored_value */

/**
 * Check whether a Call is at least pure, i.e. it only reads memory.
 */
static unsigned is_Call_pure(ir_node *call)
{
        ir_type *call_tp = get_Call_type(call);
        unsigned prop = get_method_additional_properties(call_tp);

        /* check first the call type */
        if ((prop & (mtp_property_const|mtp_property_pure)) == 0) {
                /* try the called entity */
                ir_node *ptr = get_Call_ptr(call);

                if (is_Global(ptr)) {
                        ir_entity *ent = get_Global_entity(ptr);

                        prop = get_entity_additional_properties(ent);
                }
        }
        return (prop & (mtp_property_const|mtp_property_pure)) != 0;
}  /* is_Call_pure */

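/**
 * Decompose an address into a base pointer and a constant byte offset
 * by descending through Add/Sub nodes with constant operands and Sel
 * nodes of types with fixed layout. Returns the remaining base pointer
 * and stores the accumulated offset in *pOffset.
 */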
static ir_node *get_base_and_offset(ir_node *ptr, long *pOffset)
{
        ir_mode *mode  = get_irn_mode(ptr);
        long    offset = 0;

        /* TODO: long might not be enough, we should probably use some tarval thingy... */
        for (;;) {
                if (is_Add(ptr)) {
                        ir_node *l = get_Add_left(ptr);
                        ir_node *r = get_Add_right(ptr);

                        if (get_irn_mode(l) != mode || !is_Const(r))
                                break;

                        offset += get_tarval_long(get_Const_tarval(r));
                        ptr     = l;
                } else if (is_Sub(ptr)) {
                        ir_node *l = get_Sub_left(ptr);
                        ir_node *r = get_Sub_right(ptr);

                        if (get_irn_mode(l) != mode || !is_Const(r))
                                break;

                        offset -= get_tarval_long(get_Const_tarval(r));
                        ptr     = l;
                } else if (is_Sel(ptr)) {
                        ir_entity *ent = get_Sel_entity(ptr);
                        ir_type   *tp  = get_entity_owner(ent);

                        if (is_Array_type(tp)) {
                                int     size;
                                ir_node *index;

                                /* only one dimensional arrays yet */
                                if (get_Sel_n_indexs(ptr) != 1)
                                        break;
                                index = get_Sel_index(ptr, 0);
                                if (! is_Const(index))
                                        break;

                                tp = get_entity_type(ent);
                                if (get_type_state(tp) != layout_fixed)
                                        break;

                                size    = get_type_size_bytes(tp);
                                offset += size * get_tarval_long(get_Const_tarval(index));
                        } else {
                                if (get_type_state(tp) != layout_fixed)
                                        break;
                                offset += get_entity_offset(ent);
                        }
                        ptr = get_Sel_ptr(ptr);
                } else
                        break;
        }

        *pOffset = offset;
        return ptr;
}

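/**
 * Try to replace a Load by the value of an earlier Store to the same
 * base address. If the Load reads only a part of the stored value, the
 * stored value is adjusted: e.g. a one byte load at offset 1 from a
 * 32-bit store becomes a Shr by 8 bits plus a Conv to the load mode
 * (little endian only, see the FIXME below).
 *
 * Returns a changes_t bitmask if the Load was replaced, 0 otherwise.
 */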
static int try_load_after_store(ir_node *load,
                ir_node *load_base_ptr, long load_offset, ir_node *store)
{
        ldst_info_t *info;
        ir_node *store_ptr      = get_Store_ptr(store);
        long     store_offset;
        ir_node *store_base_ptr = get_base_and_offset(store_ptr, &store_offset);
        ir_node *store_value;
        ir_mode *store_mode;
        ir_node *load_ptr;
        ir_mode *load_mode;
        long     load_mode_len;
        long     store_mode_len;
        long     delta;
        int      res;

        if (load_base_ptr != store_base_ptr)
                return 0;

        load_mode      = get_Load_mode(load);
        load_mode_len  = get_mode_size_bytes(load_mode);
        store_mode     = get_irn_mode(get_Store_value(store));
        store_mode_len = get_mode_size_bytes(store_mode);
        delta          = load_offset - store_offset;
        store_value    = get_Store_value(store);

        if (delta != 0 || store_mode != load_mode) {
                if (delta < 0 || delta + load_mode_len > store_mode_len)
                        return 0;

                if (get_mode_arithmetic(store_mode) != irma_twos_complement ||
                        get_mode_arithmetic(load_mode)  != irma_twos_complement)
                        return 0;

                /* produce a shift to adjust offset delta */
                if (delta > 0) {
                        ir_node *cnst;
                        ir_graph *irg = get_irn_irg(load);

                        /* FIXME: only true for little endian */
                        cnst        = new_r_Const_long(irg, mode_Iu, delta * 8);
                        store_value = new_r_Shr(get_nodes_block(load),
                                                store_value, cnst, store_mode);
                }

                /* add a Conv if needed */
                if (store_mode != load_mode) {
                        store_value = new_r_Conv(get_nodes_block(load), store_value, load_mode);
                }
        }

        DBG_OPT_RAW(load, store_value);

        info = (ldst_info_t*)get_irn_link(load);
        if (info->projs[pn_Load_M])
                exchange(info->projs[pn_Load_M], get_Load_mem(load));

        res = 0;
        /* no exception */
        if (info->projs[pn_Load_X_except]) {
                ir_graph *irg = get_irn_irg(load);
                exchange(info->projs[pn_Load_X_except], new_r_Bad(irg));
                res |= CF_CHANGED;
        }
        if (info->projs[pn_Load_X_regular]) {
                exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                res |= CF_CHANGED;
        }

        if (info->projs[pn_Load_res])
                exchange(info->projs[pn_Load_res], store_value);

        load_ptr = get_Load_ptr(load);
        kill_node(load);
        reduce_adr_usage(load_ptr);
        return res | DF_CHANGED;
}

/**
 * Follow the memory chain as long as there are only Loads,
 * alias free Stores, and constant Calls and try to replace the
 * current Load by a previous one.
 * Note that in unreachable loops we might reach the Load again
 * or fall into a cycle; we break such cycles using a special
 * visited flag.
 *
 * INC_MASTER() must be called before diving in.
 */
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr)
{
        unsigned    res = 0;
        ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
        ir_node     *pred;
        ir_node     *ptr       = get_Load_ptr(load);
        ir_node     *mem       = get_Load_mem(load);
        ir_mode     *load_mode = get_Load_mode(load);

        for (pred = curr; load != pred; ) {
                ldst_info_t *pred_info = (ldst_info_t*)get_irn_link(pred);

                /*
                 * a Load immediately after a Store -- a read after write.
                 * We may remove the Load if neither the Load nor the Store has an
                 * exception handler OR they are in the same Block. In the latter
                 * case the Load cannot throw an exception when the previous Store was
                 * quiet.
                 *
                 * Why do we need to check for a Store exception? If the Store cannot
                 * be executed (ROM) the exception handler might simply jump into
                 * the load Block :-(
                 * We could make it a little bit better if we would know that the
                 * exception handler of the Store jumps directly to the end...
                 */
                if (is_Store(pred) && ((pred_info->projs[pn_Store_X_except] == NULL
                                && info->projs[pn_Load_X_except] == NULL)
                                || get_nodes_block(load) == get_nodes_block(pred)))
                {
                        long    load_offset;
                        ir_node *base_ptr = get_base_and_offset(ptr, &load_offset);
                        int     changes   = try_load_after_store(load, base_ptr, load_offset, pred);

                        if (changes != 0)
                                return res | changes;
                } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
                           can_use_stored_value(get_Load_mode(pred), load_mode)) {
                        /*
                         * a Load after a Load -- a read after read.
                         * We may remove the second Load, if it does not have an exception
                         * handler OR they are in the same Block. In the latter case
                         * the Load cannot throw an exception when the previous Load was
                         * quiet.
                         *
                         * Here, there is no need to check if the previous Load has an
                         * exception handler because they would have exactly the same
                         * exception...
                         */
                        if (info->projs[pn_Load_X_except] == NULL
                                        || get_nodes_block(load) == get_nodes_block(pred)) {
                                ir_node *value;

                                DBG_OPT_RAR(load, pred);

                                /* the result is used */
                                if (info->projs[pn_Load_res]) {
                                        if (pred_info->projs[pn_Load_res] == NULL) {
                                                /* create a new Proj again */
                                                pred_info->projs[pn_Load_res] = new_r_Proj(pred, get_Load_mode(pred), pn_Load_res);
                                        }
                                        value = pred_info->projs[pn_Load_res];

                                        /* add a Conv if needed */
                                        if (get_Load_mode(pred) != load_mode) {
                                                value = new_r_Conv(get_nodes_block(load), value, load_mode);
                                        }

                                        exchange(info->projs[pn_Load_res], value);
                                }

                                if (info->projs[pn_Load_M])
                                        exchange(info->projs[pn_Load_M], mem);

                                /* no exception */
                                if (info->projs[pn_Load_X_except]) {
                                        ir_graph *irg = get_irn_irg(load);
                                        exchange(info->projs[pn_Load_X_except], new_r_Bad(irg));
                                        res |= CF_CHANGED;
                                }
                                if (info->projs[pn_Load_X_regular]) {
                                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                                        res |= CF_CHANGED;
                                }

                                kill_node(load);
                                reduce_adr_usage(ptr);
                                return res |= DF_CHANGED;
                        }
                }

                if (is_Store(pred)) {
                        /* check if we can pass through this store */
                        ir_alias_relation rel = get_alias_relation(
                                get_Store_ptr(pred),
                                get_irn_mode(get_Store_value(pred)),
                                ptr, load_mode);
                        /* if there might be an alias, we cannot pass this Store */
                        if (rel != ir_no_alias)
                                break;
                        pred = skip_Proj(get_Store_mem(pred));
                } else if (is_Load(pred)) {
                        pred = skip_Proj(get_Load_mem(pred));
                } else if (is_Call(pred)) {
                        if (is_Call_pure(pred)) {
                                /* The called graph is at least pure, so there are no Store's
                                   in it. We can handle it like a Load and skip it. */
                                pred = skip_Proj(get_Call_mem(pred));
                        } else {
                                /* there might be Store's in the graph, stop here */
                                break;
                        }
                } else {
                        /* follow only Load chains */
                        break;
                }

                /* check for cycles */
                if (NODE_VISITED(pred_info))
                        break;
                MARK_NODE(pred_info);
        }

        if (is_Sync(pred)) {
                int i;

                /* handle all Sync predecessors */
                for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
                        res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
                        if (res)
                                return res;
                }
        }

        return res;
}  /* follow_Mem_chain */

/*
 * Check if we can replace the load by a given const from
 * the const code irg.
 */
ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c)
{
        ir_mode  *c_mode = get_irn_mode(c);
        ir_mode  *l_mode = get_Load_mode(load);
        ir_node  *block  = get_nodes_block(load);
        dbg_info *dbgi   = get_irn_dbg_info(load);
        ir_node  *res    = copy_const_value(dbgi, c, block);

        if (c_mode != l_mode) {
                /* check, if the mode matches OR can be easily converted into it */
                if (is_reinterpret_cast(c_mode, l_mode)) {
                        /* copy the value from the const code irg and cast it */
                        res = new_rd_Conv(dbgi, block, res, l_mode);
                } else {
                        return NULL;
                }
        }
        return res;
}

/**
 * optimize a Load
 *
 * @param load  the Load node
 */
static unsigned optimize_load(ir_node *load)
{
        ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
        ir_node     *mem, *ptr, *value;
        ir_entity   *ent;
        long        dummy;
        unsigned    res = 0;

        /* do NOT touch volatile loads for now */
        if (get_Load_volatility(load) == volatility_is_volatile)
                return 0;

        /* the address of the load to be optimized */
        ptr = get_Load_ptr(load);

        /* The mem of the Load. Must still be returned after optimization. */
        mem = get_Load_mem(load);

        if (info->projs[pn_Load_res] == NULL
                        && info->projs[pn_Load_X_except] == NULL) {
                /* the value is never used and we don't care about exceptions, remove */
                exchange(info->projs[pn_Load_M], mem);

                if (info->projs[pn_Load_X_regular]) {
                        /* should not happen, but if it does, remove it */
                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                        res |= CF_CHANGED;
                }
                kill_node(load);
                reduce_adr_usage(ptr);
                return res | DF_CHANGED;
        }

        /* Load from a constant polymorphic field, where we can resolve
           polymorphism. */
        value = transform_polymorph_Load(load);
        if (value == load) {
                value = NULL;
                /* check if we can determine the entity that will be loaded */
                ent = find_constant_entity(ptr);
                if (ent != NULL
                                && get_entity_visibility(ent) != ir_visibility_external) {
                        /* a static allocation that is not external: there should be NO
                         * exception when loading even if we cannot replace the load itself.
                         */

                        /* no exception, clear the info field as it might be checked later again */
                        if (info->projs[pn_Load_X_except]) {
                                ir_graph *irg = get_irn_irg(load);
                                exchange(info->projs[pn_Load_X_except], new_r_Bad(irg));
                                info->projs[pn_Load_X_except] = NULL;
                                res |= CF_CHANGED;
                        }
                        if (info->projs[pn_Load_X_regular]) {
                                exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                                info->projs[pn_Load_X_regular] = NULL;
                                res |= CF_CHANGED;
                        }

                        if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT) {
                                if (ent->initializer != NULL) {
                                        /* new style initializer */
                                        value = find_compound_ent_value(ptr);
                                } else if (entity_has_compound_ent_values(ent)) {
                                        /* old style initializer */
                                        compound_graph_path *path = get_accessed_path(ptr);

                                        if (path != NULL) {
                                                assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));

                                                value = get_compound_ent_value_by_path(ent, path);
                                                DB((dbg, LEVEL_1, "  Constant access at %F%F resulted in %+F\n", ent, path, value));
                                                free_compound_graph_path(path);
                                        }
                                }
                                if (value != NULL)
                                        value = can_replace_load_by_const(load, value);
                        }
                }
        }
        if (value != NULL) {
                /* we completely replace the load by this value */
                if (info->projs[pn_Load_X_except]) {
                        ir_graph *irg = get_irn_irg(load);
                        exchange(info->projs[pn_Load_X_except], new_r_Bad(irg));
                        info->projs[pn_Load_X_except] = NULL;
                        res |= CF_CHANGED;
                }
                if (info->projs[pn_Load_X_regular]) {
                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                        info->projs[pn_Load_X_regular] = NULL;
                        res |= CF_CHANGED;
                }
                if (info->projs[pn_Load_M]) {
                        exchange(info->projs[pn_Load_M], mem);
                        res |= DF_CHANGED;
                }
                if (info->projs[pn_Load_res]) {
                        exchange(info->projs[pn_Load_res], value);
                        res |= DF_CHANGED;
                }
                kill_node(load);
                reduce_adr_usage(ptr);
                return res;
        }

        /* Check, if the address of this load is used more than once.
         * If not, this Load cannot be removed in any case. */
        if (get_irn_n_uses(ptr) <= 1 && get_irn_n_uses(get_base_and_offset(ptr, &dummy)) <= 1)
                return res;

        /*
         * follow the memory chain as long as there are only Loads
         * and try to replace the current Load or Store by a previous one.
         * Note that in unreachable loops we might reach the Load again
         * or fall into a cycle. We break such cycles using a special
         * visited flag.
         */
        INC_MASTER();
        res = follow_Mem_chain(load, skip_Proj(mem));
        return res;
}  /* optimize_load */

/**
 * Check whether a value of mode new_mode would completely overwrite a value
 * of mode old_mode in memory.
 */
static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
{
        return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
}  /* is_completely_overwritten */

/**
 * Check whether small is a part of large (starting at same address).
 */
static int is_partially_same(ir_node *small, ir_node *large)
{
        ir_mode *sm = get_irn_mode(small);
        ir_mode *lm = get_irn_mode(large);

        /* FIXME: Check endianness */
        return is_Conv(small) && get_Conv_op(small) == large
            && get_mode_size_bytes(sm) < get_mode_size_bytes(lm)
            && get_mode_arithmetic(sm) == irma_twos_complement
            && get_mode_arithmetic(lm) == irma_twos_complement;
}  /* is_partially_same */

/**
 * follow the memory chain as long as there are only Loads and alias free Stores.
 *
 * INC_MASTER() must be called before diving in.
 */
static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr)
{
        unsigned res = 0;
        ldst_info_t *info = (ldst_info_t*)get_irn_link(store);
        ir_node *pred;
        ir_node *ptr = get_Store_ptr(store);
        ir_node *mem = get_Store_mem(store);
        ir_node *value = get_Store_value(store);
        ir_mode *mode  = get_irn_mode(value);
        ir_node *block = get_nodes_block(store);

        for (pred = curr; pred != store;) {
                ldst_info_t *pred_info = (ldst_info_t*)get_irn_link(pred);

                /*
                 * BEWARE: one might think that checking the modes is useless, because
                 * if the pointers are identical, they refer to the same object.
                 * This is only true in strongly typed languages, not in C, where the
                 * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
                 * However, if the size of the mode that is written is bigger than or
                 * equal to the size of the old one, the old value is completely
                 * overwritten and can be killed ...
                 */
                if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
                    get_nodes_block(pred) == block) {
                        /*
                         * a Store after a Store in the same Block -- a write after write.
                         */

                        /*
                         * We may remove the first Store, if the old value is completely
                         * overwritten or the old value is a part of the new value,
                         * and if it does not have an exception handler.
                         *
                         * TODO: What, if both have the same exception handler ???
                         */
                        if (get_Store_volatility(pred) != volatility_is_volatile
                                && !pred_info->projs[pn_Store_X_except]) {
                                ir_node *predvalue = get_Store_value(pred);
                                ir_mode *predmode  = get_irn_mode(predvalue);

                                if (is_completely_overwritten(predmode, mode)
                                        || is_partially_same(predvalue, value)) {
                                        DBG_OPT_WAW(pred, store);
                                        exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
                                        kill_node(pred);
                                        reduce_adr_usage(ptr);
                                        return DF_CHANGED;
                                }
                        }

                        /*
                         * We may remove the Store, if the old value already contains
                         * the new value, and if it does not have an exception handler.
                         *
                         * TODO: What, if both have the same exception handler ???
                         */
                        if (get_Store_volatility(store) != volatility_is_volatile
                                && !info->projs[pn_Store_X_except]) {
                                ir_node *predvalue = get_Store_value(pred);

                                if (is_partially_same(value, predvalue)) {
                                        DBG_OPT_WAW(pred, store);
                                        exchange(info->projs[pn_Store_M], mem);
                                        kill_node(store);
                                        reduce_adr_usage(ptr);
                                        return DF_CHANGED;
                                }
                        }
                } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
                           value == pred_info->projs[pn_Load_res]) {
                        /*
                         * a Store of a value just loaded from the same address
                         * -- a write after read.
                         * We may remove the Store, if it does not have an exception
                         * handler.
                         */
                        if (! info->projs[pn_Store_X_except]) {
                                DBG_OPT_WAR(store, pred);
                                exchange(info->projs[pn_Store_M], mem);
                                kill_node(store);
                                reduce_adr_usage(ptr);
                                return DF_CHANGED;
                        }
                }

                if (is_Store(pred)) {
                        /* check if we can pass through this store */
                        ir_alias_relation rel = get_alias_relation(
                                get_Store_ptr(pred),
                                get_irn_mode(get_Store_value(pred)),
                                ptr, mode);
                        /* if there might be an alias, we cannot pass this Store */
                        if (rel != ir_no_alias)
                                break;
                        pred = skip_Proj(get_Store_mem(pred));
                } else if (is_Load(pred)) {
                        ir_alias_relation rel = get_alias_relation(
                                get_Load_ptr(pred), get_Load_mode(pred),
                                ptr, mode);
                        if (rel != ir_no_alias)
                                break;

                        pred = skip_Proj(get_Load_mem(pred));
                } else {
1356                         /* follow only Load chains */
1357                         break;
1358                 }
1359
1360                 /* check for cycles */
1361                 if (NODE_VISITED(pred_info))
1362                         break;
1363                 MARK_NODE(pred_info);
1364         }
1365
1366         if (is_Sync(pred)) {
1367                 int i;
1368
1369                 /* handle all Sync predecessors */
1370                 for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
1371                         res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
1372                         if (res)
1373                                 break;
1374                 }
1375         }
1376         return res;
1377 }  /* follow_Mem_chain_for_Store */
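
/*
 * Illustrative source-level view of the cases handled above (not part of
 * the original source):
 *
 *   *p = a; ...; *p = b;    -- write after write: the first Store dies
 *   x = *p; *p = x;         -- write after read:  the second Store dies
 */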

/** find entity used as base for an address calculation */
static ir_entity *find_entity(ir_node *ptr)
{
        switch (get_irn_opcode(ptr)) {
        case iro_SymConst:
                return get_SymConst_entity(ptr);
        case iro_Sel: {
                ir_node *pred = get_Sel_ptr(ptr);
                if (get_irg_frame(get_irn_irg(ptr)) == pred)
                        return get_Sel_entity(ptr);

                return find_entity(pred);
        }
        case iro_Sub:
        case iro_Add: {
                ir_node *left = get_binop_left(ptr);
                ir_node *right;
                if (mode_is_reference(get_irn_mode(left)))
                        return find_entity(left);
                right = get_binop_right(ptr);
                if (mode_is_reference(get_irn_mode(right)))
                        return find_entity(right);
                return NULL;
        }
        default:
                return NULL;
        }
}
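
/*
 * Illustrative example (not part of the original source): for an address
 * built as Add(Sel(frame, v), offset), find_entity() follows the
 * reference-mode operand through the Add and yields the frame entity v.
 */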

/**
 * optimize a Store
 *
 * @param store  the Store node
 */
static unsigned optimize_store(ir_node *store)
{
        ir_node   *ptr;
        ir_node   *mem;
        ir_entity *entity;

        if (get_Store_volatility(store) == volatility_is_volatile)
                return 0;

        ptr    = get_Store_ptr(store);
        entity = find_entity(ptr);

        /* a store to an entity which is never read is unnecessary */
        if (entity != NULL && !(get_entity_usage(entity) & ir_usage_read)) {
                ldst_info_t *info = (ldst_info_t*)get_irn_link(store);
                if (info->projs[pn_Store_X_except] == NULL) {
                        DB((dbg, LEVEL_1, "  Killing useless %+F to never read entity %+F\n", store, entity));
                        exchange(info->projs[pn_Store_M], get_Store_mem(store));
                        kill_node(store);
                        reduce_adr_usage(ptr);
                        return DF_CHANGED;
                }
        }

        /* Check, if the address of this Store is used more than once.
         * If not, this Store cannot be removed in any case. */
        if (get_irn_n_uses(ptr) <= 1)
                return 0;

        mem = get_Store_mem(store);

        /* follow the memory chain as long as there are only Loads and
         * alias-free Stores */
        INC_MASTER();

        return follow_Mem_chain_for_Store(store, skip_Proj(mem));
}  /* optimize_store */

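/*
 * Illustrative example (not part of the original source): with entity
 * usage computed, a Store to a variable that is never read anywhere is
 * removed outright:
 *
 *   static int scratch;              -- ir_usage_read is not set
 *   void f(void) { scratch = 42; }   -- this Store is killed
 */
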
/**
 * walker, optimizes Phi after Stores to identical places:
 * Does the following optimization:
 * @verbatim
 *
 *   val1   val2   val3          val1  val2  val3
 *    |      |      |               \    |    /
 *  Store  Store  Store              \   |   /
 *      \    |    /                   PhiData
 *       \   |   /                       |
 *        \  |  /                      Store
 *          PhiM
 *
 * @endverbatim
 * This reduces the number of stores and allows for predicated execution.
 * Moves Stores back to the end of a function, which may be bad.
 *
 * This is only possible if the predecessor blocks have only one successor.
 */
static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
{
        int i, n;
        ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
        ir_mode *mode;
        ir_node **inM, **inD, **projMs;
        int *idx;
        dbg_info *db = NULL;
        ldst_info_t *info;
        block_info_t *bl_info;
        unsigned res = 0;

        /* Must be a memory Phi */
        if (get_irn_mode(phi) != mode_M)
                return 0;

        n = get_Phi_n_preds(phi);
        if (n <= 0)
                return 0;

        /* must be only one user */
        projM = get_Phi_pred(phi, 0);
        if (get_irn_n_edges(projM) != 1)
                return 0;

        store = skip_Proj(projM);
        old_store = store;
        if (!is_Store(store))
                return 0;

        block = get_nodes_block(store);

        /* abort on dead blocks */
        if (is_Block_dead(block))
                return 0;

        /* check if the block is post dominated by Phi-block
           and has no exception exit */
        bl_info = (block_info_t*)get_irn_link(block);
        if (bl_info->flags & BLOCK_HAS_EXC)
                return 0;

        phi_block = get_nodes_block(phi);
        if (! block_strictly_postdominates(phi_block, block))
                return 0;

        /* this is the address of the store */
        ptr  = get_Store_ptr(store);
        mode = get_irn_mode(get_Store_value(store));
        info = (ldst_info_t*)get_irn_link(store);
        exc  = info->exc_block;

        for (i = 1; i < n; ++i) {
                ir_node *pred = get_Phi_pred(phi, i);

                if (get_irn_n_edges(pred) != 1)
                        return 0;

                pred = skip_Proj(pred);
                if (!is_Store(pred))
                        return 0;

                if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
                        return 0;

                info = (ldst_info_t*)get_irn_link(pred);

                /* check, if all stores have the same exception flow */
                if (exc != info->exc_block)
                        return 0;

                /* abort on dead blocks */
                block = get_nodes_block(pred);
                if (is_Block_dead(block))
                        return 0;

                /* check if the block is post dominated by Phi-block
                   and has no exception exit. Note that block must be different from
                   Phi-block, else we would move a Store from the End of a block to its
                   Start... */
                bl_info = (block_info_t*)get_irn_link(block);
                if (bl_info->flags & BLOCK_HAS_EXC)
                        return 0;
                if (block == phi_block || ! block_postdominates(phi_block, block))
                        return 0;
        }

        /*
         * ok, when we are here, we found all predecessors of a Phi that
         * are Stores to the same address and size. That means whatever
         * we do before we enter the block of the Phi, we do a Store.
         * So, we can move the Store to the current block:
         *
         *   val1    val2    val3          val1  val2  val3
         *    |       |       |               \    |    /
         * | Str | | Str | | Str |             \   |   /
         *      \     |     /                   PhiData
         *       \    |    /                       |
         *        \   |   /                       Str
         *           PhiM
         *
         * Is only allowed if the predecessor blocks have only one successor.
         */

        NEW_ARR_A(ir_node *, projMs, n);
        NEW_ARR_A(ir_node *, inM, n);
        NEW_ARR_A(ir_node *, inD, n);
        NEW_ARR_A(int, idx, n);

        /* Prepare: Collect all Store nodes.  We must do this
           first because we otherwise may lose a Store when exchanging its
           memory Proj.
         */
        for (i = n - 1; i >= 0; --i) {
                ir_node *store;

                projMs[i] = get_Phi_pred(phi, i);
                assert(is_Proj(projMs[i]));

                store = get_Proj_pred(projMs[i]);
                info  = (ldst_info_t*)get_irn_link(store);

                inM[i] = get_Store_mem(store);
                inD[i] = get_Store_value(store);
                idx[i] = info->exc_idx;
        }
        block = get_nodes_block(phi);

        /* second step: create a new memory Phi */
        phiM = new_rd_Phi(get_irn_dbg_info(phi), block, n, inM, mode_M);

        /* third step: create a new data Phi */
        phiD = new_rd_Phi(get_irn_dbg_info(phi), block, n, inD, mode);

        /* rewire memory and kill the node */
        for (i = n - 1; i >= 0; --i) {
                ir_node *proj  = projMs[i];

                if (is_Proj(proj)) {
                        ir_node *store = get_Proj_pred(proj);
                        exchange(proj, inM[i]);
                        kill_node(store);
                }
        }

        /* fourth step: create the Store */
        store = new_rd_Store(db, block, phiM, ptr, phiD, cons_none);
#ifdef DO_CACHEOPT
        co_set_irn_name(store, co_get_irn_ident(old_store));
#endif

        projM = new_rd_Proj(NULL, store, mode_M, pn_Store_M);

        info = get_ldst_info(store, &wenv->obst);
        info->projs[pn_Store_M] = projM;

        /* fifth step: repair exception flow */
        if (exc) {
                ir_node *projX = new_rd_Proj(NULL, store, mode_X, pn_Store_X_except);

                info->projs[pn_Store_X_except] = projX;
                info->exc_block                = exc;
                info->exc_idx                  = idx[0];

                for (i = 0; i < n; ++i) {
                        set_Block_cfgpred(exc, idx[i], projX);
                }

                if (n > 1) {
                        /* the exception block should be optimized as some inputs are identical now */
                }

                res |= CF_CHANGED;
        }

        /* sixth step: replace old Phi */
        exchange(phi, projM);

        return res | DF_CHANGED;
}  /* optimize_phi */

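/*
 * Illustrative source-level effect of optimize_phi (not part of the
 * original source):
 *
 *   if (c) *p = a; else *p = b;   -- one Store per predecessor block
 *   -- becomes:
 *   *p = c ? a : b;               -- one data Phi feeding a single Store
 */
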
/**
 * walker, do the optimizations
 */
static void do_load_store_optimize(ir_node *n, void *env)
{
        walk_env_t *wenv = (walk_env_t*)env;

        switch (get_irn_opcode(n)) {

        case iro_Load:
                wenv->changes |= optimize_load(n);
                break;

        case iro_Store:
                wenv->changes |= optimize_store(n);
                break;

        case iro_Phi:
                wenv->changes |= optimize_phi(n, wenv);
                break;

        default:
                break;
        }
}  /* do_load_store_optimize */

/** A scc. */
typedef struct scc {
        ir_node *head;      /**< the head of the list */
} scc;

/** A node entry. */
typedef struct node_entry {
        unsigned DFSnum;    /**< the DFS number of this node */
        unsigned low;       /**< the low number of this node */
        int      in_stack;  /**< flag, set if the node is on the stack */
        ir_node  *next;     /**< link to the next node in the same scc */
        scc      *pscc;     /**< the scc of this node */
        unsigned POnum;     /**< the post order number for blocks */
} node_entry;

/** The loop environment. */
typedef struct loop_env {
        ir_phase ph;           /**< the phase object */
        ir_node  **stack;      /**< the node stack */
        size_t   tos;          /**< tos index */
        unsigned nextDFSnum;   /**< the current DFS number */
        unsigned POnum;        /**< current post order number */

        unsigned changes;      /**< a bitmask of graph changes */
} loop_env;

/**
 * Gets the node_entry of a node
 */
static node_entry *get_irn_ne(ir_node *irn, loop_env *env)
{
        ir_phase   *ph = &env->ph;
        node_entry *e  = (node_entry*)phase_get_irn_data(&env->ph, irn);

        if (! e) {
                e = (node_entry*)phase_alloc(ph, sizeof(*e));
                memset(e, 0, sizeof(*e));
                phase_set_irn_data(ph, irn, e);
        }
        return e;
}  /* get_irn_ne */

/**
 * Push a node onto the stack.
 *
 * @param env   the loop environment
 * @param n     the node to push
 */
static void push(loop_env *env, ir_node *n)
{
        node_entry *e;

        if (env->tos == ARR_LEN(env->stack)) {
                size_t nlen = ARR_LEN(env->stack) * 2;
                ARR_RESIZE(ir_node *, env->stack, nlen);
        }
        env->stack[env->tos++] = n;
        e = get_irn_ne(n, env);
        e->in_stack = 1;
}  /* push */

/**
 * pop a node from the stack
 *
 * @param env   the loop environment
 *
 * @return  The topmost node
 */
static ir_node *pop(loop_env *env)
{
        ir_node *n = env->stack[--env->tos];
        node_entry *e = get_irn_ne(n, env);

        e->in_stack = 0;
        return n;
}  /* pop */

/**
 * Check if irn is a region constant.
 * The block of irn must strictly dominate the header block.
 *
 * @param irn           the node to check
 * @param header_block  the header block of the induction variable
 */
static int is_rc(ir_node *irn, ir_node *header_block)
{
        ir_node *block = get_nodes_block(irn);

        return (block != header_block) && block_dominates(block, header_block);
}  /* is_rc */

typedef struct phi_entry phi_entry;
struct phi_entry {
        ir_node   *phi;    /**< A phi with a region const memory. */
        int       pos;     /**< The position of the region const memory */
        ir_node   *load;   /**< the newly created load for this phi */
        phi_entry *next;
};

/**
 * An entry in the avail set.
 */
typedef struct avail_entry_t {
        ir_node *ptr;   /**< the address pointer */
        ir_mode *mode;  /**< the load mode */
        ir_node *load;  /**< the associated Load */
} avail_entry_t;

/**
 * Compare two avail entries.
 */
static int cmp_avail_entry(const void *elt, const void *key, size_t size)
{
        const avail_entry_t *a = (const avail_entry_t*)elt;
        const avail_entry_t *b = (const avail_entry_t*)key;
        (void) size;

        return a->ptr != b->ptr || a->mode != b->mode;
}  /* cmp_avail_entry */

/**
 * Calculate the hash value of an avail entry.
 */
static unsigned hash_cache_entry(const avail_entry_t *entry)
{
        return get_irn_idx(entry->ptr) * 9 + HASH_PTR(entry->mode);
}  /* hash_cache_entry */

/**
 * Move Loads out of loops if possible.
 *
 * @param pscc   the loop described by an SCC
 * @param env    the loop environment
 */
static void move_loads_out_of_loops(scc *pscc, loop_env *env)
{
        ir_node   *phi, *load, *next, *other, *next_other;
        ir_entity *ent;
        int       j;
        phi_entry *phi_list = NULL;
        set       *avail;

        avail = new_set(cmp_avail_entry, 8);

        /* collect all outer memories */
        for (phi = pscc->head; phi != NULL; phi = next) {
                node_entry *ne = get_irn_ne(phi, env);
                next = ne->next;

                /* check all memory Phi's */
                if (! is_Phi(phi))
                        continue;

                assert(get_irn_mode(phi) == mode_M && "DFS returned a non-memory Phi");

                for (j = get_irn_arity(phi) - 1; j >= 0; --j) {
                        ir_node    *pred = get_irn_n(phi, j);
                        node_entry *pe   = get_irn_ne(pred, env);

                        if (pe->pscc != ne->pscc) {
                                /* not in the same SCC, is region const */
                                phi_entry *pe = (phi_entry*)phase_alloc(&env->ph, sizeof(*pe));

                                pe->phi  = phi;
                                pe->pos  = j;
                                pe->next = phi_list;
                                phi_list = pe;
                        }
                }
        }
        /* no Phis no fun */
        assert(phi_list != NULL && "DFS found a loop without Phi");

        /* for now, we cannot handle more than one input (only reducible cf) */
        if (phi_list->next != NULL)
                return;

        for (load = pscc->head; load; load = next) {
                ir_mode *load_mode;
                node_entry *ne = get_irn_ne(load, env);
                next = ne->next;

                if (is_Load(load)) {
                        ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
                        ir_node     *ptr = get_Load_ptr(load);

                        /* for now, we cannot handle Loads with exceptions */
                        if (info->projs[pn_Load_res] == NULL || info->projs[pn_Load_X_regular] != NULL || info->projs[pn_Load_X_except] != NULL)
                                continue;

                        /* for now, we can only move Load(Global) */
                        if (! is_Global(ptr))
                                continue;
                        ent       = get_Global_entity(ptr);
                        load_mode = get_Load_mode(load);
                        for (other = pscc->head; other != NULL; other = next_other) {
                                node_entry *ne = get_irn_ne(other, env);
                                next_other = ne->next;

                                if (is_Store(other)) {
                                        ir_alias_relation rel = get_alias_relation(
                                                get_Store_ptr(other),
                                                get_irn_mode(get_Store_value(other)),
                                                ptr, load_mode);
                                        /* if there might be an alias, we cannot pass this Store */
                                        if (rel != ir_no_alias)
                                                break;
                                }
                                /* only Phis and pure Calls are allowed here, so ignore them */
                        }
                        if (other == NULL) {
                                ldst_info_t *ninfo = NULL;
                                phi_entry   *pe;
                                dbg_info    *db;

                                /* yep, no aliasing Store found, Load can be moved */
                                DB((dbg, LEVEL_1, "  Found a Load that could be moved: %+F\n", load));

                                db   = get_irn_dbg_info(load);
                                for (pe = phi_list; pe != NULL; pe = pe->next) {
                                        int     pos   = pe->pos;
                                        ir_node *phi  = pe->phi;
                                        ir_node *blk  = get_nodes_block(phi);
                                        ir_node *pred = get_Block_cfgpred_block(blk, pos);
                                        ir_node *irn, *mem;
                                        avail_entry_t entry, *res;

                                        entry.ptr  = ptr;
                                        entry.mode = load_mode;
                                        res = (avail_entry_t*)set_find(avail, &entry, sizeof(entry), hash_cache_entry(&entry));
                                        if (res != NULL) {
                                                irn = res->load;
                                        } else {
                                                irn = new_rd_Load(db, pred, get_Phi_pred(phi, pos), ptr, load_mode, cons_none);
                                                entry.load = irn;
                                                set_insert(avail, &entry, sizeof(entry), hash_cache_entry(&entry));
                                                DB((dbg, LEVEL_1, "  Created %+F in %+F\n", irn, pred));
                                        }
                                        pe->load = irn;
                                        ninfo = get_ldst_info(irn, phase_obst(&env->ph));

                                        ninfo->projs[pn_Load_M] = mem = new_r_Proj(irn, mode_M, pn_Load_M);
                                        set_Phi_pred(phi, pos, mem);

                                        ninfo->projs[pn_Load_res] = new_r_Proj(irn, load_mode, pn_Load_res);
                                }

                                /* now kill the old Load */
                                exchange(info->projs[pn_Load_M], get_Load_mem(load));
                                exchange(info->projs[pn_Load_res], ninfo->projs[pn_Load_res]);

                                env->changes |= DF_CHANGED;
                        }
                }
        }
        del_set(avail);
}  /* move_loads_out_of_loops */

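/*
 * Illustrative source-level effect (not part of the original source):
 * a Load of a global that no Store inside the loop may alias is executed
 * once in the block before the loop instead of on every iteration:
 *
 *   while (cond) { x += g; }            -- Load of g in every iteration
 *   -- becomes:
 *   t = g; while (cond) { x += t; }     -- Load moved out of the loop
 */
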
/**
 * Process a loop SCC.
 *
 * @param pscc  the SCC
 * @param env   the loop environment
 */
static void process_loop(scc *pscc, loop_env *env)
{
        ir_node *irn, *next, *header = NULL;
        node_entry *b, *h = NULL;
        int j, only_phi, num_outside, process = 0;
        ir_node *out_rc;

        /* find the header block for this scc */
        for (irn = pscc->head; irn; irn = next) {
                node_entry *e = get_irn_ne(irn, env);
                ir_node *block = get_nodes_block(irn);

                next = e->next;
                b = get_irn_ne(block, env);

                if (header != NULL) {
                        if (h->POnum < b->POnum) {
                                header = block;
                                h      = b;
                        }
                } else {
                        header = block;
                        h      = b;
                }
        }

        /* check if this scc contains only Phi, Load or Store nodes */
        only_phi    = 1;
        num_outside = 0;
        out_rc      = NULL;
        for (irn = pscc->head; irn; irn = next) {
                node_entry *e = get_irn_ne(irn, env);

                next = e->next;
                switch (get_irn_opcode(irn)) {
                case iro_Call:
                        if (is_Call_pure(irn)) {
                                /* pure calls can be treated like loads */
                                only_phi = 0;
                                break;
                        }
                        /* non-pure calls must be handled like may-alias Stores */
                        goto fail;
                case iro_CopyB:
                        /* cannot handle CopyB yet */
                        goto fail;
                case iro_Load:
                        process = 1;
                        if (get_Load_volatility(irn) == volatility_is_volatile) {
                                /* cannot handle loops with volatile Loads */
                                goto fail;
                        }
                        only_phi = 0;
                        break;
                case iro_Store:
                        if (get_Store_volatility(irn) == volatility_is_volatile) {
                                /* cannot handle loops with volatile Stores */
                                goto fail;
                        }
                        only_phi = 0;
                        break;
                case iro_Phi:
                        for (j = get_irn_arity(irn) - 1; j >= 0; --j) {
                                ir_node *pred  = get_irn_n(irn, j);
                                node_entry *pe = get_irn_ne(pred, env);

                                if (pe->pscc != e->pscc) {
                                        /* not in the same SCC, must be a region const */
                                        if (! is_rc(pred, header)) {
                                                /* not a memory loop */
                                                goto fail;
                                        }
                                        if (out_rc == NULL) {
                                                /* first region constant */
                                                out_rc = pred;
                                                ++num_outside;
                                        } else if (out_rc != pred) {
                                                /* another region constant */
                                                ++num_outside;
                                        }
                                }
                        }
                        break;
                default:
                        only_phi = 0;
                        break;
                }
        }
        if (! process)
                goto fail;

        /* found a memory loop */
        DB((dbg, LEVEL_2, "  Found a memory loop:\n  "));
        if (only_phi && num_outside == 1) {
                /* a Phi cycle with only one real predecessor can be collapsed */
                DB((dbg, LEVEL_2, "  Found a USELESS Phi cycle:\n  "));

                for (irn = pscc->head; irn; irn = next) {
                        node_entry *e = get_irn_ne(irn, env);
                        next = e->next;
                        exchange(irn, out_rc);
                }
                env->changes |= DF_CHANGED;
                return;
        }

#ifdef DEBUG_libfirm
        for (irn = pscc->head; irn; irn = next) {
                node_entry *e = get_irn_ne(irn, env);
                next = e->next;
                DB((dbg, LEVEL_2, " %+F,", irn));
        }
        DB((dbg, LEVEL_2, "\n"));
#endif
        move_loads_out_of_loops(pscc, env);

fail:
        ;
}  /* process_loop */

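/*
 * Illustrative example of the "useless Phi cycle" case (not part of the
 * original source): a memory Phi cycle whose only input from outside the
 * SCC is a single region constant rc,
 *
 *   M1 = Phi(rc, M2)
 *   M2 = Phi(M1, M1)
 *
 * performs no memory operation at all, so every node of the cycle is
 * simply replaced by rc.
 */
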
/**
 * Process a SCC.
 *
 * @param pscc  the SCC
 * @param env   the loop environment
 */
static void process_scc(scc *pscc, loop_env *env)
{
        ir_node *head = pscc->head;
        node_entry *e = get_irn_ne(head, env);

#ifdef DEBUG_libfirm
        {
                ir_node *irn, *next;

                DB((dbg, LEVEL_4, " SCC at %p:\n ", pscc));
                for (irn = pscc->head; irn; irn = next) {
                        node_entry *e = get_irn_ne(irn, env);

                        next = e->next;

                        DB((dbg, LEVEL_4, " %+F,", irn));
                }
                DB((dbg, LEVEL_4, "\n"));
        }
#endif

        if (e->next != NULL) {
                /* this SCC has more than one member */
                process_loop(pscc, env);
        }
}  /* process_scc */

/**
 * Do Tarjan's SCC algorithm and drive load/store optimization.
 *
 * @param irn  start at this node
 * @param env  the loop environment
 */
static void dfs(ir_node *irn, loop_env *env)
{
        int i, n;
        node_entry *node = get_irn_ne(irn, env);

        mark_irn_visited(irn);

        node->DFSnum = env->nextDFSnum++;
        node->low    = node->DFSnum;
        push(env, irn);

        /* handle preds */
        if (is_Phi(irn) || is_Sync(irn)) {
                n = get_irn_arity(irn);
                for (i = 0; i < n; ++i) {
                        ir_node *pred = get_irn_n(irn, i);
                        node_entry *o = get_irn_ne(pred, env);

                        if (!irn_visited(pred)) {
                                dfs(pred, env);
                                node->low = MIN(node->low, o->low);
                        }
                        if (o->DFSnum < node->DFSnum && o->in_stack)
                                node->low = MIN(o->DFSnum, node->low);
                }
        } else if (is_fragile_op(irn)) {
                ir_node *pred = get_fragile_op_mem(irn);
                node_entry *o = get_irn_ne(pred, env);

                if (!irn_visited(pred)) {
                        dfs(pred, env);
                        node->low = MIN(node->low, o->low);
                }
                if (o->DFSnum < node->DFSnum && o->in_stack)
                        node->low = MIN(o->DFSnum, node->low);
        } else if (is_Proj(irn)) {
                ir_node *pred = get_Proj_pred(irn);
                node_entry *o = get_irn_ne(pred, env);

                if (!irn_visited(pred)) {
                        dfs(pred, env);
                        node->low = MIN(node->low, o->low);
                }
                if (o->DFSnum < node->DFSnum && o->in_stack)
                        node->low = MIN(o->DFSnum, node->low);
        } else {
                /* IGNORE predecessors */
        }

        if (node->low == node->DFSnum) {
                /* irn is the root of an SCC: pop all its members off the stack */
                scc *pscc = (scc*)phase_alloc(&env->ph, sizeof(*pscc));
                ir_node *x;

                pscc->head = NULL;
                do {
                        node_entry *e;

                        x = pop(env);
                        e = get_irn_ne(x, env);
                        e->pscc    = pscc;
                        e->next    = pscc->head;
                        pscc->head = x;
                } while (x != irn);

                process_scc(pscc, env);
        }
}  /* dfs */

/**
 * Do the DFS on the memory edges of a graph.
 *
 * @param irg  the graph to process
 * @param env  the loop environment
 */
static void do_dfs(ir_graph *irg, loop_env *env)
{
        ir_node  *endblk, *end;
        int      i;

        inc_irg_visited(irg);

        /* visit all memory nodes */
        endblk = get_irg_end_block(irg);
        for (i = get_Block_n_cfgpreds(endblk) - 1; i >= 0; --i) {
                ir_node *pred = get_Block_cfgpred(endblk, i);

                pred = skip_Proj(pred);
                if (is_Return(pred))
                        dfs(get_Return_mem(pred), env);
                else if (is_Raise(pred))
                        dfs(get_Raise_mem(pred), env);
                else if (is_fragile_op(pred))
                        dfs(get_fragile_op_mem(pred), env);
                else {
                        assert(0 && "Unknown EndBlock predecessor");
                }
        }

        /* visit the keep-alives */
        end = get_irg_end(irg);
        for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
                ir_node *ka = get_End_keepalive(end, i);

                if (is_Phi(ka) && !irn_visited(ka))
                        dfs(ka, env);
        }
}  /* do_dfs */

/**
 * Optimize Loads/Stores in loops.
 *
 * @param irg  the graph
 */
static int optimize_loops(ir_graph *irg)
{
        loop_env env;

        env.stack         = NEW_ARR_F(ir_node *, 128);
        env.tos           = 0;
        env.nextDFSnum    = 0;
        env.POnum         = 0;
        env.changes       = 0;
        phase_init(&env.ph, irg, phase_irn_init_default);

        /* calculate the SCC's and drive loop optimization. */
        do_dfs(irg, &env);

        DEL_ARR_F(env.stack);
        phase_deinit(&env.ph);

        return env.changes;
}  /* optimize_loops */

/*
 * do the load store optimization
 */
int optimize_load_store(ir_graph *irg)
{
        walk_env_t env;

        FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");

        assert(get_irg_phase_state(irg) != phase_building);
        assert(get_irg_pinned(irg) != op_pin_state_floats &&
                "LoadStore optimization needs pinned graph");

        /* we need landing pads */
        remove_critical_cf_edges(irg);

        edges_assure(irg);

        /* for Phi optimization post-dominators are needed ... */
        assure_postdoms(irg);

        if (get_opt_alias_analysis()) {
                assure_irg_entity_usage_computed(irg);
                assure_irp_globals_entity_usage_computed();
        }

        obstack_init(&env.obst);
        env.changes = 0;

        /* init the links, then collect Loads/Stores/Proj's in lists */
        master_visited = 0;
        irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);

        /* now we have collected enough information, optimize */
        irg_walk_graph(irg, NULL, do_load_store_optimize, &env);

        env.changes |= optimize_loops(irg);

        obstack_free(&env.obst, NULL);

        /* Handle graph state */
        if (env.changes) {
                set_irg_outs_inconsistent(irg);
                set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
        }

        if (env.changes & CF_CHANGED) {
                /* control flow changed, so dominance information is invalid:
                   blocks might have Bad() predecessors now. */
                set_irg_doms_inconsistent(irg);
        }
        return env.changes != 0;
}  /* optimize_load_store */

ir_graph_pass_t *optimize_load_store_pass(const char *name)
{
        return def_graph_pass_ret(name ? name : "ldst", optimize_load_store);
}  /* optimize_load_store_pass */
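
/*
 * Usage sketch (illustrative, not part of the original source): the
 * optimization can be run directly on a graph, or wrapped as a pass:
 *
 *   int changed = optimize_load_store(irg);
 *   -- or, for a pass pipeline:
 *   ir_graph_pass_t *pass = optimize_load_store_pass(NULL);  -- name defaults to "ldst"
 */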