convert opts to use the opt_manage framework
[libfirm] ir/opt/ldstopt.c
1 /*
2  * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief   Load/Store optimizations.
23  * @author  Michael Beck
24  * @version $Id$
25  */
26 #include "config.h"
27
28 #include <string.h>
29
30 #include "iroptimize.h"
31 #include "irnode_t.h"
32 #include "irgraph_t.h"
33 #include "irmode_t.h"
34 #include "iropt_t.h"
35 #include "ircons_t.h"
36 #include "irgmod.h"
37 #include "irgwalk.h"
38 #include "tv_t.h"
39 #include "dbginfo_t.h"
40 #include "iropt_dbg.h"
41 #include "irflag_t.h"
42 #include "array_t.h"
43 #include "irhooks.h"
44 #include "iredges.h"
45 #include "irpass.h"
46 #include "opt_polymorphy.h"
47 #include "irmemory.h"
48 #include "irphase_t.h"
49 #include "irgopt.h"
50 #include "set.h"
51 #include "be.h"
52 #include "debug.h"
53 #include "opt_manage.h"
54
55 /** The debug handle. */
56 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
57
58 #undef IMAX
59 #define IMAX(a,b)   ((a) > (b) ? (a) : (b))
60
61 #define MAX_PROJ    IMAX(IMAX((long)pn_Load_max, (long)pn_Store_max), (long)pn_Call_max)
62
63 enum changes_t {
64         DF_CHANGED = 1,       /**< data flow changed */
65         CF_CHANGED = 2,       /**< control flow changed */
66 };
67
68 /**
69  * walker environment
70  */
71 typedef struct walk_env_t {
72         struct obstack obst;          /**< obstack for the node and block infos */
73         unsigned changes;             /**< a bitmask of graph changes */
74 } walk_env_t;
75
76 /** A Load/Store info. */
77 typedef struct ldst_info_t {
78         ir_node  *projs[MAX_PROJ+1];  /**< list of Projs of this node */
79         ir_node  *exc_block;          /**< the exception block if available */
80         int      exc_idx;             /**< predecessor index in the exception block */
81         unsigned visited;             /**< visited counter for breaking loops */
82 } ldst_info_t;
83
84 /**
85  * flags for control flow.
86  */
87 enum block_flags_t {
88         BLOCK_HAS_COND = 1,      /**< Block has conditional control flow */
89         BLOCK_HAS_EXC  = 2       /**< Block has exceptional control flow */
90 };
91
92 /**
93  * a Block info.
94  */
95 typedef struct block_info_t {
96         unsigned flags;               /**< flags for the block */
97 } block_info_t;
98
99 /** the master visited flag for loop detection. */
100 static unsigned master_visited = 0;
101
102 #define INC_MASTER()       ++master_visited
103 #define MARK_NODE(info)    (info)->visited = master_visited
104 #define NODE_VISITED(info) ((info)->visited >= master_visited)
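/* Illustrative usage sketch (annotation added here, not part of the original
 * file): a chain walker bumps the master counter once per query and marks
 * every ldst_info_t it passes, so stale marks from earlier queries never
 * have to be cleared:
 *
 *     INC_MASTER();
 *     while (walking) {
 *             if (NODE_VISITED(info))
 *                     break;        // cycle detected, stop following the chain
 *             MARK_NODE(info);
 *             ...
 *     }
 */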
105
106 /**
107  * get the Load/Store info of a node
108  */
109 static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst)
110 {
111         ldst_info_t *info = (ldst_info_t*)get_irn_link(node);
112
113         if (! info) {
114                 info = OALLOCZ(obst, ldst_info_t);
115                 set_irn_link(node, info);
116         }
117         return info;
118 }  /* get_ldst_info */
119
120 /**
121  * get the Block info of a node
122  */
123 static block_info_t *get_block_info(ir_node *node, struct obstack *obst)
124 {
125         block_info_t *info = (block_info_t*)get_irn_link(node);
126
127         if (! info) {
128                 info = OALLOCZ(obst, block_info_t);
129                 set_irn_link(node, info);
130         }
131         return info;
132 }  /* get_block_info */
133
134 /**
135  * update the projection info for a Load/Store
136  */
137 static unsigned update_projs(ldst_info_t *info, ir_node *proj)
138 {
139         long nr = get_Proj_proj(proj);
140
141         assert(0 <= nr && nr <= MAX_PROJ && "Wrong proj from LoadStore");
142
143         if (info->projs[nr]) {
144                 /* there is already one, do CSE */
145                 exchange(proj, info->projs[nr]);
146                 return DF_CHANGED;
147         }
148         else {
149                 info->projs[nr] = proj;
150                 return 0;
151         }
152 }  /* update_projs */
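/* Example (illustrative): if a Load already has a result Proj r1 recorded and
 * the walker encounters a second Proj r2 with the same number pn_Load_res,
 * update_projs() exchanges r2 for r1 (a small local CSE) and reports
 * DF_CHANGED; otherwise it just records the Proj in info->projs[]. */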
153
154 /**
155  * update the exception block info for a Load/Store node.
156  *
157  * @param info   the load/store info struct
158  * @param block  the exception handler block for this load/store
159  * @param pos    the control flow input of the block
160  */
161 static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
162 {
163         assert(info->exc_block == NULL && "more than one exception block found");
164
165         info->exc_block = block;
166         info->exc_idx   = pos;
167         return 0;
168 }  /* update_exc */
169
170 /** Return the number of uses of an address node */
171 #define get_irn_n_uses(adr)     get_irn_n_edges(adr)
172
173 /**
174  * walker, collects all Load/Store/Proj nodes
175  *
176  * walks from Start -> End
177  */
178 static void collect_nodes(ir_node *node, void *env)
179 {
180         walk_env_t  *wenv   = (walk_env_t *)env;
181         unsigned     opcode = get_irn_opcode(node);
182         ir_node     *pred, *blk, *pred_blk;
183         ldst_info_t *ldst_info;
184
185         if (opcode == iro_Proj) {
186                 pred   = get_Proj_pred(node);
187                 opcode = get_irn_opcode(pred);
188
189                 if (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call) {
190                         ldst_info = get_ldst_info(pred, &wenv->obst);
191
192                         wenv->changes |= update_projs(ldst_info, node);
193
194                         /*
195                          * Place the Projs in the same block as their
196                          * predecessor Load/Store/Call. This is always ok and
197                          * prevents "non-SSA" form after optimizations if the
198                          * Proj was in a wrong block.
199                          */
200                         blk      = get_nodes_block(node);
201                         pred_blk = get_nodes_block(pred);
202                         if (blk != pred_blk) {
203                                 wenv->changes |= DF_CHANGED;
204                                 set_nodes_block(node, pred_blk);
205                         }
206                 }
207         } else if (opcode == iro_Block) {
208                 int i;
209
210                 for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
211                         ir_node      *pred_block, *proj;
212                         block_info_t *bl_info;
213                         int          is_exc = 0;
214
215                         pred = proj = get_Block_cfgpred(node, i);
216
217                         if (is_Proj(proj)) {
218                                 pred   = get_Proj_pred(proj);
219                                 is_exc = is_x_except_Proj(proj);
220                         }
221
222                         /* ignore Bad predecessors, they will be removed later */
223                         if (is_Bad(pred))
224                                 continue;
225
226                         pred_block = get_nodes_block(pred);
227                         bl_info    = get_block_info(pred_block, &wenv->obst);
228
229                         if (is_fragile_op(pred) && is_exc)
230                                 bl_info->flags |= BLOCK_HAS_EXC;
231                         else if (is_irn_forking(pred))
232                                 bl_info->flags |= BLOCK_HAS_COND;
233
234                         opcode = get_irn_opcode(pred);
235                         if (is_exc && (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call)) {
236                                 ldst_info = get_ldst_info(pred, &wenv->obst);
237
238                                 wenv->changes |= update_exc(ldst_info, node, i);
239                         }
240                 }
241         }
242 }  /* collect_nodes */
243
244 /**
245  * Returns an entity if the address ptr points to a constant one.
246  *
247  * @param ptr  the address
248  *
249  * @return an entity or NULL
250  */
251 static ir_entity *find_constant_entity(ir_node *ptr)
252 {
253         for (;;) {
254                 if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
255                         return get_SymConst_entity(ptr);
256                 } else if (is_Sel(ptr)) {
257                         ir_entity *ent = get_Sel_entity(ptr);
258                         ir_type   *tp  = get_entity_owner(ent);
259
260                         /* Do not fiddle with polymorphism. */
261                         if (is_Class_type(tp) &&
262                                 ((get_entity_n_overwrites(ent)    != 0) ||
263                                 (get_entity_n_overwrittenby(ent) != 0)   ) )
264                                 return NULL;
265
266                         if (is_Array_type(tp)) {
267                                 /* check bounds */
268                                 int i, n;
269
270                                 for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
271                                         ir_node   *bound;
272                                         ir_tarval *tlower, *tupper;
273                                         ir_node   *index = get_Sel_index(ptr, i);
274                                         ir_tarval *tv    = computed_value(index);
275
276                                         /* check if the index is constant */
277                                         if (tv == tarval_bad)
278                                                 return NULL;
279
280                                         bound  = get_array_lower_bound(tp, i);
281                                         tlower = computed_value(bound);
282                                         bound  = get_array_upper_bound(tp, i);
283                                         tupper = computed_value(bound);
284
285                                         if (tlower == tarval_bad || tupper == tarval_bad)
286                                                 return NULL;
287
288                                         if (tarval_cmp(tv, tlower) == ir_relation_less)
289                                                 return NULL;
290                                         if (tarval_cmp(tupper, tv) == ir_relation_less)
291                                                 return NULL;
292
293                                         /* ok, bounds check finished */
294                                 }
295                         }
296
297                         if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT)
298                                 return ent;
299
300                         /* try next */
301                         ptr = get_Sel_ptr(ptr);
302                 } else if (is_Add(ptr)) {
303                         ir_node *l = get_Add_left(ptr);
304                         ir_node *r = get_Add_right(ptr);
305
306                         if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
307                                 ptr = l;
308                         else if (get_irn_mode(r) == get_irn_mode(ptr) && is_Const(l))
309                                 ptr = r;
310                         else
311                                 return NULL;
312
313                         /* for now, we support only one addition, reassoc should fold all others */
314                         if (! is_SymConst(ptr) && !is_Sel(ptr))
315                                 return NULL;
316                 } else if (is_Sub(ptr)) {
317                         ir_node *l = get_Sub_left(ptr);
318                         ir_node *r = get_Sub_right(ptr);
319
320                         if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
321                                 ptr = l;
322                         else
323                                 return NULL;
324                         /* for now, we support only one subtraction, reassoc should fold all others */
325                         if (! is_SymConst(ptr) && !is_Sel(ptr))
326                                 return NULL;
327                 } else
328                         return NULL;
329         }
330 }  /* find_constant_entity */
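/* Example (illustrative C source; the exact IR shape depends on the front
 * end):
 *
 *     static const int tab[4] = { 1, 2, 3, 5 };
 *     int f(void) { return tab[2]; }
 *
 * The address of tab[2] is a Sel with a constant, in-bounds index on a
 * SymConst of tab; assuming tab carries IR_LINKAGE_CONSTANT linkage,
 * find_constant_entity() returns the entity for tab. */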
331
332 /**
333  * Return the array index of the Sel node n for dimension dim as a long.
334  */
335 static long get_Sel_array_index_long(ir_node *n, int dim)
336 {
337         ir_node *index = get_Sel_index(n, dim);
338         assert(is_Const(index));
339         return get_tarval_long(get_Const_tarval(index));
340 }  /* get_Sel_array_index_long */
341
342 /**
343  * Returns the accessed component graph path for a
344  * node computing an address.
345  *
346  * @param ptr    the node computing the address
347  * @param depth  current depth in steps upward from the root
348  *               of the address
349  */
350 static compound_graph_path *rec_get_accessed_path(ir_node *ptr, size_t depth)
351 {
352         compound_graph_path *res = NULL;
353         ir_entity           *root, *field, *ent;
354         size_t              path_len, pos, idx;
355         ir_tarval           *tv;
356         ir_type             *tp;
357
358         if (is_SymConst(ptr)) {
359                 /* a SymConst. If the depth is 0, this is an access to a global
360                  * entity and we don't need a component path, else we know
361                  * at least its length.
362                  */
363                 assert(get_SymConst_kind(ptr) == symconst_addr_ent);
364                 root = get_SymConst_entity(ptr);
365                 res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
366         } else if (is_Sel(ptr)) {
367                 /* it's a Sel, go up until we find the root */
368                 res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);
369                 if (res == NULL)
370                         return NULL;
371
372                 /* fill up the step in the path at the current position */
373                 field    = get_Sel_entity(ptr);
374                 path_len = get_compound_graph_path_length(res);
375                 pos      = path_len - depth - 1;
376                 set_compound_graph_path_node(res, pos, field);
377
378                 if (is_Array_type(get_entity_owner(field))) {
379                         assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
380                         set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
381                 }
382         } else if (is_Add(ptr)) {
383                 ir_mode   *mode;
384                 ir_tarval *tmp;
385
386                 {
387                         ir_node   *l    = get_Add_left(ptr);
388                         ir_node   *r    = get_Add_right(ptr);
389                         if (is_Const(r) && get_irn_mode(l) == get_irn_mode(ptr)) {
390                                 ptr = l;
391                                 tv  = get_Const_tarval(r);
392                         } else {
393                                 ptr = r;
394                                 tv  = get_Const_tarval(l);
395                         }
396                 }
397 ptr_arith:
398                 mode = get_tarval_mode(tv);
399                 tmp  = tv;
400
401                 /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
402                 if (is_Sel(ptr)) {
403                         field = get_Sel_entity(ptr);
404                 } else {
405                         field = get_SymConst_entity(ptr);
406                 }
407                 idx = 0;
408                 for (ent = field;;) {
409                         unsigned   size;
410                         ir_tarval *sz, *tv_index, *tlower, *tupper;
411                         ir_node   *bound;
412
413                         tp = get_entity_type(ent);
414                         if (! is_Array_type(tp))
415                                 break;
416                         ent = get_array_element_entity(tp);
417                         size = get_type_size_bytes(get_entity_type(ent));
418                         sz   = new_tarval_from_long(size, mode);
419
420                         tv_index = tarval_div(tmp, sz);
421                         tmp      = tarval_mod(tmp, sz);
422
423                         if (tv_index == tarval_bad || tmp == tarval_bad)
424                                 return NULL;
425
426                         assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
427                         bound  = get_array_lower_bound(tp, 0);
428                         tlower = computed_value(bound);
429                         bound  = get_array_upper_bound(tp, 0);
430                         tupper = computed_value(bound);
431
432                         if (tlower == tarval_bad || tupper == tarval_bad)
433                                 return NULL;
434
435                         if (tarval_cmp(tv_index, tlower) == ir_relation_less)
436                                 return NULL;
437                         if (tarval_cmp(tupper, tv_index) == ir_relation_less)
438                                 return NULL;
439
440                         /* ok, bounds check finished */
441                         ++idx;
442                 }
443                 if (! tarval_is_null(tmp)) {
444                         /* access to some struct/union member */
445                         return NULL;
446                 }
447
448                 /* should be at least ONE array */
449                 if (idx == 0)
450                         return NULL;
451
452                 res = rec_get_accessed_path(ptr, depth + idx);
453                 if (res == NULL)
454                         return NULL;
455
456                 path_len = get_compound_graph_path_length(res);
457                 pos      = path_len - depth - idx;
458
459                 for (ent = field;;) {
460                         unsigned   size;
461                         ir_tarval *sz, *tv_index;
462                         long       index;
463
464                         tp = get_entity_type(ent);
465                         if (! is_Array_type(tp))
466                                 break;
467                         ent = get_array_element_entity(tp);
468                         set_compound_graph_path_node(res, pos, ent);
469
470                         size = get_type_size_bytes(get_entity_type(ent));
471                         sz   = new_tarval_from_long(size, mode);
472
473                         tv_index = tarval_div(tv, sz);
474                         tv       = tarval_mod(tv, sz);
475
476                         /* worked above, should work again */
477                         assert(tv_index != tarval_bad && tv != tarval_bad);
478
479                         /* bounds already checked above */
480                         index = get_tarval_long(tv_index);
481                         set_compound_graph_path_array_index(res, pos, index);
482                         ++pos;
483                 }
484         } else if (is_Sub(ptr)) {
485                 ir_node *l = get_Sub_left(ptr);
486                 ir_node *r = get_Sub_right(ptr);
487
488                 ptr = l;
489                 tv  = get_Const_tarval(r);
490                 tv  = tarval_neg(tv);
491                 goto ptr_arith;
492         }
493         return res;
494 }  /* rec_get_accessed_path */
495
496 /**
497  * Returns an access path or NULL.  The access path is only
498  * valid if the graph is in phase_high and _no_ address computation is used.
499  */
500 static compound_graph_path *get_accessed_path(ir_node *ptr)
501 {
502         compound_graph_path *gr = rec_get_accessed_path(ptr, 0);
503         return gr;
504 }  /* get_accessed_path */
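/* Example (illustrative): for a global struct access like s.inner.field the
 * recursion walks the Sel chain up to the SymConst of s, allocates a path
 * of length 2 at the root, and fills in the entities inner and field as it
 * unwinds; an array Sel additionally records its constant index at its step.
 * Entity names here are made up for the sketch. */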
505
506 typedef struct path_entry {
507         ir_entity         *ent;
508         struct path_entry *next;
509         size_t            index;
510 } path_entry;
511
512 static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
513 {
514         path_entry       entry, *p;
515         ir_entity        *ent, *field;
516         ir_initializer_t *initializer;
517         ir_tarval        *tv;
518         ir_type          *tp;
519         size_t           n;
520
521         entry.next = next;
522         if (is_SymConst(ptr)) {
523                 /* found the root */
524                 ent         = get_SymConst_entity(ptr);
525                 initializer = get_entity_initializer(ent);
526                 for (p = next; p != NULL;) {
527                         if (initializer->kind != IR_INITIALIZER_COMPOUND)
528                                 return NULL;
529                         n  = get_initializer_compound_n_entries(initializer);
530                         tp = get_entity_type(ent);
531
532                         if (is_Array_type(tp)) {
533                                 ent = get_array_element_entity(tp);
534                                 if (ent != p->ent) {
535                                         /* a missing [0] */
536                                         if (0 >= n)
537                                                 return NULL;
538                                         initializer = get_initializer_compound_value(initializer, 0);
539                                         continue;
540                                 }
541                         }
542                         if (p->index >= n)
543                                 return NULL;
544                         initializer = get_initializer_compound_value(initializer, p->index);
545
546                         ent = p->ent;
547                         p   = p->next;
548                 }
549                 tp = get_entity_type(ent);
550                 while (is_Array_type(tp)) {
551                         ent = get_array_element_entity(tp);
552                         tp = get_entity_type(ent);
553                         /* a missing [0] */
554                         n  = get_initializer_compound_n_entries(initializer);
555                         if (0 >= n)
556                                 return NULL;
557                         initializer = get_initializer_compound_value(initializer, 0);
558                 }
559
560                 switch (initializer->kind) {
561                 case IR_INITIALIZER_CONST:
562                         return get_initializer_const_value(initializer);
563                 case IR_INITIALIZER_TARVAL:
564                 case IR_INITIALIZER_NULL:
565                 default:
566                         return NULL;
567                 }
568         } else if (is_Sel(ptr)) {
569                 entry.ent = field = get_Sel_entity(ptr);
570                 tp = get_entity_owner(field);
571                 if (is_Array_type(tp)) {
572                         assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
573                         entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
574                 } else {
575                         size_t i, n_members = get_compound_n_members(tp);
576                         for (i = 0; i < n_members; ++i) {
577                                 if (get_compound_member(tp, i) == field)
578                                         break;
579                         }
580                         if (i >= n_members) {
581                                 /* not found: should NOT happen */
582                                 return NULL;
583                         }
584                         entry.index = i;
585                 }
586                 return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
587         } else if (is_Add(ptr)) {
588                 ir_mode  *mode;
589                 unsigned pos;
590
591                 {
592                         ir_node *l = get_Add_left(ptr);
593                         ir_node *r = get_Add_right(ptr);
594                         if (is_Const(r)) {
595                                 ptr = l;
596                                 tv  = get_Const_tarval(r);
597                         } else {
598                                 ptr = r;
599                                 tv  = get_Const_tarval(l);
600                         }
601                 }
602 ptr_arith:
603                 mode = get_tarval_mode(tv);
604
605                 /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
606                 if (is_Sel(ptr)) {
607                         field = get_Sel_entity(ptr);
608                 } else {
609                         field = get_SymConst_entity(ptr);
610                 }
611
612                 /* count needed entries */
613                 pos = 0;
614                 for (ent = field;;) {
615                         tp = get_entity_type(ent);
616                         if (! is_Array_type(tp))
617                                 break;
618                         ent = get_array_element_entity(tp);
619                         ++pos;
620                 }
621                 /* should be at least ONE entry */
622                 if (pos == 0)
623                         return NULL;
624
625                 /* allocate the right number of entries */
626                 NEW_ARR_A(path_entry, p, pos);
627
628                 /* fill them up */
629                 pos = 0;
630                 for (ent = field;;) {
631                         unsigned   size;
632                         ir_tarval *sz, *tv_index, *tlower, *tupper;
633                         long       index;
634                         ir_node   *bound;
635
636                         tp = get_entity_type(ent);
637                         if (! is_Array_type(tp))
638                                 break;
639                         ent = get_array_element_entity(tp);
640                         p[pos].ent  = ent;
641                         p[pos].next = &p[pos + 1];
642
643                         size = get_type_size_bytes(get_entity_type(ent));
644                         sz   = new_tarval_from_long(size, mode);
645
646                         tv_index = tarval_div(tv, sz);
647                         tv       = tarval_mod(tv, sz);
648
649                         if (tv_index == tarval_bad || tv == tarval_bad)
650                                 return NULL;
651
652                         assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
653                         bound  = get_array_lower_bound(tp, 0);
654                         tlower = computed_value(bound);
655                         bound  = get_array_upper_bound(tp, 0);
656                         tupper = computed_value(bound);
657
658                         if (tlower == tarval_bad || tupper == tarval_bad)
659                                 return NULL;
660
661                         if (tarval_cmp(tv_index, tlower) == ir_relation_less)
662                                 return NULL;
663                         if (tarval_cmp(tupper, tv_index) == ir_relation_less)
664                                 return NULL;
665
666                         /* ok, bounds check finished */
667                         index = get_tarval_long(tv_index);
668                         p[pos].index = index;
669                         ++pos;
670                 }
671                 if (! tarval_is_null(tv)) {
672                         /* hmm, wrong access */
673                         return NULL;
674                 }
675                 p[pos - 1].next = next;
676                 return rec_find_compound_ent_value(ptr, p);
677         } else if (is_Sub(ptr)) {
678                 ir_node *l = get_Sub_left(ptr);
679                 ir_node *r = get_Sub_right(ptr);
680
681                 ptr = l;
682                 tv  = get_Const_tarval(r);
683                 tv  = tarval_neg(tv);
684                 goto ptr_arith;
685         }
686         return NULL;
687 }
688
689 static ir_node *find_compound_ent_value(ir_node *ptr)
690 {
691         return rec_find_compound_ent_value(ptr, NULL);
692 }
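/* Example (illustrative): for
 *
 *     static const int m[2][2] = { {1, 2}, {3, 4} };
 *
 * a load of m[1][0] — whether the front end lowers it to nested one-index
 * Sels or to pointer arithmetic handled by the Add branch above — builds
 * the path entries (index 1, index 0) while recursing to the SymConst of m,
 * then walks the compound initializer down to the IR_INITIALIZER_CONST
 * entry holding 3. */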
693
694 /* forward */
695 static void reduce_adr_usage(ir_node *ptr);
696
697 /**
698  * Update a Load that may have lost its users.
699  */
700 static void handle_load_update(ir_node *load)
701 {
702         ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
703
704         /* do NOT touch volatile loads for now */
705         if (get_Load_volatility(load) == volatility_is_volatile)
706                 return;
707
708         if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
709                 ir_node *ptr = get_Load_ptr(load);
710                 ir_node *mem = get_Load_mem(load);
711
712                 /* a Load whose value is neither used nor exception checked, remove it */
713                 exchange(info->projs[pn_Load_M], mem);
714                 if (info->projs[pn_Load_X_regular])
715                         exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
716                 kill_node(load);
717                 reduce_adr_usage(ptr);
718         }
719 }  /* handle_load_update */
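/* Illustrative note (added annotation): handle_load_update() fires when
 * other rewrites have exchanged away both the result and the X_except Proj
 * of a non-volatile Load; the remaining memory Proj is then routed to the
 * Load's input memory and the Load itself is killed. */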
720
721 /**
722  * A use of an address node has vanished. Check if this was a Proj
723  * node and update the counters.
724  */
725 static void reduce_adr_usage(ir_node *ptr)
726 {
727         ir_node *pred;
728         if (!is_Proj(ptr))
729                 return;
730         if (get_irn_n_edges(ptr) > 0)
731                 return;
732
733         /* this Proj is dead now */
734         pred = get_Proj_pred(ptr);
735         if (is_Load(pred)) {
736                 ldst_info_t *info = (ldst_info_t*)get_irn_link(pred);
737                 info->projs[get_Proj_proj(ptr)] = NULL;
738
739                 /* this node lost its result proj, handle that */
740                 handle_load_update(pred);
741         }
742 }  /* reduce_adr_usage */
743
744 /**
745  * Check if an already existing value of mode old_mode can be converted
746  * into the needed mode new_mode without loss.
747  */
748 static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode)
749 {
750         unsigned old_size;
751         unsigned new_size;
752         if (old_mode == new_mode)
753                 return true;
754
755         old_size = get_mode_size_bits(old_mode);
756         new_size = get_mode_size_bits(new_mode);
757
758         /* if both modes are two's-complement ones, we can always convert the
759            stored value into the needed one. (on big-endian machines we currently
760            only support this for modes of the same size) */
761         if (old_size >= new_size &&
762                   get_mode_arithmetic(old_mode) == irma_twos_complement &&
763                   get_mode_arithmetic(new_mode) == irma_twos_complement &&
764                   (!be_get_backend_param()->byte_order_big_endian
765                 || old_size == new_size)) {
766                 return true;
767         }
768         return false;
769 }
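/* Example (illustrative): storing a 32-bit two's-complement value and later
 * loading 8 bits from the same address works on a little-endian target
 * (old_size 32 >= new_size 8); the narrower value is produced by a Conv.
 * On big-endian targets this shortcut is restricted to old_size == new_size,
 * because there the byte at the load address is the most significant one of
 * the wider value. */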
770
771 /**
772  * Check whether a Call is at least pure, i.e. only reads memory.
773  */
774 static unsigned is_Call_pure(ir_node *call)
775 {
776         ir_type *call_tp = get_Call_type(call);
777         unsigned prop = get_method_additional_properties(call_tp);
778
779         /* first check the call type */
780         if ((prop & (mtp_property_const|mtp_property_pure)) == 0) {
781                 /* try the called entity */
782                 ir_node *ptr = get_Call_ptr(call);
783
784                 if (is_Global(ptr)) {
785                         ir_entity *ent = get_Global_entity(ptr);
786
787                         prop = get_entity_additional_properties(ent);
788                 }
789         }
790         return (prop & (mtp_property_const|mtp_property_pure)) != 0;
791 }  /* is_Call_pure */
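/* Example (illustrative): a call through a SymConst to an entity carrying
 * mtp_property_pure (or mtp_property_const) reads memory at most, so the
 * memory-chain walkers below may safely step over it via get_Call_mem(). */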
792
793 static ir_node *get_base_and_offset(ir_node *ptr, long *pOffset)
794 {
795         ir_mode *mode  = get_irn_mode(ptr);
796         long    offset = 0;
797
798         /* TODO: long might not be enough, we should probably use some tarval thingy... */
799         for (;;) {
800                 if (is_Add(ptr)) {
801                         ir_node *l = get_Add_left(ptr);
802                         ir_node *r = get_Add_right(ptr);
803
804                         if (get_irn_mode(l) != mode || !is_Const(r))
805                                 break;
806
807                         offset += get_tarval_long(get_Const_tarval(r));
808                         ptr     = l;
809                 } else if (is_Sub(ptr)) {
810                         ir_node *l = get_Sub_left(ptr);
811                         ir_node *r = get_Sub_right(ptr);
812
813                         if (get_irn_mode(l) != mode || !is_Const(r))
814                                 break;
815
816                         offset -= get_tarval_long(get_Const_tarval(r));
817                         ptr     = l;
818                 } else if (is_Sel(ptr)) {
819                         ir_entity *ent = get_Sel_entity(ptr);
820                         ir_type   *tp  = get_entity_owner(ent);
821
822                         if (is_Array_type(tp)) {
823                                 int     size;
824                                 ir_node *index;
825
826                                 /* only one-dimensional arrays so far */
827                                 if (get_Sel_n_indexs(ptr) != 1)
828                                         break;
829                                 index = get_Sel_index(ptr, 0);
830                                 if (! is_Const(index))
831                                         break;
832
833                                 tp = get_entity_type(ent);
834                                 if (get_type_state(tp) != layout_fixed)
835                                         break;
836
837                                 size    = get_type_size_bytes(tp);
838                                 offset += size * get_tarval_long(get_Const_tarval(index));
839                         } else {
840                                 if (get_type_state(tp) != layout_fixed)
841                                         break;
842                                 offset += get_entity_offset(ent);
843                         }
844                         ptr = get_Sel_ptr(ptr);
845                 } else
846                         break;
847         }
848
849         *pOffset = offset;
850         return ptr;
851 }
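/* Example (illustrative, assuming fixed type layout): for the C access
 * p->a[3], i.e. Sel(Sel(p, a), 3), the loop accumulates
 *
 *     offset = get_entity_offset(a) + 3 * sizeof(element type)
 *
 * and returns p as the base; Adds and Subs of Consts fold into offset the
 * same way. */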
852
853 static int try_load_after_store(ir_node *load,
854                 ir_node *load_base_ptr, long load_offset, ir_node *store)
855 {
856         ldst_info_t *info;
857         ir_node *store_ptr      = get_Store_ptr(store);
858         long     store_offset;
859         ir_node *store_base_ptr = get_base_and_offset(store_ptr, &store_offset);
860         ir_node *store_value;
861         ir_mode *store_mode;
862         ir_node *load_ptr;
863         ir_mode *load_mode;
864         long     load_mode_len;
865         long     store_mode_len;
866         long     delta;
867         int      res;
868
869         if (load_base_ptr != store_base_ptr)
870                 return 0;
871
872         load_mode      = get_Load_mode(load);
873         load_mode_len  = get_mode_size_bytes(load_mode);
874         store_mode     = get_irn_mode(get_Store_value(store));
875         store_mode_len = get_mode_size_bytes(store_mode);
876         delta          = load_offset - store_offset;
877         store_value    = get_Store_value(store);
878
879         if (delta != 0 || store_mode != load_mode) {
880                 /* TODO: implement for big-endian */
881                 if (delta < 0 || delta + load_mode_len > store_mode_len
882                                 || (be_get_backend_param()->byte_order_big_endian
883                                     && load_mode_len != store_mode_len))
884                         return 0;
885
886                 if (get_mode_arithmetic(store_mode) != irma_twos_complement ||
887                         get_mode_arithmetic(load_mode)  != irma_twos_complement)
888                         return 0;
889
890
891                 /* produce a shift to adjust offset delta */
892                 if (delta > 0) {
893                         ir_node *cnst;
894                         ir_graph *irg = get_irn_irg(load);
895
896                         cnst        = new_r_Const_long(irg, mode_Iu, delta * 8);
897                         store_value = new_r_Shr(get_nodes_block(load),
898                                                                         store_value, cnst, store_mode);
899                 }
900
901                 /* add a Conv if needed */
902                 if (store_mode != load_mode) {
903                         store_value = new_r_Conv(get_nodes_block(load), store_value, load_mode);
904                 }
905         }
906
907         DBG_OPT_RAW(load, store_value);
908
909         info = (ldst_info_t*)get_irn_link(load);
910         if (info->projs[pn_Load_M])
911                 exchange(info->projs[pn_Load_M], get_Load_mem(load));
912
913         res = 0;
914         /* no exception */
915         if (info->projs[pn_Load_X_except]) {
916                 ir_graph *irg = get_irn_irg(load);
917                 exchange( info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
918                 res |= CF_CHANGED;
919         }
920         if (info->projs[pn_Load_X_regular]) {
921                 exchange( info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
922                 res |= CF_CHANGED;
923         }
924
925         if (info->projs[pn_Load_res])
926                 exchange(info->projs[pn_Load_res], store_value);
927
928         load_ptr = get_Load_ptr(load);
929         kill_node(load);
930         reduce_adr_usage(load_ptr);
931         return res | DF_CHANGED;
932 }
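/* Illustrative sketch of the rewrite above (little-endian target; names are
 * made up for the sketch):
 *
 *     *(uint32_t *)p = v;                  // Store, 32-bit store_mode
 *     x = *(uint8_t *)((char *)p + 1);     // Load, 8-bit mode, delta == 1
 *
 * becomes, in C terms,
 *
 *     x = (uint8_t)(v >> 8);               // Shr by delta * 8, then Conv
 *
 * and the Load together with its Projs is removed. */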
933
934 /**
935  * Follow the memory chain as long as there are only Loads,
936  * alias-free Stores, and pure (read-only) Calls and try to replace the
937  * current Load by a previous one.
938  * Note that in unreachable loops it might happen that we reach
939  * the load again or fall into a cycle.
940  * We break such cycles using a special visited flag.
941  *
942  * INC_MASTER() must be called before diving in.
943  */
944 static unsigned follow_Mem_chain(ir_node *load, ir_node *curr)
945 {
946         unsigned    res = 0;
947         ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
948         ir_node     *pred;
949         ir_node     *ptr       = get_Load_ptr(load);
950         ir_node     *mem       = get_Load_mem(load);
951         ir_mode     *load_mode = get_Load_mode(load);
952
953         for (pred = curr; load != pred; ) {
954                 ldst_info_t *pred_info = (ldst_info_t*)get_irn_link(pred);
955
956                 /*
957                  * a Load immediately after a Store -- a read after write.
958                  * We may remove the Load if neither the Load nor the Store has an
959                  * exception handler OR if both are in the same Block. In the latter
960                  * case the Load cannot throw an exception when the previous Store was
961                  * quiet.
962                  *
963                  * Why do we need to check for a Store exception? If the Store cannot
964                  * be executed (ROM) the exception handler might simply jump into
965                  * the load Block :-(
966                  * We could make it a little bit better if we knew that the
967                  * exception handler of the Store jumps directly to the end...
968                  */
969                 if (is_Store(pred) && ((pred_info->projs[pn_Store_X_except] == NULL
970                                 && info->projs[pn_Load_X_except] == NULL)
971                                 || get_nodes_block(load) == get_nodes_block(pred)))
972                 {
973                         long    load_offset;
974                         ir_node *base_ptr = get_base_and_offset(ptr, &load_offset);
975                         int     changes   = try_load_after_store(load, base_ptr, load_offset, pred);
976
977                         if (changes != 0)
978                                 return res | changes;
979                 } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
980                            can_use_stored_value(get_Load_mode(pred), load_mode)) {
981                         /*
982                          * a Load after a Load -- a read after read.
983                          * We may remove the second Load if it does not have an exception
984                          * handler OR if both are in the same Block. In the latter case
985                          * the Load cannot throw an exception when the previous Load was
986                          * quiet.
987                          *
988                          * Here there is no need to check whether the previous Load has an
989                          * exception handler, because both would raise exactly the same
990                          * exception...
991                          *
992                          * TODO: implement load-after-load with different mode for big
993                          *       endian
994                          */
995                         if (info->projs[pn_Load_X_except] == NULL
996                                         || get_nodes_block(load) == get_nodes_block(pred)) {
997                                 ir_node *value;
998
999                                 DBG_OPT_RAR(load, pred);
1000
1001                                 /* the result is used */
1002                                 if (info->projs[pn_Load_res]) {
1003                                         if (pred_info->projs[pn_Load_res] == NULL) {
1004                                                 /* create a new Proj again */
1005                                                 pred_info->projs[pn_Load_res] = new_r_Proj(pred, get_Load_mode(pred), pn_Load_res);
1006                                         }
1007                                         value = pred_info->projs[pn_Load_res];
1008
1009                                         /* add a Conv if needed */
1010                                         if (get_Load_mode(pred) != load_mode) {
1011                                                 value = new_r_Conv(get_nodes_block(load), value, load_mode);
1012                                         }
1013
1014                                         exchange(info->projs[pn_Load_res], value);
1015                                 }
1016
1017                                 if (info->projs[pn_Load_M])
1018                                         exchange(info->projs[pn_Load_M], mem);
1019
1020                                 /* no exception */
1021                                 if (info->projs[pn_Load_X_except]) {
1022                                         ir_graph *irg = get_irn_irg(load);
1023                                         exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
1024                                         res |= CF_CHANGED;
1025                                 }
1026                                 if (info->projs[pn_Load_X_regular]) {
1027                                         exchange( info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
1028                                         res |= CF_CHANGED;
1029                                 }
1030
1031                                 kill_node(load);
1032                                 reduce_adr_usage(ptr);
1033                                 return res | DF_CHANGED;
1034                         }
1035                 }
1036
1037                 if (is_Store(pred)) {
1038                         /* check if we can pass through this store */
1039                         ir_alias_relation rel = get_alias_relation(
1040                                 get_Store_ptr(pred),
1041                                 get_irn_mode(get_Store_value(pred)),
1042                                 ptr, load_mode);
1043                         /* if there might be an alias, we cannot pass this Store */
1044                         if (rel != ir_no_alias)
1045                                 break;
1046                         pred = skip_Proj(get_Store_mem(pred));
1047                 } else if (is_Load(pred)) {
1048                         pred = skip_Proj(get_Load_mem(pred));
1049                 } else if (is_Call(pred)) {
1050                         if (is_Call_pure(pred)) {
1051                                 /* The called graph is at least pure, so there are no Stores
1052                                    in it. We can handle it like a Load and skip it. */
1053                                 pred = skip_Proj(get_Call_mem(pred));
1054                         } else {
1055                                 /* there might be Stores in the graph, stop here */
1056                                 break;
1057                         }
1058                 } else {
1059                         /* follow only Load chains */
1060                         break;
1061                 }
1062
1063                 /* check for cycles */
1064                 if (NODE_VISITED(pred_info))
1065                         break;
1066                 MARK_NODE(pred_info);
1067         }
1068
1069         if (is_Sync(pred)) {
1070                 int i;
1071
1072                 /* handle all Sync predecessors */
1073                 for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
1074                         res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
1075                         if (res)
1076                                 return res;
1077                 }
1078         }
1079
1080         return res;
1081 }  /* follow_Mem_chain */
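/* Example (illustrative): in a sequence like
 *
 *     x = *p;  f_pure();  *q = 1;  y = *p;
 *
 * the walk from the second Load skips the Store to q if the alias analysis
 * reports ir_no_alias for (q, p), skips the pure call, reaches the first
 * Load and replaces y by x (read after read). Sync predecessors are
 * followed recursively in the same way. */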
1082
1083 /*
1084  * Check if we can replace the load by a given const from
1085  * the const code irg.
1086  */
1087 ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c)
1088 {
1089         ir_mode  *c_mode = get_irn_mode(c);
1090         ir_mode  *l_mode = get_Load_mode(load);
1091         ir_node  *block  = get_nodes_block(load);
1092         dbg_info *dbgi   = get_irn_dbg_info(load);
1093         ir_node  *res    = copy_const_value(dbgi, c, block);
1094
1095         if (c_mode != l_mode) {
1096                 /* check if the mode matches OR the value can easily be converted */
1097                 if (is_reinterpret_cast(c_mode, l_mode)) {
1098                         /* copy the value from the const code irg and cast it */
1099                         res = new_rd_Conv(dbgi, block, res, l_mode);
1100                 } else {
1101                         return NULL;
1102                 }
1103         }
1104         return res;
1105 }
1106
1107 /**
1108  * optimize a Load
1109  *
1110  * @param load  the Load node
1111  */
1112 static unsigned optimize_load(ir_node *load)
1113 {
1114         ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
1115         ir_node     *mem, *ptr, *value;
1116         ir_entity   *ent;
1117         long        dummy;
1118         unsigned    res = 0;
1119
1120         /* do NOT touch volatile loads for now */
1121         if (get_Load_volatility(load) == volatility_is_volatile)
1122                 return 0;
1123
1124         /* the address of the load to be optimized */
1125         ptr = get_Load_ptr(load);
1126
1127         /* The mem of the Load. Must still be returned after optimization. */
1128         mem = get_Load_mem(load);
1129
1130         if (info->projs[pn_Load_res] == NULL
1131                         && info->projs[pn_Load_X_except] == NULL) {
1132                 /* the value is never used and we don't care about exceptions, remove */
1133                 exchange(info->projs[pn_Load_M], mem);
1134
1135                 if (info->projs[pn_Load_X_regular]) {
1136                         /* should not happen, but if it does, remove it */
1137                         exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
1138                         res |= CF_CHANGED;
1139                 }
1140                 kill_node(load);
1141                 reduce_adr_usage(ptr);
1142                 return res | DF_CHANGED;
1143         }
1144
1145         /* Load from a constant polymorphic field, where we can resolve
1146            polymorphism. */
1147         value = transform_polymorph_Load(load);
1148         if (value == load) {
1149                 value = NULL;
1150                 /* check if we can determine the entity that will be loaded */
1151                 ent = find_constant_entity(ptr);
1152                 if (ent != NULL
1153                                 && get_entity_visibility(ent) != ir_visibility_external) {
1154                         /* a static allocation that is not external: there should be NO
1155                          * exception when loading even if we cannot replace the load itself.
1156                          */
1157
1158                         /* no exception, clear the info field as it might be checked later again */
1159                         if (info->projs[pn_Load_X_except]) {
1160                                 ir_graph *irg = get_irn_irg(load);
1161                                 exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
1162                                 info->projs[pn_Load_X_except] = NULL;
1163                                 res |= CF_CHANGED;
1164                         }
1165                         if (info->projs[pn_Load_X_regular]) {
1166                                 exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
1167                                 info->projs[pn_Load_X_regular] = NULL;
1168                                 res |= CF_CHANGED;
1169                         }
1170
1171                         if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT) {
1172                                 if (has_entity_initializer(ent)) {
1173                                         /* new style initializer */
1174                                         value = find_compound_ent_value(ptr);
1175                                 } else if (entity_has_compound_ent_values(ent)) {
1176                                         /* old style initializer */
1177                                         compound_graph_path *path = get_accessed_path(ptr);
1178
1179                                         if (path != NULL) {
1180                                                 assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
1181
1182                                                 value = get_compound_ent_value_by_path(ent, path);
1183                                                 DB((dbg, LEVEL_1, "  Constant access at %F%F resulted in %+F\n", ent, path, value));
1184                                                 free_compound_graph_path(path);
1185                                         }
1186                                 }
1187                                 if (value != NULL) {
1188                                         ir_graph *irg = get_irn_irg(load);
1189                                         value = can_replace_load_by_const(load, value);
1190                                         if (value != NULL && is_Sel(ptr) &&
1191                                                         !is_irg_state(irg, IR_GRAPH_STATE_IMPLICIT_BITFIELD_MASKING)) {
1192                                                 /* frontend has inserted masking operations after bitfield accesses,
1193                                                  * so we might have to shift the const. */
1194                                                 unsigned char bit_offset = get_entity_offset_bits_remainder(get_Sel_entity(ptr));
1195                                                 ir_tarval *tv_old = get_Const_tarval(value);
1196                                                 ir_tarval *tv_offset = new_tarval_from_long(bit_offset, mode_Bu);
1197                                                 ir_tarval *tv_new = tarval_shl(tv_old, tv_offset);
1198                                                 value = new_r_Const(irg, tv_new);
1199                                         }
1200                                 }
1201                         }
1202                 }
1203         }
1204         if (value != NULL) {
1205                 /* we completely replace the load by this value */
1206                 if (info->projs[pn_Load_X_except]) {
1207                         ir_graph *irg = get_irn_irg(load);
1208                         exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
1209                         info->projs[pn_Load_X_except] = NULL;
1210                         res |= CF_CHANGED;
1211                 }
1212                 if (info->projs[pn_Load_X_regular]) {
1213                         exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
1214                         info->projs[pn_Load_X_regular] = NULL;
1215                         res |= CF_CHANGED;
1216                 }
1217                 if (info->projs[pn_Load_M]) {
1218                         exchange(info->projs[pn_Load_M], mem);
1219                         res |= DF_CHANGED;
1220                 }
1221                 if (info->projs[pn_Load_res]) {
1222                         exchange(info->projs[pn_Load_res], value);
1223                         res |= DF_CHANGED;
1224                 }
1225                 kill_node(load);
1226                 reduce_adr_usage(ptr);
1227                 return res;
1228         }
1229
1230         /* Check if the address of this load is used more than once.
1231          * If not, this load cannot be removed in any case. */
1232         if (get_irn_n_uses(ptr) <= 1 && get_irn_n_uses(get_base_and_offset(ptr, &dummy)) <= 1)
1233                 return res;
1234
1235         /*
1236          * follow the memory chain as long as there are only Loads
1237          * and try to replace the current Load by a previous one.
1238          * Note that in unreachable loops it might happen that we reach
1239          * the load again or fall into a cycle.
1240          * We break such cycles using a special visited flag.
1241          */
1242         INC_MASTER();
1243         res = follow_Mem_chain(load, skip_Proj(mem));
1244         return res;
1245 }  /* optimize_load */
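/* Example (illustrative): for
 *
 *     static const int c = 42;
 *     int g(void) { return c; }
 *
 * optimize_load() finds the constant entity c, removes the exception Projs
 * (a non-external static allocation cannot fault), rewires the memory Proj
 * to the Load's input memory and exchanges the result Proj for Const 42. */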
1246
1247 /**
1248  * Check whether a value of mode new_mode would completely overwrite a value
1249  * of mode old_mode in memory.
1250  */
1251 static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
1252 {
1253         return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
1254 }  /* is_completely_overwritten */
1255
1256 /**
1257  * Check whether small is a part of large (starting at same address).
1258  */
1259 static int is_partially_same(ir_node *small, ir_node *large)
1260 {
1261         ir_mode *sm = get_irn_mode(small);
1262         ir_mode *lm = get_irn_mode(large);
1263
1264         /* FIXME: Check endianness */
1265         return is_Conv(small) && get_Conv_op(small) == large
1266             && get_mode_size_bytes(sm) < get_mode_size_bytes(lm)
1267             && get_mode_arithmetic(sm) == irma_twos_complement
1268             && get_mode_arithmetic(lm) == irma_twos_complement;
1269 }  /* is_partially_same */
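/* Example (illustrative): with
 *
 *     *(int *)p   = v;          // earlier Store of the full 32-bit value
 *     *(short *)p = (short)v;   // later Store of Conv(v)
 *
 * is_partially_same((short)v, v) holds, so the later Store is redundant:
 * its 16 bits are the low part of what the earlier Store already wrote
 * (modulo the endianness FIXME above). */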
1270
1271 /**
1272  * Follow the memory chain as long as there are only Loads and alias-free Stores.
1273  *
1274  * INC_MASTER() must be called before diving in.
1275  */
1276 static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr)
1277 {
1278         unsigned res = 0;
1279         ldst_info_t *info = (ldst_info_t*)get_irn_link(store);
1280         ir_node *pred;
1281         ir_node *ptr = get_Store_ptr(store);
1282         ir_node *mem = get_Store_mem(store);
1283         ir_node *value = get_Store_value(store);
1284         ir_mode *mode  = get_irn_mode(value);
1285         ir_node *block = get_nodes_block(store);
1286
1287         for (pred = curr; pred != store;) {
1288                 ldst_info_t *pred_info = (ldst_info_t*)get_irn_link(pred);
1289
1290                 /*
1291                  * BEWARE: one might think that checking the modes is useless, because
1292                  * if the pointers are identical, they refer to the same object.
1293 		 * This is only true in strongly typed languages, not in C, where the
1294 		 * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b; ...
1295 		 * However, if the size of the mode that is written is bigger than or
1296 		 * equal to the size of the old one, the old value is completely
1297 		 * overwritten and can be killed ...
1298                  */
1299                 if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
1300 		    get_nodes_block(pred) == block) {
1301                         /*
1302                          * a Store after a Store in the same Block -- a write after write.
1303                          */
1304
1305                         /*
1306                          * We may remove the first Store, if the old value is completely
1307                          * overwritten or the old value is a part of the new value,
1308                          * and if it does not have an exception handler.
1309                          *
1310 			 * TODO: What if both have the same exception handler?
1311                          */
1312                         if (get_Store_volatility(pred) != volatility_is_volatile
1313                                 && !pred_info->projs[pn_Store_X_except]) {
1314                                 ir_node *predvalue = get_Store_value(pred);
1315                                 ir_mode *predmode  = get_irn_mode(predvalue);
1316
1317                                 if (is_completely_overwritten(predmode, mode)
1318                                         || is_partially_same(predvalue, value)) {
1319                                         DBG_OPT_WAW(pred, store);
1320                                         exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
1321                                         kill_node(pred);
1322                                         reduce_adr_usage(ptr);
1323                                         return DF_CHANGED;
1324                                 }
1325                         }
1326
1327                         /*
1328                          * We may remove the Store, if the old value already contains
1329                          * the new value, and if it does not have an exception handler.
1330                          *
1331 			 * TODO: What if both have the same exception handler?
1332                          */
1333                         if (get_Store_volatility(store) != volatility_is_volatile
1334                                 && !info->projs[pn_Store_X_except]) {
1335                                 ir_node *predvalue = get_Store_value(pred);
1336
1337                                 if (is_partially_same(value, predvalue)) {
1338                                         DBG_OPT_WAW(pred, store);
1339                                         exchange(info->projs[pn_Store_M], mem);
1340                                         kill_node(store);
1341                                         reduce_adr_usage(ptr);
1342                                         return DF_CHANGED;
1343                                 }
1344                         }
1345                 } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
1346                            value == pred_info->projs[pn_Load_res]) {
1347                         /*
1348                          * a Store of a value just loaded from the same address
1349                          * -- a write after read.
1350                          * We may remove the Store, if it does not have an exception
1351                          * handler.
1352                          */
1353                         if (! info->projs[pn_Store_X_except]) {
1354                                 DBG_OPT_WAR(store, pred);
1355                                 exchange(info->projs[pn_Store_M], mem);
1356                                 kill_node(store);
1357                                 reduce_adr_usage(ptr);
1358                                 return DF_CHANGED;
1359                         }
1360                 }
1361
1362                 if (is_Store(pred)) {
1363                         /* check if we can pass through this store */
1364                         ir_alias_relation rel = get_alias_relation(
1365                                 get_Store_ptr(pred),
1366                                 get_irn_mode(get_Store_value(pred)),
1367                                 ptr, mode);
1368 			/* if there might be an alias, we cannot pass this Store */
1369                         if (rel != ir_no_alias)
1370                                 break;
1371                         pred = skip_Proj(get_Store_mem(pred));
1372                 } else if (is_Load(pred)) {
1373                         ir_alias_relation rel = get_alias_relation(
1374                                 get_Load_ptr(pred), get_Load_mode(pred),
1375                                 ptr, mode);
1376                         if (rel != ir_no_alias)
1377                                 break;
1378
1379                         pred = skip_Proj(get_Load_mem(pred));
1380                 } else {
1381                         /* follow only Load chains */
1382                         break;
1383                 }
1384
1385                 /* check for cycles */
1386                 if (NODE_VISITED(pred_info))
1387                         break;
1388                 MARK_NODE(pred_info);
1389         }
1390
1391         if (is_Sync(pred)) {
1392                 int i;
1393
1394                 /* handle all Sync predecessors */
1395                 for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
1396                         res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
1397                         if (res)
1398                                 break;
1399                 }
1400         }
1401         return res;
1402 }  /* follow_Mem_chain_for_Store */
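/*
 * Editor's illustration (not part of the pass): the two removal cases
 * above correspond at the source level to
 *
 *     *p = a; *p = b;    =>    *p = b;    // write after write
 *     a = *p; *p = a;    =>    a = *p;    // write after read
 *
 * and are only applied when no aliasing access lies between the two
 * nodes and the removed Store has no exception handler.
 */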
1403
1404 /** find entity used as base for an address calculation */
1405 static ir_entity *find_entity(ir_node *ptr)
1406 {
1407         switch (get_irn_opcode(ptr)) {
1408         case iro_SymConst:
1409                 return get_SymConst_entity(ptr);
1410         case iro_Sel: {
1411                 ir_node *pred = get_Sel_ptr(ptr);
1412                 if (get_irg_frame(get_irn_irg(ptr)) == pred)
1413                         return get_Sel_entity(ptr);
1414
1415                 return find_entity(pred);
1416         }
1417         case iro_Sub:
1418         case iro_Add: {
1419                 ir_node *left = get_binop_left(ptr);
1420                 ir_node *right;
1421                 if (mode_is_reference(get_irn_mode(left)))
1422                         return find_entity(left);
1423                 right = get_binop_right(ptr);
1424                 if (mode_is_reference(get_irn_mode(right)))
1425                         return find_entity(right);
1426                 return NULL;
1427         }
1428         default:
1429                 return NULL;
1430         }
1431 }
1432
1433 /**
1434  * optimize a Store
1435  *
1436  * @param store  the Store node
1437  */
1438 static unsigned optimize_store(ir_node *store)
1439 {
1440         ir_node   *ptr;
1441         ir_node   *mem;
1442         ir_entity *entity;
1443
1444         if (get_Store_volatility(store) == volatility_is_volatile)
1445                 return 0;
1446
1447         ptr    = get_Store_ptr(store);
1448         entity = find_entity(ptr);
1449
1450         /* a store to an entity which is never read is unnecessary */
1451         if (entity != NULL && !(get_entity_usage(entity) & ir_usage_read)) {
1452                 ldst_info_t *info = (ldst_info_t*)get_irn_link(store);
1453                 if (info->projs[pn_Store_X_except] == NULL) {
1454                         DB((dbg, LEVEL_1, "  Killing useless %+F to never read entity %+F\n", store, entity));
1455                         exchange(info->projs[pn_Store_M], get_Store_mem(store));
1456                         kill_node(store);
1457                         reduce_adr_usage(ptr);
1458                         return DF_CHANGED;
1459                 }
1460         }
1461
1462 	/* Check if the address of this Store is used more than once.
1463          * If not, this Store cannot be removed in any case. */
1464         if (get_irn_n_uses(ptr) <= 1)
1465                 return 0;
1466
1467         mem = get_Store_mem(store);
1468
1469         /* follow the memory chain as long as there are only Loads */
1470         INC_MASTER();
1471
1472         return follow_Mem_chain_for_Store(store, skip_Proj(mem));
1473 }  /* optimize_store */
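/*
 * Editor's note: the "never read" case above removes e.g. the Store in
 *
 *     static int counter;
 *     void f(void) { counter = 42; }   // counter read nowhere else
 *
 * once entity usage analysis has shown that ir_usage_read is not set
 * for counter.
 */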
1474
1475 /**
1476  * walker, optimizes Phi after Stores to identical places:
1477  * Does the following optimization:
1478  * @verbatim
1479  *
1480  *   val1   val2   val3          val1  val2  val3
1481  *    |      |      |               \    |    /
1482  *  Store  Store  Store              \   |   /
1483  *      \    |    /                   PhiData
1484  *       \   |   /                       |
1485  *        \  |  /                      Store
1486  *          PhiM
1487  *
1488  * @endverbatim
1489  * This reduces the number of stores and allows for predicated execution.
1490  * It moves Stores towards the end of a function, which may be bad.
1491  *
1492  * This is only possible if the predecessor blocks have only one successor.
1493  */
1494 static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
1495 {
1496         int i, n;
1497         ir_node *store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
1498 #ifdef DO_CACHEOPT
1499         ir_node *old_store;
1500 #endif
1501         ir_mode *mode;
1502         ir_node **inM, **inD, **projMs;
1503         int *idx;
1504         dbg_info *db = NULL;
1505         ldst_info_t *info;
1506         block_info_t *bl_info;
1507         unsigned res = 0;
1508
1509         /* Must be a memory Phi */
1510         if (get_irn_mode(phi) != mode_M)
1511                 return 0;
1512
1513         n = get_Phi_n_preds(phi);
1514         if (n <= 0)
1515                 return 0;
1516
1517 	/* the memory Proj must have only one user */
1518         projM = get_Phi_pred(phi, 0);
1519         if (get_irn_n_edges(projM) != 1)
1520                 return 0;
1521
1522         store = skip_Proj(projM);
1523 #ifdef DO_CACHEOPT
1524         old_store = store;
1525 #endif
1526         if (!is_Store(store))
1527                 return 0;
1528
1529         block = get_nodes_block(store);
1530
1531         /* check if the block is post dominated by Phi-block
1532            and has no exception exit */
1533         bl_info = (block_info_t*)get_irn_link(block);
1534         if (bl_info->flags & BLOCK_HAS_EXC)
1535                 return 0;
1536
1537         phi_block = get_nodes_block(phi);
1538         if (! block_strictly_postdominates(phi_block, block))
1539                 return 0;
1540
1541         /* this is the address of the store */
1542         ptr  = get_Store_ptr(store);
1543         mode = get_irn_mode(get_Store_value(store));
1544         info = (ldst_info_t*)get_irn_link(store);
1545         exc  = info->exc_block;
1546
1547         for (i = 1; i < n; ++i) {
1548                 ir_node *pred = get_Phi_pred(phi, i);
1549
1550                 if (get_irn_n_edges(pred) != 1)
1551                         return 0;
1552
1553                 pred = skip_Proj(pred);
1554                 if (!is_Store(pred))
1555                         return 0;
1556
1557                 if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
1558                         return 0;
1559
1560                 info = (ldst_info_t*)get_irn_link(pred);
1561
1562                 /* check, if all stores have the same exception flow */
1563                 if (exc != info->exc_block)
1564                         return 0;
1565
1566                 block = get_nodes_block(pred);
1567
1568                 /* check if the block is post dominated by Phi-block
1569                    and has no exception exit. Note that block must be different from
1570 		   Phi-block, else we would move a Store from the End of a block to its
1571                    Start... */
1572                 bl_info = (block_info_t*)get_irn_link(block);
1573                 if (bl_info->flags & BLOCK_HAS_EXC)
1574                         return 0;
1575                 if (block == phi_block || ! block_postdominates(phi_block, block))
1576                         return 0;
1577         }
1578
1579         /*
1580          * ok, when we are here, we found all predecessors of a Phi that
1581          * are Stores to the same address and size. That means whatever
1582          * we do before we enter the block of the Phi, we do a Store.
1583          * So, we can move the Store to the current block:
1584          *
1585          *   val1    val2    val3          val1  val2  val3
1586          *    |       |       |               \    |    /
1587          * | Str | | Str | | Str |             \   |   /
1588          *      \     |     /                   PhiData
1589          *       \    |    /                       |
1590          *        \   |   /                       Str
1591          *           PhiM
1592          *
1593          * Is only allowed if the predecessor blocks have only one successor.
1594          */
1595
1596         NEW_ARR_A(ir_node *, projMs, n);
1597         NEW_ARR_A(ir_node *, inM, n);
1598         NEW_ARR_A(ir_node *, inD, n);
1599         NEW_ARR_A(int, idx, n);
1600
1601         /* Prepare: Collect all Store nodes.  We must do this
1602 	   first because we otherwise may lose a Store when exchanging its
1603            memory Proj.
1604          */
1605         for (i = n - 1; i >= 0; --i) {
1606                 ir_node *store;
1607
1608                 projMs[i] = get_Phi_pred(phi, i);
1609                 assert(is_Proj(projMs[i]));
1610
1611                 store = get_Proj_pred(projMs[i]);
1612                 info  = (ldst_info_t*)get_irn_link(store);
1613
1614                 inM[i] = get_Store_mem(store);
1615                 inD[i] = get_Store_value(store);
1616                 idx[i] = info->exc_idx;
1617         }
1618         block = get_nodes_block(phi);
1619
1620         /* second step: create a new memory Phi */
1621         phiM = new_rd_Phi(get_irn_dbg_info(phi), block, n, inM, mode_M);
1622
1623         /* third step: create a new data Phi */
1624         phiD = new_rd_Phi(get_irn_dbg_info(phi), block, n, inD, mode);
1625
1626         /* rewire memory and kill the node */
1627         for (i = n - 1; i >= 0; --i) {
1628                 ir_node *proj  = projMs[i];
1629
1630                 if (is_Proj(proj)) {
1631                         ir_node *store = get_Proj_pred(proj);
1632                         exchange(proj, inM[i]);
1633                         kill_node(store);
1634                 }
1635         }
1636
1637         /* fourth step: create the Store */
1638         store = new_rd_Store(db, block, phiM, ptr, phiD, cons_none);
1639 #ifdef DO_CACHEOPT
1640         co_set_irn_name(store, co_get_irn_ident(old_store));
1641 #endif
1642
1643         projM = new_rd_Proj(NULL, store, mode_M, pn_Store_M);
1644
1645         info = get_ldst_info(store, &wenv->obst);
1646         info->projs[pn_Store_M] = projM;
1647
1648 	/* fifth step: repair exception flow */
1649         if (exc) {
1650                 ir_node *projX = new_rd_Proj(NULL, store, mode_X, pn_Store_X_except);
1651
1652                 info->projs[pn_Store_X_except] = projX;
1653                 info->exc_block                = exc;
1654                 info->exc_idx                  = idx[0];
1655
1656                 for (i = 0; i < n; ++i) {
1657                         set_Block_cfgpred(exc, idx[i], projX);
1658                 }
1659
1660                 if (n > 1) {
1661                         /* the exception block should be optimized as some inputs are identical now */
1662                 }
1663
1664                 res |= CF_CHANGED;
1665         }
1666
1667         /* sixth step: replace old Phi */
1668         exchange(phi, projM);
1669
1670         return res | DF_CHANGED;
1671 }  /* optimize_phi */
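/*
 * Editor's illustration: at the source level the transformation above
 * turns
 *
 *     if (c) *p = a; else *p = b;
 *
 * into the equivalent of
 *
 *     *p = c ? a : b;
 *
 * i.e. one Store fed by a data Phi instead of one Store per branch.
 */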
1672
1673 /**
1674  * walker, do the optimizations
1675  */
1676 static void do_load_store_optimize(ir_node *n, void *env)
1677 {
1678         walk_env_t *wenv = (walk_env_t*)env;
1679
1680         switch (get_irn_opcode(n)) {
1681
1682         case iro_Load:
1683                 wenv->changes |= optimize_load(n);
1684                 break;
1685
1686         case iro_Store:
1687                 wenv->changes |= optimize_store(n);
1688                 break;
1689
1690         case iro_Phi:
1691                 wenv->changes |= optimize_phi(n, wenv);
1692                 break;
1693
1694         default:
1695                 break;
1696         }
1697 }  /* do_load_store_optimize */
1698
1699 /** A scc. */
1700 typedef struct scc {
1701         ir_node *head;      /**< the head of the list */
1702 } scc;
1703
1704 /** A node entry. */
1705 typedef struct node_entry {
1706         unsigned DFSnum;    /**< the DFS number of this node */
1707         unsigned low;       /**< the low number of this node */
1708         int      in_stack;  /**< flag, set if the node is on the stack */
1709 	ir_node  *next;     /**< link to the next node in the same scc */
1710         scc      *pscc;     /**< the scc of this node */
1711         unsigned POnum;     /**< the post order number for blocks */
1712 } node_entry;
1713
1714 /** A loop entry. */
1715 typedef struct loop_env {
1716         ir_phase ph;           /**< the phase object */
1717         ir_node  **stack;      /**< the node stack */
1718         size_t   tos;          /**< tos index */
1719         unsigned nextDFSnum;   /**< the current DFS number */
1720         unsigned POnum;        /**< current post order number */
1721
1722         unsigned changes;      /**< a bitmask of graph changes */
1723 } loop_env;
1724
1725 /**
1726  * Gets the node_entry of a node.
1727  */
1728 static node_entry *get_irn_ne(ir_node *irn, loop_env *env)
1729 {
1730         ir_phase   *ph = &env->ph;
1731         node_entry *e  = (node_entry*)phase_get_irn_data(&env->ph, irn);
1732
1733         if (! e) {
1734                 e = (node_entry*)phase_alloc(ph, sizeof(*e));
1735                 memset(e, 0, sizeof(*e));
1736                 phase_set_irn_data(ph, irn, e);
1737         }
1738         return e;
1739 }  /* get_irn_ne */
1740
1741 /**
1742  * Push a node onto the stack.
1743  *
1744  * @param env   the loop environment
1745  * @param n     the node to push
1746  */
1747 static void push(loop_env *env, ir_node *n)
1748 {
1749         node_entry *e;
1750
1751         if (env->tos == ARR_LEN(env->stack)) {
1752                 size_t nlen = ARR_LEN(env->stack) * 2;
1753                 ARR_RESIZE(ir_node *, env->stack, nlen);
1754         }
1755         env->stack[env->tos++] = n;
1756         e = get_irn_ne(n, env);
1757         e->in_stack = 1;
1758 }  /* push */
1759
1760 /**
1761  * pop a node from the stack
1762  *
1763  * @param env   the loop environment
1764  *
1765  * @return  The topmost node
1766  */
1767 static ir_node *pop(loop_env *env)
1768 {
1769         ir_node *n = env->stack[--env->tos];
1770         node_entry *e = get_irn_ne(n, env);
1771
1772         e->in_stack = 0;
1773         return n;
1774 }  /* pop */
1775
1776 /**
1777  * Check if irn is a region constant.
1778  * The block or irn must strictly dominate the header block.
1779  *
1780  * @param irn           the node to check
1781  * @param header_block  the header block of the induction variable
1782  */
1783 static int is_rc(ir_node *irn, ir_node *header_block)
1784 {
1785         ir_node *block = get_nodes_block(irn);
1786
1787         return (block != header_block) && block_dominates(block, header_block);
1788 }  /* is_rc */
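/* Editor's note: for a loop over i, an address computed before the loop
 * header (e.g. the address of a global) is a region constant here, while
 * an address recomputed from a Phi inside the loop body is not. */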
1789
1790 typedef struct phi_entry phi_entry;
1791 struct phi_entry {
1792         ir_node   *phi;    /**< A phi with a region const memory. */
1793         int       pos;     /**< The position of the region const memory */
1794         ir_node   *load;   /**< the newly created load for this phi */
1795         phi_entry *next;
1796 };
1797
1798 /**
1799  * An entry in the avail set.
1800  */
1801 typedef struct avail_entry_t {
1802         ir_node *ptr;   /**< the address pointer */
1803         ir_mode *mode;  /**< the load mode */
1804         ir_node *load;  /**< the associated Load */
1805 } avail_entry_t;
1806
1807 /**
1808  * Compare two avail entries.
1809  */
1810 static int cmp_avail_entry(const void *elt, const void *key, size_t size)
1811 {
1812         const avail_entry_t *a = (const avail_entry_t*)elt;
1813         const avail_entry_t *b = (const avail_entry_t*)key;
1814         (void) size;
1815
1816         return a->ptr != b->ptr || a->mode != b->mode;
1817 }  /* cmp_avail_entry */
1818
1819 /**
1820  * Calculate the hash value of an avail entry.
1821  */
1822 static unsigned hash_cache_entry(const avail_entry_t *entry)
1823 {
1824         return get_irn_idx(entry->ptr) * 9 + HASH_PTR(entry->mode);
1825 }  /* hash_cache_entry */
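/* Editor's note: the avail set is keyed by (ptr, mode), so two hoisted
 * Loads of the same address with the same mode share one new Load node
 * instead of duplicating it per Phi input. */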
1826
1827 /**
1828  * Move Loads out of loops if possible.
1829  *
1830  * @param pscc   the loop described by an SCC
1831  * @param env    the loop environment
1832  */
1833 static void move_loads_out_of_loops(scc *pscc, loop_env *env)
1834 {
1835         ir_node   *phi, *load, *next, *other, *next_other;
1836         int       j;
1837         phi_entry *phi_list = NULL;
1838         set       *avail;
1839
1840         avail = new_set(cmp_avail_entry, 8);
1841
1842         /* collect all outer memories */
1843         for (phi = pscc->head; phi != NULL; phi = next) {
1844                 node_entry *ne = get_irn_ne(phi, env);
1845                 next = ne->next;
1846
1847                 /* check all memory Phi's */
1848                 if (! is_Phi(phi))
1849                         continue;
1850
1851 		assert(get_irn_mode(phi) == mode_M && "DFS returned a non-memory Phi");
1852
1853                 for (j = get_irn_arity(phi) - 1; j >= 0; --j) {
1854                         ir_node    *pred = get_irn_n(phi, j);
1855                         node_entry *pe   = get_irn_ne(pred, env);
1856
1857 			if (pe->pscc != ne->pscc) {
1858 				/* not in the same SCC, so it is a region constant */
1859 				phi_entry *npe = (phi_entry*)phase_alloc(&env->ph, sizeof(*npe));
1860
1861 				npe->phi  = phi;
1862 				npe->pos  = j;
1863 				npe->next = phi_list;
1864 				phi_list = npe;
1865                         }
1866                 }
1867         }
1868         /* no Phis no fun */
1869         assert(phi_list != NULL && "DFS found a loop without Phi");
1870
1871         /* for now, we cannot handle more than one input (only reducible cf) */
1872         if (phi_list->next != NULL)
1873                 return;
1874
1875         for (load = pscc->head; load; load = next) {
1876                 ir_mode *load_mode;
1877                 node_entry *ne = get_irn_ne(load, env);
1878                 next = ne->next;
1879
1880                 if (is_Load(load)) {
1881                         ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
1882                         ir_node     *ptr = get_Load_ptr(load);
1883
1884                         /* for now, we cannot handle Loads with exceptions */
1885                         if (info->projs[pn_Load_res] == NULL || info->projs[pn_Load_X_regular] != NULL || info->projs[pn_Load_X_except] != NULL)
1886                                 continue;
1887
1888                         /* for now, we can only move Load(Global) */
1889                         if (! is_Global(ptr))
1890                                 continue;
1891                         load_mode = get_Load_mode(load);
1892                         for (other = pscc->head; other != NULL; other = next_other) {
1893                                 node_entry *ne = get_irn_ne(other, env);
1894                                 next_other = ne->next;
1895
1896                                 if (is_Store(other)) {
1897                                         ir_alias_relation rel = get_alias_relation(
1898                                                 get_Store_ptr(other),
1899                                                 get_irn_mode(get_Store_value(other)),
1900                                                 ptr, load_mode);
1901 					/* if there might be an alias, we cannot pass this Store */
1902                                         if (rel != ir_no_alias)
1903                                                 break;
1904                                 }
1905                                 /* only Phis and pure Calls are allowed here, so ignore them */
1906                         }
1907                         if (other == NULL) {
1908                                 ldst_info_t *ninfo = NULL;
1909                                 phi_entry   *pe;
1910                                 dbg_info    *db;
1911
1912                                 /* yep, no aliasing Store found, Load can be moved */
1913                                 DB((dbg, LEVEL_1, "  Found a Load that could be moved: %+F\n", load));
1914
1915                                 db   = get_irn_dbg_info(load);
1916                                 for (pe = phi_list; pe != NULL; pe = pe->next) {
1917                                         int     pos   = pe->pos;
1918                                         ir_node *phi  = pe->phi;
1919                                         ir_node *blk  = get_nodes_block(phi);
1920                                         ir_node *pred = get_Block_cfgpred_block(blk, pos);
1921                                         ir_node *irn, *mem;
1922                                         avail_entry_t entry, *res;
1923
1924                                         entry.ptr  = ptr;
1925                                         entry.mode = load_mode;
1926                                         res = (avail_entry_t*)set_find(avail, &entry, sizeof(entry), hash_cache_entry(&entry));
1927                                         if (res != NULL) {
1928                                                 irn = res->load;
1929                                         } else {
1930                                                 irn = new_rd_Load(db, pred, get_Phi_pred(phi, pos), ptr, load_mode, cons_none);
1931                                                 entry.load = irn;
1932                                                 set_insert(avail, &entry, sizeof(entry), hash_cache_entry(&entry));
1933                                                 DB((dbg, LEVEL_1, "  Created %+F in %+F\n", irn, pred));
1934                                         }
1935                                         pe->load = irn;
1936                                         ninfo = get_ldst_info(irn, phase_obst(&env->ph));
1937
1938                                         ninfo->projs[pn_Load_M] = mem = new_r_Proj(irn, mode_M, pn_Load_M);
1939                                         if (res == NULL) {
1940 						/* irn was newly created, so set the Phi input; if irn
1941 						 * came from the cache, there might already be other
1942 						 * Loads between phi and irn, so do not set it again. */
1943                                                 set_Phi_pred(phi, pos, mem);
1944                                         }
1945
1946                                         ninfo->projs[pn_Load_res] = new_r_Proj(irn, load_mode, pn_Load_res);
1947                                 }
1948
1949                                 /* now kill the old Load */
1950                                 exchange(info->projs[pn_Load_M], get_Load_mem(load));
1951                                 exchange(info->projs[pn_Load_res], ninfo->projs[pn_Load_res]);
1952
1953                                 env->changes |= DF_CHANGED;
1954                         }
1955                 }
1956         }
1957         del_set(avail);
1958 }  /* move_loads_out_of_loops */
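/*
 * Editor's illustration (not part of the pass): the effect is a simple
 * load hoisting for loop-invariant global reads, roughly
 *
 *     for (i = 0; i < n; i++)          t = g;
 *         sum += g;            =>      for (i = 0; i < n; i++)
 *                                          sum += t;
 *
 * which is only done when the loop contains no Store (or impure Call)
 * that may alias g.
 */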
1959
1960 /**
1961  * Process a loop SCC.
1962  *
1963  * @param pscc  the SCC
1964  * @param env   the loop environment
1965  */
1966 static void process_loop(scc *pscc, loop_env *env)
1967 {
1968         ir_node *irn, *next, *header = NULL;
1969         node_entry *b, *h = NULL;
1970         int j, only_phi, num_outside, process = 0;
1971         ir_node *out_rc;
1972
1973         /* find the header block for this scc */
1974         for (irn = pscc->head; irn; irn = next) {
1975                 node_entry *e = get_irn_ne(irn, env);
1976                 ir_node *block = get_nodes_block(irn);
1977
1978                 next = e->next;
1979                 b = get_irn_ne(block, env);
1980
1981                 if (header != NULL) {
1982                         if (h->POnum < b->POnum) {
1983                                 header = block;
1984                                 h      = b;
1985                         }
1986                 } else {
1987                         header = block;
1988                         h      = b;
1989                 }
1990         }
1991
1992         /* check if this scc contains only Phi, Loads or Stores nodes */
1993         only_phi    = 1;
1994         num_outside = 0;
1995         out_rc      = NULL;
1996         for (irn = pscc->head; irn; irn = next) {
1997                 node_entry *e = get_irn_ne(irn, env);
1998
1999                 next = e->next;
2000                 switch (get_irn_opcode(irn)) {
2001                 case iro_Call:
2002                         if (is_Call_pure(irn)) {
2003                                 /* pure calls can be treated like loads */
2004                                 only_phi = 0;
2005                                 break;
2006                         }
2007 			/* non-pure calls must be handled like may-alias Stores */
2008                         goto fail;
2009                 case iro_CopyB:
2010                         /* cannot handle CopyB yet */
2011                         goto fail;
2012                 case iro_Load:
2013                         process = 1;
2014                         if (get_Load_volatility(irn) == volatility_is_volatile) {
2015                                 /* cannot handle loops with volatile Loads */
2016                                 goto fail;
2017                         }
2018                         only_phi = 0;
2019                         break;
2020                 case iro_Store:
2021                         if (get_Store_volatility(irn) == volatility_is_volatile) {
2022                                 /* cannot handle loops with volatile Stores */
2023                                 goto fail;
2024                         }
2025                         only_phi = 0;
2026                         break;
2027                 default:
2028                         only_phi = 0;
2029                         break;
2030                 case iro_Phi:
2031                         for (j = get_irn_arity(irn) - 1; j >= 0; --j) {
2032                                 ir_node *pred  = get_irn_n(irn, j);
2033                                 node_entry *pe = get_irn_ne(pred, env);
2034
2035                                 if (pe->pscc != e->pscc) {
2036                                         /* not in the same SCC, must be a region const */
2037                                         if (! is_rc(pred, header)) {
2038                                                 /* not a memory loop */
2039                                                 goto fail;
2040                                         }
2041                                         if (out_rc == NULL) {
2042                                                 /* first region constant */
2043                                                 out_rc = pred;
2044                                                 ++num_outside;
2045                                         } else if (out_rc != pred) {
2046                                                 /* another region constant */
2047                                                 ++num_outside;
2048                                         }
2049                                 }
2050                         }
2051                         break;
2052                 }
2053         }
2054         if (! process)
2055                 goto fail;
2056
2057         /* found a memory loop */
2058         DB((dbg, LEVEL_2, "  Found a memory loop:\n  "));
2059         if (only_phi && num_outside == 1) {
2060                 /* a phi cycle with only one real predecessor can be collapsed */
2061 		DB((dbg, LEVEL_2, "  Found a USELESS Phi cycle:\n  "));
2062
2063                 for (irn = pscc->head; irn; irn = next) {
2064                         node_entry *e = get_irn_ne(irn, env);
2065                         next = e->next;
2066                         exchange(irn, out_rc);
2067                 }
2068                 env->changes |= DF_CHANGED;
2069                 return;
2070         }
2071
2072 #ifdef DEBUG_libfirm
2073         for (irn = pscc->head; irn; irn = next) {
2074                 node_entry *e = get_irn_ne(irn, env);
2075                 next = e->next;
2076                 DB((dbg, LEVEL_2, " %+F,", irn));
2077         }
2078         DB((dbg, LEVEL_2, "\n"));
2079 #endif
2080         move_loads_out_of_loops(pscc, env);
2081
2082 fail:
2083         ;
2084 }  /* process_loop */
2085
2086 /**
2087  * Process a SCC.
2088  *
2089  * @param pscc  the SCC
2090  * @param env   the loop environment
2091  */
2092 static void process_scc(scc *pscc, loop_env *env)
2093 {
2094         ir_node *head = pscc->head;
2095         node_entry *e = get_irn_ne(head, env);
2096
2097 #ifdef DEBUG_libfirm
2098         {
2099                 ir_node *irn, *next;
2100
2101                 DB((dbg, LEVEL_4, " SCC at %p:\n ", pscc));
2102                 for (irn = pscc->head; irn; irn = next) {
2103                         node_entry *e = get_irn_ne(irn, env);
2104
2105                         next = e->next;
2106
2107                         DB((dbg, LEVEL_4, " %+F,", irn));
2108                 }
2109                 DB((dbg, LEVEL_4, "\n"));
2110         }
2111 #endif
2112
2113         if (e->next != NULL) {
2114                 /* this SCC has more than one member */
2115                 process_loop(pscc, env);
2116         }
2117 }  /* process_scc */
2118
2119 /**
2120  * Do Tarjan's SCC algorithm and drive load/store optimization.
2121  *
2122  * @param irn  start at this node
2123  * @param env  the loop environment
2124  */
2125 static void dfs(ir_node *irn, loop_env *env)
2126 {
2127         int i, n;
2128         node_entry *node = get_irn_ne(irn, env);
2129
2130         mark_irn_visited(irn);
2131
2132         node->DFSnum = env->nextDFSnum++;
2133         node->low    = node->DFSnum;
2134         push(env, irn);
2135
2136         /* handle preds */
2137         if (is_Phi(irn) || is_Sync(irn)) {
2138                 n = get_irn_arity(irn);
2139                 for (i = 0; i < n; ++i) {
2140                         ir_node *pred = get_irn_n(irn, i);
2141                         node_entry *o = get_irn_ne(pred, env);
2142
2143                         if (!irn_visited(pred)) {
2144                                 dfs(pred, env);
2145                                 node->low = MIN(node->low, o->low);
2146                         }
2147                         if (o->DFSnum < node->DFSnum && o->in_stack)
2148                                 node->low = MIN(o->DFSnum, node->low);
2149                 }
2150         } else if (is_fragile_op(irn)) {
2151                 ir_node *pred = get_fragile_op_mem(irn);
2152                 node_entry *o = get_irn_ne(pred, env);
2153
2154                 if (!irn_visited(pred)) {
2155                         dfs(pred, env);
2156                         node->low = MIN(node->low, o->low);
2157                 }
2158                 if (o->DFSnum < node->DFSnum && o->in_stack)
2159                         node->low = MIN(o->DFSnum, node->low);
2160         } else if (is_Proj(irn)) {
2161                 ir_node *pred = get_Proj_pred(irn);
2162                 node_entry *o = get_irn_ne(pred, env);
2163
2164                 if (!irn_visited(pred)) {
2165                         dfs(pred, env);
2166                         node->low = MIN(node->low, o->low);
2167                 }
2168                 if (o->DFSnum < node->DFSnum && o->in_stack)
2169                         node->low = MIN(o->DFSnum, node->low);
2170 	} else {
2171 		/* IGNORE predecessors */
2173         }
2174
2175         if (node->low == node->DFSnum) {
2176                 scc *pscc = (scc*)phase_alloc(&env->ph, sizeof(*pscc));
2177                 ir_node *x;
2178
2179                 pscc->head = NULL;
2180                 do {
2181                         node_entry *e;
2182
2183                         x = pop(env);
2184                         e = get_irn_ne(x, env);
2185                         e->pscc    = pscc;
2186                         e->next    = pscc->head;
2187                         pscc->head = x;
2188                 } while (x != irn);
2189
2190                 process_scc(pscc, env);
2191         }
2192 }  /* dfs */
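/* Editor's note: this is Tarjan's SCC algorithm restricted to the memory
 * subgraph (Phis, Syncs, fragile ops and their memory Projs); every SCC
 * with more than one member is a candidate memory loop for process_loop(). */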
2193
2194 /**
2195  * Do the DFS on the memory edges of a graph.
2196  *
2197  * @param irg  the graph to process
2198  * @param env  the loop environment
2199  */
2200 static void do_dfs(ir_graph *irg, loop_env *env)
2201 {
2202         ir_node  *endblk, *end;
2203         int      i;
2204
2205         inc_irg_visited(irg);
2206
2207         /* visit all memory nodes */
2208         endblk = get_irg_end_block(irg);
2209         for (i = get_Block_n_cfgpreds(endblk) - 1; i >= 0; --i) {
2210                 ir_node *pred = get_Block_cfgpred(endblk, i);
2211
2212                 pred = skip_Proj(pred);
2213                 if (is_Return(pred)) {
2214                         dfs(get_Return_mem(pred), env);
2215                 } else if (is_Raise(pred)) {
2216                         dfs(get_Raise_mem(pred), env);
2217                 } else if (is_fragile_op(pred)) {
2218                         dfs(get_fragile_op_mem(pred), env);
2219                 } else if (is_Bad(pred)) {
2220                         /* ignore non-optimized block predecessor */
2221                 } else {
2222                         assert(0 && "Unknown EndBlock predecessor");
2223                 }
2224         }
2225
2226         /* visit the keep-alives */
2227         end = get_irg_end(irg);
2228         for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
2229                 ir_node *ka = get_End_keepalive(end, i);
2230
2231                 if (is_Phi(ka) && !irn_visited(ka))
2232                         dfs(ka, env);
2233         }
2234 }  /* do_dfs */
2235
2236 /**
2237  * Optimize Loads/Stores in loops.
2238  *
2239  * @param irg  the graph
2240  */
2241 static int optimize_loops(ir_graph *irg)
2242 {
2243         loop_env env;
2244
2245         env.stack         = NEW_ARR_F(ir_node *, 128);
2246         env.tos           = 0;
2247         env.nextDFSnum    = 0;
2248         env.POnum         = 0;
2249         env.changes       = 0;
2250         phase_init(&env.ph, irg, phase_irn_init_default);
2251
2252         /* calculate the SCC's and drive loop optimization. */
2253         do_dfs(irg, &env);
2254
2255         DEL_ARR_F(env.stack);
2256         phase_deinit(&env.ph);
2257
2258         return env.changes;
2259 }  /* optimize_loops */
2260
2261 /*
2262  * Do the Load/Store optimization.
2263  */
2264 static ir_graph_state_t do_loadstore_opt(ir_graph *irg)
2265 {
2266         walk_env_t env;
2267         ir_graph_state_t res = 0;
2268
2269         FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");
2270
2271         assert(get_irg_phase_state(irg) != phase_building);
2272         assert(get_irg_pinned(irg) != op_pin_state_floats &&
2273                 "LoadStore optimization needs pinned graph");
2274
2275         if (get_opt_alias_analysis()) {
2276                 assure_irp_globals_entity_usage_computed();
2277         }
2278
2279         obstack_init(&env.obst);
2280         env.changes = 0;
2281
2282         /* init the links, then collect Loads/Stores/Proj's in lists */
2283         master_visited = 0;
2284         irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);
2285
2286         /* now we have collected enough information, optimize */
2287         irg_walk_graph(irg, NULL, do_load_store_optimize, &env);
2288
2289         env.changes |= optimize_loops(irg);
2290
2291         obstack_free(&env.obst, NULL);
2292
2293         /* Handle graph state */
2294         if (env.changes) {
2295                 set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
2296                 edges_deactivate(irg);
2297         }
2298
2299         if (env.changes & CF_CHANGED) {
2300 		/* control flow changed, blocks might have Bad() predecessors */
2301                 set_irg_doms_inconsistent(irg);
2302         } else {
2303                 res |= IR_GRAPH_STATE_CONSISTENT_DOMINANCE | IR_GRAPH_STATE_NO_BAD_BLOCKS;
2304         }
2305
2306         return res;
2307 }
2308
2309 optdesc_t opt_loadstore = {
2310         "load-store",
2311         IR_GRAPH_STATE_NO_UNREACHABLE_BLOCKS | IR_GRAPH_STATE_CONSISTENT_OUT_EDGES | IR_GRAPH_STATE_NO_CRITICAL_EDGES | IR_GRAPH_STATE_CONSISTENT_DOMINANCE | IR_GRAPH_STATE_CONSISTENT_ENTITY_USAGE,
2312         do_loadstore_opt,
2313 };
2314
2315 int optimize_load_store(ir_graph *irg)
2316 {
2317         perform_irg_optimization(irg, &opt_loadstore);
2318         return 1;
2319 }
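/*
 * Editor's usage sketch (assuming a graph that is otherwise ready for
 * optimization):
 *
 *     ir_graph *irg = ...;
 *     optimize_load_store(irg);   // perform_irg_optimization() first
 *                                 // establishes the graph states listed
 *                                 // in opt_loadstore, then runs
 *                                 // do_loadstore_opt()
 */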
2320
2321 ir_graph_pass_t *optimize_load_store_pass(const char *name)
2322 {
2323         return def_graph_pass_ret(name ? name : "ldst", optimize_load_store);
2324 }  /* optimize_load_store_pass */