rename ir_phase to ir_nodemap and simplify interface
[libfirm] / ir / opt / ldstopt.c
1 /*
2  * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief   Load/Store optimizations.
23  * @author  Michael Beck
24  * @version $Id$
25  */
26 #include "config.h"
27
28 #include <string.h>
29
30 #include "iroptimize.h"
31 #include "irnode_t.h"
32 #include "irgraph_t.h"
33 #include "irmode_t.h"
34 #include "iropt_t.h"
35 #include "ircons_t.h"
36 #include "irgmod.h"
37 #include "irgwalk.h"
38 #include "irtools.h"
39 #include "tv_t.h"
40 #include "dbginfo_t.h"
41 #include "iropt_dbg.h"
42 #include "irflag_t.h"
43 #include "array_t.h"
44 #include "irhooks.h"
45 #include "iredges.h"
46 #include "irpass.h"
47 #include "opt_polymorphy.h"
48 #include "irmemory.h"
49 #include "irnodehashmap.h"
50 #include "irgopt.h"
51 #include "set.h"
52 #include "be.h"
53 #include "debug.h"
54 #include "opt_manage.h"
55
56 /** The debug handle. */
57 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
58
59 #undef IMAX
60 #define IMAX(a,b)   ((a) > (b) ? (a) : (b))
61
62 #define MAX_PROJ    IMAX(IMAX((long)pn_Load_max, (long)pn_Store_max), (long)pn_Call_max)
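/* MAX_PROJ sizes the projs[] array in ldst_info_t below, which is indexed
 * by Proj number for Load, Store and Call nodes alike. */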
63
64 enum changes_t {
65         DF_CHANGED = 1,       /**< data flow changed */
66         CF_CHANGED = 2,       /**< control flow changed */
67 };
68
69 /**
70  * walker environment
71  */
72 typedef struct walk_env_t {
73         struct obstack obst;          /**< list of all stores */
74         unsigned changes;             /**< a bitmask of graph changes */
75 } walk_env_t;
76
77 /** A Load/Store info. */
78 typedef struct ldst_info_t {
79         ir_node  *projs[MAX_PROJ+1];  /**< list of Projs of this node */
80         ir_node  *exc_block;          /**< the exception block if available */
81         int      exc_idx;             /**< predecessor index in the exception block */
82         unsigned visited;             /**< visited counter for breaking loops */
83 } ldst_info_t;
84
85 /**
86  * flags for control flow.
87  */
88 enum block_flags_t {
89         BLOCK_HAS_COND = 1,      /**< Block has conditional control flow */
90         BLOCK_HAS_EXC  = 2       /**< Block has exceptional control flow */
91 };
92
93 /**
94  * a Block info.
95  */
96 typedef struct block_info_t {
97         unsigned flags;               /**< flags for the block */
98 } block_info_t;
99
100 /** the master visited flag for loop detection. */
101 static unsigned master_visited = 0;
102
103 #define INC_MASTER()       ++master_visited
104 #define MARK_NODE(info)    (info)->visited = master_visited
105 #define NODE_VISITED(info) (info)->visited >= master_visited
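/* Instead of clearing per-node visited fields before every walk, each walk
 * increments master_visited; a node counts as visited only if its own
 * counter has caught up with the current master value. */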
106
107 /**
108  * get the Load/Store info of a node
109  */
110 static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst)
111 {
112         ldst_info_t *info = (ldst_info_t*)get_irn_link(node);
113
114         if (! info) {
115                 info = OALLOCZ(obst, ldst_info_t);
116                 set_irn_link(node, info);
117         }
118         return info;
119 }  /* get_ldst_info */
120
121 /**
122  * get the Block info of a node
123  */
124 static block_info_t *get_block_info(ir_node *node, struct obstack *obst)
125 {
126         block_info_t *info = (block_info_t*)get_irn_link(node);
127
128         if (! info) {
129                 info = OALLOCZ(obst, block_info_t);
130                 set_irn_link(node, info);
131         }
132         return info;
133 }  /* get_block_info */
134
135 /**
136  * update the projection info for a Load/Store
137  */
138 static unsigned update_projs(ldst_info_t *info, ir_node *proj)
139 {
140         long nr = get_Proj_proj(proj);
141
142         assert(0 <= nr && nr <= MAX_PROJ && "Wrong proj from LoadStore");
143
144         if (info->projs[nr]) {
145                 /* there is already one, do CSE */
146                 exchange(proj, info->projs[nr]);
147                 return DF_CHANGED;
148         }
149         else {
150                 info->projs[nr] = proj;
151                 return 0;
152         }
153 }  /* update_projs */
154
155 /**
156  * update the exception block info for a Load/Store node.
157  *
158  * @param info   the load/store info struct
159  * @param block  the exception handler block for this load/store
160  * @param pos    the control flow input of the block
161  */
162 static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
163 {
164         assert(info->exc_block == NULL && "more than one exception block found");
165
166         info->exc_block = block;
167         info->exc_idx   = pos;
168         return 0;
169 }  /* update_exc */
170
171 /** Return the number of uses of an address node */
172 #define get_irn_n_uses(adr)     get_irn_n_edges(adr)
173
174 /**
175  * walker, collects all Load/Store/Proj nodes
176  *
177  * walks from Start -> End
178  */
179 static void collect_nodes(ir_node *node, void *env)
180 {
181         walk_env_t  *wenv   = (walk_env_t *)env;
182         unsigned     opcode = get_irn_opcode(node);
183         ir_node     *pred, *blk, *pred_blk;
184         ldst_info_t *ldst_info;
185
186         if (opcode == iro_Proj) {
187                 pred   = get_Proj_pred(node);
188                 opcode = get_irn_opcode(pred);
189
190                 if (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call) {
191                         ldst_info = get_ldst_info(pred, &wenv->obst);
192
193                         wenv->changes |= update_projs(ldst_info, node);
194
195                         /*
196                          * Place the Projs in the same block as the
197                          * predecessor Load. This is always ok and prevents
198                          * "non-SSA" form after optimizations if the Proj
199                          * is in the wrong block.
200                          */
201                         blk      = get_nodes_block(node);
202                         pred_blk = get_nodes_block(pred);
203                         if (blk != pred_blk) {
204                                 wenv->changes |= DF_CHANGED;
205                                 set_nodes_block(node, pred_blk);
206                         }
207                 }
208         } else if (opcode == iro_Block) {
209                 int i;
210
211                 for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
212                         ir_node      *pred_block, *proj;
213                         block_info_t *bl_info;
214                         int          is_exc = 0;
215
216                         pred = proj = get_Block_cfgpred(node, i);
217
218                         if (is_Proj(proj)) {
219                                 pred   = get_Proj_pred(proj);
220                                 is_exc = is_x_except_Proj(proj);
221                         }
222
223                         /* ignore Bad predecessors, they will be removed later */
224                         if (is_Bad(pred))
225                                 continue;
226
227                         pred_block = get_nodes_block(pred);
228                         bl_info    = get_block_info(pred_block, &wenv->obst);
229
230                         if (is_fragile_op(pred) && is_exc)
231                                 bl_info->flags |= BLOCK_HAS_EXC;
232                         else if (is_irn_forking(pred))
233                                 bl_info->flags |= BLOCK_HAS_COND;
234
235                         opcode = get_irn_opcode(pred);
236                         if (is_exc && (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call)) {
237                                 ldst_info = get_ldst_info(pred, &wenv->obst);
238
239                                 wenv->changes |= update_exc(ldst_info, node, i);
240                         }
241                 }
242         }
243 }  /* collect_nodes */
244
245 /**
246  * Returns an entity if the address ptr points to a constant one.
247  *
248  * @param ptr  the address
249  *
250  * @return an entity or NULL
251  */
252 static ir_entity *find_constant_entity(ir_node *ptr)
253 {
254         for (;;) {
255                 if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
256                         return get_SymConst_entity(ptr);
257                 } else if (is_Sel(ptr)) {
258                         ir_entity *ent = get_Sel_entity(ptr);
259                         ir_type   *tp  = get_entity_owner(ent);
260
261                         /* Do not fiddle with polymorphism. */
262                         if (is_Class_type(get_entity_owner(ent)) &&
263                                 ((get_entity_n_overwrites(ent)    != 0) ||
264                                 (get_entity_n_overwrittenby(ent) != 0)   ) )
265                                 return NULL;
266
267                         if (is_Array_type(tp)) {
268                                 /* check bounds */
269                                 int i, n;
270
271                                 for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
272                                         ir_node   *bound;
273                                         ir_tarval *tlower, *tupper;
274                                         ir_node   *index = get_Sel_index(ptr, i);
275                                         ir_tarval *tv    = computed_value(index);
276
277                                         /* check if the index is constant */
278                                         if (tv == tarval_bad)
279                                                 return NULL;
280
281                                         bound  = get_array_lower_bound(tp, i);
282                                         tlower = computed_value(bound);
283                                         bound  = get_array_upper_bound(tp, i);
284                                         tupper = computed_value(bound);
285
286                                         if (tlower == tarval_bad || tupper == tarval_bad)
287                                                 return NULL;
288
289                                         if (tarval_cmp(tv, tlower) == ir_relation_less)
290                                                 return NULL;
291                                         if (tarval_cmp(tupper, tv) == ir_relation_less)
292                                                 return NULL;
293
294                                         /* ok, bounds check finished */
295                                 }
296                         }
297
298                         if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT)
299                                 return ent;
300
301                         /* try next */
302                         ptr = get_Sel_ptr(ptr);
303                 } else if (is_Add(ptr)) {
304                         ir_node *l = get_Add_left(ptr);
305                         ir_node *r = get_Add_right(ptr);
306
307                         if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
308                                 ptr = l;
309                         else if (get_irn_mode(r) == get_irn_mode(ptr) && is_Const(l))
310                                 ptr = r;
311                         else
312                                 return NULL;
313
314                         /* for now, we support only one addition, reassoc should fold all others */
315                         if (! is_SymConst(ptr) && !is_Sel(ptr))
316                                 return NULL;
317                 } else if (is_Sub(ptr)) {
318                         ir_node *l = get_Sub_left(ptr);
319                         ir_node *r = get_Sub_right(ptr);
320
321                         if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
322                                 ptr = l;
323                         else
324                                 return NULL;
325                         /* for now, we support only one subtraction, reassoc should fold all others */
326                         if (! is_SymConst(ptr) && !is_Sel(ptr))
327                                 return NULL;
328                 } else
329                         return NULL;
330         }
331 }  /* find_constant_entity */
332
333 /**
334  * Return the constant index of Sel node n in dimension dim as a long.
335  */
336 static long get_Sel_array_index_long(ir_node *n, int dim)
337 {
338         ir_node *index = get_Sel_index(n, dim);
339         assert(is_Const(index));
340         return get_tarval_long(get_Const_tarval(index));
341 }  /* get_Sel_array_index_long */
342
343 /**
344  * Returns the accessed component graph path for a
345  * node computing an address.
346  *
347  * @param ptr    the node computing the address
348  * @param depth  current depth in steps upward from the root
349  *               of the address
350  */
351 static compound_graph_path *rec_get_accessed_path(ir_node *ptr, size_t depth)
352 {
353         compound_graph_path *res = NULL;
354         ir_entity           *root, *field, *ent;
355         size_t              path_len, pos, idx;
356         ir_tarval           *tv;
357         ir_type             *tp;
358
359         if (is_SymConst(ptr)) {
360                 /* a SymConst. If the depth is 0, this is an access to a global
361                  * entity and we don't need a component path, else we know
362                  * at least its length.
363                  */
364                 assert(get_SymConst_kind(ptr) == symconst_addr_ent);
365                 root = get_SymConst_entity(ptr);
366                 res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
367         } else if (is_Sel(ptr)) {
368                 /* it's a Sel, go up until we find the root */
369                 res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);
370                 if (res == NULL)
371                         return NULL;
372
373                 /* fill up the step in the path at the current position */
374                 field    = get_Sel_entity(ptr);
375                 path_len = get_compound_graph_path_length(res);
376                 pos      = path_len - depth - 1;
377                 set_compound_graph_path_node(res, pos, field);
378
379                 if (is_Array_type(get_entity_owner(field))) {
380                         assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
381                         set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
382                 }
383         } else if (is_Add(ptr)) {
384                 ir_mode   *mode;
385                 ir_tarval *tmp;
386
387                 {
388                         ir_node   *l    = get_Add_left(ptr);
389                         ir_node   *r    = get_Add_right(ptr);
390                         if (is_Const(r) && get_irn_mode(l) == get_irn_mode(ptr)) {
391                                 ptr = l;
392                                 tv  = get_Const_tarval(r);
393                         } else {
394                                 ptr = r;
395                                 tv  = get_Const_tarval(l);
396                         }
397                 }
398 ptr_arith:
399                 mode = get_tarval_mode(tv);
400                 tmp  = tv;
401
402                 /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
403                 if (is_Sel(ptr)) {
404                         field = get_Sel_entity(ptr);
405                 } else {
406                         field = get_SymConst_entity(ptr);
407                 }
408                 idx = 0;
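                    /* first pass: walk down the array dimensions, check the
                     * constant bounds and count the number of path steps (idx)
                     * that this pointer arithmetic contributes */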
409                 for (ent = field;;) {
410                         unsigned   size;
411                         ir_tarval *sz, *tv_index, *tlower, *tupper;
412                         ir_node   *bound;
413
414                         tp = get_entity_type(ent);
415                         if (! is_Array_type(tp))
416                                 break;
417                         ent = get_array_element_entity(tp);
418                         size = get_type_size_bytes(get_entity_type(ent));
419                         sz   = new_tarval_from_long(size, mode);
420
421                         tv_index = tarval_div(tmp, sz);
422                         tmp      = tarval_mod(tmp, sz);
423
424                         if (tv_index == tarval_bad || tmp == tarval_bad)
425                                 return NULL;
426
427                         assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
428                         bound  = get_array_lower_bound(tp, 0);
429                         tlower = computed_value(bound);
430                         bound  = get_array_upper_bound(tp, 0);
431                         tupper = computed_value(bound);
432
433                         if (tlower == tarval_bad || tupper == tarval_bad)
434                                 return NULL;
435
436                         if (tarval_cmp(tv_index, tlower) == ir_relation_less)
437                                 return NULL;
438                         if (tarval_cmp(tupper, tv_index) == ir_relation_less)
439                                 return NULL;
440
441                         /* ok, bounds check finished */
442                         ++idx;
443                 }
444                 if (! tarval_is_null(tmp)) {
445                         /* access to some struct/union member */
446                         return NULL;
447                 }
448
449                 /* should be at least ONE array */
450                 if (idx == 0)
451                         return NULL;
452
453                 res = rec_get_accessed_path(ptr, depth + idx);
454                 if (res == NULL)
455                         return NULL;
456
457                 path_len = get_compound_graph_path_length(res);
458                 pos      = path_len - depth - idx;
459
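                    /* second pass: enter the element entities and the array
                     * indices into the freshly allocated part of the path */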
460                 for (ent = field;;) {
461                         unsigned   size;
462                         ir_tarval *sz, *tv_index;
463                         long       index;
464
465                         tp = get_entity_type(ent);
466                         if (! is_Array_type(tp))
467                                 break;
468                         ent = get_array_element_entity(tp);
469                         set_compound_graph_path_node(res, pos, ent);
470
471                         size = get_type_size_bytes(get_entity_type(ent));
472                         sz   = new_tarval_from_long(size, mode);
473
474                         tv_index = tarval_div(tv, sz);
475                         tv       = tarval_mod(tv, sz);
476
477                         /* worked above, should work again */
478                         assert(tv_index != tarval_bad && tv != tarval_bad);
479
480                         /* bounds already checked above */
481                         index = get_tarval_long(tv_index);
482                         set_compound_graph_path_array_index(res, pos, index);
483                         ++pos;
484                 }
485         } else if (is_Sub(ptr)) {
486                 ir_node *l = get_Sub_left(ptr);
487                 ir_node *r = get_Sub_right(ptr);
488
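                    /* a Sub with a constant right operand is handled as an Add
                     * with the negated offset */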
489                 ptr = l;
490                 tv  = get_Const_tarval(r);
491                 tv  = tarval_neg(tv);
492                 goto ptr_arith;
493         }
494         return res;
495 }  /* rec_get_accessed_path */
496
497 /**
498  * Returns an access path or NULL.  The access path is only
499  * valid if the graph is in phase_high and _no_ address computation is used.
500  */
501 static compound_graph_path *get_accessed_path(ir_node *ptr)
502 {
503         compound_graph_path *gr = rec_get_accessed_path(ptr, 0);
504         return gr;
505 }  /* get_accessed_path */
506
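/** One step of an access path: the selected entity plus its position,
 *  i.e. the compound member number or the array index of this step. */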
507 typedef struct path_entry {
508         ir_entity         *ent;
509         struct path_entry *next;
510         size_t            index;
511 } path_entry;
512
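/**
 * Walk ptr down to its SymConst root, collecting the access path in next,
 * then descend the entity's initializer along that path and return the
 * constant value found there, or NULL if it cannot be determined.
 */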
513 static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
514 {
515         path_entry       entry, *p;
516         ir_entity        *ent, *field;
517         ir_initializer_t *initializer;
518         ir_tarval        *tv;
519         ir_type          *tp;
520         size_t           n;
521
522         entry.next = next;
523         if (is_SymConst(ptr)) {
524                 /* found the root */
525                 ent         = get_SymConst_entity(ptr);
526                 initializer = get_entity_initializer(ent);
527                 for (p = next; p != NULL;) {
528                         if (initializer->kind != IR_INITIALIZER_COMPOUND)
529                                 return NULL;
530                         n  = get_initializer_compound_n_entries(initializer);
531                         tp = get_entity_type(ent);
532
533                         if (is_Array_type(tp)) {
534                                 ent = get_array_element_entity(tp);
535                                 if (ent != p->ent) {
536                                         /* a missing [0] */
537                                         if (0 >= n)
538                                                 return NULL;
539                                         initializer = get_initializer_compound_value(initializer, 0);
540                                         continue;
541                                 }
542                         }
543                         if (p->index >= n)
544                                 return NULL;
545                         initializer = get_initializer_compound_value(initializer, p->index);
546
547                         ent = p->ent;
548                         p   = p->next;
549                 }
550                 tp = get_entity_type(ent);
551                 while (is_Array_type(tp)) {
552                         ent = get_array_element_entity(tp);
553                         tp = get_entity_type(ent);
554                         /* a missing [0] */
555                         n  = get_initializer_compound_n_entries(initializer);
556                         if (0 >= n)
557                                 return NULL;
558                         initializer = get_initializer_compound_value(initializer, 0);
559                 }
560
561                 switch (initializer->kind) {
562                 case IR_INITIALIZER_CONST:
563                         return get_initializer_const_value(initializer);
564                 case IR_INITIALIZER_TARVAL:
565                 case IR_INITIALIZER_NULL:
566                 default:
567                         return NULL;
568                 }
569         } else if (is_Sel(ptr)) {
570                 entry.ent = field = get_Sel_entity(ptr);
571                 tp = get_entity_owner(field);
572                 if (is_Array_type(tp)) {
573                         assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
574                         entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
575                 } else {
576                         size_t i, n_members = get_compound_n_members(tp);
577                         for (i = 0; i < n_members; ++i) {
578                                 if (get_compound_member(tp, i) == field)
579                                         break;
580                         }
581                         if (i >= n_members) {
582                                 /* not found: should NOT happen */
583                                 return NULL;
584                         }
585                         entry.index = i;
586                 }
587                 return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
588         }  else if (is_Add(ptr)) {
589                 ir_mode  *mode;
590                 unsigned pos;
591
592                 {
593                         ir_node *l = get_Add_left(ptr);
594                         ir_node *r = get_Add_right(ptr);
595                         if (is_Const(r)) {
596                                 ptr = l;
597                                 tv  = get_Const_tarval(r);
598                         } else {
599                                 ptr = r;
600                                 tv  = get_Const_tarval(l);
601                         }
602                 }
603 ptr_arith:
604                 mode = get_tarval_mode(tv);
605
606                 /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
607                 if (is_Sel(ptr)) {
608                         field = get_Sel_entity(ptr);
609                 } else {
610                         field = get_SymConst_entity(ptr);
611                 }
612
613                 /* count needed entries */
614                 pos = 0;
615                 for (ent = field;;) {
616                         tp = get_entity_type(ent);
617                         if (! is_Array_type(tp))
618                                 break;
619                         ent = get_array_element_entity(tp);
620                         ++pos;
621                 }
622                 /* should be at least ONE entry */
623                 if (pos == 0)
624                         return NULL;
625
626                 /* allocate the right number of entries */
627                 NEW_ARR_A(path_entry, p, pos);
628
629                 /* fill them up */
630                 pos = 0;
631                 for (ent = field;;) {
632                         unsigned   size;
633                         ir_tarval *sz, *tv_index, *tlower, *tupper;
634                         long       index;
635                         ir_node   *bound;
636
637                         tp = get_entity_type(ent);
638                         if (! is_Array_type(tp))
639                                 break;
640                         ent = get_array_element_entity(tp);
641                         p[pos].ent  = ent;
642                         p[pos].next = &p[pos + 1];
643
644                         size = get_type_size_bytes(get_entity_type(ent));
645                         sz   = new_tarval_from_long(size, mode);
646
647                         tv_index = tarval_div(tv, sz);
648                         tv       = tarval_mod(tv, sz);
649
650                         if (tv_index == tarval_bad || tv == tarval_bad)
651                                 return NULL;
652
653                         assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
654                         bound  = get_array_lower_bound(tp, 0);
655                         tlower = computed_value(bound);
656                         bound  = get_array_upper_bound(tp, 0);
657                         tupper = computed_value(bound);
658
659                         if (tlower == tarval_bad || tupper == tarval_bad)
660                                 return NULL;
661
662                         if (tarval_cmp(tv_index, tlower) == ir_relation_less)
663                                 return NULL;
664                         if (tarval_cmp(tupper, tv_index) == ir_relation_less)
665                                 return NULL;
666
667                         /* ok, bounds check finished */
668                         index = get_tarval_long(tv_index);
669                         p[pos].index = index;
670                         ++pos;
671                 }
672                 if (! tarval_is_null(tv)) {
673                         /* hmm, wrong access */
674                         return NULL;
675                 }
676                 p[pos - 1].next = next;
677                 return rec_find_compound_ent_value(ptr, p);
678         } else if (is_Sub(ptr)) {
679                 ir_node *l = get_Sub_left(ptr);
680                 ir_node *r = get_Sub_right(ptr);
681
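                /* as in the Add case above: a Sub with a constant right
                 * operand is an Add with the negated offset */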
682                 ptr = l;
683                 tv  = get_Const_tarval(r);
684                 tv  = tarval_neg(tv);
685                 goto ptr_arith;
686         }
687         return NULL;
688 }
689
690 static ir_node *find_compound_ent_value(ir_node *ptr)
691 {
692         return rec_find_compound_ent_value(ptr, NULL);
693 }
694
695 /* forward */
696 static void reduce_adr_usage(ir_node *ptr);
697
698 /**
699  * Update a Load that may have lost its users.
700  */
701 static void handle_load_update(ir_node *load)
702 {
703         ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
704
705         /* do NOT touch volatile loads for now */
706         if (get_Load_volatility(load) == volatility_is_volatile)
707                 return;
708
709         if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
710                 ir_node *ptr = get_Load_ptr(load);
711                 ir_node *mem = get_Load_mem(load);
712
713                 /* a Load whose value is neither used nor exception checked; remove it */
714                 exchange(info->projs[pn_Load_M], mem);
715                 if (info->projs[pn_Load_X_regular])
716                         exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
717                 kill_node(load);
718                 reduce_adr_usage(ptr);
719         }
720 }  /* handle_load_update */
721
722 /**
723  * A use of an address node has vanished. Check if this was a Proj
724  * node and update the counters.
725  */
726 static void reduce_adr_usage(ir_node *ptr)
727 {
728         ir_node *pred;
729         if (!is_Proj(ptr))
730                 return;
731         if (get_irn_n_edges(ptr) > 0)
732                 return;
733
734         /* this Proj is dead now */
735         pred = get_Proj_pred(ptr);
736         if (is_Load(pred)) {
737                 ldst_info_t *info = (ldst_info_t*)get_irn_link(pred);
738                 info->projs[get_Proj_proj(ptr)] = NULL;
739
740                 /* this node lost its result proj, handle that */
741                 handle_load_update(pred);
742         }
743 }  /* reduce_adr_usage */
744
745 /**
746  * Check whether an already existing value of mode old_mode can be converted
747  * into the needed mode new_mode without loss.
748  */
749 static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode)
750 {
751         unsigned old_size;
752         unsigned new_size;
753         if (old_mode == new_mode)
754                 return true;
755
756         old_size = get_mode_size_bits(old_mode);
757         new_size = get_mode_size_bits(new_mode);
758
759         /* if both modes are two's complement ones, we can always convert the
760            Stored value into the needed one. (on big endian machines we currently
761            only support this for modes of same size) */
762         if (old_size >= new_size &&
763                   get_mode_arithmetic(old_mode) == irma_twos_complement &&
764                   get_mode_arithmetic(new_mode) == irma_twos_complement &&
765                   (!be_get_backend_param()->byte_order_big_endian
766                 || old_size == new_size)) {
767                 return true;
768         }
769         return false;
770 }
771
772 /**
773  * Check whether a Call is at least pure, i.e. only reads memory.
774  */
775 static unsigned is_Call_pure(ir_node *call)
776 {
777         ir_type *call_tp = get_Call_type(call);
778         unsigned prop = get_method_additional_properties(call_tp);
779
780         /* check first the call type */
781         if ((prop & (mtp_property_const|mtp_property_pure)) == 0) {
782                 /* try the called entity */
783                 ir_node *ptr = get_Call_ptr(call);
784
785                 if (is_Global(ptr)) {
786                         ir_entity *ent = get_Global_entity(ptr);
787
788                         prop = get_entity_additional_properties(ent);
789                 }
790         }
791         return (prop & (mtp_property_const|mtp_property_pure)) != 0;
792 }  /* is_Call_pure */
793
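/**
 * Walk down Add, Sub and Sel chains, accumulating the constant byte offset.
 * Stores the offset in *pOffset and returns the remaining base address.
 */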
794 static ir_node *get_base_and_offset(ir_node *ptr, long *pOffset)
795 {
796         ir_mode *mode  = get_irn_mode(ptr);
797         long    offset = 0;
798
799         /* TODO: long might not be enough, we should probably use some tarval thingy... */
800         for (;;) {
801                 if (is_Add(ptr)) {
802                         ir_node *l = get_Add_left(ptr);
803                         ir_node *r = get_Add_right(ptr);
804
805                         if (get_irn_mode(l) != mode || !is_Const(r))
806                                 break;
807
808                         offset += get_tarval_long(get_Const_tarval(r));
809                         ptr     = l;
810                 } else if (is_Sub(ptr)) {
811                         ir_node *l = get_Sub_left(ptr);
812                         ir_node *r = get_Sub_right(ptr);
813
814                         if (get_irn_mode(l) != mode || !is_Const(r))
815                                 break;
816
817                         offset -= get_tarval_long(get_Const_tarval(r));
818                         ptr     = l;
819                 } else if (is_Sel(ptr)) {
820                         ir_entity *ent = get_Sel_entity(ptr);
821                         ir_type   *tp  = get_entity_owner(ent);
822
823                         if (is_Array_type(tp)) {
824                                 int     size;
825                                 ir_node *index;
826
827                                 /* only one dimensional arrays yet */
828                                 if (get_Sel_n_indexs(ptr) != 1)
829                                         break;
830                                 index = get_Sel_index(ptr, 0);
831                                 if (! is_Const(index))
832                                         break;
833
834                                 tp = get_entity_type(ent);
835                                 if (get_type_state(tp) != layout_fixed)
836                                         break;
837
838                                 size    = get_type_size_bytes(tp);
839                                 offset += size * get_tarval_long(get_Const_tarval(index));
840                         } else {
841                                 if (get_type_state(tp) != layout_fixed)
842                                         break;
843                                 offset += get_entity_offset(ent);
844                         }
845                         ptr = get_Sel_ptr(ptr);
846                 } else
847                         break;
848         }
849
850         *pOffset = offset;
851         return ptr;
852 }
853
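/**
 * Try to replace a Load by the value of an earlier Store to the same base
 * address, adjusting for offset and mode differences.
 *
 * @return 0 if nothing changed, otherwise a changes_t bitmask
 */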
854 static int try_load_after_store(ir_node *load,
855                 ir_node *load_base_ptr, long load_offset, ir_node *store)
856 {
857         ldst_info_t *info;
858         ir_node *store_ptr      = get_Store_ptr(store);
859         long     store_offset;
860         ir_node *store_base_ptr = get_base_and_offset(store_ptr, &store_offset);
861         ir_node *store_value;
862         ir_mode *store_mode;
863         ir_node *load_ptr;
864         ir_mode *load_mode;
865         long     load_mode_len;
866         long     store_mode_len;
867         long     delta;
868         int      res;
869
870         if (load_base_ptr != store_base_ptr)
871                 return 0;
872
873         load_mode      = get_Load_mode(load);
874         load_mode_len  = get_mode_size_bytes(load_mode);
875         store_mode     = get_irn_mode(get_Store_value(store));
876         store_mode_len = get_mode_size_bytes(store_mode);
877         delta          = load_offset - store_offset;
878         store_value    = get_Store_value(store);
879
880         if (delta != 0 || store_mode != load_mode) {
881                 /* TODO: implement for big-endian */
882                 if (delta < 0 || delta + load_mode_len > store_mode_len
883                                 || (be_get_backend_param()->byte_order_big_endian
884                                     && load_mode_len != store_mode_len))
885                         return 0;
886
887                 if (get_mode_arithmetic(store_mode) != irma_twos_complement ||
888                         get_mode_arithmetic(load_mode)  != irma_twos_complement)
889                         return 0;
890
891
892                 /* shift right by delta*8: on little endian the bytes at base+delta start at bit delta*8 of the stored value */
893                 if (delta > 0) {
894                         ir_node *cnst;
895                         ir_graph *irg = get_irn_irg(load);
896
897                         cnst        = new_r_Const_long(irg, mode_Iu, delta * 8);
898                         store_value = new_r_Shr(get_nodes_block(load),
899                                                                         store_value, cnst, store_mode);
900                 }
901
902                 /* add a Conv if needed */
903                 if (store_mode != load_mode) {
904                         store_value = new_r_Conv(get_nodes_block(load), store_value, load_mode);
905                 }
906         }
907
908         DBG_OPT_RAW(load, store_value);
909
910         info = (ldst_info_t*)get_irn_link(load);
911         if (info->projs[pn_Load_M])
912                 exchange(info->projs[pn_Load_M], get_Load_mem(load));
913
914         res = 0;
915         /* no exception */
916         if (info->projs[pn_Load_X_except]) {
917                 ir_graph *irg = get_irn_irg(load);
918                 exchange( info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
919                 res |= CF_CHANGED;
920         }
921         if (info->projs[pn_Load_X_regular]) {
922                 exchange( info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
923                 res |= CF_CHANGED;
924         }
925
926         if (info->projs[pn_Load_res])
927                 exchange(info->projs[pn_Load_res], store_value);
928
929         load_ptr = get_Load_ptr(load);
930         kill_node(load);
931         reduce_adr_usage(load_ptr);
932         return res | DF_CHANGED;
933 }
934
935 /**
936  * Follow the memory chain as long as there are only Loads,
937  * alias-free Stores, and constant Calls, and try to replace the
938  * current Load by a previous one.
939  * Note that in unreachable loops we might reach the Load again,
940  * and we can also fall into a cycle.
941  * We break such cycles using a special visited flag.
942  *
943  * INC_MASTER() must be called before diving in.
944  */
945 static unsigned follow_Mem_chain(ir_node *load, ir_node *curr)
946 {
947         unsigned    res = 0;
948         ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
949         ir_node     *pred;
950         ir_node     *ptr       = get_Load_ptr(load);
951         ir_node     *mem       = get_Load_mem(load);
952         ir_mode     *load_mode = get_Load_mode(load);
953
954         for (pred = curr; load != pred; ) {
955                 ldst_info_t *pred_info = (ldst_info_t*)get_irn_link(pred);
956
957                 /*
958                  * a Load immediately after a Store -- a read after write.
959                  * We may remove the Load if neither the Load nor the Store has an
960                  * exception handler OR they are in the same Block. In the latter
961                  * case the Load cannot throw an exception if the previous Store was
962                  * quiet.
963                  *
964                  * Why do we need to check for a Store exception? If the Store cannot
965                  * be executed (ROM) the exception handler might simply jump into
966                  * the Load block :-(
967                  * We could do a little better if we knew that the
968                  * exception handler of the Store jumps directly to the end...
969                  */
970                 if (is_Store(pred) && ((pred_info->projs[pn_Store_X_except] == NULL
971                                 && info->projs[pn_Load_X_except] == NULL)
972                                 || get_nodes_block(load) == get_nodes_block(pred)))
973                 {
974                         long    load_offset;
975                         ir_node *base_ptr = get_base_and_offset(ptr, &load_offset);
976                         int     changes   = try_load_after_store(load, base_ptr, load_offset, pred);
977
978                         if (changes != 0)
979                                 return res | changes;
980                 } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
981                            can_use_stored_value(get_Load_mode(pred), load_mode)) {
982                         /*
983                          * a Load after a Load -- a read after read.
984                          * We may remove the second Load, if it does not have an exception
985                          * handler OR they are in the same Block. In the latter case
986                          * the Load cannot throw an exception if the previous Load was
987                          * quiet.
988                          *
989                          * Here, there is no need to check if the previous Load has an
990                          * exception handler because they would have exactly the same
991                          * exception...
992                          *
993                          * TODO: implement load-after-load with different mode for big
994                          *       endian
995                          */
996                         if (info->projs[pn_Load_X_except] == NULL
997                                         || get_nodes_block(load) == get_nodes_block(pred)) {
998                                 ir_node *value;
999
1000                                 DBG_OPT_RAR(load, pred);
1001
1002                                 /* the result is used */
1003                                 if (info->projs[pn_Load_res]) {
1004                                         if (pred_info->projs[pn_Load_res] == NULL) {
1005                                                 /* create a new Proj again */
1006                                                 pred_info->projs[pn_Load_res] = new_r_Proj(pred, get_Load_mode(pred), pn_Load_res);
1007                                         }
1008                                         value = pred_info->projs[pn_Load_res];
1009
1010                                         /* add a Conv if needed */
1011                                         if (get_Load_mode(pred) != load_mode) {
1012                                                 value = new_r_Conv(get_nodes_block(load), value, load_mode);
1013                                         }
1014
1015                                         exchange(info->projs[pn_Load_res], value);
1016                                 }
1017
1018                                 if (info->projs[pn_Load_M])
1019                                         exchange(info->projs[pn_Load_M], mem);
1020
1021                                 /* no exception */
1022                                 if (info->projs[pn_Load_X_except]) {
1023                                         ir_graph *irg = get_irn_irg(load);
1024                                         exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
1025                                         res |= CF_CHANGED;
1026                                 }
1027                                 if (info->projs[pn_Load_X_regular]) {
1028                                         exchange( info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
1029                                         res |= CF_CHANGED;
1030                                 }
1031
1032                                 kill_node(load);
1033                                 reduce_adr_usage(ptr);
1034                                 return res | DF_CHANGED;
1035                         }
1036                 }
1037
1038                 if (is_Store(pred)) {
1039                         /* check if we can pass through this store */
1040                         ir_alias_relation rel = get_alias_relation(
1041                                 get_Store_ptr(pred),
1042                                 get_irn_mode(get_Store_value(pred)),
1043                                 ptr, load_mode);
1044                         /* if there might be an alias, we cannot pass this Store */
1045                         if (rel != ir_no_alias)
1046                                 break;
1047                         pred = skip_Proj(get_Store_mem(pred));
1048                 } else if (is_Load(pred)) {
1049                         pred = skip_Proj(get_Load_mem(pred));
1050                 } else if (is_Call(pred)) {
1051                         if (is_Call_pure(pred)) {
1052                                 /* The called graph is at least pure, so there are no Stores
1053                                    in it. We can handle it like a Load and skip it. */
1054                                 pred = skip_Proj(get_Call_mem(pred));
1055                         } else {
1056                                 /* there might be Stores in the graph, stop here */
1057                                 break;
1058                         }
1059                 } else {
1060                         /* follow only Load chains */
1061                         break;
1062                 }
1063
1064                 /* check for cycles */
1065                 if (NODE_VISITED(pred_info))
1066                         break;
1067                 MARK_NODE(pred_info);
1068         }
1069
1070         if (is_Sync(pred)) {
1071                 int i;
1072
1073                 /* handle all Sync predecessors */
1074                 for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
1075                         res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
1076                         if (res)
1077                                 return res;
1078                 }
1079         }
1080
1081         return res;
1082 }  /* follow_Mem_chain */
1083
1084 /*
1085  * Check if we can replace the load by a given const from
1086  * the const code irg.
1087  */
1088 ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c)
1089 {
1090         ir_mode  *c_mode = get_irn_mode(c);
1091         ir_mode  *l_mode = get_Load_mode(load);
1092         ir_node  *block  = get_nodes_block(load);
1093         dbg_info *dbgi   = get_irn_dbg_info(load);
1094         ir_node  *res    = copy_const_value(dbgi, c, block);
1095
1096         if (c_mode != l_mode) {
1097                 /* check if the mode matches OR can easily be converted into it */
1098                 if (is_reinterpret_cast(c_mode, l_mode)) {
1099                         /* copy the value from the const code irg and cast it */
1100                         res = new_rd_Conv(dbgi, block, res, l_mode);
1101                 } else {
1102                         return NULL;
1103                 }
1104         }
1105         return res;
1106 }
1107
1108 /**
1109  * optimize a Load
1110  *
1111  * @param load  the Load node
1112  */
1113 static unsigned optimize_load(ir_node *load)
1114 {
1115         ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
1116         ir_node     *mem, *ptr, *value;
1117         ir_entity   *ent;
1118         long        dummy;
1119         unsigned    res = 0;
1120
1121         /* do NOT touch volatile loads for now */
1122         if (get_Load_volatility(load) == volatility_is_volatile)
1123                 return 0;
1124
1125         /* the address of the load to be optimized */
1126         ptr = get_Load_ptr(load);
1127
1128         /* The mem of the Load. Must still be returned after optimization. */
1129         mem = get_Load_mem(load);
1130
1131         if (info->projs[pn_Load_res] == NULL
1132                         && info->projs[pn_Load_X_except] == NULL) {
1133                 /* the value is never used and we don't care about exceptions, remove */
1134                 exchange(info->projs[pn_Load_M], mem);
1135
1136                 if (info->projs[pn_Load_X_regular]) {
1137                         /* should not happen, but if it does, remove it */
1138                         exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
1139                         res |= CF_CHANGED;
1140                 }
1141                 kill_node(load);
1142                 reduce_adr_usage(ptr);
1143                 return res | DF_CHANGED;
1144         }
1145
1146         /* Load from a constant polymorphic field, where we can resolve
1147            polymorphism. */
1148         value = transform_polymorph_Load(load);
1149         if (value == load) {
1150                 value = NULL;
1151                 /* check if we can determine the entity that will be loaded */
1152                 ent = find_constant_entity(ptr);
1153                 if (ent != NULL
1154                                 && get_entity_visibility(ent) != ir_visibility_external) {
1155                         /* a static allocation that is not external: there should be NO
1156                          * exception when loading even if we cannot replace the load itself.
1157                          */
1158
1159                         /* no exception, clear the info field as it might be checked later again */
1160                         if (info->projs[pn_Load_X_except]) {
1161                                 ir_graph *irg = get_irn_irg(load);
1162                                 exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
1163                                 info->projs[pn_Load_X_except] = NULL;
1164                                 res |= CF_CHANGED;
1165                         }
1166                         if (info->projs[pn_Load_X_regular]) {
1167                                 exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
1168                                 info->projs[pn_Load_X_regular] = NULL;
1169                                 res |= CF_CHANGED;
1170                         }
1171
1172                         if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT) {
1173                                 if (has_entity_initializer(ent)) {
1174                                         /* new style initializer */
1175                                         value = find_compound_ent_value(ptr);
1176                                 } else if (entity_has_compound_ent_values(ent)) {
1177                                         /* old style initializer */
1178                                         compound_graph_path *path = get_accessed_path(ptr);
1179
1180                                         if (path != NULL) {
1181                                                 assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
1182
1183                                                 value = get_compound_ent_value_by_path(ent, path);
1184                                                 DB((dbg, LEVEL_1, "  Constant access at %F%F resulted in %+F\n", ent, path, value));
1185                                                 free_compound_graph_path(path);
1186                                         }
1187                                 }
1188                                 if (value != NULL) {
1189                                         ir_graph *irg = get_irn_irg(load);
1190                                         value = can_replace_load_by_const(load, value);
1191                                         if (value != NULL && is_Sel(ptr) &&
1192                                                         !is_irg_state(irg, IR_GRAPH_STATE_IMPLICIT_BITFIELD_MASKING)) {
1193                                                 /* frontend has inserted masking operations after bitfield accesses,
1194                                                  * so we might have to shift the const. */
1195                                                 unsigned char bit_offset = get_entity_offset_bits_remainder(get_Sel_entity(ptr));
1196                                                 ir_tarval *tv_old = get_Const_tarval(value);
1197                                                 ir_tarval *tv_offset = new_tarval_from_long(bit_offset, mode_Bu);
1198                                                 ir_tarval *tv_new = tarval_shl(tv_old, tv_offset);
1199                                                 value = new_r_Const(irg, tv_new);
1200                                         }
1201                                 }
1202                         }
1203                 }
1204         }
1205         if (value != NULL) {
1206                 /* we completely replace the load by this value */
1207                 if (info->projs[pn_Load_X_except]) {
1208                         ir_graph *irg = get_irn_irg(load);
1209                         exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
1210                         info->projs[pn_Load_X_except] = NULL;
1211                         res |= CF_CHANGED;
1212                 }
1213                 if (info->projs[pn_Load_X_regular]) {
1214                         exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
1215                         info->projs[pn_Load_X_regular] = NULL;
1216                         res |= CF_CHANGED;
1217                 }
1218                 if (info->projs[pn_Load_M]) {
1219                         exchange(info->projs[pn_Load_M], mem);
1220                         res |= DF_CHANGED;
1221                 }
1222                 if (info->projs[pn_Load_res]) {
1223                         exchange(info->projs[pn_Load_res], value);
1224                         res |= DF_CHANGED;
1225                 }
1226                 kill_node(load);
1227                 reduce_adr_usage(ptr);
1228                 return res;
1229         }
1230
1231         /* Check if the address of this Load is used more than once.
1232          * If not, this Load cannot be removed in any case. */
1233         if (get_irn_n_uses(ptr) <= 1 && get_irn_n_uses(get_base_and_offset(ptr, &dummy)) <= 1)
1234                 return res;
1235
1236         /*
1237          * follow the memory chain as long as there are only Loads
1238          * and try to replace the current Load or Store by a previous one.
1239          * Note that in unreachable loops we might reach the Load again,
1240          * and we can also fall into a cycle.
1241          * We break such cycles using a special visited flag.
1242          */
1243         INC_MASTER();
1244         res = follow_Mem_chain(load, skip_Proj(mem));
1245         return res;
1246 }  /* optimize_load */
1247
1248 /**
1249  * Check whether a value of mode new_mode would completely overwrite a value
1250  * of mode old_mode in memory.
1251  */
1252 static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
1253 {
1254         return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
1255 }  /* is_completely_overwritten */
1256
1257 /**
1258  * Check whether small is a part of large (starting at same address).
1259  */
1260 static int is_partially_same(ir_node *small, ir_node *large)
1261 {
1262         ir_mode *sm = get_irn_mode(small);
1263         ir_mode *lm = get_irn_mode(large);
1264
1265         /* FIXME: Check endianness */
1266         return is_Conv(small) && get_Conv_op(small) == large
1267             && get_mode_size_bytes(sm) < get_mode_size_bytes(lm)
1268             && get_mode_arithmetic(sm) == irma_twos_complement
1269             && get_mode_arithmetic(lm) == irma_twos_complement;
1270 }  /* is_partially_same */
1271
1272 /**
1273  * Follow the memory chain as long as there are only Loads and alias-free Stores.
1274  *
1275  * INC_MASTER() must be called before diving in.
1276  */
1277 static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr)
1278 {
1279         unsigned res = 0;
1280         ldst_info_t *info = (ldst_info_t*)get_irn_link(store);
1281         ir_node *pred;
1282         ir_node *ptr = get_Store_ptr(store);
1283         ir_node *mem = get_Store_mem(store);
1284         ir_node *value = get_Store_value(store);
1285         ir_mode *mode  = get_irn_mode(value);
1286         ir_node *block = get_nodes_block(store);
1287
1288         for (pred = curr; pred != store;) {
1289                 ldst_info_t *pred_info = (ldst_info_t*)get_irn_link(pred);
1290
1291                 /*
1292                  * BEWARE: one might think that checking the modes is useless, because
1293                  * if the pointers are identical, they refer to the same object.
1294                  * This is only true in strongly typed languages, not in C, where the
1295                  * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b; ...
1296                  * However, if the size of the mode that is written is bigger than or
1297                  * equal to the size of the old one, the old value is completely
1298                  * overwritten and can be killed ...
1299                  */
1300                 if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
1301                     get_nodes_block(pred) == block) {
1302                         /*
1303                          * a Store after a Store in the same Block -- a write after write.
1304                          */
1305
1306                         /*
1307                          * We may remove the first Store, if the old value is completely
1308                          * overwritten or the old value is a part of the new value,
1309                          * and if it does not have an exception handler.
1310                          *
1311                          * TODO: What if both have the same exception handler?
1312                          */
1313                         if (get_Store_volatility(pred) != volatility_is_volatile
1314                                 && !pred_info->projs[pn_Store_X_except]) {
1315                                 ir_node *predvalue = get_Store_value(pred);
1316                                 ir_mode *predmode  = get_irn_mode(predvalue);
1317
1318                                 if (is_completely_overwritten(predmode, mode)
1319                                         || is_partially_same(predvalue, value)) {
1320                                         DBG_OPT_WAW(pred, store);
1321                                         exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
1322                                         kill_node(pred);
1323                                         reduce_adr_usage(ptr);
1324                                         return DF_CHANGED;
1325                                 }
1326                         }
1327
1328                         /*
1329                          * We may remove the Store, if the old value already contains
1330                          * the new value, and if it does not have an exception handler.
1331                          *
1332                          * TODO: What if both have the same exception handler?
1333                          */
1334                         if (get_Store_volatility(store) != volatility_is_volatile
1335                                 && !info->projs[pn_Store_X_except]) {
1336                                 ir_node *predvalue = get_Store_value(pred);
1337
1338                                 if (is_partially_same(value, predvalue)) {
1339                                         DBG_OPT_WAW(pred, store);
1340                                         exchange(info->projs[pn_Store_M], mem);
1341                                         kill_node(store);
1342                                         reduce_adr_usage(ptr);
1343                                         return DF_CHANGED;
1344                                 }
1345                         }
1346                 } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
1347                            value == pred_info->projs[pn_Load_res]) {
1348                         /*
1349                          * a Store of a value just loaded from the same address
1350                          * -- a write after read.
1351                          * We may remove the Store, if it does not have an exception
1352                          * handler.
1353                          */
1354                         if (! info->projs[pn_Store_X_except]) {
1355                                 DBG_OPT_WAR(store, pred);
1356                                 exchange(info->projs[pn_Store_M], mem);
1357                                 kill_node(store);
1358                                 reduce_adr_usage(ptr);
1359                                 return DF_CHANGED;
1360                         }
1361                 }
1362
1363                 if (is_Store(pred)) {
1364                         /* check if we can pass through this store */
1365                         ir_alias_relation rel = get_alias_relation(
1366                                 get_Store_ptr(pred),
1367                                 get_irn_mode(get_Store_value(pred)),
1368                                 ptr, mode);
1369                         /* if there might be an alias, we cannot pass this Store */
1370                         if (rel != ir_no_alias)
1371                                 break;
1372                         pred = skip_Proj(get_Store_mem(pred));
1373                 } else if (is_Load(pred)) {
1374                         ir_alias_relation rel = get_alias_relation(
1375                                 get_Load_ptr(pred), get_Load_mode(pred),
1376                                 ptr, mode);
1377                         if (rel != ir_no_alias)
1378                                 break;
1379
1380                         pred = skip_Proj(get_Load_mem(pred));
1381                 } else {
1382                         /* follow only Load chains */
1383                         break;
1384                 }
1385
1386                 /* check for cycles */
1387                 if (NODE_VISITED(pred_info))
1388                         break;
1389                 MARK_NODE(pred_info);
1390         }
1391
1392         if (is_Sync(pred)) {
1393                 int i;
1394
1395                 /* handle all Sync predecessors */
1396                 for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
1397                         res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
1398                         if (res)
1399                                 break;
1400                 }
1401         }
1402         return res;
1403 }  /* follow_Mem_chain_for_Store */
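
/*
 * Illustration (editor's sketch): the two eliminations above in C terms,
 * assuming g is not volatile and no exception handlers are attached:
 *
 *   g = 1;        // write after write: dead, completely overwritten
 *   g = 2;
 *
 *   t = g;        // write after read: the Store below stores the value
 *   g = t;        //                   just loaded and is removed
 */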
1404
1405 /** find entity used as base for an address calculation */
1406 static ir_entity *find_entity(ir_node *ptr)
1407 {
1408         switch (get_irn_opcode(ptr)) {
1409         case iro_SymConst:
1410                 return get_SymConst_entity(ptr);
1411         case iro_Sel: {
1412                 ir_node *pred = get_Sel_ptr(ptr);
1413                 if (get_irg_frame(get_irn_irg(ptr)) == pred)
1414                         return get_Sel_entity(ptr);
1415
1416                 return find_entity(pred);
1417         }
1418         case iro_Sub:
1419         case iro_Add: {
1420                 ir_node *left = get_binop_left(ptr);
1421                 ir_node *right;
1422                 if (mode_is_reference(get_irn_mode(left)))
1423                         return find_entity(left);
1424                 right = get_binop_right(ptr);
1425                 if (mode_is_reference(get_irn_mode(right)))
1426                         return find_entity(right);
1427                 return NULL;
1428         }
1429         default:
1430                 return NULL;
1431         }
1432 }
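
/*
 * Illustration (editor's sketch): address shapes find_entity() resolves:
 *
 *   SymConst(&g)               -> g      (a global entity)
 *   Sel(frame, v)              -> v      (a local on the stack frame)
 *   Add(SymConst(&g), off)     -> g      (base plus offset)
 *
 * Any other shape yields NULL.
 */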
1433
1434 /**
1435  * optimize a Store
1436  *
1437  * @param store  the Store node
1438  */
1439 static unsigned optimize_store(ir_node *store)
1440 {
1441         ir_node   *ptr;
1442         ir_node   *mem;
1443         ir_entity *entity;
1444
1445         if (get_Store_volatility(store) == volatility_is_volatile)
1446                 return 0;
1447
1448         ptr    = get_Store_ptr(store);
1449         entity = find_entity(ptr);
1450
1451         /* a store to an entity which is never read is unnecessary */
1452         if (entity != NULL && !(get_entity_usage(entity) & ir_usage_read)) {
1453                 ldst_info_t *info = (ldst_info_t*)get_irn_link(store);
1454                 if (info->projs[pn_Store_X_except] == NULL) {
1455                         DB((dbg, LEVEL_1, "  Killing useless %+F to never read entity %+F\n", store, entity));
1456                         exchange(info->projs[pn_Store_M], get_Store_mem(store));
1457                         kill_node(store);
1458                         reduce_adr_usage(ptr);
1459                         return DF_CHANGED;
1460                 }
1461         }
1462
1463         /* Check, if the address of this Store is used more than once.
1464          * If not, this Store cannot be removed in any case. */
1465         if (get_irn_n_uses(ptr) <= 1)
1466                 return 0;
1467
1468         mem = get_Store_mem(store);
1469
1470         /* follow the memory chain as long as there are only Loads */
1471         INC_MASTER();
1472
1473         return follow_Mem_chain_for_Store(store, skip_Proj(mem));
1474 }  /* optimize_store */
1475
1476 /**
1477  * walker, optimizes Phi after Stores to identical places:
1478  * Does the following optimization:
1479  * @verbatim
1480  *
1481  *   val1   val2   val3          val1  val2  val3
1482  *    |      |      |               \    |    /
1483  *  Store  Store  Store              \   |   /
1484  *      \    |    /                   PhiData
1485  *       \   |   /                       |
1486  *        \  |  /                      Store
1487  *          PhiM
1488  *
1489  * @endverbatim
1490  * This reduces the number of Stores and allows for predicated execution.
1491  * It moves Stores back towards the end of a function, which may be bad.
1492  *
1493  * This is only possible if the predecessor blocks have only one successor.
1494  */
1495 static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
1496 {
1497         int i, n;
1498         ir_node *store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
1499 #ifdef DO_CACHEOPT
1500         ir_node *old_store;
1501 #endif
1502         ir_mode *mode;
1503         ir_node **inM, **inD, **projMs;
1504         int *idx;
1505         dbg_info *db = NULL;
1506         ldst_info_t *info;
1507         block_info_t *bl_info;
1508         unsigned res = 0;
1509
1510         /* Must be a memory Phi */
1511         if (get_irn_mode(phi) != mode_M)
1512                 return 0;
1513
1514         n = get_Phi_n_preds(phi);
1515         if (n <= 0)
1516                 return 0;
1517
1518         /* the memory Proj of the Store may only be used by this Phi */
1519         projM = get_Phi_pred(phi, 0);
1520         if (get_irn_n_edges(projM) != 1)
1521                 return 0;
1522
1523         store = skip_Proj(projM);
1524 #ifdef DO_CACHEOPT
1525         old_store = store;
1526 #endif
1527         if (!is_Store(store))
1528                 return 0;
1529
1530         block = get_nodes_block(store);
1531
1532         /* check if the block is post dominated by the Phi-block
1533            and has no exception exit */
1534         bl_info = (block_info_t*)get_irn_link(block);
1535         if (bl_info->flags & BLOCK_HAS_EXC)
1536                 return 0;
1537
1538         phi_block = get_nodes_block(phi);
1539         if (! block_strictly_postdominates(phi_block, block))
1540                 return 0;
1541
1542         /* this is the address of the store */
1543         ptr  = get_Store_ptr(store);
1544         mode = get_irn_mode(get_Store_value(store));
1545         info = (ldst_info_t*)get_irn_link(store);
1546         exc  = info->exc_block;
1547
1548         for (i = 1; i < n; ++i) {
1549                 ir_node *pred = get_Phi_pred(phi, i);
1550
1551                 if (get_irn_n_edges(pred) != 1)
1552                         return 0;
1553
1554                 pred = skip_Proj(pred);
1555                 if (!is_Store(pred))
1556                         return 0;
1557
1558                 if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
1559                         return 0;
1560
1561                 info = (ldst_info_t*)get_irn_link(pred);
1562
1563                 /* check, if all stores have the same exception flow */
1564                 if (exc != info->exc_block)
1565                         return 0;
1566
1567                 block = get_nodes_block(pred);
1568
1569                 /* check if the block is post dominated by the Phi-block
1570                    and has no exception exit. Note that block must be different from
1571                    the Phi-block, else we would move a Store from the end of a block to
1572                    its start... */
1573                 bl_info = (block_info_t*)get_irn_link(block);
1574                 if (bl_info->flags & BLOCK_HAS_EXC)
1575                         return 0;
1576                 if (block == phi_block || ! block_postdominates(phi_block, block))
1577                         return 0;
1578         }
1579
1580         /*
1581          * ok, if we get here, we found that all predecessors of the Phi
1582          * are Stores to the same address with the same mode. That means
1583          * whichever way we enter the block of the Phi, we do a Store.
1584          * So, we can move the Store into the current block:
1585          *
1586          *   val1    val2    val3          val1  val2  val3
1587          *    |       |       |               \    |    /
1588          * | Str | | Str | | Str |             \   |   /
1589          *      \     |     /                   PhiData
1590          *       \    |    /                       |
1591          *        \   |   /                       Str
1592          *           PhiM
1593          *
1594          * This is only allowed if the predecessor blocks have only one successor.
1595          */
1596
1597         NEW_ARR_A(ir_node *, projMs, n);
1598         NEW_ARR_A(ir_node *, inM, n);
1599         NEW_ARR_A(ir_node *, inD, n);
1600         NEW_ARR_A(int, idx, n);
1601
1602         /* Prepare: collect all Store nodes.  We must do this
1603            first because we may otherwise lose a Store when exchanging its
1604            memory Proj.
1605          */
1606         for (i = n - 1; i >= 0; --i) {
1607                 ir_node *store;
1608
1609                 projMs[i] = get_Phi_pred(phi, i);
1610                 assert(is_Proj(projMs[i]));
1611
1612                 store = get_Proj_pred(projMs[i]);
1613                 info  = (ldst_info_t*)get_irn_link(store);
1614
1615                 inM[i] = get_Store_mem(store);
1616                 inD[i] = get_Store_value(store);
1617                 idx[i] = info->exc_idx;
1618         }
1619         block = get_nodes_block(phi);
1620
1621         /* second step: create a new memory Phi */
1622         phiM = new_rd_Phi(get_irn_dbg_info(phi), block, n, inM, mode_M);
1623
1624         /* third step: create a new data Phi */
1625         phiD = new_rd_Phi(get_irn_dbg_info(phi), block, n, inD, mode);
1626
1627         /* rewire memory and kill the node */
1628         for (i = n - 1; i >= 0; --i) {
1629                 ir_node *proj  = projMs[i];
1630
1631                 if (is_Proj(proj)) {
1632                         ir_node *store = get_Proj_pred(proj);
1633                         exchange(proj, inM[i]);
1634                         kill_node(store);
1635                 }
1636         }
1637
1638         /* fourth step: create the Store */
1639         store = new_rd_Store(db, block, phiM, ptr, phiD, cons_none);
1640 #ifdef DO_CACHEOPT
1641         co_set_irn_name(store, co_get_irn_ident(old_store));
1642 #endif
1643
1644         projM = new_rd_Proj(NULL, store, mode_M, pn_Store_M);
1645
1646         info = get_ldst_info(store, &wenv->obst);
1647         info->projs[pn_Store_M] = projM;
1648
1649         /* fifth step: repair exception flow */
1650         if (exc) {
1651                 ir_node *projX = new_rd_Proj(NULL, store, mode_X, pn_Store_X_except);
1652
1653                 info->projs[pn_Store_X_except] = projX;
1654                 info->exc_block                = exc;
1655                 info->exc_idx                  = idx[0];
1656
1657                 for (i = 0; i < n; ++i) {
1658                         set_Block_cfgpred(exc, idx[i], projX);
1659                 }
1660
1661                 if (n > 1) {
1662                         /* TODO: the exception block should be optimized, as some inputs are identical now */
1663                 }
1664
1665                 res |= CF_CHANGED;
1666         }
1667
1668         /* sixth step: replace old Phi */
1669         exchange(phi, projM);
1670
1671         return res | DF_CHANGED;
1672 }  /* optimize_phi */
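
/*
 * Illustration (editor's sketch): in C, the transformation turns
 *
 *   if (c) g = a; else g = b;     // one Store per predecessor block
 *
 * into the equivalent of
 *
 *   g = c ? a : b;                // one Store fed by a data Phi
 *
 * assuming both predecessor blocks have only one successor and no
 * exceptional control flow.
 */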
1673
1674 /**
1675  * walker, do the optimizations
1676  */
1677 static void do_load_store_optimize(ir_node *n, void *env)
1678 {
1679         walk_env_t *wenv = (walk_env_t*)env;
1680
1681         switch (get_irn_opcode(n)) {
1682
1683         case iro_Load:
1684                 wenv->changes |= optimize_load(n);
1685                 break;
1686
1687         case iro_Store:
1688                 wenv->changes |= optimize_store(n);
1689                 break;
1690
1691         case iro_Phi:
1692                 wenv->changes |= optimize_phi(n, wenv);
1693                 break;
1694
1695         default:
1696                 break;
1697         }
1698 }  /* do_load_store_optimize */
1699
1700 /** A scc. */
1701 typedef struct scc {
1702         ir_node *head;      /**< the head of the list */
1703 } scc;
1704
1705 /** A node entry. */
1706 typedef struct node_entry {
1707         unsigned DFSnum;    /**< the DFS number of this node */
1708         unsigned low;       /**< the low number of this node */
1709         int      in_stack;  /**< flag, set if the node is on the stack */
1710         ir_node  *next;     /**< link to the next node in the same scc */
1711         scc      *pscc;     /**< the scc of this node */
1712         unsigned POnum;     /**< the post order number for blocks */
1713 } node_entry;
1714
1715 /** The loop environment for the SCC walk. */
1716 typedef struct loop_env {
1717         ir_nodehashmap_t map;
1718         struct obstack   obst;
1719         ir_node          **stack;      /**< the node stack */
1720         size_t           tos;          /**< tos index */
1721         unsigned         nextDFSnum;   /**< the current DFS number */
1722         unsigned         POnum;        /**< current post order number */
1723
1724         unsigned         changes;      /**< a bitmask of graph changes */
1725 } loop_env;
1726
1727 /**
1728  * Gets the node_entry of a node.
1729  */
1730 static node_entry *get_irn_ne(ir_node *irn, loop_env *env)
1731 {
1732         node_entry *e = (node_entry*)ir_nodehashmap_get(&env->map, irn);
1733
1734         if (e == NULL) {
1735                 e = OALLOC(&env->obst, node_entry);
1736                 memset(e, 0, sizeof(*e));
1737                 ir_nodehashmap_insert(&env->map, irn, e);
1738         }
1739         return e;
1740 }  /* get_irn_ne */
1741
1742 /**
1743  * Push a node onto the stack.
1744  *
1745  * @param env   the loop environment
1746  * @param n     the node to push
1747  */
1748 static void push(loop_env *env, ir_node *n)
1749 {
1750         node_entry *e;
1751
1752         if (env->tos == ARR_LEN(env->stack)) {
1753                 size_t nlen = ARR_LEN(env->stack) * 2;
1754                 ARR_RESIZE(ir_node *, env->stack, nlen);
1755         }
1756         env->stack[env->tos++] = n;
1757         e = get_irn_ne(n, env);
1758         e->in_stack = 1;
1759 }  /* push */
1760
1761 /**
1762  * pop a node from the stack
1763  *
1764  * @param env   the loop environment
1765  *
1766  * @return  The topmost node
1767  */
1768 static ir_node *pop(loop_env *env)
1769 {
1770         ir_node *n = env->stack[--env->tos];
1771         node_entry *e = get_irn_ne(n, env);
1772
1773         e->in_stack = 0;
1774         return n;
1775 }  /* pop */
1776
1777 /**
1778  * Check if irn is a region constant.
1779  * The block or irn must strictly dominate the header block.
1780  *
1781  * @param irn           the node to check
1782  * @param header_block  the header block of the induction variable
1783  */
1784 static int is_rc(ir_node *irn, ir_node *header_block)
1785 {
1786         ir_node *block = get_nodes_block(irn);
1787
1788         return (block != header_block) && block_dominates(block, header_block);
1789 }  /* is_rc */
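
/*
 * Illustration (editor's sketch): in
 *
 *   x = ...;               // computed in block B
 *   while (...) { ... }    // loop with header block H
 *
 * x is a region constant for the loop if B != H and B dominates H,
 * i.e. x is available unchanged on every path into and around the loop.
 */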
1790
1791 typedef struct phi_entry phi_entry;
1792 struct phi_entry {
1793         ir_node   *phi;    /**< A phi with a region const memory. */
1794         int       pos;     /**< The position of the region const memory */
1795         ir_node   *load;   /**< the newly created load for this phi */
1796         phi_entry *next;
1797 };
1798
1799 /**
1800  * An entry in the avail set.
1801  */
1802 typedef struct avail_entry_t {
1803         ir_node *ptr;   /**< the address pointer */
1804         ir_mode *mode;  /**< the load mode */
1805         ir_node *load;  /**< the associated Load */
1806 } avail_entry_t;
1807
1808 /**
1809  * Compare two avail entries.
1810  */
1811 static int cmp_avail_entry(const void *elt, const void *key, size_t size)
1812 {
1813         const avail_entry_t *a = (const avail_entry_t*)elt;
1814         const avail_entry_t *b = (const avail_entry_t*)key;
1815         (void) size;
1816
1817         return a->ptr != b->ptr || a->mode != b->mode;
1818 }  /* cmp_avail_entry */
1819
1820 /**
1821  * Calculate the hash value of an avail entry.
1822  */
1823 static unsigned hash_cache_entry(const avail_entry_t *entry)
1824 {
1825         return get_irn_idx(entry->ptr) * 9 + HASH_PTR(entry->mode);
1826 }  /* hash_cache_entry */
1827
1828 /**
1829  * Move Loads out of loops if possible.
1830  *
1831  * @param pscc   the loop described by an SCC
1832  * @param env    the loop environment
1833  */
1834 static void move_loads_out_of_loops(scc *pscc, loop_env *env)
1835 {
1836         ir_node   *phi, *load, *next, *other, *next_other;
1837         int       j;
1838         phi_entry *phi_list = NULL;
1839         set       *avail;
1840
1841         avail = new_set(cmp_avail_entry, 8);
1842
1843         /* collect all outer memories */
1844         for (phi = pscc->head; phi != NULL; phi = next) {
1845                 node_entry *ne = get_irn_ne(phi, env);
1846                 next = ne->next;
1847
1848                 /* check all memory Phi's */
1849                 if (! is_Phi(phi))
1850                         continue;
1851
1852         assert(get_irn_mode(phi) == mode_M && "DFS returned a non-memory Phi");
1853
1854                 for (j = get_irn_arity(phi) - 1; j >= 0; --j) {
1855                         ir_node    *pred = get_irn_n(phi, j);
1856                         node_entry *pe   = get_irn_ne(pred, env);
1857
1858                         if (pe->pscc != ne->pscc) {
1859                                 /* not in the same SCC: this input is a region constant */
1860                                 phi_entry *pent = OALLOC(&env->obst, phi_entry);
1861
1862                                 pent->phi  = phi;
1863                                 pent->pos  = j;
1864                                 pent->next = phi_list;
1865                                 phi_list   = pent;
1866                         }
1867                 }
1868         }
1869         /* no Phis, no fun */
1870         assert(phi_list != NULL && "DFS found a loop without Phi");
1871
1872         /* for now, we cannot handle more than one input (i.e. only reducible control flow) */
1873         if (phi_list->next != NULL)
1874                 return;
1875
1876         for (load = pscc->head; load; load = next) {
1877                 ir_mode *load_mode;
1878                 node_entry *ne = get_irn_ne(load, env);
1879                 next = ne->next;
1880
1881                 if (is_Load(load)) {
1882                         ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
1883                         ir_node     *ptr = get_Load_ptr(load);
1884
1885                         /* for now, we cannot handle Loads with exceptions */
1886                         if (info->projs[pn_Load_res] == NULL || info->projs[pn_Load_X_regular] != NULL || info->projs[pn_Load_X_except] != NULL)
1887                                 continue;
1888
1889                         /* for now, we can only move Load(Global) */
1890                         if (! is_Global(ptr))
1891                                 continue;
1892                         load_mode = get_Load_mode(load);
1893                         for (other = pscc->head; other != NULL; other = next_other) {
1894                                 node_entry *ne = get_irn_ne(other, env);
1895                                 next_other = ne->next;
1896
1897                                 if (is_Store(other)) {
1898                                         ir_alias_relation rel = get_alias_relation(
1899                                                 get_Store_ptr(other),
1900                                                 get_irn_mode(get_Store_value(other)),
1901                                                 ptr, load_mode);
1902                                         /* if there might be an alias, we cannot pass this Store */
1903                                         if (rel != ir_no_alias)
1904                                                 break;
1905                                 }
1906                                 /* only Phis and pure Calls are allowed here, so ignore them */
1907                         }
1908                         if (other == NULL) {
1909                                 ldst_info_t *ninfo = NULL;
1910                                 phi_entry   *pe;
1911                                 dbg_info    *db;
1912
1913                                 /* yep, no aliasing Store found, Load can be moved */
1914                                 DB((dbg, LEVEL_1, "  Found a Load that could be moved: %+F\n", load));
1915
1916                                 db   = get_irn_dbg_info(load);
1917                                 for (pe = phi_list; pe != NULL; pe = pe->next) {
1918                                         int     pos   = pe->pos;
1919                                         ir_node *phi  = pe->phi;
1920                                         ir_node *blk  = get_nodes_block(phi);
1921                                         ir_node *pred = get_Block_cfgpred_block(blk, pos);
1922                                         ir_node *irn, *mem;
1923                                         avail_entry_t entry, *res;
1924
1925                                         entry.ptr  = ptr;
1926                                         entry.mode = load_mode;
1927                                         res = (avail_entry_t*)set_find(avail, &entry, sizeof(entry), hash_cache_entry(&entry));
1928                                         if (res != NULL) {
1929                                                 irn = res->load;
1930                                         } else {
1931                                                 irn = new_rd_Load(db, pred, get_Phi_pred(phi, pos), ptr, load_mode, cons_none);
1932                                                 entry.load = irn;
1933                                                 set_insert(avail, &entry, sizeof(entry), hash_cache_entry(&entry));
1934                                                 DB((dbg, LEVEL_1, "  Created %+F in %+F\n", irn, pred));
1935                                         }
1936                                         pe->load = irn;
1937                                         ninfo = get_ldst_info(irn, &env->obst);
1938
1939                                         ninfo->projs[pn_Load_M] = mem = new_r_Proj(irn, mode_M, pn_Load_M);
1940                                         if (res == NULL) {
1941                                                 /* only a newly created Load is wired into the Phi here;
1942                                                  * if irn came from the cache, do not set the Phi pred again:
1943                                                  * other Loads might already sit between phi and irn. */
1944                                                 set_Phi_pred(phi, pos, mem);
1945                                         }
1946
1947                                         ninfo->projs[pn_Load_res] = new_r_Proj(irn, load_mode, pn_Load_res);
1948                                 }
1949
1950                                 /* now kill the old Load */
1951                                 exchange(info->projs[pn_Load_M], get_Load_mem(load));
1952                                 exchange(info->projs[pn_Load_res], ninfo->projs[pn_Load_res]);
1953
1954                                 env->changes |= DF_CHANGED;
1955                         }
1956                 }
1957         }
1958         del_set(avail);
1959 }  /* move_loads_out_of_loops */
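
/*
 * Illustration (editor's sketch): for a global g, the code above roughly
 * turns
 *
 *   while (cond) { ... use(g); ... }     // Load(g) inside the loop
 *
 * into
 *
 *   tmp = g;                             // Load hoisted before the loop
 *   while (cond) { ... use(tmp); ... }
 *
 * provided no Store (or impure Call) inside the loop may alias &g.
 */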
1960
1961 /**
1962  * Process a loop SCC.
1963  *
1964  * @param pscc  the SCC
1965  * @param env   the loop environment
1966  */
1967 static void process_loop(scc *pscc, loop_env *env)
1968 {
1969         ir_node *irn, *next, *header = NULL;
1970         node_entry *b, *h = NULL;
1971         int j, only_phi, num_outside, process = 0;
1972         ir_node *out_rc;
1973
1974         /* find the header block for this scc */
1975         for (irn = pscc->head; irn; irn = next) {
1976                 node_entry *e = get_irn_ne(irn, env);
1977                 ir_node *block = get_nodes_block(irn);
1978
1979                 next = e->next;
1980                 b = get_irn_ne(block, env);
1981
1982                 if (header != NULL) {
1983                         if (h->POnum < b->POnum) {
1984                                 header = block;
1985                                 h      = b;
1986                         }
1987                 } else {
1988                         header = block;
1989                         h      = b;
1990                 }
1991         }
1992
1993         /* check if this scc contains only Phi, Load or Store nodes */
1994         only_phi    = 1;
1995         num_outside = 0;
1996         out_rc      = NULL;
1997         for (irn = pscc->head; irn; irn = next) {
1998                 node_entry *e = get_irn_ne(irn, env);
1999
2000                 next = e->next;
2001                 switch (get_irn_opcode(irn)) {
2002                 case iro_Call:
2003                         if (is_Call_pure(irn)) {
2004                                 /* pure calls can be treated like loads */
2005                                 only_phi = 0;
2006                                 break;
2007                         }
2008                         /* non-pure calls must be handled like may-alias Stores */
2009                         goto fail;
2010                 case iro_CopyB:
2011                         /* cannot handle CopyB yet */
2012                         goto fail;
2013                 case iro_Load:
2014                         process = 1;
2015                         if (get_Load_volatility(irn) == volatility_is_volatile) {
2016                                 /* cannot handle loops with volatile Loads */
2017                                 goto fail;
2018                         }
2019                         only_phi = 0;
2020                         break;
2021                 case iro_Store:
2022                         if (get_Store_volatility(irn) == volatility_is_volatile) {
2023                                 /* cannot handle loops with volatile Stores */
2024                                 goto fail;
2025                         }
2026                         only_phi = 0;
2027                         break;
2028                 default:
2029                         only_phi = 0;
2030                         break;
2031                 case iro_Phi:
2032                         for (j = get_irn_arity(irn) - 1; j >= 0; --j) {
2033                                 ir_node *pred  = get_irn_n(irn, j);
2034                                 node_entry *pe = get_irn_ne(pred, env);
2035
2036                                 if (pe->pscc != e->pscc) {
2037                                         /* not in the same SCC, must be a region const */
2038                                         if (! is_rc(pred, header)) {
2039                                                 /* not a memory loop */
2040                                                 goto fail;
2041                                         }
2042                                         if (out_rc == NULL) {
2043                                                 /* first region constant */
2044                                                 out_rc = pred;
2045                                                 ++num_outside;
2046                                         } else if (out_rc != pred) {
2047                                                 /* another region constant */
2048                                                 ++num_outside;
2049                                         }
2050                                 }
2051                         }
2052                         break;
2053                 }
2054         }
2055         if (! process)
2056                 goto fail;
2057
2058         /* found a memory loop */
2059         DB((dbg, LEVEL_2, "  Found a memory loop:\n  "));
2060         if (only_phi && num_outside == 1) {
2061                 /* a phi cycle with only one real predecessor can be collapsed */
2062                 DB((dbg, LEVEL_2, "  Found a USELESS Phi cycle:\n  "));
2063
2064                 for (irn = pscc->head; irn; irn = next) {
2065                         node_entry *e = get_irn_ne(irn, env);
2066                         next = e->next;
2067                         exchange(irn, out_rc);
2068                 }
2069                 env->changes |= DF_CHANGED;
2070                 return;
2071         }
2072
2073 #ifdef DEBUG_libfirm
2074         for (irn = pscc->head; irn; irn = next) {
2075                 node_entry *e = get_irn_ne(irn, env);
2076                 next = e->next;
2077                 DB((dbg, LEVEL_2, " %+F,", irn));
2078         }
2079         DB((dbg, LEVEL_2, "\n"));
2080 #endif
2081         move_loads_out_of_loops(pscc, env);
2082
2083 fail:
2084         ;
2085 }  /* process_loop */
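
/*
 * Illustration (editor's addition): a useless memory Phi cycle has only
 * Phis inside and a single region constant feeding it from outside:
 *
 *   out_rc --> PhiM <--+
 *               |      |
 *               +------+     every Phi just forwards out_rc
 *
 * so all its Phis can be replaced by out_rc directly.
 */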
2086
2087 /**
2088  * Process a SCC.
2089  *
2090  * @param pscc  the SCC
2091  * @param env   the loop environment
2092  */
2093 static void process_scc(scc *pscc, loop_env *env)
2094 {
2095         ir_node *head = pscc->head;
2096         node_entry *e = get_irn_ne(head, env);
2097
2098 #ifdef DEBUG_libfirm
2099         {
2100                 ir_node *irn, *next;
2101
2102                 DB((dbg, LEVEL_4, " SCC at %p:\n ", pscc));
2103                 for (irn = pscc->head; irn; irn = next) {
2104                         node_entry *e = get_irn_ne(irn, env);
2105
2106                         next = e->next;
2107
2108                         DB((dbg, LEVEL_4, " %+F,", irn));
2109                 }
2110                 DB((dbg, LEVEL_4, "\n"));
2111         }
2112 #endif
2113
2114         if (e->next != NULL) {
2115                 /* this SCC has more than one member */
2116                 process_loop(pscc, env);
2117         }
2118 }  /* process_scc */
2119
2120 /**
2121  * Do Tarjan's SCC algorithm and drive load/store optimization.
2122  *
2123  * @param irn  start at this node
2124  * @param env  the loop environment
2125  */
2126 static void dfs(ir_node *irn, loop_env *env)
2127 {
2128         int i, n;
2129         node_entry *node = get_irn_ne(irn, env);
2130
2131         mark_irn_visited(irn);
2132
2133         node->DFSnum = env->nextDFSnum++;
2134         node->low    = node->DFSnum;
2135         push(env, irn);
2136
2137         /* handle preds */
2138         if (is_Phi(irn) || is_Sync(irn)) {
2139                 n = get_irn_arity(irn);
2140                 for (i = 0; i < n; ++i) {
2141                         ir_node *pred = get_irn_n(irn, i);
2142                         node_entry *o = get_irn_ne(pred, env);
2143
2144                         if (!irn_visited(pred)) {
2145                                 dfs(pred, env);
2146                                 node->low = MIN(node->low, o->low);
2147                         }
2148                         if (o->DFSnum < node->DFSnum && o->in_stack)
2149                                 node->low = MIN(o->DFSnum, node->low);
2150                 }
2151         } else if (is_fragile_op(irn)) {
2152                 ir_node *pred = get_fragile_op_mem(irn);
2153                 node_entry *o = get_irn_ne(pred, env);
2154
2155                 if (!irn_visited(pred)) {
2156                         dfs(pred, env);
2157                         node->low = MIN(node->low, o->low);
2158                 }
2159                 if (o->DFSnum < node->DFSnum && o->in_stack)
2160                         node->low = MIN(o->DFSnum, node->low);
2161         } else if (is_Proj(irn)) {
2162                 ir_node *pred = get_Proj_pred(irn);
2163                 node_entry *o = get_irn_ne(pred, env);
2164
2165                 if (!irn_visited(pred)) {
2166                         dfs(pred, env);
2167                         node->low = MIN(node->low, o->low);
2168                 }
2169                 if (o->DFSnum < node->DFSnum && o->in_stack)
2170                         node->low = MIN(o->DFSnum, node->low);
2171         } else {
2172                 /* IGNORE all other predecessors */
2173         }
2174
2175
2176         if (node->low == node->DFSnum) {
2177                 scc *pscc = OALLOC(&env->obst, scc);
2178                 ir_node *x;
2179
2180                 pscc->head = NULL;
2181                 do {
2182                         node_entry *e;
2183
2184                         x = pop(env);
2185                         e = get_irn_ne(x, env);
2186                         e->pscc    = pscc;
2187                         e->next    = pscc->head;
2188                         pscc->head = x;
2189                 } while (x != irn);
2190
2191                 process_scc(pscc, env);
2192         }
2193 }  /* dfs */
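
/*
 * Note (editor's addition): this is the classic Tarjan scheme. A node
 * roots an SCC exactly when its low-link equals its own DFS number; the
 * SCC then consists of everything popped off the stack down to and
 * including that node, collected above into the pscc list.
 */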
2194
2195 /**
2196  * Do the DFS on the memory edges of a graph.
2197  *
2198  * @param irg  the graph to process
2199  * @param env  the loop environment
2200  */
2201 static void do_dfs(ir_graph *irg, loop_env *env)
2202 {
2203         ir_node  *endblk, *end;
2204         int      i;
2205
2206         inc_irg_visited(irg);
2207
2208         /* visit all memory nodes */
2209         endblk = get_irg_end_block(irg);
2210         for (i = get_Block_n_cfgpreds(endblk) - 1; i >= 0; --i) {
2211                 ir_node *pred = get_Block_cfgpred(endblk, i);
2212
2213                 pred = skip_Proj(pred);
2214                 if (is_Return(pred)) {
2215                         dfs(get_Return_mem(pred), env);
2216                 } else if (is_Raise(pred)) {
2217                         dfs(get_Raise_mem(pred), env);
2218                 } else if (is_fragile_op(pred)) {
2219                         dfs(get_fragile_op_mem(pred), env);
2220                 } else if (is_Bad(pred)) {
2221                         /* ignore non-optimized block predecessor */
2222                 } else {
2223                         assert(0 && "Unknown EndBlock predecessor");
2224                 }
2225         }
2226
2227         /* visit the keep-alives */
2228         end = get_irg_end(irg);
2229         for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
2230                 ir_node *ka = get_End_keepalive(end, i);
2231
2232                 if (is_Phi(ka) && !irn_visited(ka))
2233                         dfs(ka, env);
2234         }
2235 }  /* do_dfs */
2236
2237 /**
2238  * Optimize Loads/Stores in loops.
2239  *
2240  * @param irg  the graph
2241  */
2242 static int optimize_loops(ir_graph *irg)
2243 {
2244         loop_env env;
2245
2246         env.stack         = NEW_ARR_F(ir_node *, 128);
2247         env.tos           = 0;
2248         env.nextDFSnum    = 0;
2249         env.POnum         = 0;
2250         env.changes       = 0;
2251         ir_nodehashmap_init(&env.map);
2252         obstack_init(&env.obst);
2253
2254         /* calculate the SCC's and drive loop optimization. */
2255         do_dfs(irg, &env);
2256
2257         DEL_ARR_F(env.stack);
2258         obstack_free(&env.obst, NULL);
2259         ir_nodehashmap_destroy(&env.map);
2260
2261         return env.changes;
2262 }  /* optimize_loops */
2263
2264 /*
2265  * do the Load/Store optimization
2266  */
2267 static ir_graph_state_t do_loadstore_opt(ir_graph *irg)
2268 {
2269         walk_env_t env;
2270         ir_graph_state_t res = 0;
2271
2272         FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");
2273
2274         assert(get_irg_phase_state(irg) != phase_building);
2275         assert(get_irg_pinned(irg) != op_pin_state_floats &&
2276                 "LoadStore optimization needs pinned graph");
2277
2278         if (get_opt_alias_analysis()) {
2279                 assure_irp_globals_entity_usage_computed();
2280         }
2281
2282         obstack_init(&env.obst);
2283         env.changes = 0;
2284
2285         /* init the links, then collect Loads/Stores/Proj's in lists */
2286         master_visited = 0;
2287         irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);
2288
2289         /* now we have collected enough information, optimize */
2290         irg_walk_graph(irg, NULL, do_load_store_optimize, &env);
2291
2292         env.changes |= optimize_loops(irg);
2293
2294         obstack_free(&env.obst, NULL);
2295
2296         /* Handle graph state */
2297         if (env.changes) {
2298                 edges_deactivate(irg);
2299         }
2300
2301         if (!(env.changes & CF_CHANGED)) {
2302                 res |= IR_GRAPH_STATE_CONSISTENT_DOMINANCE | IR_GRAPH_STATE_NO_BADS;
2303         }
2304
2305         return res;
2306 }
2307
2308 static optdesc_t opt_loadstore = {
2309         "load-store",
2310         IR_GRAPH_STATE_NO_UNREACHABLE_CODE | IR_GRAPH_STATE_CONSISTENT_OUT_EDGES | IR_GRAPH_STATE_NO_CRITICAL_EDGES | IR_GRAPH_STATE_CONSISTENT_DOMINANCE | IR_GRAPH_STATE_CONSISTENT_ENTITY_USAGE,
2311         do_loadstore_opt,
2312 };
2313
2314 int optimize_load_store(ir_graph *irg)
2315 {
2316         perform_irg_optimization(irg, &opt_loadstore);
2317         return 1;
2318 }
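
/*
 * Usage sketch (editor's addition, not part of the original file): the
 * pass is typically run per graph, e.g. over the whole program:
 *
 *   size_t i, n;
 *   for (i = 0, n = get_irp_n_irgs(); i < n; ++i)
 *           optimize_load_store(get_irp_irg(i));
 *
 * The exact irp iteration signatures are assumed from the surrounding
 * libFirm sources of this era.
 */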
2319
2320 ir_graph_pass_t *optimize_load_store_pass(const char *name)
2321 {
2322         return def_graph_pass_ret(name ? name : "ldst", optimize_load_store);
2323 }  /* optimize_load_store_pass */