correctly implement memop handling
[libfirm] / ir / opt / ldstopt.c
1 /*
2  * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief   Load/Store optimizations.
23  * @author  Michael Beck
24  */
25 #include "config.h"
26
27 #include <string.h>
28
29 #include "iroptimize.h"
30 #include "irnode_t.h"
31 #include "irgraph_t.h"
32 #include "irmode_t.h"
33 #include "iropt_t.h"
34 #include "ircons_t.h"
35 #include "irgmod.h"
36 #include "irgwalk.h"
37 #include "irtools.h"
38 #include "tv_t.h"
39 #include "dbginfo_t.h"
40 #include "iropt_dbg.h"
41 #include "irflag_t.h"
42 #include "array_t.h"
43 #include "irhooks.h"
44 #include "iredges.h"
45 #include "irpass.h"
46 #include "irmemory.h"
47 #include "irnodehashmap.h"
48 #include "irgopt.h"
49 #include "set.h"
50 #include "be.h"
51 #include "debug.h"
52 #include "opt_manage.h"
53
54 /** The debug handle. */
55 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
56
57 #undef IMAX
58 #define IMAX(a,b)   ((a) > (b) ? (a) : (b))
59
60 #define MAX_PROJ    IMAX(IMAX((long)pn_Load_max, (long)pn_Store_max), (long)pn_Call_max)
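/* The projs[] array in ldst_info_t below must be able to hold any Proj
 * number of a Load, a Store, or a Call, hence the maximum of the three
 * pn_*_max values. */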
61
62 enum changes_t {
63         DF_CHANGED = 1,       /**< data flow changed */
64         CF_CHANGED = 2,       /**< control flow changed */
65 };
66
67 /**
68  * walker environment
69  */
70 typedef struct walk_env_t {
71         struct obstack obst;          /**< list of all stores */
72         unsigned changes;             /**< a bitmask of graph changes */
73 } walk_env_t;
74
75 /** A Load/Store info. */
76 typedef struct ldst_info_t {
77         ir_node  *projs[MAX_PROJ+1];  /**< list of Proj's of this node */
78         ir_node  *exc_block;          /**< the exception block if available */
79         int      exc_idx;             /**< predecessor index in the exception block */
80         unsigned visited;             /**< visited counter for breaking loops */
81 } ldst_info_t;
82
83 /**
84  * flags for control flow.
85  */
86 enum block_flags_t {
87         BLOCK_HAS_COND = 1,      /**< Block has conditional control flow */
88         BLOCK_HAS_EXC  = 2       /**< Block has exceptional control flow */
89 };
90
91 /**
92  * a Block info.
93  */
94 typedef struct block_info_t {
95         unsigned flags;               /**< flags for the block */
96 } block_info_t;
97
98 /** the master visited flag for loop detection. */
99 static unsigned master_visited = 0;
100
101 #define INC_MASTER()       ++master_visited
102 #define MARK_NODE(info)    (info)->visited = master_visited
103 #define NODE_VISITED(info) ((info)->visited >= master_visited)
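/* Cycle detection scheme: every walk down a memory chain first bumps
 * master_visited via INC_MASTER(); each memop reached during the walk is
 * stamped with the current counter by MARK_NODE(). If NODE_VISITED()
 * already holds for a node, the walk has come around a cycle and stops
 * (see follow_Mem_chain() and follow_Mem_chain_for_Store() below). */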
104
105 /**
106  * get the Load/Store info of a node
107  */
108 static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst)
109 {
110         ldst_info_t *info = (ldst_info_t*)get_irn_link(node);
111
112         if (! info) {
113                 info = OALLOCZ(obst, ldst_info_t);
114                 set_irn_link(node, info);
115         }
116         return info;
117 }  /* get_ldst_info */
118
119 /**
120  * get the Block info of a node
121  */
122 static block_info_t *get_block_info(ir_node *node, struct obstack *obst)
123 {
124         block_info_t *info = (block_info_t*)get_irn_link(node);
125
126         if (! info) {
127                 info = OALLOCZ(obst, block_info_t);
128                 set_irn_link(node, info);
129         }
130         return info;
131 }  /* get_block_info */
132
133 /**
134  * update the projection info for a Load/Store
135  */
136 static unsigned update_projs(ldst_info_t *info, ir_node *proj)
137 {
138         long nr = get_Proj_proj(proj);
139
140         assert(0 <= nr && nr <= MAX_PROJ && "Wrong proj from LoadStore");
141
142         if (info->projs[nr]) {
143                 /* there is already one, do CSE */
144                 exchange(proj, info->projs[nr]);
145                 return DF_CHANGED;
146         }
147         else {
148                 info->projs[nr] = proj;
149                 return 0;
150         }
151 }  /* update_projs */
152
153 /**
154  * update the exception block info for a Load/Store node.
155  *
156  * @param info   the load/store info struct
157  * @param block  the exception handler block for this load/store
158  * @param pos    the control flow input of the block
159  */
160 static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
161 {
162         assert(info->exc_block == NULL && "more than one exception block found");
163
164         info->exc_block = block;
165         info->exc_idx   = pos;
166         return 0;
167 }  /* update_exc */
168
169 /** Return the number of uses of an address node */
170 #define get_irn_n_uses(adr)     get_irn_n_edges(adr)
171
172 /**
173  * walker, collects all Load/Store/Proj nodes
174  *
175  * walks from Start -> End
176  */
177 static void collect_nodes(ir_node *node, void *env)
178 {
179         walk_env_t  *wenv   = (walk_env_t *)env;
180         unsigned     opcode = get_irn_opcode(node);
181         ir_node     *pred, *blk, *pred_blk;
182         ldst_info_t *ldst_info;
183
184         if (opcode == iro_Proj) {
185                 pred   = get_Proj_pred(node);
186                 opcode = get_irn_opcode(pred);
187
188                 if (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call) {
189                         ldst_info = get_ldst_info(pred, &wenv->obst);
190
191                         wenv->changes |= update_projs(ldst_info, node);
192
193                         /*
194                          * Place the Proj in the same block as its
195                          * predecessor Load/Store/Call. This is always ok and
196                          * prevents "non-SSA" form after optimizations if the
197                          * Proj was in the wrong block.
198                          */
199                         blk      = get_nodes_block(node);
200                         pred_blk = get_nodes_block(pred);
201                         if (blk != pred_blk) {
202                                 wenv->changes |= DF_CHANGED;
203                                 set_nodes_block(node, pred_blk);
204                         }
205                 }
206         } else if (opcode == iro_Block) {
207                 int i;
208
209                 for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
210                         ir_node      *pred_block, *proj;
211                         block_info_t *bl_info;
212                         int          is_exc = 0;
213
214                         pred = proj = get_Block_cfgpred(node, i);
215
216                         if (is_Proj(proj)) {
217                                 pred   = get_Proj_pred(proj);
218                                 is_exc = is_x_except_Proj(proj);
219                         }
220
221                         /* ignore Bad predecessors, they will be removed later */
222                         if (is_Bad(pred))
223                                 continue;
224
225                         pred_block = get_nodes_block(pred);
226                         bl_info    = get_block_info(pred_block, &wenv->obst);
227
228                         if (is_fragile_op(pred) && is_exc)
229                                 bl_info->flags |= BLOCK_HAS_EXC;
230                         else if (is_irn_forking(pred))
231                                 bl_info->flags |= BLOCK_HAS_COND;
232
233                         opcode = get_irn_opcode(pred);
234                         if (is_exc && (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call)) {
235                                 ldst_info = get_ldst_info(pred, &wenv->obst);
236
237                                 wenv->changes |= update_exc(ldst_info, node, i);
238                         }
239                 }
240         }
241 }  /* collect_nodes */
242
243 /**
244  * Returns an entity if the address ptr points to a constant one.
245  *
246  * @param ptr  the address
247  *
248  * @return an entity or NULL
249  */
250 static ir_entity *find_constant_entity(ir_node *ptr)
251 {
252         for (;;) {
253                 if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
254                         return get_SymConst_entity(ptr);
255                 } else if (is_Sel(ptr)) {
256                         ir_entity *ent = get_Sel_entity(ptr);
257                         ir_type   *tp  = get_entity_owner(ent);
258
259                         /* Do not fiddle with polymorphism. */
260                         if (is_Class_type(get_entity_owner(ent)) &&
261                                 ((get_entity_n_overwrites(ent)    != 0) ||
262                                 (get_entity_n_overwrittenby(ent) != 0)   ) )
263                                 return NULL;
264
265                         if (is_Array_type(tp)) {
266                                 /* check bounds */
267                                 int i, n;
268
269                                 for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
270                                         ir_node   *bound;
271                                         ir_tarval *tlower, *tupper;
272                                         ir_node   *index = get_Sel_index(ptr, i);
273                                         ir_tarval *tv    = computed_value(index);
274
275                                         /* check if the index is constant */
276                                         if (tv == tarval_bad)
277                                                 return NULL;
278
279                                         bound  = get_array_lower_bound(tp, i);
280                                         tlower = computed_value(bound);
281                                         bound  = get_array_upper_bound(tp, i);
282                                         tupper = computed_value(bound);
283
284                                         if (tlower == tarval_bad || tupper == tarval_bad)
285                                                 return NULL;
286
287                                         if (tarval_cmp(tv, tlower) == ir_relation_less)
288                                                 return NULL;
289                                         if (tarval_cmp(tupper, tv) == ir_relation_less)
290                                                 return NULL;
291
292                                         /* ok, bounds check finished */
293                                 }
294                         }
295
296                         if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT)
297                                 return ent;
298
299                         /* try next */
300                         ptr = get_Sel_ptr(ptr);
301                 } else if (is_Add(ptr)) {
302                         ir_node *l = get_Add_left(ptr);
303                         ir_node *r = get_Add_right(ptr);
304
305                         if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
306                                 ptr = l;
307                         else if (get_irn_mode(r) == get_irn_mode(ptr) && is_Const(l))
308                                 ptr = r;
309                         else
310                                 return NULL;
311
312                         /* for now, we support only one addition, reassoc should fold all others */
313                         if (! is_SymConst(ptr) && !is_Sel(ptr))
314                                 return NULL;
315                 } else if (is_Sub(ptr)) {
316                         ir_node *l = get_Sub_left(ptr);
317                         ir_node *r = get_Sub_right(ptr);
318
319                         if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
320                                 ptr = l;
321                         else
322                                 return NULL;
323                         /* for now, we support only one subtraction, reassoc should fold all others */
324                         if (! is_SymConst(ptr) && !is_Sel(ptr))
325                                 return NULL;
326                 } else
327                         return NULL;
328         }
329 }  /* find_constant_entity */
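/* Example: given
 *     static const int arr[4] = { 1, 2, 3, 5 };
 * an address of the form Sel(SymConst(arr), 2) resolves to the entity arr,
 * because arr has IR_LINKAGE_CONSTANT and the constant index 2 passes the
 * bounds check; a variable index or an out-of-bounds constant yields NULL. */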
330
331 /**
332  * Return the Selection index of a Sel node from dimension n
333  */
334 static long get_Sel_array_index_long(ir_node *n, int dim)
335 {
336         ir_node *index = get_Sel_index(n, dim);
337         assert(is_Const(index));
338         return get_tarval_long(get_Const_tarval(index));
339 }  /* get_Sel_array_index_long */
340
341 /**
342  * Returns the accessed component graph path for a
343  * node computing an address.
344  *
345  * @param ptr    the node computing the address
346  * @param depth  current depth in steps upward from the root
347  *               of the address
348  */
349 static compound_graph_path *rec_get_accessed_path(ir_node *ptr, size_t depth)
350 {
351         compound_graph_path *res = NULL;
352         ir_entity           *root, *field, *ent;
353         size_t              path_len, pos, idx;
354         ir_tarval           *tv;
355         ir_type             *tp;
356
357         if (is_SymConst(ptr)) {
358                 /* a SymConst. If the depth is 0, this is an access to a global
359                  * entity and we don't need a component path, else we know
360                  * at least its length.
361                  */
362                 assert(get_SymConst_kind(ptr) == symconst_addr_ent);
363                 root = get_SymConst_entity(ptr);
364                 res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
365         } else if (is_Sel(ptr)) {
366                 /* it's a Sel, go up until we find the root */
367                 res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);
368                 if (res == NULL)
369                         return NULL;
370
371                 /* fill up the step in the path at the current position */
372                 field    = get_Sel_entity(ptr);
373                 path_len = get_compound_graph_path_length(res);
374                 pos      = path_len - depth - 1;
375                 set_compound_graph_path_node(res, pos, field);
376
377                 if (is_Array_type(get_entity_owner(field))) {
378                         assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
379                         set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
380                 }
381         } else if (is_Add(ptr)) {
382                 ir_mode   *mode;
383                 ir_tarval *tmp;
384
385                 {
386                         ir_node   *l    = get_Add_left(ptr);
387                         ir_node   *r    = get_Add_right(ptr);
388                         if (is_Const(r) && get_irn_mode(l) == get_irn_mode(ptr)) {
389                                 ptr = l;
390                                 tv  = get_Const_tarval(r);
391                         } else {
392                                 ptr = r;
393                                 tv  = get_Const_tarval(l);
394                         }
395                 }
396 ptr_arith:
397                 mode = get_tarval_mode(tv);
398                 tmp  = tv;
399
400                 /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
401                 if (is_Sel(ptr)) {
402                         field = get_Sel_entity(ptr);
403                 } else {
404                         field = get_SymConst_entity(ptr);
405                 }
406                 idx = 0;
407                 for (ent = field;;) {
408                         unsigned   size;
409                         ir_tarval *sz, *tv_index, *tlower, *tupper;
410                         ir_node   *bound;
411
412                         tp = get_entity_type(ent);
413                         if (! is_Array_type(tp))
414                                 break;
415                         ent = get_array_element_entity(tp);
416                         size = get_type_size_bytes(get_entity_type(ent));
417                         sz   = new_tarval_from_long(size, mode);
418
419                         tv_index = tarval_div(tmp, sz);
420                         tmp      = tarval_mod(tmp, sz);
421
422                         if (tv_index == tarval_bad || tmp == tarval_bad)
423                                 return NULL;
424
425                         assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
426                         bound  = get_array_lower_bound(tp, 0);
427                         tlower = computed_value(bound);
428                         bound  = get_array_upper_bound(tp, 0);
429                         tupper = computed_value(bound);
430
431                         if (tlower == tarval_bad || tupper == tarval_bad)
432                                 return NULL;
433
434                         if (tarval_cmp(tv_index, tlower) == ir_relation_less)
435                                 return NULL;
436                         if (tarval_cmp(tupper, tv_index) == ir_relation_less)
437                                 return NULL;
438
439                         /* ok, bounds check finished */
440                         ++idx;
441                 }
442                 if (! tarval_is_null(tmp)) {
443                         /* access to some struct/union member */
444                         return NULL;
445                 }
446
447                 /* should be at least ONE array */
448                 if (idx == 0)
449                         return NULL;
450
451                 res = rec_get_accessed_path(ptr, depth + idx);
452                 if (res == NULL)
453                         return NULL;
454
455                 path_len = get_compound_graph_path_length(res);
456                 pos      = path_len - depth - idx;
457
458                 for (ent = field;;) {
459                         unsigned   size;
460                         ir_tarval *sz, *tv_index;
461                         long       index;
462
463                         tp = get_entity_type(ent);
464                         if (! is_Array_type(tp))
465                                 break;
466                         ent = get_array_element_entity(tp);
467                         set_compound_graph_path_node(res, pos, ent);
468
469                         size = get_type_size_bytes(get_entity_type(ent));
470                         sz   = new_tarval_from_long(size, mode);
471
472                         tv_index = tarval_div(tv, sz);
473                         tv       = tarval_mod(tv, sz);
474
475                         /* worked above, should work again */
476                         assert(tv_index != tarval_bad && tv != tarval_bad);
477
478                         /* bounds already checked above */
479                         index = get_tarval_long(tv_index);
480                         set_compound_graph_path_array_index(res, pos, index);
481                         ++pos;
482                 }
483         } else if (is_Sub(ptr)) {
484                 ir_node *l = get_Sub_left(ptr);
485                 ir_node *r = get_Sub_right(ptr);
486
487                 ptr = l;
488                 tv  = get_Const_tarval(r);
489                 tv  = tarval_neg(tv);
490                 goto ptr_arith;
491         }
492         return res;
493 }  /* rec_get_accessed_path */
494
495 /**
496  * Returns an access path or NULL.  The access path is only
497  * valid if the graph is in phase_high and _no_ address computation is used.
498  */
499 static compound_graph_path *get_accessed_path(ir_node *ptr)
500 {
501         compound_graph_path *gr = rec_get_accessed_path(ptr, 0);
502         return gr;
503 }  /* get_accessed_path */
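/* Illustration: for an access like s.a[3].b, rec_get_accessed_path() first
 * recurses up to the SymConst root to learn the path length and then fills
 * in the entries (a, index 3, b) on the way back down. */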
504
505 typedef struct path_entry {
506         ir_entity         *ent;
507         struct path_entry *next;
508         size_t            index;
509 } path_entry;
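/* rec_find_compound_ent_value() builds a chain of path_entry records on the
 * stack while recursing from the access towards the SymConst root; once the
 * root is reached, the chain is walked front to back to descend through the
 * entity's initializer. */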
510
511 static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
512 {
513         path_entry       entry, *p;
514         ir_entity        *ent, *field;
515         ir_initializer_t *initializer;
516         ir_tarval        *tv;
517         ir_type          *tp;
518         size_t           n;
519
520         entry.next = next;
521         if (is_SymConst(ptr)) {
522                 /* found the root */
523                 ent         = get_SymConst_entity(ptr);
524                 initializer = get_entity_initializer(ent);
525                 for (p = next; p != NULL;) {
526                         if (initializer->kind != IR_INITIALIZER_COMPOUND)
527                                 return NULL;
528                         n  = get_initializer_compound_n_entries(initializer);
529                         tp = get_entity_type(ent);
530
531                         if (is_Array_type(tp)) {
532                                 ent = get_array_element_entity(tp);
533                                 if (ent != p->ent) {
534                                         /* a missing [0] */
535                                         if (0 >= n)
536                                                 return NULL;
537                                         initializer = get_initializer_compound_value(initializer, 0);
538                                         continue;
539                                 }
540                         }
541                         if (p->index >= n)
542                                 return NULL;
543                         initializer = get_initializer_compound_value(initializer, p->index);
544
545                         ent = p->ent;
546                         p   = p->next;
547                 }
548                 tp = get_entity_type(ent);
549                 while (is_Array_type(tp)) {
550                         ent = get_array_element_entity(tp);
551                         tp = get_entity_type(ent);
552                         /* a missing [0] */
553                         n  = get_initializer_compound_n_entries(initializer);
554                         if (0 >= n)
555                                 return NULL;
556                         initializer = get_initializer_compound_value(initializer, 0);
557                 }
558
559                 switch (initializer->kind) {
560                 case IR_INITIALIZER_CONST:
561                         return get_initializer_const_value(initializer);
562                 case IR_INITIALIZER_TARVAL:
563                 case IR_INITIALIZER_NULL:
564                 default:
565                         return NULL;
566                 }
567         } else if (is_Sel(ptr)) {
568                 entry.ent = field = get_Sel_entity(ptr);
569                 tp = get_entity_owner(field);
570                 if (is_Array_type(tp)) {
571                         assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
572                         entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
573                 } else {
574                         size_t i, n_members = get_compound_n_members(tp);
575                         for (i = 0; i < n_members; ++i) {
576                                 if (get_compound_member(tp, i) == field)
577                                         break;
578                         }
579                         if (i >= n_members) {
580                                 /* not found: should NOT happen */
581                                 return NULL;
582                         }
583                         entry.index = i;
584                 }
585                 return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
586         }  else if (is_Add(ptr)) {
587                 ir_mode  *mode;
588                 unsigned pos;
589
590                 {
591                         ir_node *l = get_Add_left(ptr);
592                         ir_node *r = get_Add_right(ptr);
593                         if (is_Const(r)) {
594                                 ptr = l;
595                                 tv  = get_Const_tarval(r);
596                         } else {
597                                 ptr = r;
598                                 tv  = get_Const_tarval(l);
599                         }
600                 }
601 ptr_arith:
602                 mode = get_tarval_mode(tv);
603
604                 /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
605                 if (is_Sel(ptr)) {
606                         field = get_Sel_entity(ptr);
607                 } else {
608                         field = get_SymConst_entity(ptr);
609                 }
610
611                 /* count needed entries */
612                 pos = 0;
613                 for (ent = field;;) {
614                         tp = get_entity_type(ent);
615                         if (! is_Array_type(tp))
616                                 break;
617                         ent = get_array_element_entity(tp);
618                         ++pos;
619                 }
620                 /* should be at least ONE entry */
621                 if (pos == 0)
622                         return NULL;
623
624                 /* allocate the right number of entries */
625                 NEW_ARR_A(path_entry, p, pos);
626
627                 /* fill them up */
628                 pos = 0;
629                 for (ent = field;;) {
630                         unsigned   size;
631                         ir_tarval *sz, *tv_index, *tlower, *tupper;
632                         long       index;
633                         ir_node   *bound;
634
635                         tp = get_entity_type(ent);
636                         if (! is_Array_type(tp))
637                                 break;
638                         ent = get_array_element_entity(tp);
639                         p[pos].ent  = ent;
640                         p[pos].next = &p[pos + 1];
641
642                         size = get_type_size_bytes(get_entity_type(ent));
643                         sz   = new_tarval_from_long(size, mode);
644
645                         tv_index = tarval_div(tv, sz);
646                         tv       = tarval_mod(tv, sz);
647
648                         if (tv_index == tarval_bad || tv == tarval_bad)
649                                 return NULL;
650
651                         assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
652                         bound  = get_array_lower_bound(tp, 0);
653                         tlower = computed_value(bound);
654                         bound  = get_array_upper_bound(tp, 0);
655                         tupper = computed_value(bound);
656
657                         if (tlower == tarval_bad || tupper == tarval_bad)
658                                 return NULL;
659
660                         if (tarval_cmp(tv_index, tlower) == ir_relation_less)
661                                 return NULL;
662                         if (tarval_cmp(tupper, tv_index) == ir_relation_less)
663                                 return NULL;
664
665                         /* ok, bounds check finished */
666                         index = get_tarval_long(tv_index);
667                         p[pos].index = index;
668                         ++pos;
669                 }
670                 if (! tarval_is_null(tv)) {
671                         /* hmm, wrong access */
672                         return NULL;
673                 }
674                 p[pos - 1].next = next;
675                 return rec_find_compound_ent_value(ptr, p);
676         } else if (is_Sub(ptr)) {
677                 ir_node *l = get_Sub_left(ptr);
678                 ir_node *r = get_Sub_right(ptr);
679
680                 ptr = l;
681                 tv  = get_Const_tarval(r);
682                 tv  = tarval_neg(tv);
683                 goto ptr_arith;
684         }
685         return NULL;
686 }
687
688 static ir_node *find_compound_ent_value(ir_node *ptr)
689 {
690         return rec_find_compound_ent_value(ptr, NULL);
691 }
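/* Example: given
 *     static const struct { int x; int y[2]; } s = { 1, { 2, 3 } };
 * an address denoting s.y[1] yields the path (y, index 1), and walking the
 * IR_INITIALIZER_COMPOUND tree along that path returns the Const 3. */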
692
693 /* forward */
694 static void reduce_adr_usage(ir_node *ptr);
695
696 /**
697  * Update a Load that may have lost its users.
698  */
699 static void handle_load_update(ir_node *load)
700 {
701         ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
702
703         /* do NOT touch volatile loads for now */
704         if (get_Load_volatility(load) == volatility_is_volatile)
705                 return;
706
707         if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
708                 ir_node *ptr = get_Load_ptr(load);
709                 ir_node *mem = get_Load_mem(load);
710
711                 /* a Load whose value is neither used nor exception checked, remove it */
712                 exchange(info->projs[pn_Load_M], mem);
713                 if (info->projs[pn_Load_X_regular])
714                         exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
715                 kill_node(load);
716                 reduce_adr_usage(ptr);
717         }
718 }  /* handle_load_update */
719
720 /**
721  * A use of an address node has vanished. Check if this was a Proj
722  * node and update the counters.
723  */
724 static void reduce_adr_usage(ir_node *ptr)
725 {
726         ir_node *pred;
727         if (!is_Proj(ptr))
728                 return;
729         if (get_irn_n_edges(ptr) > 0)
730                 return;
731
732         /* this Proj is dead now */
733         pred = get_Proj_pred(ptr);
734         if (is_Load(pred)) {
735                 ldst_info_t *info = (ldst_info_t*)get_irn_link(pred);
736                 info->projs[get_Proj_proj(ptr)] = NULL;
737
738                 /* this node lost its result proj, handle that */
739                 handle_load_update(pred);
740         }
741 }  /* reduce_adr_usage */
742
743 /**
744  * Check if an already existing value of mode old_mode can be converted
745  * into the needed mode new_mode without loss.
746  */
747 static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode)
748 {
749         unsigned old_size;
750         unsigned new_size;
751         if (old_mode == new_mode)
752                 return true;
753
754         old_size = get_mode_size_bits(old_mode);
755         new_size = get_mode_size_bits(new_mode);
756
757         /* if both modes are two's complement ones, we can always convert the
758            stored value into the needed one. (on big endian machines we currently
759            only support this for modes of the same size) */
760         if (old_size >= new_size &&
761                   get_mode_arithmetic(old_mode) == irma_twos_complement &&
762                   get_mode_arithmetic(new_mode) == irma_twos_complement &&
763                   (!be_get_backend_param()->byte_order_big_endian
764                 || old_size == new_size)) {
765                 return true;
766         }
767         return false;
768 }
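/* Example: a value stored with a 32-bit two's complement mode (e.g. mode_Is)
 * can satisfy a later 8-bit load (e.g. mode_Bu) on a little-endian target,
 * since the caller inserts a Conv to truncate it; on big-endian targets the
 * low-order bits live at a different address, so only same-size reuse is
 * accepted here. */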
769
770 /**
771  * Check whether a Call is at least pure, i.e. it only reads memory.
772  */
773 static unsigned is_Call_pure(ir_node *call)
774 {
775         ir_type *call_tp = get_Call_type(call);
776         unsigned prop = get_method_additional_properties(call_tp);
777
778         /* check first the call type */
779         if ((prop & (mtp_property_const|mtp_property_pure)) == 0) {
780                 /* try the called entity */
781                 ir_node *ptr = get_Call_ptr(call);
782
783                 if (is_SymConst_addr_ent(ptr)) {
784                         ir_entity *ent = get_SymConst_entity(ptr);
785
786                         prop = get_entity_additional_properties(ent);
787                 }
788         }
789         return (prop & (mtp_property_const|mtp_property_pure)) != 0;
790 }  /* is_Call_pure */
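/* A Call that is const or pure (either by its call type or by the called
 * entity) reads but never writes memory, so the memory-chain walks below
 * may step over it just like over a Load. */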
791
792 static ir_node *get_base_and_offset(ir_node *ptr, long *pOffset)
793 {
794         ir_mode *mode  = get_irn_mode(ptr);
795         long    offset = 0;
796
797         /* TODO: long might not be enough, we should probably use some tarval thingy... */
798         for (;;) {
799                 if (is_Add(ptr)) {
800                         ir_node *l = get_Add_left(ptr);
801                         ir_node *r = get_Add_right(ptr);
802
803                         if (get_irn_mode(l) != mode || !is_Const(r))
804                                 break;
805
806                         offset += get_tarval_long(get_Const_tarval(r));
807                         ptr     = l;
808                 } else if (is_Sub(ptr)) {
809                         ir_node *l = get_Sub_left(ptr);
810                         ir_node *r = get_Sub_right(ptr);
811
812                         if (get_irn_mode(l) != mode || !is_Const(r))
813                                 break;
814
815                         offset -= get_tarval_long(get_Const_tarval(r));
816                         ptr     = l;
817                 } else if (is_Sel(ptr)) {
818                         ir_entity *ent = get_Sel_entity(ptr);
819                         ir_type   *tp  = get_entity_owner(ent);
820
821                         if (is_Array_type(tp)) {
822                                 int     size;
823                                 ir_node *index;
824
825                                 /* only one-dimensional arrays yet */
826                                 if (get_Sel_n_indexs(ptr) != 1)
827                                         break;
828                                 index = get_Sel_index(ptr, 0);
829                                 if (! is_Const(index))
830                                         break;
831
832                                 tp = get_entity_type(ent);
833                                 if (get_type_state(tp) != layout_fixed)
834                                         break;
835
836                                 size    = get_type_size_bytes(tp);
837                                 offset += size * get_tarval_long(get_Const_tarval(index));
838                         } else {
839                                 if (get_type_state(tp) != layout_fixed)
840                                         break;
841                                 offset += get_entity_offset(ent);
842                         }
843                         ptr = get_Sel_ptr(ptr);
844                 } else
845                         break;
846         }
847
848         *pOffset = offset;
849         return ptr;
850 }
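/* Example: an address built as Add(Sub(Add(p, 16), 4), 8) decomposes into
 * base p and *pOffset == 20; Sels into fixed-layout compounds contribute
 * their entity offsets the same way, so two accesses can be compared as
 * (base, offset) pairs in try_load_after_store(). */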
851
852 static int try_load_after_store(ir_node *load,
853                 ir_node *load_base_ptr, long load_offset, ir_node *store)
854 {
855         ldst_info_t *info;
856         ir_node *store_ptr      = get_Store_ptr(store);
857         long     store_offset;
858         ir_node *store_base_ptr = get_base_and_offset(store_ptr, &store_offset);
859         ir_node *store_value;
860         ir_mode *store_mode;
861         ir_node *load_ptr;
862         ir_mode *load_mode;
863         long     load_mode_len;
864         long     store_mode_len;
865         long     delta;
866         int      res;
867
868         if (load_base_ptr != store_base_ptr)
869                 return 0;
870
871         load_mode      = get_Load_mode(load);
872         load_mode_len  = get_mode_size_bytes(load_mode);
873         store_mode     = get_irn_mode(get_Store_value(store));
874         store_mode_len = get_mode_size_bytes(store_mode);
875         delta          = load_offset - store_offset;
876         store_value    = get_Store_value(store);
877
878         if (delta != 0 || store_mode != load_mode) {
879                 /* TODO: implement for big-endian */
880                 if (delta < 0 || delta + load_mode_len > store_mode_len
881                                 || (be_get_backend_param()->byte_order_big_endian
882                                     && load_mode_len != store_mode_len))
883                         return 0;
884
885                 if (get_mode_arithmetic(store_mode) != irma_twos_complement ||
886                         get_mode_arithmetic(load_mode)  != irma_twos_complement)
887                         return 0;
888
889
890                 /* produce a shift to adjust offset delta */
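                /* Example (little-endian, two's complement as checked above):
                 * a 32-bit store of 0x11223344 followed by a 1-byte load at
                 * byte offset 1 gives delta == 1; shifting right by
                 * delta * 8 == 8 bits moves the byte 0x33 to the bottom and
                 * the Conv below truncates to the load mode. */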
891                 if (delta > 0) {
892                         ir_node *cnst;
893                         ir_graph *irg = get_irn_irg(load);
894
895                         cnst        = new_r_Const_long(irg, mode_Iu, delta * 8);
896                         store_value = new_r_Shr(get_nodes_block(load),
897                                                                         store_value, cnst, store_mode);
898                 }
899
900                 /* add a Conv if needed */
901                 if (store_mode != load_mode) {
902                         store_value = new_r_Conv(get_nodes_block(load), store_value, load_mode);
903                 }
904         }
905
906         DBG_OPT_RAW(load, store_value);
907
908         info = (ldst_info_t*)get_irn_link(load);
909         if (info->projs[pn_Load_M])
910                 exchange(info->projs[pn_Load_M], get_Load_mem(load));
911
912         res = 0;
913         /* no exception */
914         if (info->projs[pn_Load_X_except]) {
915                 ir_graph *irg = get_irn_irg(load);
916                 exchange( info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
917                 res |= CF_CHANGED;
918         }
919         if (info->projs[pn_Load_X_regular]) {
920                 exchange( info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
921                 res |= CF_CHANGED;
922         }
923
924         if (info->projs[pn_Load_res])
925                 exchange(info->projs[pn_Load_res], store_value);
926
927         load_ptr = get_Load_ptr(load);
928         kill_node(load);
929         reduce_adr_usage(load_ptr);
930         return res | DF_CHANGED;
931 }
932
933 /**
934  * Follow the memory chain as long as there are only Loads,
935  * alias-free Stores, and constant Calls, and try to replace the
936  * current Load by a previous one.
937  * Note that in unreachable loops we might reach the Load again,
938  * and we can also fall into a cycle.
939  * We break such cycles using a special visited flag.
940  *
941  * INC_MASTER() must be called before diving into the chain.
942  */
943 static unsigned follow_Mem_chain(ir_node *load, ir_node *curr)
944 {
945         unsigned    res = 0;
946         ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
947         ir_node     *pred;
948         ir_node     *ptr       = get_Load_ptr(load);
949         ir_node     *mem       = get_Load_mem(load);
950         ir_mode     *load_mode = get_Load_mode(load);
951
952         for (pred = curr; load != pred; ) {
953                 ldst_info_t *pred_info = (ldst_info_t*)get_irn_link(pred);
954
955                 /*
956                  * a Load immediately after a Store -- a read after write.
957                  * We may remove the Load if neither the Load nor the Store has an
958                  * exception handler OR if they are in the same Block. In the latter
959                  * case the Load cannot throw an exception when the previous Store was
960                  * quiet.
961                  *
962                  * Why do we need to check for a Store exception? If the Store cannot
963                  * be executed (ROM) the exception handler might simply jump into
964                  * the Load block :-(
965                  * We could do a little better if we knew that the exception
966                  * handler of the Store jumps directly to the end...
967                  */
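                /* In C terms:  *p = x; y = *p;  can become  *p = x; y = x;
                 * with the Load's Projs rewired by try_load_after_store(). */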
968                 if (is_Store(pred) && ((pred_info->projs[pn_Store_X_except] == NULL
969                                 && info->projs[pn_Load_X_except] == NULL)
970                                 || get_nodes_block(load) == get_nodes_block(pred)))
971                 {
972                         long    load_offset;
973                         ir_node *base_ptr = get_base_and_offset(ptr, &load_offset);
974                         int     changes   = try_load_after_store(load, base_ptr, load_offset, pred);
975
976                         if (changes != 0)
977                                 return res | changes;
978                 } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
979                            can_use_stored_value(get_Load_mode(pred), load_mode)) {
980                         /*
981                          * a Load after a Load -- a read after read.
982                          * We may remove the second Load, if it does not have an exception
983                          * handler OR they are in the same Block. In the latter case
984                          * the Load cannot throw an exception when the previous Load was
985                          * quiet.
986                          *
987                          * Here, there is no need to check if the previous Load has an
988                          * exception handler because they would have exactly the same
989                          * exception...
990                          *
991                          * TODO: implement load-after-load with different mode for big
992                          *       endian
993                          */
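                        /* In C terms:  x = *p; ...; y = *p;  becomes  y = x,
                         * provided no aliasing Store or impure Call sits
                         * between the two Loads on the memory chain. */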
994                         if (info->projs[pn_Load_X_except] == NULL
995                                         || get_nodes_block(load) == get_nodes_block(pred)) {
996                                 ir_node *value;
997
998                                 DBG_OPT_RAR(load, pred);
999
1000                                 /* the result is used */
1001                                 if (info->projs[pn_Load_res]) {
1002                                         if (pred_info->projs[pn_Load_res] == NULL) {
1003                                                 /* create a new Proj again */
1004                                                 pred_info->projs[pn_Load_res] = new_r_Proj(pred, get_Load_mode(pred), pn_Load_res);
1005                                         }
1006                                         value = pred_info->projs[pn_Load_res];
1007
1008                                         /* add a Conv if needed */
1009                                         if (get_Load_mode(pred) != load_mode) {
1010                                                 value = new_r_Conv(get_nodes_block(load), value, load_mode);
1011                                         }
1012
1013                                         exchange(info->projs[pn_Load_res], value);
1014                                 }
1015
1016                                 if (info->projs[pn_Load_M])
1017                                         exchange(info->projs[pn_Load_M], mem);
1018
1019                                 /* no exception */
1020                                 if (info->projs[pn_Load_X_except]) {
1021                                         ir_graph *irg = get_irn_irg(load);
1022                                         exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
1023                                         res |= CF_CHANGED;
1024                                 }
1025                                 if (info->projs[pn_Load_X_regular]) {
1026                                         exchange( info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
1027                                         res |= CF_CHANGED;
1028                                 }
1029
1030                                 kill_node(load);
1031                                 reduce_adr_usage(ptr);
1032                                 return res | DF_CHANGED;
1033                         }
1034                 }
1035
1036                 if (is_Store(pred)) {
1037                         /* check if we can pass through this store */
1038                         ir_alias_relation rel = get_alias_relation(
1039                                 get_Store_ptr(pred),
1040                                 get_irn_mode(get_Store_value(pred)),
1041                                 ptr, load_mode);
1042                         /* if there might be an alias, we cannot pass this Store */
1043                         if (rel != ir_no_alias)
1044                                 break;
1045                         pred = skip_Proj(get_Store_mem(pred));
1046                 } else if (is_Load(pred)) {
1047                         pred = skip_Proj(get_Load_mem(pred));
1048                 } else if (is_Call(pred)) {
1049                         if (is_Call_pure(pred)) {
1050                                 /* The called graph is at least pure, so there are no Store's
1051                                    in it. We can handle it like a Load and skip it. */
1052                                 pred = skip_Proj(get_Call_mem(pred));
1053                         } else {
1054                                 /* there might be Store's in the graph, stop here */
1055                                 break;
1056                         }
1057                 } else {
1058                         /* follow only Load chains */
1059                         break;
1060                 }
1061
1062                 /* check for cycles */
1063                 if (NODE_VISITED(pred_info))
1064                         break;
1065                 MARK_NODE(pred_info);
1066         }
1067
1068         if (is_Sync(pred)) {
1069                 int i;
1070
1071                 /* handle all Sync predecessors */
1072                 for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
1073                         res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
1074                         if (res)
1075                                 return res;
1076                 }
1077         }
1078
1079         return res;
1080 }  /* follow_Mem_chain */
1081
1082 /*
1083  * Check if we can replace the load by a given const from
1084  * the const code irg.
1085  */
1086 ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c)
1087 {
1088         ir_mode  *c_mode = get_irn_mode(c);
1089         ir_mode  *l_mode = get_Load_mode(load);
1090         ir_node  *block  = get_nodes_block(load);
1091         dbg_info *dbgi   = get_irn_dbg_info(load);
1092         ir_node  *res    = copy_const_value(dbgi, c, block);
1093
1094         if (c_mode != l_mode) {
1095                 /* check if the mode matches OR can be easily converted into it */
1096                 if (is_reinterpret_cast(c_mode, l_mode)) {
1097                         /* copy the value from the const code irg and cast it */
1098                         res = new_rd_Conv(dbgi, block, res, l_mode);
1099                 } else {
1100                         return NULL;
1101                 }
1102         }
1103         return res;
1104 }
1105
1106 /**
1107  * optimize a Load
1108  *
1109  * @param load  the Load node
1110  */
1111 static unsigned optimize_load(ir_node *load)
1112 {
1113         ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
1114         ir_node     *mem, *ptr, *value;
1115         ir_entity   *ent;
1116         long        dummy;
1117         unsigned    res = 0;
1118
1119         /* do NOT touch volatile loads for now */
1120         if (get_Load_volatility(load) == volatility_is_volatile)
1121                 return 0;
1122
1123         /* the address of the load to be optimized */
1124         ptr = get_Load_ptr(load);
1125
1126         /* The mem of the Load. Must still be returned after optimization. */
1127         mem = get_Load_mem(load);
1128
1129         if (info->projs[pn_Load_res] == NULL
1130                         && info->projs[pn_Load_X_except] == NULL) {
1131                 /* the value is never used and we don't care about exceptions, remove */
1132                 exchange(info->projs[pn_Load_M], mem);
1133
1134                 if (info->projs[pn_Load_X_regular]) {
1135                         /* should not happen, but if it does, remove it */
1136                         exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
1137                         res |= CF_CHANGED;
1138                 }
1139                 kill_node(load);
1140                 reduce_adr_usage(ptr);
1141                 return res | DF_CHANGED;
1142         }
1143
1144         value = NULL;
1145         /* check if we can determine the entity that will be loaded */
1146         ent = find_constant_entity(ptr);
1147         if (ent != NULL
1148                         && get_entity_visibility(ent) != ir_visibility_external) {
1149                 /* a static allocation that is not external: there should be NO
1150                  * exception when loading even if we cannot replace the load itself.
1151                  */
1152
1153                 /* no exception, clear the info field as it might be checked later again */
1154                 if (info->projs[pn_Load_X_except]) {
1155                         ir_graph *irg = get_irn_irg(load);
1156                         exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
1157                         info->projs[pn_Load_X_except] = NULL;
1158                         res |= CF_CHANGED;
1159                 }
1160                 if (info->projs[pn_Load_X_regular]) {
1161                         exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
1162                         info->projs[pn_Load_X_regular] = NULL;
1163                         res |= CF_CHANGED;
1164                 }
1165
1166                 if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT) {
1167                         if (has_entity_initializer(ent)) {
1168                                 /* new style initializer */
1169                                 value = find_compound_ent_value(ptr);
1170                         } else if (entity_has_compound_ent_values(ent)) {
1171                                 /* old style initializer */
1172                                 compound_graph_path *path = get_accessed_path(ptr);
1173
1174                                 if (path != NULL) {
1175                                         assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
1176
1177                                         value = get_compound_ent_value_by_path(ent, path);
1178                                         DB((dbg, LEVEL_1, "  Constant access at %F%F resulted in %+F\n", ent, path, value));
1179                                         free_compound_graph_path(path);
1180                                 }
1181                         }
1182                         if (value != NULL) {
1183                                 ir_graph *irg = get_irn_irg(load);
1184                                 value = can_replace_load_by_const(load, value);
1185                                 if (value != NULL && is_Sel(ptr) &&
1186                                                 !is_irg_state(irg, IR_GRAPH_STATE_IMPLICIT_BITFIELD_MASKING)) {
1187                                         /* frontend has inserted masking operations after bitfield accesses,
1188                                          * so we might have to shift the const. */
1189                                         unsigned char bit_offset = get_entity_offset_bits_remainder(get_Sel_entity(ptr));
1190                                         ir_tarval *tv_old = get_Const_tarval(value);
1191                                         ir_tarval *tv_offset = new_tarval_from_long(bit_offset, mode_Bu);
1192                                         ir_tarval *tv_new = tarval_shl(tv_old, tv_offset);
1193                                         value = new_r_Const(irg, tv_new);
1194                                 }
1195                         }
1196                 }
1197         }
1198         if (value != NULL) {
1199                 /* we completely replace the load by this value */
1200                 if (info->projs[pn_Load_X_except]) {
1201                         ir_graph *irg = get_irn_irg(load);
1202                         exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
1203                         info->projs[pn_Load_X_except] = NULL;
1204                         res |= CF_CHANGED;
1205                 }
1206                 if (info->projs[pn_Load_X_regular]) {
1207                         exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
1208                         info->projs[pn_Load_X_regular] = NULL;
1209                         res |= CF_CHANGED;
1210                 }
1211                 if (info->projs[pn_Load_M]) {
1212                         exchange(info->projs[pn_Load_M], mem);
1213                         res |= DF_CHANGED;
1214                 }
1215                 if (info->projs[pn_Load_res]) {
1216                         exchange(info->projs[pn_Load_res], value);
1217                         res |= DF_CHANGED;
1218                 }
1219                 kill_node(load);
1220                 reduce_adr_usage(ptr);
1221                 return res;
1222         }
1223
1224         /* Check if the address of this Load is used more than once.
1225          * If not, this Load cannot be removed in any case. */
1226         if (get_irn_n_uses(ptr) <= 1 && get_irn_n_uses(get_base_and_offset(ptr, &dummy)) <= 1)
1227                 return res;
1228
1229         /*
1230          * follow the memory chain as long as there are only Loads
1231          * and try to replace the current Load by a previous one.
1232          * Note that in unreachable loops we might reach the Load again,
1233          * and we can also fall into a cycle.
1234          * We break such cycles using a special visited flag.
1235          */
1236         INC_MASTER();
1237         res = follow_Mem_chain(load, skip_Proj(mem));
1238         return res;
1239 }  /* optimize_load */
1240
1241 /**
1242  * Check whether a value of mode new_mode would completely overwrite a value
1243  * of mode old_mode in memory.
1244  */
1245 static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
1246 {
1247         return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
1248 }  /* is_completely_overwritten */
1249
1250 /**
1251  * Check whether small is a part of large (starting at same address).
1252  */
1253 static int is_partially_same(ir_node *small, ir_node *large)
1254 {
1255         ir_mode *sm = get_irn_mode(small);
1256         ir_mode *lm = get_irn_mode(large);
1257
1258         /* FIXME: Check endianness */
1259         return is_Conv(small) && get_Conv_op(small) == large
1260             && get_mode_size_bytes(sm) < get_mode_size_bytes(lm)
1261             && get_mode_arithmetic(sm) == irma_twos_complement
1262             && get_mode_arithmetic(lm) == irma_twos_complement;
1263 }  /* is_partially_same */
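/* Example (little-endian sketch, see the FIXME above): after
 *     Store(p, v32); Store(p, Conv(v32, 8-bit));
 * the second Store only rewrites bytes the first one already wrote, so
 * follow_Mem_chain_for_Store() below can drop the redundant Store. */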
1264
1265 /**
1266  * follow the memory chain as long as there are only Loads and alias-free Stores.
1267  *
1268  * INC_MASTER() must be called before diving into the chain.
1269  */
1270 static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr)
1271 {
1272         unsigned res = 0;
1273         ldst_info_t *info = (ldst_info_t*)get_irn_link(store);
1274         ir_node *pred;
1275         ir_node *ptr = get_Store_ptr(store);
1276         ir_node *mem = get_Store_mem(store);
1277         ir_node *value = get_Store_value(store);
1278         ir_mode *mode  = get_irn_mode(value);
1279         ir_node *block = get_nodes_block(store);
1280
1281         for (pred = curr; pred != store;) {
1282                 ldst_info_t *pred_info = (ldst_info_t*)get_irn_link(pred);
1283
1284                 /*
1285                  * BEWARE: one might think that checking the modes is useless, because
1286                  * if the pointers are identical, they refer to the same object.
1287                  * This is only true in strongly typed languages, not in C where the
1288                  * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
1289                  * However, if the size of the mode that is written is bigger than or
1290                  * equal to the size of the old one, the old value is completely
1291                  * overwritten and can be killed ...
1292                  */
1293                 if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
1294                     get_nodes_block(pred) == block) {
1295                         /*
1296                          * a Store after a Store in the same Block -- a write after write.
1297                          */
1298
1299                         /*
1300                          * We may remove the first Store, if the old value is completely
1301                          * overwritten or the old value is a part of the new value,
1302                          * and if it does not have an exception handler.
1303                          *
1304                          * TODO: What if both have the same exception handler?
1305                          */
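                        /* e.g. in C:  p->x = 1; p->x = 2;  -- the first Store is dead */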
1306                         if (get_Store_volatility(pred) != volatility_is_volatile
1307                                 && !pred_info->projs[pn_Store_X_except]) {
1308                                 ir_node *predvalue = get_Store_value(pred);
1309                                 ir_mode *predmode  = get_irn_mode(predvalue);
1310
1311                                 if (is_completely_overwritten(predmode, mode)
1312                                         || is_partially_same(predvalue, value)) {
1313                                         DBG_OPT_WAW(pred, store);
1314                                         exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
1315                                         kill_node(pred);
1316                                         reduce_adr_usage(ptr);
1317                                         return DF_CHANGED;
1318                                 }
1319                         }
1320
1321                         /*
1322                          * We may remove the Store, if the old value already contains
1323                          * the new value, and if it does not have an exception handler.
1324                          *
1325                          * TODO: What if both have the same exception handler?
1326                          */
1327                         if (get_Store_volatility(store) != volatility_is_volatile
1328                                 && !info->projs[pn_Store_X_except]) {
1329                                 ir_node *predvalue = get_Store_value(pred);
1330
1331                                 if (is_partially_same(value, predvalue)) {
1332                                         DBG_OPT_WAW(pred, store);
1333                                         exchange(info->projs[pn_Store_M], mem);
1334                                         kill_node(store);
1335                                         reduce_adr_usage(ptr);
1336                                         return DF_CHANGED;
1337                                 }
1338                         }
1339                 } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
1340                            value == pred_info->projs[pn_Load_res]) {
1341                         /*
1342                          * a Store of a value just loaded from the same address
1343                          * -- a write after read.
1344                          * We may remove the Store, if it does not have an exception
1345                          * handler.
1346                          */
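                        /* e.g. in C:  tmp = *p; *p = tmp;  -- the Store is redundant */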
1347                         if (! info->projs[pn_Store_X_except]) {
1348                                 DBG_OPT_WAR(store, pred);
1349                                 exchange(info->projs[pn_Store_M], mem);
1350                                 kill_node(store);
1351                                 reduce_adr_usage(ptr);
1352                                 return DF_CHANGED;
1353                         }
1354                 }
1355
1356                 if (is_Store(pred)) {
1357                         /* check if we can pass through this store */
1358                         ir_alias_relation rel = get_alias_relation(
1359                                 get_Store_ptr(pred),
1360                                 get_irn_mode(get_Store_value(pred)),
1361                                 ptr, mode);
1362                         /* if there might be an alias, we cannot pass this Store */
1363                         if (rel != ir_no_alias)
1364                                 break;
1365                         pred = skip_Proj(get_Store_mem(pred));
1366                 } else if (is_Load(pred)) {
1367                         ir_alias_relation rel = get_alias_relation(
1368                                 get_Load_ptr(pred), get_Load_mode(pred),
1369                                 ptr, mode);
1370                         if (rel != ir_no_alias)
1371                                 break;
1372
1373                         pred = skip_Proj(get_Load_mem(pred));
1374                 } else {
1375                         /* we can only follow Load and Store chains */
1376                         break;
1377                 }
1378
1379                 /* check for cycles */
1380                 if (NODE_VISITED(pred_info))
1381                         break;
1382                 MARK_NODE(pred_info);
1383         }
1384
1385         if (is_Sync(pred)) {
1386                 int i;
1387
1388                 /* handle all Sync predecessors */
1389                 for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
1390                         res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
1391                         if (res)
1392                                 break;
1393                 }
1394         }
1395         return res;
1396 }  /* follow_Mem_chain_for_Store */
1397
1398 /** Find the entity used as base for an address calculation.
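 *  E.g. for an address Add(SymConst(&g), Const 4) this returns the entity of g. */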
1399 static ir_entity *find_entity(ir_node *ptr)
1400 {
1401         switch (get_irn_opcode(ptr)) {
1402         case iro_SymConst:
1403                 return get_SymConst_entity(ptr);
1404         case iro_Sel: {
1405                 ir_node *pred = get_Sel_ptr(ptr);
1406                 if (get_irg_frame(get_irn_irg(ptr)) == pred)
1407                         return get_Sel_entity(ptr);
1408
1409                 return find_entity(pred);
1410         }
1411         case iro_Sub:
1412         case iro_Add: {
1413                 ir_node *left = get_binop_left(ptr);
1414                 ir_node *right;
1415                 if (mode_is_reference(get_irn_mode(left)))
1416                         return find_entity(left);
1417                 right = get_binop_right(ptr);
1418                 if (mode_is_reference(get_irn_mode(right)))
1419                         return find_entity(right);
1420                 return NULL;
1421         }
1422         default:
1423                 return NULL;
1424         }
1425 }
1426
1427 /**
1428  * optimize a Store
1429  *
1430  * @param store  the Store node
1431  */
1432 static unsigned optimize_store(ir_node *store)
1433 {
1434         ir_node   *ptr;
1435         ir_node   *mem;
1436         ir_entity *entity;
1437
1438         if (get_Store_volatility(store) == volatility_is_volatile)
1439                 return 0;
1440
1441         ptr    = get_Store_ptr(store);
1442         entity = find_entity(ptr);
1443
1444         /* a store to an entity which is never read is unnecessary */
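        /* e.g. a Store to a static variable whose value is never loaded anywhere */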
1445         if (entity != NULL && !(get_entity_usage(entity) & ir_usage_read)) {
1446                 ldst_info_t *info = (ldst_info_t*)get_irn_link(store);
1447                 if (info->projs[pn_Store_X_except] == NULL) {
1448                         DB((dbg, LEVEL_1, "  Killing useless %+F to never read entity %+F\n", store, entity));
1449                         exchange(info->projs[pn_Store_M], get_Store_mem(store));
1450                         kill_node(store);
1451                         reduce_adr_usage(ptr);
1452                         return DF_CHANGED;
1453                 }
1454         }
1455
1456         /* Check if the address of this Store is used more than once.
1457          * If not, this Store cannot be removed in any case. */
1458         if (get_irn_n_uses(ptr) <= 1)
1459                 return 0;
1460
1461         mem = get_Store_mem(store);
1462
1463         /* follow the memory chain as long as there are only Loads */
1464         INC_MASTER();
1465
1466         return follow_Mem_chain_for_Store(store, skip_Proj(mem));
1467 }  /* optimize_store */
1468
1469 /**
1470  * walker, optimizes Phi after Stores to identical places:
1471  * Does the following optimization:
1472  * @verbatim
1473  *
1474  *   val1   val2   val3          val1  val2  val3
1475  *    |      |      |               \    |    /
1476  *  Store  Store  Store              \   |   /
1477  *      \    |    /                   PhiData
1478  *       \   |   /                       |
1479  *        \  |  /                      Store
1480  *          PhiM
1481  *
1482  * @endverbatim
1483  * This reduces the number of stores and allows for predicated execution.
1484  * However, it moves Stores towards the end of a function, which may be bad.
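 * E.g. in C:  if (c) *p = a; else *p = b;  becomes a single Store of the
 * value selected by a data Phi, i.e. *p = c ? a : b;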
1485  *
1486  * This is only possible if the predecessor blocks have only one successor.
1487  */
1488 static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
1489 {
1490         int i, n;
1491         ir_node *store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
1492 #ifdef DO_CACHEOPT
1493         ir_node *old_store;
1494 #endif
1495         ir_mode *mode;
1496         ir_node **inM, **inD, **projMs;
1497         int *idx;
1498         dbg_info *db = NULL;
1499         ldst_info_t *info;
1500         block_info_t *bl_info;
1501         unsigned res = 0;
1502
1503         /* Must be a memory Phi */
1504         if (get_irn_mode(phi) != mode_M)
1505                 return 0;
1506
1507         n = get_Phi_n_preds(phi);
1508         if (n <= 0)
1509                 return 0;
1510
1511         /* the memory Proj must have only one user */
1512         projM = get_Phi_pred(phi, 0);
1513         if (get_irn_n_edges(projM) != 1)
1514                 return 0;
1515
1516         store = skip_Proj(projM);
1517 #ifdef DO_CACHEOPT
1518         old_store = store;
1519 #endif
1520         if (!is_Store(store))
1521                 return 0;
1522
1523         block = get_nodes_block(store);
1524
1525         /* check if the block is post dominated by Phi-block
1526            and has no exception exit */
1527         bl_info = (block_info_t*)get_irn_link(block);
1528         if (bl_info->flags & BLOCK_HAS_EXC)
1529                 return 0;
1530
1531         phi_block = get_nodes_block(phi);
1532         if (! block_strictly_postdominates(phi_block, block))
1533                 return 0;
1534
1535         /* this is the address of the store */
1536         ptr  = get_Store_ptr(store);
1537         mode = get_irn_mode(get_Store_value(store));
1538         info = (ldst_info_t*)get_irn_link(store);
1539         exc  = info->exc_block;
1540
1541         for (i = 1; i < n; ++i) {
1542                 ir_node *pred = get_Phi_pred(phi, i);
1543
1544                 if (get_irn_n_edges(pred) != 1)
1545                         return 0;
1546
1547                 pred = skip_Proj(pred);
1548                 if (!is_Store(pred))
1549                         return 0;
1550
1551                 if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
1552                         return 0;
1553
1554                 info = (ldst_info_t*)get_irn_link(pred);
1555
1556                 /* check if all Stores have the same exception flow */
1557                 if (exc != info->exc_block)
1558                         return 0;
1559
1560                 block = get_nodes_block(pred);
1561
1562                 /* check if the block is post dominated by Phi-block
1563                    and has no exception exit. Note that block must be different from
1564                    Phi-block, else we would move a Store from the End of a block to its
1565                    Start... */
1566                 bl_info = (block_info_t*)get_irn_link(block);
1567                 if (bl_info->flags & BLOCK_HAS_EXC)
1568                         return 0;
1569                 if (block == phi_block || ! block_postdominates(phi_block, block))
1570                         return 0;
1571         }
1572
1573         /*
1574          * ok, when we are here, we found all predecessors of a Phi that
1575          * are Stores to the same address and size. That means whatever
1576          * we do before we enter the block of the Phi, we do a Store.
1577          * So, we can move the Store to the current block:
1578          *
1579          *   val1    val2    val3          val1  val2  val3
1580          *    |       |       |               \    |    /
1581          * | Str | | Str | | Str |             \   |   /
1582          *      \     |     /                   PhiData
1583          *       \    |    /                       |
1584          *        \   |   /                       Str
1585          *           PhiM
1586          *
1587          * Is only allowed if the predecessor blocks have only one successor.
1588          */
1589
1590         NEW_ARR_A(ir_node *, projMs, n);
1591         NEW_ARR_A(ir_node *, inM, n);
1592         NEW_ARR_A(ir_node *, inD, n);
1593         NEW_ARR_A(int, idx, n);
1594
1595         /* Prepare: Collect all Store nodes.  We must do this
1596            first because we otherwise may lose a Store when exchanging its
1597            memory Proj.
1598          */
1599         for (i = n - 1; i >= 0; --i) {
1600                 ir_node *store;
1601
1602                 projMs[i] = get_Phi_pred(phi, i);
1603                 assert(is_Proj(projMs[i]));
1604
1605                 store = get_Proj_pred(projMs[i]);
1606                 info  = (ldst_info_t*)get_irn_link(store);
1607
1608                 inM[i] = get_Store_mem(store);
1609                 inD[i] = get_Store_value(store);
1610                 idx[i] = info->exc_idx;
1611         }
1612         block = get_nodes_block(phi);
1613
1614         /* second step: create a new memory Phi */
1615         phiM = new_rd_Phi(get_irn_dbg_info(phi), block, n, inM, mode_M);
1616
1617         /* third step: create a new data Phi */
1618         phiD = new_rd_Phi(get_irn_dbg_info(phi), block, n, inD, mode);
1619
1620         /* rewire memory and kill the node */
1621         for (i = n - 1; i >= 0; --i) {
1622                 ir_node *proj  = projMs[i];
1623
1624                 if (is_Proj(proj)) {
1625                         ir_node *store = get_Proj_pred(proj);
1626                         exchange(proj, inM[i]);
1627                         kill_node(store);
1628                 }
1629         }
1630
1631         /* fourth step: create the Store */
1632         store = new_rd_Store(db, block, phiM, ptr, phiD, cons_none);
1633 #ifdef DO_CACHEOPT
1634         co_set_irn_name(store, co_get_irn_ident(old_store));
1635 #endif
1636
1637         projM = new_rd_Proj(NULL, store, mode_M, pn_Store_M);
1638
1639         info = get_ldst_info(store, &wenv->obst);
1640         info->projs[pn_Store_M] = projM;
1641
1642         /* fifth step: repair exception flow */
1643         if (exc) {
1644                 ir_node *projX = new_rd_Proj(NULL, store, mode_X, pn_Store_X_except);
1645
1646                 info->projs[pn_Store_X_except] = projX;
1647                 info->exc_block                = exc;
1648                 info->exc_idx                  = idx[0];
1649
1650                 for (i = 0; i < n; ++i) {
1651                         set_Block_cfgpred(exc, idx[i], projX);
1652                 }
1653
1654                 if (n > 1) {
1655                         /* the exception block should be optimized as some inputs are identical now */
1656                 }
1657
1658                 res |= CF_CHANGED;
1659         }
1660
1661         /* sixth step: replace old Phi */
1662         exchange(phi, projM);
1663
1664         return res | DF_CHANGED;
1665 }  /* optimize_phi */
1666
1667 /**
1668  * walker, do the optimizations
1669  */
1670 static void do_load_store_optimize(ir_node *n, void *env)
1671 {
1672         walk_env_t *wenv = (walk_env_t*)env;
1673
1674         switch (get_irn_opcode(n)) {
1675
1676         case iro_Load:
1677                 wenv->changes |= optimize_load(n);
1678                 break;
1679
1680         case iro_Store:
1681                 wenv->changes |= optimize_store(n);
1682                 break;
1683
1684         case iro_Phi:
1685                 wenv->changes |= optimize_phi(n, wenv);
1686                 break;
1687
1688         default:
1689                 break;
1690         }
1691 }  /* do_load_store_optimize */
1692
1693 /** A scc. */
1694 typedef struct scc {
1695         ir_node *head;      /**< the head of the list */
1696 } scc;
1697
1698 /** A node entry, the bookkeeping data of Tarjan's SCC algorithm. */
1699 typedef struct node_entry {
1700         unsigned DFSnum;    /**< the DFS number of this node */
1701         unsigned low;       /**< the low number of this node */
1702         int      in_stack;  /**< flag, set if the node is on the stack */
1703         ir_node  *next;     /**< link to the next node in the same scc */
1704         scc      *pscc;     /**< the scc of this node */
1705         unsigned POnum;     /**< the post order number for blocks */
1706 } node_entry;
1707
1708 /** A loop entry. */
1709 typedef struct loop_env {
1710         ir_nodehashmap_t map;
1711         struct obstack   obst;
1712         ir_node          **stack;      /**< the node stack */
1713         size_t           tos;          /**< tos index */
1714         unsigned         nextDFSnum;   /**< the current DFS number */
1715         unsigned         POnum;        /**< current post order number */
1716
1717         unsigned         changes;      /**< a bitmask of graph changes */
1718 } loop_env;
1719
1720 /**
1721  * Gets the node_entry of a node.
1722  */
1723 static node_entry *get_irn_ne(ir_node *irn, loop_env *env)
1724 {
1725         node_entry *e = (node_entry*)ir_nodehashmap_get(&env->map, irn);
1726
1727         if (e == NULL) {
1728                 e = OALLOC(&env->obst, node_entry);
1729                 memset(e, 0, sizeof(*e));
1730                 ir_nodehashmap_insert(&env->map, irn, e);
1731         }
1732         return e;
1733 }  /* get_irn_ne */
1734
1735 /**
1736  * Push a node onto the stack.
1737  *
1738  * @param env   the loop environment
1739  * @param n     the node to push
1740  */
1741 static void push(loop_env *env, ir_node *n)
1742 {
1743         node_entry *e;
1744
1745         if (env->tos == ARR_LEN(env->stack)) {
1746                 size_t nlen = ARR_LEN(env->stack) * 2;
1747                 ARR_RESIZE(ir_node *, env->stack, nlen);
1748         }
1749         env->stack[env->tos++] = n;
1750         e = get_irn_ne(n, env);
1751         e->in_stack = 1;
1752 }  /* push */
1753
1754 /**
1755  * pop a node from the stack
1756  *
1757  * @param env   the loop environment
1758  *
1759  * @return  The topmost node
1760  */
1761 static ir_node *pop(loop_env *env)
1762 {
1763         ir_node *n = env->stack[--env->tos];
1764         node_entry *e = get_irn_ne(n, env);
1765
1766         e->in_stack = 0;
1767         return n;
1768 }  /* pop */
1769
1770 /**
1771  * Check if irn is a region constant.
1772  * The block or irn must strictly dominate the header block.
1773  *
1774  * @param irn           the node to check
1775  * @param header_block  the header block of the induction variable
1776  */
1777 static int is_rc(ir_node *irn, ir_node *header_block)
1778 {
1779         ir_node *block = get_nodes_block(irn);
1780
1781         return (block != header_block) && block_dominates(block, header_block);
1782 }  /* is_rc */
1783
1784 typedef struct phi_entry phi_entry;
1785 struct phi_entry {
1786         ir_node   *phi;    /**< A phi with a region const memory. */
1787         int       pos;     /**< The position of the region const memory */
1788         ir_node   *load;   /**< the newly created load for this phi */
1789         phi_entry *next;
1790 };
1791
1792 /**
1793  * An entry in the avail set.
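 * It caches the Loads already created in a predecessor block so that all
 * moved Loads of the same address and mode share one new Load node.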
1794  */
1795 typedef struct avail_entry_t {
1796         ir_node *ptr;   /**< the address pointer */
1797         ir_mode *mode;  /**< the load mode */
1798         ir_node *load;  /**< the associated Load */
1799 } avail_entry_t;
1800
1801 /**
1802  * Compare two avail entries.
1803  */
1804 static int cmp_avail_entry(const void *elt, const void *key, size_t size)
1805 {
1806         const avail_entry_t *a = (const avail_entry_t*)elt;
1807         const avail_entry_t *b = (const avail_entry_t*)key;
1808         (void) size;
1809
1810         return a->ptr != b->ptr || a->mode != b->mode;
1811 }  /* cmp_avail_entry */
1812
1813 /**
1814  * Calculate the hash value of an avail entry.
1815  */
1816 static unsigned hash_cache_entry(const avail_entry_t *entry)
1817 {
1818         return get_irn_idx(entry->ptr) * 9 + HASH_PTR(entry->mode);
1819 }  /* hash_cache_entry */
1820
1821 /**
1822  * Move Loads out of loops if possible.
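 * A Load from a global entity inside the loop is executed once in front of
 * the loop instead, provided no Store inside the loop may alias its address.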
1823  *
1824  * @param pscc   the loop described by an SCC
1825  * @param env    the loop environment
1826  */
1827 static void move_loads_out_of_loops(scc *pscc, loop_env *env)
1828 {
1829         ir_node   *phi, *load, *next, *other, *next_other;
1830         int       j;
1831         phi_entry *phi_list = NULL;
1832         set       *avail;
1833
1834         avail = new_set(cmp_avail_entry, 8);
1835
1836         /* collect all outer memories */
1837         for (phi = pscc->head; phi != NULL; phi = next) {
1838                 node_entry *ne = get_irn_ne(phi, env);
1839                 next = ne->next;
1840
1841                 /* check all memory Phi's */
1842                 if (! is_Phi(phi))
1843                         continue;
1844
1845                 assert(get_irn_mode(phi) == mode_M && "DFS returned a non-memory Phi");
1846
1847                 for (j = get_irn_arity(phi) - 1; j >= 0; --j) {
1848                         ir_node    *pred = get_irn_n(phi, j);
1849                         node_entry *pe   = get_irn_ne(pred, env);
1850
1851                         if (pe->pscc != ne->pscc) {
1852                                 /* not in the same SCC, is region const */
1853                                 phi_entry *pentry = OALLOC(&env->obst, phi_entry);
1854
1855                                 pentry->phi  = phi;
1856                                 pentry->pos  = j;
1857                                 pentry->next = phi_list;
1858                                 phi_list = pentry;
1859                         }
1860                 }
1861         }
1862         /* no Phis no fun */
1863         assert(phi_list != NULL && "DFS found a loop without Phi");
1864
1865         /* for now, we cannot handle more than one input (only reducible cf) */
1866         if (phi_list->next != NULL)
1867                 return;
1868
1869         for (load = pscc->head; load; load = next) {
1870                 ir_mode *load_mode;
1871                 node_entry *ne = get_irn_ne(load, env);
1872                 next = ne->next;
1873
1874                 if (is_Load(load)) {
1875                         ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
1876                         ir_node     *ptr = get_Load_ptr(load);
1877
1878                         /* for now, we cannot handle Loads with exceptions */
1879                         if (info->projs[pn_Load_res] == NULL || info->projs[pn_Load_X_regular] != NULL || info->projs[pn_Load_X_except] != NULL)
1880                                 continue;
1881
1882                         /* for now, we can only move Load(Global) */
1883                         if (! is_SymConst_addr_ent(ptr))
1884                                 continue;
1885                         load_mode = get_Load_mode(load);
1886                         for (other = pscc->head; other != NULL; other = next_other) {
1887                                 node_entry *ne = get_irn_ne(other, env);
1888                                 next_other = ne->next;
1889
1890                                 if (is_Store(other)) {
1891                                         ir_alias_relation rel = get_alias_relation(
1892                                                 get_Store_ptr(other),
1893                                                 get_irn_mode(get_Store_value(other)),
1894                                                 ptr, load_mode);
1895                                         /* if there might be an alias, we cannot pass this Store */
1896                                         if (rel != ir_no_alias)
1897                                                 break;
1898                                 }
1899                                 /* only Loads, Phis and pure Calls remain here, so ignore them */
1900                         }
1901                         if (other == NULL) {
1902                                 ldst_info_t *ninfo = NULL;
1903                                 phi_entry   *pe;
1904                                 dbg_info    *db;
1905
1906                                 /* yep, no aliasing Store found, Load can be moved */
1907                                 DB((dbg, LEVEL_1, "  Found a Load that could be moved: %+F\n", load));
1908
1909                                 db   = get_irn_dbg_info(load);
1910                                 for (pe = phi_list; pe != NULL; pe = pe->next) {
1911                                         int     pos   = pe->pos;
1912                                         ir_node *phi  = pe->phi;
1913                                         ir_node *blk  = get_nodes_block(phi);
1914                                         ir_node *pred = get_Block_cfgpred_block(blk, pos);
1915                                         ir_node *irn, *mem;
1916                                         avail_entry_t entry, *res;
1917
1918                                         entry.ptr  = ptr;
1919                                         entry.mode = load_mode;
1920                                         res = (avail_entry_t*)set_find(avail, &entry, sizeof(entry), hash_cache_entry(&entry));
1921                                         if (res != NULL) {
1922                                                 irn = res->load;
1923                                         } else {
1924                                                 irn = new_rd_Load(db, pred, get_Phi_pred(phi, pos), ptr, load_mode, cons_none);
1925                                                 entry.load = irn;
1926                                                 set_insert(avail, &entry, sizeof(entry), hash_cache_entry(&entry));
1927                                                 DB((dbg, LEVEL_1, "  Created %+F in %+F\n", irn, pred));
1928                                         }
1929                                         pe->load = irn;
1930                                         ninfo = get_ldst_info(irn, &env->obst);
1931
1932                                         ninfo->projs[pn_Load_M] = mem = new_r_Proj(irn, mode_M, pn_Load_M);
1933                                         if (res == NULL) {
1934                                                 /* Only rewire the Phi input for a newly created Load.
1935                                                  * If irn came from the cache the Phi was already rewired,
1936                                                  * and other Loads might sit between phi and irn. */
1937                                                 set_Phi_pred(phi, pos, mem);
1938                                         }
1939
1940                                         ninfo->projs[pn_Load_res] = new_r_Proj(irn, load_mode, pn_Load_res);
1941                                 }
1942
1943                                 /* now kill the old Load */
1944                                 exchange(info->projs[pn_Load_M], get_Load_mem(load));
1945                                 exchange(info->projs[pn_Load_res], ninfo->projs[pn_Load_res]);
1946
1947                                 env->changes |= DF_CHANGED;
1948                         }
1949                 }
1950         }
1951         del_set(avail);
1952 }  /* move_loads_out_of_loops */
1953
1954 /**
1955  * Process a loop SCC.
1956  *
1957  * @param pscc  the SCC
1958  * @param env   the loop environment
1959  */
1960 static void process_loop(scc *pscc, loop_env *env)
1961 {
1962         ir_node *irn, *next, *header = NULL;
1963         node_entry *b, *h = NULL;
1964         int j, only_phi, num_outside, process = 0;
1965         ir_node *out_rc;
1966
1967         /* find the header block for this scc */
1968         for (irn = pscc->head; irn; irn = next) {
1969                 node_entry *e = get_irn_ne(irn, env);
1970                 ir_node *block = get_nodes_block(irn);
1971
1972                 next = e->next;
1973                 b = get_irn_ne(block, env);
1974
1975                 if (header != NULL) {
1976                         if (h->POnum < b->POnum) {
1977                                 header = block;
1978                                 h      = b;
1979                         }
1980                 } else {
1981                         header = block;
1982                         h      = b;
1983                 }
1984         }
1985
1986         /* check if this scc contains only Phi, Loads or Stores nodes */
1987         only_phi    = 1;
1988         num_outside = 0;
1989         out_rc      = NULL;
1990         for (irn = pscc->head; irn; irn = next) {
1991                 node_entry *e = get_irn_ne(irn, env);
1992
1993                 next = e->next;
1994                 switch (get_irn_opcode(irn)) {
1995                 case iro_Call:
1996                         if (is_Call_pure(irn)) {
1997                                 /* pure calls can be treated like loads */
1998                                 only_phi = 0;
1999                                 break;
2000                         }
2001                         /* non-pure calls must be handled like may-alias Stores */
2002                         goto fail;
2003                 case iro_CopyB:
2004                         /* cannot handle CopyB yet */
2005                         goto fail;
2006                 case iro_Load:
2007                         process = 1;
2008                         if (get_Load_volatility(irn) == volatility_is_volatile) {
2009                                 /* cannot handle loops with volatile Loads */
2010                                 goto fail;
2011                         }
2012                         only_phi = 0;
2013                         break;
2014                 case iro_Store:
2015                         if (get_Store_volatility(irn) == volatility_is_volatile) {
2016                                 /* cannot handle loops with volatile Stores */
2017                                 goto fail;
2018                         }
2019                         only_phi = 0;
2020                         break;
2021                 default:
2022                         only_phi = 0;
2023                         break;
2024                 case iro_Phi:
2025                         for (j = get_irn_arity(irn) - 1; j >= 0; --j) {
2026                                 ir_node *pred  = get_irn_n(irn, j);
2027                                 node_entry *pe = get_irn_ne(pred, env);
2028
2029                                 if (pe->pscc != e->pscc) {
2030                                         /* not in the same SCC, must be a region const */
2031                                         if (! is_rc(pred, header)) {
2032                                                 /* not a memory loop */
2033                                                 goto fail;
2034                                         }
2035                                         if (out_rc == NULL) {
2036                                                 /* first region constant */
2037                                                 out_rc = pred;
2038                                                 ++num_outside;
2039                                         } else if (out_rc != pred) {
2040                                                 /* another region constant */
2041                                                 ++num_outside;
2042                                         }
2043                                 }
2044                         }
2045                         break;
2046                 }
2047         }
2048         if (! process)
2049                 goto fail;
2050
2051         /* found a memory loop */
2052         DB((dbg, LEVEL_2, "  Found a memory loop:\n  "));
2053         if (only_phi && num_outside == 1) {
2054                 /* a phi cycle with only one real predecessor can be collapsed */
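                /* e.g. a loop whose body contains no memory operations: its memory
                 * Phis merely forward the memory state that enters the loop */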
2055                 DB((dbg, LEVEL_2, "  Found an USELESS Phi cycle:\n  "));
2056
2057                 for (irn = pscc->head; irn; irn = next) {
2058                         node_entry *e = get_irn_ne(irn, env);
2059                         next = e->next;
2060                         exchange(irn, out_rc);
2061                 }
2062                 env->changes |= DF_CHANGED;
2063                 return;
2064         }
2065
2066 #ifdef DEBUG_libfirm
2067         for (irn = pscc->head; irn; irn = next) {
2068                 node_entry *e = get_irn_ne(irn, env);
2069                 next = e->next;
2070                 DB((dbg, LEVEL_2, " %+F,", irn));
2071         }
2072         DB((dbg, LEVEL_2, "\n"));
2073 #endif
2074         move_loads_out_of_loops(pscc, env);
2075
2076 fail:
2077         ;
2078 }  /* process_loop */
2079
2080 /**
2081  * Process a SCC.
2082  *
2083  * @param pscc  the SCC
2084  * @param env   the loop environment
2085  */
2086 static void process_scc(scc *pscc, loop_env *env)
2087 {
2088         ir_node *head = pscc->head;
2089         node_entry *e = get_irn_ne(head, env);
2090
2091 #ifdef DEBUG_libfirm
2092         {
2093                 ir_node *irn, *next;
2094
2095                 DB((dbg, LEVEL_4, " SCC at %p:\n ", pscc));
2096                 for (irn = pscc->head; irn; irn = next) {
2097                         node_entry *e = get_irn_ne(irn, env);
2098
2099                         next = e->next;
2100
2101                         DB((dbg, LEVEL_4, " %+F,", irn));
2102                 }
2103                 DB((dbg, LEVEL_4, "\n"));
2104         }
2105 #endif
2106
2107         if (e->next != NULL) {
2108                 /* this SCC has more than one member */
2109                 process_loop(pscc, env);
2110         }
2111 }  /* process_scc */
2112
2113 /**
2114  * Do Tarjan's SCC algorithm and drive load/store optimization.
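 * A node is the root of an SCC iff its low link equals its own DFS number;
 * the SCC then consists of all nodes popped from the stack down to that root.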
2115  *
2116  * @param irn  start at this node
2117  * @param env  the loop environment
2118  */
2119 static void dfs(ir_node *irn, loop_env *env)
2120 {
2121         int i, n;
2122         node_entry *node = get_irn_ne(irn, env);
2123
2124         mark_irn_visited(irn);
2125
2126         node->DFSnum = env->nextDFSnum++;
2127         node->low    = node->DFSnum;
2128         push(env, irn);
2129
2130         /* handle preds */
2131         if (is_Phi(irn) || is_Sync(irn)) {
2132                 n = get_irn_arity(irn);
2133                 for (i = 0; i < n; ++i) {
2134                         ir_node *pred = get_irn_n(irn, i);
2135                         node_entry *o = get_irn_ne(pred, env);
2136
2137                         if (!irn_visited(pred)) {
2138                                 dfs(pred, env);
2139                                 node->low = MIN(node->low, o->low);
2140                         }
2141                         if (o->DFSnum < node->DFSnum && o->in_stack)
2142                                 node->low = MIN(o->DFSnum, node->low);
2143                 }
2144         } else if (is_fragile_op(irn)) {
2145                 ir_node *pred = get_memop_mem(irn);
2146                 node_entry *o = get_irn_ne(pred, env);
2147
2148                 if (!irn_visited(pred)) {
2149                         dfs(pred, env);
2150                         node->low = MIN(node->low, o->low);
2151                 }
2152                 if (o->DFSnum < node->DFSnum && o->in_stack)
2153                         node->low = MIN(o->DFSnum, node->low);
2154         } else if (is_Proj(irn)) {
2155                 ir_node *pred = get_Proj_pred(irn);
2156                 node_entry *o = get_irn_ne(pred, env);
2157
2158                 if (!irn_visited(pred)) {
2159                         dfs(pred, env);
2160                         node->low = MIN(node->low, o->low);
2161                 }
2162                 if (o->DFSnum < node->DFSnum && o->in_stack)
2163                         node->low = MIN(o->DFSnum, node->low);
2164         } else {
2166                 /* IGNORE predecessors */
2167         }
2168
2169         if (node->low == node->DFSnum) {
2170                 scc *pscc = OALLOC(&env->obst, scc);
2171                 ir_node *x;
2172
2173                 pscc->head = NULL;
2174                 do {
2175                         node_entry *e;
2176
2177                         x = pop(env);
2178                         e = get_irn_ne(x, env);
2179                         e->pscc    = pscc;
2180                         e->next    = pscc->head;
2181                         pscc->head = x;
2182                 } while (x != irn);
2183
2184                 process_scc(pscc, env);
2185         }
2186 }  /* dfs */
2187
2188 /**
2189  * Do the DFS on the memory edges of a graph.
2190  *
2191  * @param irg  the graph to process
2192  * @param env  the loop environment
2193  */
2194 static void do_dfs(ir_graph *irg, loop_env *env)
2195 {
2196         ir_node  *endblk, *end;
2197         int      i;
2198
2199         inc_irg_visited(irg);
2200
2201         /* visit all memory nodes */
2202         endblk = get_irg_end_block(irg);
2203         for (i = get_Block_n_cfgpreds(endblk) - 1; i >= 0; --i) {
2204                 ir_node *pred = get_Block_cfgpred(endblk, i);
2205
2206                 pred = skip_Proj(pred);
2207                 if (is_Return(pred)) {
2208                         dfs(get_Return_mem(pred), env);
2209                 } else if (is_Raise(pred)) {
2210                         dfs(get_Raise_mem(pred), env);
2211                 } else if (is_fragile_op(pred)) {
2212                         dfs(get_memop_mem(pred), env);
2213                 } else if (is_Bad(pred)) {
2214                         /* ignore non-optimized block predecessor */
2215                 } else {
2216                         assert(0 && "Unknown EndBlock predecessor");
2217                 }
2218         }
2219
2220         /* visit the keep-alives */
2221         end = get_irg_end(irg);
2222         for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
2223                 ir_node *ka = get_End_keepalive(end, i);
2224
2225                 if (is_Phi(ka) && !irn_visited(ka))
2226                         dfs(ka, env);
2227         }
2228 }  /* do_dfs */
2229
2230 /**
2231  * Optimize Loads/Stores in loops.
2232  *
2233  * @param irg  the graph
2234  */
2235 static int optimize_loops(ir_graph *irg)
2236 {
2237         loop_env env;
2238
2239         env.stack         = NEW_ARR_F(ir_node *, 128);
2240         env.tos           = 0;
2241         env.nextDFSnum    = 0;
2242         env.POnum         = 0;
2243         env.changes       = 0;
2244         ir_nodehashmap_init(&env.map);
2245         obstack_init(&env.obst);
2246
2247         /* calculate the SCC's and drive loop optimization. */
2248         do_dfs(irg, &env);
2249
2250         DEL_ARR_F(env.stack);
2251         obstack_free(&env.obst, NULL);
2252         ir_nodehashmap_destroy(&env.map);
2253
2254         return env.changes;
2255 }  /* optimize_loops */
2256
2257 /*
2258  * do the load store optimization
2259  */
2260 static ir_graph_state_t do_loadstore_opt(ir_graph *irg)
2261 {
2262         walk_env_t env;
2263         ir_graph_state_t res = 0;
2264
2265         FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");
2266
2267         assert(get_irg_phase_state(irg) != phase_building);
2268         assert(get_irg_pinned(irg) != op_pin_state_floats &&
2269                 "LoadStore optimization needs pinned graph");
2270
2271         if (get_opt_alias_analysis()) {
2272                 assure_irp_globals_entity_usage_computed();
2273         }
2274
2275         obstack_init(&env.obst);
2276         env.changes = 0;
2277
2278         /* init the links, then collect Loads/Stores/Proj's in lists */
2279         master_visited = 0;
2280         irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);
2281
2282         /* now we have collected enough information, optimize */
2283         irg_walk_graph(irg, NULL, do_load_store_optimize, &env);
2284
2285         env.changes |= optimize_loops(irg);
2286
2287         obstack_free(&env.obst, NULL);
2288
2289         /* Handle graph state */
2290         if (env.changes) {
2291                 edges_deactivate(irg);
2292         }
2293
2294         if (!(env.changes & CF_CHANGED)) {
2295                 res |= IR_GRAPH_STATE_CONSISTENT_DOMINANCE | IR_GRAPH_STATE_NO_BADS;
2296         }
2297
2298         return res;
2299 }
2300
2301 static optdesc_t opt_loadstore = {
2302         "load-store",
2303         IR_GRAPH_STATE_NO_UNREACHABLE_CODE | IR_GRAPH_STATE_CONSISTENT_OUT_EDGES | IR_GRAPH_STATE_NO_CRITICAL_EDGES | IR_GRAPH_STATE_CONSISTENT_DOMINANCE | IR_GRAPH_STATE_CONSISTENT_ENTITY_USAGE,
2304         do_loadstore_opt,
2305 };
2306
2307 int optimize_load_store(ir_graph *irg)
2308 {
2309         perform_irg_optimization(irg, &opt_loadstore);
2310         return 1;
2311 }
2312
2313 ir_graph_pass_t *optimize_load_store_pass(const char *name)
2314 {
2315         return def_graph_pass_ret(name ? name : "ldst", optimize_load_store);
2316 }  /* optimize_load_store_pass */