ir/opt/ldstopt.c
/*
 * This file is part of libFirm.
 * Copyright (C) 2012 University of Karlsruhe.
 */

/**
 * @file
 * @brief   Load/Store optimizations.
 * @author  Michael Beck
 */
#include "config.h"

#include <string.h>

#include "iroptimize.h"
#include "irnode_t.h"
#include "irgraph_t.h"
#include "irmode_t.h"
#include "iropt_t.h"
#include "ircons_t.h"
#include "irgmod.h"
#include "irgwalk.h"
#include "irtools.h"
#include "tv_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "irflag_t.h"
#include "array_t.h"
#include "irhooks.h"
#include "iredges.h"
#include "irpass.h"
#include "irmemory.h"
#include "irnodehashmap.h"
#include "irgopt.h"
#include "set.h"
#include "be.h"
#include "debug.h"

/** The debug handle. */
DEBUG_ONLY(static firm_dbg_module_t *dbg;)

#undef IMAX
#define IMAX(a,b)   ((a) > (b) ? (a) : (b))

#define MAX_PROJ    IMAX(IMAX((long)pn_Load_max, (long)pn_Store_max), (long)pn_Call_max)

enum changes_t {
        DF_CHANGED = 1,       /**< data flow changed */
        CF_CHANGED = 2,       /**< control flow changed */
};

/**
 * walker environment
 */
typedef struct walk_env_t {
        struct obstack obst;          /**< list of all stores */
        unsigned changes;             /**< a bitmask of graph changes */
} walk_env_t;

/** A Load/Store info. */
typedef struct ldst_info_t {
        ir_node  *projs[MAX_PROJ+1];  /**< list of Proj's of this node */
        ir_node  *exc_block;          /**< the exception block if available */
        int      exc_idx;             /**< predecessor index in the exception block */
        unsigned visited;             /**< visited counter for breaking loops */
} ldst_info_t;

/**
 * flags for control flow.
 */
enum block_flags_t {
        BLOCK_HAS_COND = 1,      /**< Block has conditional control flow */
        BLOCK_HAS_EXC  = 2       /**< Block has exceptional control flow */
};

/**
 * a Block info.
 */
typedef struct block_info_t {
        unsigned flags;               /**< flags for the block */
} block_info_t;

/** the master visited flag for loop detection. */
static unsigned master_visited = 0;

#define INC_MASTER()       ++master_visited
#define MARK_NODE(info)    (info)->visited = master_visited
#define NODE_VISITED(info) ((info)->visited >= master_visited)

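/*
 * Usage sketch (illustrative, not part of the original code): every walk
 * over a memory chain first bumps the counter with INC_MASTER(); during
 * the walk, MARK_NODE(info) stamps a node with the current counter and
 * NODE_VISITED(info) tells whether the walk already came past this node,
 * which is how cycles in (unreachable) loops are broken.
 */
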
/**
 * get the Load/Store info of a node
 */
static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst)
{
        ldst_info_t *info = (ldst_info_t*)get_irn_link(node);

        if (! info) {
                info = OALLOCZ(obst, ldst_info_t);
                set_irn_link(node, info);
        }
        return info;
}

/**
 * get the Block info of a node
 */
static block_info_t *get_block_info(ir_node *node, struct obstack *obst)
{
        block_info_t *info = (block_info_t*)get_irn_link(node);

        if (! info) {
                info = OALLOCZ(obst, block_info_t);
                set_irn_link(node, info);
        }
        return info;
}

/**
 * update the projection info for a Load/Store
 */
static unsigned update_projs(ldst_info_t *info, ir_node *proj)
{
        long nr = get_Proj_proj(proj);

        assert(0 <= nr && nr <= MAX_PROJ && "Wrong proj from LoadStore");

        if (info->projs[nr]) {
                /* there is already one, do CSE */
                exchange(proj, info->projs[nr]);
                return DF_CHANGED;
        } else {
                info->projs[nr] = proj;
                return 0;
        }
}

/**
 * update the exception block info for a Load/Store node.
 *
 * @param info   the load/store info struct
 * @param block  the exception handler block for this load/store
 * @param pos    the control flow input of the block
 */
static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
{
        assert(info->exc_block == NULL && "more than one exception block found");

        info->exc_block = block;
        info->exc_idx   = pos;
        return 0;
}

/**
 * walker, collects all Load/Store/Proj nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
{
        walk_env_t  *wenv   = (walk_env_t *)env;
        unsigned     opcode = get_irn_opcode(node);
        ir_node     *pred, *blk, *pred_blk;
        ldst_info_t *ldst_info;

        if (opcode == iro_Proj) {
                pred   = get_Proj_pred(node);
                opcode = get_irn_opcode(pred);

                if (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call) {
                        ldst_info = get_ldst_info(pred, &wenv->obst);

                        wenv->changes |= update_projs(ldst_info, node);

                        /*
                         * Place the Proj's in the same block as the
                         * predecessor Load. This is always ok and prevents
                         * "non-SSA" form after optimizations if the Proj
                         * is in a wrong block.
                         */
                        blk      = get_nodes_block(node);
                        pred_blk = get_nodes_block(pred);
                        if (blk != pred_blk) {
                                wenv->changes |= DF_CHANGED;
                                set_nodes_block(node, pred_blk);
                        }
                }
        } else if (opcode == iro_Block) {
                int i;

                for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
                        ir_node      *pred_block, *proj;
                        block_info_t *bl_info;
                        int          is_exc = 0;

                        pred = proj = get_Block_cfgpred(node, i);

                        if (is_Proj(proj)) {
                                pred   = get_Proj_pred(proj);
                                is_exc = is_x_except_Proj(proj);
                        }

                        /* ignore Bad predecessors, they will be removed later */
                        if (is_Bad(pred))
                                continue;

                        pred_block = get_nodes_block(pred);
                        bl_info    = get_block_info(pred_block, &wenv->obst);

                        if (is_fragile_op(pred) && is_exc)
                                bl_info->flags |= BLOCK_HAS_EXC;
                        else if (is_irn_forking(pred))
                                bl_info->flags |= BLOCK_HAS_COND;

                        opcode = get_irn_opcode(pred);
                        if (is_exc && (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call)) {
                                ldst_info = get_ldst_info(pred, &wenv->obst);

                                wenv->changes |= update_exc(ldst_info, node, i);
                        }
                }
        }
}

/**
 * Returns an entity if the address ptr points to a constant one.
 *
 * @param ptr  the address
 *
 * @return an entity or NULL
 */
static ir_entity *find_constant_entity(ir_node *ptr)
{
        for (;;) {
                if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
                        return get_SymConst_entity(ptr);
                } else if (is_Sel(ptr)) {
                        ir_entity *ent = get_Sel_entity(ptr);
                        ir_type   *tp  = get_entity_owner(ent);

                        /* Do not fiddle with polymorphism. */
                        if (is_Class_type(tp) &&
                                ((get_entity_n_overwrites(ent)    != 0) ||
                                (get_entity_n_overwrittenby(ent) != 0)   ) )
                                return NULL;

                        if (is_Array_type(tp)) {
                                /* check bounds */
                                int i, n;

                                for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
                                        ir_node   *bound;
                                        ir_tarval *tlower, *tupper;
                                        ir_node   *index = get_Sel_index(ptr, i);
                                        ir_tarval *tv    = computed_value(index);

                                        /* check if the index is constant */
                                        if (tv == tarval_bad)
                                                return NULL;

                                        bound  = get_array_lower_bound(tp, i);
                                        tlower = computed_value(bound);
                                        bound  = get_array_upper_bound(tp, i);
                                        tupper = computed_value(bound);

                                        if (tlower == tarval_bad || tupper == tarval_bad)
                                                return NULL;

                                        if (tarval_cmp(tv, tlower) == ir_relation_less)
                                                return NULL;
                                        if (tarval_cmp(tupper, tv) == ir_relation_less)
                                                return NULL;

                                        /* ok, bounds check finished */
                                }
                        }

                        if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT)
                                return ent;

                        /* try next */
                        ptr = get_Sel_ptr(ptr);
                } else if (is_Add(ptr)) {
                        ir_node *l = get_Add_left(ptr);
                        ir_node *r = get_Add_right(ptr);

                        if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
                                ptr = l;
                        else if (get_irn_mode(r) == get_irn_mode(ptr) && is_Const(l))
                                ptr = r;
                        else
                                return NULL;

                        /* for now, we support only one addition, reassoc should fold all others */
                        if (! is_SymConst(ptr) && !is_Sel(ptr))
                                return NULL;
                } else if (is_Sub(ptr)) {
                        ir_node *l = get_Sub_left(ptr);
                        ir_node *r = get_Sub_right(ptr);

                        if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
                                ptr = l;
                        else
                                return NULL;
                        /* for now, we support only one subtraction, reassoc should fold all others */
                        if (! is_SymConst(ptr) && !is_Sel(ptr))
                                return NULL;
                } else
                        return NULL;
        }
}

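/*
 * Example (illustrative assumption, not from the original source): for an
 * address like Sel(SymConst(&tab), Const(2)) where "tab" is an array
 * entity with IR_LINKAGE_CONSTANT and bounds [0, 10], the constant index
 * 2 passes the bounds check above and the entity of "tab" is returned;
 * a variable index or an out-of-bounds constant yields NULL.
 */
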
/**
 * Return the Selection index of a Sel node from dimension n
 */
static long get_Sel_array_index_long(ir_node *n, int dim)
{
        ir_node *index = get_Sel_index(n, dim);
        return get_tarval_long(get_Const_tarval(index));
}

typedef struct path_entry {
        ir_entity         *ent;
        struct path_entry *next;
        size_t            index;
} path_entry;

static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
{
        path_entry       entry, *p;
        ir_entity        *ent, *field;
        ir_initializer_t *initializer;
        ir_tarval        *tv;
        ir_type          *tp;
        size_t           n;

        entry.next = next;
        if (is_SymConst(ptr)) {
                /* found the root */
                ent         = get_SymConst_entity(ptr);
                initializer = get_entity_initializer(ent);
                for (p = next; p != NULL;) {
                        if (initializer->kind != IR_INITIALIZER_COMPOUND)
                                return NULL;
                        n  = get_initializer_compound_n_entries(initializer);
                        tp = get_entity_type(ent);

                        if (is_Array_type(tp)) {
                                ent = get_array_element_entity(tp);
                                if (ent != p->ent) {
                                        /* a missing [0] */
                                        if (0 >= n)
                                                return NULL;
                                        initializer = get_initializer_compound_value(initializer, 0);
                                        continue;
                                }
                        }
                        if (p->index >= n)
                                return NULL;
                        initializer = get_initializer_compound_value(initializer, p->index);

                        ent = p->ent;
                        p   = p->next;
                }
                tp = get_entity_type(ent);
                while (is_Array_type(tp)) {
                        ent = get_array_element_entity(tp);
                        tp = get_entity_type(ent);
                        /* a missing [0] */
                        n  = get_initializer_compound_n_entries(initializer);
                        if (0 >= n)
                                return NULL;
                        initializer = get_initializer_compound_value(initializer, 0);
                }

                switch (initializer->kind) {
                case IR_INITIALIZER_CONST:
                        return get_initializer_const_value(initializer);
                case IR_INITIALIZER_TARVAL:
                case IR_INITIALIZER_NULL:
                default:
                        return NULL;
                }
        } else if (is_Sel(ptr)) {
                entry.ent = field = get_Sel_entity(ptr);
                tp = get_entity_owner(field);
                if (is_Array_type(tp)) {
                        assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
                        entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
                } else {
                        size_t i, n_members = get_compound_n_members(tp);
                        for (i = 0; i < n_members; ++i) {
                                if (get_compound_member(tp, i) == field)
                                        break;
                        }
                        if (i >= n_members) {
                                /* not found: should NOT happen */
                                return NULL;
                        }
                        entry.index = i;
                }
                return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
        } else if (is_Add(ptr)) {
                ir_mode  *mode;
                unsigned pos;

                {
                        ir_node *l = get_Add_left(ptr);
                        ir_node *r = get_Add_right(ptr);
                        if (is_Const(r)) {
                                ptr = l;
                                tv  = get_Const_tarval(r);
                        } else {
                                ptr = r;
                                tv  = get_Const_tarval(l);
                        }
                }
ptr_arith:
                mode = get_tarval_mode(tv);

                /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
                if (is_Sel(ptr)) {
                        field = get_Sel_entity(ptr);
                } else {
                        field = get_SymConst_entity(ptr);
                }

                /* count needed entries */
                pos = 0;
                for (ent = field;;) {
                        tp = get_entity_type(ent);
                        if (! is_Array_type(tp))
                                break;
                        ent = get_array_element_entity(tp);
                        ++pos;
                }
                /* should be at least ONE entry */
                if (pos == 0)
                        return NULL;

                /* allocate the right number of entries */
                NEW_ARR_A(path_entry, p, pos);

                /* fill them up */
                pos = 0;
                for (ent = field;;) {
                        unsigned   size;
                        ir_tarval *sz, *tv_index, *tlower, *tupper;
                        long       index;
                        ir_node   *bound;

                        tp = get_entity_type(ent);
                        if (! is_Array_type(tp))
                                break;
                        ent = get_array_element_entity(tp);
                        p[pos].ent  = ent;
                        p[pos].next = &p[pos + 1];

                        size = get_type_size_bytes(get_entity_type(ent));
                        sz   = new_tarval_from_long(size, mode);

                        tv_index = tarval_div(tv, sz);
                        tv       = tarval_mod(tv, sz);

                        if (tv_index == tarval_bad || tv == tarval_bad)
                                return NULL;

                        assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
                        bound  = get_array_lower_bound(tp, 0);
                        tlower = computed_value(bound);
                        bound  = get_array_upper_bound(tp, 0);
                        tupper = computed_value(bound);

                        if (tlower == tarval_bad || tupper == tarval_bad)
                                return NULL;

                        if (tarval_cmp(tv_index, tlower) == ir_relation_less)
                                return NULL;
                        if (tarval_cmp(tupper, tv_index) == ir_relation_less)
                                return NULL;

                        /* ok, bounds check finished */
                        index = get_tarval_long(tv_index);
                        p[pos].index = index;
                        ++pos;
                }
                if (! tarval_is_null(tv)) {
                        /* hmm, wrong access */
                        return NULL;
                }
                p[pos - 1].next = next;
                return rec_find_compound_ent_value(ptr, p);
        } else if (is_Sub(ptr)) {
                ir_node *l = get_Sub_left(ptr);
                ir_node *r = get_Sub_right(ptr);

                ptr = l;
                tv  = get_Const_tarval(r);
                tv  = tarval_neg(tv);
                goto ptr_arith;
        }
        return NULL;
}

static ir_node *find_compound_ent_value(ir_node *ptr)
{
        return rec_find_compound_ent_value(ptr, NULL);
}

/* forward */
static void reduce_adr_usage(ir_node *ptr);

/**
 * Update a Load that may have lost its users.
 */
static void handle_load_update(ir_node *load)
{
        ldst_info_t *info = (ldst_info_t*)get_irn_link(load);

        /* do NOT touch volatile loads for now */
        if (get_Load_volatility(load) == volatility_is_volatile)
                return;

        if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
                ir_node *ptr = get_Load_ptr(load);
                ir_node *mem = get_Load_mem(load);

                /* a Load whose value is neither used nor exception-checked; remove it */
                exchange(info->projs[pn_Load_M], mem);
                if (info->projs[pn_Load_X_regular])
                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                kill_node(load);
                reduce_adr_usage(ptr);
        }
}

/**
 * A use of an address node has vanished. Check if this was a Proj
 * node and update the counters.
 */
static void reduce_adr_usage(ir_node *ptr)
{
        ir_node *pred;
        if (!is_Proj(ptr))
                return;
        if (get_irn_n_edges(ptr) > 0)
                return;

        /* this Proj is dead now */
        pred = get_Proj_pred(ptr);
        if (is_Load(pred)) {
                ldst_info_t *info = (ldst_info_t*)get_irn_link(pred);
                info->projs[get_Proj_proj(ptr)] = NULL;

                /* this node lost its result proj, handle that */
                handle_load_update(pred);
        }
}

/**
 * Check whether an already existing value of mode old_mode can be converted
 * into the needed mode new_mode without loss.
 */
static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode)
{
        unsigned old_size;
        unsigned new_size;
        if (old_mode == new_mode)
                return true;

        old_size = get_mode_size_bits(old_mode);
        new_size = get_mode_size_bits(new_mode);

        /* if both modes are two's-complement ones, we can always convert the
           stored value into the needed one. (on big-endian machines we currently
           only support this for modes of the same size) */
        if (old_size >= new_size &&
                  get_mode_arithmetic(old_mode) == irma_twos_complement &&
                  get_mode_arithmetic(new_mode) == irma_twos_complement &&
                  (!be_get_backend_param()->byte_order_big_endian
                || old_size == new_size)) {
                return true;
        }
        return false;
}

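/*
 * Example (illustrative): a stored 32-bit two's-complement value (e.g.
 * mode_Is) can satisfy a later 8-bit load (e.g. mode_Bs), because the
 * smaller value is just a truncation of the stored one; on big-endian
 * targets the check above currently accepts only equal-sized modes.
 */
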
/**
 * Check whether a Call is at least pure, i.e. does only read memory.
 */
static unsigned is_Call_pure(ir_node *call)
{
        ir_type *call_tp = get_Call_type(call);
        unsigned prop = get_method_additional_properties(call_tp);

        /* check the call type first */
        if ((prop & (mtp_property_const|mtp_property_pure)) == 0) {
                /* try the called entity */
                ir_node *ptr = get_Call_ptr(call);

                if (is_SymConst_addr_ent(ptr)) {
                        ir_entity *ent = get_SymConst_entity(ptr);

                        prop = get_entity_additional_properties(ent);
                }
        }
        return (prop & (mtp_property_const|mtp_property_pure)) != 0;
}

static ir_node *get_base_and_offset(ir_node *ptr, long *pOffset)
{
        ir_mode *mode  = get_irn_mode(ptr);
        long    offset = 0;

        /* TODO: long might not be enough, we should probably use some tarval thingy... */
        for (;;) {
                if (is_Add(ptr)) {
                        ir_node *l = get_Add_left(ptr);
                        ir_node *r = get_Add_right(ptr);

                        if (get_irn_mode(l) != mode || !is_Const(r))
                                break;

                        offset += get_tarval_long(get_Const_tarval(r));
                        ptr     = l;
                } else if (is_Sub(ptr)) {
                        ir_node *l = get_Sub_left(ptr);
                        ir_node *r = get_Sub_right(ptr);

                        if (get_irn_mode(l) != mode || !is_Const(r))
                                break;

                        offset -= get_tarval_long(get_Const_tarval(r));
                        ptr     = l;
                } else if (is_Sel(ptr)) {
                        ir_entity *ent = get_Sel_entity(ptr);
                        ir_type   *tp  = get_entity_owner(ent);

                        if (is_Array_type(tp)) {
                                int     size;
                                ir_node *index;

                                /* only one-dimensional arrays yet */
                                if (get_Sel_n_indexs(ptr) != 1)
                                        break;
                                index = get_Sel_index(ptr, 0);
                                if (! is_Const(index))
                                        break;

                                tp = get_entity_type(ent);
                                if (get_type_state(tp) != layout_fixed)
                                        break;

                                size    = get_type_size_bytes(tp);
                                offset += size * get_tarval_long(get_Const_tarval(index));
                        } else {
                                if (get_type_state(tp) != layout_fixed)
                                        break;
                                offset += get_entity_offset(ent);
                        }
                        ptr = get_Sel_ptr(ptr);
                } else
                        break;
        }

        *pOffset = offset;
        return ptr;
}

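/*
 * Example (illustrative): for ptr = Add(Sel(frame, ent), Const(8)) with a
 * fixed layout and get_entity_offset(ent) == 4, the loop above peels the
 * Add and the Sel, stores 12 into *pOffset and returns the frame pointer
 * as the base.
 */
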
static int try_load_after_store(ir_node *load,
                ir_node *load_base_ptr, long load_offset, ir_node *store)
{
        ldst_info_t *info;
        ir_node *store_ptr      = get_Store_ptr(store);
        long     store_offset;
        ir_node *store_base_ptr = get_base_and_offset(store_ptr, &store_offset);
        ir_node *store_value;
        ir_mode *store_mode;
        ir_node *load_ptr;
        ir_mode *load_mode;
        long     load_mode_len;
        long     store_mode_len;
        long     delta;
        int      res;

        if (load_base_ptr != store_base_ptr)
                return 0;

        load_mode      = get_Load_mode(load);
        load_mode_len  = get_mode_size_bytes(load_mode);
        store_mode     = get_irn_mode(get_Store_value(store));
        store_mode_len = get_mode_size_bytes(store_mode);
        delta          = load_offset - store_offset;
        store_value    = get_Store_value(store);

        if (delta < 0 || delta + load_mode_len > store_mode_len)
                return 0;

        if (store_mode != load_mode &&
            get_mode_arithmetic(store_mode) == irma_twos_complement &&
            get_mode_arithmetic(load_mode)  == irma_twos_complement) {

                /* produce a shift to adjust offset delta */
                unsigned const shift = be_get_backend_param()->byte_order_big_endian
                        ? store_mode_len - load_mode_len - delta
                        : delta;
                if (shift != 0) {
                        ir_graph *const irg  = get_irn_irg(load);
                        ir_node  *const cnst = new_r_Const_long(irg, mode_Iu, shift * 8);
                        store_value = new_r_Shr(get_nodes_block(load),
                                                store_value, cnst, store_mode);
                }

                store_value = new_r_Conv(get_nodes_block(load), store_value, load_mode);
        } else {
                /* we would need some kind of bitcast node here */
                return 0;
        }

        DBG_OPT_RAW(load, store_value);

        info = (ldst_info_t*)get_irn_link(load);
        if (info->projs[pn_Load_M])
                exchange(info->projs[pn_Load_M], get_Load_mem(load));

        res = 0;
        /* no exception */
        if (info->projs[pn_Load_X_except]) {
                ir_graph *irg = get_irn_irg(load);
                exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
                res |= CF_CHANGED;
        }
        if (info->projs[pn_Load_X_regular]) {
                exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                res |= CF_CHANGED;
        }

        if (info->projs[pn_Load_res])
                exchange(info->projs[pn_Load_res], store_value);

        load_ptr = get_Load_ptr(load);
        kill_node(load);
        reduce_adr_usage(load_ptr);
        return res | DF_CHANGED;
}

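/*
 * Example (illustrative, little-endian): a 4-byte Store at offset 0
 * followed by a 1-byte Load at offset 2 gives delta == 2, so the stored
 * value is shifted right by 2 * 8 bits before the Conv to the load mode;
 * on big-endian targets the shift would be (4 - 1 - 2) * 8 bits instead.
 */
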
/**
 * Follow the memory chain as long as there are only Loads,
 * alias-free Stores, and constant Calls and try to replace the
 * current Load by a previous one.
 * Note that in unreachable loops we might reach the Load again
 * or fall into a cycle. We break such cycles using a special
 * visited flag.
 *
 * INC_MASTER() must be called before diving into the chain.
 */
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr)
{
        unsigned    res = 0;
        ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
        ir_node     *pred;
        ir_node     *ptr       = get_Load_ptr(load);
        ir_node     *mem       = get_Load_mem(load);
        ir_mode     *load_mode = get_Load_mode(load);

        for (pred = curr; load != pred; ) {
                ldst_info_t *pred_info = (ldst_info_t*)get_irn_link(pred);

                /*
                 * a Load immediately after a Store -- a read after write.
                 * We may remove the Load if neither the Load nor the Store
                 * has an exception handler OR they are in the same block.
                 * In the latter case the Load cannot throw an exception if
                 * the previous Store was quiet.
                 *
                 * Why do we need to check for a Store exception? If the Store
                 * cannot be executed (ROM) the exception handler might simply
                 * jump into the Load block :-(
                 * We could make it a little bit better if we would know that the
                 * exception handler of the Store jumps directly to the end...
                 */
                if (is_Store(pred) && ((pred_info->projs[pn_Store_X_except] == NULL
                                && info->projs[pn_Load_X_except] == NULL)
                                || get_nodes_block(load) == get_nodes_block(pred)))
                {
                        long    load_offset;
                        ir_node *base_ptr = get_base_and_offset(ptr, &load_offset);
                        int     changes   = try_load_after_store(load, base_ptr, load_offset, pred);

                        if (changes != 0)
                                return res | changes;
                } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
                           can_use_stored_value(get_Load_mode(pred), load_mode)) {
                        /*
                         * a Load after a Load -- a read after read.
                         * We may remove the second Load if it does not have an exception
                         * handler OR they are in the same block. In the latter case
                         * the Load cannot throw an exception if the previous Load was
                         * quiet.
                         *
                         * Here, there is no need to check if the previous Load has an
                         * exception handler because both would raise exactly the same
                         * exception...
                         *
                         * TODO: implement load-after-load with different modes for big
                         *       endian
                         */
                        if (info->projs[pn_Load_X_except] == NULL
                                        || get_nodes_block(load) == get_nodes_block(pred)) {
                                ir_node *value;

                                DBG_OPT_RAR(load, pred);

                                /* the result is used */
                                if (info->projs[pn_Load_res]) {
                                        if (pred_info->projs[pn_Load_res] == NULL) {
                                                /* create a new Proj again */
                                                pred_info->projs[pn_Load_res] = new_r_Proj(pred, get_Load_mode(pred), pn_Load_res);
                                        }
                                        value = pred_info->projs[pn_Load_res];

                                        /* add a Conv if needed */
                                        if (get_Load_mode(pred) != load_mode) {
                                                value = new_r_Conv(get_nodes_block(load), value, load_mode);
                                        }

                                        exchange(info->projs[pn_Load_res], value);
                                }

                                if (info->projs[pn_Load_M])
                                        exchange(info->projs[pn_Load_M], mem);

                                /* no exception */
                                if (info->projs[pn_Load_X_except]) {
                                        ir_graph *irg = get_irn_irg(load);
                                        exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
                                        res |= CF_CHANGED;
                                }
                                if (info->projs[pn_Load_X_regular]) {
                                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                                        res |= CF_CHANGED;
                                }

                                kill_node(load);
                                reduce_adr_usage(ptr);
                                return res | DF_CHANGED;
                        }
                }

                if (is_Store(pred)) {
                        /* check if we can pass through this store */
                        ir_alias_relation rel = get_alias_relation(
                                get_Store_ptr(pred),
                                get_irn_mode(get_Store_value(pred)),
                                ptr, load_mode);
                        /* if there might be an alias, we cannot pass this Store */
                        if (rel != ir_no_alias)
                                break;
                        pred = skip_Proj(get_Store_mem(pred));
                } else if (is_Load(pred)) {
                        pred = skip_Proj(get_Load_mem(pred));
                } else if (is_Call(pred)) {
                        if (is_Call_pure(pred)) {
                                /* The called graph is at least pure, so there are no Stores
                                   in it. We can handle it like a Load and skip it. */
                                pred = skip_Proj(get_Call_mem(pred));
                        } else {
                                /* there might be Stores in the graph, stop here */
                                break;
                        }
                } else {
                        /* follow only Load chains */
                        break;
                }

                /* check for cycles */
                if (NODE_VISITED(pred_info))
                        break;
                MARK_NODE(pred_info);
        }

        if (is_Sync(pred)) {
                int i;

                /* handle all Sync predecessors */
                for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
                        res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
                        if (res)
                                return res;
                }
        }

        return res;
}

ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c)
{
        ir_mode  *c_mode = get_irn_mode(c);
        ir_mode  *l_mode = get_Load_mode(load);
        ir_node  *block  = get_nodes_block(load);
        dbg_info *dbgi   = get_irn_dbg_info(load);
        ir_node  *res    = copy_const_value(dbgi, c, block);

        if (c_mode != l_mode) {
                /* check if the mode matches OR the value can be easily converted into it */
                if (is_reinterpret_cast(c_mode, l_mode)) {
                        /* copy the value from the const code irg and cast it */
                        res = new_rd_Conv(dbgi, block, res, l_mode);
                } else {
                        return NULL;
                }
        }
        return res;
}

/**
 * optimize a Load
 *
 * @param load  the Load node
 */
static unsigned optimize_load(ir_node *load)
{
        ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
        ir_node     *mem, *ptr, *value;
        ir_entity   *ent;
        long        dummy;
        unsigned    res = 0;

        /* do NOT touch volatile loads for now */
        if (get_Load_volatility(load) == volatility_is_volatile)
                return 0;

        /* the address of the load to be optimized */
        ptr = get_Load_ptr(load);

        /* The mem of the Load. Must still be returned after optimization. */
        mem = get_Load_mem(load);

        if (info->projs[pn_Load_res] == NULL
                        && info->projs[pn_Load_X_except] == NULL) {
                /* the value is never used and we don't care about exceptions, remove */
                exchange(info->projs[pn_Load_M], mem);

                if (info->projs[pn_Load_X_regular]) {
                        /* should not happen, but if it does, remove it */
                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                        res |= CF_CHANGED;
                }
                kill_node(load);
                reduce_adr_usage(ptr);
                return res | DF_CHANGED;
        }

        value = NULL;
        /* check if we can determine the entity that will be loaded */
        ent = find_constant_entity(ptr);
        if (ent != NULL
                        && get_entity_visibility(ent) != ir_visibility_external) {
                /* a static allocation that is not external: there should be NO
                 * exception when loading even if we cannot replace the load itself.
                 */

                /* no exception, clear the info field as it might be checked later again */
                if (info->projs[pn_Load_X_except]) {
                        ir_graph *irg = get_irn_irg(load);
                        exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
                        info->projs[pn_Load_X_except] = NULL;
                        res |= CF_CHANGED;
                }
                if (info->projs[pn_Load_X_regular]) {
                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                        info->projs[pn_Load_X_regular] = NULL;
                        res |= CF_CHANGED;
                }

                if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT) {
                        if (has_entity_initializer(ent)) {
                                /* new style initializer */
                                value = find_compound_ent_value(ptr);
                        }
                        if (value != NULL) {
                                ir_graph *irg = get_irn_irg(load);
                                value = can_replace_load_by_const(load, value);
                                if (value != NULL && is_Sel(ptr)) {
                                        /* the frontend has inserted masking operations after bitfield
                                         * accesses, so we might have to shift the const. */
                                        unsigned char bit_offset = get_entity_offset_bits_remainder(get_Sel_entity(ptr));
                                        if (bit_offset != 0) {
                                                if (is_Const(value)) {
                                                        ir_tarval *tv_old = get_Const_tarval(value);
                                                        ir_tarval *tv_offset = new_tarval_from_long(bit_offset, mode_Bu);
                                                        ir_tarval *tv_new = tarval_shl(tv_old, tv_offset);
                                                        value = new_r_Const(irg, tv_new);
                                                } else {
                                                        value = NULL;
                                                }
                                        }
                                }
                        }
                }
        }
        if (value != NULL) {
                /* we completely replace the load by this value */
                if (info->projs[pn_Load_X_except]) {
                        ir_graph *irg = get_irn_irg(load);
                        exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
                        info->projs[pn_Load_X_except] = NULL;
                        res |= CF_CHANGED;
                }
                if (info->projs[pn_Load_X_regular]) {
                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                        info->projs[pn_Load_X_regular] = NULL;
                        res |= CF_CHANGED;
                }
                if (info->projs[pn_Load_M]) {
                        exchange(info->projs[pn_Load_M], mem);
                        res |= DF_CHANGED;
                }
                if (info->projs[pn_Load_res]) {
                        exchange(info->projs[pn_Load_res], value);
                        res |= DF_CHANGED;
                }
                kill_node(load);
                reduce_adr_usage(ptr);
                return res;
        }

        /* Check if the address of this Load is used more than once.
         * If not, this Load cannot be removed in any case. */
        if (get_irn_n_edges(ptr) <= 1 && get_irn_n_edges(get_base_and_offset(ptr, &dummy)) <= 1)
                return res;

        /*
         * follow the memory chain as long as there are only Loads
         * and try to replace the current Load by a previous one.
         * Note that in unreachable loops we might reach the Load again
         * or fall into a cycle. We break such cycles using a special
         * visited flag.
         */
        INC_MASTER();
        res = follow_Mem_chain(load, skip_Proj(mem));
        return res;
}

/**
 * Check whether a value of mode new_mode would completely overwrite a value
 * of mode old_mode in memory.
 */
static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
{
        return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
}

/**
 * Check whether small is a part of large (starting at the same address).
 */
static int is_partially_same(ir_node *small, ir_node *large)
{
        ir_mode *sm = get_irn_mode(small);
        ir_mode *lm = get_irn_mode(large);

        /* FIXME: Check endianness */
        return is_Conv(small) && get_Conv_op(small) == large
            && get_mode_size_bytes(sm) < get_mode_size_bytes(lm)
            && get_mode_arithmetic(sm) == irma_twos_complement
            && get_mode_arithmetic(lm) == irma_twos_complement;
}

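/*
 * Example (illustrative): given "int x; char c = (char)x;", a Store of c
 * is a Conv of the wider stored value x, so is_partially_same(c, x) holds
 * and one of the two Stores to the same address can be removed below
 * (modulo the endianness FIXME above).
 */
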
/**
 * follow the memory chain as long as there are only Loads and alias-free
 * Stores.
 *
 * INC_MASTER() must be called before diving into the chain.
 */
static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr)
{
        unsigned res = 0;
        ldst_info_t *info = (ldst_info_t*)get_irn_link(store);
        ir_node *pred;
        ir_node *ptr = get_Store_ptr(store);
        ir_node *mem = get_Store_mem(store);
        ir_node *value = get_Store_value(store);
        ir_mode *mode  = get_irn_mode(value);
        ir_node *block = get_nodes_block(store);

        for (pred = curr; pred != store;) {
                ldst_info_t *pred_info = (ldst_info_t*)get_irn_link(pred);

                /*
                 * BEWARE: one might think that checking the modes is useless, because
                 * if the pointers are identical, they refer to the same object.
                 * This is only true in strongly typed languages, not in C, where the
                 * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
                 * However, if the size of the mode that is written is bigger than or
                 * equal to the size of the old one, the old value is completely
                 * overwritten and can be killed ...
                 */
                if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
                    get_nodes_block(pred) == block) {
                        /*
                         * a Store after a Store in the same Block -- a write after write.
                         */

                        /*
                         * We may remove the first Store, if the old value is completely
                         * overwritten or the old value is a part of the new value,
                         * and if it does not have an exception handler.
                         *
                         * TODO: What if both have the same exception handler?
                         */
                        if (get_Store_volatility(pred) != volatility_is_volatile
                                && !pred_info->projs[pn_Store_X_except]) {
                                ir_node *predvalue = get_Store_value(pred);
                                ir_mode *predmode  = get_irn_mode(predvalue);

                                if (is_completely_overwritten(predmode, mode)
                                        || is_partially_same(predvalue, value)) {
                                        DBG_OPT_WAW(pred, store);
                                        exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
                                        kill_node(pred);
                                        reduce_adr_usage(ptr);
                                        return DF_CHANGED;
                                }
                        }

                        /*
                         * We may remove the Store, if the old value already contains
                         * the new value, and if it does not have an exception handler.
                         *
                         * TODO: What if both have the same exception handler?
                         */
                        if (get_Store_volatility(store) != volatility_is_volatile
                                && !info->projs[pn_Store_X_except]) {
                                ir_node *predvalue = get_Store_value(pred);

                                if (is_partially_same(value, predvalue)) {
                                        DBG_OPT_WAW(pred, store);
                                        exchange(info->projs[pn_Store_M], mem);
                                        kill_node(store);
                                        reduce_adr_usage(ptr);
                                        return DF_CHANGED;
                                }
                        }
                } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
                           value == pred_info->projs[pn_Load_res]) {
                        /*
                         * a Store of a value just loaded from the same address
                         * -- a write after read.
                         * We may remove the Store, if it does not have an exception
                         * handler.
                         */
                        if (! info->projs[pn_Store_X_except]) {
                                DBG_OPT_WAR(store, pred);
                                exchange(info->projs[pn_Store_M], mem);
                                kill_node(store);
                                reduce_adr_usage(ptr);
                                return DF_CHANGED;
                        }
                }

                if (is_Store(pred)) {
                        /* check if we can pass through this store */
                        ir_alias_relation rel = get_alias_relation(
                                get_Store_ptr(pred),
                                get_irn_mode(get_Store_value(pred)),
                                ptr, mode);
                        /* if there might be an alias, we cannot pass this Store */
                        if (rel != ir_no_alias)
                                break;
                        pred = skip_Proj(get_Store_mem(pred));
                } else if (is_Load(pred)) {
                        ir_alias_relation rel = get_alias_relation(
                                get_Load_ptr(pred), get_Load_mode(pred),
                                ptr, mode);
                        if (rel != ir_no_alias)
                                break;

                        pred = skip_Proj(get_Load_mem(pred));
                } else {
                        /* follow only Load chains */
                        break;
                }

                /* check for cycles */
                if (NODE_VISITED(pred_info))
                        break;
                MARK_NODE(pred_info);
        }

        if (is_Sync(pred)) {
                int i;

                /* handle all Sync predecessors */
                for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
                        res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
                        if (res)
                                break;
                }
        }
        return res;
}

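/*
 * Example (illustrative): for "p->f = 1; p->f = 2;" with no exception
 * handlers, the walk above reaches the first Store from the second one's
 * memory chain and kills it as a write-after-write; similarly, storing a
 * value just loaded from p->f is removed as a write-after-read.
 */
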
/** find entity used as base for an address calculation */
static ir_entity *find_entity(ir_node *ptr)
{
        switch (get_irn_opcode(ptr)) {
        case iro_SymConst:
                return get_SymConst_entity(ptr);
        case iro_Sel: {
                ir_node *pred = get_Sel_ptr(ptr);
                if (get_irg_frame(get_irn_irg(ptr)) == pred)
                        return get_Sel_entity(ptr);

                return find_entity(pred);
        }
        case iro_Sub:
        case iro_Add: {
                ir_node *left = get_binop_left(ptr);
                ir_node *right;
                if (mode_is_reference(get_irn_mode(left)))
                        return find_entity(left);
                right = get_binop_right(ptr);
                if (mode_is_reference(get_irn_mode(right)))
                        return find_entity(right);
                return NULL;
        }
        default:
                return NULL;
        }
}

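/*
 * Example (illustrative): for an address like Add(SymConst(&g), Const(4))
 * the reference-mode operand is followed and the entity of "g" is
 * returned; for a Sel the frame entity is returned only if its base is
 * the frame pointer itself.
 */
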
/**
 * optimize a Store
 *
 * @param store  the Store node
 */
static unsigned optimize_store(ir_node *store)
{
        ir_node   *ptr;
        ir_node   *mem;
        ir_entity *entity;

        if (get_Store_volatility(store) == volatility_is_volatile)
                return 0;

        ptr    = get_Store_ptr(store);
        entity = find_entity(ptr);

        /* a store to an entity which is never read is unnecessary */
        if (entity != NULL && !(get_entity_usage(entity) & ir_usage_read)) {
                ldst_info_t *info = (ldst_info_t*)get_irn_link(store);
                if (info->projs[pn_Store_X_except] == NULL) {
                        DB((dbg, LEVEL_1, "  Killing useless %+F to never read entity %+F\n", store, entity));
                        exchange(info->projs[pn_Store_M], get_Store_mem(store));
                        kill_node(store);
                        reduce_adr_usage(ptr);
                        return DF_CHANGED;
                }
        }

        /* Check if the address of this Store is used more than once.
         * If not, this Store cannot be removed in any case. */
        if (get_irn_n_edges(ptr) <= 1)
                return 0;

        mem = get_Store_mem(store);

        /* follow the memory chain as long as there are only Loads */
        INC_MASTER();

        return follow_Mem_chain_for_Store(store, skip_Proj(mem));
}

/* check if a node has more than one real user. Keepalive edges do not count as
 * real users */
static bool has_multiple_users(const ir_node *node)
{
        unsigned real_users = 0;
        foreach_out_edge(node, edge) {
                ir_node *user = get_edge_src_irn(edge);
                if (is_End(user))
                        continue;
                ++real_users;
                if (real_users > 1)
                        return true;
        }
        return false;
}

1288 /**
1289  * walker, optimizes Phi after Stores to identical places:
1290  * Does the following optimization:
1291  * @verbatim
1292  *
1293  *   val1   val2   val3          val1  val2  val3
1294  *    |      |      |               \    |    /
1295  *  Store  Store  Store              \   |   /
1296  *      \    |    /                   PhiData
1297  *       \   |   /                       |
1298  *        \  |  /                      Store
1299  *          PhiM
1300  *
1301  * @endverbatim
1302  * This reduces the number of Stores and allows for predicated execution,
1303  * but it moves Stores towards the end of the function, which may be bad.
1304  *
1305  * This is only possible if the predecessor blocks have only one successor.
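      *
      * At the source level this corresponds to a rewrite like the following
      * (a purely illustrative example, not taken from any test case):
      * @verbatim
      *
      *   if (c) *p = a; else *p = b;    ==>    *p = c ? a : b;
      *
      * @endverbatim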
1306  */
1307 static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
1308 {
1309         int i, n;
1310         ir_node *store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
1311 #ifdef DO_CACHEOPT
1312         ir_node *old_store;
1313 #endif
1314         ir_mode *mode;
1315         ir_node **inM, **inD, **projMs;
1316         int *idx;
1317         dbg_info *db = NULL;
1318         ldst_info_t *info;
1319         block_info_t *bl_info;
1320         unsigned res = 0;
1321
1322         /* Must be a memory Phi */
1323         if (get_irn_mode(phi) != mode_M)
1324                 return 0;
1325
1326         n = get_Phi_n_preds(phi);
1327         if (n <= 0)
1328                 return 0;
1329
1330         /* the Phi's first predecessor must have only one real user */
1331         projM = get_Phi_pred(phi, 0);
1332         if (has_multiple_users(projM))
1333                 return 0;
1334
1335         store = skip_Proj(projM);
1336 #ifdef DO_CACHEOPT
1337         old_store = store;
1338 #endif
1339         if (!is_Store(store))
1340                 return 0;
1341
1342         block = get_nodes_block(store);
1343
1344         /* check if the block is post-dominated by the Phi block
1345            and has no exception exit */
1346         bl_info = (block_info_t*)get_irn_link(block);
1347         if (bl_info->flags & BLOCK_HAS_EXC)
1348                 return 0;
1349
1350         phi_block = get_nodes_block(phi);
1351         if (! block_strictly_postdominates(phi_block, block))
1352                 return 0;
1353
1354         /* this is the address of the store */
1355         ptr  = get_Store_ptr(store);
1356         mode = get_irn_mode(get_Store_value(store));
1357         info = (ldst_info_t*)get_irn_link(store);
1358         exc  = info->exc_block;
1359
1360         for (i = 1; i < n; ++i) {
1361                 ir_node *pred = get_Phi_pred(phi, i);
1362
1363                 if (has_multiple_users(pred))
1364                         return 0;
1365
1366                 pred = skip_Proj(pred);
1367                 if (!is_Store(pred))
1368                         return 0;
1369
1370                 if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
1371                         return 0;
1372
1373                 info = (ldst_info_t*)get_irn_link(pred);
1374
1375                 /* check if all Stores have the same exception flow */
1376                 if (exc != info->exc_block)
1377                         return 0;
1378
1379                 block = get_nodes_block(pred);
1380
1381                 /* check if the block is post-dominated by the Phi block
1382                    and has no exception exit. Note that block must be different
1383                    from the Phi block, else we would move a Store from the end
1384                    of a block to its start... */
1385                 bl_info = (block_info_t*)get_irn_link(block);
1386                 if (bl_info->flags & BLOCK_HAS_EXC)
1387                         return 0;
1388                 if (block == phi_block || ! block_postdominates(phi_block, block))
1389                         return 0;
1390         }
1391
1392         /*
1393          * When we reach this point, all predecessors of the Phi are Stores
1394          * to the same address with the same mode. That means that on every
1395          * path entering the Phi's block a Store is executed, so we can move
1396          * a single Store into the Phi's block:
1397          *
1398          *   val1    val2    val3          val1  val2  val3
1399          *    |       |       |               \    |    /
1400          *  Store   Store   Store              \   |   /
1401          *      \     |     /                   PhiData
1402          *       \    |    /                       |
1403          *        \   |   /                      Store
1404          *           PhiM
1405          *
1406          * This is only allowed if the predecessor blocks have only one successor.
1407          */
1408
1409         NEW_ARR_A(ir_node *, projMs, n);
1410         NEW_ARR_A(ir_node *, inM, n);
1411         NEW_ARR_A(ir_node *, inD, n);
1412         NEW_ARR_A(int, idx, n);
1413
1414         /* Prepare: collect all Store nodes. We must do this first
1415            because we otherwise may lose a Store when exchanging its
1416            memory Proj.
1417          */
1418         for (i = n - 1; i >= 0; --i) {
1419                 projMs[i] = get_Phi_pred(phi, i);
1420
1421                 ir_node *const store = get_Proj_pred(projMs[i]);
1422                 info  = (ldst_info_t*)get_irn_link(store);
1423
1424                 inM[i] = get_Store_mem(store);
1425                 inD[i] = get_Store_value(store);
1426                 idx[i] = info->exc_idx;
1427         }
1428         block = get_nodes_block(phi);
1429
1430         /* second step: create a new memory Phi */
1431         phiM = new_rd_Phi(get_irn_dbg_info(phi), block, n, inM, mode_M);
1432
1433         /* third step: create a new data Phi */
1434         phiD = new_rd_Phi(get_irn_dbg_info(phi), block, n, inD, mode);
1435
1436         /* rewire the memory Projs and kill the old Store nodes */
1437         for (i = n - 1; i >= 0; --i) {
1438                 ir_node *proj  = projMs[i];
1439
1440                 if (is_Proj(proj)) {
1441                         ir_node *store = get_Proj_pred(proj);
1442                         exchange(proj, inM[i]);
1443                         kill_node(store);
1444                 }
1445         }
1446
1447         /* fourth step: create the Store */
1448         store = new_rd_Store(db, block, phiM, ptr, phiD, cons_none);
1449 #ifdef DO_CACHEOPT
1450         co_set_irn_name(store, co_get_irn_ident(old_store));
1451 #endif
1452
1453         projM = new_rd_Proj(NULL, store, mode_M, pn_Store_M);
1454
1455         info = get_ldst_info(store, &wenv->obst);
1456         info->projs[pn_Store_M] = projM;
1457
1458         /* fifth step: repair exception flow */
1459         if (exc) {
1460                 ir_node *projX = new_rd_Proj(NULL, store, mode_X, pn_Store_X_except);
1461
1462                 info->projs[pn_Store_X_except] = projX;
1463                 info->exc_block                = exc;
1464                 info->exc_idx                  = idx[0];
1465
1466                 for (i = 0; i < n; ++i) {
1467                         set_Block_cfgpred(exc, idx[i], projX);
1468                 }
1469
1470                 if (n > 1) {
1471                         /* TODO: optimize the exception block, as some of its inputs are identical now */
1472                 }
1473
1474                 res |= CF_CHANGED;
1475         }
1476
1477         /* sixth step: replace old Phi */
1478         exchange(phi, projM);
1479
1480         return res | DF_CHANGED;
1481 }
1482
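     /**
      * Optimize a Conv of a Load result: if the Conv merely narrows the
      * loaded value, do the narrowing in the Load itself. A sketch of the
      * idea at the source level (illustrative only):
      * @verbatim
      *
      *   short s = (short)*int_ptr;   ==>   load only 16 bits (the address
      *                                      is adjusted on big-endian)
      *
      * @endverbatim
      */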
1483 static int optimize_conv_load(ir_node *conv)
1484 {
1485         ir_node *op = get_Conv_op(conv);
1486         if (!is_Proj(op))
1487                 return 0;
1488         /* only do it if we are the only user (otherwise the risk is too
1489          * great that we end up with two Loads instead of one) */
1490         if (has_multiple_users(op))
1491                 return 0;
1492         /* shrink the mode of the Load if possible */
1493         ir_node *load = get_Proj_pred(op);
1494         if (!is_Load(load))
1495                 return 0;
1496
1497         ir_mode *mode      = get_irn_mode(conv);
1498         ir_mode *load_mode = get_Load_mode(load);
1499         int      bits_diff
1500                 = get_mode_size_bits(load_mode) - get_mode_size_bits(mode);
1501         if (mode_is_float(load_mode) || mode_is_float(mode) || bits_diff < 0)
1502                 return 0;
1503
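             /* on big-endian targets the least significant bytes of a value
              * are stored at the higher addresses, so the Load address must be
              * advanced by the number of truncated bytes before shrinking */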
1504         if (be_get_backend_param()->byte_order_big_endian) {
1505                 if (bits_diff % 8 != 0)
1506                         return 0;
1507                 ir_graph *irg      = get_irn_irg(conv);
1508                 ir_node  *ptr      = get_Load_ptr(load);
1509                 ir_mode  *ptr_mode = get_irn_mode(ptr);
1510                 ir_node  *delta    = new_r_Const_long(irg, ptr_mode, bits_diff/8);
1511                 ir_node  *block    = get_nodes_block(load);
1512                 ir_node  *add      = new_r_Add(block, ptr, delta, ptr_mode);
1513                 set_Load_ptr(load, add);
1514         }
1515         set_Load_mode(load, mode);
1516         set_irn_mode(op, mode);
1517         exchange(conv, op);
1518         return DF_CHANGED;
1519 }
1520
1521 /**
1522  * walker, do the optimizations
1523  */
1524 static void do_load_store_optimize(ir_node *n, void *env)
1525 {
1526         walk_env_t *wenv = (walk_env_t*)env;
1527
1528         switch (get_irn_opcode(n)) {
1529
1530         case iro_Load:
1531                 wenv->changes |= optimize_load(n);
1532                 break;
1533
1534         case iro_Store:
1535                 wenv->changes |= optimize_store(n);
1536                 break;
1537
1538         case iro_Phi:
1539                 wenv->changes |= optimize_phi(n, wenv);
1540                 break;
1541
1542         case iro_Conv:
1543                 wenv->changes |= optimize_conv_load(n);
1544                 break;
1545
1546         default:
1547                 break;
1548         }
1549 }
1550
1551 /** A strongly connected component (SCC). */
1552 typedef struct scc {
1553         ir_node *head;      /**< the head of the list */
1554 } scc;
1555
1556 /** A node entry. */
1557 typedef struct node_entry {
1558         unsigned DFSnum;    /**< the DFS number of this node */
1559         unsigned low;       /**< the low number of this node */
1560         int      in_stack;  /**< flag, set if the node is on the stack */
1561         ir_node  *next;     /**< link to the next node in the same scc */
1562         scc      *pscc;     /**< the scc of this node */
1563         unsigned POnum;     /**< the post order number for blocks */
1564 } node_entry;
1565
1566 /** A loop entry. */
1567 typedef struct loop_env {
1568         ir_nodehashmap_t map;
1569         struct obstack   obst;
1570         ir_node          **stack;      /**< the node stack */
1571         size_t           tos;          /**< top-of-stack index */
1572         unsigned         nextDFSnum;   /**< the current DFS number */
1573         unsigned         POnum;        /**< current post order number */
1574
1575         unsigned         changes;      /**< a bitmask of graph changes */
1576 } loop_env;
1577
1578 /**
1579  * Gets the node_entry of a node.
1580  */
1581 static node_entry *get_irn_ne(ir_node *irn, loop_env *env)
1582 {
1583         node_entry *e = ir_nodehashmap_get(node_entry, &env->map, irn);
1584
1585         if (e == NULL) {
1586                 e = OALLOC(&env->obst, node_entry);
1587                 memset(e, 0, sizeof(*e));
1588                 ir_nodehashmap_insert(&env->map, irn, e);
1589         }
1590         return e;
1591 }
1592
1593 /**
1594  * Push a node onto the stack.
1595  *
1596  * @param env   the loop environment
1597  * @param n     the node to push
1598  */
1599 static void push(loop_env *env, ir_node *n)
1600 {
1601         node_entry *e;
1602
1603         if (env->tos == ARR_LEN(env->stack)) {
1604                 size_t nlen = ARR_LEN(env->stack) * 2;
1605                 ARR_RESIZE(ir_node *, env->stack, nlen);
1606         }
1607         env->stack[env->tos++] = n;
1608         e = get_irn_ne(n, env);
1609         e->in_stack = 1;
1610 }
1611
1612 /**
1613  * pop a node from the stack
1614  *
1615  * @param env   the loop environment
1616  *
1617  * @return  The topmost node
1618  */
1619 static ir_node *pop(loop_env *env)
1620 {
1621         ir_node *n = env->stack[--env->tos];
1622         node_entry *e = get_irn_ne(n, env);
1623
1624         e->in_stack = 0;
1625         return n;
1626 }
1627
1628 /**
1629  * Check if irn is a region constant.
1630  * The block of irn must strictly dominate the header block.
1631  *
1632  * @param irn           the node to check
1633  * @param header_block  the header block of the induction variable
1634  */
1635 static int is_rc(ir_node *irn, ir_node *header_block)
1636 {
1637         ir_node *block = get_nodes_block(irn);
1638
1639         return (block != header_block) && block_dominates(block, header_block);
1640 }
1641
1642 typedef struct phi_entry phi_entry;
1643 struct phi_entry {
1644         ir_node   *phi;    /**< A phi with a region const memory. */
1645         int       pos;     /**< The position of the region const memory */
1646         ir_node   *load;   /**< the newly created load for this phi */
1647         phi_entry *next;
1648 };
1649
1650 /**
1651  * An entry in the avail set.
1652  */
1653 typedef struct avail_entry_t {
1654         ir_node *ptr;   /**< the address pointer */
1655         ir_mode *mode;  /**< the load mode */
1656         ir_node *load;  /**< the associated Load */
1657 } avail_entry_t;
1658
1659 /**
1660  * Compare two avail entries.
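      *
      * @return zero iff both entries describe the same (address, mode) pair,
      *         following the usual set compare convention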
1661  */
1662 static int cmp_avail_entry(const void *elt, const void *key, size_t size)
1663 {
1664         const avail_entry_t *a = (const avail_entry_t*)elt;
1665         const avail_entry_t *b = (const avail_entry_t*)key;
1666         (void) size;
1667
1668         return a->ptr != b->ptr || a->mode != b->mode;
1669 }
1670
1671 /**
1672  * Calculate the hash value of an avail entry.
1673  */
1674 static unsigned hash_cache_entry(const avail_entry_t *entry)
1675 {
1676         return get_irn_idx(entry->ptr) * 9 + hash_ptr(entry->mode);
1677 }
1678
1679 /**
1680  * Move Loads out of loops if possible.
1681  *
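      * A Load from a loop-invariant (global) address that cannot alias any
      * Store inside the loop is executed once in front of the loop instead.
      * At the source level (illustrative only):
      * @verbatim
      *
      *   while (c) { x += g; }   ==>   t = g; while (c) { x += t; }
      *
      * @endverbatim
      *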
1682  * @param pscc   the loop described by an SCC
1683  * @param env    the loop environment
1684  */
1685 static void move_loads_out_of_loops(scc *pscc, loop_env *env)
1686 {
1687         ir_node   *phi, *load, *next, *other, *next_other;
1688         int       j;
1689         phi_entry *phi_list = NULL;
1690         set       *avail;
1691
1692         /* collect all outer memories */
1693         for (phi = pscc->head; phi != NULL; phi = next) {
1694                 node_entry *ne = get_irn_ne(phi, env);
1695                 next = ne->next;
1696
1697                 /* check all memory Phi's */
1698                 if (! is_Phi(phi))
1699                         continue;
1700
1701         assert(get_irn_mode(phi) == mode_M && "DFS returned a non-memory Phi");
1702
1703                 for (j = get_irn_arity(phi) - 1; j >= 0; --j) {
1704                         ir_node    *pred = get_irn_n(phi, j);
1705                         node_entry *pe   = get_irn_ne(pred, env);
1706
1707                         if (pe->pscc != ne->pscc) {
1708                                 /* not in the same SCC, so this input is a region constant */
1709                                 phi_entry *pent = OALLOC(&env->obst, phi_entry);
1710
1711                                 pent->phi  = phi;
1712                                 pent->pos  = j;
1713                                 pent->next = phi_list;
1714                                 phi_list = pent;
1715                         }
1716                 }
1717         }
1718         /* no Phis no fun */
1719         assert(phi_list != NULL && "DFS found a loop without Phi");
1720
1721         /* for now, we cannot handle more than one region-constant input (only reducible control flow) */
1722         if (phi_list->next != NULL)
1723                 return;
1724
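             /* the avail set caches Loads already created in a predecessor
              * block, so several hoisted Loads from the same address can share
              * one new Load */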
1725         avail = new_set(cmp_avail_entry, 8);
1726
1727         for (load = pscc->head; load; load = next) {
1728                 ir_mode *load_mode;
1729                 node_entry *ne = get_irn_ne(load, env);
1730                 next = ne->next;
1731
1732                 if (is_Load(load)) {
1733                         ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
1734                         ir_node     *ptr = get_Load_ptr(load);
1735
1736                         /* for now, we cannot handle Loads with exceptions */
1737                         if (info->projs[pn_Load_res] == NULL || info->projs[pn_Load_X_regular] != NULL || info->projs[pn_Load_X_except] != NULL)
1738                                 continue;
1739
1740                         /* for now, we can only move Load(Global) */
1741                         if (! is_SymConst_addr_ent(ptr))
1742                                 continue;
1743                         load_mode = get_Load_mode(load);
1744                         for (other = pscc->head; other != NULL; other = next_other) {
1745                                 node_entry *oe = get_irn_ne(other, env);
1746                                 next_other = oe->next;
1747
1748                                 if (is_Store(other)) {
1749                                         ir_alias_relation rel = get_alias_relation(
1750                                                 get_Store_ptr(other),
1751                                                 get_irn_mode(get_Store_value(other)),
1752                                                 ptr, load_mode);
1753                                         /* if there might be an alias, we cannot pass this Store */
1754                                         if (rel != ir_no_alias)
1755                                                 break;
1756                                 }
1757                                 /* only Phis and pure Calls are allowed here, so ignore them */
1758                         }
1759                         if (other == NULL) {
1760                                 ldst_info_t *ninfo = NULL;
1761                                 phi_entry   *pe;
1762                                 dbg_info    *db;
1763
1764                                 /* yep, no aliasing Store found, Load can be moved */
1765                                 DB((dbg, LEVEL_1, "  Found a Load that could be moved: %+F\n", load));
1766
1767                                 db   = get_irn_dbg_info(load);
1768                                 for (pe = phi_list; pe != NULL; pe = pe->next) {
1769                                         int     pos   = pe->pos;
1770                                         ir_node *phi  = pe->phi;
1771                                         ir_node *blk  = get_nodes_block(phi);
1772                                         ir_node *pred = get_Block_cfgpred_block(blk, pos);
1773                                         ir_node *irn, *mem;
1774                                         avail_entry_t entry, *res;
1775
1776                                         entry.ptr  = ptr;
1777                                         entry.mode = load_mode;
1778                                         res = set_find(avail_entry_t, avail, &entry, sizeof(entry), hash_cache_entry(&entry));
1779                                         if (res != NULL) {
1780                                                 irn = res->load;
1781                                         } else {
1782                                                 irn = new_rd_Load(db, pred, get_Phi_pred(phi, pos), ptr, load_mode, cons_none);
1783                                                 entry.load = irn;
1784                                                 (void)set_insert(avail_entry_t, avail, &entry, sizeof(entry), hash_cache_entry(&entry));
1785                                                 DB((dbg, LEVEL_1, "  Created %+F in %+F\n", irn, pred));
1786                                         }
1787                                         pe->load = irn;
1788                                         ninfo = get_ldst_info(irn, &env->obst);
1789
1790                                         ninfo->projs[pn_Load_M] = mem = new_r_Proj(irn, mode_M, pn_Load_M);
1791                                         if (res == NULL) {
1792                                                 /* set the Phi pred only for a newly created Load; if
1793                                                  * irn came from the cache, other Loads may already sit
1794                                                  * between the Phi and irn */
1795                                                 set_Phi_pred(phi, pos, mem);
1796                                         }
1797
1798                                         ninfo->projs[pn_Load_res] = new_r_Proj(irn, load_mode, pn_Load_res);
1799                                 }
1800
1801                                 /* now kill the old Load */
1802                                 exchange(info->projs[pn_Load_M], get_Load_mem(load));
1803                                 exchange(info->projs[pn_Load_res], ninfo->projs[pn_Load_res]);
1804
1805                                 env->changes |= DF_CHANGED;
1806                         }
1807                 }
1808         }
1809         del_set(avail);
1810 }
1811
1812 /**
1813  * Process a loop SCC.
1814  *
1815  * @param pscc  the SCC
1816  * @param env   the loop environment
1817  */
1818 static void process_loop(scc *pscc, loop_env *env)
1819 {
1820         ir_node *irn, *next, *header = NULL;
1821         node_entry *b, *h = NULL;
1822         int j, only_phi, num_outside, process = 0;
1823         ir_node *out_rc;
1824
1825         /* find the header block for this scc: the block with the highest post-order number */
1826         for (irn = pscc->head; irn; irn = next) {
1827                 node_entry *e = get_irn_ne(irn, env);
1828                 ir_node *block = get_nodes_block(irn);
1829
1830                 next = e->next;
1831                 b = get_irn_ne(block, env);
1832
1833                 if (header != NULL) {
1834                         if (h->POnum < b->POnum) {
1835                                 header = block;
1836                                 h      = b;
1837                         }
1838                 } else {
1839                         header = block;
1840                         h      = b;
1841                 }
1842         }
1843
1844         /* check if this scc contains only Phi, Load or Store nodes */
1845         only_phi    = 1;
1846         num_outside = 0;
1847         out_rc      = NULL;
1848         for (irn = pscc->head; irn; irn = next) {
1849                 node_entry *e = get_irn_ne(irn, env);
1850
1851                 next = e->next;
1852                 switch (get_irn_opcode(irn)) {
1853                 case iro_Call:
1854                         if (is_Call_pure(irn)) {
1855                                 /* pure calls can be treated like loads */
1856                                 only_phi = 0;
1857                                 break;
1858                         }
1859                         /* non-pure calls must be handled like may-alias Stores */
1860                         goto fail;
1861                 case iro_CopyB:
1862                         /* cannot handle CopyB yet */
1863                         goto fail;
1864                 case iro_Load:
1865                         process = 1;
1866                         if (get_Load_volatility(irn) == volatility_is_volatile) {
1867                                 /* cannot handle loops with volatile Loads */
1868                                 goto fail;
1869                         }
1870                         only_phi = 0;
1871                         break;
1872                 case iro_Store:
1873                         if (get_Store_volatility(irn) == volatility_is_volatile) {
1874                                 /* cannot handle loops with volatile Stores */
1875                                 goto fail;
1876                         }
1877                         only_phi = 0;
1878                         break;
1879                 case iro_Phi:
1880                         for (j = get_irn_arity(irn) - 1; j >= 0; --j) {
1881                                 ir_node *pred  = get_irn_n(irn, j);
1882                                 node_entry *pe = get_irn_ne(pred, env);
1883
1884                                 if (pe->pscc != e->pscc) {
1885                                         /* not in the same SCC, must be a region const */
1886                                         if (! is_rc(pred, header)) {
1887                                                 /* not a memory loop */
1888                                                 goto fail;
1889                                         }
1890                                         if (out_rc == NULL) {
1891                                                 /* first region constant */
1892                                                 out_rc = pred;
1893                                                 ++num_outside;
1894                                         } else if (out_rc != pred) {
1895                                                 /* another region constant */
1896                                                 ++num_outside;
1897                                         }
1898                                 }
1899                         }
1900                         break;
1901                 default:
1902                         only_phi = 0;
1903                         break;
1904                 }
1905         }
1906         if (! process)
1907                 goto fail;
1908
1909         /* found a memory loop */
1910         DB((dbg, LEVEL_2, "  Found a memory loop:\n  "));
1911         if (only_phi && num_outside == 1) {
1912                 /* a phi cycle with only one real predecessor can be collapsed */
1913                 DB((dbg, LEVEL_2, "  Found an USELESS Phi cycle:\n  "));
1914
1915                 for (irn = pscc->head; irn; irn = next) {
1916                         node_entry *e = get_irn_ne(irn, env);
1917                         next = e->next;
1918                         exchange(irn, out_rc);
1919                 }
1920                 env->changes |= DF_CHANGED;
1921                 return;
1922         }
1923
1924 #ifdef DEBUG_libfirm
1925         for (irn = pscc->head; irn; irn = next) {
1926                 node_entry *e = get_irn_ne(irn, env);
1927                 next = e->next;
1928                 DB((dbg, LEVEL_2, " %+F,", irn));
1929         }
1930         DB((dbg, LEVEL_2, "\n"));
1931 #endif
1932         move_loads_out_of_loops(pscc, env);
1933
1934 fail:
1935         ;
1936 }
1937
1938 /**
1939  * Process an SCC.
1940  *
1941  * @param pscc  the SCC
1942  * @param env   the loop environment
1943  */
1944 static void process_scc(scc *pscc, loop_env *env)
1945 {
1946         ir_node *head = pscc->head;
1947         node_entry *e = get_irn_ne(head, env);
1948
1949 #ifdef DEBUG_libfirm
1950         {
1951                 ir_node *irn, *next;
1952
1953                 DB((dbg, LEVEL_4, " SCC at %p:\n ", pscc));
1954                 for (irn = pscc->head; irn; irn = next) {
1955                         node_entry *e = get_irn_ne(irn, env);
1956
1957                         next = e->next;
1958
1959                         DB((dbg, LEVEL_4, " %+F,", irn));
1960                 }
1961                 DB((dbg, LEVEL_4, "\n"));
1962         }
1963 #endif
1964
1965         if (e->next != NULL) {
1966                 /* this SCC has more than one member */
1967                 process_loop(pscc, env);
1968         }
1969 }
1970
1971 /**
1972  * Do Tarjan's SCC algorithm and drive load/store optimization.
1973  *
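      * Only memory predecessors are followed (through Phi, Sync, fragile
      * operations and Proj), so the SCCs found are exactly the memory loops.
      *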
1974  * @param irn  start at this node
1975  * @param env  the loop environment
1976  */
1977 static void dfs(ir_node *irn, loop_env *env)
1978 {
1979         int i, n;
1980         node_entry *node = get_irn_ne(irn, env);
1981
1982         mark_irn_visited(irn);
1983
1984         node->DFSnum = env->nextDFSnum++;
1985         node->low    = node->DFSnum;
1986         push(env, irn);
1987
1988         /* handle preds */
1989         if (is_Phi(irn) || is_Sync(irn)) {
1990                 n = get_irn_arity(irn);
1991                 for (i = 0; i < n; ++i) {
1992                         ir_node *pred = get_irn_n(irn, i);
1993                         node_entry *o = get_irn_ne(pred, env);
1994
1995                         if (!irn_visited(pred)) {
1996                                 dfs(pred, env);
1997                                 node->low = MIN(node->low, o->low);
1998                         }
1999                         if (o->DFSnum < node->DFSnum && o->in_stack)
2000                                 node->low = MIN(o->DFSnum, node->low);
2001                 }
2002         } else if (is_fragile_op(irn)) {
2003                 ir_node *pred = get_memop_mem(irn);
2004                 node_entry *o = get_irn_ne(pred, env);
2005
2006                 if (!irn_visited(pred)) {
2007                         dfs(pred, env);
2008                         node->low = MIN(node->low, o->low);
2009                 }
2010                 if (o->DFSnum < node->DFSnum && o->in_stack)
2011                         node->low = MIN(o->DFSnum, node->low);
2012         } else if (is_Proj(irn)) {
2013                 ir_node *pred = get_Proj_pred(irn);
2014                 node_entry *o = get_irn_ne(pred, env);
2015
2016                 if (!irn_visited(pred)) {
2017                         dfs(pred, env);
2018                         node->low = MIN(node->low, o->low);
2019                 }
2020                 if (o->DFSnum < node->DFSnum && o->in_stack)
2021                         node->low = MIN(o->DFSnum, node->low);
2022         } else {
2023                 /* IGNORE predecessors of all other node kinds */
2024         }
2026
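             /* irn is the root of an SCC iff its low link equals its own DFS
              * number: pop all nodes up to and including irn off the stack,
              * they form one SCC */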
2027         if (node->low == node->DFSnum) {
2028                 scc *pscc = OALLOC(&env->obst, scc);
2029                 ir_node *x;
2030
2031                 pscc->head = NULL;
2032                 do {
2033                         node_entry *e;
2034
2035                         x = pop(env);
2036                         e = get_irn_ne(x, env);
2037                         e->pscc    = pscc;
2038                         e->next    = pscc->head;
2039                         pscc->head = x;
2040                 } while (x != irn);
2041
2042                 process_scc(pscc, env);
2043         }
2044 }
2045
2046 /**
2047  * Do the DFS on the memory edges of a graph.
2048  *
2049  * @param irg  the graph to process
2050  * @param env  the loop environment
2051  */
2052 static void do_dfs(ir_graph *irg, loop_env *env)
2053 {
2054         ir_node  *endblk, *end;
2055         int      i;
2056
2057         inc_irg_visited(irg);
2058
2059         /* visit all memory nodes */
2060         endblk = get_irg_end_block(irg);
2061         for (i = get_Block_n_cfgpreds(endblk) - 1; i >= 0; --i) {
2062                 ir_node *pred = get_Block_cfgpred(endblk, i);
2063
2064                 pred = skip_Proj(pred);
2065                 if (is_Return(pred)) {
2066                         dfs(get_Return_mem(pred), env);
2067                 } else if (is_Raise(pred)) {
2068                         dfs(get_Raise_mem(pred), env);
2069                 } else if (is_fragile_op(pred)) {
2070                         dfs(get_memop_mem(pred), env);
2071                 } else if (is_Bad(pred)) {
2072                         /* ignore non-optimized block predecessor */
2073                 } else {
2074                         assert(0 && "Unknown EndBlock predecessor");
2075                 }
2076         }
2077
2078         /* visit the keep-alives */
2079         end = get_irg_end(irg);
2080         for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
2081                 ir_node *ka = get_End_keepalive(end, i);
2082
2083                 if (is_Phi(ka) && !irn_visited(ka))
2084                         dfs(ka, env);
2085         }
2086 }
2087
2088 /**
2089  * Optimize Loads/Stores in loops.
2090  *
2091  * @param irg  the graph
2092  */
2093 static int optimize_loops(ir_graph *irg)
2094 {
2095         loop_env env;
2096
2097         env.stack         = NEW_ARR_F(ir_node *, 128);
2098         env.tos           = 0;
2099         env.nextDFSnum    = 0;
2100         env.POnum         = 0;
2101         env.changes       = 0;
2102         ir_nodehashmap_init(&env.map);
2103         obstack_init(&env.obst);
2104
2105         /* calculate the SCC's and drive loop optimization. */
2106         do_dfs(irg, &env);
2107
2108         DEL_ARR_F(env.stack);
2109         obstack_free(&env.obst, NULL);
2110         ir_nodehashmap_destroy(&env.map);
2111
2112         return env.changes;
2113 }
2114
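     /**
      * Run the load/store optimizations on a graph: redundant Load/Store
      * removal along memory chains, removal of Stores to never-read entities,
      * Store/Phi fusion, Conv/Load narrowing and loop-invariant Load motion.
      */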
2115 void optimize_load_store(ir_graph *irg)
2116 {
2117         walk_env_t env;
2118
2119         assure_irg_properties(irg,
2120                 IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE
2121                 | IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES
2122                 | IR_GRAPH_PROPERTY_NO_CRITICAL_EDGES
2123                 | IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
2124                 | IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
2125
2126         FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");
2127
2128         assert(get_irg_pinned(irg) != op_pin_state_floats &&
2129                 "LoadStore optimization needs pinned graph");
2130
2131         if (get_opt_alias_analysis()) {
2132                 assure_irp_globals_entity_usage_computed();
2133         }
2134
2135         obstack_init(&env.obst);
2136         env.changes = 0;
2137
2138         /* init the links, then collect Loads/Stores/Proj's in lists */
2139         master_visited = 0;
2140         irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);
2141
2142         /* now we have collected enough information, optimize */
2143         irg_walk_graph(irg, NULL, do_load_store_optimize, &env);
2144
2145         env.changes |= optimize_loops(irg);
2146
2147         obstack_free(&env.obst, NULL);
2148
2149         confirm_irg_properties(irg,
2150                 env.changes
2151                 ? env.changes & CF_CHANGED
2152                         ? IR_GRAPH_PROPERTIES_NONE
2153                         : IR_GRAPH_PROPERTIES_CONTROL_FLOW
2154                 : IR_GRAPH_PROPERTIES_ALL);
2155 }
2156
2157 ir_graph_pass_t *optimize_load_store_pass(const char *name)
2158 {
2159         return def_graph_pass(name ? name : "ldst", optimize_load_store);
2160 }