ir/opt/ldstopt.c
/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Load/Store optimizations.
 * @author  Michael Beck
 * @version $Id$
 */
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#include <string.h>

#include "iroptimize.h"
#include "irnode_t.h"
#include "irgraph_t.h"
#include "irmode_t.h"
#include "iropt_t.h"
#include "ircons_t.h"
#include "irgmod.h"
#include "irgwalk.h"
#include "irvrfy.h"
#include "tv_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "irflag_t.h"
#include "array.h"
#include "irhooks.h"
#include "iredges.h"
#include "irtools.h"
#include "opt_polymorphy.h"
#include "irmemory.h"
#include "xmalloc.h"
#include "irphase_t.h"
#include "irgopt.h"
#include "debug.h"

/** The debug handle. */
DEBUG_ONLY(static firm_dbg_module_t *dbg;)

#ifdef DO_CACHEOPT
#include "cacheopt/cachesim.h"
#endif

#undef IMAX
#define IMAX(a,b)       ((a) > (b) ? (a) : (b))

#define MAX_PROJ        IMAX(IMAX(pn_Load_max, pn_Store_max), pn_Call_max)

enum changes_t {
	DF_CHANGED = 1,       /**< data flow changed */
	CF_CHANGED = 2,       /**< control flow changed */
};

/**
 * walker environment
 */
typedef struct _walk_env_t {
	struct obstack obst;          /**< obstack for the link info structs */
	unsigned changes;             /**< a bitmask of graph changes */
} walk_env_t;

/** A Load/Store info. */
typedef struct _ldst_info_t {
	ir_node  *projs[MAX_PROJ];    /**< list of Proj's of this node */
	ir_node  *exc_block;          /**< the exception block if available */
	int      exc_idx;             /**< predecessor index in the exception block */
	unsigned visited;             /**< visited counter for breaking loops */
} ldst_info_t;

/**
 * flags for control flow.
 */
enum block_flags_t {
	BLOCK_HAS_COND = 1,      /**< Block has conditional control flow */
	BLOCK_HAS_EXC  = 2       /**< Block has exceptional control flow */
};

/**
 * a Block info.
 */
typedef struct _block_info_t {
	unsigned flags;               /**< flags for the block */
} block_info_t;

/** The master visited counter for loop detection. */
static unsigned master_visited = 0;

#define INC_MASTER()       ++master_visited
#define MARK_NODE(info)    (info)->visited = master_visited
#define NODE_VISITED(info) (info)->visited >= master_visited

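/*
 * Illustrative usage of the visited counter (this is the pattern used by
 * the memory chain walkers below): each walk first bumps the master
 * counter, so marks left behind by earlier walks compare as "not visited":
 *
 *     INC_MASTER();
 *     for (...) {
 *         if (NODE_VISITED(pred_info))
 *             break;              // cycle detected, stop the walk
 *         MARK_NODE(pred_info);
 *     }
 */
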
/**
 * get the Load/Store info of a node
 */
static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst) {
	ldst_info_t *info = get_irn_link(node);

	if (! info) {
		info = obstack_alloc(obst, sizeof(*info));
		memset(info, 0, sizeof(*info));
		set_irn_link(node, info);
	}
	return info;
}  /* get_ldst_info */

/**
 * get the Block info of a node
 */
static block_info_t *get_block_info(ir_node *node, struct obstack *obst) {
	block_info_t *info = get_irn_link(node);

	if (! info) {
		info = obstack_alloc(obst, sizeof(*info));
		memset(info, 0, sizeof(*info));
		set_irn_link(node, info);
	}
	return info;
}  /* get_block_info */

/**
 * update the projection info for a Load/Store
 */
static unsigned update_projs(ldst_info_t *info, ir_node *proj)
{
	long nr = get_Proj_proj(proj);

	assert(0 <= nr && nr < MAX_PROJ && "Wrong proj from LoadStore");

	if (info->projs[nr]) {
		/* there is already one, do CSE */
		exchange(proj, info->projs[nr]);
		return DF_CHANGED;
	}
	else {
		info->projs[nr] = proj;
		return 0;
	}
}  /* update_projs */

/**
 * update the exception block info for a Load/Store node.
 *
 * @param info   the load/store info struct
 * @param block  the exception handler block for this load/store
 * @param pos    the control flow input of the block
 */
static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
{
	assert(info->exc_block == NULL && "more than one exception block found");

	info->exc_block = block;
	info->exc_idx   = pos;
	return 0;
}  /* update_exc */

/** Return the number of uses of an address node */
#define get_irn_n_uses(adr)     get_irn_n_edges(adr)

/**
 * walker, collects all Load/Store/Proj nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
{
	ir_opcode   opcode = get_irn_opcode(node);
	ir_node     *pred, *blk, *pred_blk;
	ldst_info_t *ldst_info;
	walk_env_t  *wenv = env;

	if (opcode == iro_Proj) {
		pred   = get_Proj_pred(node);
		opcode = get_irn_opcode(pred);

		if (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call) {
			ldst_info = get_ldst_info(pred, &wenv->obst);

			wenv->changes |= update_projs(ldst_info, node);

			/*
			 * Place the Projs in the same block as their
			 * predecessor Load/Store/Call. This is always OK and
			 * prevents a "non-SSA" form after optimizations if
			 * the Proj is in the wrong block.
			 */
			blk      = get_nodes_block(node);
			pred_blk = get_nodes_block(pred);
			if (blk != pred_blk) {
				wenv->changes |= DF_CHANGED;
				set_nodes_block(node, pred_blk);
			}
		}
	} else if (opcode == iro_Block) {
		int i;

		for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
			ir_node      *pred_block, *proj;
			block_info_t *bl_info;
			int          is_exc = 0;

			pred = proj = get_Block_cfgpred(node, i);

			if (is_Proj(proj)) {
				pred   = get_Proj_pred(proj);
				is_exc = get_Proj_proj(proj) == pn_Generic_X_except;
			}

			/* ignore Bad predecessors, they will be removed later */
			if (is_Bad(pred))
				continue;

			pred_block = get_nodes_block(pred);
			bl_info    = get_block_info(pred_block, &wenv->obst);

			if (is_fragile_op(pred) && is_exc)
				bl_info->flags |= BLOCK_HAS_EXC;
			else if (is_irn_forking(pred))
				bl_info->flags |= BLOCK_HAS_COND;

			opcode = get_irn_opcode(pred);
			if (is_exc && (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call)) {
				ldst_info = get_ldst_info(pred, &wenv->obst);

				wenv->changes |= update_exc(ldst_info, node, i);
			}
		}
	}
}  /* collect_nodes */

/**
 * Returns an entity if the address ptr points to a constant one.
 *
 * @param ptr  the address
 *
 * @return an entity or NULL
 */
static ir_entity *find_constant_entity(ir_node *ptr)
{
	for (;;) {
		if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
			ir_entity *ent = get_SymConst_entity(ptr);
			if (variability_constant == get_entity_variability(ent))
				return ent;
			return NULL;
		} else if (is_Sel(ptr)) {
			ir_entity *ent = get_Sel_entity(ptr);
			ir_type   *tp  = get_entity_owner(ent);

			/* Do not fiddle with polymorphism. */
			if (is_Class_type(get_entity_owner(ent)) &&
				((get_entity_n_overwrites(ent)    != 0) ||
				(get_entity_n_overwrittenby(ent) != 0)   ) )
				return NULL;

			if (is_Array_type(tp)) {
				/* check bounds */
				int i, n;

				for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
					ir_node *bound;
					tarval *tlower, *tupper;
					ir_node *index = get_Sel_index(ptr, i);
					tarval *tv     = computed_value(index);

					/* check if the index is constant */
					if (tv == tarval_bad)
						return NULL;

					bound  = get_array_lower_bound(tp, i);
					tlower = computed_value(bound);
					bound  = get_array_upper_bound(tp, i);
					tupper = computed_value(bound);

					if (tlower == tarval_bad || tupper == tarval_bad)
						return NULL;

					if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
						return NULL;
					if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
						return NULL;

					/* ok, bounds check finished */
				}
			}

			if (variability_constant == get_entity_variability(ent))
				return ent;

			/* try next */
			ptr = get_Sel_ptr(ptr);
		} else
			return NULL;
	}
}  /* find_constant_entity */

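/*
 * Illustrative example: for C-like source
 *
 *     static const int tab[4] = { 1, 2, 3, 5 };
 *     ... tab[2] ...
 *
 * the address is a Sel on a SymConst for tab with the constant index 2;
 * the index is within bounds and the entity has variability_constant, so
 * find_constant_entity() returns the entity for tab.
 */
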
/**
 * Return the selection index of a Sel node from dimension dim
 */
static long get_Sel_array_index_long(ir_node *n, int dim) {
	ir_node *index = get_Sel_index(n, dim);
	assert(is_Const(index));
	return get_tarval_long(get_Const_tarval(index));
}  /* get_Sel_array_index_long */

/**
 * Returns the accessed component graph path for a
 * node computing an address.
 *
 * @param ptr    the node computing the address
 * @param depth  current depth in steps upward from the root
 *               of the address
 */
static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
	compound_graph_path *res = NULL;
	ir_entity           *root, *field;
	int                 path_len, pos;

	if (is_SymConst(ptr)) {
		/* a SymConst. If the depth is 0, this is an access to a global
		 * entity and we don't need a component path, else we know
		 * at least its length.
		 */
		assert(get_SymConst_kind(ptr) == symconst_addr_ent);
		root = get_SymConst_entity(ptr);
		res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
	} else {
		assert(is_Sel(ptr));
		/* it's a Sel, go up until we find the root */
		res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);

		/* fill in the step of the path at the current position */
		field    = get_Sel_entity(ptr);
		path_len = get_compound_graph_path_length(res);
		pos      = path_len - depth - 1;
		set_compound_graph_path_node(res, pos, field);

		if (is_Array_type(get_entity_owner(field))) {
			assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
			set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
		}
	}
	return res;
}  /* rec_get_accessed_path */

/**
 * Returns an access path or NULL.  The access path is only
 * valid if the graph is in phase_high and _no_ address computation is used.
 */
static compound_graph_path *get_accessed_path(ir_node *ptr) {
	return rec_get_accessed_path(ptr, 0);
}  /* get_accessed_path */

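/*
 * Illustrative example: for an address built as
 *
 *     Sel(b, Sel(a, SymConst(s)))
 *
 * get_accessed_path() returns the component path (a, b); a Sel whose
 * entity's owner is an array type additionally records its single,
 * constant index at the corresponding path position. Such a path can
 * then be fed into get_compound_ent_value_by_path() (see optimize_load).
 */
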
typedef struct path_entry {
	ir_entity         *ent;
	struct path_entry *next;
	long              index;
} path_entry;

static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) {
	path_entry       entry, *p;
	ir_entity        *ent;
	ir_initializer_t *initializer;

	entry.next      = next;

	if (is_Sel(ptr)) {
		ir_entity *field;
		ir_type   *tp;

		entry.ent = field = get_Sel_entity(ptr);
		tp = get_entity_owner(field);
		if (is_Array_type(tp)) {
			assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
			entry.index     = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
		} else {
			int i, n_members = get_compound_n_members(tp);
			for (i = 0; i < n_members; ++i) {
				if (get_compound_member(tp, i) == field)
					break;
			}
			if (i >= n_members) {
				/* not found: should NOT happen */
				return NULL;
			}
			entry.index = i;
		}
		return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
	}

	/* found the end */
	assert(is_SymConst(ptr));

	ent = get_SymConst_entity(ptr);
	initializer = get_entity_initializer(ent);
	for (p = next; p != NULL; p = p->next) {
		unsigned n;

		if (initializer->kind != IR_INITIALIZER_COMPOUND)
			return NULL;

		n = get_initializer_compound_n_entries(initializer);
		if (p->index < 0 || (unsigned)p->index >= n)
			return NULL;
		initializer = get_initializer_compound_value(initializer, p->index);
	}

	switch (initializer->kind) {
	case IR_INITIALIZER_CONST:
		return get_initializer_const_value(initializer);
	case IR_INITIALIZER_TARVAL:
	case IR_INITIALIZER_NULL:
	default:
		return NULL;
	}
}

static ir_node *find_compound_ent_value(ir_node *ptr) {
	return rec_find_compound_ent_value(ptr, NULL);
}

/* forward */
static void reduce_adr_usage(ir_node *ptr);

/**
 * Update a Load that may have lost its usage.
 */
static void handle_load_update(ir_node *load) {
	ldst_info_t *info = get_irn_link(load);

	/* do NOT touch volatile loads for now */
	if (get_Load_volatility(load) == volatility_is_volatile)
		return;

	if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
		ir_node *ptr = get_Load_ptr(load);
		ir_node *mem = get_Load_mem(load);

		/* a Load whose value is neither used nor exception-checked: remove it */
		exchange(info->projs[pn_Load_M], mem);
		if (info->projs[pn_Load_X_regular])
			exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
		kill_node(load);
		reduce_adr_usage(ptr);
	}
}  /* handle_load_update */

/**
 * A use of an address node has vanished. Check if it was a Proj
 * node and update the counters.
 */
static void reduce_adr_usage(ir_node *ptr) {
	if (is_Proj(ptr)) {
		if (get_irn_n_edges(ptr) <= 0) {
			/* this Proj is dead now */
			ir_node *pred = get_Proj_pred(ptr);

			if (is_Load(pred)) {
				ldst_info_t *info = get_irn_link(pred);
				info->projs[get_Proj_proj(ptr)] = NULL;

				/* this node lost its result Proj, handle that */
				handle_load_update(pred);
			}
		}
	}
}  /* reduce_adr_usage */

/**
 * Check if an already existing value of mode old_mode can be converted
 * into the needed one new_mode without loss.
 */
static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode) {
	if (old_mode == new_mode)
		return 1;

	/* if both modes are two's complement ones, we can always convert the
	   stored value into the needed one. */
	if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
		  get_mode_arithmetic(old_mode) == irma_twos_complement &&
		  get_mode_arithmetic(new_mode) == irma_twos_complement)
		return 1;
	return 0;
}  /* can_use_stored_value */

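/*
 * Illustrative example: a value stored in mode_Is (32-bit two's complement)
 * can satisfy a later mode_Bs (8-bit) Load from the same address, since the
 * width only shrinks and a Conv yields the right value. The reverse
 * direction (8-bit store, 32-bit load) is rejected.
 */
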
/**
 * Check whether a Call is at least pure, i.e., does only read memory.
 */
static unsigned is_Call_pure(ir_node *call) {
	ir_type *call_tp = get_Call_type(call);
	unsigned prop = get_method_additional_properties(call_tp);

	/* check the call type first */
	if ((prop & (mtp_property_const|mtp_property_pure)) == 0) {
		/* try the called entity */
		ir_node *ptr = get_Call_ptr(call);

		if (is_Global(ptr)) {
			ir_entity *ent = get_Global_entity(ptr);

			prop = get_entity_additional_properties(ent);
		}
	}
	return (prop & (mtp_property_const|mtp_property_pure)) != 0;
}  /* is_Call_pure */

/**
 * Follow the memory chain as long as there are only Loads,
 * alias-free Stores, and constant Calls, and try to replace the
 * current Load by a previous one.
 * Note that in unreachable loops we might reach the Load again,
 * and we can also fall into a cycle.
 * We break such cycles using a special visited flag.
 *
 * INC_MASTER() must be called before diving in.
 */
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
	unsigned res = 0;
	ldst_info_t *info = get_irn_link(load);
	ir_node *pred;
	ir_node *ptr       = get_Load_ptr(load);
	ir_node *mem       = get_Load_mem(load);
	ir_mode *load_mode = get_Load_mode(load);

	for (pred = curr; load != pred; ) {
		ldst_info_t *pred_info = get_irn_link(pred);

		/*
		 * BEWARE: one might think that checking the modes is useless, because
		 * if the pointers are identical, they refer to the same object.
		 * This is only true in strongly typed languages, not in C, where the
		 * following is possible: a = *(ir_type1 *)p; b = *(ir_type2 *)p ...
		 */
		if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
		    can_use_stored_value(get_irn_mode(get_Store_value(pred)), load_mode)) {
			/*
			 * a Load immediately after a Store -- a read after write.
			 * We may remove the Load if both the Load and the Store have no
			 * exception handler OR they are in the same MacroBlock. In the
			 * latter case the Load cannot throw an exception when the
			 * previous Store was quiet.
			 *
			 * Why do we need to check for a Store exception? If the Store
			 * cannot be executed (ROM) the exception handler might simply
			 * jump into the Load MacroBlock :-(
			 * We could make it a little bit better if we knew that the
			 * exception handler of the Store jumps directly to the end...
			 */
			if ((pred_info->projs[pn_Store_X_except] == NULL && info->projs[pn_Load_X_except] == NULL) ||
			    get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)) {
				ir_node *value = get_Store_value(pred);

				DBG_OPT_RAW(load, value);

				/* add a Conv if needed */
				if (get_irn_mode(get_Store_value(pred)) != load_mode) {
					value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
				}

				if (info->projs[pn_Load_M])
					exchange(info->projs[pn_Load_M], mem);

				/* no exception */
				if (info->projs[pn_Load_X_except]) {
					exchange(info->projs[pn_Load_X_except], new_Bad());
					res |= CF_CHANGED;
				}
				if (info->projs[pn_Load_X_regular]) {
					exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
					res |= CF_CHANGED;
				}

				if (info->projs[pn_Load_res])
					exchange(info->projs[pn_Load_res], value);

				kill_node(load);
				reduce_adr_usage(ptr);
				return res | DF_CHANGED;
			}
		} else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
		           can_use_stored_value(get_Load_mode(pred), load_mode)) {
			/*
			 * a Load after a Load -- a read after read.
			 * We may remove the second Load if it has no exception handler
			 * OR they are in the same MacroBlock. In the latter case the
			 * Load cannot throw an exception when the previous Load was
			 * quiet.
			 *
			 * Here there is no need to check if the previous Load has an
			 * exception handler, because both would have exactly the same
			 * exception...
			 */
			if (info->projs[pn_Load_X_except] == NULL || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)) {
				ir_node *value;

				DBG_OPT_RAR(load, pred);

				/* the result is used */
				if (info->projs[pn_Load_res]) {
					if (pred_info->projs[pn_Load_res] == NULL) {
						/* create a new Proj again */
						pred_info->projs[pn_Load_res] = new_r_Proj(current_ir_graph, get_nodes_block(pred), pred, get_Load_mode(pred), pn_Load_res);
					}
					value = pred_info->projs[pn_Load_res];

					/* add a Conv if needed */
					if (get_Load_mode(pred) != load_mode) {
						value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
					}

					exchange(info->projs[pn_Load_res], value);
				}

				if (info->projs[pn_Load_M])
					exchange(info->projs[pn_Load_M], mem);

				/* no exception */
				if (info->projs[pn_Load_X_except]) {
					exchange(info->projs[pn_Load_X_except], new_Bad());
					res |= CF_CHANGED;
				}
				if (info->projs[pn_Load_X_regular]) {
					exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
					res |= CF_CHANGED;
				}

				kill_node(load);
				reduce_adr_usage(ptr);
				return res | DF_CHANGED;
			}
		}

		if (is_Store(pred)) {
			/* check if we can pass through this store */
			ir_alias_relation rel = get_alias_relation(
				current_ir_graph,
				get_Store_ptr(pred),
				get_irn_mode(get_Store_value(pred)),
				ptr, load_mode);
			/* if there might be an alias, we cannot pass this Store */
			if (rel != ir_no_alias)
				break;
			pred = skip_Proj(get_Store_mem(pred));
		} else if (is_Load(pred)) {
			pred = skip_Proj(get_Load_mem(pred));
		} else if (is_Call(pred)) {
			if (is_Call_pure(pred)) {
				/* The called graph is at least pure, so there are no Stores
				   in it. We can handle it like a Load and skip it. */
				pred = skip_Proj(get_Call_mem(pred));
			} else {
				/* there might be Stores in the called graph, stop here */
				break;
			}
		} else {
			/* follow only Load chains */
			break;
		}

		/* check for cycles */
		if (NODE_VISITED(pred_info))
			break;
		MARK_NODE(pred_info);
	}

	if (is_Sync(pred)) {
		int i;

		/* handle all Sync predecessors */
		for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
			res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
			if (res)
				return res;
		}
	}

	return res;
}  /* follow_Mem_chain */

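/*
 * Source-level picture of the two cases handled above (illustrative):
 *
 *     read after write:   *p = x;  y = *p;   ==>  y = x;
 *     read after read:    y = *p;  z = *p;   ==>  z = y;
 *
 * both subject to the exception handler/MacroBlock conditions checked in
 * follow_Mem_chain().
 */
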
/**
 * optimize a Load
 *
 * @param load  the Load node
 */
static unsigned optimize_load(ir_node *load)
{
	ldst_info_t *info = get_irn_link(load);
	ir_node *mem, *ptr, *new_node;
	ir_entity *ent;
	unsigned res = 0;

	/* do NOT touch volatile loads for now */
	if (get_Load_volatility(load) == volatility_is_volatile)
		return 0;

	/* the address of the load to be optimized */
	ptr = get_Load_ptr(load);

	/*
	 * Check if we can remove the exception from a Load:
	 * This can be done if the address is from a Sel(Alloc) and
	 * the Sel type is a subtype of the allocated type.
	 *
	 * This optimizes some often used OO constructs,
	 * like x = new O; x->t;
	 */
	if (info->projs[pn_Load_X_except]) {
		if (is_Sel(ptr)) {
			ir_node *mem = get_Sel_mem(ptr);

			/* FIXME: works with the current FE, but better use the base */
			if (is_Alloc(skip_Proj(mem))) {
				/* ok, check the types */
				ir_entity *ent    = get_Sel_entity(ptr);
				ir_type   *s_type = get_entity_type(ent);
				ir_type   *a_type = get_Alloc_type(skip_Proj(mem));

				if (is_SubClass_of(s_type, a_type)) {
					/* ok, condition met: there can't be an exception because
					 * Alloc guarantees that enough memory was allocated */

					exchange(info->projs[pn_Load_X_except], new_Bad());
					info->projs[pn_Load_X_except] = NULL;
					if (info->projs[pn_Load_X_regular])
						exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
					info->projs[pn_Load_X_regular] = NULL;
					res |= CF_CHANGED;
				}
			}
		} else if (is_Alloc(skip_Proj(skip_Cast(ptr)))) {
			/* simple case: a direct Load after an Alloc. A Firm Alloc throws
			 * an exception in case of out-of-memory, so there is no way for an
			 * exception in this Load.
			 * This code is constructed by the "exception lowering" in the Jack compiler.
			 */
			exchange(info->projs[pn_Load_X_except], new_Bad());
			info->projs[pn_Load_X_except] = NULL;
			if (info->projs[pn_Load_X_regular])
				exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
			info->projs[pn_Load_X_regular] = NULL;
			res |= CF_CHANGED;
		}
	}

	/* The mem of the Load. Must still be returned after optimization. */
	mem  = get_Load_mem(load);

	if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
		/* a Load whose value is neither used nor exception-checked: remove it */
		exchange(info->projs[pn_Load_M], mem);

		if (info->projs[pn_Load_X_regular]) {
			/* should not happen, but if it does, remove it */
			exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
			res |= CF_CHANGED;
		}
		kill_node(load);
		reduce_adr_usage(ptr);
		return res | DF_CHANGED;
	}

	/* Load from a constant polymorphic field, where we can resolve
	   polymorphism. */
	new_node = transform_node_Load(load);
	if (new_node != load) {
		if (info->projs[pn_Load_M]) {
			exchange(info->projs[pn_Load_M], mem);
			info->projs[pn_Load_M] = NULL;
		}
		if (info->projs[pn_Load_X_except]) {
			exchange(info->projs[pn_Load_X_except], new_Bad());
			info->projs[pn_Load_X_except] = NULL;
			res |= CF_CHANGED;
		}
		if (info->projs[pn_Load_X_regular]) {
			exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
			info->projs[pn_Load_X_regular] = NULL;
			res |= CF_CHANGED;
		}
		if (info->projs[pn_Load_res])
			exchange(info->projs[pn_Load_res], new_node);

		kill_node(load);
		reduce_adr_usage(ptr);
		return res | DF_CHANGED;
	}

	/* check if we can determine the entity that will be loaded */
	ent = find_constant_entity(ptr);
	if (ent) {
		if ((allocation_static == get_entity_allocation(ent)) &&
			(visibility_external_allocated != get_entity_visibility(ent))) {
			/* a static allocation that is not external: there should be NO
			 * exception when loading. */

			/* no exception, clear the info field as it might be checked later again */
			if (info->projs[pn_Load_X_except]) {
				exchange(info->projs[pn_Load_X_except], new_Bad());
				info->projs[pn_Load_X_except] = NULL;
				res |= CF_CHANGED;
			}
			if (info->projs[pn_Load_X_regular]) {
				exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
				info->projs[pn_Load_X_regular] = NULL;
				res |= CF_CHANGED;
			}

			if (variability_constant == get_entity_variability(ent)) {
				if (is_atomic_entity(ent)) {
					/* Might not be atomic after lowering of Sels.  In this
					   case we could also load, but it's more complicated. */
					/* simpler case: we load the content of a constant value:
					 * replace it by the constant itself */

					/* no memory */
					if (info->projs[pn_Load_M]) {
						exchange(info->projs[pn_Load_M], mem);
						res |= DF_CHANGED;
					}
					/* no result :-) */
					if (info->projs[pn_Load_res]) {
						ir_node *c = copy_const_value(get_irn_dbg_info(load), get_atomic_ent_value(ent));

						DBG_OPT_RC(load, c);
						exchange(info->projs[pn_Load_res], c);
						res |= DF_CHANGED;
					}
					kill_node(load);
					reduce_adr_usage(ptr);
					return res;
				} else {
					ir_node *c = NULL;
					if (ent->has_initializer) {
						/* new style initializer */
						c = find_compound_ent_value(ptr);
					} else {
						/* old style initializer */
						compound_graph_path *path = get_accessed_path(ptr);

						if (path) {
							assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));

							c = get_compound_ent_value_by_path(ent, path);
							free_compound_graph_path(path);
						}
					}
					if (c != NULL) {
						if (info->projs[pn_Load_M]) {
							exchange(info->projs[pn_Load_M], mem);
							res |= DF_CHANGED;
						}
						if (info->projs[pn_Load_res]) {
							exchange(info->projs[pn_Load_res], copy_const_value(get_irn_dbg_info(load), c));
							res |= DF_CHANGED;
						}
						kill_node(load);
						reduce_adr_usage(ptr);
						return res;
					} else {
						/* We cannot determine a correct access path.  E.g., in jack, we load
						a byte from an object to generate an exception.  Happens in test program
						Reflectiontest.
						printf(">>>>>>>>>>>>> Found access to constant entity %s in function %s\n", get_entity_name(ent),
						get_entity_name(get_irg_entity(current_ir_graph)));
						ir_printf("  load: %+F\n", load);
						ir_printf("  ptr:  %+F\n", ptr);
						*/
					}
				}
			}
		}
	}

	/* Check if the address of this load is used more than once.
	 * If not, this load cannot be removed in any case. */
	if (get_irn_n_uses(ptr) <= 1)
		return res;

	/*
	 * follow the memory chain as long as there are only Loads
	 * and try to replace the current Load or Store by a previous one.
	 * Note that in unreachable loops we might reach the Load again,
	 * and we can also fall into a cycle.
	 * We break such cycles using a special visited flag.
	 */
	INC_MASTER();
	res = follow_Mem_chain(load, skip_Proj(mem));
	return res;
}  /* optimize_load */

/**
 * Check whether a value of mode new_mode would completely overwrite a value
 * of mode old_mode in memory.
 */
static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
{
	return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
}  /* is_completely_overwritten */

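/*
 * Illustrative example: an earlier 8-bit store is completely overwritten
 * by a later 32-bit store to the same address, so the earlier one is dead;
 * an 8-bit store does NOT completely overwrite an earlier 32-bit store.
 */
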
/**
 * follow the memory chain as long as there are only Loads and alias-free Stores.
 *
 * INC_MASTER() must be called before diving in.
 */
static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
	unsigned res = 0;
	ldst_info_t *info = get_irn_link(store);
	ir_node *pred;
	ir_node *ptr = get_Store_ptr(store);
	ir_node *mem = get_Store_mem(store);
	ir_node *value = get_Store_value(store);
	ir_mode *mode  = get_irn_mode(value);
	ir_node *block = get_nodes_block(store);
	ir_node *mblk  = get_Block_MacroBlock(block);

	for (pred = curr; pred != store;) {
		ldst_info_t *pred_info = get_irn_link(pred);

		/*
		 * BEWARE: one might think that checking the modes is useless, because
		 * if the pointers are identical, they refer to the same object.
		 * This is only true in strongly typed languages, not in C, where the
		 * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
		 * However, if the written mode has a size greater than or equal to
		 * that of the old one, the old value is completely overwritten and
		 * can be killed ...
		 */
		if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
		    get_nodes_MacroBlock(pred) == mblk &&
		    is_completely_overwritten(get_irn_mode(get_Store_value(pred)), mode)) {
			/*
			 * a Store after a Store in the same MacroBlock -- a write after write.
			 * We may remove the first Store if it does not have an exception handler.
			 *
			 * TODO: What if both have the same exception handler ???
			 */
			if (get_Store_volatility(pred) != volatility_is_volatile && !pred_info->projs[pn_Store_X_except]) {
				DBG_OPT_WAW(pred, store);
				exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
				kill_node(pred);
				reduce_adr_usage(ptr);
				return DF_CHANGED;
			}
		} else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
		           value == pred_info->projs[pn_Load_res]) {
			/*
			 * a Store of a value just loaded from the same address
			 * -- a write after read.
			 * We may remove the Store if it does not have an exception
			 * handler.
			 */
			if (! info->projs[pn_Store_X_except]) {
				DBG_OPT_WAR(store, pred);
				exchange(info->projs[pn_Store_M], mem);
				kill_node(store);
				reduce_adr_usage(ptr);
				return DF_CHANGED;
			}
		}

		if (is_Store(pred)) {
			/* check if we can pass through this store */
			ir_alias_relation rel = get_alias_relation(
				current_ir_graph,
				get_Store_ptr(pred),
				get_irn_mode(get_Store_value(pred)),
				ptr, mode);
			/* if there might be an alias, we cannot pass this Store */
			if (rel != ir_no_alias)
				break;
			pred = skip_Proj(get_Store_mem(pred));
		} else if (is_Load(pred)) {
			ir_alias_relation rel = get_alias_relation(
				current_ir_graph, get_Load_ptr(pred), get_Load_mode(pred),
				ptr, mode);
			if (rel != ir_no_alias)
				break;

			pred = skip_Proj(get_Load_mem(pred));
		} else {
			/* follow only Load chains */
			break;
		}

		/* check for cycles */
		if (NODE_VISITED(pred_info))
			break;
		MARK_NODE(pred_info);
	}

	if (is_Sync(pred)) {
		int i;

		/* handle all Sync predecessors */
		for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
			res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
			if (res)
				break;
		}
	}
	return res;
}  /* follow_Mem_chain_for_Store */

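/*
 * Source-level picture of the two cases handled above (illustrative):
 *
 *     write after write:  *p = x;  *p = y;   ==>  *p = y;
 *     write after read:   x = *p;  *p = x;   ==>  x = *p;
 *
 * again subject to the volatility/exception conditions checked in
 * follow_Mem_chain_for_Store().
 */
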
/**
 * optimize a Store
 *
 * @param store  the Store node
 */
static unsigned optimize_store(ir_node *store) {
	ir_node *ptr, *mem;

	if (get_Store_volatility(store) == volatility_is_volatile)
		return 0;

	ptr = get_Store_ptr(store);

	/* Check if the address of this Store is used more than once.
	 * If not, this Store cannot be removed in any case. */
	if (get_irn_n_uses(ptr) <= 1)
		return 0;

	mem = get_Store_mem(store);

	/* follow the memory chain as long as there are only Loads */
	INC_MASTER();

	return follow_Mem_chain_for_Store(store, skip_Proj(mem));
}  /* optimize_store */

/**
 * walker, optimizes Phi after Stores to identical places:
 * Does the following optimization:
 * @verbatim
 *
 *   val1   val2   val3          val1  val2  val3
 *    |      |      |               \    |    /
 *  Store  Store  Store              \   |   /
 *      \    |    /                   PhiData
 *       \   |   /                       |
 *        \  |  /                      Store
 *          PhiM
 *
 * @endverbatim
 * This reduces the number of stores and allows for predicated execution.
 * It moves Stores back to the end of a function, which may be bad.
 *
 * This is only possible if the predecessor blocks have only one successor.
 */
static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
{
	int i, n;
	ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
	ir_mode *mode;
	ir_node **inM, **inD, **projMs;
	int *idx;
	dbg_info *db = NULL;
	ldst_info_t *info;
	block_info_t *bl_info;
	unsigned res = 0;

	/* Must be a memory Phi */
	if (get_irn_mode(phi) != mode_M)
		return 0;

	n = get_Phi_n_preds(phi);
	if (n <= 0)
		return 0;

	/* there must be only one user */
	projM = get_Phi_pred(phi, 0);
	if (get_irn_n_edges(projM) != 1)
		return 0;

	store = skip_Proj(projM);
	old_store = store;
	if (!is_Store(store))
		return 0;

	block = get_nodes_block(store);

	/* abort on dead blocks */
	if (is_Block_dead(block))
		return 0;

	/* check if the block is post-dominated by the Phi-block
	   and has no exception exit */
	bl_info = get_irn_link(block);
	if (bl_info->flags & BLOCK_HAS_EXC)
		return 0;

	phi_block = get_nodes_block(phi);
	if (! block_strictly_postdominates(phi_block, block))
		return 0;

	/* this is the address of the store */
	ptr  = get_Store_ptr(store);
	mode = get_irn_mode(get_Store_value(store));
	info = get_irn_link(store);
	exc  = info->exc_block;

	for (i = 1; i < n; ++i) {
		ir_node *pred = get_Phi_pred(phi, i);

		if (get_irn_n_edges(pred) != 1)
			return 0;

		pred = skip_Proj(pred);
		if (!is_Store(pred))
			return 0;

		if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
			return 0;

		info = get_irn_link(pred);

		/* check if all stores have the same exception flow */
		if (exc != info->exc_block)
			return 0;

		/* abort on dead blocks */
		block = get_nodes_block(pred);
		if (is_Block_dead(block))
			return 0;

		/* check if the block is post-dominated by the Phi-block
		   and has no exception exit. Note that block must be different from
		   the Phi-block, else we would move a Store from the End of a block to its
		   Start... */
		bl_info = get_irn_link(block);
		if (bl_info->flags & BLOCK_HAS_EXC)
			return 0;
		if (block == phi_block || ! block_postdominates(phi_block, block))
			return 0;
	}

	/*
	 * ok, when we are here, we found all predecessors of a Phi that
	 * are Stores to the same address and size. That means whatever
	 * we do before we enter the block of the Phi, we do a Store.
	 * So, we can move the Store to the current block:
	 *
	 *   val1    val2    val3          val1  val2  val3
	 *    |       |       |               \    |    /
	 * | Str | | Str | | Str |             \   |   /
	 *      \     |     /                   PhiData
	 *       \    |    /                       |
	 *        \   |   /                       Str
	 *           PhiM
	 *
	 * This is only allowed if the predecessor blocks have only one successor.
	 */

	NEW_ARR_A(ir_node *, projMs, n);
	NEW_ARR_A(ir_node *, inM, n);
	NEW_ARR_A(ir_node *, inD, n);
	NEW_ARR_A(int, idx, n);

	/* Prepare: Collect all Store nodes.  We must do this
	   first because we otherwise may lose a Store when exchanging its
	   memory Proj.
	 */
	for (i = n - 1; i >= 0; --i) {
		ir_node *store;

		projMs[i] = get_Phi_pred(phi, i);
		assert(is_Proj(projMs[i]));

		store = get_Proj_pred(projMs[i]);
		info  = get_irn_link(store);

		inM[i] = get_Store_mem(store);
		inD[i] = get_Store_value(store);
		idx[i] = info->exc_idx;
	}
	block = get_nodes_block(phi);

	/* second step: create a new memory Phi */
	phiM = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inM, mode_M);

	/* third step: create a new data Phi */
	phiD = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inD, mode);

	/* rewire the memory and kill the old Stores */
	for (i = n - 1; i >= 0; --i) {
		ir_node *proj  = projMs[i];

		if (is_Proj(proj)) {
			ir_node *store = get_Proj_pred(proj);
			exchange(proj, inM[i]);
			kill_node(store);
		}
	}

	/* fourth step: create the Store */
	store = new_rd_Store(db, current_ir_graph, block, phiM, ptr, phiD);
#ifdef DO_CACHEOPT
	co_set_irn_name(store, co_get_irn_ident(old_store));
#endif

	projM = new_rd_Proj(NULL, current_ir_graph, block, store, mode_M, pn_Store_M);

	info = get_ldst_info(store, &wenv->obst);
	info->projs[pn_Store_M] = projM;

	/* fifth step: repair the exception flow */
	if (exc) {
		ir_node *projX = new_rd_Proj(NULL, current_ir_graph, block, store, mode_X, pn_Store_X_except);

		info->projs[pn_Store_X_except] = projX;
		info->exc_block                = exc;
		info->exc_idx                  = idx[0];

		for (i = 0; i < n; ++i) {
			set_Block_cfgpred(exc, idx[i], projX);
		}

		if (n > 1) {
			/* the exception block should be optimized as some inputs are identical now */
		}

		res |= CF_CHANGED;
	}

	/* sixth step: replace the old Phi */
	exchange(phi, projM);

	return res | DF_CHANGED;
}  /* optimize_phi */

/**
 * walker, do the optimizations
 */
static void do_load_store_optimize(ir_node *n, void *env) {
	walk_env_t *wenv = env;

	switch (get_irn_opcode(n)) {

	case iro_Load:
		wenv->changes |= optimize_load(n);
		break;

	case iro_Store:
		wenv->changes |= optimize_store(n);
		break;

	case iro_Phi:
		wenv->changes |= optimize_phi(n, wenv);
		break;

	default:
		;
	}
}  /* do_load_store_optimize */

1272 /** A scc. */
1273 typedef struct scc {
1274         ir_node *head;          /**< the head of the list */
1275 } scc;
1276
1277 /** A node entry. */
1278 typedef struct node_entry {
1279         unsigned DFSnum;    /**< the DFS number of this node */
1280         unsigned low;       /**< the low number of this node */
1281         ir_node  *header;   /**< the header of this node */
1282         int      in_stack;  /**< flag, set if the node is on the stack */
1283         ir_node  *next;     /**< link to the next node in the same scc */
1284         scc      *pscc;     /**< the scc of this node */
1285         unsigned POnum;     /**< the post order number for blocks */
1286 } node_entry;
1287
1288 /** A loop entry. */
1289 typedef struct loop_env {
1290         ir_phase ph;           /**< the phase object */
1291         ir_node  **stack;      /**< the node stack */
1292         int      tos;          /**< tos index */
1293         unsigned nextDFSnum;   /**< the next DFS number to assign */
1294         unsigned POnum;        /**< current post order number */
1295
1296         unsigned changes;      /**< a bitmask of graph changes */
1297 } loop_env;
1298
1299 /**
1300  * Gets the node_entry of a node, allocating it lazily on first access.
1301  */
1302 static node_entry *get_irn_ne(ir_node *irn, loop_env *env) {
1303         ir_phase   *ph = &env->ph;
1304         node_entry *e  = phase_get_irn_data(ph, irn);
1305
1306         if (! e) {
1307                 e = phase_alloc(ph, sizeof(*e));
1308                 memset(e, 0, sizeof(*e));
1309                 phase_set_irn_data(ph, irn, e);
1310         }
1311         return e;
1312 }  /* get_irn_ne */
1313
1314 /**
1315  * Push a node onto the stack.
1316  *
1317  * @param env   the loop environment
1318  * @param n     the node to push
1319  */
1320 static void push(loop_env *env, ir_node *n) {
1321         node_entry *e;
1322
1323         if (env->tos == ARR_LEN(env->stack)) {
1324                 int nlen = ARR_LEN(env->stack) * 2;
1325                 ARR_RESIZE(ir_node *, env->stack, nlen);
1326         }
1327         env->stack[env->tos++] = n;
1328         e = get_irn_ne(n, env);
1329         e->in_stack = 1;
1330 }  /* push */
1331
1332 /**
1333  * pop a node from the stack
1334  *
1335  * @param env   the loop environment
1336  *
1337  * @return  The topmost node
1338  */
1339 static ir_node *pop(loop_env *env) {
1340         ir_node *n = env->stack[--env->tos];
1341         node_entry *e = get_irn_ne(n, env);
1342
1343         e->in_stack = 0;
1344         return n;
1345 }  /* pop */
1346
1347 /**
1348  * Check if irn is a region constant.
1349  * The block or irn must strictly dominate the header block.
1350  *
1351  * @param irn           the node to check
1352  * @param header_block  the header block of the induction variable
1353  */
1354 static int is_rc(ir_node *irn, ir_node *header_block) {
1355         ir_node *block = get_nodes_block(irn);
1356
1357         return (block != header_block) && block_dominates(block, header_block);
1358 }  /* is_rc */
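
/*
 * Illustrative example (not from the original sources): for a loop with
 * header block H, a node n is a region constant iff its block B strictly
 * dominates H, i.e. n is available on every path into the loop:
 *
 *      B (computes n)
 *      |                    is_rc(n, H) == 1, since B != H and
 *      v                    B dominates H
 *  +-> H
 *  |   |
 *  +---+
 *
 * A node computed in H itself is not a region constant, because its value
 * may change on every iteration.
 */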
1359
1360 typedef struct phi_entry phi_entry;
1361 struct phi_entry {
1362         ir_node   *phi;    /**< A phi with a region const memory. */
1363         int       pos;     /**< The position of the region const memory */
1364         ir_node   *load;   /**< the newly created load for this phi */
1365         phi_entry *next;
1366 };
1367
1368 /**
1369  * Move Loads out of loops if possible.
1370  *
1371  * @param pscc   the loop described by an SCC
1372  * @param env    the loop environment
1373  */
1374 static void move_loads_out_of_loops(scc *pscc, loop_env *env) {
1375         ir_node   *phi, *load, *next, *other, *next_other;
1376         ir_entity *ent;
1377         int       j;
1378         phi_entry *phi_list = NULL;
1379
1380         /* collect all outer memories */
1381         for (phi = pscc->head; phi != NULL; phi = next) {
1382                 node_entry *ne = get_irn_ne(phi, env);
1383                 next = ne->next;
1384
1385                 /* check all memory Phi's */
1386                 if (! is_Phi(phi))
1387                         continue;
1388
1389                 assert(get_irn_mode(phi) == mode_M && "DFS returned a non-memory Phi");
1390
1391                 for (j = get_irn_arity(phi) - 1; j >= 0; --j) {
1392                         ir_node    *pred = get_irn_n(phi, j);
1393                         node_entry *pe   = get_irn_ne(pred, env);
1394
1395                         if (pe->pscc != ne->pscc) {
1396                                 /* not in the same SCC, so this input is a region constant */
1397                                 phi_entry *entry = phase_alloc(&env->ph, sizeof(*entry));
1398
1399                                 entry->phi  = phi;
1400                                 entry->pos  = j;
1401                                 entry->next = phi_list;
1402                                 phi_list = entry;
1403                         }
1404                 }
1405         }
1406         /* no Phis no fun */
1407         assert(phi_list != NULL && "DFS found a loop without Phi");
1408
1409         for (load = pscc->head; load; load = next) {
1410                 ir_mode *load_mode;
1411                 node_entry *ne = get_irn_ne(load, env);
1412                 next = ne->next;
1413
1414                 if (is_Load(load)) {
1415                         ldst_info_t *info = get_irn_link(load);
1416                         ir_node     *ptr = get_Load_ptr(load);
1417
1418                         /* for now, we cannot handle Loads with exceptions */
1419                         if (info->projs[pn_Load_res] == NULL || info->projs[pn_Load_X_regular] != NULL || info->projs[pn_Load_X_except] != NULL)
1420                                 continue;
1421
1422                         /* for now, we can only handle Load(Global) */
1423                         if (! is_Global(ptr))
1424                                 continue;
1425                         ent = get_Global_entity(ptr);
1426                         load_mode = get_Load_mode(load);
1427                         for (other = pscc->head; other != NULL; other = next_other) {
1428                                 node_entry *ne = get_irn_ne(other, env);
1429                                 next_other = ne->next;
1430
1431                                 if (is_Store(other)) {
1432                                         ir_alias_relation rel = get_alias_relation(
1433                                                 current_ir_graph,
1434                                                 get_Store_ptr(other),
1435                                                 get_irn_mode(get_Store_value(other)),
1436                                                 ptr, load_mode);
1437                                         /* if there might be an alias, we cannot move the Load past this Store */
1438                                         if (rel != ir_no_alias)
1439                                                 break;
1440                                 }
1441                                 /* only pure Calls can appear here; they do not modify memory, so skip them */
1442                         }
1443                         if (other == NULL) {
1444                                 ldst_info_t *ninfo;
1445                                 phi_entry   *pe;
1446                                 dbg_info    *db;
1447
1448                                 /* for now, we cannot handle more than one input */
1449                                 if (phi_list->next != NULL)
1450                                         return;
1451
1452                                 /* yep, no aliasing Store found, Load can be moved */
1453                                 DB((dbg, LEVEL_1, "  Found a Load that could be moved: %+F\n", load));
1454
1455                                 db   = get_irn_dbg_info(load);
1456                                 for (pe = phi_list; pe != NULL; pe = pe->next) {
1457                                         int     pos   = pe->pos;
1458                                         ir_node *phi  = pe->phi;
1459                                         ir_node *blk  = get_nodes_block(phi);
1460                                         ir_node *pred = get_Block_cfgpred_block(blk, pos);
1461                                         ir_node *irn, *mem;
1462
1463                                         pe->load = irn = new_rd_Load(db, current_ir_graph, pred, get_Phi_pred(phi, pos), ptr, load_mode);
1464                                         ninfo = get_ldst_info(irn, phase_obst(&env->ph));
1465
1466                                         ninfo->projs[pn_Load_M] = mem = new_r_Proj(current_ir_graph, pred, irn, mode_M, pn_Load_M);
1467                                         set_Phi_pred(phi, pos, mem);
1468
1469                                         ninfo->projs[pn_Load_res] = new_r_Proj(current_ir_graph, pred, irn, load_mode, pn_Load_res);
1470
1471                                         DB((dbg, LEVEL_1, "  Created %+F in %+F\n", irn, pred));
1472                                 }
1473
1474                                 /* now kill the old Load; phi_list has a single entry here, so ninfo is the Load just created */
1475                                 exchange(info->projs[pn_Load_M], get_Load_mem(load));
1476                                 exchange(info->projs[pn_Load_res], ninfo->projs[pn_Load_res]);
1477
1478                                 env->changes |= DF_CHANGED;
1479                         }
1480                 }
1481         }
1482 }  /* move_loads_out_of_loops */
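
/*
 * Hedged source-level sketch of the transformation above (g, x and t are
 * hypothetical names): given a global g with no aliasing Store and no
 * impure Call in the loop,
 *
 *     while (c) {                 t = g;          (Load hoisted into the
 *         x += g;        ==>      while (c) {      loop's predecessor)
 *     }                               x += t;
 *                                 }
 *
 * In the graph this is done by creating a new Load in the block feeding
 * the region-constant input of the loop's memory Phi, rerouting that Phi
 * input through the new Load's memory Proj, and finally bypassing the old
 * Load inside the loop via exchange().  Currently only loops whose memory
 * Phis have a single region-constant input are handled.
 */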
1483
1484 /**
1485  * Process a loop SCC.
1486  *
1487  * @param pscc  the SCC
1488  * @param env   the loop environment
1489  */
1490 static void process_loop(scc *pscc, loop_env *env) {
1491         ir_node *irn, *next, *header = NULL;
1492         node_entry *b, *h = NULL;
1493         int j, only_phi, num_outside, process = 0;
1494         ir_node *out_rc;
1495
1496         /* find the header block for this scc */
1497         for (irn = pscc->head; irn; irn = next) {
1498                 node_entry *e = get_irn_ne(irn, env);
1499                 ir_node *block = get_nodes_block(irn);
1500
1501                 next = e->next;
1502                 b = get_irn_ne(block, env);
1503
1504                 if (header) {
1505                         if (h->POnum < b->POnum) {
1506                                 header = block;
1507                                 h      = b;
1508                         }
1509                 }
1510                 else {
1511                         header = block;
1512                         h      = b;
1513                 }
1514         }
1515
1516         /* check if this scc contains only Phi, Load or Store nodes */
1517         only_phi    = 1;
1518         num_outside = 0;
1519         out_rc      = NULL;
1520         for (irn = pscc->head; irn; irn = next) {
1521                 node_entry *e = get_irn_ne(irn, env);
1522
1523                 next = e->next;
1524                 switch (get_irn_opcode(irn)) {
1525                 case iro_Call:
1526                         if (is_Call_pure(irn)) {
1527                                 /* pure calls can be treated like loads */
1528                                 only_phi = 0;
1529                                 break;
1530                         }
1531                         /* non-pure calls must be handled like may-alias Stores */
1532                         goto fail;
1533                 case iro_CopyB:
1534                         /* cannot handle CopyB yet */
1535                         goto fail;
1536                 case iro_Load:
1537                         process = 1;
1538                         if (get_Load_volatility(irn) == volatility_is_volatile) {
1539                                 /* cannot handle loops with volatile Loads */
1540                                 goto fail;
1541                         }
1542                         only_phi = 0;
1543                         break;
1544                 case iro_Store:
1545                         if (get_Store_volatility(irn) == volatility_is_volatile) {
1546                                 /* cannot handle loops with volatile Stores */
1547                                 goto fail;
1548                         }
1549                         only_phi = 0;
1550                         break;
1551                 default:
1552                         only_phi = 0;
1553                         break;
1554                 case iro_Phi:
1555                         for (j = get_irn_arity(irn) - 1; j >= 0; --j) {
1556                                 ir_node *pred  = get_irn_n(irn, j);
1557                                 node_entry *pe = get_irn_ne(pred, env);
1558
1559                                 if (pe->pscc != e->pscc) {
1560                                         /* not in the same SCC, must be a region const */
1561                                         if (! is_rc(pred, header)) {
1562                                                 /* not a memory loop */
1563                                                 goto fail;
1564                                         }
1565                                         if (! out_rc) {
1566                                                 out_rc = pred;
1567                                                 ++num_outside;
1568                                         } else if (out_rc != pred) {
1569                                                 ++num_outside;
1570                                         }
1571                                 }
1572                         }
1573                         break;
1574                 }
1575         }
1576         if (! process)
1577                 goto fail;
1578
1579         /* found a memory loop */
1580         DB((dbg, LEVEL_2, "  Found a memory loop:\n  "));
1581         if (only_phi && num_outside == 1) {
1582                 /* a phi cycle with only one real predecessor can be collapsed */
1583                 DB((dbg, LEVEL_2, "  Found a USELESS Phi cycle:\n  "));
1584
1585                 for (irn = pscc->head; irn; irn = next) {
1586                         node_entry *e = get_irn_ne(irn, env);
1587                         next = e->next;
1588                         e->header = NULL;
1589                         exchange(irn, out_rc);
1590                 }
1591                 env->changes |= DF_CHANGED;
1592                 return;
1593         }
1594
1595         /* set the header for every node in this scc */
1596         for (irn = pscc->head; irn; irn = next) {
1597                 node_entry *e = get_irn_ne(irn, env);
1598                 e->header = header;
1599                 next = e->next;
1600                 DB((dbg, LEVEL_2, " %+F,", irn));
1601         }
1602         DB((dbg, LEVEL_2, "\n"));
1603
1604         move_loads_out_of_loops(pscc, env);
1605
1606 fail:
1607         ;
1608 }  /* process_loop */
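
/*
 * Sketch of the Phi-cycle collapse above (illustrative): a cycle of
 * memory Phis with a single region-constant input rc carries no
 * information, so every Phi in the cycle is replaced by rc:
 *
 *     rc --> PhiM <--+
 *             |      |       ==>   all users of the Phis use rc directly
 *             +------+
 */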
1609
1610 /**
1611  * Process a SCC.
1612  *
1613  * @param pscc  the SCC
1614  * @param env   the loop environment
1615  */
1616 static void process_scc(scc *pscc, loop_env *env) {
1617         ir_node *head = pscc->head;
1618         node_entry *e = get_irn_ne(head, env);
1619
1620 #ifdef DEBUG_libfirm
1621         {
1622                 ir_node *irn, *next;
1623
1624                 DB((dbg, LEVEL_4, " SCC at %p:\n ", pscc));
1625                 for (irn = pscc->head; irn; irn = next) {
1626                         node_entry *e = get_irn_ne(irn, env);
1627
1628                         next = e->next;
1629
1630                         DB((dbg, LEVEL_4, " %+F,", irn));
1631                 }
1632                 DB((dbg, LEVEL_4, "\n"));
1633         }
1634 #endif
1635
1636         if (e->next != NULL) {
1637                 /* this SCC has more than one member */
1638                 process_loop(pscc, env);
1639         }
1640 }  /* process_scc */
1641
1642 /**
1643  * Do Tarjan's SCC algorithm and drive load/store optimization.
1644  *
1645  * @param irn  start at this node
1646  * @param env  the loop environment
1647  */
1648 static void dfs(ir_node *irn, loop_env *env)
1649 {
1650         int i, n;
1651         node_entry *node = get_irn_ne(irn, env);
1652
1653         mark_irn_visited(irn);
1654
1655         node->DFSnum = env->nextDFSnum++;
1656         node->low    = node->DFSnum;
1657         push(env, irn);
1658
1659         /* handle preds */
1660         if (is_Phi(irn) || is_Sync(irn)) {
1661                 n = get_irn_arity(irn);
1662                 for (i = 0; i < n; ++i) {
1663                         ir_node *pred = get_irn_n(irn, i);
1664                         node_entry *o = get_irn_ne(pred, env);
1665
1666                         if (irn_not_visited(pred)) {
1667                                 dfs(pred, env);
1668                                 node->low = MIN(node->low, o->low);
1669                         }
1670                         if (o->DFSnum < node->DFSnum && o->in_stack)
1671                                 node->low = MIN(o->DFSnum, node->low);
1672                 }
1673         } else if (is_fragile_op(irn)) {
1674                 ir_node *pred = get_fragile_op_mem(irn);
1675                 node_entry *o = get_irn_ne(pred, env);
1676
1677                 if (irn_not_visited(pred)) {
1678                         dfs(pred, env);
1679                         node->low = MIN(node->low, o->low);
1680                 }
1681                 if (o->DFSnum < node->DFSnum && o->in_stack)
1682                         node->low = MIN(o->DFSnum, node->low);
1683         } else if (is_Proj(irn)) {
1684                 ir_node *pred = get_Proj_pred(irn);
1685                 node_entry *o = get_irn_ne(pred, env);
1686
1687                 if (irn_not_visited(pred)) {
1688                         dfs(pred, env);
1689                         node->low = MIN(node->low, o->low);
1690                 }
1691                 if (o->DFSnum < node->DFSnum && o->in_stack)
1692                         node->low = MIN(o->DFSnum, node->low);
1693         }
1694         else {
1695                  /* IGNORE predecessors */
1696         }
1697
1698         if (node->low == node->DFSnum) {
1699                 scc *pscc = phase_alloc(&env->ph, sizeof(*pscc));
1700                 ir_node *x;
1701
1702                 pscc->head = NULL;
1703                 do {
1704                         node_entry *e;
1705
1706                         x = pop(env);
1707                         e = get_irn_ne(x, env);
1708                         e->pscc    = pscc;
1709                         e->next    = pscc->head;
1710                         pscc->head = x;
1711                 } while (x != irn);
1712
1713                 process_scc(pscc, env);
1714         }
1715 }  /* dfs */
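
/*
 * Note on the bookkeeping above (standard Tarjan invariants, stated here
 * for clarity): low is the smallest DFS number reachable from a node via
 * tree edges plus at most one edge back into the stack; a node with
 * low == DFSnum is the root of an SCC, and popping the stack down to it
 * yields exactly the members of that SCC.  Only memory dependencies are
 * followed (Phi/Sync operands, the memory input of fragile ops, Proj
 * predecessors), so every non-trivial SCC found here is a memory loop.
 */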
1716
1717 /**
1718  * Do the DFS on the memory edges of a graph.
1719  *
1720  * @param irg  the graph to process
1721  * @param env  the loop environment
1722  */
1723 static void do_dfs(ir_graph *irg, loop_env *env) {
1724         ir_graph *rem = current_ir_graph;
1725         ir_node  *endblk, *end;
1726         int      i;
1727
1728         current_ir_graph = irg;
1729         inc_irg_visited(irg);
1730
1731         /* visit all memory nodes */
1732         endblk = get_irg_end_block(irg);
1733         for (i = get_Block_n_cfgpreds(endblk) - 1; i >= 0; --i) {
1734                 ir_node *pred = get_Block_cfgpred(endblk, i);
1735
1736                 pred = skip_Proj(pred);
1737                 if (is_Return(pred))
1738                         dfs(get_Return_mem(pred), env);
1739                 else if (is_Raise(pred))
1740                         dfs(get_Raise_mem(pred), env);
1741                 else if (is_fragile_op(pred))
1742                         dfs(get_fragile_op_mem(pred), env);
1743                 else {
1744                         assert(0 && "Unknown EndBlock predecessor");
1745                 }
1746         }
1747
1748         /* visit the keep-alives */
1749         end = get_irg_end(irg);
1750         for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
1751                 ir_node *ka = get_End_keepalive(end, i);
1752
1753                 if (is_Phi(ka) && irn_not_visited(ka))
1754                         dfs(ka, env);
1755         }
1756         current_ir_graph = rem;
1757 }  /* do_dfs */
1758
1759 /**
1760  * Initialize new phase data. We always do this explicitly, so return NULL here.
1761  */
1762 static void *init_loop_data(ir_phase *ph, const ir_node *irn, void *data) {
1763         (void)ph;
1764         (void)irn;
1765         (void)data;
1766         return NULL;
1767 }  /* init_loop_data */
1768
1769 /**
1770  * Optimize Loads/Stores in loops.
1771  *
1772  * @param irg  the graph
1773  */
1774 static int optimize_loops(ir_graph *irg) {
1775         loop_env env;
1776
1777         env.stack         = NEW_ARR_F(ir_node *, 128);
1778         env.tos           = 0;
1779         env.nextDFSnum    = 0;
1780         env.POnum         = 0;
1781         env.changes       = 0;
1782         phase_init(&env.ph, "ldstopt", irg, PHASE_DEFAULT_GROWTH, init_loop_data, NULL);
1783
1784         /* calculate the SCC's and drive loop optimization. */
1785         do_dfs(irg, &env);
1786
1787         DEL_ARR_F(env.stack);
1788         phase_free(&env.ph);
1789
1790         return env.changes;
1791 }  /* optimize_loops */
1792
1793 /*
1794  * Do the load/store optimization.
1795  */
1796 void optimize_load_store(ir_graph *irg) {
1797         walk_env_t env;
1798
1799         FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");
1800
1801         assert(get_irg_phase_state(irg) != phase_building);
1802         assert(get_irg_pinned(irg) != op_pin_state_floats &&
1803                 "LoadStore optimization needs pinned graph");
1804
1805         /* we need landing pads */
1806         remove_critical_cf_edges(irg);
1807
1808         edges_assure(irg);
1809
1810         /* for Phi optimization post-dominators are needed ... */
1811         assure_postdoms(irg);
1812
1813         if (get_opt_alias_analysis()) {
1814                 assure_irg_address_taken_computed(irg);
1815                 assure_irp_globals_address_taken_computed();
1816         }
1817
1818         obstack_init(&env.obst);
1819         env.changes = 0;
1820
1821         /* init the links, then collect Loads/Stores/Proj's in lists */
1822         master_visited = 0;
1823         irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);
1824
1825         /* now we have collected enough information, optimize */
1826         irg_walk_graph(irg, NULL, do_load_store_optimize, &env);
1827
1828         env.changes |= optimize_loops(irg);
1829
1830         obstack_free(&env.obst, NULL);
1831
1832         /* Handle graph state */
1833         if (env.changes) {
1834                 set_irg_outs_inconsistent(irg);
1835         }
1836
1837         if (env.changes & CF_CHANGED) {
1838                 /* control flow changed: blocks might have Bad() predecessors,
1839                    so the dominance information is inconsistent */
1840                 set_irg_doms_inconsistent(irg);
1841         }
1842 }  /* optimize_load_store */
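
/*
 * Usage sketch (hypothetical driver code, not part of this file): the
 * pass is typically run once per graph of the program:
 *
 *     int i;
 *     for (i = get_irp_n_irgs() - 1; i >= 0; --i)
 *             optimize_load_store(get_irp_irg(i));
 *
 * get_irp_n_irgs()/get_irp_irg() are the usual libFirm iterators over all
 * graphs of the current program.
 */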