BugFix: exp_gen sets must be cleaned
[libfirm] / ir / opt / ldstopt.c
1 /*
2  * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief   Load/Store optimizations.
23  * @author  Michael Beck
24  * @version $Id$
25  */
26 #ifdef HAVE_CONFIG_H
27 # include "config.h"
28 #endif
29
30 #include <string.h>
31
32 #include "iroptimize.h"
33 #include "irnode_t.h"
34 #include "irgraph_t.h"
35 #include "irmode_t.h"
36 #include "iropt_t.h"
37 #include "ircons_t.h"
38 #include "irgmod.h"
39 #include "irgwalk.h"
40 #include "irvrfy.h"
41 #include "tv_t.h"
42 #include "dbginfo_t.h"
43 #include "iropt_dbg.h"
44 #include "irflag_t.h"
45 #include "array.h"
46 #include "irhooks.h"
47 #include "iredges.h"
48 #include "irtools.h"
49 #include "opt_polymorphy.h"
50 #include "irmemory.h"
51 #include "xmalloc.h"
52 #include "irphase_t.h"
53 #include "irgopt.h"
54 #include "debug.h"
55
56 /** The debug handle. */
57 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
58
59 #ifdef DO_CACHEOPT
60 #include "cacheopt/cachesim.h"
61 #endif
62
63 #undef IMAX
64 #define IMAX(a,b)       ((a) > (b) ? (a) : (b))
65
66 #define MAX_PROJ        IMAX(IMAX(pn_Load_max, pn_Store_max), pn_Call_max)
67
68 enum changes_t {
69         DF_CHANGED = 1,       /**< data flow changed */
70         CF_CHANGED = 2,       /**< control flow changed */
71 };
72
73 /**
74  * walker environment
75  */
76 typedef struct _walk_env_t {
77         struct obstack obst;          /**< obstack for the Load/Store and Block infos */
78         unsigned changes;             /**< a bitmask of graph changes */
79 } walk_env_t;
80
81 /** A Load/Store info. */
82 typedef struct _ldst_info_t {
83         ir_node  *projs[MAX_PROJ+1];  /**< list of Proj's of this node */
84         ir_node  *exc_block;          /**< the exception block if available */
85         int      exc_idx;             /**< predecessor index in the exception block */
86         unsigned visited;             /**< visited counter for breaking loops */
87 } ldst_info_t;
88
89 /**
90  * flags for control flow.
91  */
92 enum block_flags_t {
93         BLOCK_HAS_COND = 1,      /**< Block has conditional control flow */
94         BLOCK_HAS_EXC  = 2       /**< Block has exceptional control flow */
95 };
96
97 /**
98  * a Block info.
99  */
100 typedef struct _block_info_t {
101         unsigned flags;               /**< flags for the block */
102 } block_info_t;
103
104 /** the master visited flag for loop detection. */
105 static unsigned master_visited = 0;
106
107 #define INC_MASTER()       ++master_visited
108 #define MARK_NODE(info)    (info)->visited = master_visited
110 #define NODE_VISITED(info) ((info)->visited >= master_visited)
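/*
 * Usage sketch of the visited scheme (this is the pattern used by
 * follow_Mem_chain() below): bump the master counter once per walk,
 * mark every node that is met, and stop as soon as a node marked in
 * the current walk reappears:
 *
 *   INC_MASTER();
 *   for (pred = curr; pred != load; ) {
 *           ldst_info_t *pred_info = get_irn_link(pred);
 *           ...
 *           if (NODE_VISITED(pred_info))
 *                   break;            (cycle: already seen in this walk)
 *           MARK_NODE(pred_info);
 *   }
 */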
110
111 /**
112  * get the Load/Store info of a node
113  */
114 static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst) {
115         ldst_info_t *info = get_irn_link(node);
116
117         if (! info) {
118                 info = obstack_alloc(obst, sizeof(*info));
119                 memset(info, 0, sizeof(*info));
120                 set_irn_link(node, info);
121         }
122         return info;
123 }  /* get_ldst_info */
124
125 /**
126  * get the Block info of a node
127  */
128 static block_info_t *get_block_info(ir_node *node, struct obstack *obst) {
129         block_info_t *info = get_irn_link(node);
130
131         if (! info) {
132                 info = obstack_alloc(obst, sizeof(*info));
133                 memset(info, 0, sizeof(*info));
134                 set_irn_link(node, info);
135         }
136         return info;
137 }  /* get_block_info */
138
139 /**
140  * update the projection info for a Load/Store
141  */
142 static unsigned update_projs(ldst_info_t *info, ir_node *proj)
143 {
144         long nr = get_Proj_proj(proj);
145
146         assert(0 <= nr && nr <= MAX_PROJ && "Wrong proj from LoadStore");
147
148         if (info->projs[nr]) {
149                 /* there is already one, do CSE */
150                 exchange(proj, info->projs[nr]);
151                 return DF_CHANGED;
152         }
153         else {
154                 info->projs[nr] = proj;
155                 return 0;
156         }
157 }  /* update_projs */
158
159 /**
160  * update the exception block info for a Load/Store node.
161  *
162  * @param info   the load/store info struct
163  * @param block  the exception handler block for this load/store
164  * @param pos    the control flow input of the block
165  */
166 static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
167 {
168         assert(info->exc_block == NULL && "more than one exception block found");
169
170         info->exc_block = block;
171         info->exc_idx   = pos;
172         return 0;
173 }  /* update_exc */
174
175 /** Return the number of uses of an address node */
176 #define get_irn_n_uses(adr)     get_irn_n_edges(adr)
177
178 /**
179  * walker, collects all Load/Store/Proj nodes
180  *
181  * walks from Start -> End
182  */
183 static void collect_nodes(ir_node *node, void *env)
184 {
185         ir_opcode   opcode = get_irn_opcode(node);
186         ir_node     *pred, *blk, *pred_blk;
187         ldst_info_t *ldst_info;
188         walk_env_t  *wenv = env;
189
190         if (opcode == iro_Proj) {
191                 pred   = get_Proj_pred(node);
192                 opcode = get_irn_opcode(pred);
193
194                 if (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call) {
195                         ldst_info = get_ldst_info(pred, &wenv->obst);
196
197                         wenv->changes |= update_projs(ldst_info, node);
198
199                         /*
200                          * Place the Proj's into the same block as their
201                          * predecessor. This is always legal and prevents
202                          * "non-SSA" form after optimizations if the Proj
203                          * was in the wrong block.
204                          */
205                         blk      = get_nodes_block(node);
206                         pred_blk = get_nodes_block(pred);
207                         if (blk != pred_blk) {
208                                 wenv->changes |= DF_CHANGED;
209                                 set_nodes_block(node, pred_blk);
210                         }
211                 }
212         } else if (opcode == iro_Block) {
213                 int i;
214
215                 for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
216                         ir_node      *pred_block, *proj;
217                         block_info_t *bl_info;
218                         int          is_exc = 0;
219
220                         pred = proj = get_Block_cfgpred(node, i);
221
222                         if (is_Proj(proj)) {
223                                 pred   = get_Proj_pred(proj);
224                                 is_exc = get_Proj_proj(proj) == pn_Generic_X_except;
225                         }
226
227                         /* ignore Bad predecessors, they will be removed later */
228                         if (is_Bad(pred))
229                                 continue;
230
231                         pred_block = get_nodes_block(pred);
232                         bl_info    = get_block_info(pred_block, &wenv->obst);
233
234                         if (is_fragile_op(pred) && is_exc)
235                                 bl_info->flags |= BLOCK_HAS_EXC;
236                         else if (is_irn_forking(pred))
237                                 bl_info->flags |= BLOCK_HAS_COND;
238
239                         opcode = get_irn_opcode(pred);
240                         if (is_exc && (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call)) {
241                                 ldst_info = get_ldst_info(pred, &wenv->obst);
242
243                                 wenv->changes |= update_exc(ldst_info, node, i);
244                         }
245                 }
246         }
247 }  /* collect_nodes */
248
249 /**
250  * Returns an entity if the address ptr points to a constant one.
251  *
252  * @param ptr  the address
253  *
254  * @return an entity or NULL
255  */
256 static ir_entity *find_constant_entity(ir_node *ptr)
257 {
258         for (;;) {
259                 ir_op *op = get_irn_op(ptr);
260
261                 if (op == op_SymConst && (get_SymConst_kind(ptr) == symconst_addr_ent)) {
262                         ir_entity *ent = get_SymConst_entity(ptr);
263                         if (variability_constant == get_entity_variability(ent))
264                                 return ent;
265                         return NULL;
266                 } else if (op == op_Sel) {
267                         ir_entity *ent = get_Sel_entity(ptr);
268                         ir_type   *tp  = get_entity_owner(ent);
269
270                         /* Do not fiddle with polymorphism. */
271                         if (is_Class_type(get_entity_owner(ent)) &&
272                                 ((get_entity_n_overwrites(ent)    != 0) ||
273                                 (get_entity_n_overwrittenby(ent) != 0)   ) )
274                                 return NULL;
275
276                         if (is_Array_type(tp)) {
277                                 /* check bounds */
278                                 int i, n;
279
280                                 for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
281                                         ir_node *bound;
282                                         tarval *tlower, *tupper;
283                                         ir_node *index = get_Sel_index(ptr, i);
284                                         tarval *tv     = computed_value(index);
285
286                                         /* check if the index is constant */
287                                         if (tv == tarval_bad)
288                                                 return NULL;
289
290                                         bound  = get_array_lower_bound(tp, i);
291                                         tlower = computed_value(bound);
292                                         bound  = get_array_upper_bound(tp, i);
293                                         tupper = computed_value(bound);
294
295                                         if (tlower == tarval_bad || tupper == tarval_bad)
296                                                 return NULL;
297
298                                         if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
299                                                 return NULL;
300                                         if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
301                                                 return NULL;
302
303                                         /* ok, bounds check finished */
304                                 }
305                         }
306
307                         if (variability_constant == get_entity_variability(ent))
308                                 return ent;
309
310                         /* try next */
311                         ptr = get_Sel_ptr(ptr);
312                 } else
313                         return NULL;
314         }
315 }  /* find_constant_entity */
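/*
 * Example (C source level, illustrative): for
 *
 *   static const int tab[4] = { 1, 2, 3, 5 };
 *   ... = tab[2];
 *
 * the address is a Sel with a constant, in-bounds index into an entity
 * of constant variability, so find_constant_entity() returns the entity
 * of tab; optimize_load() below may then fold the Load to the constant 3.
 */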
316
317 /**
318  * Return the selection index of a Sel node for dimension dim.
319  */
320 static long get_Sel_array_index_long(ir_node *n, int dim) {
321         ir_node *index = get_Sel_index(n, dim);
322         assert(is_Const(index));
323         return get_tarval_long(get_Const_tarval(index));
324 }  /* get_Sel_array_index_long */
325
326 /**
327  * Returns the accessed component graph path for an
328  * node computing an address.
329  *
330  * @param ptr    the node computing the address
331  * @param depth  current depth in steps upward from the root
332  *               of the address
333  */
334 static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
335         compound_graph_path *res = NULL;
336         ir_entity           *root, *field;
337         int                 path_len, pos;
338
339         if (get_irn_op(ptr) == op_SymConst) {
340                 /* a SymConst. If the depth is 0, this is an access to a global
341                  * entity and we don't need a component path, else we know
342  * at least its length.
343                  */
344                 assert(get_SymConst_kind(ptr) == symconst_addr_ent);
345                 root = get_SymConst_entity(ptr);
346                 res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
347         } else {
348                 assert(get_irn_op(ptr) == op_Sel);
349                 /* it's a Sel, go up until we find the root */
350                 res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);
351
352                 /* fill up the step in the path at the current position */
353                 field    = get_Sel_entity(ptr);
354                 path_len = get_compound_graph_path_length(res);
355                 pos      = path_len - depth - 1;
356                 set_compound_graph_path_node(res, pos, field);
357
358                 if (is_Array_type(get_entity_owner(field))) {
359                         assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
360                         set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
361                 }
362         }
363         return res;
364 }  /* rec_get_accessed_path */
365
366 /** Returns an access path or NULL.  The access path is only
367  *  valid if the graph is in phase_high and _no_ address computation is used.
368  */
369 static compound_graph_path *get_accessed_path(ir_node *ptr) {
370         return rec_get_accessed_path(ptr, 0);
371 }  /* get_accessed_path */
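/*
 * Example (illustrative): for an address like &s.a.fld[3], built as
 * Sel(Sel(SymConst(s), a), fld) with index 3, rec_get_accessed_path()
 * recurses up to the SymConst root and fills in one entity (plus array
 * index) per Sel, yielding the path "a.fld[3]" below the type of s.
 */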
372
373 /* forward */
374 static void reduce_adr_usage(ir_node *ptr);
375
376 /**
377  * Update a Load that may have lost its usage.
378  */
379 static void handle_load_update(ir_node *load) {
380         ldst_info_t *info = get_irn_link(load);
381
382         /* do NOT touch volatile loads for now */
383         if (get_Load_volatility(load) == volatility_is_volatile)
384                 return;
385
386         if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
387                 ir_node *ptr = get_Load_ptr(load);
388                 ir_node *mem = get_Load_mem(load);
389
390                 /* a Load whose value is neither used nor exception checked, remove it */
391                 exchange(info->projs[pn_Load_M], mem);
392                 if (info->projs[pn_Load_X_regular])
393                         exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
394                 kill_node(load);
395                 reduce_adr_usage(ptr);
396         }
397 }  /* handle_load_update */
398
399 /**
400  * A use of an address node has vanished. Check if it was a Proj
401  * node and update the counters.
402  */
403 static void reduce_adr_usage(ir_node *ptr) {
404         if (is_Proj(ptr)) {
405                 if (get_irn_n_edges(ptr) <= 0) {
406                         /* this Proj is dead now */
407                         ir_node *pred = get_Proj_pred(ptr);
408
409                         if (is_Load(pred)) {
410                                 ldst_info_t *info = get_irn_link(pred);
411                                 info->projs[get_Proj_proj(ptr)] = NULL;
412
413                                 /* this node lost its result proj, handle that */
414                                 handle_load_update(pred);
415                         }
416                 }
417         }
418 }  /* reduce_adr_usage */
419
420 /**
421  * Check if an already existing value of mode old_mode can be converted
422  * into the needed one new_mode without loss.
423  */
424 static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode) {
425         if (old_mode == new_mode)
426                 return 1;
427
428         /* if both modes are two's complement ones, we can always convert the
429            Stored value into the needed one. */
430         if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
431                   get_mode_arithmetic(old_mode) == irma_twos_complement &&
432                   get_mode_arithmetic(new_mode) == irma_twos_complement)
433                 return 1;
434         return 0;
435 }  /* can_use_stored_value */
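/*
 * Example: a value stored with mode_Is (32 bit signed) can serve a later
 * mode_Iu (32 bit unsigned) Load from the same address: both modes are
 * two's complement and the stored value is at least as wide, so a Conv
 * suffices (inserted in follow_Mem_chain() below).  A mode_F (float)
 * Load from the same address is rejected: float arithmetic is not two's
 * complement, and a bit reinterpretation is not a Conv.
 */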
436
437 /**
438  * Check whether a Call is at least pure, i.e. it only reads memory.
439  */
440 static unsigned is_Call_pure(ir_node *call) {
441         ir_type *call_tp = get_Call_type(call);
442         unsigned prop = get_method_additional_properties(call_tp);
443
444         /* check first the call type */
445         if ((prop & (mtp_property_const|mtp_property_pure)) == 0) {
446                 /* try the called entity */
447                 ir_node *ptr = get_Call_ptr(call);
448
449                 if (is_Global(ptr)) {
450                         ir_entity *ent = get_Global_entity(ptr);
451
452                         prop = get_entity_additional_properties(ent);
453                 }
454         }
455         return (prop & (mtp_property_const|mtp_property_pure)) != 0;
456 }  /* is_Call_pure */
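/*
 * Example (assuming a frontend that maps GCC-style attributes): a Call
 * to a function declared
 *
 *   int count(const char *s) __attribute__((pure));
 *
 * would carry mtp_property_pure on its type or entity, so the memory
 * chain walks below may step over it as if it were a Load.
 */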
457
458 /**
459  * Follow the memory chain as long as there are only Loads,
460  * alias-free Stores, and pure Calls, and try to replace the
461  * current Load by a previous one.
462  * Note that in unreachable loops we might reach the Load again,
463  * and we can also fall into a cycle.
464  * We break such cycles using a special visited flag.
465  *
466  * INC_MASTER() must be called before diving in.
467  */
468 static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
469         unsigned res = 0;
470         ldst_info_t *info = get_irn_link(load);
471         ir_node *pred;
472         ir_node *ptr       = get_Load_ptr(load);
473         ir_node *mem       = get_Load_mem(load);
474         ir_mode *load_mode = get_Load_mode(load);
475
476         for (pred = curr; load != pred; ) {
477                 ldst_info_t *pred_info = get_irn_link(pred);
478
479                 /*
480                  * BEWARE: one might think that checking the modes is useless, because
481                  * if the pointers are identical, they refer to the same object.
482                  * This is only true in strongly typed languages, not in C, where the
483                  * following is possible: a = *(ir_type1 *)p; b = *(ir_type2 *)p; ...
484                  */
485                 if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
486                     can_use_stored_value(get_irn_mode(get_Store_value(pred)), load_mode)) {
487                         /*
488                          * a Load immediately after a Store -- a read after write.
489                          * We may remove the Load if neither Load nor Store has an exception handler
490                          * OR they are in the same MacroBlock. In the latter case the Load cannot
491                          * throw an exception when the previous Store did not either.
492                          *
493                          * Why do we need to check the Store exception? If the Store cannot
494                          * be executed (ROM), the exception handler might simply jump into
495                          * the load MacroBlock :-(
496                          * We could make it a little bit better if we would know that the exception
497                          * handler of the Store jumps directly to the end...
498                          */
499                         if ((pred_info->projs[pn_Store_X_except] == NULL && info->projs[pn_Load_X_except] == NULL) ||
500                             get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)) {
501                                 ir_node *value = get_Store_value(pred);
502
503                                 DBG_OPT_RAW(load, value);
504
505                                 /* add a Conv if needed */
506                                 if (get_irn_mode(get_Store_value(pred)) != load_mode) {
507                                         value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
508                                 }
509
510                                 if (info->projs[pn_Load_M])
511                                         exchange(info->projs[pn_Load_M], mem);
512
513                                 /* no exception */
514                                 if (info->projs[pn_Load_X_except]) {
515                                         exchange( info->projs[pn_Load_X_except], new_Bad());
516                                         res |= CF_CHANGED;
517                                 }
518                                 if (info->projs[pn_Load_X_regular]) {
519                                         exchange( info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
520                                         res |= CF_CHANGED;
521                                 }
522
523                                 if (info->projs[pn_Load_res])
524                                         exchange(info->projs[pn_Load_res], value);
525
526                                 kill_node(load);
527                                 reduce_adr_usage(ptr);
528                                 return res | DF_CHANGED;
529                         }
530                 } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
531                            can_use_stored_value(get_Load_mode(pred), load_mode)) {
532                         /*
533                          * a Load after a Load -- a read after read.
534                          * We may remove the second Load if it does not have an exception handler
535                          * OR they are in the same MacroBlock. In the latter case the Load cannot
536                          * throw an exception when the previous Load did not either.
537                          *
538                          * Here, there is no need to check if the previous Load has an exception
539                          * handler because both would have exactly the same exception...
540                          */
541                         if (info->projs[pn_Load_X_except] == NULL || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)) {
542                                 ir_node *value;
543
544                                 DBG_OPT_RAR(load, pred);
545
546                                 /* the result is used */
547                                 if (info->projs[pn_Load_res]) {
548                                         if (pred_info->projs[pn_Load_res] == NULL) {
549                                                 /* create a new Proj again */
550                                                 pred_info->projs[pn_Load_res] = new_r_Proj(current_ir_graph, get_nodes_block(pred), pred, get_Load_mode(pred), pn_Load_res);
551                                         }
552                                         value = pred_info->projs[pn_Load_res];
553
554                                         /* add a Conv if needed */
555                                         if (get_Load_mode(pred) != load_mode) {
556                                                 value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
557                                         }
558
559                                         exchange(info->projs[pn_Load_res], value);
560                                 }
561
562                                 if (info->projs[pn_Load_M])
563                                         exchange(info->projs[pn_Load_M], mem);
564
565                                 /* no exception */
566                                 if (info->projs[pn_Load_X_except]) {
567                                         exchange(info->projs[pn_Load_X_except], new_Bad());
568                                         res |= CF_CHANGED;
569                                 }
570                                 if (info->projs[pn_Load_X_regular]) {
571                                         exchange( info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
572                                         res |= CF_CHANGED;
573                                 }
574
575                                 kill_node(load);
576                                 reduce_adr_usage(ptr);
577                                 return res | DF_CHANGED;
578                         }
579                 }
580
581                 if (is_Store(pred)) {
582                         /* check if we can pass through this store */
583                         ir_alias_relation rel = get_alias_relation(
584                                 current_ir_graph,
585                                 get_Store_ptr(pred),
586                                 get_irn_mode(get_Store_value(pred)),
587                                 ptr, load_mode);
588                         /* if there might be an alias, we cannot pass this Store */
589                         if (rel != ir_no_alias)
590                                 break;
591                         pred = skip_Proj(get_Store_mem(pred));
592                 } else if (is_Load(pred)) {
593                         pred = skip_Proj(get_Load_mem(pred));
594                 } else if (is_Call(pred)) {
595                         if (is_Call_pure(pred)) {
596                                 /* The called graph is at least pure, so there are no Store's
597                                    in it. We can handle it like a Load and skip it. */
598                                 pred = skip_Proj(get_Call_mem(pred));
599                         } else {
600                                 /* there might be Store's in the graph, stop here */
601                                 break;
602                         }
603                 } else {
604                         /* follow only Load chains */
605                         break;
606                 }
607
608                 /* check for cycles */
609                 if (NODE_VISITED(pred_info))
610                         break;
611                 MARK_NODE(pred_info);
612         }
613
614         if (is_Sync(pred)) {
615                 int i;
616
617                 /* handle all Sync predecessors */
618                 for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
619                         res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
620                         if (res)
621                                 return res;
622                 }
623         }
624
625         return res;
626 }  /* follow_Mem_chain */
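/*
 * Source-level effect of the two cases above (illustrative):
 *
 *   *p = x;          read after write: the Load result is replaced
 *   a  = *p;         by x (plus a Conv if the modes differ)
 *
 *   a = *p;          read after read: the second Load result is
 *   b = *p;          replaced by the result Proj of the first Load
 *
 * In both cases the memory Proj of the removed Load is rerouted to the
 * Load's memory input, and dead exception Proj's are exchanged for Bad.
 */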
627
628 /**
629  * optimize a Load
630  *
631  * @param load  the Load node
632  */
633 static unsigned optimize_load(ir_node *load)
634 {
635         ldst_info_t *info = get_irn_link(load);
636         ir_node *mem, *ptr, *new_node;
637         ir_entity *ent;
638         unsigned res = 0;
639
640         /* do NOT touch volatile loads for now */
641         if (get_Load_volatility(load) == volatility_is_volatile)
642                 return 0;
643
644         /* the address of the load to be optimized */
645         ptr = get_Load_ptr(load);
646
647         /*
648          * Check if we can remove the exception from a Load:
649          * This can be done if the address comes from a Sel(Alloc) and
650          * the Sel type is a subtype of the allocated type.
651          *
652          * This optimizes some often used OO constructs,
653          * like x = new O; x->t;
654          */
655         if (info->projs[pn_Load_X_except]) {
656                 if (is_Sel(ptr)) {
657                         ir_node *mem = get_Sel_mem(ptr);
658
659                         /* FIXME: works with the current FE, but better use the base */
660                         if (is_Alloc(skip_Proj(mem))) {
661                                 /* ok, check the types */
662                                 ir_entity *ent    = get_Sel_entity(ptr);
663                                 ir_type   *s_type = get_entity_type(ent);
664                                 ir_type   *a_type = get_Alloc_type(mem);
665
666                                 if (is_SubClass_of(s_type, a_type)) {
667                                         /* ok, condition met: there can't be an exception because
668                                          * Alloc guarantees that enough memory was allocated */
669
670                                         exchange(info->projs[pn_Load_X_except], new_Bad());
671                                         info->projs[pn_Load_X_except] = NULL;
672                                         exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
673                                         info->projs[pn_Load_X_regular] = NULL;
674                                         res |= CF_CHANGED;
675                                 }
676                         }
677                 } else if (is_Alloc(skip_Proj(skip_Cast(ptr)))) {
678                                 /* simple case: a direct load after an Alloc. A Firm Alloc throws
679                                  * an exception in case of out-of-memory, so there is no way for an
680                                  * exception in this Load.
681                                  * This code is constructed by the "exception lowering" in the Jack compiler.
682                                  */
683                                 exchange(info->projs[pn_Load_X_except], new_Bad());
684                                 info->projs[pn_Load_X_except] = NULL;
685                                 exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
686                                 info->projs[pn_Load_X_regular] = NULL;
687                                 res |= CF_CHANGED;
688                 }
689         }
690
691         /* The mem of the Load. Must still be returned after optimization. */
692         mem  = get_Load_mem(load);
693
694         if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
695                 /* a Load whose value is neither used nor exception checked, remove it */
696                 exchange(info->projs[pn_Load_M], mem);
697
698                 if (info->projs[pn_Load_X_regular]) {
699                         /* should not happen, but if it does, remove it */
700                         exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
701                         res |= CF_CHANGED;
702                 }
703                 kill_node(load);
704                 reduce_adr_usage(ptr);
705                 return res | DF_CHANGED;
706         }
707
708         /* Load from a constant polymorphic field, where we can resolve
709            polymorphism. */
710         new_node = transform_node_Load(load);
711         if (new_node != load) {
712                 if (info->projs[pn_Load_M]) {
713                         exchange(info->projs[pn_Load_M], mem);
714                         info->projs[pn_Load_M] = NULL;
715                 }
716                 if (info->projs[pn_Load_X_except]) {
717                         exchange(info->projs[pn_Load_X_except], new_Bad());
718                         info->projs[pn_Load_X_except] = NULL;
719                         res |= CF_CHANGED;
720                 }
721                 if (info->projs[pn_Load_X_regular]) {
722                         exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
723                         info->projs[pn_Load_X_regular] = NULL;
724                         res |= CF_CHANGED;
725                 }
726                 if (info->projs[pn_Load_res])
727                         exchange(info->projs[pn_Load_res], new_node);
728
729                 kill_node(load);
730                 reduce_adr_usage(ptr);
731                 return res | DF_CHANGED;
732         }
733
734         /* check if we can determine the entity that will be loaded */
735         ent = find_constant_entity(ptr);
736         if (ent) {
737                 if ((allocation_static == get_entity_allocation(ent)) &&
738                         (visibility_external_allocated != get_entity_visibility(ent))) {
739                         /* a static allocation that is not external: there should be NO exception
740                          * when loading. */
741
742                         /* no exception, clear the info field as it might be checked later again */
743                         if (info->projs[pn_Load_X_except]) {
744                                 exchange(info->projs[pn_Load_X_except], new_Bad());
745                                 info->projs[pn_Load_X_except] = NULL;
746                                 res |= CF_CHANGED;
747                         }
748                         if (info->projs[pn_Load_X_regular]) {
749                                 exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
750                                 info->projs[pn_Load_X_regular] = NULL;
751                                 res |= CF_CHANGED;
752                         }
753
754                         if (variability_constant == get_entity_variability(ent)) {
755                                 if (is_atomic_entity(ent)) {
756                                         /* Might not be atomic after
757                                            lowering of Sels.  In this
758                                            case we could also load, but
759                                            it's more complicated. */
760                                         /* simpler case: we load the content of a constant value:
761                                          * replace it by the constant itself
762                                          */
763
764                                         /* no memory */
765                                         if (info->projs[pn_Load_M]) {
766                                                 exchange(info->projs[pn_Load_M], mem);
767                                                 res |= DF_CHANGED;
768                                         }
769                                         /* no result :-) */
770                                         if (info->projs[pn_Load_res]) {
771                                                 if (is_atomic_entity(ent)) {
772                                                         ir_node *c = copy_const_value(get_irn_dbg_info(load), get_atomic_ent_value(ent));
773
774                                                         DBG_OPT_RC(load, c);
775                                                         exchange(info->projs[pn_Load_res], c);
776                                                         res |= DF_CHANGED;
777                                                 }
778                                         }
779                                         kill_node(load);
780                                         reduce_adr_usage(ptr);
781                                         return res;
782                                 } else {
783                                         compound_graph_path *path = get_accessed_path(ptr);
784
785                                         if (path) {
786                                                 ir_node *c;
787
788                                                 assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
789                                                 /*
790                                                 {
791                                                         int j;
792                                                         for (j = 0; j < get_compound_graph_path_length(path); ++j) {
793                                                                 ir_entity *node = get_compound_graph_path_node(path, j);
794                                                                 fprintf(stdout, ".%s", get_entity_name(node));
795                                                                 if (is_Array_type(get_entity_owner(node)))
796                                                                         fprintf(stdout, "[%d]", get_compound_graph_path_array_index(path, j));
797                                                         }
798                                                         printf("\n");
799                                                 }
800                                                 */
801
802                                                 c = get_compound_ent_value_by_path(ent, path);
803                                                 free_compound_graph_path(path);
804
805                                                 /* printf("  cons: "); DDMN(c); */
806
807                                                 if (info->projs[pn_Load_M]) {
808                                                         exchange(info->projs[pn_Load_M], mem);
809                                                         res |= DF_CHANGED;
810                                                 }
811                                                 if (info->projs[pn_Load_res]) {
812                                                         exchange(info->projs[pn_Load_res], copy_const_value(get_irn_dbg_info(load), c));
813                                                         res |= DF_CHANGED;
814                                                 }
815                                                 kill_node(load);
816                                                 reduce_adr_usage(ptr);
817                                                 return res;
818                                         } else {
819                                                 /*  We cannot determine a correct access path.  E.g., in jack, we load
820                                                 a byte from an object to generate an exception.   Happens in test program
821                                                 Reflectiontest.
822                                                 printf(">>>>>>>>>>>>> Found access to constant entity %s in function %s\n", get_entity_name(ent),
823                                                 get_entity_name(get_irg_entity(current_ir_graph)));
824                                                 printf("  load: "); DDMN(load);
825                                                 printf("  ptr:  "); DDMN(ptr);
826                                                 */
827                                         }
828                                 }
829                         }
830                 }
831         }
832
833         /* Check if the address of this load is used more than once.
834          * If not, this load cannot be removed in any case. */
835         if (get_irn_n_uses(ptr) <= 1)
836                 return res;
837
838         /*
839          * follow the memory chain as long as there are only Loads
840          * and try to replace the current Load by a previous one.
841          * Note that in unreachable loops we might reach the Load again,
842          * and we can also fall into a cycle.
843          * We break such cycles using a special visited flag.
844          */
845         INC_MASTER();
846         res = follow_Mem_chain(load, skip_Proj(mem));
847         return res;
848 }  /* optimize_load */
849
850 /**
851  * Check whether a value of mode new_mode would completely overwrite a value
852  * of mode old_mode in memory.
853  */
854 static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
855 {
856         return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
857 }  /* is_completely_overwritten */
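/*
 * Example: a 16 bit Store followed by a 32 bit Store to the same address
 * is a dead write-after-write; follow_Mem_chain_for_Store() below may
 * kill the first Store:
 *
 *   *(short *)p = s;        (completely overwritten by the next Store)
 *   *(int *)p   = x;
 *
 * In the reverse order both Stores must stay, because 16 bits of the
 * wider value would survive the narrower Store.
 */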
858
859 /**
860  * Follow the memory chain as long as there are only Loads and alias-free Stores.
861  *
862  * INC_MASTER() must be called before diving in.
863  */
864 static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
865         unsigned res = 0;
866         ldst_info_t *info = get_irn_link(store);
867         ir_node *pred;
868         ir_node *ptr = get_Store_ptr(store);
869         ir_node *mem = get_Store_mem(store);
870         ir_node *value = get_Store_value(store);
871         ir_mode *mode  = get_irn_mode(value);
872         ir_node *block = get_nodes_block(store);
873         ir_node *mblk  = get_Block_MacroBlock(block);
874
875         for (pred = curr; pred != store;) {
876                 ldst_info_t *pred_info = get_irn_link(pred);
877
878                 /*
879                  * BEWARE: one might think that checking the modes is useless, because
880                  * if the pointers are identical, they refer to the same object.
881                  * This is only true in strongly typed languages, not in C, where the
882                  * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b; ...
883                  * However, if the mode that is written has a size greater than or equal
884                  * to the old one, the old value is completely overwritten and can be killed ...
885                  */
886                 if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
887                     get_nodes_MacroBlock(pred) == mblk &&
888                     is_completely_overwritten(get_irn_mode(get_Store_value(pred)), mode)) {
889                         /*
890                          * a Store after a Store in the same MacroBlock -- a write after write.
891                          * We may remove the first Store if it does not have an exception handler.
892                          *
893                          * TODO: What if both have the same exception handler ???
894                          */
895                         if (get_Store_volatility(pred) != volatility_is_volatile && !pred_info->projs[pn_Store_X_except]) {
896                                 DBG_OPT_WAW(pred, store);
897                                 exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
898                                 kill_node(pred);
899                                 reduce_adr_usage(ptr);
900                                 return DF_CHANGED;
901                         }
902                 } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
903                            value == pred_info->projs[pn_Load_res]) {
904                         /*
905                          * a Store of a value just loaded from the same address
906                          * -- a write after read.
907                          * We may remove the Store, if it does not have an exception
908                          * handler.
909                          */
910                         if (! info->projs[pn_Store_X_except]) {
911                                 DBG_OPT_WAR(store, pred);
912                                 exchange(info->projs[pn_Store_M], mem);
913                                 kill_node(store);
914                                 reduce_adr_usage(ptr);
915                                 return DF_CHANGED;
916                         }
917                 }
918
919                 if (is_Store(pred)) {
920                         /* check if we can pass through this Store */
921                         ir_alias_relation rel = get_alias_relation(
922                                 current_ir_graph,
923                                 get_Store_ptr(pred),
924                                 get_irn_mode(get_Store_value(pred)),
925                                 ptr, mode);
926                         /* if there might be an alias, we cannot pass this Store */
927                         if (rel != ir_no_alias)
928                                 break;
929                         pred = skip_Proj(get_Store_mem(pred));
930                 } else if (get_irn_op(pred) == op_Load) {
931                         ir_alias_relation rel = get_alias_relation(
932                                 current_ir_graph, get_Load_ptr(pred), get_Load_mode(pred),
933                                 ptr, mode);
934                         if (rel != ir_no_alias)
935                                 break;
936
937                         pred = skip_Proj(get_Load_mem(pred));
938                 } else {
939                         /* follow only Load chains */
940                         break;
941                 }
942
943                 /* check for cycles */
944                 if (NODE_VISITED(pred_info))
945                         break;
946                 MARK_NODE(pred_info);
947         }
948
949         if (is_Sync(pred)) {
950                 int i;
951
952                 /* handle all Sync predecessors */
953                 for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
954                         res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
955                         if (res)
956                                 break;
957                 }
958         }
959         return res;
960 }  /* follow_Mem_chain_for_Store */
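/*
 * Source-level effect of the write-after-read case above (illustrative):
 *
 *   t  = *p;
 *   *p = t;          the Store writes back what was just loaded from the
 *                    same address; it is removed and its memory Proj is
 *                    rerouted to the Store's memory input
 */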
961
962 /**
963  * optimize a Store
964  *
965  * @param store  the Store node
966  */
967 static unsigned optimize_store(ir_node *store) {
968         ir_node *ptr, *mem;
969
970         if (get_Store_volatility(store) == volatility_is_volatile)
971                 return 0;
972
973         ptr = get_Store_ptr(store);
974
975         /* Check if the address of this Store is used more than once.
976          * If not, this Store cannot be removed in any case. */
977         if (get_irn_n_uses(ptr) <= 1)
978                 return 0;
979
980         mem = get_Store_mem(store);
981
982         /* follow the memory chain as long as there are only Loads */
983         INC_MASTER();
984
985         return follow_Mem_chain_for_Store(store, skip_Proj(mem));
986 }  /* optimize_store */
987
988 /**
989  * walker, optimizes Phi after Stores to identical places:
990  * Does the following optimization:
991  * @verbatim
992  *
993  *   val1   val2   val3          val1  val2  val3
994  *    |      |      |               \    |    /
995  *  Store  Store  Store              \   |   /
996  *      \    |    /                   PhiData
997  *       \   |   /                       |
998  *        \  |  /                      Store
999  *          PhiM
1000  *
1001  * @endverbatim
1002  * This reduces the number of stores and allows for predicated execution.
1003  * It moves Stores back toward the end of a function, which may be bad.
1004  *
1005  * This is only possible if the predecessor blocks have only one successor.
1006  */
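/*
 * Source-level sketch of this transformation (illustrative):
 *
 *   if (c) *p = a; else *p = b;
 *
 * becomes
 *
 *   t = c ? a : b; *p = t;
 *
 * i.e. the Stores feeding the memory Phi are replaced by a data Phi
 * feeding a single Store in the Phi's block.
 */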
1007 static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
1008 {
1009         int i, n;
1010         ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
1011         ir_mode *mode;
1012         ir_node **inM, **inD, **projMs;
1013         int *idx;
1014         dbg_info *db = NULL;
1015         ldst_info_t *info;
1016         block_info_t *bl_info;
1017         unsigned res = 0;
1018
1019         /* Must be a memory Phi */
1020         if (get_irn_mode(phi) != mode_M)
1021                 return 0;
1022
1023         n = get_Phi_n_preds(phi);
1024         if (n <= 0)
1025                 return 0;
1026
1027         /* must be only one user */
1028         projM = get_Phi_pred(phi, 0);
1029         if (get_irn_n_edges(projM) != 1)
1030                 return 0;
1031
1032         store = skip_Proj(projM);
1033         old_store = store;
1034         if (get_irn_op(store) != op_Store)
1035                 return 0;
1036
1037         block = get_nodes_block(store);
1038
1039         /* abort on dead blocks */
1040         if (is_Block_dead(block))
1041                 return 0;
1042
1043         /* check if the block is post dominated by Phi-block
1044            and has no exception exit */
1045         bl_info = get_irn_link(block);
1046         if (bl_info->flags & BLOCK_HAS_EXC)
1047                 return 0;
1048
1049         phi_block = get_nodes_block(phi);
1050         if (! block_strictly_postdominates(phi_block, block))
1051                 return 0;
1052
1053         /* this is the address of the store */
1054         ptr  = get_Store_ptr(store);
1055         mode = get_irn_mode(get_Store_value(store));
1056         info = get_irn_link(store);
1057         exc  = info->exc_block;
1058
1059         for (i = 1; i < n; ++i) {
1060                 ir_node *pred = get_Phi_pred(phi, i);
1061
1062                 if (get_irn_n_edges(pred) != 1)
1063                         return 0;
1064
1065                 pred = skip_Proj(pred);
1066                 if (!is_Store(pred))
1067                         return 0;
1068
1069                 if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
1070                         return 0;
1071
1072                 info = get_irn_link(pred);
1073
1074                 /* check, if all stores have the same exception flow */
1075                 if (exc != info->exc_block)
1076                         return 0;
1077
1078                 /* abort on dead blocks */
1079                 block = get_nodes_block(pred);
1080                 if (is_Block_dead(block))
1081                         return 0;
1082
1083                 /* check if the block is post dominated by Phi-block
1084                    and has no exception exit. Note that block must be different from
1085                    Phi-block, else we would move a Store from the end of a block to its
1086                    Start... */
1087                 bl_info = get_irn_link(block);
1088                 if (bl_info->flags & BLOCK_HAS_EXC)
1089                         return 0;
1090                 if (block == phi_block || ! block_postdominates(phi_block, block))
1091                         return 0;
1092         }
1093
1094         /*
1095          * ok, if we reach this point, we found all predecessors of a Phi that
1096          * are Stores to the same address and size. That means whatever
1097          * we do before we enter the block of the Phi, we do a Store.
1098          * So, we can move the Store to the current block:
1099          *
1100          *   val1    val2    val3          val1  val2  val3
1101          *    |       |       |               \    |    /
1102          * | Str | | Str | | Str |             \   |   /
1103          *      \     |     /                   PhiData
1104          *       \    |    /                       |
1105          *        \   |   /                       Str
1106          *           PhiM
1107          *
1108          * This is only allowed if the predecessor blocks have only one successor.
1109          */
1110
1111         NEW_ARR_A(ir_node *, projMs, n);
1112         NEW_ARR_A(ir_node *, inM, n);
1113         NEW_ARR_A(ir_node *, inD, n);
1114         NEW_ARR_A(int, idx, n);
1115
1116         /* Prepare: Collect all Store nodes.  We must do this
1117            first because we may otherwise lose a Store when exchanging its
1118            memory Proj.
1119          */
1120         for (i = n - 1; i >= 0; --i) {
1121                 ir_node *store;
1122
1123                 projMs[i] = get_Phi_pred(phi, i);
1124                 assert(is_Proj(projMs[i]));
1125
1126                 store = get_Proj_pred(projMs[i]);
1127                 info  = get_irn_link(store);
1128
1129                 inM[i] = get_Store_mem(store);
1130                 inD[i] = get_Store_value(store);
1131                 idx[i] = info->exc_idx;
1132         }
1133         block = get_nodes_block(phi);
1134
1135         /* second step: create a new memory Phi */
1136         phiM = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inM, mode_M);
1137
1138         /* third step: create a new data Phi */
1139         phiD = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inD, mode);
1140
1141         /* rewire memory and kill the node */
1142         for (i = n - 1; i >= 0; --i) {
1143                 ir_node *proj  = projMs[i];
1144
1145                 if (is_Proj(proj)) {
1146                         ir_node *store = get_Proj_pred(proj);
1147                         exchange(proj, inM[i]);
1148                         kill_node(store);
1149                 }
1150         }
1151
1152         /* fourth step: create the Store */
1153         store = new_rd_Store(db, current_ir_graph, block, phiM, ptr, phiD);
1154 #ifdef DO_CACHEOPT
1155         co_set_irn_name(store, co_get_irn_ident(old_store));
1156 #endif
1157
1158         projM = new_rd_Proj(NULL, current_ir_graph, block, store, mode_M, pn_Store_M);
1159
1160         info = get_ldst_info(store, &wenv->obst);
1161         info->projs[pn_Store_M] = projM;
1162
1163         /* fifth step: repair exception flow */
1164         if (exc) {
1165                 ir_node *projX = new_rd_Proj(NULL, current_ir_graph, block, store, mode_X, pn_Store_X_except);
1166
1167                 info->projs[pn_Store_X_except] = projX;
1168                 info->exc_block                = exc;
1169                 info->exc_idx                  = idx[0];
1170
1171                 for (i = 0; i < n; ++i) {
1172                         set_Block_cfgpred(exc, idx[i], projX);
1173                 }
1174
1175                 if (n > 1) {
1176                         /* the exception block should be optimized as some inputs are identical now */
1177                 }
1178
1179                 res |= CF_CHANGED;
1180         }
1181
1182         /* sixth step: replace old Phi */
1183         exchange(phi, projM);
1184
1185         return res | DF_CHANGED;
1186 }  /* optimize_phi */
1187
1188 /**
1189  * walker, do the optimizations
1190  */
1191 static void do_load_store_optimize(ir_node *n, void *env) {
1192         walk_env_t *wenv = env;
1193
1194         switch (get_irn_opcode(n)) {
1195
1196         case iro_Load:
1197                 wenv->changes |= optimize_load(n);
1198                 break;
1199
1200         case iro_Store:
1201                 wenv->changes |= optimize_store(n);
1202                 break;
1203
1204         case iro_Phi:
1205                 wenv->changes |= optimize_phi(n, wenv);
1206                 break;
1207
1208         default:
1209                 ;
1210         }
1211 }  /* do_load_store_optimize */
1212
1213 /** A scc. */
1214 typedef struct scc {
1215         ir_node *head;          /**< the head of the list */
1216 } scc;
1217
1218 /** A node entry. */
1219 typedef struct node_entry {
1220         unsigned DFSnum;    /**< the DFS number of this node */
1221         unsigned low;       /**< the low number of this node */
1222         ir_node  *header;   /**< the header of this node */
1223         int      in_stack;  /**< flag, set if the node is on the stack */
1224         ir_node  *next;     /**< link to the next node in the same scc */
1225         scc      *pscc;     /**< the scc of this node */
1226         unsigned POnum;     /**< the post order number for blocks */
1227 } node_entry;
1228
1229 /** A loop entry. */
1230 typedef struct loop_env {
1231         ir_phase ph;           /**< the phase object */
1232         ir_node  **stack;      /**< the node stack */
1233         int      tos;          /**< tos index */
1234         unsigned nextDFSnum;   /**< the current DFS number */
1235         unsigned POnum;        /**< current post order number */
1236
1237         unsigned changes;      /**< a bitmask of graph changes */
1238 } loop_env;
1239
1240 /**
1241  * Gets the node_entry of a node.
1242  */
1243 static node_entry *get_irn_ne(ir_node *irn, loop_env *env) {
1244         ir_phase   *ph = &env->ph;
1245         node_entry *e  = phase_get_irn_data(&env->ph, irn);
1246
1247         if (! e) {
1248                 e = phase_alloc(ph, sizeof(*e));
1249                 memset(e, 0, sizeof(*e));
1250                 phase_set_irn_data(ph, irn, e);
1251         }
1252         return e;
1253 }  /* get_irn_ne */
1254
1255 /**
1256  * Push a node onto the stack.
1257  *
1258  * @param env   the loop environment
1259  * @param n     the node to push
1260  */
1261 static void push(loop_env *env, ir_node *n) {
1262         node_entry *e;
1263
1264         if (env->tos == ARR_LEN(env->stack)) {
1265                 int nlen = ARR_LEN(env->stack) * 2;
1266                 ARR_RESIZE(ir_node *, env->stack, nlen);
1267         }
1268         env->stack[env->tos++] = n;
1269         e = get_irn_ne(n, env);
1270         e->in_stack = 1;
1271 }  /* push */
1272
1273 /**
1274  * Pop a node from the stack.
1275  *
1276  * @param env   the loop environment
1277  *
1278  * @return  The topmost node
1279  */
1280 static ir_node *pop(loop_env *env) {
1281         ir_node *n = env->stack[--env->tos];
1282         node_entry *e = get_irn_ne(n, env);
1283
1284         e->in_stack = 0;
1285         return n;
1286 }  /* pop */
1287
1288 /**
1289  * Check if irn is a region constant.
1290  * The block of irn must strictly dominate the header block.
1291  *
1292  * @param irn           the node to check
1293  * @param header_block  the header block of the induction variable
1294  */
1295 static int is_rc(ir_node *irn, ir_node *header_block) {
1296         ir_node *block = get_nodes_block(irn);
1297
1298         return (block != header_block) && block_dominates(block, header_block);
1299 }  /* is_rc */
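/*
 * Example (illustrative): for a loop
 *
 *     x = ...;              // block B0, strictly dominates the header
 *     while (c) {           // header block B1
 *         y = Phi(x, y');
 *         ...
 *     }
 *
 * x is a region constant w.r.t. B1: its block B0 dominates B1 and is
 * not B1 itself, so x has the same value on every loop iteration.
 */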
1300
1301 typedef struct phi_entry phi_entry;
1302 struct phi_entry {
1303         ir_node   *phi;    /**< A phi with a region const memory. */
1304         int       pos;     /**< The position of the region const memory */
1305         ir_node   *load;   /**< the newly created load for this phi */
1306         phi_entry *next;
1307 };
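/*
 * Source-level sketch (illustrative, assuming a global g) of the
 * transformation performed by move_loads_out_of_loops() below:
 *
 *     while (c) {                 t = g;          // hoisted Load
 *         x += g;       ==>       while (c) {
 *     }                               x += t;
 *                                 }
 *
 * The Load of g is copied in front of the loop entry, one copy per
 * region constant memory input of the header Phi (currently only a
 * single such input is handled, see the check on phi_list->next),
 * provided no Store or impure Call inside the loop may alias g.
 */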
1308
1309 /**
1310  * Move Loads out of loops if possible.
1311  *
1312  * @param pscc   the loop described by an SCC
1313  * @param env    the loop environment
1314  */
1315 static void move_loads_out_of_loops(scc *pscc, loop_env *env) {
1316         ir_node   *phi, *load, *next, *other, *next_other;
1317         ir_entity *ent;
1318         int       j;
1319         phi_entry *phi_list = NULL;
1320
1321         /* collect all outer memories */
1322         for (phi = pscc->head; phi != NULL; phi = next) {
1323                 node_entry *ne = get_irn_ne(phi, env);
1324                 next = ne->next;
1325
1326                 /* check all memory Phi's */
1327                 if (! is_Phi(phi))
1328                         continue;
1329
1330         assert(get_irn_mode(phi) == mode_M && "DFS found a non-memory Phi");
1331
1332                 for (j = get_irn_arity(phi) - 1; j >= 0; --j) {
1333                         ir_node    *pred = get_irn_n(phi, j);
1334                         node_entry *pe   = get_irn_ne(pred, env);
1335
1336                         if (pe->pscc != ne->pscc) {
1337                                 /* not in the same SCC, so this input is a region constant */
1338                                 phi_entry *npe = phase_alloc(&env->ph, sizeof(*npe));
1339
1340                                 npe->phi  = phi;
1341                                 npe->pos  = j;
1342                                 npe->next = phi_list;
1343                                 phi_list = npe;
1344                         }
1345                 }
1346         }
1347         /* no Phis, no fun */
1348         assert(phi_list != NULL && "DFS found a loop without Phi");
1349
1350         for (load = pscc->head; load; load = next) {
1351                 ir_mode *load_mode;
1352                 node_entry *ne = get_irn_ne(load, env);
1353                 next = ne->next;
1354
1355                 if (is_Load(load)) {
1356                         ldst_info_t *info = get_irn_link(load);
1357                         ir_node     *ptr = get_Load_ptr(load);
1358
1359                         /* for now, we cannot handle Loads with exceptions */
1360                         if (info->projs[pn_Load_res] == NULL || info->projs[pn_Load_X_regular] != NULL || info->projs[pn_Load_X_except] != NULL)
1361                                 continue;
1362
1363                         /* for now, we can only handle Load(Global) */
1364                         if (! is_Global(ptr))
1365                                 continue;
1366                         ent = get_Global_entity(ptr);
1367                         load_mode = get_Load_mode(load);
1368                         for (other = pscc->head; other != NULL; other = next_other) {
1369                                 node_entry *ne = get_irn_ne(other, env);
1370                                 next_other = ne->next;
1371
1372                                 if (is_Store(other)) {
1373                                         ir_alias_relation rel = get_alias_relation(
1374                                                 current_ir_graph,
1375                                                 get_Store_ptr(other),
1376                                                 get_irn_mode(get_Store_value(other)),
1377                                                 ptr, load_mode);
1378                                         /* if there might be an alias, we cannot move the Load past this Store */
1379                                         if (rel != ir_no_alias)
1380                                                 break;
1381                                 }
1382                                 /* only pure Calls are allowed here, so ignore them */
1383                         }
1384                         if (other == NULL) {
1385                                 ldst_info_t *ninfo;
1386                                 phi_entry   *pe;
1387                                 dbg_info    *db;
1388
1389                                 /* for now, we cannot handle more than one input */
1390                                 if (phi_list->next != NULL)
1391                                         return;
1392
1393                                 /* yep, no aliasing Store found, Load can be moved */
1394                                 DB((dbg, LEVEL_1, "  Found a Load that could be moved: %+F\n", load));
1395
1396                                 db   = get_irn_dbg_info(load);
1397                                 for (pe = phi_list; pe != NULL; pe = pe->next) {
1398                                         int     pos   = pe->pos;
1399                                         ir_node *phi  = pe->phi;
1400                                         ir_node *blk  = get_nodes_block(phi);
1401                                         ir_node *pred = get_Block_cfgpred_block(blk, pos);
1402                                         ir_node *irn, *mem;
1403
1404                                         pe->load = irn = new_rd_Load(db, current_ir_graph, pred, get_Phi_pred(phi, pos), ptr, load_mode);
1405                                         ninfo = get_ldst_info(irn, phase_obst(&env->ph));
1406
1407                                         ninfo->projs[pn_Load_M] = mem = new_r_Proj(current_ir_graph, pred, irn, mode_M, pn_Load_M);
1408                                         set_Phi_pred(phi, pos, mem);
1409
1410                                         ninfo->projs[pn_Load_res] = new_r_Proj(current_ir_graph, pred, irn, load_mode, pn_Load_res);
1411
1412                                         DB((dbg, LEVEL_1, "  Created %+F in %+F\n", irn, pred));
1413                                 }
1414
1415                                 /* now kill the old Load */
1416                                 exchange(info->projs[pn_Load_M], get_Load_mem(load));
1417                                 exchange(info->projs[pn_Load_res], ninfo->projs[pn_Load_res]);
1418
1419                                 env->changes |= DF_CHANGED;
1420                         }
1421                 }
1422         }
1423 }  /* move_loads_out_of_loops */
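/*
 * Sketch of the alias test used above, with illustrative globals: for
 *
 *     int g, h;
 *     while (c) { t = g; h = 1; }
 *
 * get_alias_relation(irg, &h, mode_Is, &g, mode_Is) yields ir_no_alias
 * for two distinct globals, so the Load of g may bypass the Store to h
 * and the hoisting is legal.
 */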
1424
1425 /**
1426  * Process a loop SCC.
1427  *
1428  * @param pscc  the SCC
1429  * @param env   the loop environment
1430  */
1431 static void process_loop(scc *pscc, loop_env *env) {
1432         ir_node *irn, *next, *header = NULL;
1433         node_entry *b, *h = NULL;
1434         int j, only_phi, num_outside, process = 0;
1435         ir_node *out_rc;
1436
1437         /* find the header block for this scc */
1438         for (irn = pscc->head; irn; irn = next) {
1439                 node_entry *e = get_irn_ne(irn, env);
1440                 ir_node *block = get_nodes_block(irn);
1441
1442                 next = e->next;
1443                 b = get_irn_ne(block, env);
1444
1445                 if (header) {
1446                         if (h->POnum < b->POnum) {
1447                                 header = block;
1448                                 h      = b;
1449                         }
1450                 }
1451                 else {
1452                         header = block;
1453                         h      = b;
1454                 }
1455         }
1456
1457         /* check if this scc contains only Phi, Load and Store nodes (pure Calls count as Loads) */
1458         only_phi    = 1;
1459         num_outside = 0;
1460         out_rc      = NULL;
1461         for (irn = pscc->head; irn; irn = next) {
1462                 node_entry *e = get_irn_ne(irn, env);
1463
1464                 next = e->next;
1465                 switch (get_irn_opcode(irn)) {
1466                 case iro_Call:
1467                         if (is_Call_pure(irn)) {
1468                                 /* pure calls can be treated like loads */
1469                                 only_phi = 0;
1470                                 break;
1471                         }
1472                         /* non-pure calls must be handled like may-alias Stores */
1473                         goto fail;
1474                 case iro_CopyB:
1475                         /* cannot handle CopyB yet */
1476                         goto fail;
1477                 case iro_Load:
1478                         process = 1;
1479                         if (get_Load_volatility(irn) == volatility_is_volatile) {
1480                                 /* cannot handle loops with volatile Loads */
1481                                 goto fail;
1482                         }
1483                         only_phi = 0;
1484                         break;
1485                 case iro_Store:
1486                         if (get_Store_volatility(irn) == volatility_is_volatile) {
1487                                 /* cannot handle loops with volatile Stores */
1488                                 goto fail;
1489                         }
1490                         only_phi = 0;
1491                         break;
1492                 default:
1493                         only_phi = 0;
1494                         break;
1495                 case iro_Phi:
1496                         for (j = get_irn_arity(irn) - 1; j >= 0; --j) {
1497                                 ir_node *pred  = get_irn_n(irn, j);
1498                                 node_entry *pe = get_irn_ne(pred, env);
1499
1500                                 if (pe->pscc != e->pscc) {
1501                                         /* not in the same SCC, must be a region const */
1502                                         if (! is_rc(pred, header)) {
1503                                                 /* not a memory loop */
1504                                                 goto fail;
1505                                         }
1506                                         if (! out_rc) {
1507                                                 out_rc = pred;
1508                                                 ++num_outside;
1509                                         } else if (out_rc != pred) {
1510                                                 ++num_outside;
1511                                         }
1512                                 }
1513                         }
1514                         break;
1515                 }
1516         }
1517         if (! process)
1518                 goto fail;
1519
1520         /* found a memory loop */
1521         DB((dbg, LEVEL_2, "  Found a memory loop:\n  "));
1522         if (only_phi && num_outside == 1) {
1523                 /* a phi cycle with only one real predecessor can be collapsed */
1524                 DB((dbg, LEVEL_2, "  Found an USELESS Phi cycle:\n  "));
1525
1526                 for (irn = pscc->head; irn; irn = next) {
1527                         node_entry *e = get_irn_ne(irn, env);
1528                         next = e->next;
1529                         e->header = NULL;
1530                         exchange(irn, out_rc);
1531                 }
1532                 env->changes |= DF_CHANGED;
1533                 return;
1534         }
1535
1536         /* set the header for every node in this scc */
1537         for (irn = pscc->head; irn; irn = next) {
1538                 node_entry *e = get_irn_ne(irn, env);
1539                 e->header = header;
1540                 next = e->next;
1541                 DB((dbg, LEVEL_2, " %+F,", irn));
1542         }
1543         DB((dbg, LEVEL_2, "\n"));
1544
1545         move_loads_out_of_loops(pscc, env);
1546
1547 fail:
1548         ;
1549 }  /* process_loop */
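/*
 * Sketch of the "useless Phi cycle" case collapsed in process_loop
 * above (illustrative): a memory SCC consisting only of Phis with
 * exactly one region constant entry M0,
 *
 *     PhiA = Phi(M0, PhiB)
 *     PhiB = Phi(PhiA, PhiA)
 *
 * performs no memory operation at all, so every node of the cycle is
 * equivalent to M0 and can be exchanged for it.
 */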
1550
1551 /**
1552  * Process a SCC.
1553  *
1554  * @param pscc  the SCC
1555  * @param env   the loop environment
1556  */
1557 static void process_scc(scc *pscc, loop_env *env) {
1558         ir_node *head = pscc->head;
1559         node_entry *e = get_irn_ne(head, env);
1560
1561 #ifdef DEBUG_libfirm
1562         {
1563                 ir_node *irn, *next;
1564
1565                 DB((dbg, LEVEL_4, " SCC at %p:\n ", pscc));
1566                 for (irn = pscc->head; irn; irn = next) {
1567                         node_entry *e = get_irn_ne(irn, env);
1568
1569                         next = e->next;
1570
1571                         DB((dbg, LEVEL_4, " %+F,", irn));
1572                 }
1573                 DB((dbg, LEVEL_4, "\n"));
1574         }
1575 #endif
1576
1577         if (e->next != NULL) {
1578                 /* this SCC has more than one member */
1579                 process_loop(pscc, env);
1580         }
1581 }  /* process_scc */
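/*
 * Reminder of Tarjan's SCC invariants, as used by dfs() below (this is
 * the standard algorithm, restated here for reference):
 *
 *     visit(n):
 *         n.DFSnum = n.low = nextDFSnum++; push(n);
 *         for each (memory) predecessor p of n:
 *             if p not visited: visit(p); n.low = MIN(n.low, p.low);
 *             if p.DFSnum < n.DFSnum and p on stack:
 *                 n.low = MIN(n.low, p.DFSnum);
 *         if n.low == n.DFSnum:   // n is the root of an SCC
 *             pop nodes up to and including n; they form one SCC
 */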
1582
1583 /**
1584  * Do Tarjan's SCC algorithm and drive load/store optimization.
1585  *
1586  * @param irn  start at this node
1587  * @param env  the loop environment
1588  */
1589 static void dfs(ir_node *irn, loop_env *env)
1590 {
1591         int i, n;
1592         node_entry *node = get_irn_ne(irn, env);
1593
1594         mark_irn_visited(irn);
1595
1596         node->DFSnum = env->nextDFSnum++;
1597         node->low    = node->DFSnum;
1598         push(env, irn);
1599
1600         /* handle preds */
1601         if (is_Phi(irn) || is_Sync(irn)) {
1602                 n = get_irn_arity(irn);
1603                 for (i = 0; i < n; ++i) {
1604                         ir_node *pred = get_irn_n(irn, i);
1605                         node_entry *o = get_irn_ne(pred, env);
1606
1607                         if (irn_not_visited(pred)) {
1608                                 dfs(pred, env);
1609                                 node->low = MIN(node->low, o->low);
1610                         }
1611                         if (o->DFSnum < node->DFSnum && o->in_stack)
1612                                 node->low = MIN(o->DFSnum, node->low);
1613                 }
1614         } else if (is_fragile_op(irn)) {
1615                 ir_node *pred = get_fragile_op_mem(irn);
1616                 node_entry *o = get_irn_ne(pred, env);
1617
1618                 if (irn_not_visited(pred)) {
1619                         dfs(pred, env);
1620                         node->low = MIN(node->low, o->low);
1621                 }
1622                 if (o->DFSnum < node->DFSnum && o->in_stack)
1623                         node->low = MIN(o->DFSnum, node->low);
1624         } else if (is_Proj(irn)) {
1625                 ir_node *pred = get_Proj_pred(irn);
1626                 node_entry *o = get_irn_ne(pred, env);
1627
1628                 if (irn_not_visited(pred)) {
1629                         dfs(pred, env);
1630                         node->low = MIN(node->low, o->low);
1631                 }
1632                 if (o->DFSnum < node->DFSnum && o->in_stack)
1633                         node->low = MIN(o->DFSnum, node->low);
1634         }
1635         else {
1636                 /* IGNORE predecessors */
1637         }
1638
1639         if (node->low == node->DFSnum) {
1640                 scc *pscc = phase_alloc(&env->ph, sizeof(*pscc));
1641                 ir_node *x;
1642
1643                 pscc->head = NULL;
1644                 do {
1645                         node_entry *e;
1646
1647                         x = pop(env);
1648                         e = get_irn_ne(x, env);
1649                         e->pscc    = pscc;
1650                         e->next    = pscc->head;
1651                         pscc->head = x;
1652                 } while (x != irn);
1653
1654                 process_scc(pscc, env);
1655         }
1656 }  /* dfs */
1657
1658 /**
1659  * Do the DFS on the memory edges of a graph.
1660  *
1661  * @param irg  the graph to process
1662  * @param env  the loop environment
1663  */
1664 static void do_dfs(ir_graph *irg, loop_env *env) {
1665         ir_graph *rem = current_ir_graph;
1666         ir_node  *endblk, *end;
1667         int      i;
1668
1669         current_ir_graph = irg;
1670         inc_irg_visited(irg);
1671
1672         /* visit all memory nodes */
1673         endblk = get_irg_end_block(irg);
1674         for (i = get_Block_n_cfgpreds(endblk) - 1; i >= 0; --i) {
1675                 ir_node *pred = get_Block_cfgpred(endblk, i);
1676
1677                 pred = skip_Proj(pred);
1678                 if (is_Return(pred))
1679                         dfs(get_Return_mem(pred), env);
1680                 else if (is_Raise(pred))
1681                         dfs(get_Raise_mem(pred), env);
1682                 else if (is_fragile_op(pred))
1683                         dfs(get_fragile_op_mem(pred), env);
1684                 else {
1685                         assert(0 && "Unknown EndBlock predecessor");
1686                 }
1687         }
1688
1689         /* visit the keep-alives */
1690         end = get_irg_end(irg);
1691         for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
1692                 ir_node *ka = get_End_keepalive(end, i);
1693
1694                 if (is_Phi(ka) && irn_not_visited(ka))
1695                         dfs(ka, env);
1696         }
1697         current_ir_graph = rem;
1698 }  /* do_dfs */
1699
1700 /**
1701  * Initialize new phase data. We always do this explicitly, so return NULL here.
1702  */
1703 static void *init_loop_data(ir_phase *ph, const ir_node *irn, void *data) {
1704         (void)ph;
1705         (void)irn;
1706         (void)data;
1707         return NULL;
1708 }  /* init_loop_data */
1709
1710 /**
1711  * Optimize Loads/Stores in loops.
1712  *
1713  * @param irg  the graph
1714  */
1715 static int optimize_loops(ir_graph *irg) {
1716         loop_env env;
1717
1718         env.stack         = NEW_ARR_F(ir_node *, 128);
1719         env.tos           = 0;
1720         env.nextDFSnum    = 0;
1721         env.POnum         = 0;
1722         env.changes       = 0;
1723         phase_init(&env.ph, "ldstopt", irg, PHASE_DEFAULT_GROWTH, init_loop_data, NULL);
1724
1725         /* calculate the SCC's and drive loop optimization. */
1726         do_dfs(irg, &env);
1727
1728         DEL_ARR_F(env.stack);
1729         phase_free(&env.ph);
1730
1731         return env.changes;
1732 }  /* optimize_loops */
1733
1734 /*
1735  * Do the load/store optimization.
1736  */
1737 void optimize_load_store(ir_graph *irg) {
1738         walk_env_t env;
1739
1740         FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");
1741
1742         assert(get_irg_phase_state(irg) != phase_building);
1743         assert(get_irg_pinned(irg) != op_pin_state_floats &&
1744                 "LoadStore optimization needs pinned graph");
1745
1746         /* we need landing pads */
1747         remove_critical_cf_edges(irg);
1748
1749         edges_assure(irg);
1750
1751         /* for Phi optimization post-dominators are needed ... */
1752         assure_postdoms(irg);
1753
1754         if (get_opt_alias_analysis()) {
1755                 assure_irg_address_taken_computed(irg);
1756                 assure_irp_globals_address_taken_computed();
1757         }
1758
1759         obstack_init(&env.obst);
1760         env.changes = 0;
1761
1762         /* init the links, then collect Loads/Stores/Proj's in lists */
1763         master_visited = 0;
1764         irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);
1765
1766         /* now we have collected enough information, optimize */
1767         irg_walk_graph(irg, NULL, do_load_store_optimize, &env);
1768
1769         env.changes |= optimize_loops(irg);
1770
1771         obstack_free(&env.obst, NULL);
1772
1773         /* Handle graph state */
1774         if (env.changes) {
1775                 set_irg_outs_inconsistent(irg);
1776         }
1777
1778         if (env.changes & CF_CHANGED) {
1779                 /* this is really needed: control flow changed, so blocks
1780                 might have Bad() predecessors now */
1781                 set_irg_doms_inconsistent(irg);
1782         }
1783 }  /* optimize_load_store */
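
/*
 * Hypothetical usage sketch (not part of this file): run the pass over
 * all graphs of the program; get_irp_n_irgs()/get_irp_irg() are the
 * usual iteration helpers.
 *
 *     int i;
 *     for (i = get_irp_n_irgs() - 1; i >= 0; --i)
 *         optimize_load_store(get_irp_irg(i));
 */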