add an is_Quot function
/*
 * Project:     libFIRM
 * File name:   ir/opt/ldstopt.c
 * Purpose:     load store optimizations
 * Author:      Michael Beck
 * Created:
 * CVS-ID:      $Id$
 * Copyright:   (c) 1998-2007 Universität Karlsruhe
 * Licence:     This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
 */
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#ifdef HAVE_ALLOCA_H
#include <alloca.h>
#endif
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
#ifdef HAVE_STRING_H
# include <string.h>
#endif

#include "irnode_t.h"
#include "irgraph_t.h"
#include "irmode_t.h"
#include "iropt_t.h"
#include "ircons_t.h"
#include "irgmod.h"
#include "irgwalk.h"
#include "irvrfy.h"
#include "tv_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "irflag_t.h"
#include "array.h"
#include "irhooks.h"
#include "iredges.h"
#include "irtools.h"
#include "opt_polymorphy.h"
#include "irmemory.h"

#ifdef DO_CACHEOPT
#include "cacheopt/cachesim.h"
#endif

#undef IMAX
#define IMAX(a,b)       ((a) > (b) ? (a) : (b))

#define MAX_PROJ        IMAX(pn_Load_max, pn_Store_max)

enum changes_t {
        DF_CHANGED = 1,       /**< data flow changed */
        CF_CHANGED = 2,       /**< control flow changed */
};

/**
 * walker environment
 */
typedef struct _walk_env_t {
        struct obstack obst;          /**< list of all stores */
        unsigned changes;             /**< a bitmask of graph changes */
} walk_env_t;

/**
 * flags for Load/Store
 */
enum ldst_flags_t {
        LDST_VISITED = 1              /**< if set, this Load/Store is already visited */
};

/** A Load/Store info. */
typedef struct _ldst_info_t {
        ir_node  *projs[MAX_PROJ];    /**< list of Proj's of this node */
        ir_node  *exc_block;          /**< the exception block if available */
        int      exc_idx;             /**< predecessor index in the exception block */
        unsigned flags;               /**< flags */
        unsigned visited;             /**< visited counter for breaking loops */
} ldst_info_t;

/**
 * flags for control flow.
 */
enum block_flags_t {
        BLOCK_HAS_COND = 1,      /**< Block has conditional control flow */
        BLOCK_HAS_EXC  = 2       /**< Block has exceptional control flow */
};

/**
 * a Block info.
 */
typedef struct _block_info_t {
        unsigned flags;               /**< flags for the block */
} block_info_t;

/** the master visited flag for loop detection. */
static unsigned master_visited = 0;

#define INC_MASTER()       ++master_visited
#define MARK_NODE(info)    (info)->visited = master_visited
#define NODE_VISITED(info) (info)->visited >= master_visited
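
/*
 * Intended protocol for these macros -- a sketch mirroring the chain
 * walks further below, not additional functionality: bump the master
 * counter once per chain walk, then mark every visited node info and
 * stop as soon as a node was already seen in the current walk:
 *
 *   INC_MASTER();
 *   while (following the memory chain) {
 *           if (NODE_VISITED(pred_info))
 *                   break;      (cycle detected, abort this walk)
 *           MARK_NODE(pred_info);
 *   }
 */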

/**
 * get the Load/Store info of a node
 */
static ldst_info_t *get_ldst_info(ir_node *node, walk_env_t *env) {
        ldst_info_t *info = get_irn_link(node);

        if (! info) {
                info = obstack_alloc(&env->obst, sizeof(*info));
                memset(info, 0, sizeof(*info));
                set_irn_link(node, info);
        }
        return info;
}  /* get_ldst_info */

/**
 * get the Block info of a node
 */
static block_info_t *get_block_info(ir_node *node, walk_env_t *env) {
        block_info_t *info = get_irn_link(node);

        if (! info) {
                info = obstack_alloc(&env->obst, sizeof(*info));
                memset(info, 0, sizeof(*info));
                set_irn_link(node, info);
        }
        return info;
}  /* get_block_info */

/**
 * update the projection info for a Load/Store
 */
static unsigned update_projs(ldst_info_t *info, ir_node *proj)
{
        long nr = get_Proj_proj(proj);

        assert(0 <= nr && nr <= MAX_PROJ && "Wrong proj from LoadStore");

        if (info->projs[nr]) {
                /* there is already one, do CSE */
                exchange(proj, info->projs[nr]);
                return DF_CHANGED;
        }
        else {
                info->projs[nr] = proj;
                return 0;
        }
}  /* update_projs */

/**
 * update the exception block info for a Load/Store node.
 *
 * @param info   the load/store info struct
 * @param block  the exception handler block for this load/store
 * @param pos    the control flow input of the block
 */
static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
{
        assert(info->exc_block == NULL && "more than one exception block found");

        info->exc_block = block;
        info->exc_idx   = pos;
        return 0;
}  /* update_exc */

/** Return the number of uses of an address node */
#define get_irn_n_uses(adr)     get_irn_n_edges(adr)

/**
 * walker, collects all Load/Store/Proj nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
{
        ir_op       *op = get_irn_op(node);
        ir_node     *pred, *blk, *pred_blk;
        ldst_info_t *ldst_info;
        walk_env_t  *wenv = env;

        if (op == op_Proj) {
                ir_node *adr;
                ir_op *op;

                pred = get_Proj_pred(node);
                op   = get_irn_op(pred);

                if (op == op_Load) {
                        ldst_info = get_ldst_info(pred, wenv);

                        wenv->changes |= update_projs(ldst_info, node);

                        if ((ldst_info->flags & LDST_VISITED) == 0) {
                                adr = get_Load_ptr(pred);
                                ldst_info->flags |= LDST_VISITED;
                        }

                        /*
                         * Place the Projs in the same block as the
                         * predecessor Load. This is always ok and prevents
                         * "non-SSA" form after optimizations if the Proj
                         * is in a wrong block.
                         */
                        blk      = get_nodes_block(node);
                        pred_blk = get_nodes_block(pred);
                        if (blk != pred_blk) {
                                wenv->changes |= DF_CHANGED;
                                set_nodes_block(node, pred_blk);
                        }
                } else if (op == op_Store) {
                        ldst_info = get_ldst_info(pred, wenv);

                        wenv->changes |= update_projs(ldst_info, node);

                        if ((ldst_info->flags & LDST_VISITED) == 0) {
                                adr = get_Store_ptr(pred);
                                ldst_info->flags |= LDST_VISITED;
                        }

                        /*
                         * Place the Projs in the same block as the
                         * predecessor Store. This is always ok and prevents
                         * "non-SSA" form after optimizations if the Proj
                         * is in a wrong block.
                         */
                        blk      = get_nodes_block(node);
                        pred_blk = get_nodes_block(pred);
                        if (blk != pred_blk) {
                                wenv->changes |= DF_CHANGED;
                                set_nodes_block(node, pred_blk);
                        }
                }
        } else if (op == op_Block) {
                int i;

                for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
                        ir_node      *pred_block;
                        block_info_t *bl_info;

                        pred = skip_Proj(get_Block_cfgpred(node, i));

                        /* ignore Bad predecessors, they will be removed later */
                        if (is_Bad(pred))
                                continue;

                        pred_block = get_nodes_block(pred);
                        bl_info    = get_block_info(pred_block, wenv);

                        if (is_fragile_op(pred))
                                bl_info->flags |= BLOCK_HAS_EXC;
                        else if (is_irn_forking(pred))
                                bl_info->flags |= BLOCK_HAS_COND;

                        if (get_irn_op(pred) == op_Load || get_irn_op(pred) == op_Store) {
                                ldst_info = get_ldst_info(pred, wenv);

                                wenv->changes |= update_exc(ldst_info, node, i);
                        }
                }
        }
}  /* collect_nodes */

/**
 * Returns an entity if the address ptr points to a constant one.
 *
 * @param ptr  the address
 *
 * @return an entity or NULL
 */
static ir_entity *find_constant_entity(ir_node *ptr)
{
        for (;;) {
                ir_op *op = get_irn_op(ptr);

                if (op == op_SymConst && (get_SymConst_kind(ptr) == symconst_addr_ent)) {
                        ir_entity *ent = get_SymConst_entity(ptr);
                        if (variability_constant == get_entity_variability(ent))
                                return ent;
                        return NULL;
                } else if (op == op_Sel) {
                        ir_entity *ent = get_Sel_entity(ptr);
                        ir_type   *tp  = get_entity_owner(ent);

                        /* Do not fiddle with polymorphism. */
                        if (is_Class_type(get_entity_owner(ent)) &&
                                ((get_entity_n_overwrites(ent)    != 0) ||
                                (get_entity_n_overwrittenby(ent) != 0)   ) )
                                return NULL;

                        if (is_Array_type(tp)) {
                                /* check bounds */
                                int i, n;

                                for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
                                        ir_node *bound;
                                        tarval *tlower, *tupper;
                                        ir_node *index = get_Sel_index(ptr, i);
                                        tarval *tv     = computed_value(index);

                                        /* check if the index is constant */
                                        if (tv == tarval_bad)
                                                return NULL;

                                        bound  = get_array_lower_bound(tp, i);
                                        tlower = computed_value(bound);
                                        bound  = get_array_upper_bound(tp, i);
                                        tupper = computed_value(bound);

                                        if (tlower == tarval_bad || tupper == tarval_bad)
                                                return NULL;

                                        if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
                                                return NULL;
                                        if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
                                                return NULL;

                                        /* ok, bounds check finished */
                                }
                        }

                        if (variability_constant == get_entity_variability(ent))
                                return ent;

                        /* try next */
                        ptr = get_Sel_ptr(ptr);
                } else
                        return NULL;
        }
}  /* find_constant_entity */
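
/*
 * Example of the address shape this function walks -- an illustrative
 * sketch, assuming a constant global table like
 *
 *   static const int tab[4] = { 1, 2, 3, 4 };
 *   ... tab[2] ...
 *
 * The address is Sel(SymConst(&tab), Const 2): the index is constant,
 * the bounds check above succeeds, and the entity of tab is returned.
 */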

/**
 * Return the selection index of a Sel node from dimension dim
 */
static long get_Sel_array_index_long(ir_node *n, int dim) {
        ir_node *index = get_Sel_index(n, dim);
        assert(get_irn_op(index) == op_Const);
        return get_tarval_long(get_Const_tarval(index));
}  /* get_Sel_array_index_long */

/**
 * Returns the accessed component graph path for a
 * node computing an address.
 *
 * @param ptr    the node computing the address
 * @param depth  current depth in steps upward from the root
 *               of the address
 */
static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
        compound_graph_path *res = NULL;
        ir_entity           *root, *field;
        int                 path_len, pos;

        if (get_irn_op(ptr) == op_SymConst) {
                /* a SymConst. If the depth is 0, this is an access to a global
                 * entity and we don't need a component path, else we know
                 * at least its length.
                 */
                assert(get_SymConst_kind(ptr) == symconst_addr_ent);
                root = get_SymConst_entity(ptr);
                res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
        } else {
                assert(get_irn_op(ptr) == op_Sel);
                /* it's a Sel, go up until we find the root */
                res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);

                /* fill up the step in the path at the current position */
                field    = get_Sel_entity(ptr);
                path_len = get_compound_graph_path_length(res);
                pos      = path_len - depth - 1;
                set_compound_graph_path_node(res, pos, field);

                if (is_Array_type(get_entity_owner(field))) {
                        assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
                        set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
                }
        }
        return res;
}  /* rec_get_accessed_path */

/** Returns an access path or NULL.  The access path is only
 *  valid if the graph is in phase_high and _no_ address computation is used.
 */
static compound_graph_path *get_accessed_path(ir_node *ptr) {
        return rec_get_accessed_path(ptr, 0);
}  /* get_accessed_path */

/* forward */
static void reduce_adr_usage(ir_node *ptr);

/**
 * Update a Load that may have lost its usage.
 */
static void handle_load_update(ir_node *load) {
        ldst_info_t *info = get_irn_link(load);

        /* do NOT touch volatile loads for now */
        if (get_Load_volatility(load) == volatility_is_volatile)
                return;

        if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
                ir_node *ptr = get_Load_ptr(load);
                ir_node *mem = get_Load_mem(load);

                /* a Load whose value is neither used nor exception checked, remove it */
                exchange(info->projs[pn_Load_M], mem);
                exchange(load, new_Bad());
                reduce_adr_usage(ptr);
        }
}  /* handle_load_update */

/**
 * A use of an address node has vanished. Check if this was a Proj
 * node and update the counters.
 */
static void reduce_adr_usage(ir_node *ptr) {
        if (is_Proj(ptr)) {
                if (get_irn_n_edges(ptr) <= 0) {
                        /* this Proj is dead now */
                        ir_node *pred = get_Proj_pred(ptr);

                        if (is_Load(pred)) {
                                ldst_info_t *info = get_irn_link(pred);
                                info->projs[get_Proj_proj(ptr)] = NULL;

                                /* this node lost its result proj, handle that */
                                handle_load_update(pred);
                        }
                }
        }
}  /* reduce_adr_usage */

/**
 * Check if an already existing value of mode old_mode can be converted
 * into the needed one new_mode without loss.
 */
static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode) {
        if (old_mode == new_mode)
                return 1;

        /* if both modes are two's complement ones, we can always convert the
           stored value into the needed one. */
        if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
                  get_mode_arithmetic(old_mode) == irma_twos_complement &&
                  get_mode_arithmetic(new_mode) == irma_twos_complement)
                return 1;
        return 0;
}  /* can_use_stored_value */
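
/*
 * Illustration (a sketch, assuming the standard integer modes mode_Is,
 * 32 bit signed, and mode_Hs, 16 bit signed): a 16 bit load can reuse a
 * 32 bit stored value after a Conv, the reverse direction cannot:
 *
 *   can_use_stored_value(mode_Is, mode_Hs)   -> 1  (truncating Conv)
 *   can_use_stored_value(mode_Hs, mode_Is)   -> 0  (would read memory
 *                                                   never written)
 */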

/**
 * Follow the memory chain as long as there are only Loads
 * and alias free Stores and try to replace the current Load or Store
 * by a previous one.
 * Note that in unreachable loops it might happen that we reach the
 * load again, as well as we can fall into a cycle.
 * We break such cycles using a special visited flag.
 *
 * INC_MASTER() must be called before diving in.
 */
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
        unsigned res = 0;
        ldst_info_t *info = get_irn_link(load);
        ir_node *pred;
        ir_node *ptr       = get_Load_ptr(load);
        ir_node *mem       = get_Load_mem(load);
        ir_mode *load_mode = get_Load_mode(load);

        for (pred = curr; load != pred; ) {
                ldst_info_t *pred_info = get_irn_link(pred);

                /*
                 * BEWARE: one might think that checking the modes is useless, because
                 * if the pointers are identical, they refer to the same object.
                 * This is only true in strongly typed languages, not in C, where the
                 * following is possible: a = *(ir_type1 *)p; b = *(ir_type2 *)p ...
                 */
                if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
                    can_use_stored_value(get_irn_mode(get_Store_value(pred)), load_mode)) {
                        /*
                         * a Load immediately after a Store -- a read after write.
                         * We may remove the Load if neither Load nor Store has an exception
                         * handler OR they are in the same block. In the latter case the Load
                         * cannot throw an exception when the previous Store was quiet.
                         *
                         * Why do we need to check for a Store exception? If the Store cannot
                         * be executed (ROM) the exception handler might simply jump into
                         * the load block :-(
                         * We could make it a little bit better if we would know that the exception
                         * handler of the Store jumps directly to the end...
                         */
                        if ((!pred_info->projs[pn_Store_X_except] && !info->projs[pn_Load_X_except]) ||
                            get_nodes_block(load) == get_nodes_block(pred)) {
                                ir_node *value = get_Store_value(pred);

                                DBG_OPT_RAW(load, value);

                                /* add a Conv if needed */
                                if (get_irn_mode(get_Store_value(pred)) != load_mode) {
                                        value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
                                }

                                if (info->projs[pn_Load_M])
                                        exchange(info->projs[pn_Load_M], mem);

                                /* no exception */
                                if (info->projs[pn_Load_X_except]) {
                                        exchange( info->projs[pn_Load_X_except], new_Bad());
                                        res |= CF_CHANGED;
                                }

                                if (info->projs[pn_Load_res])
                                        exchange(info->projs[pn_Load_res], value);

                                exchange(load, new_Bad());
                                reduce_adr_usage(ptr);
                                return res | DF_CHANGED;
                        }
                } else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
                           can_use_stored_value(get_Load_mode(pred), load_mode)) {
                        /*
                         * a Load after a Load -- a read after read.
                         * We may remove the second Load if it does not have an exception
                         * handler OR they are in the same block. In the latter case the Load
                         * cannot throw an exception when the previous Load was quiet.
                         *
                         * Here, there is no need to check if the previous Load has an exception
                         * handler because they would have exactly the same exception...
                         */
                        if (! info->projs[pn_Load_X_except] || get_nodes_block(load) == get_nodes_block(pred)) {
                                DBG_OPT_RAR(load, pred);

                                if (pred_info->projs[pn_Load_res]) {
                                        ir_node *value = pred_info->projs[pn_Load_res];

                                        /* add a Conv if needed */
                                        if (get_Load_mode(pred) != load_mode) {
                                                value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
                                        }

                                        /* we need a data proj from the previous load for this optimization */
                                        if (info->projs[pn_Load_res])
                                                exchange(info->projs[pn_Load_res], pred_info->projs[pn_Load_res]);

                                        if (info->projs[pn_Load_M])
                                                exchange(info->projs[pn_Load_M], mem);
                                } else {
                                        if (info->projs[pn_Load_res]) {
                                                set_Proj_pred(info->projs[pn_Load_res], pred);
                                                set_nodes_block(info->projs[pn_Load_res], get_nodes_block(pred));
                                                pred_info->projs[pn_Load_res] = info->projs[pn_Load_res];
                                        }
                                        if (info->projs[pn_Load_M]) {
                                                /* Actually, this if should not be necessary.  Construct the Loads
                                                   properly!!! */
                                                exchange(info->projs[pn_Load_M], mem);
                                        }
                                }

                                /* no exception */
                                if (info->projs[pn_Load_X_except]) {
                                        exchange(info->projs[pn_Load_X_except], new_Bad());
                                        res |= CF_CHANGED;
                                }

                                exchange(load, new_Bad());
                                reduce_adr_usage(ptr);
                                return res | DF_CHANGED;
                        }
                }

                if (get_irn_op(pred) == op_Store) {
                        /* check if we can pass through this store */
                        ir_alias_relation rel = get_alias_relation(
                                current_ir_graph,
                                get_Store_ptr(pred),
                                get_irn_mode(get_Store_value(pred)),
                                ptr, load_mode);
                        /* if there might be an alias, we cannot pass this Store */
                        if (rel != no_alias)
                                break;
                        pred = skip_Proj(get_Store_mem(pred));
                } else if (get_irn_op(pred) == op_Load) {
                        pred = skip_Proj(get_Load_mem(pred));
                } else {
                        /* follow only Load chains */
                        break;
                }

                /* check for cycles */
                if (NODE_VISITED(pred_info))
                        break;
                MARK_NODE(pred_info);
        }

        if (get_irn_op(pred) == op_Sync) {
                int i;

                /* handle all Sync predecessors */
                for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
                        res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
                        if (res)
                                break;
                }
        }

        return res;
}  /* follow_Mem_chain */
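
/*
 * The two cases handled above, at the source level (illustrative sketch):
 *
 *   *p = x;              read after write: the Load is replaced by x
 *   a  = *p;             (plus a Conv if the modes differ)
 *
 *   a  = *p;             read after read: the second Load is replaced
 *   b  = *p;             by the result Proj of the first one
 *
 * Both rewrites assume no aliasing Store and no foreign memory operation
 * between the two accesses, which is exactly what the chain walk checks.
 */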

/**
 * optimize a Load
 *
 * @param load  the Load node
 */
static unsigned optimize_load(ir_node *load)
{
        ldst_info_t *info = get_irn_link(load);
        ir_node *mem, *ptr, *new_node;
        ir_entity *ent;
        unsigned res = 0;

        /* do NOT touch volatile loads for now */
        if (get_Load_volatility(load) == volatility_is_volatile)
                return 0;

        /* the address of the load to be optimized */
        ptr = get_Load_ptr(load);

        /*
         * Check if we can remove the exception from a Load:
         * This can be done if the address is from a Sel(Alloc) and
         * the Sel type is a subtype of the allocated type.
         *
         * This optimizes some often used OO constructs,
         * like x = new O; x->t;
         */
        if (info->projs[pn_Load_X_except]) {
                if (is_Sel(ptr)) {
                        ir_node *mem = get_Sel_mem(ptr);

                        /* FIXME: works with the current FE, but better use the base */
                        if (get_irn_op(skip_Proj(mem)) == op_Alloc) {
                                /* ok, check the types */
                                ir_entity *ent    = get_Sel_entity(ptr);
                                ir_type   *s_type = get_entity_type(ent);
                                ir_type   *a_type = get_Alloc_type(mem);

                                if (is_SubClass_of(s_type, a_type)) {
                                        /* ok, condition met: there can't be an exception because
                                         * Alloc guarantees that enough memory was allocated */

                                        exchange(info->projs[pn_Load_X_except], new_Bad());
                                        info->projs[pn_Load_X_except] = NULL;
                                        res |= CF_CHANGED;
                                }
                        }
                } else if ((get_irn_op(skip_Proj(ptr)) == op_Alloc) ||
                        ((get_irn_op(ptr) == op_Cast) && (get_irn_op(skip_Proj(get_Cast_op(ptr))) == op_Alloc))) {
                                /* simple case: a direct load after an Alloc. Firm's Alloc throws
                                 * an exception in case of out-of-memory. So, there is no way for an
                                 * exception in this load.
                                 * This code is constructed by the "exception lowering" in the Jack compiler.
                                 */
                                exchange(info->projs[pn_Load_X_except], new_Bad());
                                info->projs[pn_Load_X_except] = NULL;
                                res |= CF_CHANGED;
                }
        }

        /* The mem of the Load. Must still be returned after optimization. */
        mem  = get_Load_mem(load);

        if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
                /* a Load whose value is neither used nor exception checked, remove it */
                exchange(info->projs[pn_Load_M], mem);

                exchange(load, new_Bad());
                reduce_adr_usage(ptr);
                return res | DF_CHANGED;
        }

        /* Load from a constant polymorphic field, where we can resolve
           polymorphism. */
        new_node = transform_node_Load(load);
        if (new_node != load) {
                if (info->projs[pn_Load_M]) {
                        exchange(info->projs[pn_Load_M], mem);
                        info->projs[pn_Load_M] = NULL;
                }
                if (info->projs[pn_Load_X_except]) {
                        exchange(info->projs[pn_Load_X_except], new_Bad());
                        info->projs[pn_Load_X_except] = NULL;
                }
                if (info->projs[pn_Load_res])
                        exchange(info->projs[pn_Load_res], new_node);

                exchange(load, new_Bad());
                reduce_adr_usage(ptr);
                return res | DF_CHANGED;
        }

        /* check if we can determine the entity that will be loaded */
        ent = find_constant_entity(ptr);
        if (ent) {
                if ((allocation_static == get_entity_allocation(ent)) &&
                        (visibility_external_allocated != get_entity_visibility(ent))) {
                        /* a static allocation that is not external: there should be NO exception
                         * when loading. */

                        /* no exception, clear the info field as it might be checked later again */
                        if (info->projs[pn_Load_X_except]) {
                                exchange(info->projs[pn_Load_X_except], new_Bad());
                                info->projs[pn_Load_X_except] = NULL;
                                res |= CF_CHANGED;
                        }

                        if (variability_constant == get_entity_variability(ent)
                                && is_atomic_entity(ent)) {
                                /* Might not be atomic after lowering of Sels.  In this case
                                   we could also load, but it's more complicated. */
                                /* simpler case: we load the content of a constant value:
                                 * replace it by the constant itself
                                 */

                                /* no memory */
                                if (info->projs[pn_Load_M]) {
                                        exchange(info->projs[pn_Load_M], mem);
                                        res |= DF_CHANGED;
                                }
                                /* no result :-) */
                                if (info->projs[pn_Load_res]) {
                                        if (is_atomic_entity(ent)) {
                                                ir_node *c = copy_const_value(get_irn_dbg_info(load), get_atomic_ent_value(ent));

                                                DBG_OPT_RC(load, c);
                                                exchange(info->projs[pn_Load_res], c);
                                                res |= DF_CHANGED;
                                        }
                                }
                                exchange(load, new_Bad());
                                reduce_adr_usage(ptr);
                                return res;
                        } else if (variability_constant == get_entity_variability(ent)) {
                                compound_graph_path *path = get_accessed_path(ptr);

                                if (path) {
                                        ir_node *c;

                                        assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
                                        /*
                                        {
                                                int j;
                                                for (j = 0; j < get_compound_graph_path_length(path); ++j) {
                                                        ir_entity *node = get_compound_graph_path_node(path, j);
                                                        fprintf(stdout, ".%s", get_entity_name(node));
                                                        if (is_Array_type(get_entity_owner(node)))
                                                                fprintf(stdout, "[%d]", get_compound_graph_path_array_index(path, j));
                                                }
                                                printf("\n");
                                        }
                                        */

                                        c = get_compound_ent_value_by_path(ent, path);
                                        free_compound_graph_path(path);

                                        /* printf("  cons: "); DDMN(c); */

                                        if (info->projs[pn_Load_M]) {
                                                exchange(info->projs[pn_Load_M], mem);
                                                res |= DF_CHANGED;
                                        }
                                        if (info->projs[pn_Load_res]) {
                                                exchange(info->projs[pn_Load_res], copy_const_value(get_irn_dbg_info(load), c));
                                                res |= DF_CHANGED;
                                        }
                                        exchange(load, new_Bad());
                                        reduce_adr_usage(ptr);
                                        return res;
                                } else {
                                        /* We cannot determine a correct access path.  E.g., in jack, we load
                                           a byte from an object to generate an exception.  Happens in test program
                                           Reflectiontest.
                                        printf(">>>>>>>>>>>>> Found access to constant entity %s in function %s\n", get_entity_name(ent),
                                        get_entity_name(get_irg_entity(current_ir_graph)));
                                        printf("  load: "); DDMN(load);
                                        printf("  ptr:  "); DDMN(ptr);
                                        */
                                }
                        }
                }
        }

        /* Check if the address of this load is used more than once.
         * If not, this load cannot be removed in any case. */
        if (get_irn_n_uses(ptr) <= 1)
                return res;

        /*
         * follow the memory chain as long as there are only Loads
         * and try to replace the current Load or Store by a previous one.
         * Note that in unreachable loops it might happen that we reach the
         * load again, as well as we can fall into a cycle.
         * We break such cycles using a special visited flag.
         */
        INC_MASTER();
        res = follow_Mem_chain(load, skip_Proj(mem));
        return res;
}  /* optimize_load */

/**
 * Check whether a value of mode new_mode would completely overwrite a value
 * of mode old_mode in memory.
 */
static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
{
        return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
}  /* is_completely_overwritten */
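
/*
 * Example (a sketch, assuming the standard modes mode_Bu, 8 bit unsigned,
 * and mode_Is, 32 bit signed): a later 32 bit Store to the same address
 * completely overwrites an earlier 8 bit one, but not vice versa:
 *
 *   is_completely_overwritten(mode_Bu, mode_Is)   -> 1
 *   is_completely_overwritten(mode_Is, mode_Bu)   -> 0
 */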

/**
 * follow the memory chain as long as there are only Loads and alias free Stores.
 *
 * INC_MASTER() must be called before diving in.
 */
static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
        unsigned res = 0;
        ldst_info_t *info = get_irn_link(store);
        ir_node *pred;
        ir_node *ptr = get_Store_ptr(store);
        ir_node *mem = get_Store_mem(store);
        ir_node *value = get_Store_value(store);
        ir_mode *mode  = get_irn_mode(value);
        ir_node *block = get_nodes_block(store);

        for (pred = curr; pred != store;) {
                ldst_info_t *pred_info = get_irn_link(pred);

                /*
                 * BEWARE: one might think that checking the modes is useless, because
                 * if the pointers are identical, they refer to the same object.
                 * This is only true in strongly typed languages, not in C, where the
                 * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
                 * However, if the mode that is written has a size bigger than or equal
                 * to the old one, the old value is completely overwritten and can be
                 * killed ...
                 */
                if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
                    get_nodes_block(pred) == block &&
                    is_completely_overwritten(get_irn_mode(get_Store_value(pred)), mode)) {
                        /*
                         * a Store after a Store in the same block -- a write after write.
                         * We may remove the first Store if it does not have an exception handler.
                         *
                         * TODO: What, if both have the same exception handler ???
                         */
                        if (get_Store_volatility(pred) != volatility_is_volatile && !pred_info->projs[pn_Store_X_except]) {
                                DBG_OPT_WAW(pred, store);
                                exchange( pred_info->projs[pn_Store_M], get_Store_mem(pred) );
                                exchange(pred, new_Bad());
                                reduce_adr_usage(ptr);
                                return DF_CHANGED;
                        }
                } else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
                           value == pred_info->projs[pn_Load_res]) {
                        /*
                         * a Store of a value just Loaded from the same address -- a write
                         * after read. We may remove the Store if it does not have an
                         * exception handler.
                         */
                        if (! info->projs[pn_Store_X_except]) {
                                DBG_OPT_WAR(store, pred);
                                exchange( info->projs[pn_Store_M], mem );
                                exchange(store, new_Bad());
                                reduce_adr_usage(ptr);
                                return DF_CHANGED;
                        }
                }

                if (get_irn_op(pred) == op_Store) {
                        /* check if we can pass through this store */
                        ir_alias_relation rel = get_alias_relation(
                                current_ir_graph,
                                get_Store_ptr(pred),
                                get_irn_mode(get_Store_value(pred)),
                                ptr, mode);
                        /* if there might be an alias, we cannot pass this Store */
                        if (rel != no_alias)
                                break;
                        pred = skip_Proj(get_Store_mem(pred));
                } else if (get_irn_op(pred) == op_Load) {
                        pred = skip_Proj(get_Load_mem(pred));
                } else {
                        /* follow only Load chains */
                        break;
                }

                /* check for cycles */
                if (NODE_VISITED(pred_info))
                        break;
                MARK_NODE(pred_info);
        }

        if (get_irn_op(pred) == op_Sync) {
                int i;

                /* handle all Sync predecessors */
                for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
                        res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
                        if (res)
                                break;
                }
        }
        return res;
}  /* follow_Mem_chain_for_Store */

/**
 * optimize a Store
 *
 * @param store  the Store node
 */
static unsigned optimize_store(ir_node *store) {
        ir_node *ptr, *mem;

        if (get_Store_volatility(store) == volatility_is_volatile)
                return 0;

        ptr = get_Store_ptr(store);

        /* Check if the address of this Store is used more than once.
         * If not, this Store cannot be removed in any case. */
        if (get_irn_n_uses(ptr) <= 1)
                return 0;

        mem = get_Store_mem(store);

        /* follow the memory chain as long as there are only Loads */
        INC_MASTER();
        return follow_Mem_chain_for_Store(store, skip_Proj(mem));
}  /* optimize_store */

/**
 * walker, optimizes Phi after Stores to identical places:
 * Does the following optimization:
 * @verbatim
 *
 *   val1   val2   val3          val1  val2  val3
 *    |      |      |               \    |    /
 *  Store  Store  Store              \   |   /
 *      \    |    /                   PhiData
 *       \   |   /                       |
 *        \  |  /                      Store
 *          PhiM
 *
 * @endverbatim
 * This reduces the number of stores and allows for predicated execution.
 * It moves Stores towards the end of a function, which may be bad.
 *
 * This is only possible if the predecessor blocks have only one successor.
 */
static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
{
        int i, n;
        ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
        ir_mode *mode;
        ir_node **inM, **inD, **stores;
        int *idx;
        dbg_info *db = NULL;
        ldst_info_t *info;
        block_info_t *bl_info;
        unsigned res = 0;

        /* Must be a memory Phi */
        if (get_irn_mode(phi) != mode_M)
                return 0;

        n = get_Phi_n_preds(phi);
        if (n <= 0)
                return 0;

        store = skip_Proj(get_Phi_pred(phi, 0));
        old_store = store;
        if (get_irn_op(store) != op_Store)
                return 0;

        block = get_nodes_block(store);

        /* abort on dead blocks */
        if (is_Block_dead(block))
                return 0;

        /* check if the block is post dominated by Phi-block
           and has no exception exit */
        bl_info = get_irn_link(block);
        if (bl_info->flags & BLOCK_HAS_EXC)
                return 0;

        phi_block = get_nodes_block(phi);
        if (! block_postdominates(phi_block, block))
                return 0;

        /* this is the address of the store */
        ptr  = get_Store_ptr(store);
        mode = get_irn_mode(get_Store_value(store));
        info = get_irn_link(store);
        exc  = info->exc_block;

        for (i = 1; i < n; ++i) {
                ir_node *pred = skip_Proj(get_Phi_pred(phi, i));

                if (get_irn_op(pred) != op_Store)
                        return 0;

                if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
                        return 0;

                info = get_irn_link(pred);

                /* check, if all stores have the same exception flow */
                if (exc != info->exc_block)
                        return 0;

                /* abort on dead blocks */
                block = get_nodes_block(pred);
                if (is_Block_dead(block))
                        return 0;

                /* check if the block is post dominated by Phi-block
                   and has no exception exit. Note that block must be different from
                   Phi-block, else we would move a Store from the End of a block to its
                   Start... */
                bl_info = get_irn_link(block);
                if (bl_info->flags & BLOCK_HAS_EXC)
                        return 0;
                if (block == phi_block || ! block_postdominates(phi_block, block))
                        return 0;
        }

        /*
         * ok, when we are here, we found all predecessors of a Phi that
         * are Stores to the same address and size. That means that on every
         * path into the block of the Phi we execute a Store.
         * So, we can move the Store to the current block:
         *
         *   val1    val2    val3          val1  val2  val3
         *    |       |       |               \    |    /
         * | Str | | Str | | Str |             \   |   /
         *      \     |     /                   PhiData
         *       \    |    /                       |
         *        \   |   /                       Str
         *           PhiM
         *
         * This is only allowed if the predecessor blocks have only one successor.
         */

        NEW_ARR_A(ir_node *, stores, n);
        NEW_ARR_A(ir_node *, inM, n);
        NEW_ARR_A(ir_node *, inD, n);
        NEW_ARR_A(int, idx, n);

        /* Prepare: Collect all Store nodes.  We must do this
           first because we otherwise may lose a Store when exchanging its
           memory Proj.
         */
        for (i = 0; i < n; ++i)
                stores[i] = skip_Proj(get_Phi_pred(phi, i));

        /* Prepare: Skip the memory Proj: we need this in the case some stores
           are cascaded.
           Beware: One Store might be included more than once in the stores[]
           list, so we must prevent doing the exchange more than once.
         */
        for (i = 0; i < n; ++i) {
                ir_node *store = stores[i];
                ir_node *proj_m;

                info = get_irn_link(store);
                proj_m = info->projs[pn_Store_M];

                if (is_Proj(proj_m) && get_Proj_pred(proj_m) == store)
                        exchange(proj_m, get_Store_mem(store));
        }

        /* first step: collect all inputs */
        for (i = 0; i < n; ++i) {
                ir_node *store = stores[i];
                info = get_irn_link(store);

                inM[i] = get_Store_mem(store);
                inD[i] = get_Store_value(store);
                idx[i] = info->exc_idx;
        }
        block = get_nodes_block(phi);

        /* second step: create a new memory Phi */
        phiM = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inM, mode_M);

        /* third step: create a new data Phi */
        phiD = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inD, mode);

        /* fourth step: create the Store */
        store = new_rd_Store(db, current_ir_graph, block, phiM, ptr, phiD);
#ifdef DO_CACHEOPT
        co_set_irn_name(store, co_get_irn_ident(old_store));
#endif

        projM = new_rd_Proj(NULL, current_ir_graph, block, store, mode_M, pn_Store_M);

        info = get_ldst_info(store, wenv);
        info->projs[pn_Store_M] = projM;

        /* fifth step: repair exception flow */
        if (exc) {
                ir_node *projX = new_rd_Proj(NULL, current_ir_graph, block, store, mode_X, pn_Store_X_except);

                info->projs[pn_Store_X_except] = projX;
                info->exc_block                = exc;
                info->exc_idx                  = idx[0];

                for (i = 0; i < n; ++i) {
                        set_Block_cfgpred(exc, idx[i], projX);
                }

                if (n > 1) {
                        /* the exception block should be optimized as some inputs are identical now */
                }

                res |= CF_CHANGED;
        }

        /* sixth step: replace old Phi */
        exchange(phi, projM);

        return res | DF_CHANGED;
}  /* optimize_phi */
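
/*
 * Source-level sketch of the transformation above (illustrative only):
 *
 *   if (c) *p = a; else *p = b;      becomes      *p = c ? a : b;
 *
 * The Stores feeding the memory Phi are replaced by one data Phi that
 * feeds a single Store in the join block.
 */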

/**
 * walker, do the optimizations
 */
static void do_load_store_optimize(ir_node *n, void *env) {
        walk_env_t *wenv = env;

        switch (get_irn_opcode(n)) {

        case iro_Load:
                wenv->changes |= optimize_load(n);
                break;

        case iro_Store:
                wenv->changes |= optimize_store(n);
                break;

        case iro_Phi:
                wenv->changes |= optimize_phi(n, wenv);
                break;

        default:
                ;
        }
}  /* do_load_store_optimize */

/*
 * do the load store optimization
 */
void optimize_load_store(ir_graph *irg) {
        walk_env_t env;

        assert(get_irg_phase_state(irg) != phase_building);
        assert(get_irg_pinned(irg) != op_pin_state_floats &&
                "LoadStore optimization needs pinned graph");

        if (! get_opt_redundant_loadstore())
                return;

        edges_assure(irg);

        /* for Phi optimization post-dominators are needed ... */
        assure_postdoms(irg);

        if (get_opt_alias_analysis()) {
                assure_irg_address_taken_computed(irg);
                assure_irp_globals_address_taken_computed();
        }

        obstack_init(&env.obst);
        env.changes = 0;

        /* init the links, then collect Loads/Stores/Proj's in lists */
        master_visited = 0;
        irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);

        /* now we have collected enough information, optimize */
        irg_walk_graph(irg, NULL, do_load_store_optimize, &env);

        obstack_free(&env.obst, NULL);

        /* Handle graph state */
        if (env.changes) {
                if (get_irg_outs_state(irg) == outs_consistent)
                        set_irg_outs_inconsistent(irg);
        }

        if (env.changes & CF_CHANGED) {
                /* Is this really needed? Yes: control flow changed, blocks might
                   have Bad() predecessors. */
                set_irg_doms_inconsistent(irg);
        }
}  /* optimize_load_store */
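
/*
 * Typical driver usage -- a sketch, assuming the usual libFIRM program
 * iteration interface (get_irp_n_irgs/get_irp_irg), not part of this file:
 *
 *   int i;
 *   for (i = 0; i < get_irp_n_irgs(); ++i)
 *           optimize_load_store(get_irp_irg(i));
 */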