/*
 * Project:     libFIRM
 * File name:   ir/opt/ldstopt.c
 * Purpose:     load store optimizations
 * Author:      Michael Beck
 * Created:
 * CVS-ID:      $Id$
 * Copyright:   (c) 1998-2007 Universität Karlsruhe
 * Licence:     This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
 */
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#ifdef HAVE_STRING_H
# include <string.h>
#endif

#include "irnode_t.h"
#include "irgraph_t.h"
#include "irmode_t.h"
#include "iropt_t.h"
#include "ircons_t.h"
#include "irgmod.h"
#include "irgwalk.h"
#include "irvrfy.h"
#include "tv_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "irflag_t.h"
#include "array.h"
#include "irhooks.h"
#include "iredges.h"
#include "irtools.h"
#include "opt_polymorphy.h"
#include "irmemory.h"
#include "xmalloc.h"

#ifdef DO_CACHEOPT
#include "cacheopt/cachesim.h"
#endif

#undef IMAX
#define IMAX(a,b)       ((a) > (b) ? (a) : (b))

#define MAX_PROJ        IMAX(pn_Load_max, pn_Store_max)

enum changes_t {
        DF_CHANGED = 1,       /**< data flow changed */
        CF_CHANGED = 2,       /**< control flow changed */
};

/**
 * walker environment
 */
typedef struct _walk_env_t {
        struct obstack obst;          /**< list of all stores */
        unsigned changes;             /**< a bitmask of graph changes */
} walk_env_t;

/**
 * flags for Load/Store
 */
enum ldst_flags_t {
        LDST_VISITED = 1              /**< if set, this Load/Store is already visited */
};

/** A Load/Store info. */
typedef struct _ldst_info_t {
        ir_node  *projs[MAX_PROJ];    /**< list of Proj's of this node */
        ir_node  *exc_block;          /**< the exception block if available */
        int      exc_idx;             /**< predecessor index in the exception block */
        unsigned flags;               /**< flags */
        unsigned visited;             /**< visited counter for breaking loops */
} ldst_info_t;

/**
 * flags for control flow.
 */
enum block_flags_t {
        BLOCK_HAS_COND = 1,      /**< Block has conditional control flow */
        BLOCK_HAS_EXC  = 2       /**< Block has exceptional control flow */
};

/**
 * a Block info.
 */
typedef struct _block_info_t {
        unsigned flags;               /**< flags for the block */
} block_info_t;

/** the master visited flag for loop detection. */
static unsigned master_visited = 0;

#define INC_MASTER()       ++master_visited
#define MARK_NODE(info)    (info)->visited = master_visited
#define NODE_VISITED(info) (info)->visited >= master_visited

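/*
 * Illustrative sketch (not part of the pass): the visited counter is used to
 * break cycles while walking memory chains.  The typical idiom, as used by
 * follow_Mem_chain() below, looks like:
 *
 *     INC_MASTER();
 *     for (pred = curr; pred != node; ) {
 *             ldst_info_t *pred_info = get_irn_link(pred);
 *             ...
 *             if (NODE_VISITED(pred_info))
 *                     break;            // cycle detected, stop
 *             MARK_NODE(pred_info);
 *     }
 */
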
/**
 * get the Load/Store info of a node
 */
static ldst_info_t *get_ldst_info(ir_node *node, walk_env_t *env) {
        ldst_info_t *info = get_irn_link(node);

        if (! info) {
                info = obstack_alloc(&env->obst, sizeof(*info));
                memset(info, 0, sizeof(*info));
                set_irn_link(node, info);
        }
        return info;
}  /* get_ldst_info */

/**
 * get the Block info of a node
 */
static block_info_t *get_block_info(ir_node *node, walk_env_t *env) {
        block_info_t *info = get_irn_link(node);

        if (! info) {
                info = obstack_alloc(&env->obst, sizeof(*info));
                memset(info, 0, sizeof(*info));
                set_irn_link(node, info);
        }
        return info;
}  /* get_block_info */

/**
 * update the projection info for a Load/Store
 */
static unsigned update_projs(ldst_info_t *info, ir_node *proj)
{
        long nr = get_Proj_proj(proj);

        assert(0 <= nr && nr <= MAX_PROJ && "Wrong proj from LoadStore");

        if (info->projs[nr]) {
                /* there is already one, do CSE */
                exchange(proj, info->projs[nr]);
                return DF_CHANGED;
        }
        else {
                info->projs[nr] = proj;
                return 0;
        }
}  /* update_projs */

/**
 * update the exception block info for a Load/Store node.
 *
 * @param info   the load/store info struct
 * @param block  the exception handler block for this load/store
 * @param pos    the control flow input of the block
 */
static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
{
        assert(info->exc_block == NULL && "more than one exception block found");

        info->exc_block = block;
        info->exc_idx   = pos;
        return 0;
}  /* update_exc */

/** Return the number of uses of an address node */
#define get_irn_n_uses(adr)     get_irn_n_edges(adr)

/**
 * walker, collects all Load/Store/Proj nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
{
        ir_op       *op = get_irn_op(node);
        ir_node     *pred, *blk, *pred_blk;
        ldst_info_t *ldst_info;
        walk_env_t  *wenv = env;

        if (op == op_Proj) {
                ir_node *adr;
                ir_op *op;

                pred = get_Proj_pred(node);
                op   = get_irn_op(pred);

                if (op == op_Load) {
                        ldst_info = get_ldst_info(pred, wenv);

                        wenv->changes |= update_projs(ldst_info, node);

                        if ((ldst_info->flags & LDST_VISITED) == 0) {
                                adr = get_Load_ptr(pred);
                                ldst_info->flags |= LDST_VISITED;
                        }

                        /*
                         * Place the Proj in the same block as the
                         * predecessor Load. This is always ok and prevents
                         * a "non-SSA" form after optimizations if the Proj
                         * is in the wrong block.
                         */
                        blk      = get_nodes_block(node);
                        pred_blk = get_nodes_block(pred);
                        if (blk != pred_blk) {
                                wenv->changes |= DF_CHANGED;
                                set_nodes_block(node, pred_blk);
                        }
                } else if (op == op_Store) {
                        ldst_info = get_ldst_info(pred, wenv);

                        wenv->changes |= update_projs(ldst_info, node);

                        if ((ldst_info->flags & LDST_VISITED) == 0) {
                                adr = get_Store_ptr(pred);
                                ldst_info->flags |= LDST_VISITED;
                        }

                        /*
                         * Place the Proj in the same block as the
                         * predecessor Store. This is always ok and prevents
                         * a "non-SSA" form after optimizations if the Proj
                         * is in the wrong block.
                         */
                        blk      = get_nodes_block(node);
                        pred_blk = get_nodes_block(pred);
                        if (blk != pred_blk) {
                                wenv->changes |= DF_CHANGED;
                                set_nodes_block(node, pred_blk);
                        }
                }
        } else if (op == op_Block) {
                int i;

                for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
                        ir_node      *pred_block;
                        block_info_t *bl_info;

                        pred = skip_Proj(get_Block_cfgpred(node, i));

                        /* ignore Bad predecessors, they will be removed later */
                        if (is_Bad(pred))
                                continue;

                        pred_block = get_nodes_block(pred);
                        bl_info    = get_block_info(pred_block, wenv);

                        if (is_fragile_op(pred))
                                bl_info->flags |= BLOCK_HAS_EXC;
                        else if (is_irn_forking(pred))
                                bl_info->flags |= BLOCK_HAS_COND;

                        if (get_irn_op(pred) == op_Load || get_irn_op(pred) == op_Store) {
                                ldst_info = get_ldst_info(pred, wenv);

                                wenv->changes |= update_exc(ldst_info, node, i);
                        }
                }
        }
}  /* collect_nodes */

/**
 * Returns an entity if the address ptr points to a constant one.
 *
 * @param ptr  the address
 *
 * @return an entity or NULL
 */
static ir_entity *find_constant_entity(ir_node *ptr)
{
        for (;;) {
                ir_op *op = get_irn_op(ptr);

                if (op == op_SymConst && (get_SymConst_kind(ptr) == symconst_addr_ent)) {
                        ir_entity *ent = get_SymConst_entity(ptr);
                        if (variability_constant == get_entity_variability(ent))
                                return ent;
                        return NULL;
                } else if (op == op_Sel) {
                        ir_entity *ent = get_Sel_entity(ptr);
                        ir_type   *tp  = get_entity_owner(ent);

                        /* Do not fiddle with polymorphism. */
                        if (is_Class_type(get_entity_owner(ent)) &&
                                ((get_entity_n_overwrites(ent)    != 0) ||
                                (get_entity_n_overwrittenby(ent) != 0)   ) )
                                return NULL;

                        if (is_Array_type(tp)) {
                                /* check bounds */
                                int i, n;

                                for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
                                        ir_node *bound;
                                        tarval *tlower, *tupper;
                                        ir_node *index = get_Sel_index(ptr, i);
                                        tarval *tv     = computed_value(index);

                                        /* check if the index is constant */
                                        if (tv == tarval_bad)
                                                return NULL;

                                        bound  = get_array_lower_bound(tp, i);
                                        tlower = computed_value(bound);
                                        bound  = get_array_upper_bound(tp, i);
                                        tupper = computed_value(bound);

                                        if (tlower == tarval_bad || tupper == tarval_bad)
                                                return NULL;

                                        if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
                                                return NULL;
                                        if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
                                                return NULL;

                                        /* ok, bounds check finished */
                                }
                        }

                        if (variability_constant == get_entity_variability(ent))
                                return ent;

                        /* try next */
                        ptr = get_Sel_ptr(ptr);
                } else
                        return NULL;
        }
}  /* find_constant_entity */

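/*
 * Illustrative sketch (hypothetical source fragment): for
 *
 *     static const int tab[4] = { 1, 2, 3, 5 };
 *     ... x = tab[2]; ...
 *
 * the Load address is roughly a Sel(SymConst(tab)) with the constant index 2
 * inside the array bounds, so find_constant_entity() returns the entity for
 * tab and the Load can later be folded to the constant 3.
 */
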
/**
 * Return the array index of the Sel node n in dimension dim as a long.
 */
static long get_Sel_array_index_long(ir_node *n, int dim) {
        ir_node *index = get_Sel_index(n, dim);
        assert(get_irn_op(index) == op_Const);
        return get_tarval_long(get_Const_tarval(index));
}  /* get_Sel_array_index_long */

/**
 * Returns the accessed component graph path for a
 * node computing an address.
 *
 * @param ptr    the node computing the address
 * @param depth  current depth in steps upward from the root
 *               of the address
 */
static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
        compound_graph_path *res = NULL;
        ir_entity           *root, *field;
        int                 path_len, pos;

        if (get_irn_op(ptr) == op_SymConst) {
                /* a SymConst. If the depth is 0, this is an access to a global
                 * entity and we don't need a component path, else we know
                 * at least its length.
                 */
                assert(get_SymConst_kind(ptr) == symconst_addr_ent);
                root = get_SymConst_entity(ptr);
                res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
        } else {
                assert(get_irn_op(ptr) == op_Sel);
                /* it's a Sel, go up until we find the root */
                res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);

                /* fill up the step in the path at the current position */
                field    = get_Sel_entity(ptr);
                path_len = get_compound_graph_path_length(res);
                pos      = path_len - depth - 1;
                set_compound_graph_path_node(res, pos, field);

                if (is_Array_type(get_entity_owner(field))) {
                        assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
                        set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
                }
        }
        return res;
}  /* rec_get_accessed_path */

/** Returns an access path or NULL.  The access path is only
 *  valid if the graph is in phase_high and _no_ address computation is used.
 */
static compound_graph_path *get_accessed_path(ir_node *ptr) {
        return rec_get_accessed_path(ptr, 0);
}  /* get_accessed_path */

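/*
 * Illustrative sketch (hypothetical source fragment): for an access like
 *
 *     struct S { int a[4]; int f; };
 *     static const struct S s = ...;
 *     ... x = s.a[3]; ...
 *
 * the address is computed by a chain of Sels rooted at SymConst(s), and
 * get_accessed_path() builds the component graph path s -> a with array
 * index 3, which can then be resolved against the entity's constant
 * initializer by get_compound_ent_value_by_path().
 */
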
/* forward */
static void reduce_adr_usage(ir_node *ptr);

/**
 * Update a Load that may have lost its uses.
 */
static void handle_load_update(ir_node *load) {
        ldst_info_t *info = get_irn_link(load);

        /* do NOT touch volatile loads for now */
        if (get_Load_volatility(load) == volatility_is_volatile)
                return;

        if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
                ir_node *ptr = get_Load_ptr(load);
                ir_node *mem = get_Load_mem(load);

                /* a Load whose value is neither used nor exception checked, remove it */
                exchange(info->projs[pn_Load_M], mem);
                exchange(load, new_Bad());
                reduce_adr_usage(ptr);
        }
}  /* handle_load_update */

/**
 * A use of an address node has vanished. Check if this was a Proj
 * node and update the counters.
 */
static void reduce_adr_usage(ir_node *ptr) {
        if (is_Proj(ptr)) {
                if (get_irn_n_edges(ptr) <= 0) {
                        /* this Proj is dead now */
                        ir_node *pred = get_Proj_pred(ptr);

                        if (is_Load(pred)) {
                                ldst_info_t *info = get_irn_link(pred);
                                info->projs[get_Proj_proj(ptr)] = NULL;

                                /* this node lost its result Proj, handle that */
                                handle_load_update(pred);
                        }
                }
        }
}  /* reduce_adr_usage */

/**
 * Check if an already existing value of mode old_mode can be converted
 * into the needed one new_mode without loss.
 */
static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode) {
        if (old_mode == new_mode)
                return 1;

        /* if both modes are two's complement ones, we can always convert the
           stored value into the needed one. */
        if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
                  get_mode_arithmetic(old_mode) == irma_twos_complement &&
                  get_mode_arithmetic(new_mode) == irma_twos_complement)
                return 1;
        return 0;
}  /* can_use_stored_value */

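/*
 * Illustrative sketch: a 32-bit two's complement value can stand in for a
 * 16-bit load from the same address, because the truncation is lossless:
 *
 *     can_use_stored_value(mode_Is, mode_Hs)   -> 1  (32 -> 16 bit, Conv added)
 *     can_use_stored_value(mode_Hs, mode_Is)   -> 0  (the upper bits are missing)
 *     can_use_stored_value(mode_F,  mode_Is)   -> 0  (not two's complement)
 *
 * The mode names are the standard Firm modes (Is = 32-bit signed,
 * Hs = 16-bit signed, F = float).
 */
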
/**
 * Follow the memory chain as long as there are only Loads
 * and alias free Stores and try to replace the current Load or Store
 * by a previous one.
 * Note that in unreachable loops it might happen that we reach the same
 * Load again, or fall into a cycle.
 * We break such cycles using a special visited flag.
 *
 * INC_MASTER() must be called before diving in.
 */
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
        unsigned res = 0;
        ldst_info_t *info = get_irn_link(load);
        ir_node *pred;
        ir_node *ptr       = get_Load_ptr(load);
        ir_node *mem       = get_Load_mem(load);
        ir_mode *load_mode = get_Load_mode(load);

        for (pred = curr; load != pred; ) {
                ldst_info_t *pred_info = get_irn_link(pred);

                /*
                 * BEWARE: one might think that checking the modes is useless, because
                 * if the pointers are identical, they refer to the same object.
                 * This is only true in strongly typed languages, not in C, where the
                 * following is possible: a = *(ir_type1 *)p; b = *(ir_type2 *)p ...
                 */
                if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
                    can_use_stored_value(get_irn_mode(get_Store_value(pred)), load_mode)) {
                        /*
                         * a Load immediately after a Store -- a read after write.
                         * We may remove the Load if neither the Load nor the Store has an
                         * exception handler OR they are in the same block. In the latter
                         * case the Load cannot throw an exception if the previous Store
                         * did not.
                         *
                         * Why do we need to check for a Store exception? If the Store
                         * cannot be executed (ROM) the exception handler might simply
                         * jump into the Load block :-(
                         * We could make it a little bit better if we would know that the
                         * exception handler of the Store jumps directly to the end...
                         */
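                        /*
                         * Illustrative sketch (hypothetical source fragment):
                         *
                         *     p->f = v;      // Store
                         *     x    = p->f;   // Load from the same address
                         *
                         * becomes
                         *
                         *     p->f = v;
                         *     x    = v;      // Load replaced by the stored value
                         */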
                        if ((pred_info->projs[pn_Store_X_except] == NULL && info->projs[pn_Load_X_except] == NULL) ||
                            get_nodes_block(load) == get_nodes_block(pred)) {
                                ir_node *value = get_Store_value(pred);

                                DBG_OPT_RAW(load, value);

                                /* add a Conv if needed */
                                if (get_irn_mode(get_Store_value(pred)) != load_mode) {
                                        value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
                                }

                                if (info->projs[pn_Load_M])
                                        exchange(info->projs[pn_Load_M], mem);

                                /* no exception */
                                if (info->projs[pn_Load_X_except]) {
                                        exchange(info->projs[pn_Load_X_except], new_Bad());
                                        res |= CF_CHANGED;
                                }

                                if (info->projs[pn_Load_res])
                                        exchange(info->projs[pn_Load_res], value);

                                exchange(load, new_Bad());
                                reduce_adr_usage(ptr);
                                return res | DF_CHANGED;
                        }
                } else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
                           can_use_stored_value(get_Load_mode(pred), load_mode)) {
                        /*
                         * a Load after a Load -- a read after read.
                         * We may remove the second Load if it does not have an exception
                         * handler OR they are in the same block. In the latter case the
                         * Load cannot throw an exception if the previous Load did not.
                         *
                         * Here, there is no need to check if the previous Load has an
                         * exception handler because they would have exactly the same
                         * exception...
                         */
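                        /*
                         * Illustrative sketch (hypothetical source fragment):
                         *
                         *     x = p->f;      // first Load
                         *     y = p->f;      // second Load, same address
                         *
                         * becomes
                         *
                         *     x = p->f;
                         *     y = x;         // second Load replaced by the first result
                         */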
                        if (info->projs[pn_Load_X_except] == NULL || get_nodes_block(load) == get_nodes_block(pred)) {
                                ir_node *value;

                                DBG_OPT_RAR(load, pred);

                                /* the result is used */
                                if (info->projs[pn_Load_res]) {
                                        if (pred_info->projs[pn_Load_res] == NULL) {
                                                /* create a new Proj again */
                                                pred_info->projs[pn_Load_res] = new_r_Proj(current_ir_graph, get_nodes_block(pred), pred, get_Load_mode(pred), pn_Load_res);
                                        }
                                        value = pred_info->projs[pn_Load_res];

                                        /* add a Conv if needed */
                                        if (get_Load_mode(pred) != load_mode) {
                                                value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
                                        }

                                        exchange(info->projs[pn_Load_res], value);
                                }

                                if (info->projs[pn_Load_M])
                                        exchange(info->projs[pn_Load_M], mem);

                                /* no exception */
                                if (info->projs[pn_Load_X_except]) {
                                        exchange(info->projs[pn_Load_X_except], new_Bad());
                                        res |= CF_CHANGED;
                                }

                                exchange(load, new_Bad());
                                reduce_adr_usage(ptr);
                                return res | DF_CHANGED;
                        }
                }

                if (get_irn_op(pred) == op_Store) {
                        /* check if we can pass through this store */
                        ir_alias_relation rel = get_alias_relation(
                                current_ir_graph,
                                get_Store_ptr(pred),
                                get_irn_mode(get_Store_value(pred)),
                                ptr, load_mode);
                        /* if there might be an alias, we cannot pass this Store */
                        if (rel != no_alias)
                                break;
                        pred = skip_Proj(get_Store_mem(pred));
                } else if (get_irn_op(pred) == op_Load) {
                        pred = skip_Proj(get_Load_mem(pred));
                } else {
                        /* follow only Load/Store chains */
                        break;
                }

                /* check for cycles */
                if (NODE_VISITED(pred_info))
                        break;
                MARK_NODE(pred_info);
        }

        if (get_irn_op(pred) == op_Sync) {
                int i;

                /* handle all Sync predecessors */
                for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
                        res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
                        if (res)
                                break;
                }
        }

        return res;
}  /* follow_Mem_chain */

/**
 * optimize a Load
 *
 * @param load  the Load node
 */
static unsigned optimize_load(ir_node *load)
{
        ldst_info_t *info = get_irn_link(load);
        ir_node *mem, *ptr, *new_node;
        ir_entity *ent;
        unsigned res = 0;

        /* do NOT touch volatile loads for now */
        if (get_Load_volatility(load) == volatility_is_volatile)
                return 0;

        /* the address of the load to be optimized */
        ptr = get_Load_ptr(load);

        /*
         * Check if we can remove the exception from a Load:
         * This can be done, if the address is from a Sel(Alloc) and
         * the Sel type is a subtype of the allocated type.
         *
         * This optimizes some often used OO constructs,
         * like x = new O; x->t;
         */
        if (info->projs[pn_Load_X_except]) {
                if (is_Sel(ptr)) {
                        ir_node *mem = get_Sel_mem(ptr);

                        /* FIXME: works with the current FE, but better use the base */
                        if (get_irn_op(skip_Proj(mem)) == op_Alloc) {
                                /* ok, check the types */
                                ir_entity *ent    = get_Sel_entity(ptr);
                                ir_type   *s_type = get_entity_type(ent);
                                ir_type   *a_type = get_Alloc_type(mem);

                                if (is_SubClass_of(s_type, a_type)) {
                                        /* ok, condition met: there can't be an exception because
                                         * Alloc guarantees that enough memory was allocated */

                                        exchange(info->projs[pn_Load_X_except], new_Bad());
                                        info->projs[pn_Load_X_except] = NULL;
                                        res |= CF_CHANGED;
                                }
                        }
                } else if ((get_irn_op(skip_Proj(ptr)) == op_Alloc) ||
                        ((get_irn_op(ptr) == op_Cast) && (get_irn_op(skip_Proj(get_Cast_op(ptr))) == op_Alloc))) {
                                /* simple case: a direct load after an Alloc. A Firm Alloc throws
                                 * an exception in case of out-of-memory. So, there is no way for an
                                 * exception in this load.
                                 * This code is constructed by the "exception lowering" in the Jack compiler.
                                 */
                                exchange(info->projs[pn_Load_X_except], new_Bad());
                                info->projs[pn_Load_X_except] = NULL;
                                res |= CF_CHANGED;
                }
        }

        /* The mem of the Load. Must still be returned after optimization. */
        mem  = get_Load_mem(load);

        if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
                /* a Load whose value is neither used nor exception checked, remove it */
                exchange(info->projs[pn_Load_M], mem);

                exchange(load, new_Bad());
                reduce_adr_usage(ptr);
                return res | DF_CHANGED;
        }

        /* Load from a constant polymorphic field, where we can resolve
           polymorphism. */
        new_node = transform_node_Load(load);
        if (new_node != load) {
                if (info->projs[pn_Load_M]) {
                        exchange(info->projs[pn_Load_M], mem);
                        info->projs[pn_Load_M] = NULL;
                }
                if (info->projs[pn_Load_X_except]) {
                        exchange(info->projs[pn_Load_X_except], new_Bad());
                        info->projs[pn_Load_X_except] = NULL;
                }
                if (info->projs[pn_Load_res])
                        exchange(info->projs[pn_Load_res], new_node);

                exchange(load, new_Bad());
                reduce_adr_usage(ptr);
                return res | DF_CHANGED;
        }

        /* check if we can determine the entity that will be loaded */
        ent = find_constant_entity(ptr);
        if (ent) {
                if ((allocation_static == get_entity_allocation(ent)) &&
                        (visibility_external_allocated != get_entity_visibility(ent))) {
                        /* a static allocation that is not external: there should be NO exception
                         * when loading. */

                        /* no exception, clear the info field as it might be checked later again */
                        if (info->projs[pn_Load_X_except]) {
                                exchange(info->projs[pn_Load_X_except], new_Bad());
                                info->projs[pn_Load_X_except] = NULL;
                                res |= CF_CHANGED;
                        }

                        if (variability_constant == get_entity_variability(ent)
                                && is_atomic_entity(ent)) {
                                /* simple case: we load the content of a constant atomic
                                 * value: replace it by the constant itself.
                                 * Note that the entity might not be atomic anymore after
                                 * lowering of Sels; in that case we could still load, but
                                 * it's more complicated. */

                                /* no memory */
                                if (info->projs[pn_Load_M]) {
                                        exchange(info->projs[pn_Load_M], mem);
                                        res |= DF_CHANGED;
                                }
                                /* no result :-) */
                                if (info->projs[pn_Load_res]) {
                                        ir_node *c = copy_const_value(get_irn_dbg_info(load), get_atomic_ent_value(ent));

                                        DBG_OPT_RC(load, c);
                                        exchange(info->projs[pn_Load_res], c);
                                        res |= DF_CHANGED;
                                }
                                exchange(load, new_Bad());
                                reduce_adr_usage(ptr);
                                return res;
                        } else if (variability_constant == get_entity_variability(ent)) {
                                compound_graph_path *path = get_accessed_path(ptr);

                                if (path) {
                                        ir_node *c;

                                        assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
                                        /*
                                        {
                                                int j;
                                                for (j = 0; j < get_compound_graph_path_length(path); ++j) {
                                                        ir_entity *node = get_compound_graph_path_node(path, j);
                                                        fprintf(stdout, ".%s", get_entity_name(node));
                                                        if (is_Array_type(get_entity_owner(node)))
                                                                fprintf(stdout, "[%d]", get_compound_graph_path_array_index(path, j));
                                                }
                                                printf("\n");
                                        }
                                        */

                                        c = get_compound_ent_value_by_path(ent, path);
                                        free_compound_graph_path(path);

                                        /* printf("  cons: "); DDMN(c); */

                                        if (info->projs[pn_Load_M]) {
                                                exchange(info->projs[pn_Load_M], mem);
                                                res |= DF_CHANGED;
                                        }
                                        if (info->projs[pn_Load_res]) {
                                                exchange(info->projs[pn_Load_res], copy_const_value(get_irn_dbg_info(load), c));
                                                res |= DF_CHANGED;
                                        }
                                        exchange(load, new_Bad());
                                        reduce_adr_usage(ptr);
                                        return res;
                                } else {
                                        /* We cannot determine a correct access path. E.g., in jack, we load
                                        a byte from an object to generate an exception. Happens in test program
                                        Reflectiontest.
                                        printf(">>>>>>>>>>>>> Found access to constant entity %s in function %s\n", get_entity_name(ent),
                                        get_entity_name(get_irg_entity(current_ir_graph)));
                                        printf("  load: "); DDMN(load);
                                        printf("  ptr:  "); DDMN(ptr);
                                        */
                                }
                        }
                }
        }

        /* Check if the address of this load is used more than once.
         * If not, this load cannot be removed in any case. */
        if (get_irn_n_uses(ptr) <= 1)
                return res;

        /*
         * follow the memory chain as long as there are only Loads
         * and try to replace the current Load or Store by a previous one.
         * Note that in unreachable loops it might happen that we reach the
         * same Load again, or fall into a cycle.
         * We break such cycles using a special visited flag.
         */
        INC_MASTER();
        res = follow_Mem_chain(load, skip_Proj(mem));
        return res;
}  /* optimize_load */

/**
 * Check whether a value of mode new_mode would completely overwrite a value
 * of mode old_mode in memory.
 */
static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
{
        return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
}  /* is_completely_overwritten */

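/*
 * Illustrative sketch: a 32-bit store to an address completely overwrites an
 * earlier 8-bit store to the same address, so the earlier one is dead:
 *
 *     is_completely_overwritten(mode_Bs, mode_Is)   -> 1  (8 bits under 32)
 *     is_completely_overwritten(mode_Is, mode_Bs)   -> 0  (24 bits survive)
 *
 * mode_Bs/mode_Is are the standard 8/32-bit signed Firm modes.
 */
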
/**
 * follow the memory chain as long as there are only Loads and alias free Stores.
 *
 * INC_MASTER() must be called before diving in.
 */
static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
        unsigned res = 0;
        ldst_info_t *info = get_irn_link(store);
        ir_node *pred;
        ir_node *ptr = get_Store_ptr(store);
        ir_node *mem = get_Store_mem(store);
        ir_node *value = get_Store_value(store);
        ir_mode *mode  = get_irn_mode(value);
        ir_node *block = get_nodes_block(store);

        for (pred = curr; pred != store;) {
                ldst_info_t *pred_info = get_irn_link(pred);

                /*
                 * BEWARE: one might think that checking the modes is useless, because
                 * if the pointers are identical, they refer to the same object.
                 * This is only true in strongly typed languages, not in C, where the
                 * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
                 * However, if the mode that is written has a size greater than or
                 * equal to the old one, the old value is completely overwritten and
                 * can be killed ...
                 */
                if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
                    get_nodes_block(pred) == block &&
                    is_completely_overwritten(get_irn_mode(get_Store_value(pred)), mode)) {
                        /*
                         * a Store after a Store in the same block -- a write after write.
                         * We may remove the first Store, if it does not have an exception handler.
                         *
                         * TODO: What, if both have the same exception handler ???
                         */
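                        /*
                         * Illustrative sketch (hypothetical source fragment):
                         *
                         *     p->f = a;      // first Store, completely overwritten
                         *     p->f = b;      // second Store, same address and block
                         *
                         * becomes
                         *
                         *     p->f = b;      // first Store removed
                         */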
                        if (get_Store_volatility(pred) != volatility_is_volatile && !pred_info->projs[pn_Store_X_except]) {
                                DBG_OPT_WAW(pred, store);
                                exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
                                exchange(pred, new_Bad());
                                reduce_adr_usage(ptr);
                                return DF_CHANGED;
                        }
                } else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
                           value == pred_info->projs[pn_Load_res]) {
                        /*
                         * a Store of a value just Loaded from the same address -- a write
                         * after read. We may remove the Store, if it does not have an
                         * exception handler.
                         */
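                        /*
                         * Illustrative sketch (hypothetical source fragment):
                         *
                         *     x    = p->f;   // Load
                         *     p->f = x;      // Store of the unmodified value
                         *
                         * The Store writes back what is already in memory and can
                         * be removed.
                         */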
                        if (! info->projs[pn_Store_X_except]) {
                                DBG_OPT_WAR(store, pred);
                                exchange(info->projs[pn_Store_M], mem);
                                exchange(store, new_Bad());
                                reduce_adr_usage(ptr);
                                return DF_CHANGED;
                        }
                }

                if (get_irn_op(pred) == op_Store) {
                        /* check if we can pass through this store */
                        ir_alias_relation rel = get_alias_relation(
                                current_ir_graph,
                                get_Store_ptr(pred),
                                get_irn_mode(get_Store_value(pred)),
                                ptr, mode);
                        /* if there might be an alias, we cannot pass this Store */
                        if (rel != no_alias)
                                break;
                        pred = skip_Proj(get_Store_mem(pred));
                } else if (get_irn_op(pred) == op_Load) {
                        pred = skip_Proj(get_Load_mem(pred));
                } else {
                        /* follow only Load/Store chains */
                        break;
                }

                /* check for cycles */
                if (NODE_VISITED(pred_info))
                        break;
                MARK_NODE(pred_info);
        }

        if (get_irn_op(pred) == op_Sync) {
                int i;

                /* handle all Sync predecessors */
                for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
                        res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
                        if (res)
                                break;
                }
        }
        return res;
}  /* follow_Mem_chain_for_Store */

/**
 * optimize a Store
 *
 * @param store  the Store node
 */
static unsigned optimize_store(ir_node *store) {
        ir_node *ptr, *mem;

        if (get_Store_volatility(store) == volatility_is_volatile)
                return 0;

        ptr = get_Store_ptr(store);

        /* Check if the address of this Store is used more than once.
         * If not, this Store cannot be removed in any case. */
        if (get_irn_n_uses(ptr) <= 1)
                return 0;

        mem = get_Store_mem(store);

        /* follow the memory chain as long as there are only Loads */
        INC_MASTER();
        return follow_Mem_chain_for_Store(store, skip_Proj(mem));
}  /* optimize_store */

/**
 * walker, optimizes Phi after Stores to identical places:
 * Does the following optimization:
 * @verbatim
 *
 *   val1   val2   val3          val1  val2  val3
 *    |      |      |               \    |    /
 *  Store  Store  Store              \   |   /
 *      \    |    /                   PhiData
 *       \   |   /                       |
 *        \  |  /                      Store
 *          PhiM
 *
 * @endverbatim
 * This reduces the number of stores and allows for predicated execution.
 * It moves Stores towards the end of a function, which may be bad.
 *
 * This is only possible if the predecessor blocks have only one successor.
 */
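/*
 * Illustrative sketch (hypothetical source fragment): for
 *
 *     if (c)
 *             p->f = a;
 *     else
 *             p->f = b;
 *
 * the two Stores feeding the memory Phi are replaced by a data Phi selecting
 * between a and b, followed by a single Store in the join block, roughly
 * p->f = c ? a : b;.
 */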
static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
{
        int i, n;
        ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
        ir_mode *mode;
        ir_node **inM, **inD, **stores;
        int *idx;
        dbg_info *db = NULL;
        ldst_info_t *info;
        block_info_t *bl_info;
        unsigned res = 0;

        /* Must be a memory Phi */
        if (get_irn_mode(phi) != mode_M)
                return 0;

        n = get_Phi_n_preds(phi);
        if (n <= 0)
                return 0;

        store = skip_Proj(get_Phi_pred(phi, 0));
        old_store = store;
        if (get_irn_op(store) != op_Store)
                return 0;

        block = get_nodes_block(store);

        /* abort on dead blocks */
        if (is_Block_dead(block))
                return 0;

        /* check if the block is post dominated by Phi-block
           and has no exception exit */
        bl_info = get_irn_link(block);
        if (bl_info->flags & BLOCK_HAS_EXC)
                return 0;

        phi_block = get_nodes_block(phi);
        if (! block_postdominates(phi_block, block))
                return 0;

        /* this is the address of the store */
        ptr  = get_Store_ptr(store);
        mode = get_irn_mode(get_Store_value(store));
        info = get_irn_link(store);
        exc  = info->exc_block;

        for (i = 1; i < n; ++i) {
                ir_node *pred = skip_Proj(get_Phi_pred(phi, i));

                if (get_irn_op(pred) != op_Store)
                        return 0;

                if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
                        return 0;

                info = get_irn_link(pred);

                /* check, if all stores have the same exception flow */
                if (exc != info->exc_block)
                        return 0;

                /* abort on dead blocks */
                block = get_nodes_block(pred);
                if (is_Block_dead(block))
                        return 0;

                /* check if the block is post dominated by Phi-block
                   and has no exception exit. Note that block must be different from
                   Phi-block, else we would move a Store from the End of a block to its
                   Start... */
                bl_info = get_irn_link(block);
                if (bl_info->flags & BLOCK_HAS_EXC)
                        return 0;
                if (block == phi_block || ! block_postdominates(phi_block, block))
                        return 0;
        }

        /*
         * ok, when we are here, we found all predecessors of a Phi that
         * are Stores to the same address and size. That means whatever
         * we do before we enter the block of the Phi, we do a Store.
         * So, we can move the Store to the current block:
         *
         *   val1    val2    val3          val1  val2  val3
         *    |       |       |               \    |    /
         * | Str | | Str | | Str |             \   |   /
         *      \     |     /                   PhiData
         *       \    |    /                       |
         *        \   |   /                       Str
         *           PhiM
         *
         * This is only allowed if the predecessor blocks have only one successor.
         */

        NEW_ARR_A(ir_node *, stores, n);
        NEW_ARR_A(ir_node *, inM, n);
        NEW_ARR_A(ir_node *, inD, n);
        NEW_ARR_A(int, idx, n);

        /* Prepare: Collect all Store nodes.  We must do this
           first because we otherwise may lose a Store when exchanging its
           memory Proj.
         */
        for (i = 0; i < n; ++i)
                stores[i] = skip_Proj(get_Phi_pred(phi, i));

        /* Prepare: Skip the memory Proj: we need this in the case some stores
           are cascaded.
           Beware: One Store might be included more than once in the stores[]
           list, so we must prevent doing the exchange more than once.
         */
        for (i = 0; i < n; ++i) {
                ir_node *store = stores[i];
                ir_node *proj_m;

                info = get_irn_link(store);
                proj_m = info->projs[pn_Store_M];

                if (is_Proj(proj_m) && get_Proj_pred(proj_m) == store)
                        exchange(proj_m, get_Store_mem(store));
        }

        /* first step: collect all inputs */
        for (i = 0; i < n; ++i) {
                ir_node *store = stores[i];
                info = get_irn_link(store);

                inM[i] = get_Store_mem(store);
                inD[i] = get_Store_value(store);
                idx[i] = info->exc_idx;
        }
        block = get_nodes_block(phi);

        /* second step: create a new memory Phi */
        phiM = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inM, mode_M);

        /* third step: create a new data Phi */
        phiD = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inD, mode);

        /* fourth step: create the Store */
        store = new_rd_Store(db, current_ir_graph, block, phiM, ptr, phiD);
#ifdef DO_CACHEOPT
        co_set_irn_name(store, co_get_irn_ident(old_store));
#endif

        projM = new_rd_Proj(NULL, current_ir_graph, block, store, mode_M, pn_Store_M);

        info = get_ldst_info(store, wenv);
        info->projs[pn_Store_M] = projM;

        /* fifth step: repair exception flow */
        if (exc) {
                ir_node *projX = new_rd_Proj(NULL, current_ir_graph, block, store, mode_X, pn_Store_X_except);

                info->projs[pn_Store_X_except] = projX;
                info->exc_block                = exc;
                info->exc_idx                  = idx[0];

                for (i = 0; i < n; ++i) {
                        set_Block_cfgpred(exc, idx[i], projX);
                }

                if (n > 1) {
                        /* the exception block should be optimized as some inputs are identical now */
                }

                res |= CF_CHANGED;
        }

        /* sixth step: replace old Phi */
        exchange(phi, projM);

        return res | DF_CHANGED;
}  /* optimize_phi */

/**
 * walker, do the optimizations
 */
static void do_load_store_optimize(ir_node *n, void *env) {
        walk_env_t *wenv = env;

        switch (get_irn_opcode(n)) {

        case iro_Load:
                wenv->changes |= optimize_load(n);
                break;

        case iro_Store:
                wenv->changes |= optimize_store(n);
                break;

        case iro_Phi:
                wenv->changes |= optimize_phi(n, wenv);
                break;

        default:
                ;
        }
}  /* do_load_store_optimize */

/*
 * do the load store optimization
 */
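/*
 * Typical usage (a sketch, not taken from this file): run the pass on every
 * graph of the program after construction is finished:
 *
 *     int i;
 *     for (i = get_irp_n_irgs() - 1; i >= 0; --i)
 *             optimize_load_store(get_irp_irg(i));
 */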
void optimize_load_store(ir_graph *irg) {
        walk_env_t env;

        assert(get_irg_phase_state(irg) != phase_building);
        assert(get_irg_pinned(irg) != op_pin_state_floats &&
                "LoadStore optimization needs pinned graph");

        if (! get_opt_redundant_loadstore())
                return;

        edges_assure(irg);

        /* for Phi optimization post-dominators are needed ... */
        assure_postdoms(irg);

        if (get_opt_alias_analysis()) {
                assure_irg_address_taken_computed(irg);
                assure_irp_globals_address_taken_computed();
        }

        obstack_init(&env.obst);
        env.changes = 0;

        /* init the links, then collect Loads/Stores/Proj's in lists */
        master_visited = 0;
        irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);

        /* now we have collected enough information, optimize */
        irg_walk_graph(irg, NULL, do_load_store_optimize, &env);

        obstack_free(&env.obst, NULL);

        /* Handle graph state */
        if (env.changes) {
                if (get_irg_outs_state(irg) == outs_consistent)
                        set_irg_outs_inconsistent(irg);
        }

        if (env.changes & CF_CHANGED) {
                /* is this really needed? Yes, control flow changed, blocks
                   may have Bad() predecessors. */
                set_irg_doms_inconsistent(irg);
        }
}  /* optimize_load_store */