/*
 * Copyright (C) 1995-2007 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Load/Store optimizations.
 * @author  Michael Beck
 * @version $Id$
 */
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#include <string.h>

#include "iroptimize.h"
#include "irnode_t.h"
#include "irgraph_t.h"
#include "irmode_t.h"
#include "iropt_t.h"
#include "ircons_t.h"
#include "irgmod.h"
#include "irgwalk.h"
#include "irvrfy.h"
#include "tv_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "irflag_t.h"
#include "array.h"
#include "irhooks.h"
#include "iredges.h"
#include "irtools.h"
#include "opt_polymorphy.h"
#include "irmemory.h"
#include "xmalloc.h"

#ifdef DO_CACHEOPT
#include "cacheopt/cachesim.h"
#endif

#undef IMAX
#define IMAX(a,b)       ((a) > (b) ? (a) : (b))

#define MAX_PROJ        IMAX(pn_Load_max, pn_Store_max)

enum changes_t {
	DF_CHANGED = 1,       /**< data flow changed */
	CF_CHANGED = 2,       /**< control flow changed */
};

/**
 * walker environment
 */
typedef struct _walk_env_t {
	struct obstack obst;          /**< obstack used to allocate the temporary node infos */
	unsigned changes;             /**< a bitmask of graph changes */
} walk_env_t;

/**
 * flags for Load/Store
 */
enum ldst_flags_t {
	LDST_VISITED = 1              /**< if set, this Load/Store is already visited */
};

/** A Load/Store info. */
typedef struct _ldst_info_t {
	ir_node  *projs[MAX_PROJ];    /**< list of Proj's of this node */
	ir_node  *exc_block;          /**< the exception block if available */
	int      exc_idx;             /**< predecessor index in the exception block */
	unsigned flags;               /**< flags */
	unsigned visited;             /**< visited counter for breaking loops */
} ldst_info_t;

/**
 * flags for control flow.
 */
enum block_flags_t {
	BLOCK_HAS_COND = 1,      /**< Block has conditional control flow */
	BLOCK_HAS_EXC  = 2       /**< Block has exceptional control flow */
};

/**
 * a Block info.
 */
typedef struct _block_info_t {
	unsigned flags;               /**< flags for the block */
} block_info_t;

/** the master visited flag for loop detection. */
static unsigned master_visited = 0;

#define INC_MASTER()       ++master_visited
#define MARK_NODE(info)    ((info)->visited = master_visited)
#define NODE_VISITED(info) ((info)->visited >= master_visited)
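
/*
 * Illustrative note (editor's sketch, not from the original source): the
 * visited counter implements "versioned" marks.  Each new chain walk bumps
 * the master counter once, which implicitly invalidates all older marks
 * without having to clear them:
 *
 *     INC_MASTER();
 *     ...
 *     if (NODE_VISITED(info))   // already seen in THIS walk -> cycle
 *         break;
 *     MARK_NODE(info);          // tag the node for this walk only
 */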

/**
 * get the Load/Store info of a node
 */
static ldst_info_t *get_ldst_info(ir_node *node, walk_env_t *env) {
	ldst_info_t *info = get_irn_link(node);

	if (! info) {
		info = obstack_alloc(&env->obst, sizeof(*info));
		memset(info, 0, sizeof(*info));
		set_irn_link(node, info);
	}
	return info;
}  /* get_ldst_info */

/**
 * get the Block info of a node
 */
static block_info_t *get_block_info(ir_node *node, walk_env_t *env) {
	block_info_t *info = get_irn_link(node);

	if (! info) {
		info = obstack_alloc(&env->obst, sizeof(*info));
		memset(info, 0, sizeof(*info));
		set_irn_link(node, info);
	}
	return info;
}  /* get_block_info */

/**
 * update the projection info for a Load/Store
 */
static unsigned update_projs(ldst_info_t *info, ir_node *proj)
{
	long nr = get_Proj_proj(proj);

	assert(0 <= nr && nr < MAX_PROJ && "Wrong proj from LoadStore");

	if (info->projs[nr]) {
		/* there is already one, do CSE */
		exchange(proj, info->projs[nr]);
		return DF_CHANGED;
	}
	else {
		info->projs[nr] = proj;
		return 0;
	}
}  /* update_projs */

/**
 * update the exception block info for a Load/Store node.
 *
 * @param info   the load/store info struct
 * @param block  the exception handler block for this load/store
 * @param pos    the control flow input of the block
 */
static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
{
	assert(info->exc_block == NULL && "more than one exception block found");

	info->exc_block = block;
	info->exc_idx   = pos;
	return 0;
}  /* update_exc */

/** Return the number of uses of an address node */
#define get_irn_n_uses(adr)     get_irn_n_edges(adr)

/**
 * walker, collects all Load/Store/Proj nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
{
	ir_op       *op = get_irn_op(node);
	ir_node     *pred, *blk, *pred_blk;
	ldst_info_t *ldst_info;
	walk_env_t  *wenv = env;

	if (op == op_Proj) {
		ir_node *adr;
		ir_op *op;

		pred = get_Proj_pred(node);
		op   = get_irn_op(pred);

		if (op == op_Load) {
			ldst_info = get_ldst_info(pred, wenv);

			wenv->changes |= update_projs(ldst_info, node);

			if ((ldst_info->flags & LDST_VISITED) == 0) {
				adr = get_Load_ptr(pred);
				ldst_info->flags |= LDST_VISITED;
			}

			/*
			 * Place the Proj in the same block as its
			 * predecessor Load. This is always legal and prevents
			 * "non-SSA" form after optimizations if the Proj
			 * is in a wrong block.
			 */
			blk      = get_nodes_block(node);
			pred_blk = get_nodes_block(pred);
			if (blk != pred_blk) {
				wenv->changes |= DF_CHANGED;
				set_nodes_block(node, pred_blk);
			}
		} else if (op == op_Store) {
			ldst_info = get_ldst_info(pred, wenv);

			wenv->changes |= update_projs(ldst_info, node);

			if ((ldst_info->flags & LDST_VISITED) == 0) {
				adr = get_Store_ptr(pred);
				ldst_info->flags |= LDST_VISITED;
			}

			/*
			 * Place the Proj in the same block as its
			 * predecessor Store. This is always legal and prevents
			 * "non-SSA" form after optimizations if the Proj
			 * is in a wrong block.
			 */
			blk      = get_nodes_block(node);
			pred_blk = get_nodes_block(pred);
			if (blk != pred_blk) {
				wenv->changes |= DF_CHANGED;
				set_nodes_block(node, pred_blk);
			}
		}
	} else if (op == op_Block) {
		int i;

		for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
			ir_node      *pred_block, *proj;
			block_info_t *bl_info;
			int          is_exc = 0;

			pred = proj = get_Block_cfgpred(node, i);

			if (is_Proj(proj)) {
				pred   = get_Proj_pred(proj);
				is_exc = get_Proj_proj(proj) == pn_Generic_X_except;
			}

			/* ignore Bad predecessors, they will be removed later */
			if (is_Bad(pred))
				continue;

			pred_block = get_nodes_block(pred);
			bl_info    = get_block_info(pred_block, wenv);

			if (is_fragile_op(pred) && is_exc)
				bl_info->flags |= BLOCK_HAS_EXC;
			else if (is_irn_forking(pred))
				bl_info->flags |= BLOCK_HAS_COND;

			if (is_exc && (get_irn_op(pred) == op_Load || get_irn_op(pred) == op_Store)) {
				ldst_info = get_ldst_info(pred, wenv);

				wenv->changes |= update_exc(ldst_info, node, i);
			}
		}
	}
}  /* collect_nodes */

/**
 * Returns the entity if the address ptr points to a constant entity.
 *
 * @param ptr  the address
 *
 * @return an entity or NULL
 */
static ir_entity *find_constant_entity(ir_node *ptr)
{
	for (;;) {
		ir_op *op = get_irn_op(ptr);

		if (op == op_SymConst && (get_SymConst_kind(ptr) == symconst_addr_ent)) {
			ir_entity *ent = get_SymConst_entity(ptr);
			if (variability_constant == get_entity_variability(ent))
				return ent;
			return NULL;
		} else if (op == op_Sel) {
			ir_entity *ent = get_Sel_entity(ptr);
			ir_type   *tp  = get_entity_owner(ent);

			/* Do not fiddle with polymorphism. */
			if (is_Class_type(get_entity_owner(ent)) &&
				((get_entity_n_overwrites(ent)    != 0) ||
				(get_entity_n_overwrittenby(ent) != 0)   ) )
				return NULL;

			if (is_Array_type(tp)) {
				/* check bounds */
				int i, n;

				for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
					ir_node *bound;
					tarval *tlower, *tupper;
					ir_node *index = get_Sel_index(ptr, i);
					tarval *tv     = computed_value(index);

					/* check if the index is constant */
					if (tv == tarval_bad)
						return NULL;

					bound  = get_array_lower_bound(tp, i);
					tlower = computed_value(bound);
					bound  = get_array_upper_bound(tp, i);
					tupper = computed_value(bound);

					if (tlower == tarval_bad || tupper == tarval_bad)
						return NULL;

					if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
						return NULL;
					if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
						return NULL;

					/* ok, bounds check finished */
				}
			}

			if (variability_constant == get_entity_variability(ent))
				return ent;

			/* try the next pointer */
			ptr = get_Sel_ptr(ptr);
		} else
			return NULL;
	}
}  /* find_constant_entity */
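
/*
 * Illustrative example (editor's sketch, not from the original source):
 * for a C-like source
 *
 *     static const int tab[4] = { 1, 2, 3, 4 };
 *     ... tab[2] ...
 *
 * the address is a Sel(SymConst(tab), Const 2); the index is constant and
 * within bounds, so find_constant_entity() returns the entity of tab.
 */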

/**
 * Returns the array index of a Sel node in dimension dim as a long.
 */
static long get_Sel_array_index_long(ir_node *n, int dim) {
	ir_node *index = get_Sel_index(n, dim);
	assert(get_irn_op(index) == op_Const);
	return get_tarval_long(get_Const_tarval(index));
}  /* get_Sel_array_index_long */

/**
 * Returns the accessed component graph path for a
 * node computing an address.
 *
 * @param ptr    the node computing the address
 * @param depth  current depth in steps upward from the root
 *               of the address
 */
static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
	compound_graph_path *res = NULL;
	ir_entity           *root, *field;
	int                 path_len, pos;

	if (get_irn_op(ptr) == op_SymConst) {
		/* a SymConst. If the depth is 0, this is an access to a global
		 * entity and we don't need a component path, else we at least
		 * know its length.
		 */
		assert(get_SymConst_kind(ptr) == symconst_addr_ent);
		root = get_SymConst_entity(ptr);
		res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
	} else {
		assert(get_irn_op(ptr) == op_Sel);
		/* it's a Sel, go up until we find the root */
		res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);

		/* fill up the step in the path at the current position */
		field    = get_Sel_entity(ptr);
		path_len = get_compound_graph_path_length(res);
		pos      = path_len - depth - 1;
		set_compound_graph_path_node(res, pos, field);

		if (is_Array_type(get_entity_owner(field))) {
			assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
			set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
		}
	}
	return res;
}  /* rec_get_accessed_path */

/** Returns an access path or NULL.  The access path is valid only
 *  if the graph is in phase_high and _no_ address computation is used.
 */
static compound_graph_path *get_accessed_path(ir_node *ptr) {
	return rec_get_accessed_path(ptr, 0);
}  /* get_accessed_path */
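
/*
 * Illustrative example (editor's sketch, not from the original source):
 * for an access like s.a[3].b the address is a chain of Sels rooted in a
 * SymConst(s).  Each Sel contributes one entity to the compound graph
 * path, and where the owner of that entity is an array type, the constant
 * array index is recorded alongside it, yielding roughly (a, [3], b).
 */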

/* forward */
static void reduce_adr_usage(ir_node *ptr);

/**
 * Update a Load that may have lost its uses.
 */
static void handle_load_update(ir_node *load) {
	ldst_info_t *info = get_irn_link(load);

	/* do NOT touch volatile loads for now */
	if (get_Load_volatility(load) == volatility_is_volatile)
		return;

	if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
		ir_node *ptr = get_Load_ptr(load);
		ir_node *mem = get_Load_mem(load);

		/* a Load whose value is neither used nor checked for exceptions, remove it */
		exchange(info->projs[pn_Load_M], mem);
		exchange(load, new_Bad());
		reduce_adr_usage(ptr);
	}
}  /* handle_load_update */

/**
 * A use of an address node has vanished. Check if this was a Proj
 * node and update the counters.
 */
static void reduce_adr_usage(ir_node *ptr) {
	if (is_Proj(ptr)) {
		if (get_irn_n_edges(ptr) <= 0) {
			/* this Proj is dead now */
			ir_node *pred = get_Proj_pred(ptr);

			if (is_Load(pred)) {
				ldst_info_t *info = get_irn_link(pred);
				info->projs[get_Proj_proj(ptr)] = NULL;

				/* this node lost its result Proj, handle that */
				handle_load_update(pred);
			}
		}
	}
}  /* reduce_adr_usage */

/**
 * Check whether an already existing value of mode old_mode can be converted
 * into the needed mode new_mode without loss.
 */
static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode) {
	if (old_mode == new_mode)
		return 1;

	/* if both modes are two's complement ones, we can always convert the
	   stored value into the needed one. */
	if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
		  get_mode_arithmetic(old_mode) == irma_twos_complement &&
		  get_mode_arithmetic(new_mode) == irma_twos_complement)
		return 1;
	return 0;
}  /* can_use_stored_value */
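
/*
 * Illustrative example (editor's sketch, not from the original source):
 * a value stored with 32-bit signed mode Is can feed a same-size load of
 * unsigned mode Iu, since both are two's complement of equal size:
 *
 *     int *p = ...;
 *     *p = x;                        // Store, mode Is
 *     unsigned u = *(unsigned *)p;   // Load, mode Iu -> may reuse x
 *
 * A narrower two's-complement load is accepted as well; the caller then
 * inserts a truncating Conv (see follow_Mem_chain()).
 */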

/**
 * Follow the memory chain as long as there are only Loads
 * and alias-free Stores and try to replace the current Load or Store
 * by a previous one.
 * Note that in unreachable loops it might happen that we reach
 * the same Load again, and we can also run into a cycle.
 * We break such cycles using a special visited flag.
 *
 * INC_MASTER() must be called before diving into the chain.
 */
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
	unsigned res = 0;
	ldst_info_t *info = get_irn_link(load);
	ir_node *pred;
	ir_node *ptr       = get_Load_ptr(load);
	ir_node *mem       = get_Load_mem(load);
	ir_mode *load_mode = get_Load_mode(load);
	for (pred = curr; load != pred; ) {
		ldst_info_t *pred_info = get_irn_link(pred);

		/*
		 * BEWARE: one might think that checking the modes is useless, because
		 * if the pointers are identical, they refer to the same object.
		 * This is only true in strongly typed languages, not in C, where the
		 * following is possible: a = *(ir_type1 *)p; b = *(ir_type2 *)p ...
		 */
		if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
		    can_use_stored_value(get_irn_mode(get_Store_value(pred)), load_mode)) {
			/*
			 * a Load immediately after a Store -- a read after write.
			 * We may remove the Load if neither the Load nor the Store has an
			 * exception handler OR they are in the same block. In the latter
			 * case the Load cannot throw an exception if the previous Store
			 * did not.
			 *
			 * Why do we need to check for a Store exception? If the Store
			 * cannot be executed (e.g. it writes to ROM), the exception
			 * handler might simply jump into the Load's block :-(
			 * We could make it a little bit better if we knew that the
			 * exception handler of the Store jumps directly to the end...
			 */
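			/*
			 * Illustrative source-level example (editor's sketch, not from
			 * the original source):
			 *
			 *     *p = x;    // Store
			 *     y  = *p;   // Load -> replaced by y = x;
			 *
			 * The Load's result Proj is rewired to the stored value and its
			 * memory Proj to the Store's memory.
			 */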
			if ((pred_info->projs[pn_Store_X_except] == NULL && info->projs[pn_Load_X_except] == NULL) ||
			    get_nodes_block(load) == get_nodes_block(pred)) {
				ir_node *value = get_Store_value(pred);

				DBG_OPT_RAW(load, value);

				/* add a Conv if needed */
				if (get_irn_mode(get_Store_value(pred)) != load_mode) {
					value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
				}

				if (info->projs[pn_Load_M])
					exchange(info->projs[pn_Load_M], mem);

				/* no exception */
				if (info->projs[pn_Load_X_except]) {
					exchange(info->projs[pn_Load_X_except], new_Bad());
					res |= CF_CHANGED;
				}

				if (info->projs[pn_Load_res])
					exchange(info->projs[pn_Load_res], value);

				exchange(load, new_Bad());
				reduce_adr_usage(ptr);
				return res | DF_CHANGED;
			}
		} else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
		           can_use_stored_value(get_Load_mode(pred), load_mode)) {
			/*
			 * a Load after a Load -- a read after read.
			 * We may remove the second Load if it does not have an exception
			 * handler OR they are in the same block. In the latter case the
			 * Load cannot throw an exception if the previous Load did not.
			 *
			 * Here there is no need to check whether the previous Load has an
			 * exception handler, because both would raise exactly the same
			 * exception...
			 */
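			/*
			 * Illustrative source-level example (editor's sketch, not from
			 * the original source):
			 *
			 *     a = *p;    // first Load
			 *     b = *p;    // second Load -> replaced by b = a;
			 *
			 * The second Load's result Proj is rewired to the first Load's
			 * result (recreating that Proj if necessary).
			 */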
			if (info->projs[pn_Load_X_except] == NULL || get_nodes_block(load) == get_nodes_block(pred)) {
				ir_node *value;

				DBG_OPT_RAR(load, pred);

				/* the result is used */
				if (info->projs[pn_Load_res]) {
					if (pred_info->projs[pn_Load_res] == NULL) {
						/* create a new Proj again */
						pred_info->projs[pn_Load_res] = new_r_Proj(current_ir_graph, get_nodes_block(pred), pred, get_Load_mode(pred), pn_Load_res);
					}
					value = pred_info->projs[pn_Load_res];

					/* add a Conv if needed */
					if (get_Load_mode(pred) != load_mode) {
						value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
					}

					exchange(info->projs[pn_Load_res], value);
				}

				if (info->projs[pn_Load_M])
					exchange(info->projs[pn_Load_M], mem);

				/* no exception */
				if (info->projs[pn_Load_X_except]) {
					exchange(info->projs[pn_Load_X_except], new_Bad());
					res |= CF_CHANGED;
				}

				exchange(load, new_Bad());
				reduce_adr_usage(ptr);
				return res | DF_CHANGED;
			}
		}

		if (get_irn_op(pred) == op_Store) {
			/* check if we can pass through this store */
			ir_alias_relation rel = get_alias_relation(
				current_ir_graph,
				get_Store_ptr(pred),
				get_irn_mode(get_Store_value(pred)),
				ptr, load_mode);
			/* if there might be an alias, we cannot pass this Store */
			if (rel != no_alias)
				break;
			pred = skip_Proj(get_Store_mem(pred));
		} else if (get_irn_op(pred) == op_Load) {
			pred = skip_Proj(get_Load_mem(pred));
		} else {
			/* follow only chains of Loads and Stores */
			break;
		}

		/* check for cycles */
		if (NODE_VISITED(pred_info))
			break;
		MARK_NODE(pred_info);
	}

	if (get_irn_op(pred) == op_Sync) {
		int i;

		/* handle all Sync predecessors */
		for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
			res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
			if (res)
				break;
		}
	}

	return res;
}  /* follow_Mem_chain */

/**
 * optimize a Load
 *
 * @param load  the Load node
 */
static unsigned optimize_load(ir_node *load)
{
	ldst_info_t *info = get_irn_link(load);
	ir_node *mem, *ptr, *new_node;
	ir_entity *ent;
	unsigned res = 0;

	/* do NOT touch volatile loads for now */
	if (get_Load_volatility(load) == volatility_is_volatile)
		return 0;

	/* the address of the load to be optimized */
	ptr = get_Load_ptr(load);

	/*
	 * Check if we can remove the exception from a Load:
	 * This can be done, if the address is from a Sel(Alloc) and
	 * the Sel type is a subtype of the allocated type.
	 *
	 * This optimizes some often used OO constructs,
	 * like x = new O; x->t;
	 */
	if (info->projs[pn_Load_X_except]) {
		if (is_Sel(ptr)) {
			ir_node *mem = get_Sel_mem(ptr);

			/* FIXME: works with the current FE, but better use the base */
			if (get_irn_op(skip_Proj(mem)) == op_Alloc) {
				/* ok, check the types */
				ir_entity *ent    = get_Sel_entity(ptr);
				ir_type   *s_type = get_entity_type(ent);
				ir_type   *a_type = get_Alloc_type(mem);

				if (is_SubClass_of(s_type, a_type)) {
					/* ok, condition met: there can't be an exception because
					 * Alloc guarantees that enough memory was allocated */

					exchange(info->projs[pn_Load_X_except], new_Bad());
					info->projs[pn_Load_X_except] = NULL;
					res |= CF_CHANGED;
				}
			}
		} else if ((get_irn_op(skip_Proj(ptr)) == op_Alloc) ||
			((get_irn_op(ptr) == op_Cast) && (get_irn_op(skip_Proj(get_Cast_op(ptr))) == op_Alloc))) {
				/* simple case: a direct load after an Alloc. A Firm Alloc throws
				 * an exception in case of out-of-memory, so there is no way this
				 * load can raise one.
				 * This code is constructed by the "exception lowering" in the Jack compiler.
				 */
				exchange(info->projs[pn_Load_X_except], new_Bad());
				info->projs[pn_Load_X_except] = NULL;
				res |= CF_CHANGED;
		}
	}

	/* The mem of the Load. Must still be returned after optimization. */
	mem  = get_Load_mem(load);

	if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
		/* a Load whose value is neither used nor checked for exceptions, remove it */
		exchange(info->projs[pn_Load_M], mem);

		exchange(load, new_Bad());
		reduce_adr_usage(ptr);
		return res | DF_CHANGED;
	}

	/* Load from a constant polymorphic field, where we can resolve
	   polymorphism. */
	new_node = transform_node_Load(load);
	if (new_node != load) {
		if (info->projs[pn_Load_M]) {
			exchange(info->projs[pn_Load_M], mem);
			info->projs[pn_Load_M] = NULL;
		}
		if (info->projs[pn_Load_X_except]) {
			exchange(info->projs[pn_Load_X_except], new_Bad());
			info->projs[pn_Load_X_except] = NULL;
		}
		if (info->projs[pn_Load_res])
			exchange(info->projs[pn_Load_res], new_node);

		exchange(load, new_Bad());
		reduce_adr_usage(ptr);
		return res | DF_CHANGED;
	}

	/* check if we can determine the entity that will be loaded */
	ent = find_constant_entity(ptr);
	if (ent) {
		if ((allocation_static == get_entity_allocation(ent)) &&
			(visibility_external_allocated != get_entity_visibility(ent))) {
			/* a static allocation that is not external: there should be NO
			 * exception when loading. */

			/* no exception, clear the info field as it might be checked later again */
			if (info->projs[pn_Load_X_except]) {
				exchange(info->projs[pn_Load_X_except], new_Bad());
				info->projs[pn_Load_X_except] = NULL;
				res |= CF_CHANGED;
			}

			if (variability_constant == get_entity_variability(ent)
				&& is_atomic_entity(ent)) {
				/* Might not be atomic after lowering of Sels.  In this case
				 * we could also load, but it's more complicated. */
				/* the simpler case: we load the content of a constant value,
				 * so we can replace the Load by the constant itself
				 */

				/* no memory */
				if (info->projs[pn_Load_M]) {
					exchange(info->projs[pn_Load_M], mem);
					res |= DF_CHANGED;
				}
				/* no result :-) */
				if (info->projs[pn_Load_res]) {
					if (is_atomic_entity(ent)) {
						ir_node *c = copy_const_value(get_irn_dbg_info(load), get_atomic_ent_value(ent));

						DBG_OPT_RC(load, c);
						exchange(info->projs[pn_Load_res], c);
						res |= DF_CHANGED;
					}
				}
				exchange(load, new_Bad());
				reduce_adr_usage(ptr);
				return res;
			} else if (variability_constant == get_entity_variability(ent)) {
				compound_graph_path *path = get_accessed_path(ptr);

				if (path) {
					ir_node *c;

					assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
					/*
					{
						int j;
						for (j = 0; j < get_compound_graph_path_length(path); ++j) {
							ir_entity *node = get_compound_graph_path_node(path, j);
							fprintf(stdout, ".%s", get_entity_name(node));
							if (is_Array_type(get_entity_owner(node)))
								fprintf(stdout, "[%d]", get_compound_graph_path_array_index(path, j));
						}
						printf("\n");
					}
					*/

					c = get_compound_ent_value_by_path(ent, path);
					free_compound_graph_path(path);

					/* printf("  cons: "); DDMN(c); */

					if (info->projs[pn_Load_M]) {
						exchange(info->projs[pn_Load_M], mem);
						res |= DF_CHANGED;
					}
					if (info->projs[pn_Load_res]) {
						exchange(info->projs[pn_Load_res], copy_const_value(get_irn_dbg_info(load), c));
						res |= DF_CHANGED;
					}
					exchange(load, new_Bad());
					reduce_adr_usage(ptr);
					return res;
				} else {
					/* We cannot determine a correct access path.  E.g., in Jack,
					 * we load a byte from an object to generate an exception.
					 * Happens in the test program Reflectiontest.
					 *
					 * printf(">>>>>>>>>>>>> Found access to constant entity %s in function %s\n", get_entity_name(ent),
					 *   get_entity_name(get_irg_entity(current_ir_graph)));
					 * printf("  load: "); DDMN(load);
					 * printf("  ptr:  "); DDMN(ptr);
					 */
				}
			}
		}
	}

	/* Check if the address of this load is used more than once.
	 * If not, this load cannot be removed in any case. */
	if (get_irn_n_uses(ptr) <= 1)
		return res;

	/*
	 * follow the memory chain as long as there are only Loads
	 * and try to replace the current Load or Store by a previous one.
	 * Note that in unreachable loops it might happen that we reach
	 * the same Load again, and we can also run into a cycle.
	 * We break such cycles using a special visited flag.
	 */
	INC_MASTER();
	res = follow_Mem_chain(load, skip_Proj(mem));
	return res;
}  /* optimize_load */

/**
 * Check whether a value of mode new_mode would completely overwrite a value
 * of mode old_mode in memory.
 */
static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
{
	return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
}  /* is_completely_overwritten */
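
/*
 * Illustrative example (editor's sketch, not from the original source):
 * a 16-bit store followed by a 32-bit store to the same address is dead,
 * because the wider store covers all bits of the narrower one:
 *
 *     *(short *)p = a;   // 16 bit -- completely overwritten below
 *     *(int   *)p = b;   // 32 bit -> the first Store can be removed
 */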

/**
 * follow the memory chain as long as there are only Loads and alias-free Stores.
 *
 * INC_MASTER() must be called before diving into the chain.
 */
static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
	unsigned res = 0;
	ldst_info_t *info = get_irn_link(store);
	ir_node *pred;
	ir_node *ptr = get_Store_ptr(store);
	ir_node *mem = get_Store_mem(store);
	ir_node *value = get_Store_value(store);
	ir_mode *mode  = get_irn_mode(value);
	ir_node *block = get_nodes_block(store);

	for (pred = curr; pred != store;) {
		ldst_info_t *pred_info = get_irn_link(pred);

		/*
		 * BEWARE: one might think that checking the modes is useless, because
		 * if the pointers are identical, they refer to the same object.
		 * This is only true in strongly typed languages, not in C, where the
		 * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
		 * However, if the mode that is written has a size greater than or
		 * equal to the old one, the old value is completely overwritten and
		 * can be killed ...
		 */
		if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
		    get_nodes_block(pred) == block &&
		    is_completely_overwritten(get_irn_mode(get_Store_value(pred)), mode)) {
			/*
			 * a Store after a Store in the same block -- a write after write.
			 * We may remove the first Store if it does not have an exception handler.
			 *
			 * TODO: What if both have the same exception handler?
			 */
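			/*
			 * Illustrative source-level example (editor's sketch, not from
			 * the original source):
			 *
			 *     *p = a;    // first Store -> removed
			 *     *p = b;    // second Store survives
			 */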
			if (get_Store_volatility(pred) != volatility_is_volatile && !pred_info->projs[pn_Store_X_except]) {
				DBG_OPT_WAW(pred, store);
				exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
				exchange(pred, new_Bad());
				reduce_adr_usage(ptr);
				return DF_CHANGED;
			}
		} else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
		           value == pred_info->projs[pn_Load_res]) {
			/*
			 * a Store of a value just loaded from the same address -- a write
			 * after read. We may remove the Store if it does not have an
			 * exception handler.
			 */
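			/*
			 * Illustrative source-level example (editor's sketch, not from
			 * the original source):
			 *
			 *     a  = *p;   // Load
			 *     *p = a;    // writes back the unchanged value -> removed
			 */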
			if (! info->projs[pn_Store_X_except]) {
				DBG_OPT_WAR(store, pred);
				exchange(info->projs[pn_Store_M], mem);
				exchange(store, new_Bad());
				reduce_adr_usage(ptr);
				return DF_CHANGED;
			}
		}

		if (get_irn_op(pred) == op_Store) {
			/* check if we can pass through this store */
			ir_alias_relation rel = get_alias_relation(
				current_ir_graph,
				get_Store_ptr(pred),
				get_irn_mode(get_Store_value(pred)),
				ptr, mode);
			/* if there might be an alias, we cannot pass this Store */
			if (rel != no_alias)
				break;
			pred = skip_Proj(get_Store_mem(pred));
		} else if (get_irn_op(pred) == op_Load) {
			pred = skip_Proj(get_Load_mem(pred));
		} else {
			/* follow only chains of Loads and Stores */
			break;
		}

		/* check for cycles */
		if (NODE_VISITED(pred_info))
			break;
		MARK_NODE(pred_info);
	}

	if (get_irn_op(pred) == op_Sync) {
		int i;

		/* handle all Sync predecessors */
		for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
			res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
			if (res)
				break;
		}
	}
	return res;
}  /* follow_Mem_chain_for_Store */

/**
 * optimize a Store
 *
 * @param store  the Store node
 */
static unsigned optimize_store(ir_node *store) {
	ir_node *ptr, *mem;

	if (get_Store_volatility(store) == volatility_is_volatile)
		return 0;

	ptr = get_Store_ptr(store);

	/* Check if the address of this Store is used more than once.
	 * If not, this Store cannot be removed in any case. */
	if (get_irn_n_uses(ptr) <= 1)
		return 0;

	mem = get_Store_mem(store);

	/* follow the memory chain as long as there are only Loads and alias-free Stores */
	INC_MASTER();
	return follow_Mem_chain_for_Store(store, skip_Proj(mem));
}  /* optimize_store */

/**
 * walker, optimizes Phi after Stores to identical places:
 * Does the following optimization:
 * @verbatim
 *
 *   val1   val2   val3          val1  val2  val3
 *    |      |      |               \    |    /
 *  Store  Store  Store              \   |   /
 *      \    |    /                   PhiData
 *       \   |   /                       |
 *        \  |  /                      Store
 *          PhiM
 *
 * @endverbatim
 * This reduces the number of stores and allows for predicated execution.
 * It moves Stores back towards the end of a function, which may be bad.
 *
 * This is only possible if the predecessor blocks have only one successor.
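 *
 * Illustrative source-level example (editor's sketch, not from the
 * original source): with the Stores sunk behind a data Phi,
 *
 *    if (c) *p = a; else *p = b;
 *
 * effectively becomes *p = (c ? a : b); -- one Store of a PhiData value
 * instead of one Store per branch.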
 */
static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
{
	int i, n;
	ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
	ir_mode *mode;
	ir_node **inM, **inD, **projMs;
	int *idx;
	dbg_info *db = NULL;
	ldst_info_t *info;
	block_info_t *bl_info;
	unsigned res = 0;

	/* Must be a memory Phi */
	if (get_irn_mode(phi) != mode_M)
		return 0;

	n = get_Phi_n_preds(phi);
	if (n <= 0)
		return 0;

	/* must be only one user */
	projM = get_Phi_pred(phi, 0);
	if (get_irn_n_edges(projM) != 1)
		return 0;

	store = skip_Proj(projM);
	old_store = store;
	if (get_irn_op(store) != op_Store)
		return 0;

	block = get_nodes_block(store);

	/* abort on dead blocks */
	if (is_Block_dead(block))
		return 0;

	/* check if the block is post-dominated by the Phi-block
	   and has no exception exit */
	bl_info = get_irn_link(block);
	if (bl_info->flags & BLOCK_HAS_EXC)
		return 0;

	phi_block = get_nodes_block(phi);
	if (! block_strictly_postdominates(phi_block, block))
		return 0;

	/* this is the address of the store */
	ptr  = get_Store_ptr(store);
	mode = get_irn_mode(get_Store_value(store));
	info = get_irn_link(store);
	exc  = info->exc_block;

	for (i = 1; i < n; ++i) {
		ir_node *pred = get_Phi_pred(phi, i);

		if (get_irn_n_edges(pred) != 1)
			return 0;

		pred = skip_Proj(pred);
		if (get_irn_op(pred) != op_Store)
			return 0;

		if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
			return 0;

		info = get_irn_link(pred);

		/* check if all stores have the same exception flow */
		if (exc != info->exc_block)
			return 0;

		/* abort on dead blocks */
		block = get_nodes_block(pred);
		if (is_Block_dead(block))
			return 0;

		/* check if the block is post-dominated by the Phi-block
		   and has no exception exit. Note that block must be different from
		   the Phi-block, else we would move a Store from the End of a block
		   to its Start... */
		bl_info = get_irn_link(block);
		if (bl_info->flags & BLOCK_HAS_EXC)
			return 0;
		if (block == phi_block || ! block_postdominates(phi_block, block))
			return 0;
	}

	/*
	 * ok, when we are here, we found all predecessors of a Phi that
	 * are Stores to the same address and size. That means whatever
	 * path we take into the block of the Phi, we do a Store.
	 * So, we can move the Store to the current block:
	 *
	 *   val1    val2    val3          val1  val2  val3
	 *    |       |       |               \    |    /
	 * | Str | | Str | | Str |             \   |   /
	 *      \     |     /                   PhiData
	 *       \    |    /                       |
	 *        \   |   /                       Str
	 *           PhiM
	 *
	 * This is only allowed if the predecessor blocks have only one successor.
	 */

	NEW_ARR_A(ir_node *, projMs, n);
	NEW_ARR_A(ir_node *, inM, n);
	NEW_ARR_A(ir_node *, inD, n);
	NEW_ARR_A(int, idx, n);

	/* Prepare: Collect all Store nodes.  We must do this
	   first because we otherwise may lose a Store when exchanging its
	   memory Proj.
	 */
	for (i = n - 1; i >= 0; --i) {
		ir_node *store;

		projMs[i] = get_Phi_pred(phi, i);
		assert(is_Proj(projMs[i]));

		store = get_Proj_pred(projMs[i]);
		info  = get_irn_link(store);

		inM[i] = get_Store_mem(store);
		inD[i] = get_Store_value(store);
		idx[i] = info->exc_idx;
	}
	block = get_nodes_block(phi);

	/* second step: create a new memory Phi */
	phiM = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inM, mode_M);

	/* third step: create a new data Phi */
	phiD = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inD, mode);

	/* rewire memory and kill the old Stores */
	for (i = n - 1; i >= 0; --i) {
		ir_node *proj  = projMs[i];

		if (is_Proj(proj)) {
			ir_node *store = get_Proj_pred(proj);
			exchange(proj, inM[i]);
			kill_node(store);
		}
	}

	/* fourth step: create the Store */
	store = new_rd_Store(db, current_ir_graph, block, phiM, ptr, phiD);
#ifdef DO_CACHEOPT
	co_set_irn_name(store, co_get_irn_ident(old_store));
#endif

	projM = new_rd_Proj(NULL, current_ir_graph, block, store, mode_M, pn_Store_M);

	info = get_ldst_info(store, wenv);
	info->projs[pn_Store_M] = projM;

	/* fifth step: repair the exception flow */
	if (exc) {
		ir_node *projX = new_rd_Proj(NULL, current_ir_graph, block, store, mode_X, pn_Store_X_except);

		info->projs[pn_Store_X_except] = projX;
		info->exc_block                = exc;
		info->exc_idx                  = idx[0];

		for (i = 0; i < n; ++i) {
			set_Block_cfgpred(exc, idx[i], projX);
		}

		if (n > 1) {
			/* the exception block should be optimized as some inputs are identical now */
		}

		res |= CF_CHANGED;
	}

	/* sixth step: replace the old Phi */
	exchange(phi, projM);

	return res | DF_CHANGED;
}  /* optimize_phi */

/**
 * walker, do the optimizations
 */
static void do_load_store_optimize(ir_node *n, void *env) {
	walk_env_t *wenv = env;

	switch (get_irn_opcode(n)) {

	case iro_Load:
		wenv->changes |= optimize_load(n);
		break;

	case iro_Store:
		wenv->changes |= optimize_store(n);
		break;

	case iro_Phi:
		wenv->changes |= optimize_phi(n, wenv);
		break;

	default:
		;
	}
}  /* do_load_store_optimize */

/*
 * do the load store optimization
 */
void optimize_load_store(ir_graph *irg) {
	walk_env_t env;

	assert(get_irg_phase_state(irg) != phase_building);
	assert(get_irg_pinned(irg) != op_pin_state_floats &&
		"LoadStore optimization needs pinned graph");

	if (! get_opt_redundant_loadstore())
		return;

	edges_assure(irg);

	/* for Phi optimization post-dominators are needed ... */
	assure_postdoms(irg);

	if (get_opt_alias_analysis()) {
		assure_irg_address_taken_computed(irg);
		assure_irp_globals_address_taken_computed();
	}

	obstack_init(&env.obst);
	env.changes = 0;

	/* init the links, then collect Loads/Stores/Proj's in lists */
	master_visited = 0;
	irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);

	/* now we have collected enough information, optimize */
	irg_walk_graph(irg, NULL, do_load_store_optimize, &env);

	obstack_free(&env.obst, NULL);

	/* Handle graph state */
	if (env.changes) {
		set_irg_outs_inconsistent(irg);
	}

	if (env.changes & CF_CHANGED) {
		/* control flow changed: blocks might have Bad() predecessors now,
		   so the dominance information is no longer valid */
		set_irg_doms_inconsistent(irg);
	}
}  /* optimize_load_store */
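
/*
 * Usage sketch (illustrative, not from the original source): the pass is
 * typically run once per pinned graph, e.g. from a driver loop over the
 * program's graphs:
 *
 *     int i;
 *     for (i = get_irp_n_irgs() - 1; i >= 0; --i)
 *         optimize_load_store(get_irp_irg(i));
 */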