/*
 * Copyright (C) 1995-2007 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/*
 * Project:     libFIRM
 * File name:   ir/opt/ldstopt.c
 * Purpose:     load store optimizations
 * Author:      Michael Beck
 * Created:
 * CVS-ID:      $Id$
 * Copyright:   (c) 1998-2007 Universität Karlsruhe
 */
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#ifdef HAVE_STRING_H
# include <string.h>
#endif

#include "irnode_t.h"
#include "irgraph_t.h"
#include "irmode_t.h"
#include "iropt_t.h"
#include "ircons_t.h"
#include "irgmod.h"
#include "irgwalk.h"
#include "irvrfy.h"
#include "tv_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "irflag_t.h"
#include "array.h"
#include "irhooks.h"
#include "iredges.h"
#include "irtools.h"
#include "opt_polymorphy.h"
#include "irmemory.h"
#include "xmalloc.h"

#ifdef DO_CACHEOPT
#include "cacheopt/cachesim.h"
#endif

#undef IMAX
#define IMAX(a,b)       ((a) > (b) ? (a) : (b))

#define MAX_PROJ        IMAX(pn_Load_max, pn_Store_max)

enum changes_t {
        DF_CHANGED = 1,       /**< data flow changed */
        CF_CHANGED = 2,       /**< control flow changed */
};

/**
 * walker environment
 */
typedef struct _walk_env_t {
        struct obstack obst;          /**< list of all stores */
        unsigned changes;             /**< a bitmask of graph changes */
} walk_env_t;

/**
 * flags for Load/Store
 */
enum ldst_flags_t {
        LDST_VISITED = 1              /**< if set, this Load/Store is already visited */
};

/** A Load/Store info. */
typedef struct _ldst_info_t {
        ir_node  *projs[MAX_PROJ];    /**< list of Proj's of this node */
        ir_node  *exc_block;          /**< the exception block if available */
        int      exc_idx;             /**< predecessor index in the exception block */
        unsigned flags;               /**< flags */
        unsigned visited;             /**< visited counter for breaking loops */
} ldst_info_t;

/**
 * flags for control flow.
 */
enum block_flags_t {
        BLOCK_HAS_COND = 1,      /**< Block has conditional control flow */
        BLOCK_HAS_EXC  = 2       /**< Block has exceptional control flow */
};

/**
 * a Block info.
 */
typedef struct _block_info_t {
        unsigned flags;               /**< flags for the block */
} block_info_t;

/** the master visited flag for loop detection. */
static unsigned master_visited = 0;

#define INC_MASTER()       ++master_visited
#define MARK_NODE(info)    (info)->visited = master_visited
#define NODE_VISITED(info) (info)->visited >= master_visited
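
/*
 * Usage sketch for these macros (this mirrors how follow_Mem_chain below
 * uses them): bump the master counter once per walk, mark every visited
 * info and stop as soon as a node is seen again:
 *
 *     INC_MASTER();
 *     for (pred = curr; ...; ) {
 *             ...
 *             if (NODE_VISITED(pred_info))
 *                     break;              // cycle or already handled
 *             MARK_NODE(pred_info);
 *     }
 *
 * Since the counter only increases, the per-node visited fields never
 * need to be reset between walks.
 */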

/**
 * get the Load/Store info of a node
 */
static ldst_info_t *get_ldst_info(ir_node *node, walk_env_t *env) {
        ldst_info_t *info = get_irn_link(node);

        if (! info) {
                info = obstack_alloc(&env->obst, sizeof(*info));
                memset(info, 0, sizeof(*info));
                set_irn_link(node, info);
        }
        return info;
}  /* get_ldst_info */

/**
 * get the Block info of a node
 */
static block_info_t *get_block_info(ir_node *node, walk_env_t *env) {
        block_info_t *info = get_irn_link(node);

        if (! info) {
                info = obstack_alloc(&env->obst, sizeof(*info));
                memset(info, 0, sizeof(*info));
                set_irn_link(node, info);
        }
        return info;
}  /* get_block_info */

/**
 * update the projection info for a Load/Store
 */
static unsigned update_projs(ldst_info_t *info, ir_node *proj)
{
        long nr = get_Proj_proj(proj);

        assert(0 <= nr && nr <= MAX_PROJ && "Wrong proj from LoadStore");

        if (info->projs[nr]) {
                /* there is already one, do CSE */
                exchange(proj, info->projs[nr]);
                return DF_CHANGED;
        }
        else {
                info->projs[nr] = proj;
                return 0;
        }
}  /* update_projs */

/**
 * update the exception block info for a Load/Store node.
 *
 * @param info   the load/store info struct
 * @param block  the exception handler block for this load/store
 * @param pos    the control flow input of the block
 */
static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
{
        assert(info->exc_block == NULL && "more than one exception block found");

        info->exc_block = block;
        info->exc_idx   = pos;
        return 0;
}  /* update_exc */

/** Return the number of uses of an address node */
#define get_irn_n_uses(adr)     get_irn_n_edges(adr)

/**
 * walker, collects all Load/Store/Proj nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
{
        ir_op       *op = get_irn_op(node);
        ir_node     *pred, *blk, *pred_blk;
        ldst_info_t *ldst_info;
        walk_env_t  *wenv = env;

        if (op == op_Proj) {
                ir_node *adr;
                ir_op *op;

                pred = get_Proj_pred(node);
                op   = get_irn_op(pred);

                if (op == op_Load) {
                        ldst_info = get_ldst_info(pred, wenv);

                        wenv->changes |= update_projs(ldst_info, node);

                        if ((ldst_info->flags & LDST_VISITED) == 0) {
                                adr = get_Load_ptr(pred);
                                ldst_info->flags |= LDST_VISITED;
                        }

                        /*
                         * Place the Proj in the same block as its
                         * predecessor Load. This is always ok and prevents
                         * a "non-SSA" form after optimizations if the Proj
                         * is in a wrong block.
                         */
                        blk      = get_nodes_block(node);
                        pred_blk = get_nodes_block(pred);
                        if (blk != pred_blk) {
                                wenv->changes |= DF_CHANGED;
                                set_nodes_block(node, pred_blk);
                        }
                } else if (op == op_Store) {
                        ldst_info = get_ldst_info(pred, wenv);

                        wenv->changes |= update_projs(ldst_info, node);

                        if ((ldst_info->flags & LDST_VISITED) == 0) {
                                adr = get_Store_ptr(pred);
                                ldst_info->flags |= LDST_VISITED;
                        }

                        /*
                         * Place the Proj in the same block as its
                         * predecessor Store. This is always ok and prevents
                         * a "non-SSA" form after optimizations if the Proj
                         * is in a wrong block.
                         */
                        blk      = get_nodes_block(node);
                        pred_blk = get_nodes_block(pred);
                        if (blk != pred_blk) {
                                wenv->changes |= DF_CHANGED;
                                set_nodes_block(node, pred_blk);
                        }
                }
        } else if (op == op_Block) {
                int i;

                for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
                        ir_node      *pred_block;
                        block_info_t *bl_info;

                        pred = skip_Proj(get_Block_cfgpred(node, i));

                        /* ignore Bad predecessors, they will be removed later */
                        if (is_Bad(pred))
                                continue;

                        pred_block = get_nodes_block(pred);
                        bl_info    = get_block_info(pred_block, wenv);

                        if (is_fragile_op(pred))
                                bl_info->flags |= BLOCK_HAS_EXC;
                        else if (is_irn_forking(pred))
                                bl_info->flags |= BLOCK_HAS_COND;

                        if (get_irn_op(pred) == op_Load || get_irn_op(pred) == op_Store) {
                                ldst_info = get_ldst_info(pred, wenv);

                                wenv->changes |= update_exc(ldst_info, node, i);
                        }
                }
        }
}  /* collect_nodes */

/**
 * Returns an entity if the address ptr points to a constant one.
 *
 * @param ptr  the address
 *
 * @return an entity or NULL
 */
static ir_entity *find_constant_entity(ir_node *ptr)
{
        for (;;) {
                ir_op *op = get_irn_op(ptr);

                if (op == op_SymConst && (get_SymConst_kind(ptr) == symconst_addr_ent)) {
                        ir_entity *ent = get_SymConst_entity(ptr);
                        if (variability_constant == get_entity_variability(ent))
                                return ent;
                        return NULL;
                } else if (op == op_Sel) {
                        ir_entity *ent = get_Sel_entity(ptr);
                        ir_type   *tp  = get_entity_owner(ent);

                        /* Do not fiddle with polymorphism. */
                        if (is_Class_type(get_entity_owner(ent)) &&
                                ((get_entity_n_overwrites(ent)    != 0) ||
                                (get_entity_n_overwrittenby(ent) != 0)   ) )
                                return NULL;

                        if (is_Array_type(tp)) {
                                /* check bounds */
                                int i, n;

                                for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
                                        ir_node *bound;
                                        tarval *tlower, *tupper;
                                        ir_node *index = get_Sel_index(ptr, i);
                                        tarval *tv     = computed_value(index);

                                        /* check if the index is constant */
                                        if (tv == tarval_bad)
                                                return NULL;

                                        bound  = get_array_lower_bound(tp, i);
                                        tlower = computed_value(bound);
                                        bound  = get_array_upper_bound(tp, i);
                                        tupper = computed_value(bound);

                                        if (tlower == tarval_bad || tupper == tarval_bad)
                                                return NULL;

                                        if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
                                                return NULL;
                                        if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
                                                return NULL;

                                        /* ok, bounds check finished */
                                }
                        }

                        if (variability_constant == get_entity_variability(ent))
                                return ent;

                        /* try next */
                        ptr = get_Sel_ptr(ptr);
                } else
                        return NULL;
        }
}  /* find_constant_entity */
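
/*
 * Example (C-level view, illustration only): for
 *
 *     static const int tab[4] = { 1, 2, 3, 5 };
 *     ... x = tab[2];
 *
 * the address is Sel(SymConst(tab), Const 2): the index is constant and
 * within bounds, so find_constant_entity() returns the entity of tab and
 * optimize_load() below can fold the Load to the constant 3.
 */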

/**
 * Returns the constant array index of the Sel node n in dimension dim as a long.
 */
static long get_Sel_array_index_long(ir_node *n, int dim) {
        ir_node *index = get_Sel_index(n, dim);
        assert(get_irn_op(index) == op_Const);
        return get_tarval_long(get_Const_tarval(index));
}  /* get_Sel_array_index_long */

/**
 * Returns the accessed component graph path for a
 * node computing an address.
 *
 * @param ptr    the node computing the address
 * @param depth  current depth in steps upward from the root
 *               of the address
 */
static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
        compound_graph_path *res = NULL;
        ir_entity           *root, *field;
        int                 path_len, pos;

        if (get_irn_op(ptr) == op_SymConst) {
                /* a SymConst. If the depth is 0, this is an access to a global
                 * entity and we don't need a component path, else we know
                 * at least its length.
                 */
                assert(get_SymConst_kind(ptr) == symconst_addr_ent);
                root = get_SymConst_entity(ptr);
                res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
        } else {
                assert(get_irn_op(ptr) == op_Sel);
                /* it's a Sel, go up until we find the root */
                res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);

                /* fill up the step in the path at the current position */
                field    = get_Sel_entity(ptr);
                path_len = get_compound_graph_path_length(res);
                pos      = path_len - depth - 1;
                set_compound_graph_path_node(res, pos, field);

                if (is_Array_type(get_entity_owner(field))) {
                        assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
                        set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
                }
        }
        return res;
}  /* rec_get_accessed_path */

/** Returns an access path or NULL.  The access path is only
 *  valid if the graph is in phase_high and _no_ address computation is used.
 */
static compound_graph_path *get_accessed_path(ir_node *ptr) {
        return rec_get_accessed_path(ptr, 0);
}  /* get_accessed_path */
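
/*
 * Illustration (hypothetical source): for an access like s.a.arr[3] the
 * Sel chain above the SymConst of s is walked upwards and yields the
 * compound graph path .a.arr[3]; optimize_load() then feeds this path to
 * get_compound_ent_value_by_path() to fetch the constant value.
 */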

/* forward */
static void reduce_adr_usage(ir_node *ptr);

/**
 * Update a Load that may have lost its usage.
 */
static void handle_load_update(ir_node *load) {
        ldst_info_t *info = get_irn_link(load);

        /* do NOT touch volatile loads for now */
        if (get_Load_volatility(load) == volatility_is_volatile)
                return;

        if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
                ir_node *ptr = get_Load_ptr(load);
                ir_node *mem = get_Load_mem(load);

                /* a Load whose value is neither used nor exception-checked, remove it */
                exchange(info->projs[pn_Load_M], mem);
                exchange(load, new_Bad());
                reduce_adr_usage(ptr);
        }
}  /* handle_load_update */

/**
 * A use of an address node has vanished. Check if it was a Proj
 * node and update the counters.
 */
static void reduce_adr_usage(ir_node *ptr) {
        if (is_Proj(ptr)) {
                if (get_irn_n_edges(ptr) <= 0) {
                        /* this Proj is dead now */
                        ir_node *pred = get_Proj_pred(ptr);

                        if (is_Load(pred)) {
                                ldst_info_t *info = get_irn_link(pred);
                                info->projs[get_Proj_proj(ptr)] = NULL;

                                /* this node lost its result Proj, handle that */
                                handle_load_update(pred);
                        }
                }
        }
}  /* reduce_adr_usage */

/**
 * Check whether an already existing value of mode old_mode can be converted
 * into the needed mode new_mode without loss.
 */
static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode) {
        if (old_mode == new_mode)
                return 1;

        /* if both modes are two's complement ones, we can always convert the
           stored value into the needed one. */
        if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
                  get_mode_arithmetic(old_mode) == irma_twos_complement &&
                  get_mode_arithmetic(new_mode) == irma_twos_complement)
                return 1;
        return 0;
}  /* can_use_stored_value */
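
/*
 * Example (sketch): a 32-bit Store followed by a 16-bit Load from the same
 * address, roughly
 *
 *     *(int *)p = i;          // Store with a 32-bit mode
 *     s = *(short *)p;        // Load with a 16-bit mode
 *
 * qualifies if both modes use two's complement arithmetic: the stored
 * value merely needs a truncating Conv to the load mode.
 */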

/**
 * Follow the memory chain as long as there are only Loads
 * and alias-free Stores and try to replace the current Load or Store
 * by a previous one.
 * Note that in unreachable loops it might happen that we reach the
 * load again, or fall into a cycle.
 * We break such cycles using a special visited flag.
 *
 * INC_MASTER() must be called before diving into the chain.
 */
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
        unsigned res = 0;
        ldst_info_t *info = get_irn_link(load);
        ir_node *pred;
        ir_node *ptr       = get_Load_ptr(load);
        ir_node *mem       = get_Load_mem(load);
        ir_mode *load_mode = get_Load_mode(load);

        for (pred = curr; load != pred; ) {
                ldst_info_t *pred_info = get_irn_link(pred);

                /*
                 * BEWARE: one might think that checking the modes is useless, because
                 * if the pointers are identical, they refer to the same object.
                 * This is only true in strongly typed languages, not in C, where the
                 * following is possible: a = *(ir_type1 *)p; b = *(ir_type2 *)p ...
                 */
                if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
                    can_use_stored_value(get_irn_mode(get_Store_value(pred)), load_mode)) {
                        /*
                         * a Load immediately after a Store -- a read after write.
                         * We may remove the Load if neither Load nor Store has an exception handler
                         * OR they are in the same block. In the latter case the Load cannot
                         * throw an exception when the previous Store was quiet.
                         *
                         * Why do we need to check for a Store exception? If the Store cannot
                         * be executed (e.g., ROM) the exception handler might simply jump into
                         * the load block :-(
                         * We could do a little better if we knew that the exception
                         * handler of the Store jumps directly to the end...
                         */
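                        /* Source-level sketch (illustration only) of this
                         * read-after-write case:
                         *     *p = v; x = *p;   ==>   *p = v; x = v;
                         * the Load vanishes and its users see the stored value. */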
                        if ((pred_info->projs[pn_Store_X_except] == NULL && info->projs[pn_Load_X_except] == NULL) ||
                            get_nodes_block(load) == get_nodes_block(pred)) {
                                ir_node *value = get_Store_value(pred);

                                DBG_OPT_RAW(load, value);

                                /* add a Conv if needed */
                                if (get_irn_mode(get_Store_value(pred)) != load_mode) {
                                        value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
                                }

                                if (info->projs[pn_Load_M])
                                        exchange(info->projs[pn_Load_M], mem);

                                /* no exception */
                                if (info->projs[pn_Load_X_except]) {
                                        exchange(info->projs[pn_Load_X_except], new_Bad());
                                        res |= CF_CHANGED;
                                }

                                if (info->projs[pn_Load_res])
                                        exchange(info->projs[pn_Load_res], value);

                                exchange(load, new_Bad());
                                reduce_adr_usage(ptr);
                                return res | DF_CHANGED;
                        }
                } else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
                           can_use_stored_value(get_Load_mode(pred), load_mode)) {
                        /*
                         * a Load after a Load -- a read after read.
                         * We may remove the second Load if it does not have an exception handler
                         * OR they are in the same block. In the latter case the Load cannot
                         * throw an exception when the previous Load was quiet.
                         *
                         * Here, there is no need to check if the previous Load has an exception
                         * handler because they would have exactly the same exception...
                         */
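                        /* Source-level sketch (illustration only) of this
                         * read-after-read case:
                         *     x = *p; y = *p;   ==>   x = *p; y = x;
                         * the second Load is replaced by the result Proj of the first. */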
                        if (info->projs[pn_Load_X_except] == NULL || get_nodes_block(load) == get_nodes_block(pred)) {
                                ir_node *value;

                                DBG_OPT_RAR(load, pred);

                                /* the result is used */
                                if (info->projs[pn_Load_res]) {
                                        if (pred_info->projs[pn_Load_res] == NULL) {
                                                /* create a new Proj again */
                                                pred_info->projs[pn_Load_res] = new_r_Proj(current_ir_graph, get_nodes_block(pred), pred, get_Load_mode(pred), pn_Load_res);
                                        }
                                        value = pred_info->projs[pn_Load_res];

                                        /* add a Conv if needed */
                                        if (get_Load_mode(pred) != load_mode) {
                                                value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
                                        }

                                        exchange(info->projs[pn_Load_res], value);
                                }

                                if (info->projs[pn_Load_M])
                                        exchange(info->projs[pn_Load_M], mem);

                                /* no exception */
                                if (info->projs[pn_Load_X_except]) {
                                        exchange(info->projs[pn_Load_X_except], new_Bad());
                                        res |= CF_CHANGED;
                                }

                                exchange(load, new_Bad());
                                reduce_adr_usage(ptr);
                                return res | DF_CHANGED;
                        }
                }

                if (get_irn_op(pred) == op_Store) {
                        /* check if we can pass through this store */
                        ir_alias_relation rel = get_alias_relation(
                                current_ir_graph,
                                get_Store_ptr(pred),
                                get_irn_mode(get_Store_value(pred)),
                                ptr, load_mode);
                        /* if there might be an alias, we cannot pass this Store */
                        if (rel != no_alias)
                                break;
                        pred = skip_Proj(get_Store_mem(pred));
                } else if (get_irn_op(pred) == op_Load) {
                        pred = skip_Proj(get_Load_mem(pred));
                } else {
                        /* follow only Load chains */
                        break;
                }

                /* check for cycles */
                if (NODE_VISITED(pred_info))
                        break;
                MARK_NODE(pred_info);
        }

        if (get_irn_op(pred) == op_Sync) {
                int i;

                /* handle all Sync predecessors */
                for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
                        res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
                        if (res)
                                break;
                }
        }

        return res;
}  /* follow_Mem_chain */

/**
 * optimize a Load
 *
 * @param load  the Load node
 */
static unsigned optimize_load(ir_node *load)
{
        ldst_info_t *info = get_irn_link(load);
        ir_node *mem, *ptr, *new_node;
        ir_entity *ent;
        unsigned res = 0;

        /* do NOT touch volatile loads for now */
        if (get_Load_volatility(load) == volatility_is_volatile)
                return 0;

        /* the address of the load to be optimized */
        ptr = get_Load_ptr(load);

        /*
         * Check if we can remove the exception from a Load:
         * This can be done if the address is from a Sel(Alloc) and
         * the Sel type is a subtype of the allocated type.
         *
         * This optimizes some often used OO constructs,
         * like x = new O; x->t;
         */
        if (info->projs[pn_Load_X_except]) {
                if (is_Sel(ptr)) {
                        ir_node *mem = get_Sel_mem(ptr);

                        /* FIXME: works with the current FE, but better use the base */
                        if (get_irn_op(skip_Proj(mem)) == op_Alloc) {
                                /* ok, check the types */
                                ir_entity *ent    = get_Sel_entity(ptr);
                                ir_type   *s_type = get_entity_type(ent);
                                ir_type   *a_type = get_Alloc_type(skip_Proj(mem));

                                if (is_SubClass_of(s_type, a_type)) {
                                        /* ok, condition met: there can't be an exception because
                                         * Alloc guarantees that enough memory was allocated */

                                        exchange(info->projs[pn_Load_X_except], new_Bad());
                                        info->projs[pn_Load_X_except] = NULL;
                                        res |= CF_CHANGED;
                                }
                        }
                } else if ((get_irn_op(skip_Proj(ptr)) == op_Alloc) ||
                        ((get_irn_op(ptr) == op_Cast) && (get_irn_op(skip_Proj(get_Cast_op(ptr))) == op_Alloc))) {
                                /* simple case: a direct load after an Alloc. Firm's Alloc throws
                                 * an exception in case of out-of-memory. So, there is no way for an
                                 * exception in this load.
                                 * This code is constructed by the "exception lowering" in the Jack compiler.
                                 */
                                exchange(info->projs[pn_Load_X_except], new_Bad());
                                info->projs[pn_Load_X_except] = NULL;
                                res |= CF_CHANGED;
                }
        }

        /* The mem of the Load. Must still be returned after optimization. */
        mem  = get_Load_mem(load);

        if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
                /* a Load whose value is neither used nor exception-checked, remove it */
                exchange(info->projs[pn_Load_M], mem);

                exchange(load, new_Bad());
                reduce_adr_usage(ptr);
                return res | DF_CHANGED;
        }

        /* Load from a constant polymorphic field, where we can resolve
           polymorphism. */
        new_node = transform_node_Load(load);
        if (new_node != load) {
                if (info->projs[pn_Load_M]) {
                        exchange(info->projs[pn_Load_M], mem);
                        info->projs[pn_Load_M] = NULL;
                }
                if (info->projs[pn_Load_X_except]) {
                        exchange(info->projs[pn_Load_X_except], new_Bad());
                        info->projs[pn_Load_X_except] = NULL;
                }
                if (info->projs[pn_Load_res])
                        exchange(info->projs[pn_Load_res], new_node);

                exchange(load, new_Bad());
                reduce_adr_usage(ptr);
                return res | DF_CHANGED;
        }

        /* check if we can determine the entity that will be loaded */
        ent = find_constant_entity(ptr);
        if (ent) {
                if ((allocation_static == get_entity_allocation(ent)) &&
                        (visibility_external_allocated != get_entity_visibility(ent))) {
                        /* a static allocation that is not external: there should be NO exception
                         * when loading. */

                        /* no exception, clear the info field as it might be checked later again */
                        if (info->projs[pn_Load_X_except]) {
                                exchange(info->projs[pn_Load_X_except], new_Bad());
                                info->projs[pn_Load_X_except] = NULL;
                                res |= CF_CHANGED;
                        }

                        if (variability_constant == get_entity_variability(ent)
                                && is_atomic_entity(ent)) {
                                /* Beware: the entity might not be atomic anymore after
                                   lowering of Sels.  In that case we could still load, but
                                   it's more complicated. */
                                /* the simpler case: we load the content of a constant atomic
                                 * value, so we can replace the Load by the constant itself
                                 */

                                /* no memory */
                                if (info->projs[pn_Load_M]) {
                                        exchange(info->projs[pn_Load_M], mem);
                                        res |= DF_CHANGED;
                                }
                                /* no result :-) */
                                if (info->projs[pn_Load_res]) {
                                        ir_node *c = copy_const_value(get_irn_dbg_info(load), get_atomic_ent_value(ent));

                                        DBG_OPT_RC(load, c);
                                        exchange(info->projs[pn_Load_res], c);
                                        res |= DF_CHANGED;
                                }
                                exchange(load, new_Bad());
                                reduce_adr_usage(ptr);
                                return res;
                        } else if (variability_constant == get_entity_variability(ent)) {
                                compound_graph_path *path = get_accessed_path(ptr);

                                if (path) {
                                        ir_node *c;

                                        assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
                                        /*
                                        {
                                                int j;
                                                for (j = 0; j < get_compound_graph_path_length(path); ++j) {
                                                        ir_entity *node = get_compound_graph_path_node(path, j);
                                                        fprintf(stdout, ".%s", get_entity_name(node));
                                                        if (is_Array_type(get_entity_owner(node)))
                                                                fprintf(stdout, "[%d]", get_compound_graph_path_array_index(path, j));
                                                }
                                                printf("\n");
                                        }
                                        */

                                        c = get_compound_ent_value_by_path(ent, path);
                                        free_compound_graph_path(path);

                                        /* printf("  cons: "); DDMN(c); */

                                        if (info->projs[pn_Load_M]) {
                                                exchange(info->projs[pn_Load_M], mem);
                                                res |= DF_CHANGED;
                                        }
                                        if (info->projs[pn_Load_res]) {
                                                exchange(info->projs[pn_Load_res], copy_const_value(get_irn_dbg_info(load), c));
                                                res |= DF_CHANGED;
                                        }
                                        exchange(load, new_Bad());
                                        reduce_adr_usage(ptr);
                                        return res;
                                } else {
                                        /* We cannot determine a correct access path.  E.g., in jack, we load
                                           a byte from an object to generate an exception.  Happens in test program
                                           Reflectiontest.
                                        printf(">>>>>>>>>>>>> Found access to constant entity %s in function %s\n", get_entity_name(ent),
                                        get_entity_name(get_irg_entity(current_ir_graph)));
                                        printf("  load: "); DDMN(load);
                                        printf("  ptr:  "); DDMN(ptr);
                                        */
                                }
                        }
                }
        }

        /* Check if the address of this load is used more than once.
         * If not, this load cannot be removed in any case. */
        if (get_irn_n_uses(ptr) <= 1)
                return res;

        /*
         * follow the memory chain as long as there are only Loads
         * and try to replace the current Load or Store by a previous one.
         * Note that in unreachable loops it might happen that we reach the
         * load again, or fall into a cycle.
         * We break such cycles using a special visited flag.
         */
        INC_MASTER();
        res = follow_Mem_chain(load, skip_Proj(mem));
        return res;
}  /* optimize_load */

/**
 * Check whether a value of mode new_mode would completely overwrite a value
 * of mode old_mode in memory.
 */
static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
{
        return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
}  /* is_completely_overwritten */
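
/*
 * Example (sketch): a 32-bit store completely overwrites an earlier 8-bit
 * store to the same address,
 *
 *     *(char *)p = c;         // may be killed (write after write)
 *     *(int  *)p = i;
 *
 * while the reverse order (8-bit over 32-bit) leaves parts of the old
 * value visible, so the old store must stay.
 */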

/**
 * follow the memory chain as long as there are only Loads and alias-free Stores.
 *
 * INC_MASTER() must be called before diving into the chain.
 */
static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
        unsigned res = 0;
        ldst_info_t *info = get_irn_link(store);
        ir_node *pred;
        ir_node *ptr = get_Store_ptr(store);
        ir_node *mem = get_Store_mem(store);
        ir_node *value = get_Store_value(store);
        ir_mode *mode  = get_irn_mode(value);
        ir_node *block = get_nodes_block(store);

        for (pred = curr; pred != store;) {
                ldst_info_t *pred_info = get_irn_link(pred);

                /*
                 * BEWARE: one might think that checking the modes is useless, because
                 * if the pointers are identical, they refer to the same object.
                 * This is only true in strongly typed languages, not in C, where the
                 * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
                 * However, if the mode that is written has a size greater than or equal
                 * to the old one, the old value is completely overwritten and can be killed ...
                 */
                if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
                    get_nodes_block(pred) == block &&
                    is_completely_overwritten(get_irn_mode(get_Store_value(pred)), mode)) {
                        /*
                         * a Store after a Store in the same block -- a write after write.
                         * We may remove the first Store if it does not have an exception handler.
                         *
                         * TODO: What if both have the same exception handler?
                         */
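                        /* Source-level sketch (illustration only) of this
                         * write-after-write case, assuming no read of *p in between:
                         *     *p = a; *p = b;   ==>   *p = b;
                         * the completely overwritten first Store is removed. */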
                        if (get_Store_volatility(pred) != volatility_is_volatile && !pred_info->projs[pn_Store_X_except]) {
                                DBG_OPT_WAW(pred, store);
                                exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
                                exchange(pred, new_Bad());
                                reduce_adr_usage(ptr);
                                return DF_CHANGED;
                        }
                } else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
                           value == pred_info->projs[pn_Load_res]) {
                        /*
                         * a Store of a value after a Load of the same value -- a write after read.
                         * We may remove the Store if it does not have an exception handler.
                         */
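                        /* Source-level sketch (illustration only) of this
                         * write-after-read case:
                         *     x = *p; *p = x;   ==>   x = *p;
                         * storing back the value just loaded changes nothing, so
                         * the Store is removed. */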
                        if (! info->projs[pn_Store_X_except]) {
                                DBG_OPT_WAR(store, pred);
                                exchange(info->projs[pn_Store_M], mem);
                                exchange(store, new_Bad());
                                reduce_adr_usage(ptr);
                                return DF_CHANGED;
                        }
                }

                if (get_irn_op(pred) == op_Store) {
                        /* check if we can pass through this store */
                        ir_alias_relation rel = get_alias_relation(
                                current_ir_graph,
                                get_Store_ptr(pred),
                                get_irn_mode(get_Store_value(pred)),
                                ptr, mode);
                        /* if there might be an alias, we cannot pass this Store */
                        if (rel != no_alias)
                                break;
                        pred = skip_Proj(get_Store_mem(pred));
                } else if (get_irn_op(pred) == op_Load) {
                        pred = skip_Proj(get_Load_mem(pred));
                } else {
                        /* follow only Load chains */
                        break;
                }

                /* check for cycles */
                if (NODE_VISITED(pred_info))
                        break;
                MARK_NODE(pred_info);
        }

        if (get_irn_op(pred) == op_Sync) {
                int i;

                /* handle all Sync predecessors */
                for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
                        res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
                        if (res)
                                break;
                }
        }
        return res;
}  /* follow_Mem_chain_for_Store */

/**
 * optimize a Store
 *
 * @param store  the Store node
 */
static unsigned optimize_store(ir_node *store) {
        ir_node *ptr, *mem;

        if (get_Store_volatility(store) == volatility_is_volatile)
                return 0;

        ptr = get_Store_ptr(store);

        /* Check if the address of this Store is used more than once.
         * If not, this Store cannot be removed in any case. */
        if (get_irn_n_uses(ptr) <= 1)
                return 0;

        mem = get_Store_mem(store);

        /* follow the memory chain as long as there are only Loads */
        INC_MASTER();
        return follow_Mem_chain_for_Store(store, skip_Proj(mem));
}  /* optimize_store */

/**
 * walker, optimizes Phi after Stores to identical places:
 * Does the following optimization:
 * @verbatim
 *
 *   val1   val2   val3          val1  val2  val3
 *    |      |      |               \    |    /
 *  Store  Store  Store              \   |   /
 *      \    |    /                   PhiData
 *       \   |   /                       |
 *        \  |  /                      Store
 *          PhiM
 *
 * @endverbatim
 * This reduces the number of stores and allows for predicated execution.
 * It moves Stores back to the end of a function, which may be bad.
 *
 * This is only possible if the predecessor blocks have only one successor.
 */
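
/*
 * Source-level sketch (hypothetical input) of what this enables:
 *
 *     if (c) *p = a; else *p = b;   ==>   *p = (c ? a : b);
 *
 * the two Stores feeding the memory Phi become one Store of a data Phi,
 * which a backend may then execute as predicated code.
 */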
static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
{
        int i, n;
        ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
        ir_mode *mode;
        ir_node **inM, **inD, **stores;
        int *idx;
        dbg_info *db = NULL;
        ldst_info_t *info;
        block_info_t *bl_info;
        unsigned res = 0;

        /* Must be a memory Phi */
        if (get_irn_mode(phi) != mode_M)
                return 0;

        n = get_Phi_n_preds(phi);
        if (n <= 0)
                return 0;

        store = skip_Proj(get_Phi_pred(phi, 0));
        old_store = store;
        if (get_irn_op(store) != op_Store)
                return 0;

        block = get_nodes_block(store);

        /* abort on dead blocks */
        if (is_Block_dead(block))
                return 0;

        /* check if the block is post-dominated by the Phi-block
           and has no exception exit */
        bl_info = get_irn_link(block);
        if (bl_info->flags & BLOCK_HAS_EXC)
                return 0;

        phi_block = get_nodes_block(phi);
        if (! block_postdominates(phi_block, block))
                return 0;

        /* this is the address of the store */
        ptr  = get_Store_ptr(store);
        mode = get_irn_mode(get_Store_value(store));
        info = get_irn_link(store);
        exc  = info->exc_block;

        for (i = 1; i < n; ++i) {
                ir_node *pred = skip_Proj(get_Phi_pred(phi, i));

                if (get_irn_op(pred) != op_Store)
                        return 0;

                if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
                        return 0;

                info = get_irn_link(pred);

                /* check if all stores have the same exception flow */
                if (exc != info->exc_block)
                        return 0;

                /* abort on dead blocks */
                block = get_nodes_block(pred);
                if (is_Block_dead(block))
                        return 0;

                /* check if the block is post-dominated by the Phi-block
                   and has no exception exit. Note that block must be different from
                   the Phi-block, else we would move a Store from the End of a block to its
                   Start... */
                bl_info = get_irn_link(block);
                if (bl_info->flags & BLOCK_HAS_EXC)
                        return 0;
                if (block == phi_block || ! block_postdominates(phi_block, block))
                        return 0;
        }

        /*
         * ok, when we are here, we found all predecessors of a Phi that
         * are Stores to the same address and size. That means whatever
         * path we take into the block of the Phi, we do a Store.
         * So, we can move the Store to the current block:
         *
         *   val1    val2    val3          val1  val2  val3
         *    |       |       |               \    |    /
         * | Str | | Str | | Str |             \   |   /
         *      \     |     /                   PhiData
         *       \    |    /                       |
         *        \   |   /                       Str
         *           PhiM
         *
         * This is only allowed if the predecessor blocks have only one successor.
         */

        NEW_ARR_A(ir_node *, stores, n);
        NEW_ARR_A(ir_node *, inM, n);
        NEW_ARR_A(ir_node *, inD, n);
        NEW_ARR_A(int, idx, n);

        /* Prepare: Collect all Store nodes.  We must do this
           first because we otherwise may lose a store when exchanging its
           memory Proj.
         */
        for (i = 0; i < n; ++i)
                stores[i] = skip_Proj(get_Phi_pred(phi, i));

        /* Prepare: Skip the memory Proj: we need this in the case some stores
           are cascaded.
           Beware: One Store might be included more than once in the stores[]
           list, so we must avoid doing the exchange more than once.
         */
        for (i = 0; i < n; ++i) {
                ir_node *store = stores[i];
                ir_node *proj_m;

                info = get_irn_link(store);
                proj_m = info->projs[pn_Store_M];

                if (is_Proj(proj_m) && get_Proj_pred(proj_m) == store)
                        exchange(proj_m, get_Store_mem(store));
        }

        /* first step: collect all inputs */
        for (i = 0; i < n; ++i) {
                ir_node *store = stores[i];
                info = get_irn_link(store);

                inM[i] = get_Store_mem(store);
                inD[i] = get_Store_value(store);
                idx[i] = info->exc_idx;
        }
        block = get_nodes_block(phi);

        /* second step: create a new memory Phi */
        phiM = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inM, mode_M);

        /* third step: create a new data Phi */
        phiD = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inD, mode);

        /* fourth step: create the Store */
        store = new_rd_Store(db, current_ir_graph, block, phiM, ptr, phiD);
#ifdef DO_CACHEOPT
        co_set_irn_name(store, co_get_irn_ident(old_store));
#endif

        projM = new_rd_Proj(NULL, current_ir_graph, block, store, mode_M, pn_Store_M);

        info = get_ldst_info(store, wenv);
        info->projs[pn_Store_M] = projM;

        /* fifth step: repair exception flow */
        if (exc) {
                ir_node *projX = new_rd_Proj(NULL, current_ir_graph, block, store, mode_X, pn_Store_X_except);

                info->projs[pn_Store_X_except] = projX;
                info->exc_block                = exc;
                info->exc_idx                  = idx[0];

                for (i = 0; i < n; ++i) {
                        set_Block_cfgpred(exc, idx[i], projX);
                }

                if (n > 1) {
                        /* TODO: the exception block should be optimized, as some inputs are identical now */
                }

                res |= CF_CHANGED;
        }

        /* sixth step: replace old Phi */
        exchange(phi, projM);

        return res | DF_CHANGED;
}  /* optimize_phi */

/**
 * walker, do the optimizations
 */
static void do_load_store_optimize(ir_node *n, void *env) {
        walk_env_t *wenv = env;

        switch (get_irn_opcode(n)) {

        case iro_Load:
                wenv->changes |= optimize_load(n);
                break;

        case iro_Store:
                wenv->changes |= optimize_store(n);
                break;

        case iro_Phi:
                wenv->changes |= optimize_phi(n, wenv);
                break;

        default:
                ;
        }
}  /* do_load_store_optimize */

/*
 * do the load store optimization
 */
void optimize_load_store(ir_graph *irg) {
        walk_env_t env;

        assert(get_irg_phase_state(irg) != phase_building);
        assert(get_irg_pinned(irg) != op_pin_state_floats &&
                "LoadStore optimization needs pinned graph");

        if (! get_opt_redundant_loadstore())
                return;

        edges_assure(irg);

        /* for Phi optimization post-dominators are needed ... */
        assure_postdoms(irg);

        if (get_opt_alias_analysis()) {
                assure_irg_address_taken_computed(irg);
                assure_irp_globals_address_taken_computed();
        }

        obstack_init(&env.obst);
        env.changes = 0;

        /* init the links, then collect Loads/Stores/Proj's in lists */
        master_visited = 0;
        irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);

        /* now we have collected enough information, optimize */
        irg_walk_graph(irg, NULL, do_load_store_optimize, &env);

        obstack_free(&env.obst, NULL);

        /* Handle graph state */
        if (env.changes) {
                if (get_irg_outs_state(irg) == outs_consistent)
                        set_irg_outs_inconsistent(irg);
        }

        if (env.changes & CF_CHANGED) {
                /* control flow changed: blocks may have Bad() predecessors now,
                   so the dominance information is no longer valid */
                set_irg_doms_inconsistent(irg);
        }
}  /* optimize_load_store */