/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Load/Store optimizations.
 * @author  Michael Beck
 */
#include "config.h"

#include <string.h>

#include "iroptimize.h"
#include "irnode_t.h"
#include "irgraph_t.h"
#include "irmode_t.h"
#include "iropt_t.h"
#include "ircons_t.h"
#include "irgmod.h"
#include "irgwalk.h"
#include "irtools.h"
#include "tv_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "irflag_t.h"
#include "array_t.h"
#include "irhooks.h"
#include "iredges.h"
#include "irpass.h"
#include "irmemory.h"
#include "irnodehashmap.h"
#include "irgopt.h"
#include "set.h"
#include "be.h"
#include "debug.h"

/** The debug handle. */
DEBUG_ONLY(static firm_dbg_module_t *dbg;)

#undef IMAX
#define IMAX(a,b)   ((a) > (b) ? (a) : (b))

#define MAX_PROJ    IMAX(IMAX((long)pn_Load_max, (long)pn_Store_max), (long)pn_Call_max)

enum changes_t {
        DF_CHANGED = 1,       /**< data flow changed */
        CF_CHANGED = 2,       /**< control flow changed */
};

/**
 * walker environment
 */
typedef struct walk_env_t {
        struct obstack obst;          /**< list of all stores */
        unsigned changes;             /**< a bitmask of graph changes */
} walk_env_t;

/** A Load/Store info. */
typedef struct ldst_info_t {
        ir_node  *projs[MAX_PROJ+1];  /**< list of Proj's of this node */
        ir_node  *exc_block;          /**< the exception block if available */
        int      exc_idx;             /**< predecessor index in the exception block */
        unsigned visited;             /**< visited counter for breaking loops */
} ldst_info_t;

/**
 * flags for control flow.
 */
enum block_flags_t {
        BLOCK_HAS_COND = 1,      /**< Block has conditional control flow */
        BLOCK_HAS_EXC  = 2       /**< Block has exceptional control flow */
};

/**
 * a Block info.
 */
typedef struct block_info_t {
        unsigned flags;               /**< flags for the block */
} block_info_t;

/** the master visited flag for loop detection. */
static unsigned master_visited = 0;

#define INC_MASTER()       ++master_visited
#define MARK_NODE(info)    (info)->visited = master_visited
#define NODE_VISITED(info) ((info)->visited >= master_visited)
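
/* Usage pattern of these macros in the memory-chain walks below (sketch):
 *
 *     INC_MASTER();              // start a fresh walk
 *     ...
 *     if (NODE_VISITED(info))    // already seen in this walk: cycle, stop
 *             break;
 *     MARK_NODE(info);
 */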

/**
 * get the Load/Store info of a node
 */
static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst)
{
        ldst_info_t *info = (ldst_info_t*)get_irn_link(node);

        if (! info) {
                info = OALLOCZ(obst, ldst_info_t);
                set_irn_link(node, info);
        }
        return info;
}  /* get_ldst_info */

/**
 * get the Block info of a node
 */
static block_info_t *get_block_info(ir_node *node, struct obstack *obst)
{
        block_info_t *info = (block_info_t*)get_irn_link(node);

        if (! info) {
                info = OALLOCZ(obst, block_info_t);
                set_irn_link(node, info);
        }
        return info;
}  /* get_block_info */

/**
 * update the projection info for a Load/Store
 */
static unsigned update_projs(ldst_info_t *info, ir_node *proj)
{
        long nr = get_Proj_proj(proj);

        assert(0 <= nr && nr <= MAX_PROJ && "Wrong proj from LoadStore");

        if (info->projs[nr]) {
                /* there is already one, do CSE */
                exchange(proj, info->projs[nr]);
                return DF_CHANGED;
        } else {
                info->projs[nr] = proj;
                return 0;
        }
}  /* update_projs */

/**
 * update the exception block info for a Load/Store node.
 *
 * @param info   the load/store info struct
 * @param block  the exception handler block for this load/store
 * @param pos    the control flow input of the block
 */
static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
{
        assert(info->exc_block == NULL && "more than one exception block found");

        info->exc_block = block;
        info->exc_idx   = pos;
        return 0;
}  /* update_exc */

/**
 * walker, collects all Load/Store/Proj nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
{
        walk_env_t  *wenv   = (walk_env_t *)env;
        unsigned     opcode = get_irn_opcode(node);
        ir_node     *pred, *blk, *pred_blk;
        ldst_info_t *ldst_info;

        if (opcode == iro_Proj) {
                pred   = get_Proj_pred(node);
                opcode = get_irn_opcode(pred);

                if (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call) {
                        ldst_info = get_ldst_info(pred, &wenv->obst);

                        wenv->changes |= update_projs(ldst_info, node);

                        /*
                         * Place the Projs in the same block as the
                         * predecessor Load. This is always ok and prevents
                         * "non-SSA" form after optimizations if the Proj
                         * is in a wrong block.
                         */
                        blk      = get_nodes_block(node);
                        pred_blk = get_nodes_block(pred);
                        if (blk != pred_blk) {
                                wenv->changes |= DF_CHANGED;
                                set_nodes_block(node, pred_blk);
                        }
                }
        } else if (opcode == iro_Block) {
                int i;

                for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
                        ir_node      *pred_block, *proj;
                        block_info_t *bl_info;
                        int          is_exc = 0;

                        pred = proj = get_Block_cfgpred(node, i);

                        if (is_Proj(proj)) {
                                pred   = get_Proj_pred(proj);
                                is_exc = is_x_except_Proj(proj);
                        }

                        /* ignore Bad predecessors, they will be removed later */
                        if (is_Bad(pred))
                                continue;

                        pred_block = get_nodes_block(pred);
                        bl_info    = get_block_info(pred_block, &wenv->obst);

                        if (is_fragile_op(pred) && is_exc)
                                bl_info->flags |= BLOCK_HAS_EXC;
                        else if (is_irn_forking(pred))
                                bl_info->flags |= BLOCK_HAS_COND;

                        opcode = get_irn_opcode(pred);
                        if (is_exc && (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call)) {
                                ldst_info = get_ldst_info(pred, &wenv->obst);

                                wenv->changes |= update_exc(ldst_info, node, i);
                        }
                }
        }
}  /* collect_nodes */

/**
 * Returns an entity if the address ptr points to a constant one.
 *
 * @param ptr  the address
 *
 * @return an entity or NULL
 */
static ir_entity *find_constant_entity(ir_node *ptr)
{
        for (;;) {
                if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
                        return get_SymConst_entity(ptr);
                } else if (is_Sel(ptr)) {
                        ir_entity *ent = get_Sel_entity(ptr);
                        ir_type   *tp  = get_entity_owner(ent);

                        /* Do not fiddle with polymorphism. */
                        if (is_Class_type(get_entity_owner(ent)) &&
                                ((get_entity_n_overwrites(ent)    != 0) ||
                                (get_entity_n_overwrittenby(ent) != 0)   ) )
                                return NULL;

                        if (is_Array_type(tp)) {
                                /* check bounds */
                                int i, n;

                                for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
                                        ir_node   *bound;
                                        ir_tarval *tlower, *tupper;
                                        ir_node   *index = get_Sel_index(ptr, i);
                                        ir_tarval *tv    = computed_value(index);

                                        /* check if the index is constant */
                                        if (tv == tarval_bad)
                                                return NULL;

                                        bound  = get_array_lower_bound(tp, i);
                                        tlower = computed_value(bound);
                                        bound  = get_array_upper_bound(tp, i);
                                        tupper = computed_value(bound);

                                        if (tlower == tarval_bad || tupper == tarval_bad)
                                                return NULL;

                                        if (tarval_cmp(tv, tlower) == ir_relation_less)
                                                return NULL;
                                        if (tarval_cmp(tupper, tv) == ir_relation_less)
                                                return NULL;

                                        /* ok, bounds check finished */
                                }
                        }

                        if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT)
                                return ent;

                        /* try next */
                        ptr = get_Sel_ptr(ptr);
                } else if (is_Add(ptr)) {
                        ir_node *l = get_Add_left(ptr);
                        ir_node *r = get_Add_right(ptr);

                        if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
                                ptr = l;
                        else if (get_irn_mode(r) == get_irn_mode(ptr) && is_Const(l))
                                ptr = r;
                        else
                                return NULL;

                        /* for now, we support only one addition, reassoc should fold all others */
                        if (! is_SymConst(ptr) && !is_Sel(ptr))
                                return NULL;
                } else if (is_Sub(ptr)) {
                        ir_node *l = get_Sub_left(ptr);
                        ir_node *r = get_Sub_right(ptr);

                        if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
                                ptr = l;
                        else
                                return NULL;
                        /* for now, we support only one subtraction, reassoc should fold all others */
                        if (! is_SymConst(ptr) && !is_Sel(ptr))
                                return NULL;
                } else
                        return NULL;
        }
}  /* find_constant_entity */
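
/* Illustration (not part of the original code): find_constant_entity()
 * recognizes addresses such as
 *
 *     static const int tab[4] = { 1, 2, 3, 4 };
 *     ... tab[2] ...    // Sel(SymConst(&tab), 2): constant entity, in bounds
 *
 * and returns the entity tab, so a Load from it can later be folded to the
 * initializer value. */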

/**
 * Return the Selection index of a Sel node from dimension dim
 */
static long get_Sel_array_index_long(ir_node *n, int dim)
{
        ir_node *index = get_Sel_index(n, dim);
        assert(is_Const(index));
        return get_tarval_long(get_Const_tarval(index));
}  /* get_Sel_array_index_long */

typedef struct path_entry {
        ir_entity         *ent;
        struct path_entry *next;
        size_t            index;
} path_entry;

static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
{
        path_entry       entry, *p;
        ir_entity        *ent, *field;
        ir_initializer_t *initializer;
        ir_tarval        *tv;
        ir_type          *tp;
        size_t           n;

        entry.next = next;
        if (is_SymConst(ptr)) {
                /* found the root */
                ent         = get_SymConst_entity(ptr);
                initializer = get_entity_initializer(ent);
                for (p = next; p != NULL;) {
                        if (initializer->kind != IR_INITIALIZER_COMPOUND)
                                return NULL;
                        n  = get_initializer_compound_n_entries(initializer);
                        tp = get_entity_type(ent);

                        if (is_Array_type(tp)) {
                                ent = get_array_element_entity(tp);
                                if (ent != p->ent) {
                                        /* a missing [0] */
                                        if (0 >= n)
                                                return NULL;
                                        initializer = get_initializer_compound_value(initializer, 0);
                                        continue;
                                }
                        }
                        if (p->index >= n)
                                return NULL;
                        initializer = get_initializer_compound_value(initializer, p->index);

                        ent = p->ent;
                        p   = p->next;
                }
                tp = get_entity_type(ent);
                while (is_Array_type(tp)) {
                        ent = get_array_element_entity(tp);
                        tp = get_entity_type(ent);
                        /* a missing [0] */
                        n  = get_initializer_compound_n_entries(initializer);
                        if (0 >= n)
                                return NULL;
                        initializer = get_initializer_compound_value(initializer, 0);
                }

                switch (initializer->kind) {
                case IR_INITIALIZER_CONST:
                        return get_initializer_const_value(initializer);
                case IR_INITIALIZER_TARVAL:
                case IR_INITIALIZER_NULL:
                default:
                        return NULL;
                }
        } else if (is_Sel(ptr)) {
                entry.ent = field = get_Sel_entity(ptr);
                tp = get_entity_owner(field);
                if (is_Array_type(tp)) {
                        assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
                        entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
                } else {
                        size_t i, n_members = get_compound_n_members(tp);
                        for (i = 0; i < n_members; ++i) {
                                if (get_compound_member(tp, i) == field)
                                        break;
                        }
                        if (i >= n_members) {
                                /* not found: should NOT happen */
                                return NULL;
                        }
                        entry.index = i;
                }
                return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
        } else if (is_Add(ptr)) {
                ir_mode  *mode;
                unsigned pos;

                {
                        ir_node *l = get_Add_left(ptr);
                        ir_node *r = get_Add_right(ptr);
                        if (is_Const(r)) {
                                ptr = l;
                                tv  = get_Const_tarval(r);
                        } else {
                                ptr = r;
                                tv  = get_Const_tarval(l);
                        }
                }
ptr_arith:
                mode = get_tarval_mode(tv);

                /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
                if (is_Sel(ptr)) {
                        field = get_Sel_entity(ptr);
                } else {
                        field = get_SymConst_entity(ptr);
                }

                /* count needed entries */
                pos = 0;
                for (ent = field;;) {
                        tp = get_entity_type(ent);
                        if (! is_Array_type(tp))
                                break;
                        ent = get_array_element_entity(tp);
                        ++pos;
                }
                /* should be at least ONE entry */
                if (pos == 0)
                        return NULL;

                /* allocate the right number of entries */
                NEW_ARR_A(path_entry, p, pos);

                /* fill them up */
                pos = 0;
                for (ent = field;;) {
                        unsigned   size;
                        ir_tarval *sz, *tv_index, *tlower, *tupper;
                        long       index;
                        ir_node   *bound;

                        tp = get_entity_type(ent);
                        if (! is_Array_type(tp))
                                break;
                        ent = get_array_element_entity(tp);
                        p[pos].ent  = ent;
                        p[pos].next = &p[pos + 1];

                        size = get_type_size_bytes(get_entity_type(ent));
                        sz   = new_tarval_from_long(size, mode);

                        tv_index = tarval_div(tv, sz);
                        tv       = tarval_mod(tv, sz);

                        if (tv_index == tarval_bad || tv == tarval_bad)
                                return NULL;

                        assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
                        bound  = get_array_lower_bound(tp, 0);
                        tlower = computed_value(bound);
                        bound  = get_array_upper_bound(tp, 0);
                        tupper = computed_value(bound);

                        if (tlower == tarval_bad || tupper == tarval_bad)
                                return NULL;

                        if (tarval_cmp(tv_index, tlower) == ir_relation_less)
                                return NULL;
                        if (tarval_cmp(tupper, tv_index) == ir_relation_less)
                                return NULL;

                        /* ok, bounds check finished */
                        index = get_tarval_long(tv_index);
                        p[pos].index = index;
                        ++pos;
                }
                if (! tarval_is_null(tv)) {
                        /* hmm, wrong access */
                        return NULL;
                }
                p[pos - 1].next = next;
                return rec_find_compound_ent_value(ptr, p);
        } else if (is_Sub(ptr)) {
                ir_node *l = get_Sub_left(ptr);
                ir_node *r = get_Sub_right(ptr);

                ptr = l;
                tv  = get_Const_tarval(r);
                tv  = tarval_neg(tv);
                goto ptr_arith;
        }
        return NULL;
}

static ir_node *find_compound_ent_value(ir_node *ptr)
{
        return rec_find_compound_ent_value(ptr, NULL);
}
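
/* Illustration (not part of the original code, simplified): for
 *
 *     static const struct { int a[2]; } s = { { 7, 9 } };
 *
 * an address like Sel(Sel(SymConst(&s), a), [1]) is translated into the path
 * (a, index 1) through the initializer, and find_compound_ent_value()
 * yields the Const 9. */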

/* forward */
static void reduce_adr_usage(ir_node *ptr);

/**
 * Update a Load that may have lost its users.
 */
static void handle_load_update(ir_node *load)
{
        ldst_info_t *info = (ldst_info_t*)get_irn_link(load);

        /* do NOT touch volatile loads for now */
        if (get_Load_volatility(load) == volatility_is_volatile)
                return;

        if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
                ir_node *ptr = get_Load_ptr(load);
                ir_node *mem = get_Load_mem(load);

                /* a Load whose value is neither used nor exception-checked; remove it */
                exchange(info->projs[pn_Load_M], mem);
                if (info->projs[pn_Load_X_regular])
                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                kill_node(load);
                reduce_adr_usage(ptr);
        }
}  /* handle_load_update */

/**
 * A use of an address node has vanished. Check if this was a Proj
 * node and update the counters.
 */
static void reduce_adr_usage(ir_node *ptr)
{
        ir_node *pred;
        if (!is_Proj(ptr))
                return;
        if (get_irn_n_edges(ptr) > 0)
                return;

        /* this Proj is dead now */
        pred = get_Proj_pred(ptr);
        if (is_Load(pred)) {
                ldst_info_t *info = (ldst_info_t*)get_irn_link(pred);
                info->projs[get_Proj_proj(ptr)] = NULL;

                /* this node lost its result proj, handle that */
                handle_load_update(pred);
        }
}  /* reduce_adr_usage */

/**
 * Check, if an already existing value of mode old_mode can be converted
 * into the needed one new_mode without loss.
 */
static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode)
{
        unsigned old_size;
        unsigned new_size;
        if (old_mode == new_mode)
                return true;

        old_size = get_mode_size_bits(old_mode);
        new_size = get_mode_size_bits(new_mode);

        /* if both modes are two's-complement ones, we can always convert the
           stored value into the needed one. (on big-endian machines we currently
           only support this for modes of the same size) */
        if (old_size >= new_size &&
                  get_mode_arithmetic(old_mode) == irma_twos_complement &&
                  get_mode_arithmetic(new_mode) == irma_twos_complement &&
                  (!be_get_backend_param()->byte_order_big_endian
                || old_size == new_size)) {
                return true;
        }
        return false;
}
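
/* Illustration (not part of the original code; assumes a little-endian
 * target): a stored 32-bit two's-complement value can satisfy a narrower
 * load of the same address, e.g.
 *
 *     *(int *)p = x;            // Store, 32-bit mode
 *     char c = *(char *)p;      // Load, 8-bit mode: can reuse x
 *
 * while on big-endian targets this is currently only done for equal sizes. */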

/**
 * Check whether a Call is at least pure, i.e. only reads memory.
 */
static unsigned is_Call_pure(ir_node *call)
{
        ir_type *call_tp = get_Call_type(call);
        unsigned prop = get_method_additional_properties(call_tp);

        /* check the call type first */
        if ((prop & (mtp_property_const|mtp_property_pure)) == 0) {
                /* try the called entity */
                ir_node *ptr = get_Call_ptr(call);

                if (is_SymConst_addr_ent(ptr)) {
                        ir_entity *ent = get_SymConst_entity(ptr);

                        prop = get_entity_additional_properties(ent);
                }
        }
        return (prop & (mtp_property_const|mtp_property_pure)) != 0;
}  /* is_Call_pure */
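
/* Illustration (not part of the original code): pure calls contain no
 * Stores, so the memory-chain walks below may step over them, e.g.
 *
 *     y = *p;
 *     n = strlen(s);   // pure: only reads memory
 *     z = *p;          // may reuse the value loaded into y
 */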

static ir_node *get_base_and_offset(ir_node *ptr, long *pOffset)
{
        ir_mode *mode  = get_irn_mode(ptr);
        long    offset = 0;

        /* TODO: long might not be enough, we should probably use some tarval thingy... */
        for (;;) {
                if (is_Add(ptr)) {
                        ir_node *l = get_Add_left(ptr);
                        ir_node *r = get_Add_right(ptr);

                        if (get_irn_mode(l) != mode || !is_Const(r))
                                break;

                        offset += get_tarval_long(get_Const_tarval(r));
                        ptr     = l;
                } else if (is_Sub(ptr)) {
                        ir_node *l = get_Sub_left(ptr);
                        ir_node *r = get_Sub_right(ptr);

                        if (get_irn_mode(l) != mode || !is_Const(r))
                                break;

                        offset -= get_tarval_long(get_Const_tarval(r));
                        ptr     = l;
                } else if (is_Sel(ptr)) {
                        ir_entity *ent = get_Sel_entity(ptr);
                        ir_type   *tp  = get_entity_owner(ent);

                        if (is_Array_type(tp)) {
                                int     size;
                                ir_node *index;

                                /* only one dimensional arrays yet */
                                if (get_Sel_n_indexs(ptr) != 1)
                                        break;
                                index = get_Sel_index(ptr, 0);
                                if (! is_Const(index))
                                        break;

                                tp = get_entity_type(ent);
                                if (get_type_state(tp) != layout_fixed)
                                        break;

                                size    = get_type_size_bytes(tp);
                                offset += size * get_tarval_long(get_Const_tarval(index));
                        } else {
                                if (get_type_state(tp) != layout_fixed)
                                        break;
                                offset += get_entity_offset(ent);
                        }
                        ptr = get_Sel_ptr(ptr);
                } else
                        break;
        }

        *pOffset = offset;
        return ptr;
}
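
/* Illustration (not part of the original code): for
 *
 *     ptr = Add(Sub(p, Const 4), Const 12)
 *
 * the loop above peels off the constant arithmetic and returns base p with
 * *pOffset == 8. */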

static int try_load_after_store(ir_node *load,
                ir_node *load_base_ptr, long load_offset, ir_node *store)
{
        ldst_info_t *info;
        ir_node *store_ptr      = get_Store_ptr(store);
        long     store_offset;
        ir_node *store_base_ptr = get_base_and_offset(store_ptr, &store_offset);
        ir_node *store_value;
        ir_mode *store_mode;
        ir_node *load_ptr;
        ir_mode *load_mode;
        long     load_mode_len;
        long     store_mode_len;
        long     delta;
        int      res;

        if (load_base_ptr != store_base_ptr)
                return 0;

        load_mode      = get_Load_mode(load);
        load_mode_len  = get_mode_size_bytes(load_mode);
        store_mode     = get_irn_mode(get_Store_value(store));
        store_mode_len = get_mode_size_bytes(store_mode);
        delta          = load_offset - store_offset;
        store_value    = get_Store_value(store);

        if (delta != 0 || store_mode != load_mode) {
                /* TODO: implement for big-endian */
                if (delta < 0 || delta + load_mode_len > store_mode_len
                                || (be_get_backend_param()->byte_order_big_endian
                                    && load_mode_len != store_mode_len))
                        return 0;

                if (get_mode_arithmetic(store_mode) != irma_twos_complement ||
                        get_mode_arithmetic(load_mode)  != irma_twos_complement)
                        return 0;

                /* produce a shift to adjust the offset delta */
                if (delta > 0) {
                        ir_node *cnst;
                        ir_graph *irg = get_irn_irg(load);

                        cnst        = new_r_Const_long(irg, mode_Iu, delta * 8);
                        store_value = new_r_Shr(get_nodes_block(load),
                                                                        store_value, cnst, store_mode);
                }

                /* add a Conv if needed */
                if (store_mode != load_mode) {
                        store_value = new_r_Conv(get_nodes_block(load), store_value, load_mode);
                }
        }

        DBG_OPT_RAW(load, store_value);

        info = (ldst_info_t*)get_irn_link(load);
        if (info->projs[pn_Load_M])
                exchange(info->projs[pn_Load_M], get_Load_mem(load));

        res = 0;
        /* no exception */
        if (info->projs[pn_Load_X_except]) {
                ir_graph *irg = get_irn_irg(load);
                exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
                res |= CF_CHANGED;
        }
        if (info->projs[pn_Load_X_regular]) {
                exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                res |= CF_CHANGED;
        }

        if (info->projs[pn_Load_res])
                exchange(info->projs[pn_Load_res], store_value);

        load_ptr = get_Load_ptr(load);
        kill_node(load);
        reduce_adr_usage(load_ptr);
        return res | DF_CHANGED;
}
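
/* Illustration (not part of the original code; little-endian, two's
 * complement): after
 *
 *     *(unsigned *)p = v;                 // 4-byte Store
 *     x = *((unsigned char *)p + 1);      // 1-byte Load, delta == 1
 *
 * the Load is replaced by Conv(Shr(v, 8)): the stored value shifted right by
 * delta * 8 bits and converted to the load mode. */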

/**
 * Follow the memory chain as long as there are only Loads,
 * alias-free Stores, and constant Calls and try to replace the
 * current Load by a previous one.
 * Note that in unreachable loops it might happen that we reach the
 * Load again or fall into a cycle.
 * We break such cycles using a special visited flag.
 *
 * INC_MASTER() must be called before diving in.
 */
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr)
{
        unsigned    res = 0;
        ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
        ir_node     *pred;
        ir_node     *ptr       = get_Load_ptr(load);
        ir_node     *mem       = get_Load_mem(load);
        ir_mode     *load_mode = get_Load_mode(load);

        for (pred = curr; load != pred; ) {
                ldst_info_t *pred_info = (ldst_info_t*)get_irn_link(pred);

                /*
                 * a Load immediately after a Store -- a read after write.
                 * We may remove the Load if both Load & Store do not have an
                 * exception handler OR they are in the same Block. In the latter
                 * case the Load cannot throw an exception when the previous Store
                 * raised none.
                 *
                 * Why do we need to check for a Store exception? If the Store cannot
                 * be executed (ROM) the exception handler might simply jump into
                 * the load Block :-(
                 * We could make it a little bit better if we would know that the
                 * exception handler of the Store jumps directly to the end...
                 */
                if (is_Store(pred) && ((pred_info->projs[pn_Store_X_except] == NULL
                                && info->projs[pn_Load_X_except] == NULL)
                                || get_nodes_block(load) == get_nodes_block(pred)))
                {
                        long    load_offset;
                        ir_node *base_ptr = get_base_and_offset(ptr, &load_offset);
                        int     changes   = try_load_after_store(load, base_ptr, load_offset, pred);

                        if (changes != 0)
                                return res | changes;
                } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
                           can_use_stored_value(get_Load_mode(pred), load_mode)) {
                        /*
                         * a Load after a Load -- a read after read.
                         * We may remove the second Load if it does not have an exception
                         * handler OR they are in the same Block. In the latter case
                         * the Load cannot throw an exception when the previous Load
                         * raised none.
                         *
                         * Here there is no need to check whether the previous Load has an
                         * exception handler, because it would raise exactly the same
                         * exception...
                         *
                         * TODO: implement load-after-load with different mode for big
                         *       endian
                         */
                        if (info->projs[pn_Load_X_except] == NULL
                                        || get_nodes_block(load) == get_nodes_block(pred)) {
                                ir_node *value;

                                DBG_OPT_RAR(load, pred);

                                /* the result is used */
                                if (info->projs[pn_Load_res]) {
                                        if (pred_info->projs[pn_Load_res] == NULL) {
                                                /* create a new Proj again */
                                                pred_info->projs[pn_Load_res] = new_r_Proj(pred, get_Load_mode(pred), pn_Load_res);
                                        }
                                        value = pred_info->projs[pn_Load_res];

                                        /* add a Conv if needed */
                                        if (get_Load_mode(pred) != load_mode) {
                                                value = new_r_Conv(get_nodes_block(load), value, load_mode);
                                        }

                                        exchange(info->projs[pn_Load_res], value);
                                }

                                if (info->projs[pn_Load_M])
                                        exchange(info->projs[pn_Load_M], mem);

                                /* no exception */
                                if (info->projs[pn_Load_X_except]) {
                                        ir_graph *irg = get_irn_irg(load);
                                        exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
                                        res |= CF_CHANGED;
                                }
                                if (info->projs[pn_Load_X_regular]) {
                                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                                        res |= CF_CHANGED;
                                }

                                kill_node(load);
                                reduce_adr_usage(ptr);
                                return res | DF_CHANGED;
                        }
                }

                if (is_Store(pred)) {
                        /* check if we can pass through this store */
                        ir_alias_relation rel = get_alias_relation(
                                get_Store_ptr(pred),
                                get_irn_mode(get_Store_value(pred)),
                                ptr, load_mode);
                        /* if there might be an alias, we cannot pass this Store */
                        if (rel != ir_no_alias)
                                break;
                        pred = skip_Proj(get_Store_mem(pred));
                } else if (is_Load(pred)) {
                        pred = skip_Proj(get_Load_mem(pred));
                } else if (is_Call(pred)) {
                        if (is_Call_pure(pred)) {
                                /* The called graph is at least pure, so there are no Stores
                                   in it. We can handle it like a Load and skip it. */
                                pred = skip_Proj(get_Call_mem(pred));
                        } else {
                                /* there might be Stores in the graph, stop here */
                                break;
                        }
                } else {
                        /* follow only Load chains */
                        break;
                }

                /* check for cycles */
                if (NODE_VISITED(pred_info))
                        break;
                MARK_NODE(pred_info);
        }

        if (is_Sync(pred)) {
                int i;

                /* handle all Sync predecessors */
                for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
                        res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
                        if (res)
                                return res;
                }
        }

        return res;
}  /* follow_Mem_chain */

/*
 * Check if we can replace the load by a given const from
 * the const code irg.
 */
ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c)
{
        ir_mode  *c_mode = get_irn_mode(c);
        ir_mode  *l_mode = get_Load_mode(load);
        ir_node  *block  = get_nodes_block(load);
        dbg_info *dbgi   = get_irn_dbg_info(load);
        ir_node  *res    = copy_const_value(dbgi, c, block);

        if (c_mode != l_mode) {
                /* check if the mode matches OR can be easily converted into it */
                if (is_reinterpret_cast(c_mode, l_mode)) {
                        /* copy the value from the const code irg and cast it */
                        res = new_rd_Conv(dbgi, block, res, l_mode);
                } else {
                        return NULL;
                }
        }
        return res;
}
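
/* Typical use, as in optimize_load() below (sketch):
 *
 *     ir_node *value = can_replace_load_by_const(load, c);
 *     if (value != NULL)
 *             exchange(info->projs[pn_Load_res], value);
 */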

/**
 * optimize a Load
 *
 * @param load  the Load node
 */
static unsigned optimize_load(ir_node *load)
{
        ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
        ir_node     *mem, *ptr, *value;
        ir_entity   *ent;
        long        dummy;
        unsigned    res = 0;

        /* do NOT touch volatile loads for now */
        if (get_Load_volatility(load) == volatility_is_volatile)
                return 0;

        /* the address of the load to be optimized */
        ptr = get_Load_ptr(load);

        /* The mem of the Load. Must still be returned after optimization. */
        mem = get_Load_mem(load);

        if (info->projs[pn_Load_res] == NULL
                        && info->projs[pn_Load_X_except] == NULL) {
                /* the value is never used and we don't care about exceptions, remove */
                exchange(info->projs[pn_Load_M], mem);

                if (info->projs[pn_Load_X_regular]) {
                        /* should not happen, but if it does, remove it */
                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                        res |= CF_CHANGED;
                }
                kill_node(load);
                reduce_adr_usage(ptr);
                return res | DF_CHANGED;
        }

        value = NULL;
        /* check if we can determine the entity that will be loaded */
        ent = find_constant_entity(ptr);
        if (ent != NULL
                        && get_entity_visibility(ent) != ir_visibility_external) {
                /* a static allocation that is not external: there should be NO
                 * exception when loading even if we cannot replace the load itself.
                 */

                /* no exception, clear the info field as it might be checked later again */
                if (info->projs[pn_Load_X_except]) {
                        ir_graph *irg = get_irn_irg(load);
                        exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
                        info->projs[pn_Load_X_except] = NULL;
                        res |= CF_CHANGED;
                }
                if (info->projs[pn_Load_X_regular]) {
                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                        info->projs[pn_Load_X_regular] = NULL;
                        res |= CF_CHANGED;
                }

                if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT) {
                        if (has_entity_initializer(ent)) {
                                /* new style initializer */
                                value = find_compound_ent_value(ptr);
                        }
                        if (value != NULL) {
                                ir_graph *irg = get_irn_irg(load);
                                value = can_replace_load_by_const(load, value);
                                if (value != NULL && is_Sel(ptr)) {
                                        /* the frontend has inserted masking operations after bitfield
                                         * accesses, so we might have to shift the const. */
                                        unsigned char bit_offset = get_entity_offset_bits_remainder(get_Sel_entity(ptr));
                                        if (bit_offset != 0) {
                                                if (is_Const(value)) {
                                                        ir_tarval *tv_old = get_Const_tarval(value);
                                                        ir_tarval *tv_offset = new_tarval_from_long(bit_offset, mode_Bu);
                                                        ir_tarval *tv_new = tarval_shl(tv_old, tv_offset);
                                                        value = new_r_Const(irg, tv_new);
                                                } else {
                                                        value = NULL;
                                                }
                                        }
                                }
                        }
                }
        }
        if (value != NULL) {
                /* we completely replace the load by this value */
                if (info->projs[pn_Load_X_except]) {
                        ir_graph *irg = get_irn_irg(load);
                        exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
                        info->projs[pn_Load_X_except] = NULL;
                        res |= CF_CHANGED;
                }
                if (info->projs[pn_Load_X_regular]) {
                        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                        info->projs[pn_Load_X_regular] = NULL;
                        res |= CF_CHANGED;
                }
                if (info->projs[pn_Load_M]) {
                        exchange(info->projs[pn_Load_M], mem);
                        res |= DF_CHANGED;
                }
                if (info->projs[pn_Load_res]) {
                        exchange(info->projs[pn_Load_res], value);
                        res |= DF_CHANGED;
                }
                kill_node(load);
                reduce_adr_usage(ptr);
                return res;
        }

        /* Check if the address of this Load is used more than once.
         * If not, this Load cannot be removed in any case. */
        if (get_irn_n_edges(ptr) <= 1 && get_irn_n_edges(get_base_and_offset(ptr, &dummy)) <= 1)
                return res;

        /*
         * follow the memory chain as long as there are only Loads
         * and try to replace the current Load or Store by a previous one.
         * Note that in unreachable loops it might happen that we reach the
         * Load again or fall into a cycle.
         * We break such cycles using a special visited flag.
         */
        INC_MASTER();
        res = follow_Mem_chain(load, skip_Proj(mem));
        return res;
}  /* optimize_load */

/**
 * Check whether a value of mode new_mode would completely overwrite a value
 * of mode old_mode in memory.
 */
static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
{
        return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
}  /* is_completely_overwritten */

/**
 * Check whether small is a part of large (starting at same address).
 */
static int is_partially_same(ir_node *small, ir_node *large)
{
        ir_mode *sm = get_irn_mode(small);
        ir_mode *lm = get_irn_mode(large);

        /* FIXME: Check endianness */
        return is_Conv(small) && get_Conv_op(small) == large
            && get_mode_size_bytes(sm) < get_mode_size_bytes(lm)
            && get_mode_arithmetic(sm) == irma_twos_complement
            && get_mode_arithmetic(lm) == irma_twos_complement;
}  /* is_partially_same */
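
/* Illustration (not part of the original code) for the two predicates above:
 *
 *     *(short *)p = s;        // earlier Store
 *     *(int   *)p = i;        // completely overwrites the short Store
 *
 *     *(int  *)p = i;
 *     *(char *)p = (char)i;   // partially same: a Conv of the larger value
 */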

/**
 * follow the memory chain as long as there are only Loads and alias-free Stores.
 *
 * INC_MASTER() must be called before diving in.
 */
static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr)
{
        unsigned res = 0;
        ldst_info_t *info = (ldst_info_t*)get_irn_link(store);
        ir_node *pred;
        ir_node *ptr = get_Store_ptr(store);
        ir_node *mem = get_Store_mem(store);
        ir_node *value = get_Store_value(store);
        ir_mode *mode  = get_irn_mode(value);
        ir_node *block = get_nodes_block(store);

        for (pred = curr; pred != store;) {
                ldst_info_t *pred_info = (ldst_info_t*)get_irn_link(pred);

                /*
                 * BEWARE: one might think that checking the modes is useless, because
                 * if the pointers are identical, they refer to the same object.
                 * This is only true in strongly typed languages, not in C, where the
                 * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
                 * However, if the size of the mode that is written is bigger than or
                 * equal to the size of the old one, the old value is completely
                 * overwritten and can be killed ...
                 */
                if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
                    get_nodes_block(pred) == block) {
                        /*
                         * a Store after a Store in the same Block -- a write after write.
                         */

                        /*
                         * We may remove the first Store, if the old value is completely
                         * overwritten or the old value is a part of the new value,
                         * and if it does not have an exception handler.
                         *
                         * TODO: What if both have the same exception handler?
                         */
                        if (get_Store_volatility(pred) != volatility_is_volatile
                                && !pred_info->projs[pn_Store_X_except]) {
                                ir_node *predvalue = get_Store_value(pred);
                                ir_mode *predmode  = get_irn_mode(predvalue);

                                if (is_completely_overwritten(predmode, mode)
                                        || is_partially_same(predvalue, value)) {
                                        DBG_OPT_WAW(pred, store);
                                        exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
                                        kill_node(pred);
                                        reduce_adr_usage(ptr);
                                        return DF_CHANGED;
                                }
                        }

                        /*
                         * We may remove the Store, if the old value already contains
                         * the new value, and if it does not have an exception handler.
                         *
                         * TODO: What if both have the same exception handler?
                         */
                        if (get_Store_volatility(store) != volatility_is_volatile
                                && !info->projs[pn_Store_X_except]) {
                                ir_node *predvalue = get_Store_value(pred);

                                if (is_partially_same(value, predvalue)) {
                                        DBG_OPT_WAW(pred, store);
                                        exchange(info->projs[pn_Store_M], mem);
                                        kill_node(store);
                                        reduce_adr_usage(ptr);
                                        return DF_CHANGED;
                                }
                        }
                } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
                           value == pred_info->projs[pn_Load_res]) {
                        /*
                         * a Store of a value just loaded from the same address
                         * -- a write after read.
                         * We may remove the Store, if it does not have an exception
                         * handler.
                         */
                        if (! info->projs[pn_Store_X_except]) {
                                DBG_OPT_WAR(store, pred);
                                exchange(info->projs[pn_Store_M], mem);
                                kill_node(store);
                                reduce_adr_usage(ptr);
                                return DF_CHANGED;
                        }
                }

                if (is_Store(pred)) {
                        /* check if we can pass through this store */
                        ir_alias_relation rel = get_alias_relation(
                                get_Store_ptr(pred),
                                get_irn_mode(get_Store_value(pred)),
                                ptr, mode);
                        /* if there might be an alias, we cannot pass this Store */
                        if (rel != ir_no_alias)
                                break;
                        pred = skip_Proj(get_Store_mem(pred));
                } else if (is_Load(pred)) {
                        ir_alias_relation rel = get_alias_relation(
                                get_Load_ptr(pred), get_Load_mode(pred),
                                ptr, mode);
                        if (rel != ir_no_alias)
                                break;

                        pred = skip_Proj(get_Load_mem(pred));
                } else {
                        /* follow only Load chains */
                        break;
                }

                /* check for cycles */
                if (NODE_VISITED(pred_info))
                        break;
                MARK_NODE(pred_info);
        }

        if (is_Sync(pred)) {
                int i;

                /* handle all Sync predecessors */
                for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
                        res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
                        if (res)
                                break;
                }
        }
        return res;
}  /* follow_Mem_chain_for_Store */

/** find entity used as base for an address calculation */
static ir_entity *find_entity(ir_node *ptr)
{
        switch (get_irn_opcode(ptr)) {
        case iro_SymConst:
                return get_SymConst_entity(ptr);
        case iro_Sel: {
                ir_node *pred = get_Sel_ptr(ptr);
                if (get_irg_frame(get_irn_irg(ptr)) == pred)
                        return get_Sel_entity(ptr);

                return find_entity(pred);
        }
        case iro_Sub:
        case iro_Add: {
                ir_node *left = get_binop_left(ptr);
                ir_node *right;
                if (mode_is_reference(get_irn_mode(left)))
                        return find_entity(left);
                right = get_binop_right(ptr);
                if (mode_is_reference(get_irn_mode(right)))
                        return find_entity(right);
                return NULL;
        }
        default:
                return NULL;
        }
}
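
/* Illustration (not part of the original code): for an address such as
 * Add(Sel(frame, local), Const 8) the reference-mode operand is followed,
 * so find_entity() returns the frame entity local; if neither operand has a
 * reference mode, NULL is returned. */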
1252
1253 /**
1254  * optimize a Store
1255  *
1256  * @param store  the Store node
1257  */
1258 static unsigned optimize_store(ir_node *store)
1259 {
1260         ir_node   *ptr;
1261         ir_node   *mem;
1262         ir_entity *entity;
1263
1264         if (get_Store_volatility(store) == volatility_is_volatile)
1265                 return 0;
1266
1267         ptr    = get_Store_ptr(store);
1268         entity = find_entity(ptr);
1269
1270         /* a store to an entity which is never read is unnecessary */
1271         if (entity != NULL && !(get_entity_usage(entity) & ir_usage_read)) {
1272                 ldst_info_t *info = (ldst_info_t*)get_irn_link(store);
1273                 if (info->projs[pn_Store_X_except] == NULL) {
1274                         DB((dbg, LEVEL_1, "  Killing useless %+F to never read entity %+F\n", store, entity));
1275                         exchange(info->projs[pn_Store_M], get_Store_mem(store));
1276                         kill_node(store);
1277                         reduce_adr_usage(ptr);
1278                         return DF_CHANGED;
1279                 }
1280         }
1281
1282         /* Check if the address of this Store is used more than once.
1283          * If not, this Store cannot be removed in any case. */
1284         if (get_irn_n_edges(ptr) <= 1)
1285                 return 0;
1286
1287         mem = get_Store_mem(store);
1288
1289         /* follow the memory chain as long as there are only Loads */
1290         INC_MASTER();
1291
1292         return follow_Mem_chain_for_Store(store, skip_Proj(mem));
1293 }  /* optimize_store */
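/* A source-level sketch of the entity-usage case above (illustrative):
 *     static int g_cache;              // never read anywhere in the program
 *     void f(void) { g_cache = 42; }
 * Once entity usage analysis proves ir_usage_read is absent for g_cache and
 * the Store has no exception Proj, the Store is removed entirely. */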
1294
1295 /** Check if a node has more than one real user. Keepalive edges do not count
1296  * as real users. */
1297 static bool has_multiple_users(const ir_node *node)
1298 {
1299         unsigned real_users = 0;
1300         foreach_out_edge(node, edge) {
1301                 ir_node *user = get_edge_src_irn(edge);
1302                 if (is_End(user))
1303                         continue;
1304                 ++real_users;
1305                 if (real_users > 1)
1306                         return true;
1307         }
1308         return false;
1309 }
1310
1311 /**
1312  * walker, optimizes Phi after Stores to identical places:
1313  * Does the following optimization:
1314  * @verbatim
1315  *
1316  *   val1   val2   val3          val1  val2  val3
1317  *    |      |      |               \    |    /
1318  *  Store  Store  Store              \   |   /
1319  *      \    |    /                   PhiData
1320  *       \   |   /                       |
1321  *        \  |  /                      Store
1322  *          PhiM
1323  *
1324  * @endverbatim
1325  * This reduces the number of stores and allows for predicated execution.
1326  * Moves Stores towards the end of a function, which may be bad.
1327  *
1328  * This is only possible if the predecessor blocks have only one successor.
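 *
 * A source-level sketch (illustrative):
 *     if (c) *p = a; else *p = b;
 * becomes a data Phi followed by a single Store, roughly
 *     *p = c ? a : b;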
1329  */
1330 static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
1331 {
1332         int i, n;
1333         ir_node *store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
1334 #ifdef DO_CACHEOPT
1335         ir_node *old_store;
1336 #endif
1337         ir_mode *mode;
1338         ir_node **inM, **inD, **projMs;
1339         int *idx;
1340         dbg_info *db = NULL;
1341         ldst_info_t *info;
1342         block_info_t *bl_info;
1343         unsigned res = 0;
1344
1345         /* Must be a memory Phi */
1346         if (get_irn_mode(phi) != mode_M)
1347                 return 0;
1348
1349         n = get_Phi_n_preds(phi);
1350         if (n <= 0)
1351                 return 0;
1352
1353         /* the memory Proj must have only one user: this Phi */
1354         projM = get_Phi_pred(phi, 0);
1355         if (has_multiple_users(projM))
1356                 return 0;
1357
1358         store = skip_Proj(projM);
1359 #ifdef DO_CACHEOPT
1360         old_store = store;
1361 #endif
1362         if (!is_Store(store))
1363                 return 0;
1364
1365         block = get_nodes_block(store);
1366
1367         /* check if the block is post-dominated by the Phi block
1368            and has no exception exit */
1369         bl_info = (block_info_t*)get_irn_link(block);
1370         if (bl_info->flags & BLOCK_HAS_EXC)
1371                 return 0;
1372
1373         phi_block = get_nodes_block(phi);
1374         if (! block_strictly_postdominates(phi_block, block))
1375                 return 0;
1376
1377         /* this is the address of the store */
1378         ptr  = get_Store_ptr(store);
1379         mode = get_irn_mode(get_Store_value(store));
1380         info = (ldst_info_t*)get_irn_link(store);
1381         exc  = info->exc_block;
1382
1383         for (i = 1; i < n; ++i) {
1384                 ir_node *pred = get_Phi_pred(phi, i);
1385
1386                 if (has_multiple_users(pred))
1387                         return 0;
1388
1389                 pred = skip_Proj(pred);
1390                 if (!is_Store(pred))
1391                         return 0;
1392
1393                 if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
1394                         return 0;
1395
1396                 info = (ldst_info_t*)get_irn_link(pred);
1397
1398                 /* check if all Stores have the same exception flow */
1399                 if (exc != info->exc_block)
1400                         return 0;
1401
1402                 block = get_nodes_block(pred);
1403
1404                 /* check if the block is post-dominated by the Phi block
1405                    and has no exception exit. Note that block must be different from
1406                    the Phi block, else we would move a Store from the end of a block
1407                    to its start... */
1408                 bl_info = (block_info_t*)get_irn_link(block);
1409                 if (bl_info->flags & BLOCK_HAS_EXC)
1410                         return 0;
1411                 if (block == phi_block || ! block_postdominates(phi_block, block))
1412                         return 0;
1413         }
1414
1415         /*
1416          * OK, at this point we know that all predecessors of the Phi are
1417          * Stores to the same address with the same mode. That means that
1418          * whatever path we take into the block of the Phi, we execute a Store.
1419          * So we can move the Store into the current block:
1420          *
1421          *   val1    val2    val3          val1  val2  val3
1422          *    |       |       |               \    |    /
1423          * | Str | | Str | | Str |             \   |   /
1424          *      \     |     /                   PhiData
1425          *       \    |    /                       |
1426          *        \   |   /                       Str
1427          *           PhiM
1428          *
1429          * This is only allowed if the predecessor blocks have only one successor.
1430          */
1431
1432         NEW_ARR_A(ir_node *, projMs, n);
1433         NEW_ARR_A(ir_node *, inM, n);
1434         NEW_ARR_A(ir_node *, inD, n);
1435         NEW_ARR_A(int, idx, n);
1436
1437         /* Prepare: Collect all Store nodes.  We must do this
1438            first because we otherwise may lose a Store when exchanging its
1439            memory Proj.
1440          */
1441         for (i = n - 1; i >= 0; --i) {
1442                 ir_node *store;
1443
1444                 projMs[i] = get_Phi_pred(phi, i);
1445                 assert(is_Proj(projMs[i]));
1446
1447                 store = get_Proj_pred(projMs[i]);
1448                 info  = (ldst_info_t*)get_irn_link(store);
1449
1450                 inM[i] = get_Store_mem(store);
1451                 inD[i] = get_Store_value(store);
1452                 idx[i] = info->exc_idx;
1453         }
1454         block = get_nodes_block(phi);
1455
1456         /* second step: create a new memory Phi */
1457         phiM = new_rd_Phi(get_irn_dbg_info(phi), block, n, inM, mode_M);
1458
1459         /* third step: create a new data Phi */
1460         phiD = new_rd_Phi(get_irn_dbg_info(phi), block, n, inD, mode);
1461
1462         /* rewire memory and kill the node */
1463         for (i = n - 1; i >= 0; --i) {
1464                 ir_node *proj  = projMs[i];
1465
1466                 if (is_Proj(proj)) {
1467                         ir_node *store = get_Proj_pred(proj);
1468                         exchange(proj, inM[i]);
1469                         kill_node(store);
1470                 }
1471         }
1472
1473         /* fourth step: create the Store */
1474         store = new_rd_Store(db, block, phiM, ptr, phiD, cons_none);
1475 #ifdef DO_CACHEOPT
1476         co_set_irn_name(store, co_get_irn_ident(old_store));
1477 #endif
1478
1479         projM = new_rd_Proj(NULL, store, mode_M, pn_Store_M);
1480
1481         info = get_ldst_info(store, &wenv->obst);
1482         info->projs[pn_Store_M] = projM;
1483
1484         /* fifth step: repair exception flow */
1485         if (exc) {
1486                 ir_node *projX = new_rd_Proj(NULL, store, mode_X, pn_Store_X_except);
1487
1488                 info->projs[pn_Store_X_except] = projX;
1489                 info->exc_block                = exc;
1490                 info->exc_idx                  = idx[0];
1491
1492                 for (i = 0; i < n; ++i) {
1493                         set_Block_cfgpred(exc, idx[i], projX);
1494                 }
1495
1496                 if (n > 1) {
1497                         /* note: the exception block can now be optimized further, as some of its inputs became identical */
1498                 }
1499
1500                 res |= CF_CHANGED;
1501         }
1502
1503         /* sixth step: replace old Phi */
1504         exchange(phi, projM);
1505
1506         return res | DF_CHANGED;
1507 }  /* optimize_phi */
1508
1509 static int optimize_conv_load(ir_node *conv)
1510 {
1511         ir_node *op = get_Conv_op(conv);
1512         if (!is_Proj(op))
1513                 return 0;
1514         /* only do it if we are the only user (otherwise the risk is too
1515          * great that we end up with 2 Loads instead of one). */
1516         if (has_multiple_users(op))
1517                 return 0;
1518         ir_node *load = get_Proj_pred(op);
1519         if (!is_Load(load))
1520                 return 0;
1521
1522         /* shrink the mode of the Load if possible */
1523         ir_mode *mode      = get_irn_mode(conv);
1524         ir_mode *load_mode = get_Load_mode(load);
1525         int      bits_diff
1526                 = get_mode_size_bits(load_mode) - get_mode_size_bits(mode);
1527         if (mode_is_float(load_mode) || mode_is_float(mode) || bits_diff < 0)
1528                 return 0;
1529
1530         if (be_get_backend_param()->byte_order_big_endian) {
1531                 if (bits_diff % 8 != 0)
1532                         return 0;
1533                 ir_graph *irg   = get_irn_irg(conv);
1534                 ir_node  *ptr   = get_Load_ptr(load);
1535                 ir_mode  *mode  = get_irn_mode(ptr);
1536                 ir_node  *delta = new_r_Const_long(irg, mode, bits_diff/8);
1537                 ir_node  *block = get_nodes_block(load);
1538                 ir_node  *add   = new_r_Add(block, ptr, delta, mode);
1539                 set_Load_ptr(load, add);
1540         }
1541         set_Load_mode(load, mode);
1542         set_irn_mode(op, mode);
1543         exchange(conv, op);
1544         return DF_CHANGED;
1545 }
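/* A source-level sketch of the mode shrinking above (illustrative):
 *     int  x = *ip;        // 32-bit Load
 *     char c = (char)x;    // Conv to 8 bits, sole user of the Load result
 * The Load is narrowed to a byte Load; on big-endian targets the address is
 * additionally advanced by bits_diff/8 = (32-8)/8 = 3 bytes first. */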
1546
1547 /**
1548  * walker, do the optimizations
1549  */
1550 static void do_load_store_optimize(ir_node *n, void *env)
1551 {
1552         walk_env_t *wenv = (walk_env_t*)env;
1553
1554         switch (get_irn_opcode(n)) {
1555
1556         case iro_Load:
1557                 wenv->changes |= optimize_load(n);
1558                 break;
1559
1560         case iro_Store:
1561                 wenv->changes |= optimize_store(n);
1562                 break;
1563
1564         case iro_Phi:
1565                 wenv->changes |= optimize_phi(n, wenv);
1566                 break;
1567
1568         case iro_Conv:
1569                 wenv->changes |= optimize_conv_load(n);
1570                 break;
1571
1572         default:
1573                 break;
1574         }
1575 }  /* do_load_store_optimize */
1576
1577 /** A scc. */
1578 typedef struct scc {
1579         ir_node *head;      /**< the head of the list */
1580 } scc;
1581
1582 /** A node entry. */
1583 typedef struct node_entry {
1584         unsigned DFSnum;    /**< the DFS number of this node */
1585         unsigned low;       /**< the low number of this node */
1586         int      in_stack;  /**< flag, set if the node is on the stack */
1587         ir_node  *next;     /**< link to the next node in the same scc */
1588         scc      *pscc;     /**< the scc of this node */
1589         unsigned POnum;     /**< the post order number for blocks */
1590 } node_entry;
1591
1592 /** A loop entry. */
1593 typedef struct loop_env {
1594         ir_nodehashmap_t map;
1595         struct obstack   obst;
1596         ir_node          **stack;      /**< the node stack */
1597         size_t           tos;          /**< top-of-stack index */
1598         unsigned         nextDFSnum;   /**< the next DFS number to assign */
1599         unsigned         POnum;        /**< current post order number */
1600
1601         unsigned         changes;      /**< a bitmask of graph changes */
1602 } loop_env;
1603
1604 /**
1605  * Gets the node_entry of a node.
1606  */
1607 static node_entry *get_irn_ne(ir_node *irn, loop_env *env)
1608 {
1609         node_entry *e = ir_nodehashmap_get(node_entry, &env->map, irn);
1610
1611         if (e == NULL) {
1612                 e = OALLOC(&env->obst, node_entry);
1613                 memset(e, 0, sizeof(*e));
1614                 ir_nodehashmap_insert(&env->map, irn, e);
1615         }
1616         return e;
1617 }  /* get_irn_ne */
1618
1619 /**
1620  * Push a node onto the stack.
1621  *
1622  * @param env   the loop environment
1623  * @param n     the node to push
1624  */
1625 static void push(loop_env *env, ir_node *n)
1626 {
1627         node_entry *e;
1628
1629         if (env->tos == ARR_LEN(env->stack)) {
1630                 size_t nlen = ARR_LEN(env->stack) * 2;
1631                 ARR_RESIZE(ir_node *, env->stack, nlen);
1632         }
1633         env->stack[env->tos++] = n;
1634         e = get_irn_ne(n, env);
1635         e->in_stack = 1;
1636 }  /* push */
1637
1638 /**
1639  * pop a node from the stack
1640  *
1641  * @param env   the loop environment
1642  *
1643  * @return  The topmost node
1644  */
1645 static ir_node *pop(loop_env *env)
1646 {
1647         ir_node *n = env->stack[--env->tos];
1648         node_entry *e = get_irn_ne(n, env);
1649
1650         e->in_stack = 0;
1651         return n;
1652 }  /* pop */
1653
1654 /**
1655  * Check if irn is a region constant.
1656  * The block of irn must strictly dominate the header block.
1657  *
1658  * @param irn           the node to check
1659  * @param header_block  the header block of the induction variable
1660  */
1661 static int is_rc(ir_node *irn, ir_node *header_block)
1662 {
1663         ir_node *block = get_nodes_block(irn);
1664
1665         return (block != header_block) && block_dominates(block, header_block);
1666 }  /* is_rc */
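/* e.g. a value computed in a block before the loop is entered: its block
 * strictly dominates the loop header, so it is invariant inside the loop */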
1667
1668 typedef struct phi_entry phi_entry;
1669 struct phi_entry {
1670         ir_node   *phi;    /**< A phi with a region const memory. */
1671         int       pos;     /**< The position of the region const memory */
1672         ir_node   *load;   /**< the newly created load for this phi */
1673         phi_entry *next;
1674 };
1675
1676 /**
1677  * An entry in the avail set.
1678  */
1679 typedef struct avail_entry_t {
1680         ir_node *ptr;   /**< the address pointer */
1681         ir_mode *mode;  /**< the load mode */
1682         ir_node *load;  /**< the associated Load */
1683 } avail_entry_t;
1684
1685 /**
1686  * Compare two avail entries.
1687  */
1688 static int cmp_avail_entry(const void *elt, const void *key, size_t size)
1689 {
1690         const avail_entry_t *a = (const avail_entry_t*)elt;
1691         const avail_entry_t *b = (const avail_entry_t*)key;
1692         (void) size;
1693
1694         return a->ptr != b->ptr || a->mode != b->mode;
1695 }  /* cmp_avail_entry */
1696
1697 /**
1698  * Calculate the hash value of an avail entry.
1699  */
1700 static unsigned hash_cache_entry(const avail_entry_t *entry)
1701 {
1702         return get_irn_idx(entry->ptr) * 9 + hash_ptr(entry->mode);
1703 }  /* hash_cache_entry */
1704
1705 /**
1706  * Move Loads out of loops if possible.
1707  *
1708  * @param pscc   the loop described by an SCC
1709  * @param env    the loop environment
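 *
 * A source-level sketch (illustrative): in
 *     while (cond) { sum += g; }
 * where g is a global and no Store inside the loop may alias it, the Load
 * of g is created once in the loop's predecessor block instead.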
1710  */
1711 static void move_loads_out_of_loops(scc *pscc, loop_env *env)
1712 {
1713         ir_node   *phi, *load, *next, *other, *next_other;
1714         int       j;
1715         phi_entry *phi_list = NULL;
1716         set       *avail;
1717
1718         /* collect all outer memories */
1719         for (phi = pscc->head; phi != NULL; phi = next) {
1720                 node_entry *ne = get_irn_ne(phi, env);
1721                 next = ne->next;
1722
1723                 /* check all memory Phi's */
1724                 if (! is_Phi(phi))
1725                         continue;
1726
1727         assert(get_irn_mode(phi) == mode_M && "DFS returned a non-memory Phi");
1728
1729                 for (j = get_irn_arity(phi) - 1; j >= 0; --j) {
1730                         ir_node    *pred = get_irn_n(phi, j);
1731                         node_entry *pe   = get_irn_ne(pred, env);
1732
1733                         if (pe->pscc != ne->pscc) {
1734                                 /* not in the same SCC, so it is a region const */
1735                                 phi_entry *pred_entry = OALLOC(&env->obst, phi_entry);
1736
1737                                 pred_entry->phi  = phi;
1738                                 pred_entry->pos  = j;
1739                                 pred_entry->next = phi_list;
1740                                 phi_list = pred_entry;
1741                         }
1742                 }
1743         }
1744         /* no Phis no fun */
1745         assert(phi_list != NULL && "DFS found a loop without Phi");
1746
1747         /* for now, we cannot handle more than one input (only reducible cf) */
1748         if (phi_list->next != NULL)
1749                 return;
1750
1751         avail = new_set(cmp_avail_entry, 8);
1752
1753         for (load = pscc->head; load; load = next) {
1754                 ir_mode *load_mode;
1755                 node_entry *ne = get_irn_ne(load, env);
1756                 next = ne->next;
1757
1758                 if (is_Load(load)) {
1759                         ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
1760                         ir_node     *ptr = get_Load_ptr(load);
1761
1762                         /* for now, we cannot handle Loads with exceptions */
1763                         if (info->projs[pn_Load_res] == NULL || info->projs[pn_Load_X_regular] != NULL || info->projs[pn_Load_X_except] != NULL)
1764                                 continue;
1765
1766                         /* for now, we can only move Load(Global) */
1767                         if (! is_SymConst_addr_ent(ptr))
1768                                 continue;
1769                         load_mode = get_Load_mode(load);
1770                         for (other = pscc->head; other != NULL; other = next_other) {
1771                                 node_entry *ne = get_irn_ne(other, env);
1772                                 next_other = ne->next;
1773
1774                                 if (is_Store(other)) {
1775                                         ir_alias_relation rel = get_alias_relation(
1776                                                 get_Store_ptr(other),
1777                                                 get_irn_mode(get_Store_value(other)),
1778                                                 ptr, load_mode);
1779                                         /* if there might be an alias, we cannot move the Load past this Store */
1780                                         if (rel != ir_no_alias)
1781                                                 break;
1782                                 }
1783                                 /* only Phis and pure Calls are allowed here, so ignore them */
1784                         }
1785                         if (other == NULL) {
1786                                 ldst_info_t *ninfo = NULL;
1787                                 phi_entry   *pe;
1788                                 dbg_info    *db;
1789
1790                                 /* yep, no aliasing Store found, Load can be moved */
1791                                 DB((dbg, LEVEL_1, "  Found a Load that could be moved: %+F\n", load));
1792
1793                                 db   = get_irn_dbg_info(load);
1794                                 for (pe = phi_list; pe != NULL; pe = pe->next) {
1795                                         int     pos   = pe->pos;
1796                                         ir_node *phi  = pe->phi;
1797                                         ir_node *blk  = get_nodes_block(phi);
1798                                         ir_node *pred = get_Block_cfgpred_block(blk, pos);
1799                                         ir_node *irn, *mem;
1800                                         avail_entry_t entry, *res;
1801
1802                                         entry.ptr  = ptr;
1803                                         entry.mode = load_mode;
1804                                         res = set_find(avail_entry_t, avail, &entry, sizeof(entry), hash_cache_entry(&entry));
1805                                         if (res != NULL) {
1806                                                 irn = res->load;
1807                                         } else {
1808                                                 irn = new_rd_Load(db, pred, get_Phi_pred(phi, pos), ptr, load_mode, cons_none);
1809                                                 entry.load = irn;
1810                                                 (void)set_insert(avail_entry_t, avail, &entry, sizeof(entry), hash_cache_entry(&entry));
1811                                                 DB((dbg, LEVEL_1, "  Created %+F in %+F\n", irn, pred));
1812                                         }
1813                                         pe->load = irn;
1814                                         ninfo = get_ldst_info(irn, &env->obst);
1815
1816                                         ninfo->projs[pn_Load_M] = mem = new_r_Proj(irn, mode_M, pn_Load_M);
1817                                         if (res == NULL) {
1818                                                 /* only rewire the Phi pred for a newly created Load:
1819                                                  * if irn came from the cache, there might already be
1820                                                  * other Loads between phi and irn. */
1821                                                 set_Phi_pred(phi, pos, mem);
1822                                         }
1823
1824                                         ninfo->projs[pn_Load_res] = new_r_Proj(irn, load_mode, pn_Load_res);
1825                                 }
1826
1827                                 /* now kill the old Load */
1828                                 exchange(info->projs[pn_Load_M], get_Load_mem(load));
1829                                 exchange(info->projs[pn_Load_res], ninfo->projs[pn_Load_res]);
1830
1831                                 env->changes |= DF_CHANGED;
1832                         }
1833                 }
1834         }
1835         del_set(avail);
1836 }  /* move_loads_out_of_loops */
1837
1838 /**
1839  * Process a loop SCC.
1840  *
1841  * @param pscc  the SCC
1842  * @param env   the loop environment
1843  */
1844 static void process_loop(scc *pscc, loop_env *env)
1845 {
1846         ir_node *irn, *next, *header = NULL;
1847         node_entry *b, *h = NULL;
1848         int j, only_phi, num_outside, process = 0;
1849         ir_node *out_rc;
1850
1851         /* find the header block for this scc */
1852         for (irn = pscc->head; irn; irn = next) {
1853                 node_entry *e = get_irn_ne(irn, env);
1854                 ir_node *block = get_nodes_block(irn);
1855
1856                 next = e->next;
1857                 b = get_irn_ne(block, env);
1858
1859                 if (header != NULL) {
1860                         if (h->POnum < b->POnum) {
1861                                 header = block;
1862                                 h      = b;
1863                         }
1864                 } else {
1865                         header = block;
1866                         h      = b;
1867                 }
1868         }
1869
1870         /* check if this scc contains only Phi, Load or Store nodes */
1871         only_phi    = 1;
1872         num_outside = 0;
1873         out_rc      = NULL;
1874         for (irn = pscc->head; irn; irn = next) {
1875                 node_entry *e = get_irn_ne(irn, env);
1876
1877                 next = e->next;
1878                 switch (get_irn_opcode(irn)) {
1879                 case iro_Call:
1880                         if (is_Call_pure(irn)) {
1881                                 /* pure calls can be treated like loads */
1882                                 only_phi = 0;
1883                                 break;
1884                         }
1885                         /* non-pure calls must be handled like may-alias Stores */
1886                         goto fail;
1887                 case iro_CopyB:
1888                         /* cannot handle CopyB yet */
1889                         goto fail;
1890                 case iro_Load:
1891                         process = 1;
1892                         if (get_Load_volatility(irn) == volatility_is_volatile) {
1893                                 /* cannot handle loops with volatile Loads */
1894                                 goto fail;
1895                         }
1896                         only_phi = 0;
1897                         break;
1898                 case iro_Store:
1899                         if (get_Store_volatility(irn) == volatility_is_volatile) {
1900                                 /* cannot handle loops with volatile Stores */
1901                                 goto fail;
1902                         }
1903                         only_phi = 0;
1904                         break;
1905                 case iro_Phi:
1906                         for (j = get_irn_arity(irn) - 1; j >= 0; --j) {
1907                                 ir_node *pred  = get_irn_n(irn, j);
1908                                 node_entry *pe = get_irn_ne(pred, env);
1909
1910                                 if (pe->pscc != e->pscc) {
1911                                         /* not in the same SCC, must be a region const */
1912                                         if (! is_rc(pred, header)) {
1913                                                 /* not a memory loop */
1914                                                 goto fail;
1915                                         }
1916                                         if (out_rc == NULL) {
1917                                                 /* first region constant */
1918                                                 out_rc = pred;
1919                                                 ++num_outside;
1920                                         } else if (out_rc != pred) {
1921                                                 /* another region constant */
1922                                                 ++num_outside;
1923                                         }
1924                                 }
1925                         }
1926                         break;
1927                 default:
1928                         only_phi = 0;
1929                         break;
1930                 }
1931         }
1932         if (! process)
1933                 goto fail;
1934
1935         /* found a memory loop */
1936         DB((dbg, LEVEL_2, "  Found a memory loop:\n  "));
1937         if (only_phi && num_outside == 1) {
1938                 /* a phi cycle with only one real predecessor can be collapsed */
1939                 DB((dbg, LEVEL_2, "  Found a USELESS Phi cycle:\n  "));
1940
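                /* e.g. a loop whose body touches no memory: every Phi of the
                 * memory cycle is fed (transitively) only by the single region
                 * constant out_rc, so each one can be replaced by it */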
1941                 for (irn = pscc->head; irn; irn = next) {
1942                         node_entry *e = get_irn_ne(irn, env);
1943                         next = e->next;
1944                         exchange(irn, out_rc);
1945                 }
1946                 env->changes |= DF_CHANGED;
1947                 return;
1948         }
1949
1950 #ifdef DEBUG_libfirm
1951         for (irn = pscc->head; irn; irn = next) {
1952                 node_entry *e = get_irn_ne(irn, env);
1953                 next = e->next;
1954                 DB((dbg, LEVEL_2, " %+F,", irn));
1955         }
1956         DB((dbg, LEVEL_2, "\n"));
1957 #endif
1958         move_loads_out_of_loops(pscc, env);
1959
1960 fail:
1961         ;
1962 }  /* process_loop */
1963
1964 /**
1965  * Process a SCC.
1966  *
1967  * @param pscc  the SCC
1968  * @param env   the loop environment
1969  */
1970 static void process_scc(scc *pscc, loop_env *env)
1971 {
1972         ir_node *head = pscc->head;
1973         node_entry *e = get_irn_ne(head, env);
1974
1975 #ifdef DEBUG_libfirm
1976         {
1977                 ir_node *irn, *next;
1978
1979                 DB((dbg, LEVEL_4, " SCC at %p:\n ", pscc));
1980                 for (irn = pscc->head; irn; irn = next) {
1981                         node_entry *e = get_irn_ne(irn, env);
1982
1983                         next = e->next;
1984
1985                         DB((dbg, LEVEL_4, " %+F,", irn));
1986                 }
1987                 DB((dbg, LEVEL_4, "\n"));
1988         }
1989 #endif
1990
1991         if (e->next != NULL) {
1992                 /* this SCC has more than one member */
1993                 process_loop(pscc, env);
1994         }
1995 }  /* process_scc */
1996
1997 /**
1998  * Do Tarjan's SCC algorithm and drive load/store optimization.
1999  *
2000  * @param irn  start at this node
2001  * @param env  the loop environment
2002  */
2003 static void dfs(ir_node *irn, loop_env *env)
2004 {
2005         int i, n;
2006         node_entry *node = get_irn_ne(irn, env);
2007
2008         mark_irn_visited(irn);
2009
2010         node->DFSnum = env->nextDFSnum++;
2011         node->low    = node->DFSnum;
2012         push(env, irn);
2013
2014         /* handle memory predecessors, updating low-link values (Tarjan) */
2015         if (is_Phi(irn) || is_Sync(irn)) {
2016                 n = get_irn_arity(irn);
2017                 for (i = 0; i < n; ++i) {
2018                         ir_node *pred = get_irn_n(irn, i);
2019                         node_entry *o = get_irn_ne(pred, env);
2020
2021                         if (!irn_visited(pred)) {
2022                                 dfs(pred, env);
2023                                 node->low = MIN(node->low, o->low);
2024                         }
2025                         if (o->DFSnum < node->DFSnum && o->in_stack)
2026                                 node->low = MIN(o->DFSnum, node->low);
2027                 }
2028         } else if (is_fragile_op(irn)) {
2029                 ir_node *pred = get_memop_mem(irn);
2030                 node_entry *o = get_irn_ne(pred, env);
2031
2032                 if (!irn_visited(pred)) {
2033                         dfs(pred, env);
2034                         node->low = MIN(node->low, o->low);
2035                 }
2036                 if (o->DFSnum < node->DFSnum && o->in_stack)
2037                         node->low = MIN(o->DFSnum, node->low);
2038         } else if (is_Proj(irn)) {
2039                 ir_node *pred = get_Proj_pred(irn);
2040                 node_entry *o = get_irn_ne(pred, env);
2041
2042                 if (!irn_visited(pred)) {
2043                         dfs(pred, env);
2044                         node->low = MIN(node->low, o->low);
2045                 }
2046                 if (o->DFSnum < node->DFSnum && o->in_stack)
2047                         node->low = MIN(o->DFSnum, node->low);
2048         } else {
2049                 /* ignore all other predecessors: we follow only memory
2050                  * edges through Phi, Sync, fragile ops and Proj */
2051         }
2052
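        /* Tarjan: irn is the root of an SCC iff its low-link equals its own
         * DFS number; in that case pop all SCC members off the stack */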
2053         if (node->low == node->DFSnum) {
2054                 scc *pscc = OALLOC(&env->obst, scc);
2055                 ir_node *x;
2056
2057                 pscc->head = NULL;
2058                 do {
2059                         node_entry *e;
2060
2061                         x = pop(env);
2062                         e = get_irn_ne(x, env);
2063                         e->pscc    = pscc;
2064                         e->next    = pscc->head;
2065                         pscc->head = x;
2066                 } while (x != irn);
2067
2068                 process_scc(pscc, env);
2069         }
2070 }  /* dfs */
2071
2072 /**
2073  * Do the DFS on the memory edges of a graph.
2074  *
2075  * @param irg  the graph to process
2076  * @param env  the loop environment
2077  */
2078 static void do_dfs(ir_graph *irg, loop_env *env)
2079 {
2080         ir_node  *endblk, *end;
2081         int      i;
2082
2083         inc_irg_visited(irg);
2084
2085         /* visit all memory nodes */
2086         endblk = get_irg_end_block(irg);
2087         for (i = get_Block_n_cfgpreds(endblk) - 1; i >= 0; --i) {
2088                 ir_node *pred = get_Block_cfgpred(endblk, i);
2089
2090                 pred = skip_Proj(pred);
2091                 if (is_Return(pred)) {
2092                         dfs(get_Return_mem(pred), env);
2093                 } else if (is_Raise(pred)) {
2094                         dfs(get_Raise_mem(pred), env);
2095                 } else if (is_fragile_op(pred)) {
2096                         dfs(get_memop_mem(pred), env);
2097                 } else if (is_Bad(pred)) {
2098                         /* ignore non-optimized block predecessor */
2099                 } else {
2100                         assert(0 && "Unknown EndBlock predecessor");
2101                 }
2102         }
2103
2104         /* visit the keep-alives */
2105         end = get_irg_end(irg);
2106         for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
2107                 ir_node *ka = get_End_keepalive(end, i);
2108
2109                 if (is_Phi(ka) && !irn_visited(ka))
2110                         dfs(ka, env);
2111         }
2112 }  /* do_dfs */
2113
2114 /**
2115  * Optimize Loads/Stores in loops.
2116  *
2117  * @param irg  the graph
2118  */
2119 static int optimize_loops(ir_graph *irg)
2120 {
2121         loop_env env;
2122
2123         env.stack         = NEW_ARR_F(ir_node *, 128);
2124         env.tos           = 0;
2125         env.nextDFSnum    = 0;
2126         env.POnum         = 0;
2127         env.changes       = 0;
2128         ir_nodehashmap_init(&env.map);
2129         obstack_init(&env.obst);
2130
2131         /* calculate the SCCs and drive loop optimization. */
2132         do_dfs(irg, &env);
2133
2134         DEL_ARR_F(env.stack);
2135         obstack_free(&env.obst, NULL);
2136         ir_nodehashmap_destroy(&env.map);
2137
2138         return env.changes;
2139 }  /* optimize_loops */
2140
2141 /*
2142  * do the load store optimization
2143  */
2144 void optimize_load_store(ir_graph *irg)
2145 {
2146         walk_env_t env;
2147
2148         assure_irg_properties(irg,
2149                 IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE
2150                 | IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES
2151                 | IR_GRAPH_PROPERTY_NO_CRITICAL_EDGES
2152                 | IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
2153                 | IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
2154
2155         FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");
2156
2157         assert(get_irg_pinned(irg) != op_pin_state_floats &&
2158                 "LoadStore optimization needs pinned graph");
2159
2160         if (get_opt_alias_analysis()) {
2161                 assure_irp_globals_entity_usage_computed();
2162         }
2163
2164         obstack_init(&env.obst);
2165         env.changes = 0;
2166
2167         /* init the links, then collect Loads/Stores/Proj's in lists */
2168         master_visited = 0;
2169         irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);
2170
2171         /* now we have collected enough information, optimize */
2172         irg_walk_graph(irg, NULL, do_load_store_optimize, &env);
2173
2174         env.changes |= optimize_loops(irg);
2175
2176         obstack_free(&env.obst, NULL);
2177
2178         confirm_irg_properties(irg,
2179                 env.changes
2180                 ? env.changes & CF_CHANGED
2181                         ? IR_GRAPH_PROPERTIES_NONE
2182                         : IR_GRAPH_PROPERTIES_CONTROL_FLOW
2183                 : IR_GRAPH_PROPERTIES_ALL);
2184 }
2185
2186 ir_graph_pass_t *optimize_load_store_pass(const char *name)
2187 {
2188         return def_graph_pass(name ? name : "ldst", optimize_load_store);
2189 }  /* optimize_load_store_pass */
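/* Typical usage (illustrative): either run the optimization directly,
 *     optimize_load_store(irg);
 * or schedule it via the pass manager,
 *     ir_graph_pass_t *pass = optimize_load_store_pass(NULL);
 */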