ac550616d1397f1cb99c05a554a67f343a834a3d
[libfirm] / ir / opt / opt_ldst.c
1 /*
2  * Copyright (C) 1995-2011 University of Karlsruhe.  All right reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License.
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief   Dataflow driven Load/Store optimizations, uses some ideas from
23  *          VanDrunen's LEPRE
24  * @author  Michael Beck
25  * @version $Id$
26  */
27 #include "config.h"
28
29 #include "irnode_t.h"
30 #include "irflag_t.h"
31 #include "array_t.h"
32 #include "ircons.h"
33 #include "irdom.h"
34 #include "irgmod.h"
35 #include "irgwalk.h"
36 #include "irouts.h"
37 #include "irgraph.h"
38 #include "irgopt.h"
39 #include "iropt.h"
40 #include "iroptimize.h"
41 #include "irnodemap.h"
42 #include "raw_bitset.h"
43 #include "debug.h"
44 #include "error.h"
45 #include "irpass.h"
46
47 /* maximum number of output Proj's */
48 #define MAX_PROJ ((long)pn_Load_max > (long)pn_Store_max ? (long)pn_Load_max : (long)pn_Store_max)
49
50 /**
51  * Mapping an address to an dense ID.
52  */
53 typedef struct address_entry_t {
54         unsigned id;          /**< The ID */
55 } address_entry;
56
57 /**
58  * Memop-flags.
59  */
60 enum memop_flags {
61         FLAG_KILL_ALL    = 1, /**< KILL all addresses */
62         FLAG_KILLED_NODE = 2, /**< this node was killed */
63         FLAG_EXCEPTION   = 4, /**< this node has exception flow */
64         FLAG_IGNORE      = 8, /**< ignore this node (volatile or other) */
65 };
66
67 /**
68  * A value: This represents a value stored at a given address in
69  * memory. Do not confuse with values from value numbering.
70  */
71 typedef struct value_t value_t;
72 struct value_t {
73         ir_node  *address;    /**< the address of this value */
74         ir_node  *value;      /**< the value itself */
75         ir_mode  *mode;       /**< the mode of the value */
76         unsigned id;          /**< address id */
77 };
78
79 /**
80  * A memop describes an memory-related operation.
81  * These are Loads/Store and all other ops that might modify
82  * memory (Calls, CopyB) or causing exceptions.
83  */
84 typedef struct memop_t memop_t;
85 struct memop_t {
86         value_t  value;      /**< the value of this memop: only defined for Load/Store */
87         ir_node  *node;      /**< the memory op itself */
88         ir_node  *mem;       /**< the memory FROM this node */
89         ir_node  *replace;   /**< the replacement node if this memop is replaced */
90         memop_t  *next;      /**< links to the next memory op in the block in forward order. */
91         memop_t  *prev;      /**< links to the previous memory op in the block in forward order. */
92         unsigned flags;      /**< memop flags */
93         ir_node  *projs[MAX_PROJ]; /**< Projs of this memory op */
94 };
95
96 /**
97  * Additional data for every basic block.
98  */
99 typedef struct block_t block_t;
100 struct block_t {
101         memop_t  *memop_forward;     /**< topologically sorted list of memory ops in this block */
102         memop_t  *memop_backward;    /**< last memop in the list */
103         unsigned *avail_out;         /**< out-set of available addresses */
104         memop_t  **id_2_memop_avail; /**< maps avail address ids to memops */
105         unsigned *anticL_in;         /**< in-set of anticipated Load addresses */
106         memop_t  **id_2_memop_antic; /**< maps anticipated address ids to memops */
107         ir_node  *block;             /**< the associated block */
108         block_t  *forward_next;      /**< next block entry for forward iteration */
109         block_t  *backward_next;     /**< next block entry for backward iteration */
110         memop_t  *avail;             /**< used locally for the avail map */
111         memop_t  **trans_results;    /**< used to cached translated nodes due antic calculation. */
112 };
113
114 /**
115  * Metadata for this pass.
116  */
117 typedef struct ldst_env_t {
118         struct obstack  obst;              /**< obstack for temporary data */
119         ir_nodemap_t    adr_map;           /**< Map addresses to */
120         block_t         *forward;          /**< Inverse post-order list of all blocks Start->End */
121         block_t         *backward;         /**< Inverse post-order list of all blocks End->Start */
122         ir_node         *start_bl;         /**< start block of the current graph */
123         ir_node         *end_bl;           /**< end block of the current graph */
124         unsigned        *curr_set;         /**< current set of addresses */
125         memop_t         **curr_id_2_memop; /**< current map of address ids to memops */
126         unsigned        curr_adr_id;       /**< number for address mapping */
127         unsigned        n_mem_ops;         /**< number of memory operations (Loads/Stores) */
128         size_t          rbs_size;          /**< size of all bitsets in bytes */
129         int             max_cfg_preds;     /**< maximum number of block cfg predecessors */
130         int             changed;           /**< Flags for changed graph state */
131 #ifdef DEBUG_libfirm
132         ir_node         **id_2_address;    /**< maps an id to the used address */
133 #endif
134 } ldst_env;
135
136 /* the one and only environment */
137 static ldst_env env;
138
139 #ifdef DEBUG_libfirm
140
141 static firm_dbg_module_t *dbg;
142
143 /**
144  * Dumps the block list.
145  *
146  * @param ldst environment
147  */
148 static void dump_block_list(ldst_env *env)
149 {
150         block_t *entry;
151         memop_t *op;
152         int     i;
153
154         for (entry = env->forward; entry != NULL; entry = entry->forward_next) {
155                 DB((dbg, LEVEL_2, "%+F {", entry->block));
156
157                 i = 0;
158                 for (op = entry->memop_forward; op != NULL; op = op->next) {
159                         if (i == 0) {
160                                 DB((dbg, LEVEL_2, "\n\t"));
161                         }
162                         DB((dbg, LEVEL_2, "%+F", op->node));
163                         if ((op->flags & FLAG_KILL_ALL) == FLAG_KILL_ALL)
164                                 DB((dbg, LEVEL_2, "X"));
165                         else if (op->flags & FLAG_KILL_ALL)
166                                 DB((dbg, LEVEL_2, "K"));
167                         DB((dbg, LEVEL_2, ", "));
168
169                         i = (i + 1) & 3;
170                 }
171                 DB((dbg, LEVEL_2, "\n}\n\n"));
172         }
173 }  /* dump_block_list */
174
175 /**
176  * Dumps the current set.
177  *
178  * @param bl   current block
179  * @param s    name of the set
180  */
181 static void dump_curr(block_t *bl, const char *s)
182 {
183         size_t end = env.rbs_size - 1;
184         size_t pos;
185         int    i;
186
187         DB((dbg, LEVEL_2, "%s[%+F] = {", s, bl->block));
188         i = 0;
189         for (pos = rbitset_next(env.curr_set, 0, 1); pos < end; pos = rbitset_next(env.curr_set, pos + 1, 1)) {
190                 memop_t *op = env.curr_id_2_memop[pos];
191
192                 if (i == 0) {
193                         DB((dbg, LEVEL_2, "\n\t"));
194                 }
195
196                 DB((dbg, LEVEL_2, "<%+F, %+F>, ", op->value.address, op->value.value));
197                 i = (i + 1) & 3;
198         }
199         DB((dbg, LEVEL_2, "\n}\n"));
200 }  /* dump_curr */
201
202 #else
203 static void dump_block_list(ldst_env *env)
204 {
205         (void) env;
206 }
207 static void dump_curr(block_t *bl, const char *s)
208 {
209         (void) bl;
210         (void) s;
211 }
212 #endif /* DEBUG_libfirm */
213
214 /** Get the block entry for a block node */
215 static block_t *get_block_entry(const ir_node *block)
216 {
217         assert(is_Block(block));
218
219         return (block_t*)get_irn_link(block);
220 }  /* get_block_entry */
221
222 /** Get the memop entry for a memory operation node */
223 static memop_t *get_irn_memop(const ir_node *irn)
224 {
225         assert(! is_Block(irn));
226         return (memop_t*)get_irn_link(irn);
227 }  /* get_irn_memop */
228
229 /**
230  * Walk over the memory edges from definition to users.
231  * This ensures, that even operation without memory output are found.
232  *
233  * @param irn   start node
234  * @param pre   pre walker function
235  * @param post  post walker function
236  * @param ctx   context parameter for the walker functions
237  */
238 static void walk_memory(ir_node *irn, irg_walk_func *pre, irg_walk_func *post, void *ctx)
239 {
240         int     i;
241         ir_mode *mode;
242
243         mark_irn_visited(irn);
244
245         if (pre)
246                 pre(irn, ctx);
247
248         mode = get_irn_mode(irn);
249         if (mode == mode_M) {
250                 /* every successor uses memory */
251                 for (i = get_irn_n_outs(irn) - 1; i >= 0; --i) {
252                         ir_node *succ = get_irn_out(irn, i);
253
254                         if (! irn_visited(succ))
255                                 walk_memory(succ, pre, post, ctx);
256                 }
257         } else if (mode == mode_T) {
258                 /* only some Proj's uses memory */
259                 for (i = get_irn_n_outs(irn) - 1; i >= 0; --i) {
260                         ir_node *proj = get_irn_out(irn, i);
261
262                         if (get_irn_mode(proj) == mode_M && ! irn_visited(proj))
263                                 walk_memory(proj, pre, post, ctx);
264                 }
265         }
266         if (post)
267                 post(irn, ctx);
268 }  /* walk_memory */
269
270 /**
271  * Walks over all memory nodes of a graph.
272  *
273  * @param irg   a graph
274  * @param pre   pre walker function
275  * @param post  post walker function
276  * @param ctx   context parameter for the walker functions
277  */
278 static void walk_memory_irg(ir_graph *irg, irg_walk_func pre, irg_walk_func post, void *ctx)
279 {
280         inc_irg_visited(irg);
281
282         ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
283
284         /*
285          * there are two possible sources for memory: initial_mem and nomem
286          * we ignore nomem as this should NOT change the memory
287          */
288         walk_memory(get_irg_initial_mem(irg), pre, post, ctx);
289
290         ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
291 }  /* walk_memory_irg */
292
293 /**
294  * Register an address and allocate a (sparse, 0..n) ID for it.
295  *
296  * @param adr  the IR-node representing the address
297  *
298  * @return the allocated id
299  */
300 static unsigned register_address(ir_node *adr)
301 {
302         address_entry *entry;
303
304         /* skip Confirms and Casts */
305 restart:
306         if (is_Confirm(adr)) {
307                 adr = get_Confirm_value(adr);
308                 goto restart;
309         }
310         if (is_Cast(adr)) {
311                 adr = get_Cast_op(adr);
312                 goto restart;
313         }
314
315         entry = (address_entry*)ir_nodemap_get(&env.adr_map, adr);
316
317         if (entry == NULL) {
318                 /* new address */
319                 entry = OALLOC(&env.obst, address_entry);
320
321                 entry->id = env.curr_adr_id++;
322                 ir_nodemap_insert(&env.adr_map, adr, entry);
323
324                 DB((dbg, LEVEL_3, "ADDRESS %+F has ID %u\n", adr, entry->id));
325 #ifdef DEBUG_libfirm
326                 ARR_APP1(ir_node *, env.id_2_address, adr);
327 #endif
328         }
329         return entry->id;
330 }  /* register_address */
331
332
333 /**
334  * translate an address through a Phi node into a given predecessor
335  * block.
336  *
337  * @param address  the address
338  * @param block    the block
339  * @param pos      the position of the predecessor in block
340  */
341 static ir_node *phi_translate(ir_node *address, const ir_node *block, int pos)
342 {
343         if (is_Phi(address) && get_nodes_block(address) == block)
344                 address = get_Phi_pred(address, pos);
345         return address;
346 }  /* phi_translate */
347
348 /**
349  * Walker: allocate an block entry for every block
350  * and register all potential addresses.
351  */
352 static void prepare_blocks(ir_node *irn, void *ctx)
353 {
354         (void)ctx;
355
356         if (is_Block(irn)) {
357                 block_t *entry = OALLOC(&env.obst, block_t);
358                 int     n;
359
360                 entry->memop_forward    = NULL;
361                 entry->memop_backward   = NULL;
362                 entry->avail_out        = NULL;
363                 entry->id_2_memop_avail = NULL;
364                 entry->anticL_in        = NULL;
365                 entry->id_2_memop_antic = NULL;
366                 entry->block            = irn;
367                 entry->forward_next     = NULL;
368                 entry->backward_next    = NULL;
369                 entry->avail            = NULL;
370                 entry->trans_results    = NULL;
371                 set_irn_link(irn, entry);
372
373                 set_Block_phis(irn, NULL);
374
375                 /* use block marks to track unreachable blocks */
376                 set_Block_mark(irn, 0);
377
378                 n = get_Block_n_cfgpreds(irn);
379                 if (n > env.max_cfg_preds)
380                         env.max_cfg_preds = n;
381         } else {
382                 ir_mode *mode = get_irn_mode(irn);
383
384                 if (mode_is_reference(mode)) {
385                         /*
386                          * Register ALL possible addresses: this is overkill yet but
387                          * simpler then doing it for all possible translated addresses
388                          * (which would be sufficient in the moment.
389                          */
390                         (void)register_address(irn);
391                 }
392         }
393 }  /* prepare_blocks */
394
395 /**
396  * Post-Walker, link in all Phi's
397  */
398 static void link_phis(ir_node *irn, void *ctx)
399 {
400         (void)ctx;
401
402         if (is_Phi(irn)) {
403                 ir_node *block = get_nodes_block(irn);
404                 add_Block_phi(block, irn);
405         }
406 }  /* link_phis */
407
408 /**
409  * Block walker: creates the inverse post-order list for the CFG.
410  */
411 static void inverse_post_order(ir_node *block, void *ctx)
412 {
413         block_t *entry = get_block_entry(block);
414
415         (void)ctx;
416
417         /* mark this block IS reachable from start */
418         set_Block_mark(block, 1);
419
420         /* create the list in inverse order */
421         entry->forward_next = env.forward;
422         env.forward         = entry;
423
424         /* remember the first visited (last in list) entry, needed for later */
425         if (env.backward == NULL)
426                 env.backward = entry;
427 }  /* inverse_post_order */
428
429 /**
430  * Block walker: create backward links for the memops of a block.
431  */
432 static void collect_backward(ir_node *block, void *ctx)
433 {
434         block_t *entry = get_block_entry(block);
435         memop_t *last, *op;
436
437         (void)ctx;
438
439         /*
440          * Do NOT link in the end block yet. We want it to be
441          * the first in the list. This is NOT guaranteed by the walker
442          * if we have endless loops.
443          */
444         if (block != env.end_bl) {
445                 entry->backward_next = env.backward;
446
447                 /* create the list in inverse order */
448                 env.backward = entry;
449         }
450
451         /* create backward links for all memory ops */
452         last = NULL;
453         for (op = entry->memop_forward; op != NULL; op = op->next) {
454                 op->prev = last;
455                 last     = op;
456         }
457         entry->memop_backward = last;
458 }  /* collect_backward */
459
460 /**
461  * Allocate a memop.
462  *
463  * @param irn  the IR-node representing the memop or NULL
464  *             if this is a translated (virtual) memop
465  *
466  * @return the allocated memop
467  */
468 static memop_t *alloc_memop(ir_node *irn)
469 {
470         memop_t *m = OALLOC(&env.obst, memop_t);
471
472         m->value.address = NULL;
473         m->value.value   = NULL;
474         m->value.mode    = NULL;
475
476         m->node          = irn;
477         m->mem           = NULL;
478         m->replace       = NULL;
479         m->next          = NULL;
480         m->flags         = 0;
481
482         memset(m->projs, 0, sizeof(m->projs));
483
484         if (irn != NULL)
485                 set_irn_link(irn, m);
486         return m;
487 }  /* alloc_memop */
488
489 /**
490  * Create a memop for a Phi-replacement.
491  *
492  * @param op   the memop to clone
493  * @param phi  the Phi-node representing the new value
494  */
495 static memop_t *clone_memop_phi(memop_t *op, ir_node *phi)
496 {
497         memop_t *m = OALLOC(&env.obst, memop_t);
498
499         m->value         = op->value;
500         m->value.value   = phi;
501
502         m->node          = phi;
503         m->replace       = NULL;
504         m->next          = NULL;
505         m->flags         = 0;
506
507         set_irn_link(phi, m);
508         return m;
509 }  /* clone_memop_phi */
510
511 /**
512  * Return the memory properties of a call node.
513  *
514  * @param call  the call node
515  *
516  * return a bitset of mtp_property_const and mtp_property_pure
517  */
518 static unsigned get_Call_memory_properties(ir_node *call)
519 {
520         ir_type *call_tp = get_Call_type(call);
521         unsigned prop = get_method_additional_properties(call_tp);
522
523         /* check first the call type */
524         if ((prop & (mtp_property_const|mtp_property_pure)) == 0) {
525                 /* try the called entity */
526                 ir_node *ptr = get_Call_ptr(call);
527
528                 if (is_Global(ptr)) {
529                         ir_entity *ent = get_Global_entity(ptr);
530
531                         prop = get_entity_additional_properties(ent);
532                 }
533         }
534         return prop & (mtp_property_const|mtp_property_pure);
535 }  /* get_Call_memory_properties */
536
537 /**
538  * Returns an entity if the address ptr points to a constant one.
539  *
540  * @param ptr  the address
541  *
542  * @return an entity or NULL
543  */
544 static ir_entity *find_constant_entity(ir_node *ptr)
545 {
546         for (;;) {
547                 if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
548                         return get_SymConst_entity(ptr);
549                 } else if (is_Sel(ptr)) {
550                         ir_entity *ent = get_Sel_entity(ptr);
551                         ir_type   *tp  = get_entity_owner(ent);
552
553                         /* Do not fiddle with polymorphism. */
554                         if (is_Class_type(get_entity_owner(ent)) &&
555                                 ((get_entity_n_overwrites(ent)    != 0) ||
556                                 (get_entity_n_overwrittenby(ent) != 0)   ) )
557                                 return NULL;
558
559                         if (is_Array_type(tp)) {
560                                 /* check bounds */
561                                 int i, n;
562
563                                 for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
564                                         ir_node   *bound;
565                                         ir_tarval *tlower, *tupper;
566                                         ir_node   *index = get_Sel_index(ptr, i);
567                                         ir_tarval *tv     = computed_value(index);
568
569                                         /* check if the index is constant */
570                                         if (tv == tarval_bad)
571                                                 return NULL;
572
573                                         bound  = get_array_lower_bound(tp, i);
574                                         tlower = computed_value(bound);
575                                         bound  = get_array_upper_bound(tp, i);
576                                         tupper = computed_value(bound);
577
578                                         if (tlower == tarval_bad || tupper == tarval_bad)
579                                                 return NULL;
580
581                                         if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
582                                                 return NULL;
583                                         if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
584                                                 return NULL;
585
586                                         /* ok, bounds check finished */
587                                 }
588                         }
589
590                         if (get_entity_linkage(ent) == IR_LINKAGE_CONSTANT)
591                                 return ent;
592
593                         /* try next */
594                         ptr = get_Sel_ptr(ptr);
595                 } else if (is_Add(ptr)) {
596                         ir_node *l = get_Add_left(ptr);
597                         ir_node *r = get_Add_right(ptr);
598
599                         if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
600                                 ptr = l;
601                         else if (get_irn_mode(r) == get_irn_mode(ptr) && is_Const(l))
602                                 ptr = r;
603                         else
604                                 return NULL;
605
606                         /* for now, we support only one addition, reassoc should fold all others */
607                         if (! is_SymConst(ptr) && !is_Sel(ptr))
608                                 return NULL;
609                 } else if (is_Sub(ptr)) {
610                         ir_node *l = get_Sub_left(ptr);
611                         ir_node *r = get_Sub_right(ptr);
612
613                         if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
614                                 ptr = l;
615                         else
616                                 return NULL;
617                         /* for now, we support only one subtraction, reassoc should fold all others */
618                         if (! is_SymConst(ptr) && !is_Sel(ptr))
619                                 return NULL;
620                 } else
621                         return NULL;
622         }
623 }  /* find_constant_entity */
624
625 /**
626  * Return the Selection index of a Sel node from dimension n
627  */
628 static long get_Sel_array_index_long(ir_node *n, int dim)
629 {
630         ir_node *index = get_Sel_index(n, dim);
631         assert(is_Const(index));
632         return get_tarval_long(get_Const_tarval(index));
633 }  /* get_Sel_array_index_long */
634
635 /**
636  * Returns the accessed component graph path for an
637  * node computing an address.
638  *
639  * @param ptr    the node computing the address
640  * @param depth  current depth in steps upward from the root
641  *               of the address
642  */
643 static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth)
644 {
645         compound_graph_path *res = NULL;
646         ir_entity           *root, *field, *ent;
647         int                 path_len, pos, idx;
648         ir_tarval           *tv;
649         ir_type             *tp;
650
651         if (is_SymConst(ptr)) {
652                 /* a SymConst. If the depth is 0, this is an access to a global
653                  * entity and we don't need a component path, else we know
654                  * at least its length.
655                  */
656                 assert(get_SymConst_kind(ptr) == symconst_addr_ent);
657                 root = get_SymConst_entity(ptr);
658                 res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
659         } else if (is_Sel(ptr)) {
660                 /* it's a Sel, go up until we find the root */
661                 res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);
662                 if (res == NULL)
663                         return NULL;
664
665                 /* fill up the step in the path at the current position */
666                 field    = get_Sel_entity(ptr);
667                 path_len = get_compound_graph_path_length(res);
668                 pos      = path_len - depth - 1;
669                 set_compound_graph_path_node(res, pos, field);
670
671                 if (is_Array_type(get_entity_owner(field))) {
672                         assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
673                         set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
674                 }
675         } else if (is_Add(ptr)) {
676                 ir_mode   *mode;
677                 ir_tarval *tmp;
678
679                 {
680                         ir_node *l = get_Add_left(ptr);
681                         ir_node *r = get_Add_right(ptr);
682                         if (is_Const(r) && get_irn_mode(l) == get_irn_mode(ptr)) {
683                                 ptr = l;
684                                 tv  = get_Const_tarval(r);
685                         } else {
686                                 ptr = r;
687                                 tv  = get_Const_tarval(l);
688                         }
689                 }
690 ptr_arith:
691                 mode = get_tarval_mode(tv);
692                 tmp  = tv;
693
694                 /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
695                 if (is_Sel(ptr)) {
696                         field = get_Sel_entity(ptr);
697                 } else {
698                         field = get_SymConst_entity(ptr);
699                 }
700                 idx = 0;
701                 for (ent = field;;) {
702                         unsigned  size;
703                         ir_tarval *sz, *tv_index, *tlower, *tupper;
704                         ir_node   *bound;
705
706                         tp = get_entity_type(ent);
707                         if (! is_Array_type(tp))
708                                 break;
709                         ent = get_array_element_entity(tp);
710                         size = get_type_size_bytes(get_entity_type(ent));
711                         sz   = new_tarval_from_long(size, mode);
712
713                         tv_index = tarval_div(tmp, sz);
714                         tmp      = tarval_mod(tmp, sz);
715
716                         if (tv_index == tarval_bad || tmp == tarval_bad)
717                                 return NULL;
718
719                         assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
720                         bound  = get_array_lower_bound(tp, 0);
721                         tlower = computed_value(bound);
722                         bound  = get_array_upper_bound(tp, 0);
723                         tupper = computed_value(bound);
724
725                         if (tlower == tarval_bad || tupper == tarval_bad)
726                                 return NULL;
727
728                         if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
729                                 return NULL;
730                         if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
731                                 return NULL;
732
733                         /* ok, bounds check finished */
734                         ++idx;
735                 }
736                 if (! tarval_is_null(tmp)) {
737                         /* access to some struct/union member */
738                         return NULL;
739                 }
740
741                 /* should be at least ONE array */
742                 if (idx == 0)
743                         return NULL;
744
745                 res = rec_get_accessed_path(ptr, depth + idx);
746                 if (res == NULL)
747                         return NULL;
748
749                 path_len = get_compound_graph_path_length(res);
750                 pos      = path_len - depth - idx;
751
752                 for (ent = field;;) {
753                         unsigned   size;
754                         ir_tarval *sz, *tv_index;
755                         long       index;
756
757                         tp = get_entity_type(ent);
758                         if (! is_Array_type(tp))
759                                 break;
760                         ent = get_array_element_entity(tp);
761                         set_compound_graph_path_node(res, pos, ent);
762
763                         size = get_type_size_bytes(get_entity_type(ent));
764                         sz   = new_tarval_from_long(size, mode);
765
766                         tv_index = tarval_div(tv, sz);
767                         tv       = tarval_mod(tv, sz);
768
769                         /* worked above, should work again */
770                         assert(tv_index != tarval_bad && tv != tarval_bad);
771
772                         /* bounds already checked above */
773                         index = get_tarval_long(tv_index);
774                         set_compound_graph_path_array_index(res, pos, index);
775                         ++pos;
776                 }
777         } else if (is_Sub(ptr)) {
778                 ir_node *l = get_Sub_left(ptr);
779                 ir_node *r = get_Sub_right(ptr);
780
781                 ptr = l;
782                 tv  = get_Const_tarval(r);
783                 tv  = tarval_neg(tv);
784                 goto ptr_arith;
785         }
786         return res;
787 }  /* rec_get_accessed_path */
788
789 /**
790  * Returns an access path or NULL.  The access path is only
791  * valid, if the graph is in phase_high and _no_ address computation is used.
792  */
793 static compound_graph_path *get_accessed_path(ir_node *ptr)
794 {
795         compound_graph_path *gr = rec_get_accessed_path(ptr, 0);
796         return gr;
797 }  /* get_accessed_path */
798
799 typedef struct path_entry {
800         ir_entity         *ent;
801         struct path_entry *next;
802         long              index;
803 } path_entry;
804
805 static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
806 {
807         path_entry       entry, *p;
808         ir_entity        *ent, *field;
809         ir_initializer_t *initializer;
810         ir_tarval        *tv;
811         ir_type          *tp;
812         unsigned         n;
813
814         entry.next = next;
815         if (is_SymConst(ptr)) {
816                 /* found the root */
817                 ent         = get_SymConst_entity(ptr);
818                 initializer = get_entity_initializer(ent);
819                 for (p = next; p != NULL;) {
820                         if (initializer->kind != IR_INITIALIZER_COMPOUND)
821                                 return NULL;
822                         n  = get_initializer_compound_n_entries(initializer);
823                         tp = get_entity_type(ent);
824
825                         if (is_Array_type(tp)) {
826                                 ent = get_array_element_entity(tp);
827                                 if (ent != p->ent) {
828                                         /* a missing [0] */
829                                         if (0 >= n)
830                                                 return NULL;
831                                         initializer = get_initializer_compound_value(initializer, 0);
832                                         continue;
833                                 }
834                         }
835                         if (p->index >= (int) n)
836                                 return NULL;
837                         initializer = get_initializer_compound_value(initializer, p->index);
838
839                         ent = p->ent;
840                         p   = p->next;
841                 }
842                 tp = get_entity_type(ent);
843                 while (is_Array_type(tp)) {
844                         ent = get_array_element_entity(tp);
845                         tp = get_entity_type(ent);
846                         /* a missing [0] */
847                         n  = get_initializer_compound_n_entries(initializer);
848                         if (0 >= n)
849                                 return NULL;
850                         initializer = get_initializer_compound_value(initializer, 0);
851                 }
852
853                 switch (initializer->kind) {
854                 case IR_INITIALIZER_CONST:
855                         return get_initializer_const_value(initializer);
856                 case IR_INITIALIZER_TARVAL:
857                 case IR_INITIALIZER_NULL:
858                 default:
859                         return NULL;
860                 }
861         } else if (is_Sel(ptr)) {
862                 entry.ent = field = get_Sel_entity(ptr);
863                 tp = get_entity_owner(field);
864                 if (is_Array_type(tp)) {
865                         assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
866                         entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
867                 } else {
868                         int i, n_members = get_compound_n_members(tp);
869                         for (i = 0; i < n_members; ++i) {
870                                 if (get_compound_member(tp, i) == field)
871                                         break;
872                         }
873                         if (i >= n_members) {
874                                 /* not found: should NOT happen */
875                                 return NULL;
876                         }
877                         entry.index = i;
878                 }
879                 return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
880         }  else if (is_Add(ptr)) {
881                 ir_mode *mode;
882                 unsigned pos;
883
884                 {
885                         ir_node *l = get_Add_left(ptr);
886                         ir_node *r = get_Add_right(ptr);
887                         if (is_Const(r)) {
888                                 ptr = l;
889                                 tv  = get_Const_tarval(r);
890                         } else {
891                                 ptr = r;
892                                 tv  = get_Const_tarval(l);
893                         }
894                 }
895 ptr_arith:
896                 mode = get_tarval_mode(tv);
897
898                 /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
899                 if (is_Sel(ptr)) {
900                         field = get_Sel_entity(ptr);
901                 } else {
902                         field = get_SymConst_entity(ptr);
903                 }
904
905                 /* count needed entries */
906                 pos = 0;
907                 for (ent = field;;) {
908                         tp = get_entity_type(ent);
909                         if (! is_Array_type(tp))
910                                 break;
911                         ent = get_array_element_entity(tp);
912                         ++pos;
913                 }
914                 /* should be at least ONE entry */
915                 if (pos == 0)
916                         return NULL;
917
918                 /* allocate the right number of entries */
919                 NEW_ARR_A(path_entry, p, pos);
920
921                 /* fill them up */
922                 pos = 0;
923                 for (ent = field;;) {
924                         unsigned  size;
925                         ir_tarval *sz, *tv_index, *tlower, *tupper;
926                         long      index;
927                         ir_node   *bound;
928
929                         tp = get_entity_type(ent);
930                         if (! is_Array_type(tp))
931                                 break;
932                         ent = get_array_element_entity(tp);
933                         p[pos].ent  = ent;
934                         p[pos].next = &p[pos + 1];
935
936                         size = get_type_size_bytes(get_entity_type(ent));
937                         sz   = new_tarval_from_long(size, mode);
938
939                         tv_index = tarval_div(tv, sz);
940                         tv       = tarval_mod(tv, sz);
941
942                         if (tv_index == tarval_bad || tv == tarval_bad)
943                                 return NULL;
944
945                         assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
946                         bound  = get_array_lower_bound(tp, 0);
947                         tlower = computed_value(bound);
948                         bound  = get_array_upper_bound(tp, 0);
949                         tupper = computed_value(bound);
950
951                         if (tlower == tarval_bad || tupper == tarval_bad)
952                                 return NULL;
953
954                         if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
955                                 return NULL;
956                         if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
957                                 return NULL;
958
959                         /* ok, bounds check finished */
960                         index = get_tarval_long(tv_index);
961                         p[pos].index = index;
962                         ++pos;
963                 }
964                 if (! tarval_is_null(tv)) {
965                         /* hmm, wrong access */
966                         return NULL;
967                 }
968                 p[pos - 1].next = next;
969                 return rec_find_compound_ent_value(ptr, p);
970         } else if (is_Sub(ptr)) {
971                 ir_node *l = get_Sub_left(ptr);
972                 ir_node *r = get_Sub_right(ptr);
973
974                 ptr = l;
975                 tv  = get_Const_tarval(r);
976                 tv  = tarval_neg(tv);
977                 goto ptr_arith;
978         }
979         return NULL;
980 }  /* rec_find_compound_ent_value */
981
982 static ir_node *find_compound_ent_value(ir_node *ptr)
983 {
984         return rec_find_compound_ent_value(ptr, NULL);
985 }  /* find_compound_ent_value */
986
987 /**
988  * Mark a Load memop to be replace by a definition
989  *
990  * @param op  the Load memop
991  */
992 static void mark_replace_load(memop_t *op, ir_node *def)
993 {
994         op->replace = def;
995         op->flags |= FLAG_KILLED_NODE;
996         env.changed = 1;
997 }  /* mark_replace_load */
998
999 /**
1000  * Mark a Store memop to be removed.
1001  *
1002  * @param op  the Store memop
1003  */
1004 static void mark_remove_store(memop_t *op)
1005 {
1006         op->flags |= FLAG_KILLED_NODE;
1007         env.changed = 1;
1008 }  /* mark_remove_store */
1009
1010 /**
1011  * Update a memop for a Load.
1012  *
1013  * @param m  the memop
1014  */
1015 static void update_Load_memop(memop_t *m)
1016 {
1017         int       i;
1018         ir_node   *load = m->node;
1019         ir_node   *ptr;
1020         ir_entity *ent;
1021
1022         if (get_Load_volatility(load) == volatility_is_volatile)
1023                 m->flags |= FLAG_IGNORE;
1024
1025         ptr = get_Load_ptr(load);
1026
1027         m->value.address = ptr;
1028
1029         for (i = get_irn_n_outs(load) - 1; i >= 0; --i) {
1030                 ir_node *proj = get_irn_out(load, i);
1031                 long    pn;
1032
1033                 /* beware of keep edges */
1034                 if (is_End(proj))
1035                         continue;
1036
1037                 pn = get_Proj_proj(proj);
1038                 m->projs[pn] = proj;
1039                 switch (pn) {
1040                 case pn_Load_res:
1041                         m->value.value = proj;
1042                         m->value.mode  = get_irn_mode(proj);
1043                         break;
1044                 case pn_Load_X_except:
1045                         m->flags |= FLAG_EXCEPTION;
1046                         break;
1047                 case pn_Load_M:
1048                         m->mem = proj;
1049                         break;
1050                 case pn_Load_X_regular:
1051                         break;
1052                 default:
1053                         panic("Unsupported Proj from Load %+F", proj);
1054                 }
1055         }
1056
1057         /* check if we can determine the entity that will be loaded */
1058         ent = find_constant_entity(ptr);
1059
1060         if (ent != NULL && get_entity_visibility(ent) != ir_visibility_external) {
1061                 /* a static allocation that is not external: there should be NO exception
1062                  * when loading even if we cannot replace the load itself. */
1063                 ir_node *value = NULL;
1064
1065                 /* no exception, clear the m fields as it might be checked later again */
1066                 if (m->projs[pn_Load_X_except]) {
1067                         ir_graph *irg = get_irn_irg(ptr);
1068                         exchange(m->projs[pn_Load_X_except], new_r_Bad(irg));
1069                         m->projs[pn_Load_X_except] = NULL;
1070                         m->flags &= ~FLAG_EXCEPTION;
1071                         env.changed = 1;
1072                 }
1073                 if (m->projs[pn_Load_X_regular]) {
1074                         exchange(m->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
1075                         m->projs[pn_Load_X_regular] = NULL;
1076                         env.changed = 1;
1077                 }
1078
1079                 if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT) {
1080                         if (ent->initializer) {
1081                                 /* new style initializer */
1082                                 value = find_compound_ent_value(ptr);
1083                         } else if (entity_has_compound_ent_values(ent)) {
1084                                 /* old style initializer */
1085                                 compound_graph_path *path = get_accessed_path(ptr);
1086
1087                                 if (path != NULL) {
1088                                         assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
1089
1090                                         value = get_compound_ent_value_by_path(ent, path);
1091                                         DB((dbg, LEVEL_1, "  Constant access at %F%F resulted in %+F\n", ent, path, value));
1092                                         free_compound_graph_path(path);
1093                                 }
1094                         }
1095                         if (value != NULL)
1096                                 value = can_replace_load_by_const(load, value);
1097                 }
1098
1099                 if (value != NULL) {
1100                         /* we completely replace the load by this value */
1101                         DB((dbg, LEVEL_1, "Replacing Load %+F by constant %+F\n", m->node, value));
1102                         mark_replace_load(m, value);
1103                         return;
1104                 }
1105         }
1106
1107         if (m->value.value != NULL && !(m->flags & FLAG_IGNORE)) {
1108                 /* only create an address if this node is NOT killed immediately or ignored */
1109                 m->value.id = register_address(ptr);
1110                 ++env.n_mem_ops;
1111         } else {
1112                 /* no user, KILL it */
1113                 mark_replace_load(m, NULL);
1114         }
1115 }  /* update_Load_memop */
1116
1117 /**
1118  * Update a memop for a Store.
1119  *
1120  * @param m  the memop
1121  */
1122 static void update_Store_memop(memop_t *m)
1123 {
1124         int     i;
1125         ir_node *store = m->node;
1126         ir_node *adr   = get_Store_ptr(store);
1127
1128         if (get_Store_volatility(store) == volatility_is_volatile) {
1129                 m->flags |= FLAG_IGNORE;
1130         } else {
1131                 /* only create an address if this node is NOT ignored */
1132                 m->value.id = register_address(adr);
1133                 ++env.n_mem_ops;
1134         }
1135
1136         m->value.address = adr;
1137
1138         for (i = get_irn_n_outs(store) - 1; i >= 0; --i) {
1139                 ir_node *proj = get_irn_out(store, i);
1140                 long    pn;
1141
1142                 /* beware of keep edges */
1143                 if (is_End(proj))
1144                         continue;
1145
1146                 pn = get_Proj_proj(proj);
1147                 m->projs[pn] = proj;
1148                 switch (pn) {
1149                 case pn_Store_X_except:
1150                         m->flags |= FLAG_EXCEPTION;
1151                         break;
1152                 case pn_Store_M:
1153                         m->mem = proj;
1154                         break;
1155                 case pn_Store_X_regular:
1156                         break;
1157                 default:
1158                         panic("Unsupported Proj from Store %+F", proj);
1159                 }
1160         }
1161         m->value.value = get_Store_value(store);
1162         m->value.mode  = get_irn_mode(m->value.value);
1163 }  /* update_Store_memop */
1164
1165 /**
1166  * Update a memop for a Call.
1167  *
1168  * @param m  the memop
1169  */
1170 static void update_Call_memop(memop_t *m)
1171 {
1172         ir_node  *call = m->node;
1173         unsigned prop  = get_Call_memory_properties(call);
1174         int      i;
1175
1176         if (prop & mtp_property_const) {
1177                 /* A constant call did NOT use memory at all, we
1178                    can kick it from the list. */
1179         } else if (prop & mtp_property_pure) {
1180                 /* pure calls READ memory */
1181                 m->flags = 0;
1182         } else
1183                 m->flags = FLAG_KILL_ALL;
1184
1185         for (i = get_irn_n_outs(call) - 1; i >= 0; --i) {
1186                 ir_node *proj = get_irn_out(call, i);
1187
1188                 /* beware of keep edges */
1189                 if (is_End(proj))
1190                         continue;
1191
1192                 switch (get_Proj_proj(proj)) {
1193                 case pn_Call_X_except:
1194                         m->flags |= FLAG_EXCEPTION;
1195                         break;
1196                 case pn_Call_M:
1197                         m->mem = proj;
1198                         break;
1199                 }
1200         }
1201 }  /* update_Call_memop */
1202
1203 /**
1204  * Update a memop for a Div/Mod.
1205  *
1206  * @param m  the memop
1207  */
1208 static void update_DivOp_memop(memop_t *m)
1209 {
1210         ir_node *div = m->node;
1211         int     i;
1212
1213         for (i = get_irn_n_outs(div) - 1; i >= 0; --i) {
1214                 ir_node *proj = get_irn_out(div, i);
1215
1216                 /* beware of keep edges */
1217                 if (is_End(proj))
1218                         continue;
1219
1220                 switch (get_Proj_proj(proj)) {
1221                 case pn_Generic_X_except:
1222                         m->flags |= FLAG_EXCEPTION;
1223                         break;
1224                 case pn_Generic_M:
1225                         m->mem = proj;
1226                         break;
1227                 }
1228         }
1229 }  /* update_DivOp_memop */
1230
1231 /**
1232  * Update a memop for a Phi.
1233  *
1234  * @param m  the memop
1235  */
1236 static void update_Phi_memop(memop_t *m)
1237 {
1238         /* the Phi is it's own mem */
1239         m->mem = m->node;
1240 }  /* update_Phi_memop */
1241
1242 /**
1243  * Memory walker: collect all memory ops and build topological lists.
1244  */
1245 static void collect_memops(ir_node *irn, void *ctx)
1246 {
1247         memop_t  *op;
1248         ir_node  *block;
1249         block_t  *entry;
1250
1251         (void) ctx;
1252         if (is_Proj(irn)) {
1253                 /* we can safely ignore ProjM's except the initial memory */
1254                 ir_graph *irg = get_irn_irg(irn);
1255                 if (irn != get_irg_initial_mem(irg))
1256                         return;
1257         }
1258
1259         op    = alloc_memop(irn);
1260         block = get_nodes_block(irn);
1261         entry = get_block_entry(block);
1262
1263         if (is_Phi(irn)) {
1264                 update_Phi_memop(op);
1265                 /* Phis must be always placed first */
1266                 op->next = entry->memop_forward;
1267                 entry->memop_forward = op;
1268                 if (entry->memop_backward == NULL)
1269                         entry->memop_backward = op;
1270         } else {
1271                 switch (get_irn_opcode(irn)) {
1272                 case iro_Load:
1273                         update_Load_memop(op);
1274                         break;
1275                 case iro_Store:
1276                         update_Store_memop(op);
1277                         break;
1278                 case iro_Call:
1279                         update_Call_memop(op);
1280                         break;
1281                 case iro_Sync:
1282                 case iro_Pin:
1283                         op->mem = irn;
1284                         break;
1285                 case iro_Proj:
1286                         /* initial memory */
1287                         op->mem = irn;
1288                         break;
1289                 case iro_Return:
1290                 case iro_End:
1291                         /* we can those to find the memory edge */
1292                         break;
1293                 case iro_Div:
1294                 case iro_Mod:
1295                         update_DivOp_memop(op);
1296                         break;
1297
1298                 case iro_Builtin:
1299                         /* TODO: handle some builtins */
1300                 default:
1301                         /* unsupported operation */
1302                         op->flags = FLAG_KILL_ALL;
1303                 }
1304
1305
1306                 /* all other should be placed last */
1307                 if (entry->memop_backward == NULL) {
1308                         entry->memop_forward = entry->memop_backward = op;
1309                 } else {
1310                         entry->memop_backward->next = op;
1311                         entry->memop_backward       = op;
1312                 }
1313         }
1314 }  /* collect_memops */
1315
1316 /**
1317  * Find an address in the current set.
1318  *
1319  * @param value  the value to be searched for
1320  *
1321  * @return a memop for the value or NULL if the value does
1322  *         not exists in the set or cannot be converted into
1323  *         the requested mode
1324  */
1325 static memop_t *find_address(const value_t *value)
1326 {
1327         if (rbitset_is_set(env.curr_set, value->id)) {
1328                 memop_t *res = env.curr_id_2_memop[value->id];
1329
1330                 if (res->value.mode == value->mode)
1331                         return res;
1332                 /* allow hidden casts */
1333                 if (get_mode_arithmetic(res->value.mode) == irma_twos_complement &&
1334                     get_mode_arithmetic(value->mode) == irma_twos_complement &&
1335                     get_mode_size_bits(res->value.mode) == get_mode_size_bits(value->mode))
1336                         return res;
1337         }
1338         return NULL;
1339 }  /* find_address */
1340
1341 /**
1342  * Find an address in the avail_out set.
1343  *
1344  * @param bl     the block
1345  */
1346 static memop_t *find_address_avail(const block_t *bl, unsigned id, const ir_mode *mode)
1347 {
1348         if (rbitset_is_set(bl->avail_out, id)) {
1349                 memop_t *res = bl->id_2_memop_avail[id];
1350
1351                 if (res->value.mode == mode)
1352                         return res;
1353                 /* allow hidden casts */
1354                 if (get_mode_arithmetic(res->value.mode) == irma_twos_complement &&
1355                     get_mode_arithmetic(mode) == irma_twos_complement &&
1356                     get_mode_size_bits(res->value.mode) == get_mode_size_bits(mode))
1357                         return res;
1358         }
1359         return NULL;
1360 }  /* find_address_avail */
1361
1362 /**
1363  * Kill all addresses from the current set.
1364  */
1365 static void kill_all(void)
1366 {
1367         rbitset_clear_all(env.curr_set, env.rbs_size);
1368
1369         /* set sentinel */
1370         rbitset_set(env.curr_set, env.rbs_size - 1);
1371 }  /* kill_all */
1372
1373 /**
1374  * Kill memops that are not alias free due to a Store value from the current set.
1375  *
1376  * @param value  the Store value
1377  */
1378 static void kill_memops(const value_t *value)
1379 {
1380         size_t end = env.rbs_size - 1;
1381         size_t pos;
1382
1383         for (pos = rbitset_next(env.curr_set, 0, 1); pos < end; pos = rbitset_next(env.curr_set, pos + 1, 1)) {
1384                 memop_t *op = env.curr_id_2_memop[pos];
1385
1386                 if (ir_no_alias != get_alias_relation(value->address, value->mode,
1387                                                           op->value.address, op->value.mode)) {
1388                         rbitset_clear(env.curr_set, pos);
1389                         env.curr_id_2_memop[pos] = NULL;
1390                         DB((dbg, LEVEL_2, "KILLING %+F because of possible alias address %+F\n", op->node, value->address));
1391                 }
1392         }
1393 }  /* kill_memops */
1394
1395 /**
1396  * Add the value of a memop to the current set.
1397  *
1398  * @param op  the memory op
1399  */
1400 static void add_memop(memop_t *op)
1401 {
1402         rbitset_set(env.curr_set, op->value.id);
1403         env.curr_id_2_memop[op->value.id] = op;
1404 }  /* add_memop */
1405
1406 /**
1407  * Add the value of a memop to the avail_out set.
1408  *
1409  * @param bl  the block
1410  * @param op  the memory op
1411  */
1412 static void add_memop_avail(block_t *bl, memop_t *op)
1413 {
1414         rbitset_set(bl->avail_out, op->value.id);
1415         bl->id_2_memop_avail[op->value.id] = op;
1416 }  /* add_memop_avail */
1417
1418 /**
1419  * Check, if we can convert a value of one mode to another mode
1420  * without changing the representation of bits.
1421  *
1422  * @param from  the original mode
1423  * @param to    the destination mode
1424  */
1425 static int can_convert_to(const ir_mode *from, const ir_mode *to)
1426 {
1427         if (get_mode_arithmetic(from) == irma_twos_complement &&
1428             get_mode_arithmetic(to) == irma_twos_complement &&
1429             get_mode_size_bits(from) == get_mode_size_bits(to))
1430                 return 1;
1431         return 0;
1432 }  /* can_convert_to */
1433
1434 /**
1435  * Add a Conv to the requested mode if needed.
1436  *
1437  * @param irn   the IR-node to convert
1438  * @param mode  the destination mode
1439  *
1440  * @return the possible converted node or NULL
1441  *         if the conversion is not possible
1442  */
1443 static ir_node *conv_to(ir_node *irn, ir_mode *mode)
1444 {
1445         ir_mode *other = get_irn_mode(irn);
1446         if (other != mode) {
1447                 /* different modes: check if conversion is possible without changing the bits */
1448                 if (can_convert_to(other, mode)) {
1449                         ir_node *block = get_nodes_block(irn);
1450                         return new_r_Conv(block, irn, mode);
1451                 }
1452                 /* otherwise not possible ... yet */
1453                 return NULL;
1454         }
1455         return irn;
1456 }  /* conv_to */
1457
1458 /**
1459  * Update the address of an value if this address was a load result
1460  * and the load is killed now.
1461  *
1462  * @param value  the value whose address is updated
1463  */
1464 static void update_address(value_t *value)
1465 {
1466         if (is_Proj(value->address)) {
1467                 ir_node *load = get_Proj_pred(value->address);
1468
1469                 if (is_Load(load)) {
1470                         const memop_t *op = get_irn_memop(load);
1471
1472                         if (op->flags & FLAG_KILLED_NODE)
1473                                 value->address = op->replace;
1474                 }
1475         }
1476 }  /* update_address */
1477
1478 /**
1479  * Do forward dataflow analysis on the given block and calculate the
1480  * GEN and KILL in the current (avail) set.
1481  *
1482  * @param bl  the block
1483  */
1484 static void calc_gen_kill_avail(block_t *bl)
1485 {
1486         memop_t *op;
1487         ir_node *def;
1488
1489         for (op = bl->memop_forward; op != NULL; op = op->next) {
1490                 switch (get_irn_opcode(op->node)) {
1491                 case iro_Phi:
1492                         /* meet */
1493                         break;
1494                 case iro_Sync:
1495                         /* join */
1496                         break;
1497                 case iro_Load:
1498                         if (! (op->flags & (FLAG_KILLED_NODE|FLAG_IGNORE))) {
1499                                 /* do we have this already? */
1500                                 memop_t *other;
1501
1502                                 update_address(&op->value);
1503                                 other = find_address(&op->value);
1504                                 if (other != NULL && other != op) {
1505                                         def = conv_to(other->value.value, op->value.mode);
1506                                         if (def != NULL) {
1507 #ifdef DEBUG_libfirm
1508                                                 if (is_Store(other->node)) {
1509                                                         /* RAW */
1510                                                         DB((dbg, LEVEL_1, "RAW %+F <- %+F(%+F)\n", op->node, def, other->node));
1511                                                 } else {
1512                                                         /* RAR */
1513                                                         DB((dbg, LEVEL_1, "RAR %+F <- %+F(%+F)\n", op->node, def, other->node));
1514                                                 }
1515 #endif
1516                                                 mark_replace_load(op, def);
1517                                                 /* do NOT change the memop table */
1518                                                 continue;
1519                                         }
1520                                 }
1521                                 /* add this value */
1522                                 add_memop(op);
1523                         }
1524                         break;
1525                 case iro_Store:
1526                         if (! (op->flags & FLAG_KILLED_NODE)) {
1527                                 /* do we have this store already */
1528                                 memop_t *other;
1529
1530                                 update_address(&op->value);
1531                                 other = find_address(&op->value);
1532                                 if (other != NULL) {
1533                                         if (is_Store(other->node)) {
1534                                                 if (op != other && !(other->flags & FLAG_IGNORE) &&
1535                                                     get_nodes_block(other->node) == get_nodes_block(op->node)) {
1536                                                         /*
1537                                                          * A WAW in the same block we can kick the first store.
1538                                                          * This is a shortcut: we know that the second Store will be anticipated
1539                                                          * then in an case.
1540                                                          */
1541                                                         DB((dbg, LEVEL_1, "WAW %+F <- %+F\n", other->node, op->node));
1542                                                         mark_remove_store(other);
1543                                                         /* FIXME: a Load might be get freed due to this killed store */
1544                                                 }
1545                                         } else if (other->value.value == op->value.value && !(op->flags & FLAG_IGNORE)) {
1546                                                 /* WAR */
1547                                                 DB((dbg, LEVEL_1, "WAR %+F <- %+F\n", op->node, other->node));
1548                                                 mark_remove_store(op);
1549                                                 /* do NOT change the memop table */
1550                                                 continue;
1551                                         }
1552                                 }
1553                                 /* KILL all possible aliases */
1554                                 kill_memops(&op->value);
1555                                 /* add this value */
1556                                 add_memop(op);
1557                         }
1558                         break;
1559                 default:
1560                         if (op->flags & FLAG_KILL_ALL)
1561                                 kill_all();
1562                 }
1563         }
1564 }  /* calc_gen_kill_avail */
1565
1566 #define BYTE_SIZE(x)  (((x) + 7) >> 3)
1567
1568 /**
1569  * Do forward dataflow analysis on a given block to calculate the avail_out set
1570  * for this block only.
1571  *
1572  * @param block  the block
1573  */
1574 static void forward_avail(block_t *bl)
1575 {
1576         /* fill the data from the current block */
1577         env.curr_id_2_memop = bl->id_2_memop_avail;
1578         env.curr_set        = bl->avail_out;
1579
1580         calc_gen_kill_avail(bl);
1581         dump_curr(bl, "Avail_out");
1582 }  /* forward_avail */
1583
1584 /**
1585  * Do backward dataflow analysis on a given block to calculate the antic set
1586  * of Loaded addresses.
1587  *
1588  * @param bl  the block
1589  *
1590  * @return non-zero if the set has changed since last iteration
1591  */
1592 static int backward_antic(block_t *bl)
1593 {
1594         memop_t *op;
1595         ir_node *block = bl->block;
1596         int     n = get_Block_n_cfg_outs(block);
1597
1598         if (n == 1) {
1599                 ir_node  *succ    = get_Block_cfg_out(block, 0);
1600                 block_t  *succ_bl = get_block_entry(succ);
1601                 int      pred_pos = get_Block_cfgpred_pos(succ, block);
1602                 size_t   end      = env.rbs_size - 1;
1603                 size_t   pos;
1604
1605                 kill_all();
1606
1607                 if (bl->trans_results == NULL) {
1608                         /* allocate the translate cache */
1609                         bl->trans_results = OALLOCNZ(&env.obst, memop_t*, env.curr_adr_id);
1610                 }
1611
1612                 /* check for partly redundant values */
1613                 for (pos = rbitset_next(succ_bl->anticL_in, 0, 1);
1614                      pos < end;
1615                      pos = rbitset_next(succ_bl->anticL_in, pos + 1, 1)) {
1616                         /*
1617                          * do Phi-translation here: Note that at this point the nodes are
1618                          * not changed, so we can safely cache the results.
1619                          * However: Loads of Load results ARE bad, because we have no way
1620                           to translate them yet ...
1621                          */
1622                         memop_t *op = bl->trans_results[pos];
1623                         if (op == NULL) {
1624                                 /* not yet translated */
1625                                 ir_node *adr, *trans_adr;
1626
1627                                 op  = succ_bl->id_2_memop_antic[pos];
1628                                 adr = op->value.address;
1629
1630                                 trans_adr = phi_translate(adr, succ, pred_pos);
1631                                 if (trans_adr != adr) {
1632                                         /* create a new entry for the translated one */
1633                                         memop_t *new_op;
1634
1635                                         new_op = alloc_memop(NULL);
1636                                         new_op->value.address = trans_adr;
1637                                         new_op->value.id      = register_address(trans_adr);
1638                                         new_op->value.mode    = op->value.mode;
1639                                         new_op->node          = op->node; /* we need the node to decide if Load/Store */
1640                                         new_op->flags         = op->flags;
1641
1642                                         bl->trans_results[pos] = new_op;
1643                                         op = new_op;
1644                                 }
1645                         }
1646                         env.curr_id_2_memop[op->value.id] = op;
1647                         rbitset_set(env.curr_set, op->value.id);
1648                 }
1649         } else if (n > 1) {
1650                 ir_node *succ    = get_Block_cfg_out(block, 0);
1651                 block_t *succ_bl = get_block_entry(succ);
1652                 int i;
1653
1654                 rbitset_copy(env.curr_set, succ_bl->anticL_in, env.rbs_size);
1655                 memcpy(env.curr_id_2_memop, succ_bl->id_2_memop_antic, env.rbs_size * sizeof(env.curr_id_2_memop[0]));
1656
1657                 /* Hmm: probably we want kill merges of Loads ans Stores here */
1658                 for (i = n - 1; i > 0; --i) {
1659                         ir_node *succ    = get_Block_cfg_out(bl->block, i);
1660                         block_t *succ_bl = get_block_entry(succ);
1661
1662                         rbitset_and(env.curr_set, succ_bl->anticL_in, env.rbs_size);
1663                 }
1664         } else {
1665                 /* block ends with a noreturn call */
1666                 kill_all();
1667         }
1668
1669         dump_curr(bl, "AnticL_out");
1670
1671         for (op = bl->memop_backward; op != NULL; op = op->prev) {
1672                 switch (get_irn_opcode(op->node)) {
1673                 case iro_Phi:
1674                         /* meet */
1675                         break;
1676                 case iro_Sync:
1677                         /* join */
1678                         break;
1679                 case iro_Load:
1680                         if (! (op->flags & (FLAG_KILLED_NODE|FLAG_IGNORE))) {
1681                                 /* always add it */
1682                                 add_memop(op);
1683                         }
1684                         break;
1685                 case iro_Store:
1686                         if (! (op->flags & FLAG_KILLED_NODE)) {
1687                                 /* a Store: check which memops must be killed */
1688                                 kill_memops(&op->value);
1689                         }
1690                         break;
1691                 default:
1692                         if (op->flags & FLAG_KILL_ALL)
1693                                 kill_all();
1694                 }
1695         }
1696
1697         memcpy(bl->id_2_memop_antic, env.curr_id_2_memop, env.rbs_size * sizeof(env.curr_id_2_memop[0]));
1698         if (! rbitsets_equal(bl->anticL_in, env.curr_set, env.rbs_size)) {
1699                 /* changed */
1700                 rbitset_copy(bl->anticL_in, env.curr_set, env.rbs_size);
1701                 dump_curr(bl, "AnticL_in*");
1702                 return 1;
1703         }
1704         dump_curr(bl, "AnticL_in");
1705         return 0;
1706 }  /* backward_antic */
1707
1708 /**
1709  * Replace a Load memop by a already known value.
1710  *
1711  * @param op  the Load memop
1712  */
1713 static void replace_load(memop_t *op)
1714 {
1715         ir_node *load = op->node;
1716         ir_node *def  = skip_Id(op->replace);
1717         ir_node *proj;
1718         ir_mode *mode;
1719
1720         if (def != NULL)
1721                 DB((dbg, LEVEL_1, "Replacing %+F by definition %+F\n", load, is_Proj(def) ? get_Proj_pred(def) : def));
1722         else {
1723                 if (op->flags & FLAG_EXCEPTION) {
1724                         /* bad: this node is unused and executed for exception only */
1725                         DB((dbg, LEVEL_1, "Unused %+F executed for exception only ...\n", load));
1726                         return;
1727                 }
1728                 DB((dbg, LEVEL_1, "Killing unused %+F\n", load));
1729         }
1730
1731         if (op->mem != NULL) {
1732                 /* in rare cases a Load might have NO memory */
1733                 exchange(op->mem, get_Load_mem(load));
1734         }
1735         proj = op->projs[pn_Load_res];
1736         if (proj != NULL) {
1737                 mode = get_irn_mode(proj);
1738                 if (get_irn_mode(def) != mode) {
1739                         /* a hidden cast */
1740                         dbg_info *db    = get_irn_dbg_info(load);
1741                         ir_node  *block = get_nodes_block(proj);
1742                         def = new_rd_Conv(db, block, def, mode);
1743                 }
1744                 exchange(proj, def);
1745         }
1746         proj = op->projs[pn_Load_X_except];
1747         if (proj != NULL) {
1748                 ir_graph *irg = get_irn_irg(load);
1749                 exchange(proj, new_r_Bad(irg));
1750         }
1751         proj = op->projs[pn_Load_X_regular];
1752         if (proj != NULL) {
1753                 exchange(proj, new_r_Jmp(get_nodes_block(load)));
1754         }
1755 }  /* replace_load */
1756
1757 /**
1758  * Remove a Store memop.
1759  *
1760  * @param op  the Store memop
1761  */
1762 static void remove_store(memop_t *op)
1763 {
1764         ir_node *store = op->node;
1765         ir_node *proj;
1766
1767         DB((dbg, LEVEL_1, "Removing %+F\n", store));
1768
1769         if (op->mem != NULL) {
1770                 /* in rare cases a Store might have no memory */
1771                 exchange(op->mem, get_Store_mem(store));
1772         }
1773         proj = op->projs[pn_Store_X_except];
1774         if (proj != NULL) {
1775                 ir_graph *irg = get_irn_irg(store);
1776                 exchange(proj, new_r_Bad(irg));
1777         }
1778         proj = op->projs[pn_Store_X_regular];
1779         if (proj != NULL) {
1780                 exchange(proj, new_r_Jmp(get_nodes_block(store)));
1781         }
1782 }  /* remove_store */
1783
1784
1785 /**
1786  * Do all necessary replacements for a given block.
1787  *
1788  * @param bl  the block
1789  */
1790 static void do_replacements(block_t *bl)
1791 {
1792         memop_t *op;
1793
1794         for (op = bl->memop_forward; op != NULL; op = op->next) {
1795                 if (op->flags & FLAG_KILLED_NODE) {
1796                         switch (get_irn_opcode(op->node)) {
1797                         case iro_Load:
1798                                 replace_load(op);
1799                                 break;
1800                         case iro_Store:
1801                                 remove_store(op);
1802                                 break;
1803                         }
1804                 }
1805         }
1806 }  /* do_replacements */
1807
1808 /**
1809  * Calculate the Avail_out sets for all basic blocks.
1810  */
1811 static void calcAvail(void)
1812 {
1813         memop_t  **tmp_memop = env.curr_id_2_memop;
1814         unsigned *tmp_set    = env.curr_set;
1815         block_t  *bl;
1816
1817         /* calculate avail_out */
1818         DB((dbg, LEVEL_2, "Calculate Avail_out\n"));
1819
1820         /* iterate over all blocks in in any order, skip the start block */
1821         for (bl = env.forward->forward_next; bl != NULL; bl = bl->forward_next) {
1822                 forward_avail(bl);
1823         }
1824
1825         /* restore the current sets */
1826         env.curr_id_2_memop = tmp_memop;
1827         env.curr_set        = tmp_set;
1828 }  /* calcAvail */
1829
1830 /**
1831  * Calculate the Antic_in sets for all basic blocks.
1832  */
1833 static void calcAntic(void)
1834 {
1835         int i, need_iter;
1836
1837         /* calculate antic_out */
1838         DB((dbg, LEVEL_2, "Calculate Antic_in\n"));
1839         i = 0;
1840         do {
1841                 block_t *bl;
1842
1843                 DB((dbg, LEVEL_2, "Iteration %d:\n=========\n", i));
1844
1845                 need_iter = 0;
1846
1847                 /* over all blocks in reverse post order */
1848                 for (bl = env.backward->backward_next; bl != NULL; bl = bl->backward_next) {
1849                         need_iter |= backward_antic(bl);
1850                 }
1851                 ++i;
1852         } while (need_iter);
1853         DB((dbg, LEVEL_2, "Get anticipated Load set after %d iterations\n", i));
1854 }  /* calcAntic */
1855
1856 /**
1857  * Return the node representing the last memory in a block.
1858  *
1859  * @param bl  the block
1860  */
1861 static ir_node *find_last_memory(block_t *bl)
1862 {
1863         for (;;) {
1864                 if (bl->memop_backward != NULL) {
1865                         return bl->memop_backward->mem;
1866                 }
1867                 /* if there is NO memory in this block, go to the dominator */
1868                 bl = get_block_entry(get_Block_idom(bl->block));
1869         }
1870 }  /* find_last_memory */
1871
1872 /**
1873  * Reroute all memory users of old memory
1874  * to a new memory IR-node.
1875  *
1876  * @param omem  the old memory IR-node
1877  * @param nmem  the new memory IR-node
1878  */
1879 static void reroute_all_mem_users(ir_node *omem, ir_node *nmem)
1880 {
1881         int i;
1882
1883         for (i = get_irn_n_outs(omem) - 1; i >= 0; --i) {
1884                 int     n_pos;
1885                 ir_node *user = get_irn_out_ex(omem, i, &n_pos);
1886
1887                 set_irn_n(user, n_pos, nmem);
1888         }
1889
1890         /* all edges previously point to omem now point to nmem */
1891         nmem->out = omem->out;
1892 }  /* reroute_all_mem_users */
1893
1894 /**
1895  * Reroute memory users of old memory that are dominated by a given block
1896  * to a new memory IR-node.
1897  *
1898  * @param omem     the old memory IR-node
1899  * @param nmem     the new memory IR-node
1900  * @param pass_bl  the block the memory must pass
1901  */
1902 static void reroute_mem_through(ir_node *omem, ir_node *nmem, ir_node *pass_bl)
1903 {
1904         int             i, j, n = get_irn_n_outs(omem);
1905         ir_def_use_edge *edges = NEW_ARR_D(ir_def_use_edge, &env.obst, n + 1);
1906
1907         for (i = j = 0; i < n; ++i) {
1908                 int     n_pos;
1909                 ir_node *user   = get_irn_out_ex(omem, i, &n_pos);
1910                 ir_node *use_bl = get_nodes_block(user);
1911
1912
1913                 if (is_Phi(user)) {
1914                         use_bl = get_Block_cfgpred_block(use_bl, n_pos);
1915                 }
1916                 if (block_dominates(pass_bl, use_bl)) {
1917                         /* found an user that is dominated */
1918                         ++j;
1919                         edges[j].pos = n_pos;
1920                         edges[j].use = user;
1921
1922                         set_irn_n(user, n_pos, nmem);
1923                 }
1924         }
1925
1926         /* Modify the out structure: we create a new out edge array on our
1927            temporary obstack here. This should be no problem, as we invalidate the edges
1928            at the end either. */
1929         /* first entry is used for the length */
1930         edges[0].pos = j;
1931         nmem->out = edges;
1932 }  /* reroute_mem_through */
1933
1934 /**
1935  * insert Loads, making partly redundant Loads fully redundant
1936  */
1937 static int insert_Load(block_t *bl)
1938 {
1939         ir_node  *block = bl->block;
1940         int      i, n = get_Block_n_cfgpreds(block);
1941         size_t   end = env.rbs_size - 1;
1942
1943         DB((dbg, LEVEL_3, "processing %+F\n", block));
1944
1945         if (n == 0) {
1946                 /* might still happen for an unreachable block (end for instance) */
1947                 return 0;
1948         }
1949
1950         if (n > 1) {
1951                 ir_node **ins;
1952                 size_t    pos;
1953
1954                 NEW_ARR_A(ir_node *, ins, n);
1955
1956                 rbitset_set_all(env.curr_set, env.rbs_size);
1957
1958                 /* More than one predecessors, calculate the join for all avail_outs ignoring unevaluated
1959                    Blocks. These put in Top anyway. */
1960                 for (i = n - 1; i >= 0; --i) {
1961                         ir_node *pred = skip_Proj(get_Block_cfgpred(block, i));
1962                         ir_node *blk  = get_nodes_block(pred);
1963                         block_t *pred_bl;
1964
1965                         pred_bl = get_block_entry(blk);
1966                         rbitset_and(env.curr_set, pred_bl->avail_out, env.rbs_size);
1967
1968                         if (is_Load(pred) || is_Store(pred)) {
1969                                 /* We reached this block by an exception from a Load or Store:
1970                                  * the memop creating the exception was NOT completed than, kill it
1971                                  */
1972                                 memop_t *exc_op = get_irn_memop(pred);
1973                                 rbitset_clear(env.curr_set, exc_op->value.id);
1974                         }
1975
1976                 }
1977                 /*
1978                  * Ensure that all values are in the map: build Phi's if necessary:
1979                  * Note: the last bit is the sentinel and ALWAYS set, so end with -2.
1980                  */
1981                 for (pos = 0; pos < env.rbs_size - 1; ++pos) {
1982                         if (! rbitset_is_set(env.curr_set, pos))
1983                                 env.curr_id_2_memop[pos] = NULL;
1984                         else {
1985                                 ir_node *pred    = get_Block_cfgpred_block(bl->block, 0);
1986                                 block_t *pred_bl = get_block_entry(pred);
1987                                 int     need_phi = 0;
1988                                 memop_t *first   = NULL;
1989                                 ir_mode *mode    = NULL;
1990
1991                                 for (i = 0; i < n; ++i) {
1992                                         memop_t *mop;
1993
1994                                         pred    = get_Block_cfgpred_block(bl->block, i);
1995                                         pred_bl = get_block_entry(pred);
1996
1997                                         mop = pred_bl->id_2_memop_avail[pos];
1998                                         if (first == NULL) {
1999                                                 first = mop;
2000                                                 ins[0] = first->value.value;
2001                                                 mode = get_irn_mode(ins[0]);
2002
2003                                                 /* no Phi needed so far */
2004                                                 env.curr_id_2_memop[pos] = first;
2005                                         } else {
2006                                                 ins[i] = conv_to(mop->value.value, mode);
2007                                                 if (ins[i] != ins[0]) {
2008                                                         if (ins[i] == NULL) {
2009                                                                 /* conversion failed */
2010                                                                 env.curr_id_2_memop[pos] = NULL;
2011                                                                 rbitset_clear(env.curr_set, pos);
2012                                                                 break;
2013                                                         }
2014                                                         need_phi = 1;
2015                                                 }
2016                                         }
2017                                 }
2018                                 if (need_phi) {
2019                                         /* build a Phi  */
2020                                         ir_node *phi = new_r_Phi(bl->block, n, ins, mode);
2021                                         memop_t *phiop = alloc_memop(phi);
2022
2023                                         phiop->value = first->value;
2024                                         phiop->value.value = phi;
2025
2026                                         /* no need to link it in, as it is a DATA phi */
2027
2028                                         env.curr_id_2_memop[pos] = phiop;
2029
2030                                         DB((dbg, LEVEL_3, "Created new %+F on merging value for address %+F\n", phi, first->value.address));
2031                                 }
2032                         }
2033                 }
2034         } else {
2035                 /* only one predecessor, simply copy the map */
2036                 ir_node *pred    = get_Block_cfgpred_block(bl->block, 0);
2037                 block_t *pred_bl = get_block_entry(pred);
2038
2039                 rbitset_copy(env.curr_set, pred_bl->avail_out, env.rbs_size);
2040
2041                 memcpy(env.curr_id_2_memop, pred_bl->id_2_memop_avail, env.rbs_size * sizeof(bl->id_2_memop_avail[0]));
2042         }
2043
2044         if (n > 1) {
2045                 size_t pos;
2046
2047                 /* check for partly redundant values */
2048                 for (pos = rbitset_next(bl->anticL_in, 0, 1);
2049                      pos < end;
2050                      pos = rbitset_next(bl->anticL_in, pos + 1, 1)) {
2051                         memop_t *op = bl->id_2_memop_antic[pos];
2052                         int     have_some, all_same;
2053                         ir_node *first;
2054
2055                         if (rbitset_is_set(env.curr_set, pos)) {
2056                                 /* already avail */
2057                                 continue;
2058                         }
2059
2060                         assert(is_Load(op->node));
2061
2062                         DB((dbg, LEVEL_3, "anticipated %+F\n", op->node));
2063
2064                         have_some  = 0;
2065                         all_same   = 1;
2066                         first      = 0;
2067                         for (i = n - 1; i >= 0; --i) {
2068                                 ir_node *pred    = get_Block_cfgpred_block(block, i);
2069                                 block_t *pred_bl = get_block_entry(pred);
2070                                 ir_mode *mode    = op->value.mode;
2071                                 memop_t *e;
2072                                 ir_node *adr;
2073
2074                                 adr = phi_translate(op->value.address, block, i);
2075                                 DB((dbg, LEVEL_3, ".. using address %+F in pred %d\n", adr, i));
2076                                 e   = find_address_avail(pred_bl, register_address(adr), mode);
2077                                 if (e == NULL) {
2078                                         ir_node *ef_block = get_nodes_block(adr);
2079                                         if (! block_dominates(ef_block, pred)) {
2080                                                 /* cannot place a copy here */
2081                                                 have_some = 0;
2082                                                 DB((dbg, LEVEL_3, "%+F cannot be moved into predecessor %+F\n", op->node, pred));
2083                                                 break;
2084                                         }
2085                                         DB((dbg, LEVEL_3, "%+F is not available in predecessor %+F\n", op->node, pred));
2086                                         pred_bl->avail = NULL;
2087                                         all_same       = 0;
2088                                 } else {
2089                                         if (e->value.mode != mode && !can_convert_to(e->value.mode, mode)) {
2090                                                 /* cannot create a Phi due to different modes */
2091                                                 have_some = 0;
2092                                                 break;
2093                                         }
2094                                         pred_bl->avail = e;
2095                                         have_some      = 1;
2096                                         DB((dbg, LEVEL_3, "%+F is available for %+F in predecessor %+F\n", e->node, op->node, pred));
2097                                         if (first == NULL)
2098                                                 first = e->node;
2099                                         else if (first != e->node)
2100                                                 all_same = 0;
2101                                 }
2102                         }
2103                         if (have_some && !all_same) {
2104                                 ir_mode *mode = op->value.mode;
2105                                 ir_node **in, *phi;
2106                                 memop_t *phi_op;
2107
2108                                 NEW_ARR_A(ir_node *, in, n);
2109
2110                                 for (i = n - 1; i >= 0; --i) {
2111                                         ir_node *pred    = get_Block_cfgpred_block(block, i);
2112                                         block_t *pred_bl = get_block_entry(pred);
2113
2114                                         if (pred_bl->avail == NULL) {
2115                                                 /* create a new Load here and make to make it fully redundant */
2116                                                 dbg_info *db       = get_irn_dbg_info(op->node);
2117                                                 ir_node  *last_mem = find_last_memory(pred_bl);
2118                                                 ir_node  *load, *def, *adr;
2119                                                 memop_t  *new_op;
2120
2121                                                 assert(last_mem != NULL);
2122                                                 adr  = phi_translate(op->value.address, block, i);
2123                                                 load = new_rd_Load(db, pred, last_mem, adr, mode, cons_none);
2124                                                 def  = new_r_Proj(load, mode, pn_Load_res);
2125                                                 DB((dbg, LEVEL_1, "Created new %+F in %+F for party redundant %+F\n", load, pred, op->node));
2126
2127                                                 new_op                = alloc_memop(load);
2128                                                 new_op->mem           = new_r_Proj(load, mode_M, pn_Load_M);
2129                                                 new_op->value.address = adr;
2130                                                 new_op->value.id      = op->value.id;
2131                                                 new_op->value.mode    = mode;
2132                                                 new_op->value.value   = def;
2133
2134                                                 new_op->projs[pn_Load_M]   = new_op->mem;
2135                                                 new_op->projs[pn_Load_res] = def;
2136
2137                                                 new_op->prev = pred_bl->memop_backward;
2138                                                 if (pred_bl->memop_backward != NULL)
2139                                                         pred_bl->memop_backward->next = new_op;
2140
2141                                                 pred_bl->memop_backward = new_op;
2142
2143                                                 if (pred_bl->memop_forward == NULL)
2144                                                         pred_bl->memop_forward = new_op;
2145
2146                                                 if (get_nodes_block(last_mem) == pred) {
2147                                                         /* We have add a new last memory op in pred block.
2148                                                            If pred had already a last mem, reroute all memory
2149                                                            users. */
2150                                                         reroute_all_mem_users(last_mem, new_op->mem);
2151                                                 } else {
2152                                                         /* reroute only those memory going through the pre block */
2153                                                         reroute_mem_through(last_mem, new_op->mem, pred);
2154                                                 }
2155
2156                                                 /* we added this load at the end, so it will be avail anyway */
2157                                                 add_memop_avail(pred_bl, new_op);
2158                                                 pred_bl->avail = new_op;
2159                                         }
2160                                         in[i] = conv_to(pred_bl->avail->value.value, mode);
2161                                 }
2162                                 phi = new_r_Phi(block, n, in, mode);
2163                                 DB((dbg, LEVEL_1, "Created new %+F in %+F for now redundant %+F\n", phi, block, op->node));
2164
2165                                 phi_op = clone_memop_phi(op, phi);
2166                                 add_memop(phi_op);
2167                         }
2168                 }
2169         }
2170
2171         /* recalculate avail by gen and kill */
2172         calc_gen_kill_avail(bl);
2173
2174         /* always update the map after gen/kill, as values might have been changed due to RAR/WAR/WAW */
2175         memcpy(bl->id_2_memop_avail, env.curr_id_2_memop, env.rbs_size * sizeof(env.curr_id_2_memop[0]));
2176
2177         if (!rbitsets_equal(bl->avail_out, env.curr_set, env.rbs_size)) {
2178                 /* the avail set has changed */
2179                 rbitset_copy(bl->avail_out, env.curr_set, env.rbs_size);
2180                 dump_curr(bl, "Avail_out*");
2181                 return 1;
2182         }
2183         dump_curr(bl, "Avail_out");
2184         return 0;
2185 }  /* insert_Load */
2186
2187 /**
2188  * Insert Loads upwards.
2189  */
2190 static void insert_Loads_upwards(void)
2191 {
2192         int i, need_iter;
2193         block_t *bl;
2194
2195         /* recalculate antic_out and insert Loads */
2196         DB((dbg, LEVEL_2, "Inserting Loads\n"));
2197
2198         i = 0;
2199         do {
2200                 DB((dbg, LEVEL_2, "Iteration %d:\n=========\n", i));
2201
2202                 need_iter = 0;
2203
2204                 /* over all blocks in reverse post order, skip the start block */
2205                 for (bl = env.forward->forward_next; bl != NULL; bl = bl->forward_next) {
2206                         need_iter |= insert_Load(bl);
2207                 }
2208                 ++i;
2209         } while (need_iter);
2210
2211         DB((dbg, LEVEL_2, "Finished Load inserting after %d iterations\n", i));
2212 }  /* insert_Loads_upwards */
2213
2214 /**
2215  * Kill unreachable control flow.
2216  *
2217  * @param irg  the graph to operate on
2218  */
2219 static void kill_unreachable_blocks(ir_graph *irg)
2220 {
2221         block_t *bl;
2222         ir_node **ins;
2223         int     changed = 0;
2224
2225         NEW_ARR_A(ir_node *, ins, env.max_cfg_preds);
2226
2227         for (bl = env.forward; bl != NULL; bl = bl->forward_next) {
2228                 ir_node *block = bl->block;
2229                 int     i, j, k, n;
2230
2231                 assert(get_Block_mark(block));
2232
2233                 n = get_Block_n_cfgpreds(block);
2234
2235                 for (i = j = 0; i < n; ++i) {
2236                         ir_node *pred = get_Block_cfgpred(block, i);
2237                         ir_node *pred_bl;
2238
2239                         if (is_Bad(pred))
2240                                 continue;
2241
2242                         pred_bl = get_nodes_block(skip_Proj(pred));
2243                         if (! get_Block_mark(pred_bl))
2244                                 continue;
2245
2246                         ins[j++] = pred;
2247                 }
2248                 if (j != n) {
2249                         ir_node *phi, *next;
2250
2251                         /* some unreachable blocks detected */
2252                         changed = 1;
2253
2254                         DB((dbg, LEVEL_1, "Killing dead block predecessors on %+F\n", block));
2255
2256                         set_irn_in(block, j, ins);
2257
2258                         /* shorten all Phi nodes */
2259                         for (phi = get_Block_phis(block); phi != NULL; phi = next) {
2260                                 next = get_Phi_next(phi);
2261
2262                                 for (i = k = 0; i < n; ++i) {
2263                                         ir_node *pred = get_Block_cfgpred_block(block, i);
2264
2265                                         if (is_Bad(pred))
2266                                                 continue;
2267
2268                                         if (! get_Block_mark(pred))
2269                                                 continue;
2270
2271                                         ins[k++] = get_Phi_pred(phi, i);
2272                                 }
2273                                 if (k == 1)
2274                                         exchange(phi, ins[0]);
2275                                 else
2276                                         set_irn_in(phi, k, ins);
2277                         }
2278                 }
2279
2280         }
2281
2282         if (changed) {
2283                 /* kick keep alives */
2284                 ir_node *end = get_irg_end(irg);
2285                 int     i, j, n = get_End_n_keepalives(end);
2286
2287                 NEW_ARR_A(ir_node *, ins, n);
2288
2289                 for (i = j = 0; i < n; ++i) {
2290                         ir_node *ka = get_End_keepalive(end, i);
2291                         ir_node *ka_bl;
2292
2293                         if (is_Bad(ka))
2294                                 continue;
2295                         if (is_Block(ka))
2296                                 ka_bl = ka;
2297                         else
2298                                 ka_bl = get_nodes_block(skip_Proj(ka));
2299                         if (get_Block_mark(ka_bl))
2300                                 ins[j++] = ka;
2301                 }
2302                 if (j != n)
2303                         set_End_keepalives(end, j, ins);
2304
2305                 free_irg_outs(irg);
2306
2307                 /* this transformation do NOT invalidate the dominance */
2308         }
2309 }  /* kill_unreachable_blocks */
2310
2311 int opt_ldst(ir_graph *irg)
2312 {
2313         block_t *bl;
2314
2315         FIRM_DBG_REGISTER(dbg, "firm.opt.ldst");
2316
2317         DB((dbg, LEVEL_1, "\nDoing Load/Store optimization on %+F\n", irg));
2318
2319         /* we need landing pads */
2320         remove_critical_cf_edges(irg);
2321
2322         if (get_opt_alias_analysis()) {
2323                 assure_irg_entity_usage_computed(irg);
2324                 assure_irp_globals_entity_usage_computed();
2325         }
2326
2327         obstack_init(&env.obst);
2328         ir_nodemap_init(&env.adr_map);
2329
2330         env.forward       = NULL;
2331         env.backward      = NULL;
2332         env.curr_adr_id   = 0;
2333         env.n_mem_ops     = 0;
2334         env.max_cfg_preds = 0;
2335         env.changed       = 0;
2336         env.start_bl      = get_irg_start_block(irg);
2337         env.end_bl        = get_irg_end_block(irg);
2338 #ifdef DEBUG_libfirm
2339         env.id_2_address  = NEW_ARR_F(ir_node *, 0);
2340 #endif
2341
2342         assure_irg_outs(irg);
2343
2344         ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_BLOCK_MARK);
2345
2346         /* first step: allocate block entries. Note that some blocks might be
2347            unreachable here. Using the normal walk ensures that ALL blocks are initialized. */
2348         irg_walk_graph(irg, prepare_blocks, link_phis, NULL);
2349
2350         /* produce an inverse post-order list for the CFG: this links only reachable
2351            blocks */
2352         irg_out_block_walk(get_irg_start_block(irg), NULL, inverse_post_order, NULL);
2353
2354         if (! get_Block_mark(env.end_bl)) {
2355                 /*
2356                  * The end block is NOT reachable due to endless loops
2357                  * or no_return calls.
2358                  * Place the end block last.
2359                  * env.backward points to the last block in the list for this purpose.
2360                  */
2361                 env.backward->forward_next = get_block_entry(env.end_bl);
2362
2363                 set_Block_mark(env.end_bl, 1);
2364         }
2365
2366         /* KILL unreachable blocks: these disturb the data flow analysis */
2367         kill_unreachable_blocks(irg);
2368
2369         assure_doms(irg);
2370
2371         /* second step: find and sort all memory ops */
2372         walk_memory_irg(irg, collect_memops, NULL, NULL);
2373
2374 #ifdef DEBUG_libfirm
2375         /* check that the backward map is correct */
2376         assert((unsigned)ARR_LEN(env.id_2_address) == env.curr_adr_id);
2377 #endif
2378
2379         if (env.n_mem_ops == 0) {
2380                 /* no memory ops */
2381                 goto end;
2382         }
2383
2384         /* create the backward links. */
2385         env.backward = NULL;
2386         irg_block_walk_graph(irg, NULL, collect_backward, NULL);
2387
2388         /* link the end block in */
2389         bl = get_block_entry(env.end_bl);
2390         bl->backward_next = env.backward;
2391         env.backward      = bl;
2392
2393         /* check that we really start with the start / end block */
2394         assert(env.forward->block  == env.start_bl);
2395         assert(env.backward->block == env.end_bl);
2396
2397         /* create address sets: for now, only the existing addresses are allowed plus one
2398            needed for the sentinel */
2399         env.rbs_size = env.curr_adr_id + 1;
2400
2401         /* create the current set */
2402         env.curr_set = rbitset_obstack_alloc(&env.obst, env.rbs_size);
2403         rbitset_set(env.curr_set, env.rbs_size - 1);
2404         env.curr_id_2_memop = NEW_ARR_D(memop_t *, &env.obst, env.rbs_size);
2405         memset(env.curr_id_2_memop, 0, env.rbs_size * sizeof(env.curr_id_2_memop[0]));
2406
2407         for (bl = env.forward; bl != NULL; bl = bl->forward_next) {
2408                 /* set sentinel bits */
2409                 bl->avail_out  = rbitset_obstack_alloc(&env.obst, env.rbs_size);
2410                 rbitset_set(bl->avail_out, env.rbs_size - 1);
2411
2412                 bl->id_2_memop_avail = NEW_ARR_D(memop_t *, &env.obst, env.rbs_size);
2413                 memset(bl->id_2_memop_avail, 0, env.rbs_size * sizeof(bl->id_2_memop_avail[0]));
2414
2415                 bl->anticL_in  = rbitset_obstack_alloc(&env.obst, env.rbs_size);
2416                 rbitset_set(bl->anticL_in, env.rbs_size - 1);
2417
2418                 bl->id_2_memop_antic = NEW_ARR_D(memop_t *, &env.obst, env.rbs_size);
2419                 memset(bl->id_2_memop_antic, 0, env.rbs_size * sizeof(bl->id_2_memop_antic[0]));
2420         }
2421
2422         (void) dump_block_list;
2423
2424         calcAvail();
2425         calcAntic();
2426
2427         insert_Loads_upwards();
2428
2429         if (env.changed) {
2430                 /* over all blocks in reverse post order */
2431                 for (bl = env.forward; bl != NULL; bl = bl->forward_next) {
2432                         do_replacements(bl);
2433                 }
2434
2435                 /* not only invalidate but free them. We might allocate new out arrays
2436                    on our obstack which will be deleted yet. */
2437                 free_irg_outs(irg);
2438                 set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
2439         }
2440 end:
2441
2442         ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_BLOCK_MARK);
2443         ir_nodemap_destroy(&env.adr_map);
2444         obstack_free(&env.obst, NULL);
2445
2446 #ifdef DEBUG_libfirm
2447         DEL_ARR_F(env.id_2_address);
2448 #endif
2449
2450         return env.changed != 0;
2451 }  /* opt_ldst */
2452
2453 ir_graph_pass_t *opt_ldst_pass(const char *name)
2454 {
2455         return def_graph_pass_ret(name ? name : "ldst_df", opt_ldst);
2456 }  /* opt_ldst_pass */