remove is_Global/get_GlobalEntity
[libfirm] / ir / opt / opt_ldst.c
1 /*
2  * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License.
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief   Dataflow driven Load/Store optimizations, uses some ideas from
23  *          VanDrunen's LEPRE
24  * @author  Michael Beck
25  * @version $Id$
26  */
27 #include "config.h"
28
29 #include "irnode_t.h"
30 #include "irflag_t.h"
31 #include "array_t.h"
32 #include "ircons.h"
33 #include "irdom.h"
34 #include "irgmod.h"
35 #include "irgwalk.h"
36 #include "irouts.h"
37 #include "irgraph.h"
38 #include "irgopt.h"
39 #include "iropt.h"
40 #include "iroptimize.h"
41 #include "irnodehashmap.h"
42 #include "raw_bitset.h"
43 #include "debug.h"
44 #include "error.h"
45 #include "irpass.h"
46
47 /* maximum number of output Proj's */
48 #define MAX_PROJ ((long)pn_Load_max > (long)pn_Store_max ? (long)pn_Load_max : (long)pn_Store_max)
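/* (memop_t.projs[] below must be able to hold the Projs of either a Load or a Store,
   hence the larger of the two maxima) */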
49
50 /**
51  * Maps an address to a dense ID.
52  */
53 typedef struct address_entry_t {
54         unsigned id;          /**< The ID */
55 } address_entry;
56
57 /**
58  * Memop-flags.
59  */
60 enum memop_flags {
61         FLAG_KILL_ALL    = 1, /**< KILL all addresses */
62         FLAG_KILLED_NODE = 2, /**< this node was killed */
63         FLAG_EXCEPTION   = 4, /**< this node has exception flow */
64         FLAG_IGNORE      = 8, /**< ignore this node (volatile or other) */
65 };
66
67 /**
68  * A value: This represents a value stored at a given address in
69  * memory. Do not confuse with values from value numbering.
70  */
71 typedef struct value_t value_t;
72 struct value_t {
73         ir_node  *address;    /**< the address of this value */
74         ir_node  *value;      /**< the value itself */
75         ir_mode  *mode;       /**< the mode of the value */
76         unsigned id;          /**< address id */
77 };
78
79 /**
80  * A memop describes a memory-related operation.
81  * These are Loads/Stores and all other ops that might modify
82  * memory (Calls, CopyB) or cause exceptions.
83  */
84 typedef struct memop_t memop_t;
85 struct memop_t {
86         value_t  value;      /**< the value of this memop: only defined for Load/Store */
87         ir_node  *node;      /**< the memory op itself */
88         ir_node  *mem;       /**< the memory FROM this node */
89         ir_node  *replace;   /**< the replacement node if this memop is replaced */
90         memop_t  *next;      /**< links to the next memory op in the block in forward order. */
91         memop_t  *prev;      /**< links to the previous memory op in the block in forward order. */
92         unsigned flags;      /**< memop flags */
93         ir_node  *projs[MAX_PROJ+1]; /**< Projs of this memory op */
94 };
95
96 /**
97  * Additional data for every basic block.
98  */
99 typedef struct block_t block_t;
100 struct block_t {
101         memop_t  *memop_forward;     /**< topologically sorted list of memory ops in this block */
102         memop_t  *memop_backward;    /**< last memop in the list */
103         unsigned *avail_out;         /**< out-set of available addresses */
104         memop_t  **id_2_memop_avail; /**< maps avail address ids to memops */
105         unsigned *anticL_in;         /**< in-set of anticipated Load addresses */
106         memop_t  **id_2_memop_antic; /**< maps anticipated address ids to memops */
107         ir_node  *block;             /**< the associated block */
108         block_t  *forward_next;      /**< next block entry for forward iteration */
109         block_t  *backward_next;     /**< next block entry for backward iteration */
110         memop_t  *avail;             /**< used locally for the avail map */
111         memop_t  **trans_results;    /**< used to cache translated nodes during the antic calculation. */
112 };
113
114 /**
115  * Metadata for this pass.
116  */
117 typedef struct ldst_env_t {
118         struct obstack   obst;             /**< obstack for temporary data */
119         ir_nodehashmap_t adr_map;          /**< Maps addresses to address_entry records */
120         block_t         *forward;          /**< Inverse post-order list of all blocks Start->End */
121         block_t         *backward;         /**< Inverse post-order list of all blocks End->Start */
122         ir_node         *start_bl;         /**< start block of the current graph */
123         ir_node         *end_bl;           /**< end block of the current graph */
124         unsigned        *curr_set;         /**< current set of addresses */
125         memop_t         **curr_id_2_memop; /**< current map of address ids to memops */
126         unsigned        curr_adr_id;       /**< number for address mapping */
127         unsigned        n_mem_ops;         /**< number of memory operations (Loads/Stores) */
128         size_t          rbs_size;          /**< size of all bitsets in bytes */
129         int             max_cfg_preds;     /**< maximum number of block cfg predecessors */
130         int             changed;           /**< Flags for changed graph state */
131 #ifdef DEBUG_libfirm
132         ir_node         **id_2_address;    /**< maps an id to the used address */
133 #endif
134 } ldst_env;
135
136 /* the one and only environment */
137 static ldst_env env;
138
139 #ifdef DEBUG_libfirm
140
141 static firm_dbg_module_t *dbg;
142
143 /**
144  * Dumps the block list.
145  *
146  * @param env  the ldst environment
147  */
148 static void dump_block_list(ldst_env *env)
149 {
150         block_t *entry;
151         memop_t *op;
152         int     i;
153
154         for (entry = env->forward; entry != NULL; entry = entry->forward_next) {
155                 DB((dbg, LEVEL_2, "%+F {", entry->block));
156
157                 i = 0;
158                 for (op = entry->memop_forward; op != NULL; op = op->next) {
159                         if (i == 0) {
160                                 DB((dbg, LEVEL_2, "\n\t"));
161                         }
162                         DB((dbg, LEVEL_2, "%+F", op->node));
163                         if ((op->flags & FLAG_KILL_ALL) == FLAG_KILL_ALL)
164                                 DB((dbg, LEVEL_2, "X"));
165                         else if (op->flags & FLAG_KILL_ALL)
166                                 DB((dbg, LEVEL_2, "K"));
167                         DB((dbg, LEVEL_2, ", "));
168
169                         i = (i + 1) & 3;
170                 }
171                 DB((dbg, LEVEL_2, "\n}\n\n"));
172         }
173 }  /* dump_block_list */
174
175 /**
176  * Dumps the current set.
177  *
178  * @param bl   current block
179  * @param s    name of the set
180  */
181 static void dump_curr(block_t *bl, const char *s)
182 {
183         size_t end = env.rbs_size - 1;
184         size_t pos;
185         int    i;
186
187         DB((dbg, LEVEL_2, "%s[%+F] = {", s, bl->block));
188         i = 0;
189         for (pos = rbitset_next(env.curr_set, 0, 1); pos < end; pos = rbitset_next(env.curr_set, pos + 1, 1)) {
190                 memop_t *op = env.curr_id_2_memop[pos];
191
192                 if (i == 0) {
193                         DB((dbg, LEVEL_2, "\n\t"));
194                 }
195
196                 DB((dbg, LEVEL_2, "<%+F, %+F>, ", op->value.address, op->value.value));
197                 i = (i + 1) & 3;
198         }
199         DB((dbg, LEVEL_2, "\n}\n"));
200 }  /* dump_curr */
201
202 #else
203 static void dump_block_list(ldst_env *env)
204 {
205         (void) env;
206 }
207 static void dump_curr(block_t *bl, const char *s)
208 {
209         (void) bl;
210         (void) s;
211 }
212 #endif /* DEBUG_libfirm */
213
214 /** Get the block entry for a block node */
215 static block_t *get_block_entry(const ir_node *block)
216 {
217         assert(is_Block(block));
218
219         return (block_t*)get_irn_link(block);
220 }  /* get_block_entry */
221
222 /** Get the memop entry for a memory operation node */
223 static memop_t *get_irn_memop(const ir_node *irn)
224 {
225         assert(! is_Block(irn));
226         return (memop_t*)get_irn_link(irn);
227 }  /* get_irn_memop */
228
229 /**
230  * Walk over the memory edges from definition to users.
231  * This ensures that even operations without a memory output are found.
232  *
233  * @param irn   start node
234  * @param pre   pre walker function
235  * @param post  post walker function
236  * @param ctx   context parameter for the walker functions
237  */
238 static void walk_memory(ir_node *irn, irg_walk_func *pre, irg_walk_func *post, void *ctx)
239 {
240         int     i;
241         ir_mode *mode;
242
243         mark_irn_visited(irn);
244
245         if (pre)
246                 pre(irn, ctx);
247
248         mode = get_irn_mode(irn);
249         if (mode == mode_M) {
250                 /* every successor uses memory */
251                 for (i = get_irn_n_outs(irn) - 1; i >= 0; --i) {
252                         ir_node *succ = get_irn_out(irn, i);
253
254                         if (! irn_visited(succ))
255                                 walk_memory(succ, pre, post, ctx);
256                 }
257         } else if (mode == mode_T) {
258                 /* only some Projs use memory */
259                 for (i = get_irn_n_outs(irn) - 1; i >= 0; --i) {
260                         ir_node *proj = get_irn_out(irn, i);
261
262                         if (get_irn_mode(proj) == mode_M && ! irn_visited(proj))
263                                 walk_memory(proj, pre, post, ctx);
264                 }
265         }
266         if (post)
267                 post(irn, ctx);
268 }  /* walk_memory */
269
270 /**
271  * Walks over all memory nodes of a graph.
272  *
273  * @param irg   a graph
274  * @param pre   pre walker function
275  * @param post  post walker function
276  * @param ctx   context parameter for the walker functions
277  */
278 static void walk_memory_irg(ir_graph *irg, irg_walk_func pre, irg_walk_func post, void *ctx)
279 {
280         inc_irg_visited(irg);
281
282         ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
283
284         /*
285          * there are two possible sources for memory: initial_mem and nomem
286          * we ignore nomem as this should NOT change the memory
287          */
288         walk_memory(get_irg_initial_mem(irg), pre, post, ctx);
289
290         ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
291 }  /* walk_memory_irg */
292
293 /**
294  * Register an address and allocate a dense (0..n) ID for it.
295  *
296  * @param adr  the IR-node representing the address
297  *
298  * @return the allocated id
299  */
300 static unsigned register_address(ir_node *adr)
301 {
302         address_entry *entry;
303
304         /* skip Confirms and Casts */
305 restart:
306         if (is_Confirm(adr)) {
307                 adr = get_Confirm_value(adr);
308                 goto restart;
309         }
310         if (is_Cast(adr)) {
311                 adr = get_Cast_op(adr);
312                 goto restart;
313         }
314
315         entry = (address_entry*)ir_nodehashmap_get(&env.adr_map, adr);
316
317         if (entry == NULL) {
318                 /* new address */
319                 entry = OALLOC(&env.obst, address_entry);
320
321                 entry->id = env.curr_adr_id++;
322                 ir_nodehashmap_insert(&env.adr_map, adr, entry);
323
324                 DB((dbg, LEVEL_3, "ADDRESS %+F has ID %u\n", adr, entry->id));
325 #ifdef DEBUG_libfirm
326                 ARR_APP1(ir_node *, env.id_2_address, adr);
327 #endif
328         }
329         return entry->id;
330 }  /* register_address */
331
332
333 /**
334  * translate an address through a Phi node into a given predecessor
335  * block.
336  *
337  * @param address  the address
338  * @param block    the block
339  * @param pos      the position of the predecessor in block
340  */
341 static ir_node *phi_translate(ir_node *address, const ir_node *block, int pos)
342 {
343         if (is_Phi(address) && get_nodes_block(address) == block)
344                 address = get_Phi_pred(address, pos);
345         return address;
346 }  /* phi_translate */
347
348 /**
349  * Walker: allocate a block entry for every block
350  * and register all potential addresses.
351  */
352 static void prepare_blocks(ir_node *irn, void *ctx)
353 {
354         (void)ctx;
355
356         if (is_Block(irn)) {
357                 block_t *entry = OALLOC(&env.obst, block_t);
358                 int     n;
359
360                 entry->memop_forward    = NULL;
361                 entry->memop_backward   = NULL;
362                 entry->avail_out        = NULL;
363                 entry->id_2_memop_avail = NULL;
364                 entry->anticL_in        = NULL;
365                 entry->id_2_memop_antic = NULL;
366                 entry->block            = irn;
367                 entry->forward_next     = NULL;
368                 entry->backward_next    = NULL;
369                 entry->avail            = NULL;
370                 entry->trans_results    = NULL;
371                 set_irn_link(irn, entry);
372
373                 set_Block_phis(irn, NULL);
374
375                 /* use block marks to track unreachable blocks */
376                 set_Block_mark(irn, 0);
377
378                 n = get_Block_n_cfgpreds(irn);
379                 if (n > env.max_cfg_preds)
380                         env.max_cfg_preds = n;
381         } else {
382                 ir_mode *mode = get_irn_mode(irn);
383
384                 if (mode_is_reference(mode)) {
385                         /*
386                          * Register ALL possible addresses: this is overkill yet, but
387                          * simpler than doing it only for all possible translated addresses
388                          * (which would be sufficient at the moment).
389                          */
390                         (void)register_address(irn);
391                 }
392         }
393 }  /* prepare_blocks */
394
395 /**
396  * Post-Walker, link in all Phi's
397  */
398 static void link_phis(ir_node *irn, void *ctx)
399 {
400         (void)ctx;
401
402         if (is_Phi(irn)) {
403                 ir_node *block = get_nodes_block(irn);
404                 add_Block_phi(block, irn);
405         }
406 }  /* link_phis */
407
408 /**
409  * Block walker: creates the inverse post-order list for the CFG.
410  */
411 static void inverse_post_order(ir_node *block, void *ctx)
412 {
413         block_t *entry = get_block_entry(block);
414
415         (void)ctx;
416
417         /* mark that this block IS reachable from Start */
418         set_Block_mark(block, 1);
419
420         /* create the list in inverse order */
421         entry->forward_next = env.forward;
422         env.forward         = entry;
423
424         /* remember the first visited (last in list) entry, needed for later */
425         if (env.backward == NULL)
426                 env.backward = entry;
427 }  /* inverse_post_order */
428
429 /**
430  * Block walker: create backward links for the memops of a block.
431  */
432 static void collect_backward(ir_node *block, void *ctx)
433 {
434         block_t *entry = get_block_entry(block);
435         memop_t *last, *op;
436
437         (void)ctx;
438
439         /*
440          * Do NOT link in the end block yet. We want it to be
441          * the first in the list. This is NOT guaranteed by the walker
442          * if we have endless loops.
443          */
444         if (block != env.end_bl) {
445                 entry->backward_next = env.backward;
446
447                 /* create the list in inverse order */
448                 env.backward = entry;
449         }
450
451         /* create backward links for all memory ops */
452         last = NULL;
453         for (op = entry->memop_forward; op != NULL; op = op->next) {
454                 op->prev = last;
455                 last     = op;
456         }
457         entry->memop_backward = last;
458 }  /* collect_backward */
459
460 /**
461  * Allocate a memop.
462  *
463  * @param irn  the IR-node representing the memop or NULL
464  *             if this is a translated (virtual) memop
465  *
466  * @return the allocated memop
467  */
468 static memop_t *alloc_memop(ir_node *irn)
469 {
470         memop_t *m = OALLOC(&env.obst, memop_t);
471
472         m->value.address = NULL;
473         m->value.value   = NULL;
474         m->value.mode    = NULL;
475
476         m->node          = irn;
477         m->mem           = NULL;
478         m->replace       = NULL;
479         m->next          = NULL;
480         m->flags         = 0;
481
482         memset(m->projs, 0, sizeof(m->projs));
483
484         if (irn != NULL)
485                 set_irn_link(irn, m);
486         return m;
487 }  /* alloc_memop */
488
489 /**
490  * Create a memop for a Phi-replacement.
491  *
492  * @param op   the memop to clone
493  * @param phi  the Phi-node representing the new value
494  */
495 static memop_t *clone_memop_phi(memop_t *op, ir_node *phi)
496 {
497         memop_t *m = OALLOC(&env.obst, memop_t);
498
499         m->value         = op->value;
500         m->value.value   = phi;
501
502         m->node          = phi;
503         m->replace       = NULL;
504         m->next          = NULL;
505         m->flags         = 0;
506
507         set_irn_link(phi, m);
508         return m;
509 }  /* clone_memop_phi */
510
511 /**
512  * Return the memory properties of a call node.
513  *
514  * @param call  the call node
515  *
516  * @return a bitset of mtp_property_const and mtp_property_pure
517  */
518 static unsigned get_Call_memory_properties(ir_node *call)
519 {
520         ir_type *call_tp = get_Call_type(call);
521         unsigned prop = get_method_additional_properties(call_tp);
522
523         /* check first the call type */
524         if ((prop & (mtp_property_const|mtp_property_pure)) == 0) {
525                 /* try the called entity */
526                 ir_node *ptr = get_Call_ptr(call);
527
528                 if (is_SymConst_addr_ent(ptr)) {
529                         ir_entity *ent = get_SymConst_entity(ptr);
530
531                         prop = get_entity_additional_properties(ent);
532                 }
533         }
534         return prop & (mtp_property_const|mtp_property_pure);
535 }  /* get_Call_memory_properties */
536
537 /**
538  * Returns an entity if the address ptr points to a constant one.
539  *
540  * @param ptr  the address
541  *
542  * @return an entity or NULL
543  */
544 static ir_entity *find_constant_entity(ir_node *ptr)
545 {
546         for (;;) {
547                 if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
548                         return get_SymConst_entity(ptr);
549                 } else if (is_Sel(ptr)) {
550                         ir_entity *ent = get_Sel_entity(ptr);
551                         ir_type   *tp  = get_entity_owner(ent);
552
553                         /* Do not fiddle with polymorphism. */
554                         if (is_Class_type(get_entity_owner(ent)) &&
555                                 ((get_entity_n_overwrites(ent)    != 0) ||
556                                 (get_entity_n_overwrittenby(ent) != 0)   ) )
557                                 return NULL;
558
559                         if (is_Array_type(tp)) {
560                                 /* check bounds */
561                                 int i, n;
562
563                                 for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
564                                         ir_node   *bound;
565                                         ir_tarval *tlower, *tupper;
566                                         ir_node   *index = get_Sel_index(ptr, i);
567                                         ir_tarval *tv     = computed_value(index);
568
569                                         /* check if the index is constant */
570                                         if (tv == tarval_bad)
571                                                 return NULL;
572
573                                         bound  = get_array_lower_bound(tp, i);
574                                         tlower = computed_value(bound);
575                                         bound  = get_array_upper_bound(tp, i);
576                                         tupper = computed_value(bound);
577
578                                         if (tlower == tarval_bad || tupper == tarval_bad)
579                                                 return NULL;
580
581                                         if (tarval_cmp(tv, tlower) == ir_relation_less)
582                                                 return NULL;
583                                         if (tarval_cmp(tupper, tv) == ir_relation_less)
584                                                 return NULL;
585
586                                         /* ok, bounds check finished */
587                                 }
588                         }
589
590                         if (get_entity_linkage(ent) == IR_LINKAGE_CONSTANT)
591                                 return ent;
592
593                         /* try next */
594                         ptr = get_Sel_ptr(ptr);
595                 } else if (is_Add(ptr)) {
596                         ir_node *l = get_Add_left(ptr);
597                         ir_node *r = get_Add_right(ptr);
598
599                         if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
600                                 ptr = l;
601                         else if (get_irn_mode(r) == get_irn_mode(ptr) && is_Const(l))
602                                 ptr = r;
603                         else
604                                 return NULL;
605
606                         /* for now, we support only one addition, reassoc should fold all others */
607                         if (! is_SymConst(ptr) && !is_Sel(ptr))
608                                 return NULL;
609                 } else if (is_Sub(ptr)) {
610                         ir_node *l = get_Sub_left(ptr);
611                         ir_node *r = get_Sub_right(ptr);
612
613                         if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
614                                 ptr = l;
615                         else
616                                 return NULL;
617                         /* for now, we support only one subtraction, reassoc should fold all others */
618                         if (! is_SymConst(ptr) && !is_Sel(ptr))
619                                 return NULL;
620                 } else
621                         return NULL;
622         }
623 }  /* find_constant_entity */
624
625 /**
626  * Return the selection index of a Sel node for dimension dim as a long.
627  */
628 static long get_Sel_array_index_long(ir_node *n, int dim)
629 {
630         ir_node *index = get_Sel_index(n, dim);
631         assert(is_Const(index));
632         return get_tarval_long(get_Const_tarval(index));
633 }  /* get_Sel_array_index_long */
634
635 /**
636  * Returns the accessed component graph path for a
637  * node computing an address.
638  *
639  * @param ptr    the node computing the address
640  * @param depth  current depth in steps upward from the root
641  *               of the address
642  */
643 static compound_graph_path *rec_get_accessed_path(ir_node *ptr, size_t depth)
644 {
645         compound_graph_path *res = NULL;
646         ir_entity           *root, *field, *ent;
647         size_t              path_len, pos, idx;
648         ir_tarval           *tv;
649         ir_type             *tp;
650
651         if (is_SymConst(ptr)) {
652                 /* a SymConst. If the depth is 0, this is an access to a global
653                  * entity and we don't need a component path, else we know
654                  * at least its length.
655                  */
656                 assert(get_SymConst_kind(ptr) == symconst_addr_ent);
657                 root = get_SymConst_entity(ptr);
658                 res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
659         } else if (is_Sel(ptr)) {
660                 /* it's a Sel, go up until we find the root */
661                 res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);
662                 if (res == NULL)
663                         return NULL;
664
665                 /* fill up the step in the path at the current position */
666                 field    = get_Sel_entity(ptr);
667                 path_len = get_compound_graph_path_length(res);
668                 pos      = path_len - depth - 1;
669                 set_compound_graph_path_node(res, pos, field);
670
671                 if (is_Array_type(get_entity_owner(field))) {
672                         assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
673                         set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
674                 }
675         } else if (is_Add(ptr)) {
676                 ir_mode   *mode;
677                 ir_tarval *tmp;
678
679                 {
680                         ir_node *l = get_Add_left(ptr);
681                         ir_node *r = get_Add_right(ptr);
682                         if (is_Const(r) && get_irn_mode(l) == get_irn_mode(ptr)) {
683                                 ptr = l;
684                                 tv  = get_Const_tarval(r);
685                         } else {
686                                 ptr = r;
687                                 tv  = get_Const_tarval(l);
688                         }
689                 }
690 ptr_arith:
691                 mode = get_tarval_mode(tv);
692                 tmp  = tv;
693
694                 /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
695                 if (is_Sel(ptr)) {
696                         field = get_Sel_entity(ptr);
697                 } else {
698                         field = get_SymConst_entity(ptr);
699                 }
700                 idx = 0;
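                /* walk down the nested array types: each iteration strips one array
                   dimension and derives its index by dividing the remaining constant
                   byte offset (tmp) by the element size */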
701                 for (ent = field;;) {
702                         unsigned  size;
703                         ir_tarval *sz, *tv_index, *tlower, *tupper;
704                         ir_node   *bound;
705
706                         tp = get_entity_type(ent);
707                         if (! is_Array_type(tp))
708                                 break;
709                         ent = get_array_element_entity(tp);
710                         size = get_type_size_bytes(get_entity_type(ent));
711                         sz   = new_tarval_from_long(size, mode);
712
713                         tv_index = tarval_div(tmp, sz);
714                         tmp      = tarval_mod(tmp, sz);
715
716                         if (tv_index == tarval_bad || tmp == tarval_bad)
717                                 return NULL;
718
719                         assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
720                         bound  = get_array_lower_bound(tp, 0);
721                         tlower = computed_value(bound);
722                         bound  = get_array_upper_bound(tp, 0);
723                         tupper = computed_value(bound);
724
725                         if (tlower == tarval_bad || tupper == tarval_bad)
726                                 return NULL;
727
728                         if (tarval_cmp(tv_index, tlower) == ir_relation_less)
729                                 return NULL;
730                         if (tarval_cmp(tupper, tv_index) == ir_relation_less)
731                                 return NULL;
732
733                         /* ok, bounds check finished */
734                         ++idx;
735                 }
736                 if (! tarval_is_null(tmp)) {
737                         /* access to some struct/union member */
738                         return NULL;
739                 }
740
741                 /* should be at least ONE array */
742                 if (idx == 0)
743                         return NULL;
744
745                 res = rec_get_accessed_path(ptr, depth + idx);
746                 if (res == NULL)
747                         return NULL;
748
749                 path_len = get_compound_graph_path_length(res);
750                 pos      = path_len - depth - idx;
751
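                /* walk the array types a second time: now that the full path length is
                   known, fill in the element entity and array index for each dimension */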
752                 for (ent = field;;) {
753                         unsigned   size;
754                         ir_tarval *sz, *tv_index;
755                         long       index;
756
757                         tp = get_entity_type(ent);
758                         if (! is_Array_type(tp))
759                                 break;
760                         ent = get_array_element_entity(tp);
761                         set_compound_graph_path_node(res, pos, ent);
762
763                         size = get_type_size_bytes(get_entity_type(ent));
764                         sz   = new_tarval_from_long(size, mode);
765
766                         tv_index = tarval_div(tv, sz);
767                         tv       = tarval_mod(tv, sz);
768
769                         /* worked above, should work again */
770                         assert(tv_index != tarval_bad && tv != tarval_bad);
771
772                         /* bounds already checked above */
773                         index = get_tarval_long(tv_index);
774                         set_compound_graph_path_array_index(res, pos, index);
775                         ++pos;
776                 }
777         } else if (is_Sub(ptr)) {
778                 ir_node *l = get_Sub_left(ptr);
779                 ir_node *r = get_Sub_right(ptr);
780
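                /* a Sub of a constant is treated like an Add of the negated constant;
                   find_constant_entity() already ensured the right operand is a Const */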
781                 ptr = l;
782                 tv  = get_Const_tarval(r);
783                 tv  = tarval_neg(tv);
784                 goto ptr_arith;
785         }
786         return res;
787 }  /* rec_get_accessed_path */
788
789 /**
790  * Returns an access path or NULL.  The access path is only
791  * valid if the graph is in phase_high and _no_ address computation is used.
792  */
793 static compound_graph_path *get_accessed_path(ir_node *ptr)
794 {
795         compound_graph_path *gr = rec_get_accessed_path(ptr, 0);
796         return gr;
797 }  /* get_accessed_path */
798
799 typedef struct path_entry {
800         ir_entity         *ent;
801         struct path_entry *next;
802         size_t            index;
803 } path_entry;
804
805 static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
806 {
807         path_entry       entry, *p;
808         ir_entity        *ent, *field;
809         ir_initializer_t *initializer;
810         ir_tarval        *tv;
811         ir_type          *tp;
812         size_t           n;
813
814         entry.next = next;
815         if (is_SymConst(ptr)) {
816                 /* found the root */
817                 ent         = get_SymConst_entity(ptr);
818                 initializer = get_entity_initializer(ent);
819                 for (p = next; p != NULL;) {
820                         if (initializer->kind != IR_INITIALIZER_COMPOUND)
821                                 return NULL;
822                         n  = get_initializer_compound_n_entries(initializer);
823                         tp = get_entity_type(ent);
824
825                         if (is_Array_type(tp)) {
826                                 ent = get_array_element_entity(tp);
827                                 if (ent != p->ent) {
828                                         /* a missing [0] */
829                                         if (0 >= n)
830                                                 return NULL;
831                                         initializer = get_initializer_compound_value(initializer, 0);
832                                         continue;
833                                 }
834                         }
835                         if (p->index >= n)
836                                 return NULL;
837                         initializer = get_initializer_compound_value(initializer, p->index);
838
839                         ent = p->ent;
840                         p   = p->next;
841                 }
842                 tp = get_entity_type(ent);
843                 while (is_Array_type(tp)) {
844                         ent = get_array_element_entity(tp);
845                         tp = get_entity_type(ent);
846                         /* a missing [0] */
847                         n  = get_initializer_compound_n_entries(initializer);
848                         if (0 >= n)
849                                 return NULL;
850                         initializer = get_initializer_compound_value(initializer, 0);
851                 }
852
853                 switch (initializer->kind) {
854                 case IR_INITIALIZER_CONST:
855                         return get_initializer_const_value(initializer);
856                 case IR_INITIALIZER_TARVAL:
857                 case IR_INITIALIZER_NULL:
858                 default:
859                         return NULL;
860                 }
861         } else if (is_Sel(ptr)) {
862                 entry.ent = field = get_Sel_entity(ptr);
863                 tp = get_entity_owner(field);
864                 if (is_Array_type(tp)) {
865                         assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
866                         entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
867                 } else {
868                         size_t i, n_members = get_compound_n_members(tp);
869                         for (i = 0; i < n_members; ++i) {
870                                 if (get_compound_member(tp, i) == field)
871                                         break;
872                         }
873                         if (i >= n_members) {
874                                 /* not found: should NOT happen */
875                                 return NULL;
876                         }
877                         entry.index = i;
878                 }
879                 return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
880         }  else if (is_Add(ptr)) {
881                 ir_mode *mode;
882                 unsigned pos;
883
884                 {
885                         ir_node *l = get_Add_left(ptr);
886                         ir_node *r = get_Add_right(ptr);
887                         if (is_Const(r)) {
888                                 ptr = l;
889                                 tv  = get_Const_tarval(r);
890                         } else {
891                                 ptr = r;
892                                 tv  = get_Const_tarval(l);
893                         }
894                 }
895 ptr_arith:
896                 mode = get_tarval_mode(tv);
897
898                 /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
899                 if (is_Sel(ptr)) {
900                         field = get_Sel_entity(ptr);
901                 } else {
902                         field = get_SymConst_entity(ptr);
903                 }
904
905                 /* count needed entries */
906                 pos = 0;
907                 for (ent = field;;) {
908                         tp = get_entity_type(ent);
909                         if (! is_Array_type(tp))
910                                 break;
911                         ent = get_array_element_entity(tp);
912                         ++pos;
913                 }
914                 /* should be at least ONE entry */
915                 if (pos == 0)
916                         return NULL;
917
918                 /* allocate the right number of entries */
919                 NEW_ARR_A(path_entry, p, pos);
920
921                 /* fill them up */
922                 pos = 0;
923                 for (ent = field;;) {
924                         unsigned  size;
925                         ir_tarval *sz, *tv_index, *tlower, *tupper;
926                         size_t    index;
927                         ir_node   *bound;
928
929                         tp = get_entity_type(ent);
930                         if (! is_Array_type(tp))
931                                 break;
932                         ent = get_array_element_entity(tp);
933                         p[pos].ent  = ent;
934                         p[pos].next = &p[pos + 1];
935
936                         size = get_type_size_bytes(get_entity_type(ent));
937                         sz   = new_tarval_from_long(size, mode);
938
939                         tv_index = tarval_div(tv, sz);
940                         tv       = tarval_mod(tv, sz);
941
942                         if (tv_index == tarval_bad || tv == tarval_bad)
943                                 return NULL;
944
945                         assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
946                         bound  = get_array_lower_bound(tp, 0);
947                         tlower = computed_value(bound);
948                         bound  = get_array_upper_bound(tp, 0);
949                         tupper = computed_value(bound);
950
951                         if (tlower == tarval_bad || tupper == tarval_bad)
952                                 return NULL;
953
954                         if (tarval_cmp(tv_index, tlower) == ir_relation_less)
955                                 return NULL;
956                         if (tarval_cmp(tupper, tv_index) == ir_relation_less)
957                                 return NULL;
958
959                         /* ok, bounds check finished */
960                         index = get_tarval_long(tv_index);
961                         p[pos].index = index;
962                         ++pos;
963                 }
964                 if (! tarval_is_null(tv)) {
965                         /* hmm, wrong access */
966                         return NULL;
967                 }
968                 p[pos - 1].next = next;
969                 return rec_find_compound_ent_value(ptr, p);
970         } else if (is_Sub(ptr)) {
971                 ir_node *l = get_Sub_left(ptr);
972                 ir_node *r = get_Sub_right(ptr);
973
974                 ptr = l;
975                 tv  = get_Const_tarval(r);
976                 tv  = tarval_neg(tv);
977                 goto ptr_arith;
978         }
979         return NULL;
980 }  /* rec_find_compound_ent_value */
981
982 static ir_node *find_compound_ent_value(ir_node *ptr)
983 {
984         return rec_find_compound_ent_value(ptr, NULL);
985 }  /* find_compound_ent_value */
986
987 /**
988  * Mark a Load memop to be replaced by a definition
989  *
990  * @param op  the Load memop
991  */
992 static void mark_replace_load(memop_t *op, ir_node *def)
993 {
994         op->replace = def;
995         op->flags |= FLAG_KILLED_NODE;
996         env.changed = 1;
997 }  /* mark_replace_load */
998
999 /**
1000  * Mark a Store memop to be removed.
1001  *
1002  * @param op  the Store memop
1003  */
1004 static void mark_remove_store(memop_t *op)
1005 {
1006         op->flags |= FLAG_KILLED_NODE;
1007         env.changed = 1;
1008 }  /* mark_remove_store */
1009
1010 /**
1011  * Update a memop for a Load.
1012  *
1013  * @param m  the memop
1014  */
1015 static void update_Load_memop(memop_t *m)
1016 {
1017         int       i;
1018         ir_node   *load = m->node;
1019         ir_node   *ptr;
1020         ir_entity *ent;
1021
1022         if (get_Load_volatility(load) == volatility_is_volatile)
1023                 m->flags |= FLAG_IGNORE;
1024
1025         ptr = get_Load_ptr(load);
1026
1027         m->value.address = ptr;
1028
1029         for (i = get_irn_n_outs(load) - 1; i >= 0; --i) {
1030                 ir_node *proj = get_irn_out(load, i);
1031                 long    pn;
1032
1033                 /* beware of keep edges */
1034                 if (is_End(proj))
1035                         continue;
1036
1037                 pn = get_Proj_proj(proj);
1038                 m->projs[pn] = proj;
1039                 switch (pn) {
1040                 case pn_Load_res:
1041                         m->value.value = proj;
1042                         m->value.mode  = get_irn_mode(proj);
1043                         break;
1044                 case pn_Load_X_except:
1045                         m->flags |= FLAG_EXCEPTION;
1046                         break;
1047                 case pn_Load_M:
1048                         m->mem = proj;
1049                         break;
1050                 case pn_Load_X_regular:
1051                         break;
1052                 default:
1053                         panic("Unsupported Proj from Load %+F", proj);
1054                 }
1055         }
1056
1057         /* check if we can determine the entity that will be loaded */
1058         ent = find_constant_entity(ptr);
1059
1060         if (ent != NULL && get_entity_visibility(ent) != ir_visibility_external) {
1061                 /* a static allocation that is not external: there should be NO exception
1062                  * when loading even if we cannot replace the load itself. */
1063                 ir_node *value = NULL;
1064
1065                 /* no exception: clear the m fields as they might be checked again later */
1066                 if (m->projs[pn_Load_X_except]) {
1067                         ir_graph *irg = get_irn_irg(ptr);
1068                         exchange(m->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
1069                         m->projs[pn_Load_X_except] = NULL;
1070                         m->flags &= ~FLAG_EXCEPTION;
1071                         env.changed = 1;
1072                 }
1073                 if (m->projs[pn_Load_X_regular]) {
1074                         exchange(m->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
1075                         m->projs[pn_Load_X_regular] = NULL;
1076                         env.changed = 1;
1077                 }
1078
1079                 if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT) {
1080                         if (ent->initializer) {
1081                                 /* new style initializer */
1082                                 value = find_compound_ent_value(ptr);
1083                         } else if (entity_has_compound_ent_values(ent)) {
1084                                 /* old style initializer */
1085                                 compound_graph_path *path = get_accessed_path(ptr);
1086
1087                                 if (path != NULL) {
1088                                         assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
1089
1090                                         value = get_compound_ent_value_by_path(ent, path);
1091                                         DB((dbg, LEVEL_1, "  Constant access at %F%F resulted in %+F\n", ent, path, value));
1092                                         free_compound_graph_path(path);
1093                                 }
1094                         }
1095                         if (value != NULL)
1096                                 value = can_replace_load_by_const(load, value);
1097                 }
1098
1099                 if (value != NULL) {
1100                         /* we completely replace the load by this value */
1101                         DB((dbg, LEVEL_1, "Replacing Load %+F by constant %+F\n", m->node, value));
1102                         mark_replace_load(m, value);
1103                         return;
1104                 }
1105         }
1106
1107         if (m->value.value != NULL && !(m->flags & FLAG_IGNORE)) {
1108                 /* only create an address if this node is NOT killed immediately or ignored */
1109                 m->value.id = register_address(ptr);
1110                 ++env.n_mem_ops;
1111         } else {
1112                 /* no user, KILL it */
1113                 mark_replace_load(m, NULL);
1114         }
1115 }  /* update_Load_memop */
1116
1117 /**
1118  * Update a memop for a Store.
1119  *
1120  * @param m  the memop
1121  */
1122 static void update_Store_memop(memop_t *m)
1123 {
1124         int     i;
1125         ir_node *store = m->node;
1126         ir_node *adr   = get_Store_ptr(store);
1127
1128         if (get_Store_volatility(store) == volatility_is_volatile) {
1129                 m->flags |= FLAG_IGNORE;
1130         } else {
1131                 /* only create an address if this node is NOT ignored */
1132                 m->value.id = register_address(adr);
1133                 ++env.n_mem_ops;
1134         }
1135
1136         m->value.address = adr;
1137
1138         for (i = get_irn_n_outs(store) - 1; i >= 0; --i) {
1139                 ir_node *proj = get_irn_out(store, i);
1140                 long    pn;
1141
1142                 /* beware of keep edges */
1143                 if (is_End(proj))
1144                         continue;
1145
1146                 pn = get_Proj_proj(proj);
1147                 m->projs[pn] = proj;
1148                 switch (pn) {
1149                 case pn_Store_X_except:
1150                         m->flags |= FLAG_EXCEPTION;
1151                         break;
1152                 case pn_Store_M:
1153                         m->mem = proj;
1154                         break;
1155                 case pn_Store_X_regular:
1156                         break;
1157                 default:
1158                         panic("Unsupported Proj from Store %+F", proj);
1159                 }
1160         }
1161         m->value.value = get_Store_value(store);
1162         m->value.mode  = get_irn_mode(m->value.value);
1163 }  /* update_Store_memop */
1164
1165 /**
1166  * Update a memop for a Call.
1167  *
1168  * @param m  the memop
1169  */
1170 static void update_Call_memop(memop_t *m)
1171 {
1172         ir_node  *call = m->node;
1173         unsigned prop  = get_Call_memory_properties(call);
1174         int      i;
1175
1176         if (prop & mtp_property_const) {
1177                 /* A constant call does NOT use memory at all, so we
1178                    can kick it from the list. */
1179         } else if (prop & mtp_property_pure) {
1180                 /* pure calls READ memory */
1181                 m->flags = 0;
1182         } else
1183                 m->flags = FLAG_KILL_ALL;
1184
1185         for (i = get_irn_n_outs(call) - 1; i >= 0; --i) {
1186                 ir_node *proj = get_irn_out(call, i);
1187
1188                 /* beware of keep edges */
1189                 if (is_End(proj))
1190                         continue;
1191
1192                 switch (get_Proj_proj(proj)) {
1193                 case pn_Call_X_except:
1194                         m->flags |= FLAG_EXCEPTION;
1195                         break;
1196                 case pn_Call_M:
1197                         m->mem = proj;
1198                         break;
1199                 }
1200         }
1201 }  /* update_Call_memop */
1202
1203 /**
1204  * Update a memop for a Div/Mod.
1205  *
1206  * @param m  the memop
1207  */
1208 static void update_Div_memop(memop_t *m)
1209 {
1210         ir_node *div = m->node;
1211         int     i;
1212
1213         for (i = get_irn_n_outs(div) - 1; i >= 0; --i) {
1214                 ir_node *proj = get_irn_out(div, i);
1215
1216                 /* beware of keep edges */
1217                 if (is_End(proj))
1218                         continue;
1219
1220                 switch (get_Proj_proj(proj)) {
1221                 case pn_Div_X_except:
1222                         m->flags |= FLAG_EXCEPTION;
1223                         break;
1224                 case pn_Div_M:
1225                         m->mem = proj;
1226                         break;
1227                 }
1228         }
1229 }
1230
1231 static void update_Mod_memop(memop_t *m)
1232 {
1233         ir_node *div = m->node;
1234         int     i;
1235
1236         for (i = get_irn_n_outs(div) - 1; i >= 0; --i) {
1237                 ir_node *proj = get_irn_out(div, i);
1238
1239                 /* beware of keep edges */
1240                 if (is_End(proj))
1241                         continue;
1242
1243                 switch (get_Proj_proj(proj)) {
1244                 case pn_Mod_X_except:
1245                         m->flags |= FLAG_EXCEPTION;
1246                         break;
1247                 case pn_Mod_M:
1248                         m->mem = proj;
1249                         break;
1250                 }
1251         }
1252 }
1253
1254 /**
1255  * Update a memop for a Phi.
1256  *
1257  * @param m  the memop
1258  */
1259 static void update_Phi_memop(memop_t *m)
1260 {
1261         /* the Phi is its own mem */
1262         m->mem = m->node;
1263 }  /* update_Phi_memop */
1264
1265 /**
1266  * Memory walker: collect all memory ops and build topological lists.
1267  */
1268 static void collect_memops(ir_node *irn, void *ctx)
1269 {
1270         memop_t  *op;
1271         ir_node  *block;
1272         block_t  *entry;
1273
1274         (void) ctx;
1275         if (is_Proj(irn)) {
1276                 /* we can safely ignore ProjM's except the initial memory */
1277                 ir_graph *irg = get_irn_irg(irn);
1278                 if (irn != get_irg_initial_mem(irg))
1279                         return;
1280         }
1281
1282         op    = alloc_memop(irn);
1283         block = get_nodes_block(irn);
1284         entry = get_block_entry(block);
1285
1286         if (is_Phi(irn)) {
1287                 update_Phi_memop(op);
1288                 /* Phis must always be placed first */
1289                 op->next = entry->memop_forward;
1290                 entry->memop_forward = op;
1291                 if (entry->memop_backward == NULL)
1292                         entry->memop_backward = op;
1293         } else {
1294                 switch (get_irn_opcode(irn)) {
1295                 case iro_Load:
1296                         update_Load_memop(op);
1297                         break;
1298                 case iro_Store:
1299                         update_Store_memop(op);
1300                         break;
1301                 case iro_Call:
1302                         update_Call_memop(op);
1303                         break;
1304                 case iro_Sync:
1305                 case iro_Pin:
1306                         op->mem = irn;
1307                         break;
1308                 case iro_Proj:
1309                         /* initial memory */
1310                         op->mem = irn;
1311                         break;
1312                 case iro_Return:
1313                 case iro_End:
1314                 /* we can use those to find the memory edge */
1315                         break;
1316                 case iro_Div:
1317                         update_Div_memop(op);
1318                         break;
1319                 case iro_Mod:
1320                         update_Mod_memop(op);
1321                         break;
1322
1323                 case iro_Builtin:
1324                         /* TODO: handle some builtins */
1325                 default:
1326                         /* unsupported operation */
1327                         op->flags = FLAG_KILL_ALL;
1328                 }
1329
1330
1331                 /* all others should be placed last */
1332                 if (entry->memop_backward == NULL) {
1333                         entry->memop_forward = entry->memop_backward = op;
1334                 } else {
1335                         entry->memop_backward->next = op;
1336                         entry->memop_backward       = op;
1337                 }
1338         }
1339 }  /* collect_memops */
1340
1341 /**
1342  * Find an address in the current set.
1343  *
1344  * @param value  the value to be searched for
1345  *
1346  * @return a memop for the value or NULL if the value does
1347  *         not exist in the set or cannot be converted into
1348  *         the requested mode
1349  */
1350 static memop_t *find_address(const value_t *value)
1351 {
1352         if (rbitset_is_set(env.curr_set, value->id)) {
1353                 memop_t *res = env.curr_id_2_memop[value->id];
1354
1355                 if (res->value.mode == value->mode)
1356                         return res;
1357                 /* allow hidden casts */
1358                 if (get_mode_arithmetic(res->value.mode) == irma_twos_complement &&
1359                     get_mode_arithmetic(value->mode) == irma_twos_complement &&
1360                     get_mode_size_bits(res->value.mode) == get_mode_size_bits(value->mode))
1361                         return res;
1362         }
1363         return NULL;
1364 }  /* find_address */
1365
1366 /**
1367  * Find an address in the avail_out set.
1368  *
1369  * @param bl     the block
1370  */
1371 static memop_t *find_address_avail(const block_t *bl, unsigned id, const ir_mode *mode)
1372 {
1373         if (rbitset_is_set(bl->avail_out, id)) {
1374                 memop_t *res = bl->id_2_memop_avail[id];
1375
1376                 if (res->value.mode == mode)
1377                         return res;
1378                 /* allow hidden casts */
1379                 if (get_mode_arithmetic(res->value.mode) == irma_twos_complement &&
1380                     get_mode_arithmetic(mode) == irma_twos_complement &&
1381                     get_mode_size_bits(res->value.mode) == get_mode_size_bits(mode))
1382                         return res;
1383         }
1384         return NULL;
1385 }  /* find_address_avail */
1386
1387 /**
1388  * Kill all addresses from the current set.
1389  */
1390 static void kill_all(void)
1391 {
1392         rbitset_clear_all(env.curr_set, env.rbs_size);
1393
1394         /* set sentinel */
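        /* the sentinel bit makes rbitset_next() always find a set bit, so the
           iteration loops over the set (which stop at rbs_size - 1) terminate
           even when no real address is available */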
1395         rbitset_set(env.curr_set, env.rbs_size - 1);
1396 }  /* kill_all */
1397
1398 /**
1399  * Kill all memops from the current set whose addresses may alias the given Store value.
1400  *
1401  * @param value  the Store value
1402  */
1403 static void kill_memops(const value_t *value)
1404 {
1405         size_t end = env.rbs_size - 1;
1406         size_t pos;
1407
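        /* walk all addresses currently in the set; end excludes the sentinel bit */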
1408         for (pos = rbitset_next(env.curr_set, 0, 1); pos < end; pos = rbitset_next(env.curr_set, pos + 1, 1)) {
1409                 memop_t *op = env.curr_id_2_memop[pos];
1410
1411                 if (ir_no_alias != get_alias_relation(value->address, value->mode,
1412                                                           op->value.address, op->value.mode)) {
1413                         rbitset_clear(env.curr_set, pos);
1414                         env.curr_id_2_memop[pos] = NULL;
1415                         DB((dbg, LEVEL_2, "KILLING %+F because of possible alias address %+F\n", op->node, value->address));
1416                 }
1417         }
1418 }  /* kill_memops */
1419
1420 /**
1421  * Add the value of a memop to the current set.
1422  *
1423  * @param op  the memory op
1424  */
1425 static void add_memop(memop_t *op)
1426 {
1427         rbitset_set(env.curr_set, op->value.id);
1428         env.curr_id_2_memop[op->value.id] = op;
1429 }  /* add_memop */
1430
1431 /**
1432  * Add the value of a memop to the avail_out set.
1433  *
1434  * @param bl  the block
1435  * @param op  the memory op
1436  */
1437 static void add_memop_avail(block_t *bl, memop_t *op)
1438 {
1439         rbitset_set(bl->avail_out, op->value.id);
1440         bl->id_2_memop_avail[op->value.id] = op;
1441 }  /* add_memop_avail */
1442
1443 /**
1444  * Check if we can convert a value of one mode to another mode
1445  * without changing the representation of bits.
1446  *
1447  * @param from  the original mode
1448  * @param to    the destination mode
1449  */
1450 static int can_convert_to(const ir_mode *from, const ir_mode *to)
1451 {
1452         if (get_mode_arithmetic(from) == irma_twos_complement &&
1453             get_mode_arithmetic(to) == irma_twos_complement &&
1454             get_mode_size_bits(from) == get_mode_size_bits(to))
1455                 return 1;
1456         return 0;
1457 }  /* can_convert_to */
1458
1459 /**
1460  * Add a Conv to the requested mode if needed.
1461  *
1462  * @param irn   the IR-node to convert
1463  * @param mode  the destination mode
1464  *
1465  * @return the possibly converted node, or NULL
1466  *         if the conversion is not possible
1467  */
1468 static ir_node *conv_to(ir_node *irn, ir_mode *mode)
1469 {
1470         ir_mode *other = get_irn_mode(irn);
1471         if (other != mode) {
1472                 /* different modes: check if conversion is possible without changing the bits */
1473                 if (can_convert_to(other, mode)) {
1474                         ir_node *block = get_nodes_block(irn);
1475                         return new_r_Conv(block, irn, mode);
1476                 }
1477                 /* otherwise not possible ... yet */
1478                 return NULL;
1479         }
1480         return irn;
1481 }  /* conv_to */
1482
1483 /**
1484  * Update the address of a value if this address was a load result
1485  * and the load is killed now.
1486  *
1487  * @param value  the value whose address is updated
1488  */
1489 static void update_address(value_t *value)
1490 {
1491         if (is_Proj(value->address)) {
1492                 ir_node *load = get_Proj_pred(value->address);
1493
1494                 if (is_Load(load)) {
1495                         const memop_t *op = get_irn_memop(load);
1496
1497                         if (op->flags & FLAG_KILLED_NODE)
1498                                 value->address = op->replace;
1499                 }
1500         }
1501 }  /* update_address */
1502
1503 /**
1504  * Do forward dataflow analysis on the given block, applying the
1505  * GEN and KILL effects of its memops to the current (avail) set.
1506  *
1507  * @param bl  the block
1508  */
1509 static void calc_gen_kill_avail(block_t *bl)
1510 {
1511         memop_t *op;
1512         ir_node *def;
1513
1514         for (op = bl->memop_forward; op != NULL; op = op->next) {
1515                 switch (get_irn_opcode(op->node)) {
1516                 case iro_Phi:
1517                         /* meet */
1518                         break;
1519                 case iro_Sync:
1520                         /* join */
1521                         break;
1522                 case iro_Load:
1523                         if (! (op->flags & (FLAG_KILLED_NODE|FLAG_IGNORE))) {
1524                                 /* do we have this already? */
1525                                 memop_t *other;
1526
1527                                 update_address(&op->value);
1528                                 other = find_address(&op->value);
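                                /* If the same address already carries a value in the current
                                 * set (possibly behind a hidden cast), this Load is redundant
                                 * and can be replaced by that value. */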
1529                                 if (other != NULL && other != op) {
1530                                         def = conv_to(other->value.value, op->value.mode);
1531                                         if (def != NULL) {
1532 #ifdef DEBUG_libfirm
1533                                                 if (is_Store(other->node)) {
1534                                                         /* RAW */
1535                                                         DB((dbg, LEVEL_1, "RAW %+F <- %+F(%+F)\n", op->node, def, other->node));
1536                                                 } else {
1537                                                         /* RAR */
1538                                                         DB((dbg, LEVEL_1, "RAR %+F <- %+F(%+F)\n", op->node, def, other->node));
1539                                                 }
1540 #endif
1541                                                 mark_replace_load(op, def);
1542                                                 /* do NOT change the memop table */
1543                                                 continue;
1544                                         }
1545                                 }
1546                                 /* add this value */
1547                                 add_memop(op);
1548                         }
1549                         break;
1550                 case iro_Store:
1551                         if (! (op->flags & FLAG_KILLED_NODE)) {
1552                                 /* do we have this store already */
1553                                 memop_t *other;
1554
1555                                 update_address(&op->value);
1556                                 other = find_address(&op->value);
1557                                 if (other != NULL) {
1558                                         if (is_Store(other->node)) {
1559                                                 if (op != other && !(other->flags & FLAG_IGNORE) &&
1560                                                     get_nodes_block(other->node) == get_nodes_block(op->node)) {
1561                                                         /*
1562                                                          * For a WAW in the same block we can kill the first Store.
1563                                                          * This is a shortcut: we know that the second Store will be anticipated
1564                                                          * then in any case.
1565                                                          */
1566                                                         DB((dbg, LEVEL_1, "WAW %+F <- %+F\n", other->node, op->node));
1567                                                         mark_remove_store(other);
1568                                                         /* FIXME: a Load might get freed due to this killed Store */
1569                                                 }
1570                                         } else if (other->value.value == op->value.value && !(op->flags & FLAG_IGNORE)) {
1571                                                 /* WAR */
1572                                                 DB((dbg, LEVEL_1, "WAR %+F <- %+F\n", op->node, other->node));
1573                                                 mark_remove_store(op);
1574                                                 /* do NOT change the memop table */
1575                                                 continue;
1576                                         }
1577                                 }
1578                                 /* KILL all possible aliases */
1579                                 kill_memops(&op->value);
1580                                 /* add this value */
1581                                 add_memop(op);
1582                         }
1583                         break;
1584                 default:
1585                         if (op->flags & FLAG_KILL_ALL)
1586                                 kill_all();
1587                 }
1588         }
1589 }  /* calc_gen_kill_avail */
1590
1591 #define BYTE_SIZE(x)  (((x) + 7) >> 3)
1592
1593 /**
1594  * Do forward dataflow analysis on a given block to calculate the avail_out set
1595  * for this block only.
1596  *
1597  * @param bl  the block
1598  */
1599 static void forward_avail(block_t *bl)
1600 {
1601         /* fill the data from the current block */
1602         env.curr_id_2_memop = bl->id_2_memop_avail;
1603         env.curr_set        = bl->avail_out;
1604
1605         calc_gen_kill_avail(bl);
1606         dump_curr(bl, "Avail_out");
1607 }  /* forward_avail */
1608
1609 /**
1610  * Do backward dataflow analysis on a given block to calculate the antic set
1611  * of Loaded addresses.
1612  *
1613  * @param bl  the block
1614  *
1615  * @return non-zero if the set has changed since the last iteration
1616  */
1617 static int backward_antic(block_t *bl)
1618 {
1619         memop_t *op;
1620         ir_node *block = bl->block;
1621         int     n = get_Block_n_cfg_outs(block);
1622
1623         if (n == 1) {
1624                 ir_node  *succ    = get_Block_cfg_out(block, 0);
1625                 block_t  *succ_bl = get_block_entry(succ);
1626                 int      pred_pos = get_Block_cfgpred_pos(succ, block);
1627                 size_t   end      = env.rbs_size - 1;
1628                 size_t   pos;
1629
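                /* Single successor: start with the empty set and re-add the successor's
                 * anticipated Loads below, with their addresses Phi-translated into this
                 * block. Translation results are cached in bl->trans_results. */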
1630                 kill_all();
1631
1632                 if (bl->trans_results == NULL) {
1633                         /* allocate the translate cache */
1634                         bl->trans_results = OALLOCNZ(&env.obst, memop_t*, env.curr_adr_id);
1635                 }
1636
1637                 /* check for partly redundant values */
1638                 for (pos = rbitset_next(succ_bl->anticL_in, 0, 1);
1639                      pos < end;
1640                      pos = rbitset_next(succ_bl->anticL_in, pos + 1, 1)) {
1641                         /*
1642                          * do Phi-translation here: Note that at this point the nodes are
1643                          * not changed, so we can safely cache the results.
1644                          * However: Loads of Load results ARE bad, because we have no way
1645                          * to translate them yet ...
1646                          */
1647                         memop_t *op = bl->trans_results[pos];
1648                         if (op == NULL) {
1649                                 /* not yet translated */
1650                                 ir_node *adr, *trans_adr;
1651
1652                                 op  = succ_bl->id_2_memop_antic[pos];
1653                                 adr = op->value.address;
1654
1655                                 trans_adr = phi_translate(adr, succ, pred_pos);
1656                                 if (trans_adr != adr) {
1657                                         /* create a new entry for the translated one */
1658                                         memop_t *new_op;
1659
1660                                         new_op = alloc_memop(NULL);
1661                                         new_op->value.address = trans_adr;
1662                                         new_op->value.id      = register_address(trans_adr);
1663                                         new_op->value.mode    = op->value.mode;
1664                                         new_op->node          = op->node; /* we need the node to decide if Load/Store */
1665                                         new_op->flags         = op->flags;
1666
1667                                         bl->trans_results[pos] = new_op;
1668                                         op = new_op;
1669                                 }
1670                         }
1671                         env.curr_id_2_memop[op->value.id] = op;
1672                         rbitset_set(env.curr_set, op->value.id);
1673                 }
1674         } else if (n > 1) {
1675                 ir_node *succ    = get_Block_cfg_out(block, 0);
1676                 block_t *succ_bl = get_block_entry(succ);
1677                 int i;
1678
1679                 rbitset_copy(env.curr_set, succ_bl->anticL_in, env.rbs_size);
1680                 memcpy(env.curr_id_2_memop, succ_bl->id_2_memop_antic, env.rbs_size * sizeof(env.curr_id_2_memop[0]));
1681
1682                 /* Hmm: probably we want to kill merges of Loads and Stores here; for now simply intersect the anticipated sets of all successors */
1683                 for (i = n - 1; i > 0; --i) {
1684                         ir_node *succ    = get_Block_cfg_out(bl->block, i);
1685                         block_t *succ_bl = get_block_entry(succ);
1686
1687                         rbitset_and(env.curr_set, succ_bl->anticL_in, env.rbs_size);
1688                 }
1689         } else {
1690                 /* block ends with a noreturn call */
1691                 kill_all();
1692         }
1693
1694         dump_curr(bl, "AnticL_out");
1695
1696         for (op = bl->memop_backward; op != NULL; op = op->prev) {
1697                 switch (get_irn_opcode(op->node)) {
1698                 case iro_Phi:
1699                         /* meet */
1700                         break;
1701                 case iro_Sync:
1702                         /* join */
1703                         break;
1704                 case iro_Load:
1705                         if (! (op->flags & (FLAG_KILLED_NODE|FLAG_IGNORE))) {
1706                                 /* always add it */
1707                                 add_memop(op);
1708                         }
1709                         break;
1710                 case iro_Store:
1711                         if (! (op->flags & FLAG_KILLED_NODE)) {
1712                                 /* a Store: check which memops must be killed */
1713                                 kill_memops(&op->value);
1714                         }
1715                         break;
1716                 default:
1717                         if (op->flags & FLAG_KILL_ALL)
1718                                 kill_all();
1719                 }
1720         }
1721
1722         memcpy(bl->id_2_memop_antic, env.curr_id_2_memop, env.rbs_size * sizeof(env.curr_id_2_memop[0]));
1723         if (! rbitsets_equal(bl->anticL_in, env.curr_set, env.rbs_size)) {
1724                 /* changed */
1725                 rbitset_copy(bl->anticL_in, env.curr_set, env.rbs_size);
1726                 dump_curr(bl, "AnticL_in*");
1727                 return 1;
1728         }
1729         dump_curr(bl, "AnticL_in");
1730         return 0;
1731 }  /* backward_antic */
1732
1733 /**
1734  * Replace a Load memop by an already known value.
1735  *
1736  * @param op  the Load memop
1737  */
1738 static void replace_load(memop_t *op)
1739 {
1740         ir_node *load = op->node;
1741         ir_node *def  = skip_Id(op->replace);
1742         ir_node *proj;
1743         ir_mode *mode;
1744
1745         if (def != NULL)
1746                 DB((dbg, LEVEL_1, "Replacing %+F by definition %+F\n", load, is_Proj(def) ? get_Proj_pred(def) : def));
1747         else {
1748                 if (op->flags & FLAG_EXCEPTION) {
1749                         /* bad: this node is unused and executed for exception only */
1750                         DB((dbg, LEVEL_1, "Unused %+F executed for exception only ...\n", load));
1751                         return;
1752                 }
1753                 DB((dbg, LEVEL_1, "Killing unused %+F\n", load));
1754         }
1755
1756         if (op->mem != NULL) {
1757                 /* in rare cases a Load might have NO memory */
1758                 exchange(op->mem, get_Load_mem(load));
1759         }
1760         proj = op->projs[pn_Load_res];
1761         if (proj != NULL) {
1762                 mode = get_irn_mode(proj);
1763                 if (get_irn_mode(def) != mode) {
1764                         /* a hidden cast */
1765                         dbg_info *db    = get_irn_dbg_info(load);
1766                         ir_node  *block = get_nodes_block(proj);
1767                         def = new_rd_Conv(db, block, def, mode);
1768                 }
1769                 exchange(proj, def);
1770         }
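        /* The Load goes away, so its control flow Projs must be rewired:
         * the exception Proj becomes Bad, the regular Proj a plain Jmp. */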
1771         proj = op->projs[pn_Load_X_except];
1772         if (proj != NULL) {
1773                 ir_graph *irg = get_irn_irg(load);
1774                 exchange(proj, new_r_Bad(irg, mode_X));
1775         }
1776         proj = op->projs[pn_Load_X_regular];
1777         if (proj != NULL) {
1778                 exchange(proj, new_r_Jmp(get_nodes_block(load)));
1779         }
1780 }  /* replace_load */
1781
1782 /**
1783  * Remove a Store memop.
1784  *
1785  * @param op  the Store memop
1786  */
1787 static void remove_store(memop_t *op)
1788 {
1789         ir_node *store = op->node;
1790         ir_node *proj;
1791
1792         DB((dbg, LEVEL_1, "Removing %+F\n", store));
1793
1794         if (op->mem != NULL) {
1795                 /* in rare cases a Store might have no memory */
1796                 exchange(op->mem, get_Store_mem(store));
1797         }
1798         proj = op->projs[pn_Store_X_except];
1799         if (proj != NULL) {
1800                 ir_graph *irg = get_irn_irg(store);
1801                 exchange(proj, new_r_Bad(irg, mode_X));
1802         }
1803         proj = op->projs[pn_Store_X_regular];
1804         if (proj != NULL) {
1805                 exchange(proj, new_r_Jmp(get_nodes_block(store)));
1806         }
1807 }  /* remove_store */
1808
1809
1810 /**
1811  * Do all necessary replacements for a given block.
1812  *
1813  * @param bl  the block
1814  */
1815 static void do_replacements(block_t *bl)
1816 {
1817         memop_t *op;
1818
1819         for (op = bl->memop_forward; op != NULL; op = op->next) {
1820                 if (op->flags & FLAG_KILLED_NODE) {
1821                         switch (get_irn_opcode(op->node)) {
1822                         case iro_Load:
1823                                 replace_load(op);
1824                                 break;
1825                         case iro_Store:
1826                                 remove_store(op);
1827                                 break;
1828                         }
1829                 }
1830         }
1831 }  /* do_replacements */
1832
1833 /**
1834  * Calculate the Avail_out sets for all basic blocks.
1835  */
1836 static void calcAvail(void)
1837 {
1838         memop_t  **tmp_memop = env.curr_id_2_memop;
1839         unsigned *tmp_set    = env.curr_set;
1840         block_t  *bl;
1841
1842         /* calculate avail_out */
1843         DB((dbg, LEVEL_2, "Calculate Avail_out\n"));
1844
1845         /* iterate over all blocks in any order, skip the start block */
1846         for (bl = env.forward->forward_next; bl != NULL; bl = bl->forward_next) {
1847                 forward_avail(bl);
1848         }
1849
1850         /* restore the current sets */
1851         env.curr_id_2_memop = tmp_memop;
1852         env.curr_set        = tmp_set;
1853 }  /* calcAvail */
1854
1855 /**
1856  * Calculate the Antic_in sets for all basic blocks.
1857  */
1858 static void calcAntic(void)
1859 {
1860         int i, need_iter;
1861
1862         /* calculate antic_in */
1863         DB((dbg, LEVEL_2, "Calculate Antic_in\n"));
1864         i = 0;
1865         do {
1866                 block_t *bl;
1867
1868                 DB((dbg, LEVEL_2, "Iteration %d:\n=========\n", i));
1869
1870                 need_iter = 0;
1871
1872                 /* over all blocks in reverse post order */
1873                 for (bl = env.backward->backward_next; bl != NULL; bl = bl->backward_next) {
1874                         need_iter |= backward_antic(bl);
1875                 }
1876                 ++i;
1877         } while (need_iter);
1878         DB((dbg, LEVEL_2, "Get anticipated Load set after %d iterations\n", i));
1879 }  /* calcAntic */
1880
1881 /**
1882  * Return the node representing the last memory in a block.
1883  *
1884  * @param bl  the block
1885  */
1886 static ir_node *find_last_memory(block_t *bl)
1887 {
1888         for (;;) {
1889                 if (bl->memop_backward != NULL) {
1890                         return bl->memop_backward->mem;
1891                 }
1892                 /* if there is NO memory in this block, go to the dominator */
1893                 bl = get_block_entry(get_Block_idom(bl->block));
1894         }
1895 }  /* find_last_memory */
1896
1897 /**
1898  * Reroute all memory users of old memory
1899  * to a new memory IR-node.
1900  *
1901  * @param omem  the old memory IR-node
1902  * @param nmem  the new memory IR-node
1903  */
1904 static void reroute_all_mem_users(ir_node *omem, ir_node *nmem)
1905 {
1906         int i;
1907
1908         for (i = get_irn_n_outs(omem) - 1; i >= 0; --i) {
1909                 int     n_pos;
1910                 ir_node *user = get_irn_out_ex(omem, i, &n_pos);
1911
1912                 set_irn_n(user, n_pos, nmem);
1913         }
1914
1915         /* all edges that previously pointed to omem now point to nmem */
1916         nmem->out = omem->out;
1917 }  /* reroute_all_mem_users */
1918
1919 /**
1920  * Reroute memory users of old memory that are dominated by a given block
1921  * to a new memory IR-node.
1922  *
1923  * @param omem     the old memory IR-node
1924  * @param nmem     the new memory IR-node
1925  * @param pass_bl  the block the memory must pass
1926  */
1927 static void reroute_mem_through(ir_node *omem, ir_node *nmem, ir_node *pass_bl)
1928 {
1929         int             i, j, n = get_irn_n_outs(omem);
1930         ir_def_use_edge *edges = NEW_ARR_D(ir_def_use_edge, &env.obst, n + 1);
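        /* reserve one extra slot: edges[0] stores the number of rerouted edges (filled in at the end) */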
1931
1932         for (i = j = 0; i < n; ++i) {
1933                 int     n_pos;
1934                 ir_node *user   = get_irn_out_ex(omem, i, &n_pos);
1935                 ir_node *use_bl = get_nodes_block(user);
1936
1937
1938                 if (is_Phi(user)) {
1939                         use_bl = get_Block_cfgpred_block(use_bl, n_pos);
1940                 }
1941                 if (block_dominates(pass_bl, use_bl)) {
1942                         /* found a user that is dominated */
1943                         ++j;
1944                         edges[j].pos = n_pos;
1945                         edges[j].use = user;
1946
1947                         set_irn_n(user, n_pos, nmem);
1948                 }
1949         }
1950
1951         /* Modify the out structure: we create a new out edge array on our
1952            temporary obstack here. This should be no problem, as we invalidate the edges
1953            at the end anyway. */
1954         /* first entry is used for the length */
1955         edges[0].pos = j;
1956         nmem->out = edges;
1957 }  /* reroute_mem_through */
1958
1959 /**
1960  * insert Loads, making partly redundant Loads fully redundant
1961  */
1962 static int insert_Load(block_t *bl)
1963 {
1964         ir_node  *block = bl->block;
1965         int      i, n = get_Block_n_cfgpreds(block);
1966         size_t   end = env.rbs_size - 1;
1967
1968         DB((dbg, LEVEL_3, "processing %+F\n", block));
1969
1970         if (n == 0) {
1971                 /* might still happen for an unreachable block (end for instance) */
1972                 return 0;
1973         }
1974
1975         if (n > 1) {
1976                 ir_node **ins;
1977                 size_t    pos;
1978
1979                 NEW_ARR_A(ir_node *, ins, n);
1980
1981                 rbitset_set_all(env.curr_set, env.rbs_size);
1982
1983                 /* More than one predecessor: calculate the join of all avail_out sets, ignoring unevaluated
1984                    blocks. These are Top anyway. */
1985                 for (i = n - 1; i >= 0; --i) {
1986                         ir_node *pred = skip_Proj(get_Block_cfgpred(block, i));
1987                         ir_node *blk  = get_nodes_block(pred);
1988                         block_t *pred_bl;
1989
1990                         pred_bl = get_block_entry(blk);
1991                         rbitset_and(env.curr_set, pred_bl->avail_out, env.rbs_size);
1992
1993                         if (is_Load(pred) || is_Store(pred)) {
1994                                 /* We reached this block by an exception from a Load or Store:
1995                                  * the memop creating the exception was NOT completed then, so kill it
1996                                  */
1997                                 memop_t *exc_op = get_irn_memop(pred);
1998                                 rbitset_clear(env.curr_set, exc_op->value.id);
1999                         }
2000
2001                 }
2002                 /*
2003                  * Ensure that all values are in the map: build Phi's if necessary:
2004                  * Note: the last bit is the sentinel and ALWAYS set, so iterate only up to rbs_size - 2.
2005                  */
2006                 for (pos = 0; pos < env.rbs_size - 1; ++pos) {
2007                         if (! rbitset_is_set(env.curr_set, pos))
2008                                 env.curr_id_2_memop[pos] = NULL;
2009                         else {
2010                                 ir_node *pred    = get_Block_cfgpred_block(bl->block, 0);
2011                                 block_t *pred_bl = get_block_entry(pred);
2012                                 int     need_phi = 0;
2013                                 memop_t *first   = NULL;
2014                                 ir_mode *mode    = NULL;
2015
2016                                 for (i = 0; i < n; ++i) {
2017                                         memop_t *mop;
2018
2019                                         pred    = get_Block_cfgpred_block(bl->block, i);
2020                                         pred_bl = get_block_entry(pred);
2021
2022                                         mop = pred_bl->id_2_memop_avail[pos];
2023                                         if (first == NULL) {
2024                                                 first = mop;
2025                                                 ins[0] = first->value.value;
2026                                                 mode = get_irn_mode(ins[0]);
2027
2028                                                 /* no Phi needed so far */
2029                                                 env.curr_id_2_memop[pos] = first;
2030                                         } else {
2031                                                 ins[i] = conv_to(mop->value.value, mode);
2032                                                 if (ins[i] != ins[0]) {
2033                                                         if (ins[i] == NULL) {
2034                                                                 /* conversion failed */
2035                                                                 env.curr_id_2_memop[pos] = NULL;
2036                                                                 rbitset_clear(env.curr_set, pos);
2037                                                                 break;
2038                                                         }
2039                                                         need_phi = 1;
2040                                                 }
2041                                         }
2042                                 }
2043                                 if (need_phi) {
2044                                         /* build a Phi  */
2045                                         ir_node *phi = new_r_Phi(bl->block, n, ins, mode);
2046                                         memop_t *phiop = alloc_memop(phi);
2047
2048                                         phiop->value = first->value;
2049                                         phiop->value.value = phi;
2050
2051                                         /* no need to link it in, as it is a DATA phi */
2052
2053                                         env.curr_id_2_memop[pos] = phiop;
2054
2055                                         DB((dbg, LEVEL_3, "Created new %+F on merging value for address %+F\n", phi, first->value.address));
2056                                 }
2057                         }
2058                 }
2059         } else {
2060                 /* only one predecessor, simply copy the map */
2061                 ir_node *pred    = get_Block_cfgpred_block(bl->block, 0);
2062                 block_t *pred_bl = get_block_entry(pred);
2063
2064                 rbitset_copy(env.curr_set, pred_bl->avail_out, env.rbs_size);
2065
2066                 memcpy(env.curr_id_2_memop, pred_bl->id_2_memop_avail, env.rbs_size * sizeof(bl->id_2_memop_avail[0]));
2067         }
2068
2069         if (n > 1) {
2070                 size_t pos;
2071
2072                 /* check for partly redundant values */
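                /* For each such Load, look it up in every predecessor's avail_out set
                 * (with its address Phi-translated); where it is missing, a compensation
                 * Load is inserted at the end of that predecessor, and the results are
                 * merged with a Phi so the Load becomes fully redundant here. */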
2073                 for (pos = rbitset_next(bl->anticL_in, 0, 1);
2074                      pos < end;
2075                      pos = rbitset_next(bl->anticL_in, pos + 1, 1)) {
2076                         memop_t *op = bl->id_2_memop_antic[pos];
2077                         int     have_some, all_same;
2078                         ir_node *first;
2079
2080                         if (rbitset_is_set(env.curr_set, pos)) {
2081                                 /* already avail */
2082                                 continue;
2083                         }
2084
2085                         assert(is_Load(op->node));
2086
2087                         DB((dbg, LEVEL_3, "anticipated %+F\n", op->node));
2088
2089                         have_some  = 0;
2090                         all_same   = 1;
2091                         first      = 0;
2092                         for (i = n - 1; i >= 0; --i) {
2093                                 ir_node *pred    = get_Block_cfgpred_block(block, i);
2094                                 block_t *pred_bl = get_block_entry(pred);
2095                                 ir_mode *mode    = op->value.mode;
2096                                 memop_t *e;
2097                                 ir_node *adr;
2098
2099                                 adr = phi_translate(op->value.address, block, i);
2100                                 DB((dbg, LEVEL_3, ".. using address %+F in pred %d\n", adr, i));
2101                                 e   = find_address_avail(pred_bl, register_address(adr), mode);
2102                                 if (e == NULL) {
2103                                         ir_node *ef_block = get_nodes_block(adr);
2104                                         if (! block_dominates(ef_block, pred)) {
2105                                                 /* cannot place a copy here */
2106                                                 have_some = 0;
2107                                                 DB((dbg, LEVEL_3, "%+F cannot be moved into predecessor %+F\n", op->node, pred));
2108                                                 break;
2109                                         }
2110                                         DB((dbg, LEVEL_3, "%+F is not available in predecessor %+F\n", op->node, pred));
2111                                         pred_bl->avail = NULL;
2112                                         all_same       = 0;
2113                                 } else {
2114                                         if (e->value.mode != mode && !can_convert_to(e->value.mode, mode)) {
2115                                                 /* cannot create a Phi due to different modes */
2116                                                 have_some = 0;
2117                                                 break;
2118                                         }
2119                                         pred_bl->avail = e;
2120                                         have_some      = 1;
2121                                         DB((dbg, LEVEL_3, "%+F is available for %+F in predecessor %+F\n", e->node, op->node, pred));
2122                                         if (first == NULL)
2123                                                 first = e->node;
2124                                         else if (first != e->node)
2125                                                 all_same = 0;
2126                                 }
2127                         }
2128                         if (have_some && !all_same) {
2129                                 ir_mode *mode = op->value.mode;
2130                                 ir_node **in, *phi;
2131                                 memop_t *phi_op;
2132
2133                                 NEW_ARR_A(ir_node *, in, n);
2134
2135                                 for (i = n - 1; i >= 0; --i) {
2136                                         ir_node *pred    = get_Block_cfgpred_block(block, i);
2137                                         block_t *pred_bl = get_block_entry(pred);
2138
2139                                         if (pred_bl->avail == NULL) {
2140                                                 /* create a new Load here to make the anticipated Load fully redundant */
2141                                                 dbg_info *db       = get_irn_dbg_info(op->node);
2142                                                 ir_node  *last_mem = find_last_memory(pred_bl);
2143                                                 ir_node  *load, *def, *adr;
2144                                                 memop_t  *new_op;
2145
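                                                /* the new Load consumes the predecessor's last memory state
                                                 * and is appended to its memop list below */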
2146                                                 assert(last_mem != NULL);
2147                                                 adr  = phi_translate(op->value.address, block, i);
2148                                                 load = new_rd_Load(db, pred, last_mem, adr, mode, cons_none);
2149                                                 def  = new_r_Proj(load, mode, pn_Load_res);
2150                                                 DB((dbg, LEVEL_1, "Created new %+F in %+F for partly redundant %+F\n", load, pred, op->node));
2151
2152                                                 new_op                = alloc_memop(load);
2153                                                 new_op->mem           = new_r_Proj(load, mode_M, pn_Load_M);
2154                                                 new_op->value.address = adr;
2155                                                 new_op->value.id      = op->value.id;
2156                                                 new_op->value.mode    = mode;
2157                                                 new_op->value.value   = def;
2158
2159                                                 new_op->projs[pn_Load_M]   = new_op->mem;
2160                                                 new_op->projs[pn_Load_res] = def;
2161
2162                                                 new_op->prev = pred_bl->memop_backward;
2163                                                 if (pred_bl->memop_backward != NULL)
2164                                                         pred_bl->memop_backward->next = new_op;
2165
2166                                                 pred_bl->memop_backward = new_op;
2167
2168                                                 if (pred_bl->memop_forward == NULL)
2169                                                         pred_bl->memop_forward = new_op;
2170
2171                                                 if (get_nodes_block(last_mem) == pred) {
2172                                                         /* We have added a new last memory op in the pred block.
2173                                                            If pred already had a last mem, reroute all memory
2174                                                            users. */
2175                                                         reroute_all_mem_users(last_mem, new_op->mem);
2176                                                 } else {
2177                                                         /* reroute only those memory edges that go through the pred block */
2178                                                         reroute_mem_through(last_mem, new_op->mem, pred);
2179                                                 }
2180
2181                                                 /* we added this load at the end, so it will be avail anyway */
2182                                                 add_memop_avail(pred_bl, new_op);
2183                                                 pred_bl->avail = new_op;
2184                                         }
2185                                         in[i] = conv_to(pred_bl->avail->value.value, mode);
2186                                 }
2187                                 phi = new_r_Phi(block, n, in, mode);
2188                                 DB((dbg, LEVEL_1, "Created new %+F in %+F for now redundant %+F\n", phi, block, op->node));
2189
2190                                 phi_op = clone_memop_phi(op, phi);
2191                                 add_memop(phi_op);
2192                         }
2193                 }
2194         }
2195
2196         /* recalculate avail by gen and kill */
2197         calc_gen_kill_avail(bl);
2198
2199         /* always update the map after gen/kill, as values might have been changed due to RAR/WAR/WAW */
2200         memcpy(bl->id_2_memop_avail, env.curr_id_2_memop, env.rbs_size * sizeof(env.curr_id_2_memop[0]));
2201
2202         if (!rbitsets_equal(bl->avail_out, env.curr_set, env.rbs_size)) {
2203                 /* the avail set has changed */
2204                 rbitset_copy(bl->avail_out, env.curr_set, env.rbs_size);
2205                 dump_curr(bl, "Avail_out*");
2206                 return 1;
2207         }
2208         dump_curr(bl, "Avail_out");
2209         return 0;
2210 }  /* insert_Load */
2211
2212 /**
2213  * Insert Loads upwards.
2214  */
2215 static void insert_Loads_upwards(void)
2216 {
2217         int i, need_iter;
2218         block_t *bl;
2219
2220         /* recalculate avail_out and insert Loads */
2221         DB((dbg, LEVEL_2, "Inserting Loads\n"));
2222
2223         i = 0;
2224         do {
2225                 DB((dbg, LEVEL_2, "Iteration %d:\n=========\n", i));
2226
2227                 need_iter = 0;
2228
2229                 /* over all blocks in reverse post order, skip the start block */
2230                 for (bl = env.forward->forward_next; bl != NULL; bl = bl->forward_next) {
2231                         need_iter |= insert_Load(bl);
2232                 }
2233                 ++i;
2234         } while (need_iter);
2235
2236         DB((dbg, LEVEL_2, "Finished Load inserting after %d iterations\n", i));
2237 }  /* insert_Loads_upwards */
2238
2239 /**
2240  * Kill unreachable control flow.
2241  *
2242  * @param irg  the graph to operate on
2243  */
2244 static void kill_unreachable_blocks(ir_graph *irg)
2245 {
2246         block_t *bl;
2247         ir_node **ins;
2248         int     changed = 0;
2249
2250         NEW_ARR_A(ir_node *, ins, env.max_cfg_preds);
2251
2252         for (bl = env.forward; bl != NULL; bl = bl->forward_next) {
2253                 ir_node *block = bl->block;
2254                 int     i, j, k, n;
2255
2256                 assert(get_Block_mark(block));
2257
2258                 n = get_Block_n_cfgpreds(block);
2259
2260                 for (i = j = 0; i < n; ++i) {
2261                         ir_node *pred = get_Block_cfgpred(block, i);
2262                         ir_node *pred_bl;
2263
2264                         if (is_Bad(pred))
2265                                 continue;
2266
2267                         pred_bl = get_nodes_block(skip_Proj(pred));
2268                         if (! get_Block_mark(pred_bl))
2269                                 continue;
2270
2271                         ins[j++] = pred;
2272                 }
2273                 if (j != n) {
2274                         ir_node *phi, *next;
2275
2276                         /* some unreachable blocks detected */
2277                         changed = 1;
2278
2279                         DB((dbg, LEVEL_1, "Killing dead block predecessors on %+F\n", block));
2280
2281                         set_irn_in(block, j, ins);
2282
2283                         /* shorten all Phi nodes */
2284                         for (phi = get_Block_phis(block); phi != NULL; phi = next) {
2285                                 next = get_Phi_next(phi);
2286
2287                                 for (i = k = 0; i < n; ++i) {
2288                                         ir_node *pred = get_Block_cfgpred_block(block, i);
2289
2290                                         if (is_Bad(pred))
2291                                                 continue;
2292
2293                                         if (! get_Block_mark(pred))
2294                                                 continue;
2295
2296                                         ins[k++] = get_Phi_pred(phi, i);
2297                                 }
2298                                 if (k == 1)
2299                                         exchange(phi, ins[0]);
2300                                 else
2301                                         set_irn_in(phi, k, ins);
2302                         }
2303                 }
2304
2305         }
2306
2307         if (changed) {
2308                 /* kick keep alives */
2309                 ir_node *end = get_irg_end(irg);
2310                 int     i, j, n = get_End_n_keepalives(end);
2311
2312                 NEW_ARR_A(ir_node *, ins, n);
2313
2314                 for (i = j = 0; i < n; ++i) {
2315                         ir_node *ka = get_End_keepalive(end, i);
2316                         ir_node *ka_bl;
2317
2318                         if (is_Bad(ka))
2319                                 continue;
2320                         if (is_Block(ka))
2321                                 ka_bl = ka;
2322                         else
2323                                 ka_bl = get_nodes_block(skip_Proj(ka));
2324                         if (get_Block_mark(ka_bl))
2325                                 ins[j++] = ka;
2326                 }
2327                 if (j != n)
2328                         set_End_keepalives(end, j, ins);
2329
2330                 free_irg_outs(irg);
2331
2332                 /* this transformation does NOT invalidate the dominance information */
2333         }
2334 }  /* kill_unreachable_blocks */
2335
2336 int opt_ldst(ir_graph *irg)
2337 {
2338         block_t *bl;
2339
2340         FIRM_DBG_REGISTER(dbg, "firm.opt.ldst");
2341
2342         DB((dbg, LEVEL_1, "\nDoing Load/Store optimization on %+F\n", irg));
2343
2344         /* we need landing pads */
2345         remove_critical_cf_edges(irg);
2346
2347         if (get_opt_alias_analysis()) {
2348                 assure_irg_entity_usage_computed(irg);
2349                 assure_irp_globals_entity_usage_computed();
2350         }
2351
2352         obstack_init(&env.obst);
2353         ir_nodehashmap_init(&env.adr_map);
2354
2355         env.forward       = NULL;
2356         env.backward      = NULL;
2357         env.curr_adr_id   = 0;
2358         env.n_mem_ops     = 0;
2359         env.max_cfg_preds = 0;
2360         env.changed       = 0;
2361         env.start_bl      = get_irg_start_block(irg);
2362         env.end_bl        = get_irg_end_block(irg);
2363 #ifdef DEBUG_libfirm
2364         env.id_2_address  = NEW_ARR_F(ir_node *, 0);
2365 #endif
2366
2367         assure_irg_outs(irg);
2368
2369         ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_BLOCK_MARK);
2370
2371         /* first step: allocate block entries. Note that some blocks might be
2372            unreachable here. Using the normal walk ensures that ALL blocks are initialized. */
2373         irg_walk_graph(irg, prepare_blocks, link_phis, NULL);
2374
2375         /* produce an inverse post-order list for the CFG: this links only reachable
2376            blocks */
2377         irg_out_block_walk(get_irg_start_block(irg), NULL, inverse_post_order, NULL);
2378
2379         if (! get_Block_mark(env.end_bl)) {
2380                 /*
2381                  * The end block is NOT reachable due to endless loops
2382                  * or no_return calls.
2383                  * Place the end block last.
2384                  * env.backward points to the last block in the list for this purpose.
2385                  */
2386                 env.backward->forward_next = get_block_entry(env.end_bl);
2387
2388                 set_Block_mark(env.end_bl, 1);
2389         }
2390
2391         /* KILL unreachable blocks: these disturb the data flow analysis */
2392         kill_unreachable_blocks(irg);
2393
2394         assure_doms(irg);
2395
2396         /* second step: find and sort all memory ops */
2397         walk_memory_irg(irg, collect_memops, NULL, NULL);
2398
2399 #ifdef DEBUG_libfirm
2400         /* check that the backward map is correct */
2401         assert((unsigned)ARR_LEN(env.id_2_address) == env.curr_adr_id);
2402 #endif
2403
2404         if (env.n_mem_ops == 0) {
2405                 /* no memory ops */
2406                 goto end;
2407         }
2408
2409         /* create the backward links. */
2410         env.backward = NULL;
2411         irg_block_walk_graph(irg, NULL, collect_backward, NULL);
2412
2413         /* link the end block in */
2414         bl = get_block_entry(env.end_bl);
2415         bl->backward_next = env.backward;
2416         env.backward      = bl;
2417
2418         /* check that we really start with the start / end block */
2419         assert(env.forward->block  == env.start_bl);
2420         assert(env.backward->block == env.end_bl);
2421
2422         /* create the address sets: for now, only the existing addresses are allowed, plus one
2423            extra bit needed for the sentinel */
2424         env.rbs_size = env.curr_adr_id + 1;
2425
2426         /* create the current set */
2427         env.curr_set = rbitset_obstack_alloc(&env.obst, env.rbs_size);
2428         rbitset_set(env.curr_set, env.rbs_size - 1);
2429         env.curr_id_2_memop = NEW_ARR_D(memop_t *, &env.obst, env.rbs_size);
2430         memset(env.curr_id_2_memop, 0, env.rbs_size * sizeof(env.curr_id_2_memop[0]));
2431
2432         for (bl = env.forward; bl != NULL; bl = bl->forward_next) {
2433                 /* set sentinel bits */
2434                 bl->avail_out  = rbitset_obstack_alloc(&env.obst, env.rbs_size);
2435                 rbitset_set(bl->avail_out, env.rbs_size - 1);
2436
2437                 bl->id_2_memop_avail = NEW_ARR_D(memop_t *, &env.obst, env.rbs_size);
2438                 memset(bl->id_2_memop_avail, 0, env.rbs_size * sizeof(bl->id_2_memop_avail[0]));
2439
2440                 bl->anticL_in  = rbitset_obstack_alloc(&env.obst, env.rbs_size);
2441                 rbitset_set(bl->anticL_in, env.rbs_size - 1);
2442
2443                 bl->id_2_memop_antic = NEW_ARR_D(memop_t *, &env.obst, env.rbs_size);
2444                 memset(bl->id_2_memop_antic, 0, env.rbs_size * sizeof(bl->id_2_memop_antic[0]));
2445         }
2446
2447         (void) dump_block_list;
2448
2449         calcAvail();
2450         calcAntic();
2451
2452         insert_Loads_upwards();
2453
2454         if (env.changed) {
2455                 /* over all blocks in reverse post order */
2456                 for (bl = env.forward; bl != NULL; bl = bl->forward_next) {
2457                         do_replacements(bl);
2458                 }
2459
2460                 /* not only invalidate but free them: we might have allocated new out arrays
2461                    on our obstack, which is freed at the end of this pass. */
2462                 free_irg_outs(irg);
2463                 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_ENTITY_USAGE);
2464         }
2465 end:
2466
2467         ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_BLOCK_MARK);
2468         ir_nodehashmap_destroy(&env.adr_map);
2469         obstack_free(&env.obst, NULL);
2470
2471 #ifdef DEBUG_libfirm
2472         DEL_ARR_F(env.id_2_address);
2473 #endif
2474
2475         return env.changed != 0;
2476 }  /* opt_ldst */
2477
2478 ir_graph_pass_t *opt_ldst_pass(const char *name)
2479 {
2480         return def_graph_pass_ret(name ? name : "ldst_df", opt_ldst);
2481 }  /* opt_ldst_pass */
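/*
 * Usage sketch (not part of this file, assuming the usual libFirm driver setup):
 *
 *     ir_graph *irg = ...;
 *     if (opt_ldst(irg)) {
 *         // the graph was changed; out edges and entity usage were invalidated above
 *     }
 *
 * or register the pass wrapper, e.g. opt_ldst_pass("ldst_df"), with a pass manager.
 */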