1 /*
2  * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief   Dataflow-driven Load/Store optimizations, using some ideas from
23  *          VanDrunen's LEPRE
24  * @author  Michael Beck
25  * @version $Id$
26  */
27 #include "config.h"
28
29 #include "irnode_t.h"
30 #include "irflag_t.h"
31 #include "array_t.h"
32 #include "ircons.h"
33 #include "irdom.h"
34 #include "irgmod.h"
35 #include "irgwalk.h"
36 #include "irouts.h"
37 #include "irgraph.h"
38 #include "irgopt.h"
39 #include "iropt.h"
40 #include "iroptimize.h"
41 #include "irnodemap.h"
42 #include "raw_bitset.h"
43 #include "debug.h"
44 #include "error.h"
45 #include "irpass.h"
46
47 /* maximum number of output Proj's */
48 #define MAX_PROJ (pn_Load_max > pn_Store_max ? pn_Load_max : pn_Store_max)
49
50 /**
51  * Maps an address to a dense ID.
52  */
53 typedef struct address_entry_t {
54         unsigned id;          /**< The ID */
55 } address_entry;
56
57 /**
58  * Memop-flags.
59  */
60 enum memop_flags {
61         FLAG_KILL_ALL    = 1, /**< KILL all addresses */
62         FLAG_KILLED_NODE = 2, /**< this node was killed */
63         FLAG_EXCEPTION   = 4, /**< this node has exception flow */
64         FLAG_IGNORE      = 8, /**< ignore this node (volatile or other) */
65 };
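/*
 * Note that the flags form a bitset and are usually combined: a volatile Load
 * with exception flow, for example, ends up with FLAG_IGNORE | FLAG_EXCEPTION
 * (see update_Load_memop()), while unsupported operations get FLAG_KILL_ALL
 * in collect_memops().
 */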
66
67 /**
68  * A value: This represents a value stored at a given address in
69  * memory. Do not confuse with values from value numbering.
70  */
71 typedef struct value_t value_t;
72 struct value_t {
73         ir_node  *address;    /**< the address of this value */
74         ir_node  *value;      /**< the value itself */
75         ir_mode  *mode;       /**< the mode of the value */
76         unsigned id;          /**< address id */
77 };
78
79 /**
80  * A memop describes a memory-related operation.
81  * These are Loads/Stores and all other ops that might modify
82  * memory (Calls, CopyB) or cause exceptions.
83  */
84 typedef struct memop_t memop_t;
85 struct memop_t {
86         value_t  value;      /**< the value of this memop: only defined for Load/Store */
87         ir_node  *node;      /**< the memory op itself */
88         ir_node  *mem;       /**< the memory FROM this node */
89         ir_node  *replace;   /**< the replacement node if this memop is replaced */
90         memop_t  *next;      /**< links to the next memory op in the block in forward order. */
91         memop_t  *prev;      /**< links to the previous memory op in the block in forward order. */
92         unsigned flags;      /**< memop flags */
93         ir_node  *projs[MAX_PROJ]; /**< Projs of this memory op */
94 };
95
96 /**
97  * Additional data for every basic block.
98  */
99 typedef struct block_t block_t;
100 struct block_t {
101         memop_t  *memop_forward;     /**< topologically sorted list of memory ops in this block */
102         memop_t  *memop_backward;    /**< last memop in the list */
103         unsigned *avail_out;         /**< out-set of available addresses */
104         memop_t  **id_2_memop_avail; /**< maps avail address ids to memops */
105         unsigned *anticL_in;         /**< in-set of anticipated Load addresses */
106         memop_t  **id_2_memop_antic; /**< maps anticipated address ids to memops */
107         ir_node  *block;             /**< the associated block */
108         block_t  *forward_next;      /**< next block entry for forward iteration */
109         block_t  *backward_next;     /**< next block entry for backward iteration */
110         memop_t  *avail;             /**< used locally for the avail map */
111         memop_t  **trans_results;    /**< used to cache translated nodes during the antic calculation. */
112 };
113
114 /**
115  * Metadata for this pass.
116  */
117 typedef struct ldst_env_t {
118         struct obstack  obst;              /**< obstack for temporary data */
119         ir_nodemap_t    adr_map;           /**< Maps addresses to address_entry records */
120         block_t         *forward;          /**< Inverse post-order list of all blocks Start->End */
121         block_t         *backward;         /**< Inverse post-order list of all blocks End->Start */
122         ir_node         *start_bl;         /**< start block of the current graph */
123         ir_node         *end_bl;           /**< end block of the current graph */
124         unsigned        *curr_set;         /**< current set of addresses */
125         memop_t         **curr_id_2_memop; /**< current map of address ids to memops */
126         unsigned        curr_adr_id;       /**< number for address mapping */
127         unsigned        n_mem_ops;         /**< number of memory operations (Loads/Stores) */
128         unsigned        rbs_size;          /**< size of all bitsets in bits */
129         int             max_cfg_preds;     /**< maximum number of block cfg predecessors */
130         int             changed;           /**< Flags for changed graph state */
131 #ifdef DEBUG_libfirm
132         ir_node         **id_2_address;    /**< maps an id to the used address */
133 #endif
134 } ldst_env;
135
136 /* the one and only environment */
137 static ldst_env env;
138
139 #ifdef DEBUG_libfirm
140
141 static firm_dbg_module_t *dbg;
142
143 /**
144  * Dumps the block list.
145  *
146  * @param env  the ldst environment
147  */
148 static void dump_block_list(ldst_env *env)
149 {
150         block_t *entry;
151         memop_t *op;
152         int     i;
153
154         for (entry = env->forward; entry != NULL; entry = entry->forward_next) {
155                 DB((dbg, LEVEL_2, "%+F {", entry->block));
156
157                 i = 0;
158                 for (op = entry->memop_forward; op != NULL; op = op->next) {
159                         if (i == 0) {
160                                 DB((dbg, LEVEL_2, "\n\t"));
161                         }
162                         DB((dbg, LEVEL_2, "%+F", op->node));
163                         if (op->flags & FLAG_KILL_ALL)
164                                 DB((dbg, LEVEL_2, "X"));
165                         else if (op->flags & FLAG_KILLED_NODE)
166                                 DB((dbg, LEVEL_2, "K"));
167                         DB((dbg, LEVEL_2, ", "));
168
169                         i = (i + 1) & 3;
170                 }
171                 DB((dbg, LEVEL_2, "\n}\n\n"));
172         }
173 }  /* dump_block_list */
174
175 /**
176  * Dumps the current set.
177  *
178  * @param bl   current block
179  * @param s    name of the set
180  */
181 static void dump_curr(block_t *bl, const char *s)
182 {
183         unsigned end = env.rbs_size - 1;
184         unsigned pos;
185         int      i;
186
187         DB((dbg, LEVEL_2, "%s[%+F] = {", s, bl->block));
188         i = 0;
189         for (pos = rbitset_next(env.curr_set, 0, 1); pos < end; pos = rbitset_next(env.curr_set, pos + 1, 1)) {
190                 memop_t *op = env.curr_id_2_memop[pos];
191
192                 if (i == 0) {
193                         DB((dbg, LEVEL_2, "\n\t"));
194                 }
195
196                 DB((dbg, LEVEL_2, "<%+F, %+F>, ", op->value.address, op->value.value));
197                 i = (i + 1) & 3;
198         }
199         DB((dbg, LEVEL_2, "\n}\n"));
200 }  /* dump_curr */
201
202 #else
203 static void dump_block_list(ldst_env *env)
204 {
205         (void) env;
206 }
207 static void dump_curr(block_t *bl, const char *s)
208 {
209         (void) bl;
210         (void) s;
211 }
212 #endif /* DEBUG_libfirm */
213
214 /** Get the block entry for a block node */
215 static block_t *get_block_entry(const ir_node *block)
216 {
217         assert(is_Block(block));
218
219         return get_irn_link(block);
220 }  /* get_block_entry */
221
222 /** Get the memop entry for a memory operation node */
223 static memop_t *get_irn_memop(const ir_node *irn)
224 {
225         assert(! is_Block(irn));
226         return get_irn_link(irn);
227 }  /* get_irn_memop */
228
229 /**
230  * Walk over the memory edges from definition to users.
231  * This ensures that even operations without a memory output are found.
232  *
233  * @param irn   start node
234  * @param pre   pre walker function
235  * @param post  post walker function
236  * @param ctx   context parameter for the walker functions
237  */
238 static void walk_memory(ir_node *irn, irg_walk_func *pre, irg_walk_func *post, void *ctx)
239 {
240         int     i;
241         ir_mode *mode;
242
243         mark_irn_visited(irn);
244
245         if (pre)
246                 pre(irn, ctx);
247
248         mode = get_irn_mode(irn);
249         if (mode == mode_M) {
250                 /* every successor uses memory */
251                 for (i = get_irn_n_outs(irn) - 1; i >= 0; --i) {
252                         ir_node *succ = get_irn_out(irn, i);
253
254                         if (! irn_visited(succ))
255                                 walk_memory(succ, pre, post, ctx);
256                 }
257         } else if (mode == mode_T) {
258                 /* only some Projs use memory */
259                 for (i = get_irn_n_outs(irn) - 1; i >= 0; --i) {
260                         ir_node *proj = get_irn_out(irn, i);
261
262                         if (get_irn_mode(proj) == mode_M && ! irn_visited(proj))
263                                 walk_memory(proj, pre, post, ctx);
264                 }
265         }
266         if (post)
267                 post(irn, ctx);
268 }  /* walk_memory */
269
270 /**
271  * Walks over all memory nodes of a graph.
272  *
273  * @param irg   a graph
274  * @param pre   pre walker function
275  * @param post  post walker function
276  * @param ctx   context parameter for the walker functions
277  */
278 static void walk_memory_irg(ir_graph *irg, irg_walk_func pre, irg_walk_func post, void *ctx)
279 {
280         inc_irg_visited(irg);
281
282         ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
283
284         /*
285          * there are two possible sources for memory: initial_mem and nomem;
286          * we ignore nomem as it should NOT change the memory
287          */
288         walk_memory(get_irg_initial_mem(irg), pre, post, ctx);
289
290         ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
291 }  /* walk_memory_irg */
292
293 /**
294  * Register an address and allocate a dense (0..n) ID for it.
295  *
296  * @param adr  the IR-node representing the address
297  *
298  * @return the allocated id
299  */
300 static unsigned register_address(ir_node *adr)
301 {
302         address_entry *entry;
303
304         /* skip Confirms and Casts */
305 restart:
306         if (is_Confirm(adr)) {
307                 adr = get_Confirm_value(adr);
308                 goto restart;
309         }
310         if (is_Cast(adr)) {
311                 adr = get_Cast_op(adr);
312                 goto restart;
313         }
314
315         entry = ir_nodemap_get(&env.adr_map, adr);
316
317         if (entry == NULL) {
318                 /* new address */
319                 entry = OALLOC(&env.obst, address_entry);
320
321                 entry->id = env.curr_adr_id++;
322                 ir_nodemap_insert(&env.adr_map, adr, entry);
323
324                 DB((dbg, LEVEL_3, "ADDRESS %+F has ID %u\n", adr, entry->id));
325 #ifdef DEBUG_libfirm
326                 ARR_APP1(ir_node *, env.id_2_address, adr);
327 #endif
328         }
329         return entry->id;
330 }  /* register_address */
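/*
 * Illustrative use (the node names are hypothetical): registering the same
 * address twice, with or without a Confirm/Cast wrapper, yields the same id:
 *
 *     unsigned id1 = register_address(adr);
 *     unsigned id2 = register_address(confirm_of_adr);
 *     // id1 == id2, because Confirms and Casts are skipped above
 */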
331
332
333 /**
334  * translate an address through a Phi node into a given predecessor
335  * block.
336  *
337  * @param address  the address
338  * @param block    the block
339  * @param pos      the position of the predecessor in block
340  */
341 static ir_node *phi_translate(ir_node *address, const ir_node *block, int pos)
342 {
343         if (is_Phi(address) && get_nodes_block(address) == block)
344                 address = get_Phi_pred(address, pos);
345         return address;
346 }  /* phi_translate */
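/*
 * Example: if the address is Phi(a0, a1) placed in `block`, then
 * phi_translate(address, block, 1) returns a1, i.e. the address value that is
 * live when entering the block over predecessor 1; any other address is
 * returned unchanged.
 */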
347
348 /**
349  * Walker: allocate a block entry for every block
350  * and register all potential addresses.
351  */
352 static void prepare_blocks(ir_node *irn, void *ctx)
353 {
354         (void)ctx;
355
356         if (is_Block(irn)) {
357                 block_t *entry = OALLOC(&env.obst, block_t);
358                 int     n;
359
360                 entry->memop_forward    = NULL;
361                 entry->memop_backward   = NULL;
362                 entry->avail_out        = NULL;
363                 entry->id_2_memop_avail = NULL;
364                 entry->anticL_in        = NULL;
365                 entry->id_2_memop_antic = NULL;
366                 entry->block            = irn;
367                 entry->forward_next     = NULL;
368                 entry->backward_next    = NULL;
369                 entry->avail            = NULL;
370                 entry->trans_results    = NULL;
371                 set_irn_link(irn, entry);
372
373                 set_Block_phis(irn, NULL);
374
375                 /* use block marks to track unreachable blocks */
376                 set_Block_mark(irn, 0);
377
378                 n = get_Block_n_cfgpreds(irn);
379                 if (n > env.max_cfg_preds)
380                         env.max_cfg_preds = n;
381         } else {
382                 ir_mode *mode = get_irn_mode(irn);
383
384                 if (mode_is_reference(mode)) {
385                         /*
386                          * Register ALL possible addresses: this is overkill for now but
387                          * simpler than doing it only for the possible translated addresses
388                          * (which would be sufficient at the moment).
389                          */
390                         (void)register_address(irn);
391                 }
392         }
393 }  /* prepare_blocks */
394
395 /**
396  * Post-Walker, link in all Phi's
397  */
398 static void link_phis(ir_node *irn, void *ctx)
399 {
400         (void)ctx;
401
402         if (is_Phi(irn)) {
403                 ir_node *block = get_nodes_block(irn);
404                 add_Block_phi(block, irn);
405         }
406 }  /* link_phis */
407
408 /**
409  * Block walker: creates the inverse post-order list for the CFG.
410  */
411 static void inverse_post_order(ir_node *block, void *ctx)
412 {
413         block_t *entry = get_block_entry(block);
414
415         (void)ctx;
416
417         /* mark that this block IS reachable from Start */
418         set_Block_mark(block, 1);
419
420         /* create the list in inverse order */
421         entry->forward_next = env.forward;
422         env.forward         = entry;
423
424         /* remember the first visited (last in list) entry, needed for later */
425         if (env.backward == NULL)
426                 env.backward = entry;
427 }  /* inverse_post_order */
428
429 /**
430  * Block walker: create backward links for the memops of a block.
431  */
432 static void collect_backward(ir_node *block, void *ctx)
433 {
434         block_t *entry = get_block_entry(block);
435         memop_t *last, *op;
436
437         (void)ctx;
438
439         /*
440          * Do NOT link in the end block yet. We want it to be
441          * the first in the list. This is NOT guaranteed by the walker
442          * if we have endless loops.
443          */
444         if (block != env.end_bl) {
445                 entry->backward_next = env.backward;
446
447                 /* create the list in inverse order */
448                 env.backward = entry;
449         }
450
451         /* create backward links for all memory ops */
452         last = NULL;
453         for (op = entry->memop_forward; op != NULL; op = op->next) {
454                 op->prev = last;
455                 last     = op;
456         }
457         entry->memop_backward = last;
458 }  /* collect_backward */
459
460 /**
461  * Allocate a memop.
462  *
463  * @param irn  the IR-node representing the memop or NULL
464  *             if this is a translated (virtual) memop
465  *
466  * @return the allocated memop
467  */
468 static memop_t *alloc_memop(ir_node *irn)
469 {
470         memop_t *m = OALLOC(&env.obst, memop_t);
471
472         m->value.address = NULL;
473         m->value.value   = NULL;
474         m->value.mode    = NULL;
475
476         m->node          = irn;
477         m->mem           = NULL;
478         m->replace       = NULL;
479         m->next          = NULL;
480         m->flags         = 0;
481
482         memset(m->projs, 0, sizeof(m->projs));
483
484         if (irn != NULL)
485                 set_irn_link(irn, m);
486         return m;
487 }  /* alloc_memop */
488
489 /**
490  * Create a memop for a Phi-replacement.
491  *
492  * @param op   the memop to clone
493  * @param phi  the Phi-node representing the new value
494  */
495 static memop_t *clone_memop_phi(memop_t *op, ir_node *phi)
496 {
497         memop_t *m = OALLOC(&env.obst, memop_t);
498
499         m->value         = op->value;
500         m->value.value   = phi;
501
502         m->node          = phi;
503         m->replace       = NULL;
504         m->next          = NULL;
505         m->flags         = 0;
506
507         set_irn_link(phi, m);
508         return m;
509 }  /* clone_memop_phi */
510
511 /**
512  * Return the memory properties of a call node.
513  *
514  * @param call  the call node
515  *
516  * @return a bitset of mtp_property_const and mtp_property_pure
517  */
518 static unsigned get_Call_memory_properties(ir_node *call)
519 {
520         ir_type *call_tp = get_Call_type(call);
521         unsigned prop = get_method_additional_properties(call_tp);
522
523         /* check first the call type */
524         if ((prop & (mtp_property_const|mtp_property_pure)) == 0) {
525                 /* try the called entity */
526                 ir_node *ptr = get_Call_ptr(call);
527
528                 if (is_Global(ptr)) {
529                         ir_entity *ent = get_Global_entity(ptr);
530
531                         prop = get_entity_additional_properties(ent);
532                 }
533         }
534         return prop & (mtp_property_const|mtp_property_pure);
535 }  /* get_Call_memory_properties */
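/*
 * Sketch of how the result is used (see update_Call_memop()): a
 * mtp_property_const call does not touch memory at all, a mtp_property_pure
 * call only reads memory, and any other call has to be treated as a potential
 * write that kills all tracked addresses.
 */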
536
537 /**
538  * Returns an entity if the address ptr points to a constant one.
539  *
540  * @param ptr  the address
541  *
542  * @return an entity or NULL
543  */
544 static ir_entity *find_constant_entity(ir_node *ptr)
545 {
546         for (;;) {
547                 if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
548                         return get_SymConst_entity(ptr);
549                 } else if (is_Sel(ptr)) {
550                         ir_entity *ent = get_Sel_entity(ptr);
551                         ir_type   *tp  = get_entity_owner(ent);
552
553                         /* Do not fiddle with polymorphism. */
554                         if (is_Class_type(get_entity_owner(ent)) &&
555                                 ((get_entity_n_overwrites(ent)    != 0) ||
556                                 (get_entity_n_overwrittenby(ent) != 0)   ) )
557                                 return NULL;
558
559                         if (is_Array_type(tp)) {
560                                 /* check bounds */
561                                 int i, n;
562
563                                 for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
564                                         ir_node *bound;
565                                         tarval *tlower, *tupper;
566                                         ir_node *index = get_Sel_index(ptr, i);
567                                         tarval *tv     = computed_value(index);
568
569                                         /* check if the index is constant */
570                                         if (tv == tarval_bad)
571                                                 return NULL;
572
573                                         bound  = get_array_lower_bound(tp, i);
574                                         tlower = computed_value(bound);
575                                         bound  = get_array_upper_bound(tp, i);
576                                         tupper = computed_value(bound);
577
578                                         if (tlower == tarval_bad || tupper == tarval_bad)
579                                                 return NULL;
580
581                                         if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
582                                                 return NULL;
583                                         if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
584                                                 return NULL;
585
586                                         /* ok, bounds check finished */
587                                 }
588                         }
589
590                         if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT)
591                                 return ent;
592
593                         /* try next */
594                         ptr = get_Sel_ptr(ptr);
595                 } else if (is_Add(ptr)) {
596                         ir_node *l = get_Add_left(ptr);
597                         ir_node *r = get_Add_right(ptr);
598
599                         if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
600                                 ptr = l;
601                         else if (get_irn_mode(r) == get_irn_mode(ptr) && is_Const(l))
602                                 ptr = r;
603                         else
604                                 return NULL;
605
606                         /* for now, we support only one addition, reassoc should fold all others */
607                         if (! is_SymConst(ptr) && !is_Sel(ptr))
608                                 return NULL;
609                 } else if (is_Sub(ptr)) {
610                         ir_node *l = get_Sub_left(ptr);
611                         ir_node *r = get_Sub_right(ptr);
612
613                         if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
614                                 ptr = l;
615                         else
616                                 return NULL;
617                         /* for now, we support only one subtraction, reassoc should fold all others */
618                         if (! is_SymConst(ptr) && !is_Sel(ptr))
619                                 return NULL;
620                 } else
621                         return NULL;
622         }
623 }  /* find_constant_entity */
624
625 /**
626  * Return the array index of the Sel node n in dimension dim as a long.
627  */
628 static long get_Sel_array_index_long(ir_node *n, int dim)
629 {
630         ir_node *index = get_Sel_index(n, dim);
631         assert(is_Const(index));
632         return get_tarval_long(get_Const_tarval(index));
633 }  /* get_Sel_array_index_long */
634
635 /**
636  * Returns the accessed component graph path for a
637  * node computing an address.
638  *
639  * @param ptr    the node computing the address
640  * @param depth  current depth in steps upward from the root
641  *               of the address
642  */
643 static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth)
644 {
645         compound_graph_path *res = NULL;
646         ir_entity           *root, *field, *ent;
647         int                 path_len, pos, idx;
648         tarval              *tv;
649         ir_type             *tp;
650
651         if (is_SymConst(ptr)) {
652                 /* a SymConst. If the depth is 0, this is an access to a global
653                  * entity and we don't need a component path, else we know
654                  * at least its length.
655                  */
656                 assert(get_SymConst_kind(ptr) == symconst_addr_ent);
657                 root = get_SymConst_entity(ptr);
658                 res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
659         } else if (is_Sel(ptr)) {
660                 /* it's a Sel, go up until we find the root */
661                 res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);
662                 if (res == NULL)
663                         return NULL;
664
665                 /* fill up the step in the path at the current position */
666                 field    = get_Sel_entity(ptr);
667                 path_len = get_compound_graph_path_length(res);
668                 pos      = path_len - depth - 1;
669                 set_compound_graph_path_node(res, pos, field);
670
671                 if (is_Array_type(get_entity_owner(field))) {
672                         assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
673                         set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
674                 }
675         } else if (is_Add(ptr)) {
676                 ir_node *l    = get_Add_left(ptr);
677                 ir_node *r    = get_Add_right(ptr);
678                 ir_mode *mode = get_irn_mode(ptr);
679                 tarval  *tmp;
680
681                 if (is_Const(r) && get_irn_mode(l) == mode) {
682                         ptr = l;
683                         tv  = get_Const_tarval(r);
684                 } else {
685                         ptr = r;
686                         tv  = get_Const_tarval(l);
687                 }
688 ptr_arith:
689                 mode = get_tarval_mode(tv);
690                 tmp  = tv;
691
692                 /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
693                 if (is_Sel(ptr)) {
694                         field = get_Sel_entity(ptr);
695                 } else {
696                         field = get_SymConst_entity(ptr);
697                 }
698                 idx = 0;
699                 for (ent = field;;) {
700                         unsigned size;
701                         tarval   *sz, *tv_index, *tlower, *tupper;
702                         ir_node  *bound;
703
704                         tp = get_entity_type(ent);
705                         if (! is_Array_type(tp))
706                                 break;
707                         ent = get_array_element_entity(tp);
708                         size = get_type_size_bytes(get_entity_type(ent));
709                         sz   = new_tarval_from_long(size, mode);
710
711                         tv_index = tarval_div(tmp, sz);
712                         tmp      = tarval_mod(tmp, sz);
713
714                         if (tv_index == tarval_bad || tmp == tarval_bad)
715                                 return NULL;
716
717                         assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
718                         bound  = get_array_lower_bound(tp, 0);
719                         tlower = computed_value(bound);
720                         bound  = get_array_upper_bound(tp, 0);
721                         tupper = computed_value(bound);
722
723                         if (tlower == tarval_bad || tupper == tarval_bad)
724                                 return NULL;
725
726                         if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
727                                 return NULL;
728                         if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
729                                 return NULL;
730
731                         /* ok, bounds check finished */
732                         ++idx;
733                 }
734                 if (! tarval_is_null(tmp)) {
735                         /* access to some struct/union member */
736                         return NULL;
737                 }
738
739                 /* should be at least ONE array */
740                 if (idx == 0)
741                         return NULL;
742
743                 res = rec_get_accessed_path(ptr, depth + idx);
744                 if (res == NULL)
745                         return NULL;
746
747                 path_len = get_compound_graph_path_length(res);
748                 pos      = path_len - depth - idx;
749
750                 for (ent = field;;) {
751                         unsigned size;
752                         tarval   *sz, *tv_index;
753                         long     index;
754
755                         tp = get_entity_type(ent);
756                         if (! is_Array_type(tp))
757                                 break;
758                         ent = get_array_element_entity(tp);
759                         set_compound_graph_path_node(res, pos, ent);
760
761                         size = get_type_size_bytes(get_entity_type(ent));
762                         sz   = new_tarval_from_long(size, mode);
763
764                         tv_index = tarval_div(tv, sz);
765                         tv       = tarval_mod(tv, sz);
766
767                         /* worked above, should work again */
768                         assert(tv_index != tarval_bad && tv != tarval_bad);
769
770                         /* bounds already checked above */
771                         index = get_tarval_long(tv_index);
772                         set_compound_graph_path_array_index(res, pos, index);
773                         ++pos;
774                 }
775         } else if (is_Sub(ptr)) {
776                 ir_node *l = get_Sub_left(ptr);
777                 ir_node *r = get_Sub_right(ptr);
778
779                 ptr = l;
780                 tv  = get_Const_tarval(r);
781                 tv  = tarval_neg(tv);
782                 goto ptr_arith;
783         }
784         return res;
785 }  /* rec_get_accessed_path */
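/*
 * Worked example (illustrative, assuming 4 byte ints): for a global
 * `int arr[10]` accessed as Add(SymConst(arr), Const(12)), the loop above
 * divides the byte offset by the element size, 12 / 4 = 3 with remainder 0,
 * so the resulting path is arr[3]; a non-zero remainder would mean an access
 * into a struct/union member and the function gives up.
 */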
786
787 /**
788  * Returns an access path or NULL.  The access path is only
789  * valid if the graph is in phase_high and _no_ address computation is used.
790  */
791 static compound_graph_path *get_accessed_path(ir_node *ptr)
792 {
793         compound_graph_path *gr = rec_get_accessed_path(ptr, 0);
794         return gr;
795 }  /* get_accessed_path */
796
797 typedef struct path_entry {
798         ir_entity         *ent;
799         struct path_entry *next;
800         long              index;
801 } path_entry;
802
803 static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
804 {
805         path_entry       entry, *p;
806         ir_entity        *ent, *field;
807         ir_initializer_t *initializer;
808         tarval           *tv;
809         ir_type          *tp;
810         unsigned         n;
811
812         entry.next = next;
813         if (is_SymConst(ptr)) {
814                 /* found the root */
815                 ent         = get_SymConst_entity(ptr);
816                 initializer = get_entity_initializer(ent);
817                 for (p = next; p != NULL;) {
818                         if (initializer->kind != IR_INITIALIZER_COMPOUND)
819                                 return NULL;
820                         n  = get_initializer_compound_n_entries(initializer);
821                         tp = get_entity_type(ent);
822
823                         if (is_Array_type(tp)) {
824                                 ent = get_array_element_entity(tp);
825                                 if (ent != p->ent) {
826                                         /* a missing [0] */
827                                         if (0 >= n)
828                                                 return NULL;
829                                         initializer = get_initializer_compound_value(initializer, 0);
830                                         continue;
831                                 }
832                         }
833                         if (p->index >= (int) n)
834                                 return NULL;
835                         initializer = get_initializer_compound_value(initializer, p->index);
836
837                         ent = p->ent;
838                         p   = p->next;
839                 }
840                 tp = get_entity_type(ent);
841                 while (is_Array_type(tp)) {
842                         ent = get_array_element_entity(tp);
843                         tp = get_entity_type(ent);
844                         /* a missing [0] */
845                         n  = get_initializer_compound_n_entries(initializer);
846                         if (0 >= n)
847                                 return NULL;
848                         initializer = get_initializer_compound_value(initializer, 0);
849                 }
850
851                 switch (initializer->kind) {
852                 case IR_INITIALIZER_CONST:
853                         return get_initializer_const_value(initializer);
854                 case IR_INITIALIZER_TARVAL:
855                 case IR_INITIALIZER_NULL:
856                 default:
857                         return NULL;
858                 }
859         } else if (is_Sel(ptr)) {
860                 entry.ent = field = get_Sel_entity(ptr);
861                 tp = get_entity_owner(field);
862                 if (is_Array_type(tp)) {
863                         assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
864                         entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
865                 } else {
866                         int i, n_members = get_compound_n_members(tp);
867                         for (i = 0; i < n_members; ++i) {
868                                 if (get_compound_member(tp, i) == field)
869                                         break;
870                         }
871                         if (i >= n_members) {
872                                 /* not found: should NOT happen */
873                                 return NULL;
874                         }
875                         entry.index = i;
876                 }
877                 return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
878         }  else if (is_Add(ptr)) {
879                 ir_node  *l = get_Add_left(ptr);
880                 ir_node  *r = get_Add_right(ptr);
881                 ir_mode  *mode;
882                 unsigned pos;
883
884                 if (is_Const(r)) {
885                         ptr = l;
886                         tv  = get_Const_tarval(r);
887                 } else {
888                         ptr = r;
889                         tv  = get_Const_tarval(l);
890                 }
891 ptr_arith:
892                 mode = get_tarval_mode(tv);
893
894                 /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
895                 if (is_Sel(ptr)) {
896                         field = get_Sel_entity(ptr);
897                 } else {
898                         field = get_SymConst_entity(ptr);
899                 }
900
901                 /* count needed entries */
902                 pos = 0;
903                 for (ent = field;;) {
904                         tp = get_entity_type(ent);
905                         if (! is_Array_type(tp))
906                                 break;
907                         ent = get_array_element_entity(tp);
908                         ++pos;
909                 }
910                 /* should be at least ONE entry */
911                 if (pos == 0)
912                         return NULL;
913
914                 /* allocate the right number of entries */
915                 NEW_ARR_A(path_entry, p, pos);
916
917                 /* fill them up */
918                 pos = 0;
919                 for (ent = field;;) {
920                         unsigned size;
921                         tarval   *sz, *tv_index, *tlower, *tupper;
922                         long     index;
923                         ir_node  *bound;
924
925                         tp = get_entity_type(ent);
926                         if (! is_Array_type(tp))
927                                 break;
928                         ent = get_array_element_entity(tp);
929                         p[pos].ent  = ent;
930                         p[pos].next = &p[pos + 1];
931
932                         size = get_type_size_bytes(get_entity_type(ent));
933                         sz   = new_tarval_from_long(size, mode);
934
935                         tv_index = tarval_div(tv, sz);
936                         tv       = tarval_mod(tv, sz);
937
938                         if (tv_index == tarval_bad || tv == tarval_bad)
939                                 return NULL;
940
941                         assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
942                         bound  = get_array_lower_bound(tp, 0);
943                         tlower = computed_value(bound);
944                         bound  = get_array_upper_bound(tp, 0);
945                         tupper = computed_value(bound);
946
947                         if (tlower == tarval_bad || tupper == tarval_bad)
948                                 return NULL;
949
950                         if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
951                                 return NULL;
952                         if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
953                                 return NULL;
954
955                         /* ok, bounds check finished */
956                         index = get_tarval_long(tv_index);
957                         p[pos].index = index;
958                         ++pos;
959                 }
960                 if (! tarval_is_null(tv)) {
961                         /* hmm, wrong access */
962                         return NULL;
963                 }
964                 p[pos - 1].next = next;
965                 return rec_find_compound_ent_value(ptr, p);
966         } else if (is_Sub(ptr)) {
967                 ir_node *l = get_Sub_left(ptr);
968                 ir_node *r = get_Sub_right(ptr);
969
970                 ptr = l;
971                 tv  = get_Const_tarval(r);
972                 tv  = tarval_neg(tv);
973                 goto ptr_arith;
974         }
975         return NULL;
976 }  /* rec_find_compound_ent_value */
977
978 static ir_node *find_compound_ent_value(ir_node *ptr)
979 {
980         return rec_find_compound_ent_value(ptr, NULL);
981 }  /* find_compound_ent_value */
982
983 /**
984  * Mark a Load memop to be replaced by a definition.
985  *
986  * @param op  the Load memop
987  */
988 static void mark_replace_load(memop_t *op, ir_node *def)
989 {
990         op->replace = def;
991         op->flags |= FLAG_KILLED_NODE;
992         env.changed = 1;
993 }  /* mark_replace_load */
994
995 /**
996  * Mark a Store memop to be removed.
997  *
998  * @param op  the Store memop
999  */
1000 static void mark_remove_store(memop_t *op)
1001 {
1002         op->flags |= FLAG_KILLED_NODE;
1003         env.changed = 1;
1004 }  /* mark_remove_store */
1005
1006 /**
1007  * Update a memop for a Load.
1008  *
1009  * @param m  the memop
1010  */
1011 static void update_Load_memop(memop_t *m)
1012 {
1013         int       i;
1014         ir_node   *load = m->node;
1015         ir_node   *ptr;
1016         ir_entity *ent;
1017
1018         if (get_Load_volatility(load) == volatility_is_volatile)
1019                 m->flags |= FLAG_IGNORE;
1020
1021         ptr = get_Load_ptr(load);
1022
1023         m->value.address = ptr;
1024
1025         for (i = get_irn_n_outs(load) - 1; i >= 0; --i) {
1026                 ir_node *proj = get_irn_out(load, i);
1027                 long    pn;
1028
1029                 /* beware of keep edges */
1030                 if (is_End(proj))
1031                         continue;
1032
1033                 pn = get_Proj_proj(proj);
1034                 m->projs[pn] = proj;
1035                 switch (pn) {
1036                 case pn_Load_res:
1037                         m->value.value = proj;
1038                         m->value.mode  = get_irn_mode(proj);
1039                         break;
1040                 case pn_Load_X_except:
1041                         m->flags |= FLAG_EXCEPTION;
1042                         break;
1043                 case pn_Load_M:
1044                         m->mem = proj;
1045                         break;
1046                 case pn_Load_X_regular:
1047                         break;
1048                 default:
1049                         panic("Unsupported Proj from Load %+F", proj);
1050                 }
1051         }
1052
1053         /* check if we can determine the entity that will be loaded */
1054         ent = find_constant_entity(ptr);
1055
1056         if (ent != NULL && get_entity_visibility(ent) != ir_visibility_external) {
1057                 /* a static allocation that is not external: there should be NO exception
1058                  * when loading even if we cannot replace the load itself. */
1059                 ir_node *value = NULL;
1060
1061                 /* no exception: clear the m fields as they might be checked again later */
1062                 if (m->projs[pn_Load_X_except]) {
1063                         ir_graph *irg = get_irn_irg(ptr);
1064                         exchange(m->projs[pn_Load_X_except], new_r_Bad(irg));
1065                         m->projs[pn_Load_X_except] = NULL;
1066                         m->flags &= ~FLAG_EXCEPTION;
1067                         env.changed = 1;
1068                 }
1069                 if (m->projs[pn_Load_X_regular]) {
1070                         exchange(m->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
1071                         m->projs[pn_Load_X_regular] = NULL;
1072                         env.changed = 1;
1073                 }
1074
1075                 if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT) {
1076                         if (ent->initializer) {
1077                                 /* new style initializer */
1078                                 value = find_compound_ent_value(ptr);
1079                         } else if (entity_has_compound_ent_values(ent)) {
1080                                 /* old style initializer */
1081                                 compound_graph_path *path = get_accessed_path(ptr);
1082
1083                                 if (path != NULL) {
1084                                         assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
1085
1086                                         value = get_compound_ent_value_by_path(ent, path);
1087                                         DB((dbg, LEVEL_1, "  Constant access at %F%F resulted in %+F\n", ent, path, value));
1088                                         free_compound_graph_path(path);
1089                                 }
1090                         }
1091                         if (value != NULL)
1092                                 value = can_replace_load_by_const(load, value);
1093                 }
1094
1095                 if (value != NULL) {
1096                         /* we completely replace the load by this value */
1097                         DB((dbg, LEVEL_1, "Replacing Load %+F by constant %+F\n", m->node, value));
1098                         mark_replace_load(m, value);
1099                         return;
1100                 }
1101         }
1102
1103         if (m->value.value != NULL && !(m->flags & FLAG_IGNORE)) {
1104                 /* only create an address if this node is NOT killed immediately or ignored */
1105                 m->value.id = register_address(ptr);
1106                 ++env.n_mem_ops;
1107         } else {
1108                 /* no user, KILL it */
1109                 mark_replace_load(m, NULL);
1110         }
1111 }  /* update_Load_memop */
1112
1113 /**
1114  * Update a memop for a Store.
1115  *
1116  * @param m  the memop
1117  */
1118 static void update_Store_memop(memop_t *m)
1119 {
1120         int     i;
1121         ir_node *store = m->node;
1122         ir_node *adr   = get_Store_ptr(store);
1123
1124         if (get_Store_volatility(store) == volatility_is_volatile) {
1125                 m->flags |= FLAG_IGNORE;
1126         } else {
1127                 /* only create an address if this node is NOT ignored */
1128                 m->value.id = register_address(adr);
1129                 ++env.n_mem_ops;
1130         }
1131
1132         m->value.address = adr;
1133
1134         for (i = get_irn_n_outs(store) - 1; i >= 0; --i) {
1135                 ir_node *proj = get_irn_out(store, i);
1136                 long    pn;
1137
1138                 /* beware of keep edges */
1139                 if (is_End(proj))
1140                         continue;
1141
1142                 pn = get_Proj_proj(proj);
1143                 m->projs[pn] = proj;
1144                 switch (pn) {
1145                 case pn_Store_X_except:
1146                         m->flags |= FLAG_EXCEPTION;
1147                         break;
1148                 case pn_Store_M:
1149                         m->mem = proj;
1150                         break;
1151                 case pn_Store_X_regular:
1152                         break;
1153                 default:
1154                         panic("Unsupported Proj from Store %+F", proj);
1155                 }
1156         }
1157         m->value.value = get_Store_value(store);
1158         m->value.mode  = get_irn_mode(m->value.value);
1159 }  /* update_Store_memop */
1160
1161 /**
1162  * Update a memop for a Call.
1163  *
1164  * @param m  the memop
1165  */
1166 static void update_Call_memop(memop_t *m)
1167 {
1168         ir_node  *call = m->node;
1169         unsigned prop  = get_Call_memory_properties(call);
1170         int      i;
1171
1172         if (prop & mtp_property_const) {
1173                 /* A constant call does NOT use memory at all; we
1174                    can kick it from the list. */
1175         } else if (prop & mtp_property_pure) {
1176                 /* pure calls READ memory */
1177                 m->flags = 0;
1178         } else
1179                 m->flags = FLAG_KILL_ALL;
1180
1181         for (i = get_irn_n_outs(call) - 1; i >= 0; --i) {
1182                 ir_node *proj = get_irn_out(call, i);
1183
1184                 /* beware of keep edges */
1185                 if (is_End(proj))
1186                         continue;
1187
1188                 switch (get_Proj_proj(proj)) {
1189                 case pn_Call_X_except:
1190                         m->flags |= FLAG_EXCEPTION;
1191                         break;
1192                 case pn_Call_M:
1193                         m->mem = proj;
1194                         break;
1195                 }
1196         }
1197 }  /* update_Call_memop */
1198
1199 /**
1200  * Update a memop for a Div/Mod/Quot/DivMod.
1201  *
1202  * @param m  the memop
1203  */
1204 static void update_DivOp_memop(memop_t *m)
1205 {
1206         ir_node *div = m->node;
1207         int     i;
1208
1209         for (i = get_irn_n_outs(div) - 1; i >= 0; --i) {
1210                 ir_node *proj = get_irn_out(div, i);
1211
1212                 /* beware of keep edges */
1213                 if (is_End(proj))
1214                         continue;
1215
1216                 switch (get_Proj_proj(proj)) {
1217                 case pn_Generic_X_except:
1218                         m->flags |= FLAG_EXCEPTION;
1219                         break;
1220                 case pn_Generic_M:
1221                         m->mem = proj;
1222                         break;
1223                 }
1224         }
1225 }  /* update_DivOp_memop */
1226
1227 /**
1228  * Update a memop for a Phi.
1229  *
1230  * @param m  the memop
1231  */
1232 static void update_Phi_memop(memop_t *m)
1233 {
1234         /* the Phi is its own mem */
1235         m->mem = m->node;
1236 }  /* update_Phi_memop */
1237
1238 /**
1239  * Memory walker: collect all memory ops and build topological lists.
1240  */
1241 static void collect_memops(ir_node *irn, void *ctx)
1242 {
1243         memop_t  *op;
1244         ir_node  *block;
1245         block_t  *entry;
1246
1247         (void) ctx;
1248         if (is_Proj(irn)) {
1249                 /* we can safely ignore ProjM's except the initial memory */
1250                 ir_graph *irg = get_irn_irg(irn);
1251                 if (irn != get_irg_initial_mem(irg))
1252                         return;
1253         }
1254
1255         op    = alloc_memop(irn);
1256         block = get_nodes_block(irn);
1257         entry = get_block_entry(block);
1258
1259         if (is_Phi(irn)) {
1260                 update_Phi_memop(op);
1261                 /* Phis must always be placed first */
1262                 op->next = entry->memop_forward;
1263                 entry->memop_forward = op;
1264                 if (entry->memop_backward == NULL)
1265                         entry->memop_backward = op;
1266         } else {
1267                 switch (get_irn_opcode(irn)) {
1268                 case iro_Load:
1269                         update_Load_memop(op);
1270                         break;
1271                 case iro_Store:
1272                         update_Store_memop(op);
1273                         break;
1274                 case iro_Call:
1275                         update_Call_memop(op);
1276                         break;
1277                 case iro_Sync:
1278                 case iro_Pin:
1279                         op->mem = irn;
1280                         break;
1281                 case iro_Proj:
1282                         /* initial memory */
1283                         op->mem = irn;
1284                         break;
1285                 case iro_Return:
1286                 case iro_End:
1287                 /* we can use those to find the memory edge */
1288                         break;
1289                 case iro_Div:
1290                 case iro_DivMod:
1291                 case iro_Quot:
1292                 case iro_Mod:
1293                         update_DivOp_memop(op);
1294                         break;
1295
1296                 case iro_Builtin:
1297                         /* TODO: handle some builtins */
1298                 default:
1299                         /* unsupported operation */
1300                         op->flags = FLAG_KILL_ALL;
1301                 }
1302
1303
1304                 /* all others should be placed last */
1305                 if (entry->memop_backward == NULL) {
1306                         entry->memop_forward = entry->memop_backward = op;
1307                 } else {
1308                         entry->memop_backward->next = op;
1309                         entry->memop_backward       = op;
1310                 }
1311         }
1312 }  /* collect_memops */
1313
1314 /**
1315  * Find an address in the current set.
1316  *
1317  * @param value  the value to be searched for
1318  *
1319  * @return a memop for the value or NULL if the value does
1320  *         not exist in the set or cannot be converted into
1321  *         the requested mode
1322  */
1323 static memop_t *find_address(const value_t *value)
1324 {
1325         if (rbitset_is_set(env.curr_set, value->id)) {
1326                 memop_t *res = env.curr_id_2_memop[value->id];
1327
1328                 if (res->value.mode == value->mode)
1329                         return res;
1330                 /* allow hidden casts */
1331                 if (get_mode_arithmetic(res->value.mode) == irma_twos_complement &&
1332                     get_mode_arithmetic(value->mode) == irma_twos_complement &&
1333                     get_mode_size_bits(res->value.mode) == get_mode_size_bits(value->mode))
1334                         return res;
1335         }
1336         return NULL;
1337 }  /* find_address */
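/*
 * The "hidden cast" case above covers values that differ only in signedness:
 * assuming the usual 32 bit Is/Iu integer modes, a Store of an Iu value
 * followed by a Load of mode Is from the same address can reuse the stored
 * value, only a Conv has to be added (see conv_to() below).
 */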
1338
1339 /**
1340  * Find an address in the avail_out set.
1341  *
1342  * @param bl     the block
1343  */
1344 static memop_t *find_address_avail(const block_t *bl, unsigned id, const ir_mode *mode)
1345 {
1346         if (rbitset_is_set(bl->avail_out, id)) {
1347                 memop_t *res = bl->id_2_memop_avail[id];
1348
1349                 if (res->value.mode == mode)
1350                         return res;
1351                 /* allow hidden casts */
1352                 if (get_mode_arithmetic(res->value.mode) == irma_twos_complement &&
1353                     get_mode_arithmetic(mode) == irma_twos_complement &&
1354                     get_mode_size_bits(res->value.mode) == get_mode_size_bits(mode))
1355                         return res;
1356         }
1357         return NULL;
1358 }  /* find_address_avail */
1359
1360 /**
1361  * Kill all addresses from the current set.
1362  */
1363 static void kill_all(void)
1364 {
1365         rbitset_clear_all(env.curr_set, env.rbs_size);
1366
1367         /* set sentinel */
1368         rbitset_set(env.curr_set, env.rbs_size - 1);
1369 }  /* kill_all */
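/*
 * The sentinel bit at position rbs_size - 1 never belongs to a real address
 * id; it merely guarantees that rbitset_next() always finds a set bit, so the
 * iteration idiom used in dump_curr() and kill_memops() terminates at `end`:
 *
 *     for (pos = rbitset_next(set, 0, 1); pos < end; pos = rbitset_next(set, pos + 1, 1))
 */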
1370
1371 /**
1372  * Kill memops that are not alias free due to a Store value from the current set.
1373  *
1374  * @param value  the Store value
1375  */
1376 static void kill_memops(const value_t *value)
1377 {
1378         unsigned end = env.rbs_size - 1;
1379         unsigned pos;
1380
1381         for (pos = rbitset_next(env.curr_set, 0, 1); pos < end; pos = rbitset_next(env.curr_set, pos + 1, 1)) {
1382                 memop_t *op = env.curr_id_2_memop[pos];
1383
1384                 if (ir_no_alias != get_alias_relation(value->address, value->mode,
1385                                                           op->value.address, op->value.mode)) {
1386                         rbitset_clear(env.curr_set, pos);
1387                         env.curr_id_2_memop[pos] = NULL;
1388                         DB((dbg, LEVEL_2, "KILLING %+F because of possible alias address %+F\n", op->node, value->address));
1389                 }
1390         }
1391 }  /* kill_memops */
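/*
 * In other words: after a Store to some address p, a previously available
 * value at address q stays in the set only if get_alias_relation() proves
 * ir_no_alias between p and q; a may- or must-alias answer removes the entry.
 */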
1392
1393 /**
1394  * Add the value of a memop to the current set.
1395  *
1396  * @param op  the memory op
1397  */
1398 static void add_memop(memop_t *op)
1399 {
1400         rbitset_set(env.curr_set, op->value.id);
1401         env.curr_id_2_memop[op->value.id] = op;
1402 }  /* add_memop */
1403
1404 /**
1405  * Add the value of a memop to the avail_out set.
1406  *
1407  * @param bl  the block
1408  * @param op  the memory op
1409  */
1410 static void add_memop_avail(block_t *bl, memop_t *op)
1411 {
1412         rbitset_set(bl->avail_out, op->value.id);
1413         bl->id_2_memop_avail[op->value.id] = op;
1414 }  /* add_memop_avail */
1415
1416 /**
1417  * Check, if we can convert a value of one mode to another mode
1418  * without changing the representation of bits.
1419  *
1420  * @param from  the original mode
1421  * @param to    the destination mode
1422  */
1423 static int can_convert_to(const ir_mode *from, const ir_mode *to)
1424 {
1425         if (get_mode_arithmetic(from) == irma_twos_complement &&
1426             get_mode_arithmetic(to) == irma_twos_complement &&
1427             get_mode_size_bits(from) == get_mode_size_bits(to))
1428                 return 1;
1429         return 0;
1430 }  /* can_convert_to */
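/*
 * For illustration (assuming the usual 32 bit Is/Iu and 64 bit Ls modes):
 * can_convert_to(mode_Is, mode_Iu) returns 1, since both are 32 bit two's
 * complement values, while can_convert_to(mode_Is, mode_Ls) returns 0 because
 * the bit widths differ and the bits would have to change.
 */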
1431
1432 /**
1433  * Add a Conv to the requested mode if needed.
1434  *
1435  * @param irn   the IR-node to convert
1436  * @param mode  the destination mode
1437  *
1438  * @return the possible converted node or NULL
1439  *         if the conversion is not possible
1440  */
1441 static ir_node *conv_to(ir_node *irn, ir_mode *mode)
1442 {
1443         ir_mode *other = get_irn_mode(irn);
1444         if (other != mode) {
1445                 /* different modes: check if conversion is possible without changing the bits */
1446                 if (can_convert_to(other, mode)) {
1447                         ir_node *block = get_nodes_block(irn);
1448                         return new_r_Conv(block, irn, mode);
1449                 }
1450                 /* otherwise not possible ... yet */
1451                 return NULL;
1452         }
1453         return irn;
1454 }  /* conv_to */
1455
1456 /**
1457  * Update the address of a value if this address was a load result
1458  * and the load is killed now.
1459  *
1460  * @param value  the value whose address is updated
1461  */
1462 static void update_address(value_t *value)
1463 {
1464         if (is_Proj(value->address)) {
1465                 ir_node *load = get_Proj_pred(value->address);
1466
1467                 if (is_Load(load)) {
1468                         const memop_t *op = get_irn_memop(load);
1469
1470                         if (op->flags & FLAG_KILLED_NODE)
1471                                 value->address = op->replace;
1472                 }
1473         }
1474 }  /* update_address */
1475
1476 /**
1477  * Do forward dataflow analysis on the given block and apply its
1478  * GEN and KILL effects to the current (avail) set.
1479  *
1480  * @param bl  the block
1481  */
1482 static void calc_gen_kill_avail(block_t *bl)
1483 {
1484         memop_t *op;
1485         ir_node *def;
1486
1487         for (op = bl->memop_forward; op != NULL; op = op->next) {
1488                 switch (get_irn_opcode(op->node)) {
1489                 case iro_Phi:
1490                         /* meet */
1491                         break;
1492                 case iro_Sync:
1493                         /* join */
1494                         break;
1495                 case iro_Load:
1496                         if (! (op->flags & (FLAG_KILLED_NODE|FLAG_IGNORE))) {
1497                                 /* do we have this already? */
1498                                 memop_t *other;
1499
1500                                 update_address(&op->value);
1501                                 other = find_address(&op->value);
1502                                 if (other != NULL && other != op) {
1503                                         def = conv_to(other->value.value, op->value.mode);
1504                                         if (def != NULL) {
1505 #ifdef DEBUG_libfirm
1506                                                 if (is_Store(other->node)) {
1507                                                         /* RAW */
1508                                                         DB((dbg, LEVEL_1, "RAW %+F <- %+F(%+F)\n", op->node, def, other->node));
1509                                                 } else {
1510                                                         /* RAR */
1511                                                         DB((dbg, LEVEL_1, "RAR %+F <- %+F(%+F)\n", op->node, def, other->node));
1512                                                 }
1513 #endif
1514                                                 mark_replace_load(op, def);
1515                                                 /* do NOT change the memop table */
1516                                                 continue;
1517                                         }
1518                                 }
1519                                 /* add this value */
1520                                 add_memop(op);
1521                         }
1522                         break;
1523                 case iro_Store:
1524                         if (! (op->flags & FLAG_KILLED_NODE)) {
1525                                 /* do we have this store already */
1526                                 memop_t *other;
1527
1528                                 update_address(&op->value);
1529                                 other = find_address(&op->value);
1530                                 if (other != NULL) {
1531                                         if (is_Store(other->node)) {
1532                                                 if (op != other && !(other->flags & FLAG_IGNORE) &&
1533                                                     get_nodes_block(other->node) == get_nodes_block(op->node)) {
1534                                                         /*
1535                                                           * For a WAW in the same block we can kick the first Store.
1536                                                           * This is a shortcut: we know that the second Store will be anticipated
1537                                                           * then in any case.
1538                                                          */
1539                                                         DB((dbg, LEVEL_1, "WAW %+F <- %+F\n", other->node, op->node));
1540                                                         mark_remove_store(other);
1541                                                         /* FIXME: a Load might get freed due to this killed Store */
1542                                                 }
1543                                         } else if (other->value.value == op->value.value && !(op->flags & FLAG_IGNORE)) {
1544                                                 /* WAR */
1545                                                 DB((dbg, LEVEL_1, "WAR %+F <- %+F\n", op->node, other->node));
1546                                                 mark_remove_store(op);
1547                                                 /* do NOT change the memop table */
1548                                                 continue;
1549                                         }
1550                                 }
1551                                 /* KILL all possible aliases */
1552                                 kill_memops(&op->value);
1553                                 /* add this value */
1554                                 add_memop(op);
1555                         }
1556                         break;
1557                 default:
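                        /* any other memory op: if it is flagged to kill all addresses, clear the whole set */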
1558                         if (op->flags & FLAG_KILL_ALL)
1559                                 kill_all();
1560                 }
1561         }
1562 }  /* calc_gen_kill_avail */
1563
1564 #define BYTE_SIZE(x)  (((x) + 7) >> 3)
1565
1566 /**
1567  * Do forward dataflow analysis on a given block to calculate the avail_out set
1568  * for this block only.
1569  *
1570  * @param bl  the block
1571  */
1572 static void forward_avail(block_t *bl)
1573 {
1574         /* fill the data from the current block */
1575         env.curr_id_2_memop = bl->id_2_memop_avail;
1576         env.curr_set        = bl->avail_out;
1577
1578         calc_gen_kill_avail(bl);
1579         dump_curr(bl, "Avail_out");
1580 }  /* forward_avail */
1581
1582 /**
1583  * Do backward dataflow analysis on a given block to calculate the antic set
1584  * of Loaded addresses.
1585  *
1586  * @param bl  the block
1587  *
1588  * @return non-zero if the set has changed since the last iteration
1589  */
1590 static int backward_antic(block_t *bl)
1591 {
1592         memop_t *op;
1593         ir_node *block = bl->block;
1594         int     n = get_Block_n_cfg_outs(block);
1595
1596         if (n == 1) {
1597                 ir_node  *succ    = get_Block_cfg_out(block, 0);
1598                 block_t  *succ_bl = get_block_entry(succ);
1599                 int      pred_pos = get_Block_cfgpred_pos(succ, block);
1600                 unsigned end      = env.rbs_size - 1;
1601                 unsigned pos;
1602
1603                 kill_all();
1604
1605                 if (bl->trans_results == NULL) {
1606                         /* allocate the translate cache */
1607                         bl->trans_results = OALLOCNZ(&env.obst, memop_t*, env.curr_adr_id);
1608                 }
1609
1610                 /* check for partly redundant values */
1611                 for (pos = rbitset_next(succ_bl->anticL_in, 0, 1);
1612                      pos < end;
1613                      pos = rbitset_next(succ_bl->anticL_in, pos + 1, 1)) {
1614                         /*
1615                          * do Phi-translation here: Note that at this point the nodes are
1616                          * not changed, so we can safely cache the results.
1617                          * However: Loads of Load results ARE bad, because we have no way
1618                          * to translate them yet ...
1619                          */
1620                         memop_t *op = bl->trans_results[pos];
1621                         if (op == NULL) {
1622                                 /* not yet translated */
1623                                 ir_node *adr, *trans_adr;
1624
1625                                 op  = succ_bl->id_2_memop_antic[pos];
1626                                 adr = op->value.address;
1627
1628                                 trans_adr = phi_translate(adr, succ, pred_pos);
1629                                 if (trans_adr != adr) {
1630                                         /* create a new entry for the translated one */
1631                                         memop_t *new_op;
1632
1633                                         new_op = alloc_memop(NULL);
1634                                         new_op->value.address = trans_adr;
1635                                         new_op->value.id      = register_address(trans_adr);
1636                                         new_op->value.mode    = op->value.mode;
1637                                         new_op->node          = op->node; /* we need the node to decide if Load/Store */
1638                                         new_op->flags         = op->flags;
1639
1640                                         bl->trans_results[pos] = new_op;
1641                                         op = new_op;
1642                                 }
1643                         }
1644                         env.curr_id_2_memop[op->value.id] = op;
1645                         rbitset_set(env.curr_set, op->value.id);
1646                 }
1647         } else if (n > 1) {
1648                 ir_node *succ    = get_Block_cfg_out(block, 0);
1649                 block_t *succ_bl = get_block_entry(succ);
1650                 int i;
1651
1652                 rbitset_copy(env.curr_set, succ_bl->anticL_in, env.rbs_size);
1653                 memcpy(env.curr_id_2_memop, succ_bl->id_2_memop_antic, env.rbs_size * sizeof(env.curr_id_2_memop[0]));
1654
1655                 /* Hmm: probably we want to kill merges of Loads and Stores here */
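                /* intersect with the anticL_in sets of the remaining successors */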
1656                 for (i = n - 1; i > 0; --i) {
1657                         ir_node *succ    = get_Block_cfg_out(bl->block, i);
1658                         block_t *succ_bl = get_block_entry(succ);
1659
1660                         rbitset_and(env.curr_set, succ_bl->anticL_in, env.rbs_size);
1661                 }
1662         } else {
1663                 /* block ends with a noreturn call */
1664                 kill_all();
1665         }
1666
1667         dump_curr(bl, "AnticL_out");
1668
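        /* now walk this block's memops backwards: Loads add their value to the set,
           Stores kill possibly aliasing entries */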
1669         for (op = bl->memop_backward; op != NULL; op = op->prev) {
1670                 switch (get_irn_opcode(op->node)) {
1671                 case iro_Phi:
1672                         /* meet */
1673                         break;
1674                 case iro_Sync:
1675                         /* join */
1676                         break;
1677                 case iro_Load:
1678                         if (! (op->flags & (FLAG_KILLED_NODE|FLAG_IGNORE))) {
1679                                 /* always add it */
1680                                 add_memop(op);
1681                         }
1682                         break;
1683                 case iro_Store:
1684                         if (! (op->flags & FLAG_KILLED_NODE)) {
1685                                 /* a Store: check which memops must be killed */
1686                                 kill_memops(&op->value);
1687                         }
1688                         break;
1689                 default:
1690                         if (op->flags & FLAG_KILL_ALL)
1691                                 kill_all();
1692                 }
1693         }
1694
1695         memcpy(bl->id_2_memop_antic, env.curr_id_2_memop, env.rbs_size * sizeof(env.curr_id_2_memop[0]));
1696         if (! rbitsets_equal(bl->anticL_in, env.curr_set, env.rbs_size)) {
1697                 /* changed */
1698                 rbitset_copy(bl->anticL_in, env.curr_set, env.rbs_size);
1699                 dump_curr(bl, "AnticL_in*");
1700                 return 1;
1701         }
1702         dump_curr(bl, "AnticL_in");
1703         return 0;
1704 }  /* backward_antic */
1705
1706 /**
1707  * Replace a Load memop by an already known value.
1708  *
1709  * @param op  the Load memop
1710  */
1711 static void replace_load(memop_t *op)
1712 {
1713         ir_node *load = op->node;
1714         ir_node *def  = skip_Id(op->replace);
1715         ir_node *proj;
1716         ir_mode *mode;
1717
1718         if (def != NULL)
1719                 DB((dbg, LEVEL_1, "Replacing %+F by definition %+F\n", load, is_Proj(def) ? get_Proj_pred(def) : def));
1720         else {
1721                 if (op->flags & FLAG_EXCEPTION) {
1722                         /* bad: this node is unused and executed for exception only */
1723                         DB((dbg, LEVEL_1, "Unused %+F executed for exception only ...\n", load));
1724                         return;
1725                 }
1726                 DB((dbg, LEVEL_1, "Killing unused %+F\n", load));
1727         }
1728
1729         if (op->mem != NULL) {
1730                 /* in rare cases a Load might have NO memory */
1731                 exchange(op->mem, get_Load_mem(load));
1732         }
1733         proj = op->projs[pn_Load_res];
1734         if (proj != NULL) {
1735                 mode = get_irn_mode(proj);
1736                 if (get_irn_mode(def) != mode) {
1737                         /* a hidden cast */
1738                         dbg_info *db    = get_irn_dbg_info(load);
1739                         ir_node  *block = get_nodes_block(proj);
1740                         def = new_rd_Conv(db, block, def, mode);
1741                 }
1742                 exchange(proj, def);
1743         }
1744         proj = op->projs[pn_Load_X_except];
1745         if (proj != NULL) {
1746                 ir_graph *irg = get_irn_irg(load);
1747                 exchange(proj, new_r_Bad(irg));
1748         }
1749         proj = op->projs[pn_Load_X_regular];
1750         if (proj != NULL) {
1751                 exchange(proj, new_r_Jmp(get_nodes_block(load)));
1752         }
1753 }  /* replace_load */
1754
1755 /**
1756  * Remove a Store memop.
1757  *
1758  * @param op  the Store memop
1759  */
1760 static void remove_store(memop_t *op)
1761 {
1762         ir_node *store = op->node;
1763         ir_node *proj;
1764
1765         DB((dbg, LEVEL_1, "Removing %+F\n", store));
1766
1767         if (op->mem != NULL) {
1768                 /* in rare cases a Store might have no memory */
1769                 exchange(op->mem, get_Store_mem(store));
1770         }
1771         proj = op->projs[pn_Store_X_except];
1772         if (proj != NULL) {
1773                 ir_graph *irg = get_irn_irg(store);
1774                 exchange(proj, new_r_Bad(irg));
1775         }
1776         proj = op->projs[pn_Store_X_regular];
1777         if (proj != NULL) {
1778                 exchange(proj, new_r_Jmp(get_nodes_block(store)));
1779         }
1780 }  /* remove_store */
1781
1782
1783 /**
1784  * Do all necessary replacements for a given block.
1785  *
1786  * @param bl  the block
1787  */
1788 static void do_replacements(block_t *bl)
1789 {
1790         memop_t *op;
1791
1792         for (op = bl->memop_forward; op != NULL; op = op->next) {
1793                 if (op->flags & FLAG_KILLED_NODE) {
1794                         switch (get_irn_opcode(op->node)) {
1795                         case iro_Load:
1796                                 replace_load(op);
1797                                 break;
1798                         case iro_Store:
1799                                 remove_store(op);
1800                                 break;
1801                         }
1802                 }
1803         }
1804 }  /* do_replacements */
1805
1806 /**
1807  * Calculate the Avail_out sets for all basic blocks.
1808  */
1809 static void calcAvail(void)
1810 {
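        /* forward_avail() redirects the current set/map to each block's own avail data,
           so remember the scratch versions here and restore them afterwards */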
1811         memop_t  **tmp_memop = env.curr_id_2_memop;
1812         unsigned *tmp_set    = env.curr_set;
1813         block_t  *bl;
1814
1815         /* calculate avail_out */
1816         DB((dbg, LEVEL_2, "Calculate Avail_out\n"));
1817
1818         /* iterate over all blocks in any order, skip the start block */
1819         for (bl = env.forward->forward_next; bl != NULL; bl = bl->forward_next) {
1820                 forward_avail(bl);
1821         }
1822
1823         /* restore the current sets */
1824         env.curr_id_2_memop = tmp_memop;
1825         env.curr_set        = tmp_set;
1826 }  /* calcAvail */
1827
1828 /**
1829  * Calculate the Antic_in sets for all basic blocks.
1830  */
1831 static void calcAntic(void)
1832 {
1833         int i, need_iter;
1834
1835         /* calculate antic_out */
1836         DB((dbg, LEVEL_2, "Calculate Antic_in\n"));
1837         i = 0;
1838         do {
1839                 block_t *bl;
1840
1841                 DB((dbg, LEVEL_2, "Iteration %d:\n=========\n", i));
1842
1843                 need_iter = 0;
1844
1845                 /* over all blocks in reverse post order */
1846                 for (bl = env.backward->backward_next; bl != NULL; bl = bl->backward_next) {
1847                         need_iter |= backward_antic(bl);
1848                 }
1849                 ++i;
1850         } while (need_iter);
1851         DB((dbg, LEVEL_2, "Get anticipated Load set after %d iterations\n", i));
1852 }  /* calcAntic */
1853
1854 /**
1855  * Return the node representing the last memory in a block.
1856  *
1857  * @param bl  the block
1858  */
1859 static ir_node *find_last_memory(block_t *bl)
1860 {
1861         for (;;) {
1862                 if (bl->memop_backward != NULL) {
1863                         return bl->memop_backward->mem;
1864                 }
1865                 /* if there is NO memory in this block, go to the dominator */
1866                 bl = get_block_entry(get_Block_idom(bl->block));
1867         }
1868 }  /* find_last_memory */
1869
1870 /**
1871  * Reroute all memory users of old memory
1872  * to a new memory IR-node.
1873  *
1874  * @param omem  the old memory IR-node
1875  * @param nmem  the new memory IR-node
1876  */
1877 static void reroute_all_mem_users(ir_node *omem, ir_node *nmem)
1878 {
1879         int i;
1880
1881         for (i = get_irn_n_outs(omem) - 1; i >= 0; --i) {
1882                 int     n_pos;
1883                 ir_node *user = get_irn_out_ex(omem, i, &n_pos);
1884
1885                 set_irn_n(user, n_pos, nmem);
1886         }
1887
1888         /* all edges that previously pointed to omem now point to nmem */
1889         nmem->out = omem->out;
1890 }  /* reroute_all_mem_users */
1891
1892 /**
1893  * Reroute memory users of old memory that are dominated by a given block
1894  * to a new memory IR-node.
1895  *
1896  * @param omem     the old memory IR-node
1897  * @param nmem     the new memory IR-node
1898  * @param pass_bl  the block the memory must pass
1899  */
1900 static void reroute_mem_through(ir_node *omem, ir_node *nmem, ir_node *pass_bl)
1901 {
1902         int             i, j, n = get_irn_n_outs(omem);
1903         ir_def_use_edge *edges = NEW_ARR_D(ir_def_use_edge, &env.obst, n + 1);
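        /* edges[0] is reserved for the edge count, real entries start at index 1 */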
1904
1905         for (i = j = 0; i < n; ++i) {
1906                 int     n_pos;
1907                 ir_node *user   = get_irn_out_ex(omem, i, &n_pos);
1908                 ir_node *use_bl = get_nodes_block(user);
1909
1910
1911                 if (is_Phi(user)) {
1912                         use_bl = get_Block_cfgpred_block(use_bl, n_pos);
1913                 }
1914                 if (block_dominates(pass_bl, use_bl)) {
1915                         /* found a user that is dominated */
1916                         ++j;
1917                         edges[j].pos = n_pos;
1918                         edges[j].use = user;
1919
1920                         set_irn_n(user, n_pos, nmem);
1921                 }
1922         }
1923
1924         /* Modify the out structure: we create a new out edge array on our
1925            temporary obstack here. This should be no problem, as we invalidate the edges
1926            at the end anyway. */
1927         /* first entry is used for the length */
1928         edges[0].pos = j;
1929         nmem->out = edges;
1930 }  /* reroute_mem_through */
1931
1932 /**
1933  * insert Loads, making partly redundant Loads fully redundant
1934  */
1935 static int insert_Load(block_t *bl)
1936 {
1937         ir_node  *block = bl->block;
1938         int      i, n = get_Block_n_cfgpreds(block);
1939         unsigned end = env.rbs_size - 1;
1940         unsigned pos;
1941
1942         DB((dbg, LEVEL_3, "processing %+F\n", block));
1943
1944         if (n == 0) {
1945                 /* might still happen for an unreachable block (end for instance) */
1946                 return 0;
1947         }
1948
1949         if (n > 1) {
1950                 ir_node **ins;
1951                 int     pos;
1952
1953                 NEW_ARR_A(ir_node *, ins, n);
1954
1955                 rbitset_set_all(env.curr_set, env.rbs_size);
1956
1957                 /* More than one predecessor: calculate the join over all avail_out sets, ignoring
1958                    unevaluated blocks. These contribute Top anyway. */
1959                 for (i = n - 1; i >= 0; --i) {
1960                         ir_node *pred = skip_Proj(get_Block_cfgpred(block, i));
1961                         ir_node *blk  = get_nodes_block(pred);
1962                         block_t *pred_bl;
1963
1964                         pred_bl = get_block_entry(blk);
1965                         rbitset_and(env.curr_set, pred_bl->avail_out, env.rbs_size);
1966
1967                         if (is_Load(pred) || is_Store(pred)) {
1968                                 /* We reached this block by an exception from a Load or Store:
1969                                  * the memop creating the exception was NOT completed then, so kill it
1970                                  */
1971                                 memop_t *exc_op = get_irn_memop(pred);
1972                                 rbitset_clear(env.curr_set, exc_op->value.id);
1973                         }
1974
1975                 }
1976                 /*
1977                  * Ensure that all values are in the map: build Phi's if necessary.
1978                  * Note: the last bit is the sentinel and ALWAYS set, so start at env.rbs_size - 2.
1979                  */
1980                 for (pos = env.rbs_size - 2; pos >= 0; --pos) {
1981                         if (! rbitset_is_set(env.curr_set, pos))
1982                                 env.curr_id_2_memop[pos] = NULL;
1983                         else {
1984                                 ir_node *pred    = get_Block_cfgpred_block(bl->block, 0);
1985                                 block_t *pred_bl = get_block_entry(pred);
1986                                 int     need_phi = 0;
1987                                 memop_t *first   = NULL;
1988                                 ir_mode *mode    = NULL;
1989
1990                                 for (i = 0; i < n; ++i) {
1991                                         memop_t *mop;
1992
1993                                         pred    = get_Block_cfgpred_block(bl->block, i);
1994                                         pred_bl = get_block_entry(pred);
1995
1996                                         mop = pred_bl->id_2_memop_avail[pos];
1997                                         if (first == NULL) {
1998                                                 first = mop;
1999                                                 ins[0] = first->value.value;
2000                                                 mode = get_irn_mode(ins[0]);
2001
2002                                                 /* no Phi needed so far */
2003                                                 env.curr_id_2_memop[pos] = first;
2004                                         } else {
2005                                                 ins[i] = conv_to(mop->value.value, mode);
2006                                                 if (ins[i] != ins[0]) {
2007                                                         if (ins[i] == NULL) {
2008                                                                 /* conversion failed */
2009                                                                 env.curr_id_2_memop[pos] = NULL;
2010                                                                 rbitset_clear(env.curr_set, pos);
2011                                                                 break;
2012                                                         }
2013                                                         need_phi = 1;
2014                                                 }
2015                                         }
2016                                 }
2017                                 if (need_phi) {
2018                                         /* build a Phi  */
2019                                         ir_node *phi = new_r_Phi(bl->block, n, ins, mode);
2020                                         memop_t *phiop = alloc_memop(phi);
2021
2022                                         phiop->value = first->value;
2023                                         phiop->value.value = phi;
2024
2025                                         /* no need to link it in, as it is a DATA phi */
2026
2027                                         env.curr_id_2_memop[pos] = phiop;
2028
2029                                         DB((dbg, LEVEL_3, "Created new %+F on merging value for address %+F\n", phi, first->value.address));
2030                                 }
2031                         }
2032                 }
2033         } else {
2034                 /* only one predecessor, simply copy the map */
2035                 ir_node *pred    = get_Block_cfgpred_block(bl->block, 0);
2036                 block_t *pred_bl = get_block_entry(pred);
2037
2038                 rbitset_copy(env.curr_set, pred_bl->avail_out, env.rbs_size);
2039
2040                 memcpy(env.curr_id_2_memop, pred_bl->id_2_memop_avail, env.rbs_size * sizeof(bl->id_2_memop_avail[0]));
2041         }
2042
2043         if (n > 1) {
2044                 /* check for partly redundant values */
2045                 for (pos = rbitset_next(bl->anticL_in, 0, 1);
2046                      pos < end;
2047                      pos = rbitset_next(bl->anticL_in, pos + 1, 1)) {
2048                         memop_t *op = bl->id_2_memop_antic[pos];
2049                         int     have_some, all_same;
2050                         ir_node *first;
2051
2052                         if (rbitset_is_set(env.curr_set, pos)) {
2053                                 /* already avail */
2054                                 continue;
2055                         }
2056
2057                         assert(is_Load(op->node));
2058
2059                         DB((dbg, LEVEL_3, "anticipated %+F\n", op->node));
2060
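                        /* have_some: the value is available in at least one predecessor;
                           all_same:  the same node provides it in every predecessor, so no merge is needed */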
2061                         have_some  = 0;
2062                         all_same   = 1;
2063                         first      = NULL;
2064                         for (i = n - 1; i >= 0; --i) {
2065                                 ir_node *pred    = get_Block_cfgpred_block(block, i);
2066                                 block_t *pred_bl = get_block_entry(pred);
2067                                 ir_mode *mode    = op->value.mode;
2068                                 memop_t *e;
2069                                 ir_node *adr;
2070
2071                                 adr = phi_translate(op->value.address, block, i);
2072                                 DB((dbg, LEVEL_3, ".. using address %+F in pred %d\n", adr, i));
2073                                 e   = find_address_avail(pred_bl, register_address(adr), mode);
2074                                 if (e == NULL) {
2075                                         ir_node *ef_block = get_nodes_block(adr);
2076                                         if (! block_dominates(ef_block, pred)) {
2077                                                 /* cannot place a copy here */
2078                                                 have_some = 0;
2079                                                 DB((dbg, LEVEL_3, "%+F cannot be moved into predecessor %+F\n", op->node, pred));
2080                                                 break;
2081                                         }
2082                                         DB((dbg, LEVEL_3, "%+F is not available in predecessor %+F\n", op->node, pred));
2083                                         pred_bl->avail = NULL;
2084                                         all_same       = 0;
2085                                 } else {
2086                                         if (e->value.mode != mode && !can_convert_to(e->value.mode, mode)) {
2087                                                 /* cannot create a Phi due to different modes */
2088                                                 have_some = 0;
2089                                                 break;
2090                                         }
2091                                         pred_bl->avail = e;
2092                                         have_some      = 1;
2093                                         DB((dbg, LEVEL_3, "%+F is available for %+F in predecessor %+F\n", e->node, op->node, pred));
2094                                         if (first == NULL)
2095                                                 first = e->node;
2096                                         else if (first != e->node)
2097                                                 all_same = 0;
2098                                 }
2099                         }
2100                         if (have_some && !all_same) {
2101                                 ir_mode *mode = op->value.mode;
2102                                 ir_node **in, *phi;
2103                                 memop_t *phi_op;
2104
2105                                 NEW_ARR_A(ir_node *, in, n);
2106
2107                                 for (i = n - 1; i >= 0; --i) {
2108                                         ir_node *pred    = get_Block_cfgpred_block(block, i);
2109                                         block_t *pred_bl = get_block_entry(pred);
2110
2111                                         if (pred_bl->avail == NULL) {
2112                                                 /* create a new Load here to make the original one fully redundant */
2113                                                 dbg_info *db       = get_irn_dbg_info(op->node);
2114                                                 ir_node  *last_mem = find_last_memory(pred_bl);
2115                                                 ir_node  *load, *def, *adr;
2116                                                 memop_t  *new_op;
2117
2118                                                 assert(last_mem != NULL);
2119                                                 adr  = phi_translate(op->value.address, block, i);
2120                                                 load = new_rd_Load(db, pred, last_mem, adr, mode, cons_none);
2121                                                 def  = new_r_Proj(load, mode, pn_Load_res);
2122                                                 DB((dbg, LEVEL_1, "Created new %+F in %+F for partly redundant %+F\n", load, pred, op->node));
2123
2124                                                 new_op                = alloc_memop(load);
2125                                                 new_op->mem           = new_r_Proj(load, mode_M, pn_Load_M);
2126                                                 new_op->value.address = adr;
2127                                                 new_op->value.id      = op->value.id;
2128                                                 new_op->value.mode    = mode;
2129                                                 new_op->value.value   = def;
2130
2131                                                 new_op->projs[pn_Load_M]   = new_op->mem;
2132                                                 new_op->projs[pn_Load_res] = def;
2133
2134                                                 new_op->prev = pred_bl->memop_backward;
2135                                                 if (pred_bl->memop_backward != NULL)
2136                                                         pred_bl->memop_backward->next = new_op;
2137
2138                                                 pred_bl->memop_backward = new_op;
2139
2140                                                 if (pred_bl->memop_forward == NULL)
2141                                                         pred_bl->memop_forward = new_op;
2142
2143                                                 if (get_nodes_block(last_mem) == pred) {
2144                                                         /* We have added a new last memory op in the pred block.
2145                                                            If pred already had a last mem, reroute all memory
2146                                                            users. */
2147                                                         reroute_all_mem_users(last_mem, new_op->mem);
2148                                                 } else {
2149                                                         /* reroute only those memory users whose memory flows through the pred block */
2150                                                         reroute_mem_through(last_mem, new_op->mem, pred);
2151                                                 }
2152
2153                                                 /* we added this load at the end, so it will be avail anyway */
2154                                                 add_memop_avail(pred_bl, new_op);
2155                                                 pred_bl->avail = new_op;
2156                                         }
2157                                         in[i] = conv_to(pred_bl->avail->value.value, mode);
2158                                 }
2159                                 phi = new_r_Phi(block, n, in, mode);
2160                                 DB((dbg, LEVEL_1, "Created new %+F in %+F for now redundant %+F\n", phi, block, op->node));
2161
2162                                 phi_op = clone_memop_phi(op, phi);
2163                                 add_memop(phi_op);
2164                         }
2165                 }
2166         }
2167
2168         /* recalculate avail by gen and kill */
2169         calc_gen_kill_avail(bl);
2170
2171         /* always update the map after gen/kill, as values might have been changed due to RAR/WAR/WAW */
2172         memcpy(bl->id_2_memop_avail, env.curr_id_2_memop, env.rbs_size * sizeof(env.curr_id_2_memop[0]));
2173
2174         if (!rbitsets_equal(bl->avail_out, env.curr_set, env.rbs_size)) {
2175                 /* the avail set has changed */
2176                 rbitset_copy(bl->avail_out, env.curr_set, env.rbs_size);
2177                 dump_curr(bl, "Avail_out*");
2178                 return 1;
2179         }
2180         dump_curr(bl, "Avail_out");
2181         return 0;
2182 }  /* insert_Load */
2183
2184 /**
2185  * Insert Loads upwards.
2186  */
2187 static void insert_Loads_upwards(void)
2188 {
2189         int i, need_iter;
2190         block_t *bl;
2191
2192         /* recalculate antic_out and insert Loads */
2193         DB((dbg, LEVEL_2, "Inserting Loads\n"));
2194
2195         i = 0;
2196         do {
2197                 DB((dbg, LEVEL_2, "Iteration %d:\n=========\n", i));
2198
2199                 need_iter = 0;
2200
2201                 /* over all blocks in reverse post order, skip the start block */
2202                 for (bl = env.forward->forward_next; bl != NULL; bl = bl->forward_next) {
2203                         need_iter |= insert_Load(bl);
2204                 }
2205                 ++i;
2206         } while (need_iter);
2207
2208         DB((dbg, LEVEL_2, "Finished Load inserting after %d iterations\n", i));
2209 }  /* insert_Loads_upwards */
2210
2211 /**
2212  * Kill unreachable control flow.
2213  *
2214  * @param irg  the graph to operate on
2215  */
2216 static void kill_unreachable_blocks(ir_graph *irg)
2217 {
2218         block_t *bl;
2219         ir_node **ins;
2220         int     changed = 0;
2221
2222         NEW_ARR_A(ir_node *, ins, env.max_cfg_preds);
2223
2224         for (bl = env.forward; bl != NULL; bl = bl->forward_next) {
2225                 ir_node *block = bl->block;
2226                 int     i, j, k, n;
2227
2228                 assert(get_Block_mark(block));
2229
2230                 n = get_Block_n_cfgpreds(block);
2231
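                /* keep only predecessors coming from reachable (marked) blocks */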
2232                 for (i = j = 0; i < n; ++i) {
2233                         ir_node *pred = get_Block_cfgpred(block, i);
2234                         ir_node *pred_bl;
2235
2236                         if (is_Bad(pred))
2237                                 continue;
2238
2239                         pred_bl = get_nodes_block(skip_Proj(pred));
2240                         if (! get_Block_mark(pred_bl))
2241                                 continue;
2242
2243                         ins[j++] = pred;
2244                 }
2245                 if (j != n) {
2246                         ir_node *phi, *next;
2247
2248                         /* some unreachable blocks detected */
2249                         changed = 1;
2250
2251                         DB((dbg, LEVEL_1, "Killing dead block predecessors on %+F\n", block));
2252
2253                         set_irn_in(block, j, ins);
2254
2255                         /* shorten all Phi nodes */
2256                         for (phi = get_Block_phis(block); phi != NULL; phi = next) {
2257                                 next = get_Phi_next(phi);
2258
2259                                 for (i = k = 0; i < n; ++i) {
2260                                         ir_node *pred = get_Block_cfgpred_block(block, i);
2261
2262                                         if (is_Bad(pred))
2263                                                 continue;
2264
2265                                         if (! get_Block_mark(pred))
2266                                                 continue;
2267
2268                                         ins[k++] = get_Phi_pred(phi, i);
2269                                 }
2270                                 if (k == 1)
2271                                         exchange(phi, ins[0]);
2272                                 else
2273                                         set_irn_in(phi, k, ins);
2274                         }
2275                 }
2276
2277         }
2278
2279         if (changed) {
2280                 /* kick keep alives */
2281                 ir_node *end = get_irg_end(irg);
2282                 int     i, j, n = get_End_n_keepalives(end);
2283
2284                 NEW_ARR_A(ir_node *, ins, n);
2285
2286                 for (i = j = 0; i < n; ++i) {
2287                         ir_node *ka = get_End_keepalive(end, i);
2288                         ir_node *ka_bl;
2289
2290                         if (is_Bad(ka))
2291                                 continue;
2292                         if (is_Block(ka))
2293                                 ka_bl = ka;
2294                         else
2295                                 ka_bl = get_nodes_block(skip_Proj(ka));
2296                         if (get_Block_mark(ka_bl))
2297                                 ins[j++] = ka;
2298                 }
2299                 if (j != n)
2300                         set_End_keepalives(end, j, ins);
2301
2302                 free_irg_outs(irg);
2303
2304                 /* this transformation does NOT invalidate the dominance information */
2305         }
2306 }  /* kill_unreachable_blocks */
2307
2308 int opt_ldst(ir_graph *irg)
2309 {
2310         block_t *bl;
2311
2312         FIRM_DBG_REGISTER(dbg, "firm.opt.ldst");
2313
2314         DB((dbg, LEVEL_1, "\nDoing Load/Store optimization on %+F\n", irg));
2315
2316         /* we need landing pads */
2317         remove_critical_cf_edges(irg);
2318
2319         if (get_opt_alias_analysis()) {
2320                 assure_irg_entity_usage_computed(irg);
2321                 assure_irp_globals_entity_usage_computed();
2322         }
2323
2324         obstack_init(&env.obst);
2325         ir_nodemap_init(&env.adr_map);
2326
2327         env.forward       = NULL;
2328         env.backward      = NULL;
2329         env.curr_adr_id   = 0;
2330         env.n_mem_ops     = 0;
2331         env.max_cfg_preds = 0;
2332         env.changed       = 0;
2333         env.start_bl      = get_irg_start_block(irg);
2334         env.end_bl        = get_irg_end_block(irg);
2335 #ifdef DEBUG_libfirm
2336         env.id_2_address  = NEW_ARR_F(ir_node *, 0);
2337 #endif
2338
2339         assure_irg_outs(irg);
2340
2341         ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_BLOCK_MARK);
2342
2343         /* first step: allocate block entries. Note that some blocks might be
2344            unreachable here. Using the normal walk ensures that ALL blocks are initialized. */
2345         irg_walk_graph(irg, prepare_blocks, link_phis, NULL);
2346
2347         /* produce an inverse post-order list for the CFG: this links only reachable
2348            blocks */
2349         irg_out_block_walk(get_irg_start_block(irg), NULL, inverse_post_order, NULL);
2350
2351         if (! get_Block_mark(env.end_bl)) {
2352                 /*
2353                  * The end block is NOT reachable due to endless loops
2354                  * or no_return calls.
2355                  * Place the end block last.
2356                  * env.backward points to the last block in the list for this purpose.
2357                  */
2358                 env.backward->forward_next = get_block_entry(env.end_bl);
2359
2360                 set_Block_mark(env.end_bl, 1);
2361         }
2362
2363         /* KILL unreachable blocks: these disturb the data flow analysis */
2364         kill_unreachable_blocks(irg);
2365
2366         assure_doms(irg);
2367
2368         /* second step: find and sort all memory ops */
2369         walk_memory_irg(irg, collect_memops, NULL, NULL);
2370
2371 #ifdef DEBUG_libfirm
2372         /* check that the backward map is correct */
2373         assert((unsigned)ARR_LEN(env.id_2_address) == env.curr_adr_id);
2374 #endif
2375
2376         if (env.n_mem_ops == 0) {
2377                 /* no memory ops */
2378                 goto end;
2379         }
2380
2381         /* create the backward links. */
2382         env.backward = NULL;
2383         irg_block_walk_graph(irg, NULL, collect_backward, NULL);
2384
2385         /* link the end block in */
2386         bl = get_block_entry(env.end_bl);
2387         bl->backward_next = env.backward;
2388         env.backward      = bl;
2389
2390         /* check that we really start with the start / end block */
2391         assert(env.forward->block  == env.start_bl);
2392         assert(env.backward->block == env.end_bl);
2393
2394         /* create address sets: for now, only the existing addresses are allowed plus one
2395            needed for the sentinel */
2396         env.rbs_size = env.curr_adr_id + 1;
2397
2398         /* create the current set */
2399         env.curr_set = rbitset_obstack_alloc(&env.obst, env.rbs_size);
2400         rbitset_set(env.curr_set, env.rbs_size - 1);
2401         env.curr_id_2_memop = NEW_ARR_D(memop_t *, &env.obst, env.rbs_size);
2402         memset(env.curr_id_2_memop, 0, env.rbs_size * sizeof(env.curr_id_2_memop[0]));
2403
2404         for (bl = env.forward; bl != NULL; bl = bl->forward_next) {
2405                 /* set sentinel bits */
2406                 bl->avail_out  = rbitset_obstack_alloc(&env.obst, env.rbs_size);
2407                 rbitset_set(bl->avail_out, env.rbs_size - 1);
2408
2409                 bl->id_2_memop_avail = NEW_ARR_D(memop_t *, &env.obst, env.rbs_size);
2410                 memset(bl->id_2_memop_avail, 0, env.rbs_size * sizeof(bl->id_2_memop_avail[0]));
2411
2412                 bl->anticL_in  = rbitset_obstack_alloc(&env.obst, env.rbs_size);
2413                 rbitset_set(bl->anticL_in, env.rbs_size - 1);
2414
2415                 bl->id_2_memop_antic = NEW_ARR_D(memop_t *, &env.obst, env.rbs_size);
2416                 memset(bl->id_2_memop_antic, 0, env.rbs_size * sizeof(bl->id_2_memop_antic[0]));
2417         }
2418
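        /* reference the debug dump helper so it is not reported as unused */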
2419         (void) dump_block_list;
2420
2421         calcAvail();
2422         calcAntic();
2423
2424         insert_Loads_upwards();
2425
2426         if (env.changed) {
2427                 /* over all blocks in reverse post order */
2428                 for (bl = env.forward; bl != NULL; bl = bl->forward_next) {
2429                         do_replacements(bl);
2430                 }
2431
2432                 /* not only invalidate but free them. We might have allocated new out arrays
2433                    on our obstack which are deleted below anyway. */
2434                 free_irg_outs(irg);
2435                 set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
2436         }
2437 end:
2438
2439         ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_BLOCK_MARK);
2440         ir_nodemap_destroy(&env.adr_map);
2441         obstack_free(&env.obst, NULL);
2442
2443 #ifdef DEBUG_libfirm
2444         DEL_ARR_F(env.id_2_address);
2445 #endif
2446
2447         return env.changed != 0;
2448 }  /* opt_ldst */
2449
2450 ir_graph_pass_t *opt_ldst_pass(const char *name)
2451 {
2452         return def_graph_pass_ret(name ? name : "ldst_df", opt_ldst);
2453 }  /* opt_ldst_pass */