libfirm: ir/opt/opt_ldst.c
1 /*
2  * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License Agreement
13  * provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief   Dataflow driven Load/Store optimizations, uses some ideas from
23  *          VanDrunen's LEPRE
24  * @author  Michael Beck
25  * @version $Id$
26  */
27 #include "config.h"
28
29 #include "irnode_t.h"
30 #include "irflag_t.h"
31 #include "array_t.h"
32 #include "ircons.h"
33 #include "irdom.h"
34 #include "irgmod.h"
35 #include "irgwalk.h"
36 #include "irouts.h"
37 #include "irgraph.h"
38 #include "irgopt.h"
39 #include "iropt.h"
40 #include "iroptimize.h"
41 #include "irnodemap.h"
42 #include "raw_bitset.h"
43 #include "debug.h"
44 #include "error.h"
45 #include "irpass.h"
46
47 /* maximum number of output Proj's */
48 #define MAX_PROJ (pn_Load_max > pn_Store_max ? pn_Load_max : pn_Store_max)
49
50 /**
51  * Mapping an address to a dense ID.
52  */
53 typedef struct address_entry_t {
54         unsigned id;          /**< The ID */
55 } address_entry;
56
57 /**
58  * Memop-flags.
59  */
60 enum memop_flags {
61         FLAG_KILL_ALL    = 1, /**< KILL all addresses */
62         FLAG_KILLED_NODE = 2, /**< this node was killed */
63         FLAG_EXCEPTION   = 4, /**< this node has exception flow */
64         FLAG_IGNORE      = 8, /**< ignore this node (volatile or other) */
65 };
66
67 /**
68  * A value: This represents a value stored at a given address in
69  * memory. Do not confuse with values from value numbering.
70  */
71 typedef struct value_t value_t;
72 struct value_t {
73         ir_node  *address;    /**< the address of this value */
74         ir_node  *value;      /**< the value itself */
75         ir_mode  *mode;       /**< the mode of the value */
76         unsigned id;          /**< address id */
77 };
78
79 /**
80  * A memop describes a memory-related operation.
81  * These are Loads/Stores and all other ops that might modify
82  * memory (Calls, CopyB) or cause exceptions.
83  */
84 typedef struct memop_t memop_t;
85 struct memop_t {
86         value_t  value;      /**< the value of this memop: only defined for Load/Store */
87         ir_node  *node;      /**< the memory op itself */
88         ir_node  *mem;       /**< the memory FROM this node */
89         ir_node  *replace;   /**< the replacement node if this memop is replaced */
90         memop_t  *next;      /**< links to the next memory op in the block in forward order. */
91         memop_t  *prev;      /**< links to the previous memory op in the block in forward order. */
92         unsigned flags;      /**< memop flags */
93         ir_node  *projs[MAX_PROJ]; /**< Projs of this memory op */
94 };
95
96 /**
97  * Additional data for every basic block.
98  */
99 typedef struct block_t block_t;
100 struct block_t {
101         memop_t  *memop_forward;     /**< topologically sorted list of memory ops in this block */
102         memop_t  *memop_backward;    /**< last memop in the list */
103         unsigned *avail_out;         /**< out-set of available addresses */
104         memop_t  **id_2_memop_avail; /**< maps avail address ids to memops */
105         unsigned *anticL_in;         /**< in-set of anticipated Load addresses */
106         memop_t  **id_2_memop_antic; /**< maps anticipated address ids to memops */
107         ir_node  *block;             /**< the associated block */
108         block_t  *forward_next;      /**< next block entry for forward iteration */
109         block_t  *backward_next;     /**< next block entry for backward iteration */
110         memop_t  *avail;             /**< used locally for the avail map */
111         memop_t  **trans_results;    /**< used to cache translated nodes during the antic calculation. */
112 };
113
114 /**
115  * Metadata for this pass.
116  */
117 typedef struct ldst_env_t {
118         struct obstack  obst;              /**< obstack for temporary data */
119         ir_nodemap_t    adr_map;           /**< maps addresses to address_entry records */
120         block_t         *forward;          /**< Inverse post-order list of all blocks Start->End */
121         block_t         *backward;         /**< Inverse post-order list of all blocks End->Start */
122         ir_node         *start_bl;         /**< start block of the current graph */
123         ir_node         *end_bl;           /**< end block of the current graph */
124         unsigned        *curr_set;         /**< current set of addresses */
125         memop_t         **curr_id_2_memop; /**< current map of address ids to memops */
126         unsigned        curr_adr_id;       /**< number for address mapping */
127         unsigned        n_mem_ops;         /**< number of memory operations (Loads/Stores) */
128         unsigned        rbs_size;          /**< size of all bitsets in bytes */
129         int             max_cfg_preds;     /**< maximum number of block cfg predecessors */
130         int             changed;           /**< Flags for changed graph state */
131 #ifdef DEBUG_libfirm
132         ir_node         **id_2_address;    /**< maps an id to the used address */
133 #endif
134 } ldst_env;
135
136 /* the one and only environment */
137 static ldst_env env;
138
139 #ifdef DEBUG_libfirm
140
141 static firm_dbg_module_t *dbg;
142
143 /**
144  * Dumps the block list.
145  *
146  * @param env  the ldst environment
147  */
148 static void dump_block_list(ldst_env *env)
149 {
150         block_t *entry;
151         memop_t *op;
152         int     i;
153
154         for (entry = env->forward; entry != NULL; entry = entry->forward_next) {
155                 DB((dbg, LEVEL_2, "%+F {", entry->block));
156
157                 i = 0;
158                 for (op = entry->memop_forward; op != NULL; op = op->next) {
159                         if (i == 0) {
160                                 DB((dbg, LEVEL_2, "\n\t"));
161                         }
162                         DB((dbg, LEVEL_2, "%+F", op->node));
163                         if ((op->flags & FLAG_KILL_ALL) == FLAG_KILL_ALL)
164                                 DB((dbg, LEVEL_2, "X"));
165                         else if (op->flags & FLAG_KILL_ALL)
166                                 DB((dbg, LEVEL_2, "K"));
167                         DB((dbg, LEVEL_2, ", "));
168
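                        /* the counter wraps so that a new output line is started after every four entries */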
169                         i = (i + 1) & 3;
170                 }
171                 DB((dbg, LEVEL_2, "\n}\n\n"));
172         }
173 }  /* dump_block_list */
174
175 /**
176  * Dumps the current set.
177  *
178  * @param bl   current block
179  * @param s    name of the set
180  */
181 static void dump_curr(block_t *bl, const char *s)
182 {
183         unsigned end = env.rbs_size - 1;
184         unsigned pos;
185         int      i;
186
187         DB((dbg, LEVEL_2, "%s[%+F] = {", s, bl->block));
188         i = 0;
189         for (pos = rbitset_next(env.curr_set, 0, 1); pos < end; pos = rbitset_next(env.curr_set, pos + 1, 1)) {
190                 memop_t *op = env.curr_id_2_memop[pos];
191
192                 if (i == 0) {
193                         DB((dbg, LEVEL_2, "\n\t"));
194                 }
195
196                 DB((dbg, LEVEL_2, "<%+F, %+F>, ", op->value.address, op->value.value));
197                 i = (i + 1) & 3;
198         }
199         DB((dbg, LEVEL_2, "\n}\n"));
200 }  /* dump_curr */
201
202 #else
203 static void dump_block_list(ldst_env *env)
204 {
205         (void) env;
206 }
207 static void dump_curr(block_t *bl, const char *s)
208 {
209         (void) bl;
210         (void) s;
211 }
212 #endif /* DEBUG_libfirm */
213
214 /** Get the block entry for a block node */
215 static block_t *get_block_entry(const ir_node *block)
216 {
217         assert(is_Block(block));
218
219         return get_irn_link(block);
220 }  /* get_block_entry */
221
222 /** Get the memop entry for a memory operation node */
223 static memop_t *get_irn_memop(const ir_node *irn)
224 {
225         assert(! is_Block(irn));
226         return get_irn_link(irn);
227 }  /* get_irn_memop */
228
229 /**
230  * Walk over the memory edges from definition to users.
231  * This ensures that even operations without a memory output are found.
232  *
233  * @param irn   start node
234  * @param pre   pre walker function
235  * @param post  post walker function
236  * @param ctx   context parameter for the walker functions
237  */
238 static void walk_memory(ir_node *irn, irg_walk_func *pre, irg_walk_func *post, void *ctx)
239 {
240         int     i;
241         ir_mode *mode;
242
243         mark_irn_visited(irn);
244
245         if (pre)
246                 pre(irn, ctx);
247
248         mode = get_irn_mode(irn);
249         if (mode == mode_M) {
250                 /* every successor uses memory */
251                 for (i = get_irn_n_outs(irn) - 1; i >= 0; --i) {
252                         ir_node *succ = get_irn_out(irn, i);
253
254                         if (! irn_visited(succ))
255                                 walk_memory(succ, pre, post, ctx);
256                 }
257         } else if (mode == mode_T) {
258                 /* only some Projs use memory */
259                 for (i = get_irn_n_outs(irn) - 1; i >= 0; --i) {
260                         ir_node *proj = get_irn_out(irn, i);
261
262                         if (get_irn_mode(proj) == mode_M && ! irn_visited(proj))
263                                 walk_memory(proj, pre, post, ctx);
264                 }
265         }
266         if (post)
267                 post(irn, ctx);
268 }  /* walk_memory */
269
270 /**
271  * Walks over all memory nodes of a graph.
272  *
273  * @param irg   a graph
274  * @param pre   pre walker function
275  * @param post  post walker function
276  * @param ctx   context parameter for the walker functions
277  */
278 static void walk_memory_irg(ir_graph *irg, irg_walk_func pre, irg_walk_func post, void *ctx)
279 {
280         inc_irg_visited(irg);
281
282         ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
283
284         /*
285          * there are two possible sources for memory: initial_mem and nomem
286          * we ignore nomem as this should NOT change the memory
287          */
288         walk_memory(get_irg_initial_mem(irg), pre, post, ctx);
289
290         ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
291 }  /* walk_memory_irg */
292
293 /**
294  * Register an address and allocate a (dense, 0..n) ID for it.
295  *
296  * @param adr  the IR-node representing the address
297  *
298  * @return the allocated id
299  */
300 static unsigned register_address(ir_node *adr)
301 {
302         address_entry *entry;
303
304         /* skip Confirms and Casts */
305 restart:
306         if (is_Confirm(adr)) {
307                 adr = get_Confirm_value(adr);
308                 goto restart;
309         }
310         if (is_Cast(adr)) {
311                 adr = get_Cast_op(adr);
312                 goto restart;
313         }
314
315         entry = ir_nodemap_get(&env.adr_map, adr);
316
317         if (entry == NULL) {
318                 /* new address */
319                 entry = OALLOC(&env.obst, address_entry);
320
321                 entry->id = env.curr_adr_id++;
322                 ir_nodemap_insert(&env.adr_map, adr, entry);
323
324                 DB((dbg, LEVEL_3, "ADDRESS %+F has ID %u\n", adr, entry->id));
325 #ifdef DEBUG_libfirm
326                 ARR_APP1(ir_node *, env.id_2_address, adr);
327 #endif
328         }
329         return entry->id;
330 }  /* register_address */
331
332
333 /**
334  * translate an address through a Phi node into a given predecessor
335  * block.
336  *
337  * @param address  the address
338  * @param block    the block
339  * @param pos      the position of the predecessor in block
340  */
341 static ir_node *phi_translate(ir_node *address, const ir_node *block, int pos)
342 {
343         if (is_Phi(address) && get_nodes_block(address) == block)
344                 address = get_Phi_pred(address, pos);
345         return address;
346 }  /* phi_translate */
347
348 /**
349  * Walker: allocate a block entry for every block
350  * and register all potential addresses.
351  */
352 static void prepare_blocks(ir_node *irn, void *ctx)
353 {
354         (void)ctx;
355
356         if (is_Block(irn)) {
357                 block_t *entry = OALLOC(&env.obst, block_t);
358                 int     n;
359
360                 entry->memop_forward    = NULL;
361                 entry->memop_backward   = NULL;
362                 entry->avail_out        = NULL;
363                 entry->id_2_memop_avail = NULL;
364                 entry->anticL_in        = NULL;
365                 entry->id_2_memop_antic = NULL;
366                 entry->block            = irn;
367                 entry->forward_next     = NULL;
368                 entry->backward_next    = NULL;
369                 entry->avail            = NULL;
370                 entry->trans_results    = NULL;
371                 set_irn_link(irn, entry);
372
373                 set_Block_phis(irn, NULL);
374
375                 /* use block marks to track unreachable blocks */
376                 set_Block_mark(irn, 0);
377
378                 n = get_Block_n_cfgpreds(irn);
379                 if (n > env.max_cfg_preds)
380                         env.max_cfg_preds = n;
381         } else {
382                 ir_mode *mode = get_irn_mode(irn);
383
384                 if (mode_is_reference(mode)) {
385                         /*
386                          * Register ALL possible addresses: this is overkill for now, but
387                          * simpler than doing it only for the translated addresses
388                          * (which would be sufficient at the moment).
389                          */
390                         (void)register_address(irn);
391                 }
392         }
393 }  /* prepare_blocks */
394
395 /**
396  * Post-Walker, link in all Phi's
397  */
398 static void link_phis(ir_node *irn, void *ctx)
399 {
400         (void)ctx;
401
402         if (is_Phi(irn)) {
403                 ir_node *block = get_nodes_block(irn);
404                 add_Block_phi(block, irn);
405         }
406 }  /* link_phis */
407
408 /**
409  * Block walker: creates the inverse post-order list for the CFG.
410  */
411 static void inverse_post_order(ir_node *block, void *ctx)
412 {
413         block_t *entry = get_block_entry(block);
414
415         (void)ctx;
416
417                 /* mark that this block IS reachable from Start */
418         set_Block_mark(block, 1);
419
420         /* create the list in inverse order */
421         entry->forward_next = env.forward;
422         env.forward         = entry;
423
424         /* remember the first visited (last in list) entry, needed for later */
425         if (env.backward == NULL)
426                 env.backward = entry;
427 }  /* inverse_post_order */
428
429 /**
430  * Block walker: create backward links for the memops of a block.
431  */
432 static void collect_backward(ir_node *block, void *ctx)
433 {
434         block_t *entry = get_block_entry(block);
435         memop_t *last, *op;
436
437         (void)ctx;
438
439         /*
440          * Do NOT link in the end block yet. We want it to be
441          * the first in the list. This is NOT guaranteed by the walker
442          * if we have endless loops.
443          */
444         if (block != env.end_bl) {
445                 entry->backward_next = env.backward;
446
447                 /* create the list in inverse order */
448                 env.backward = entry;
449         }
450
451         /* create backward links for all memory ops */
452         last = NULL;
453         for (op = entry->memop_forward; op != NULL; op = op->next) {
454                 op->prev = last;
455                 last     = op;
456         }
457         entry->memop_backward = last;
458 }  /* collect_backward */
459
460 /**
461  * Allocate a memop.
462  *
463  * @param irn  the IR-node representing the memop or NULL
464  *             if this is a translated (virtual) memop
465  *
466  * @return the allocated memop
467  */
468 static memop_t *alloc_memop(ir_node *irn)
469 {
470         memop_t *m = OALLOC(&env.obst, memop_t);
471
472         m->value.address = NULL;
473         m->value.value   = NULL;
474         m->value.mode    = NULL;
475
476         m->node          = irn;
477         m->mem           = NULL;
478         m->replace       = NULL;
479         m->next          = NULL;
480         m->flags         = 0;
481
482         memset(m->projs, 0, sizeof(m->projs));
483
484         if (irn != NULL)
485                 set_irn_link(irn, m);
486         return m;
487 }  /* alloc_memop */
488
489 /**
490  * Create a memop for a Phi-replacement.
491  *
492  * @param op   the memop to clone
493  * @param phi  the Phi-node representing the new value
494  */
495 static memop_t *clone_memop_phi(memop_t *op, ir_node *phi)
496 {
497         memop_t *m = OALLOC(&env.obst, memop_t);
498
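        /* take over address, mode and id from the original memop; the value itself is now the Phi */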
499         m->value         = op->value;
500         m->value.value   = phi;
501
502         m->node          = phi;
503         m->replace       = NULL;
504         m->next          = NULL;
505         m->flags         = 0;
506
507         set_irn_link(phi, m);
508         return m;
509 }  /* clone_memop_phi */
510
511 /**
512  * Return the memory properties of a call node.
513  *
514  * @param call  the call node
515  *
516  * @return a bitset of mtp_property_const and mtp_property_pure
517  */
518 static unsigned get_Call_memory_properties(ir_node *call)
519 {
520         ir_type *call_tp = get_Call_type(call);
521         unsigned prop = get_method_additional_properties(call_tp);
522
523         /* check first the call type */
524         if ((prop & (mtp_property_const|mtp_property_pure)) == 0) {
525                 /* try the called entity */
526                 ir_node *ptr = get_Call_ptr(call);
527
528                 if (is_Global(ptr)) {
529                         ir_entity *ent = get_Global_entity(ptr);
530
531                         prop = get_entity_additional_properties(ent);
532                 }
533         }
534         return prop & (mtp_property_const|mtp_property_pure);
535 }  /* get_Call_memory_properties */
536
537 /**
538  * Returns an entity if the address ptr points to a constant one.
539  *
540  * @param ptr  the address
541  *
542  * @return an entity or NULL
543  */
544 static ir_entity *find_constant_entity(ir_node *ptr)
545 {
546         for (;;) {
547                 if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
548                         return get_SymConst_entity(ptr);
549                 } else if (is_Sel(ptr)) {
550                         ir_entity *ent = get_Sel_entity(ptr);
551                         ir_type   *tp  = get_entity_owner(ent);
552
553                         /* Do not fiddle with polymorphism. */
554                         if (is_Class_type(get_entity_owner(ent)) &&
555                                 ((get_entity_n_overwrites(ent)    != 0) ||
556                                 (get_entity_n_overwrittenby(ent) != 0)   ) )
557                                 return NULL;
558
559                         if (is_Array_type(tp)) {
560                                 /* check bounds */
561                                 int i, n;
562
563                                 for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
564                                         ir_node *bound;
565                                         tarval *tlower, *tupper;
566                                         ir_node *index = get_Sel_index(ptr, i);
567                                         tarval *tv     = computed_value(index);
568
569                                         /* check if the index is constant */
570                                         if (tv == tarval_bad)
571                                                 return NULL;
572
573                                         bound  = get_array_lower_bound(tp, i);
574                                         tlower = computed_value(bound);
575                                         bound  = get_array_upper_bound(tp, i);
576                                         tupper = computed_value(bound);
577
578                                         if (tlower == tarval_bad || tupper == tarval_bad)
579                                                 return NULL;
580
581                                         if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
582                                                 return NULL;
583                                         if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
584                                                 return NULL;
585
586                                         /* ok, bounds check finished */
587                                 }
588                         }
589
590                         if (get_entity_linkage(ent) == IR_LINKAGE_CONSTANT)
591                                 return ent;
592
593                         /* try next */
594                         ptr = get_Sel_ptr(ptr);
595                 } else if (is_Add(ptr)) {
596                         ir_node *l = get_Add_left(ptr);
597                         ir_node *r = get_Add_right(ptr);
598
599                         if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
600                                 ptr = l;
601                         else if (get_irn_mode(r) == get_irn_mode(ptr) && is_Const(l))
602                                 ptr = r;
603                         else
604                                 return NULL;
605
606                         /* for now, we support only one addition, reassoc should fold all others */
607                         if (! is_SymConst(ptr) && !is_Sel(ptr))
608                                 return NULL;
609                 } else if (is_Sub(ptr)) {
610                         ir_node *l = get_Sub_left(ptr);
611                         ir_node *r = get_Sub_right(ptr);
612
613                         if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
614                                 ptr = l;
615                         else
616                                 return NULL;
617                         /* for now, we support only one subtraction, reassoc should fold all others */
618                         if (! is_SymConst(ptr) && !is_Sel(ptr))
619                                 return NULL;
620                 } else
621                         return NULL;
622         }
623 }  /* find_constant_entity */
624
625 /**
626  * Return the Sel index of Sel node n in dimension dim as a long.
627  */
628 static long get_Sel_array_index_long(ir_node *n, int dim)
629 {
630         ir_node *index = get_Sel_index(n, dim);
631         assert(is_Const(index));
632         return get_tarval_long(get_Const_tarval(index));
633 }  /* get_Sel_array_index_long */
634
635 /**
636  * Returns the accessed component graph path for a
637  * node computing an address.
638  *
639  * @param ptr    the node computing the address
640  * @param depth  current depth in steps upward from the root
641  *               of the address
642  */
643 static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth)
644 {
645         compound_graph_path *res = NULL;
646         ir_entity           *root, *field, *ent;
647         int                 path_len, pos, idx;
648         tarval              *tv;
649         ir_type             *tp;
650
651         if (is_SymConst(ptr)) {
652                 /* a SymConst. If the depth is 0, this is an access to a global
653                  * entity and we don't need a component path, else we know
654                  * at least its length.
655                  */
656                 assert(get_SymConst_kind(ptr) == symconst_addr_ent);
657                 root = get_SymConst_entity(ptr);
658                 res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
659         } else if (is_Sel(ptr)) {
660                 /* it's a Sel, go up until we find the root */
661                 res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);
662                 if (res == NULL)
663                         return NULL;
664
665                 /* fill up the step in the path at the current position */
666                 field    = get_Sel_entity(ptr);
667                 path_len = get_compound_graph_path_length(res);
668                 pos      = path_len - depth - 1;
669                 set_compound_graph_path_node(res, pos, field);
670
671                 if (is_Array_type(get_entity_owner(field))) {
672                         assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
673                         set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
674                 }
675         } else if (is_Add(ptr)) {
676                 ir_node *l    = get_Add_left(ptr);
677                 ir_node *r    = get_Add_right(ptr);
678                 ir_mode *mode = get_irn_mode(ptr);
679                 tarval  *tmp;
680
681                 if (is_Const(r) && get_irn_mode(l) == mode) {
682                         ptr = l;
683                         tv  = get_Const_tarval(r);
684                 } else {
685                         ptr = r;
686                         tv  = get_Const_tarval(l);
687                 }
688 ptr_arith:
689                 mode = get_tarval_mode(tv);
690                 tmp  = tv;
691
692                 /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
693                 if (is_Sel(ptr)) {
694                         field = get_Sel_entity(ptr);
695                 } else {
696                         field = get_SymConst_entity(ptr);
697                 }
698                 idx = 0;
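                /*
                 * Decompose the constant offset into array indices: for every array
                 * dimension divide by the element size to get the index and keep the
                 * remainder for the next (inner) dimension; every index is checked
                 * against the array bounds. idx counts the array dimensions seen.
                 */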
699                 for (ent = field;;) {
700                         unsigned size;
701                         tarval   *sz, *tv_index, *tlower, *tupper;
702                         ir_node  *bound;
703
704                         tp = get_entity_type(ent);
705                         if (! is_Array_type(tp))
706                                 break;
707                         ent = get_array_element_entity(tp);
708                         size = get_type_size_bytes(get_entity_type(ent));
709                         sz   = new_tarval_from_long(size, mode);
710
711                         tv_index = tarval_div(tmp, sz);
712                         tmp      = tarval_mod(tmp, sz);
713
714                         if (tv_index == tarval_bad || tmp == tarval_bad)
715                                 return NULL;
716
717                         assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
718                         bound  = get_array_lower_bound(tp, 0);
719                         tlower = computed_value(bound);
720                         bound  = get_array_upper_bound(tp, 0);
721                         tupper = computed_value(bound);
722
723                         if (tlower == tarval_bad || tupper == tarval_bad)
724                                 return NULL;
725
726                         if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
727                                 return NULL;
728                         if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
729                                 return NULL;
730
731                         /* ok, bounds check finished */
732                         ++idx;
733                 }
734                 if (! tarval_is_null(tmp)) {
735                         /* access to some struct/union member */
736                         return NULL;
737                 }
738
739                 /* should be at least ONE array */
740                 if (idx == 0)
741                         return NULL;
742
743                 res = rec_get_accessed_path(ptr, depth + idx);
744                 if (res == NULL)
745                         return NULL;
746
747                 path_len = get_compound_graph_path_length(res);
748                 pos      = path_len - depth - idx;
749
750                 for (ent = field;;) {
751                         unsigned size;
752                         tarval   *sz, *tv_index;
753                         long     index;
754
755                         tp = get_entity_type(ent);
756                         if (! is_Array_type(tp))
757                                 break;
758                         ent = get_array_element_entity(tp);
759                         set_compound_graph_path_node(res, pos, ent);
760
761                         size = get_type_size_bytes(get_entity_type(ent));
762                         sz   = new_tarval_from_long(size, mode);
763
764                         tv_index = tarval_div(tv, sz);
765                         tv       = tarval_mod(tv, sz);
766
767                         /* worked above, should work again */
768                         assert(tv_index != tarval_bad && tv != tarval_bad);
769
770                         /* bounds already checked above */
771                         index = get_tarval_long(tv_index);
772                         set_compound_graph_path_array_index(res, pos, index);
773                         ++pos;
774                 }
775         } else if (is_Sub(ptr)) {
776                 ir_node *l = get_Sub_left(ptr);
777                 ir_node *r = get_Sub_right(ptr);
778
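                /* a Sub with a constant right operand is handled like an Add with the negated offset */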
779                 ptr = l;
780                 tv  = get_Const_tarval(r);
781                 tv  = tarval_neg(tv);
782                 goto ptr_arith;
783         }
784         return res;
785 }  /* rec_get_accessed_path */
786
787 /**
788  * Returns an access path or NULL.  The access path is only
789  * valid if the graph is in phase_high and _no_ address computation is used.
790  */
791 static compound_graph_path *get_accessed_path(ir_node *ptr)
792 {
793         compound_graph_path *gr = rec_get_accessed_path(ptr, 0);
794         return gr;
795 }  /* get_accessed_path */
796
797 typedef struct path_entry {
798         ir_entity         *ent;
799         struct path_entry *next;
800         long              index;
801 } path_entry;
802
803 static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
804 {
805         path_entry       entry, *p;
806         ir_entity        *ent, *field;
807         ir_initializer_t *initializer;
808         tarval           *tv;
809         ir_type          *tp;
810         unsigned         n;
811
812         entry.next = next;
813         if (is_SymConst(ptr)) {
814                 /* found the root */
815                 ent         = get_SymConst_entity(ptr);
816                 initializer = get_entity_initializer(ent);
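                /* walk down the initializer following the collected path entries to
                 * reach the initializer of the accessed member */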
817                 for (p = next; p != NULL;) {
818                         if (initializer->kind != IR_INITIALIZER_COMPOUND)
819                                 return NULL;
820                         n  = get_initializer_compound_n_entries(initializer);
821                         tp = get_entity_type(ent);
822
823                         if (is_Array_type(tp)) {
824                                 ent = get_array_element_entity(tp);
825                                 if (ent != p->ent) {
826                                         /* a missing [0] */
827                                         if (0 >= n)
828                                                 return NULL;
829                                         initializer = get_initializer_compound_value(initializer, 0);
830                                         continue;
831                                 }
832                         }
833                         if (p->index >= (int) n)
834                                 return NULL;
835                         initializer = get_initializer_compound_value(initializer, p->index);
836
837                         ent = p->ent;
838                         p   = p->next;
839                 }
840                 tp = get_entity_type(ent);
841                 while (is_Array_type(tp)) {
842                         ent = get_array_element_entity(tp);
843                         tp = get_entity_type(ent);
844                         /* a missing [0] */
845                         n  = get_initializer_compound_n_entries(initializer);
846                         if (0 >= n)
847                                 return NULL;
848                         initializer = get_initializer_compound_value(initializer, 0);
849                 }
850
851                 switch (initializer->kind) {
852                 case IR_INITIALIZER_CONST:
853                         return get_initializer_const_value(initializer);
854                 case IR_INITIALIZER_TARVAL:
855                 case IR_INITIALIZER_NULL:
856                 default:
857                         return NULL;
858                 }
859         } else if (is_Sel(ptr)) {
860                 entry.ent = field = get_Sel_entity(ptr);
861                 tp = get_entity_owner(field);
862                 if (is_Array_type(tp)) {
863                         assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
864                         entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
865                 } else {
866                         int i, n_members = get_compound_n_members(tp);
867                         for (i = 0; i < n_members; ++i) {
868                                 if (get_compound_member(tp, i) == field)
869                                         break;
870                         }
871                         if (i >= n_members) {
872                                 /* not found: should NOT happen */
873                                 return NULL;
874                         }
875                         entry.index = i;
876                 }
877                 return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
878         }  else if (is_Add(ptr)) {
879                 ir_node  *l = get_Add_left(ptr);
880                 ir_node  *r = get_Add_right(ptr);
881                 ir_mode  *mode;
882                 unsigned pos;
883
884                 if (is_Const(r)) {
885                         ptr = l;
886                         tv  = get_Const_tarval(r);
887                 } else {
888                         ptr = r;
889                         tv  = get_Const_tarval(l);
890                 }
891 ptr_arith:
892                 mode = get_tarval_mode(tv);
893
894                 /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
895                 if (is_Sel(ptr)) {
896                         field = get_Sel_entity(ptr);
897                 } else {
898                         field = get_SymConst_entity(ptr);
899                 }
900
901                 /* count needed entries */
902                 pos = 0;
903                 for (ent = field;;) {
904                         tp = get_entity_type(ent);
905                         if (! is_Array_type(tp))
906                                 break;
907                         ent = get_array_element_entity(tp);
908                         ++pos;
909                 }
910                 /* should be at least ONE entry */
911                 if (pos == 0)
912                         return NULL;
913
914                 /* allocate the right number of entries */
915                 NEW_ARR_A(path_entry, p, pos);
916
917                 /* fill them up */
918                 pos = 0;
919                 for (ent = field;;) {
920                         unsigned size;
921                         tarval   *sz, *tv_index, *tlower, *tupper;
922                         long     index;
923                         ir_node  *bound;
924
925                         tp = get_entity_type(ent);
926                         if (! is_Array_type(tp))
927                                 break;
928                         ent = get_array_element_entity(tp);
929                         p[pos].ent  = ent;
930                         p[pos].next = &p[pos + 1];
931
932                         size = get_type_size_bytes(get_entity_type(ent));
933                         sz   = new_tarval_from_long(size, mode);
934
935                         tv_index = tarval_div(tv, sz);
936                         tv       = tarval_mod(tv, sz);
937
938                         if (tv_index == tarval_bad || tv == tarval_bad)
939                                 return NULL;
940
941                         assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
942                         bound  = get_array_lower_bound(tp, 0);
943                         tlower = computed_value(bound);
944                         bound  = get_array_upper_bound(tp, 0);
945                         tupper = computed_value(bound);
946
947                         if (tlower == tarval_bad || tupper == tarval_bad)
948                                 return NULL;
949
950                         if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
951                                 return NULL;
952                         if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
953                                 return NULL;
954
955                         /* ok, bounds check finished */
956                         index = get_tarval_long(tv_index);
957                         p[pos].index = index;
958                         ++pos;
959                 }
960                 if (! tarval_is_null(tv)) {
961                         /* hmm, wrong access */
962                         return NULL;
963                 }
964                 p[pos - 1].next = next;
965                 return rec_find_compound_ent_value(ptr, p);
966         } else if (is_Sub(ptr)) {
967                 ir_node *l = get_Sub_left(ptr);
968                 ir_node *r = get_Sub_right(ptr);
969
970                 ptr = l;
971                 tv  = get_Const_tarval(r);
972                 tv  = tarval_neg(tv);
973                 goto ptr_arith;
974         }
975         return NULL;
976 }  /* rec_find_compound_ent_value */
977
978 static ir_node *find_compound_ent_value(ir_node *ptr)
979 {
980         return rec_find_compound_ent_value(ptr, NULL);
981 }  /* find_compound_ent_value */
982
983 /**
984  * Mark a Load memop to be replaced by a definition.
985  *
986  * @param op  the Load memop
987  */
988 static void mark_replace_load(memop_t *op, ir_node *def)
989 {
990         op->replace = def;
991         op->flags |= FLAG_KILLED_NODE;
992         env.changed = 1;
993 }  /* mark_replace_load */
994
995 /**
996  * Mark a Store memop to be removed.
997  *
998  * @param op  the Store memop
999  */
1000 static void mark_remove_store(memop_t *op)
1001 {
1002         op->flags |= FLAG_KILLED_NODE;
1003         env.changed = 1;
1004 }  /* mark_remove_store */
1005
1006 /**
1007  * Update a memop for a Load.
1008  *
1009  * @param m  the memop
1010  */
1011 static void update_Load_memop(memop_t *m)
1012 {
1013         int       i;
1014         ir_node   *load = m->node;
1015         ir_node   *ptr;
1016         ir_entity *ent;
1017
1018         if (get_Load_volatility(load) == volatility_is_volatile)
1019                 m->flags |= FLAG_IGNORE;
1020
1021         ptr = get_Load_ptr(load);
1022
1023         m->value.address = ptr;
1024
1025         for (i = get_irn_n_outs(load) - 1; i >= 0; --i) {
1026                 ir_node *proj = get_irn_out(load, i);
1027                 long    pn;
1028
1029                 /* beware of keep edges */
1030                 if (is_End(proj))
1031                         continue;
1032
1033                 pn = get_Proj_proj(proj);
1034                 m->projs[pn] = proj;
1035                 switch (pn) {
1036                 case pn_Load_res:
1037                         m->value.value = proj;
1038                         m->value.mode  = get_irn_mode(proj);
1039                         break;
1040                 case pn_Load_X_except:
1041                         m->flags |= FLAG_EXCEPTION;
1042                         break;
1043                 case pn_Load_M:
1044                         m->mem = proj;
1045                         break;
1046                 case pn_Load_X_regular:
1047                         break;
1048                 default:
1049                         panic("Unsupported Proj from Load %+F", proj);
1050                 }
1051         }
1052
1053         /* check if we can determine the entity that will be loaded */
1054         ent = find_constant_entity(ptr);
1055
1056         if (ent != NULL && get_entity_visibility(ent) != ir_visibility_external) {
1057                 /* a static allocation that is not external: there should be NO exception
1058                  * when loading even if we cannot replace the load itself. */
1059                 ir_node *value = NULL;
1060
1061                 /* no exception: clear the m fields as they might be checked again later */
1062                 if (m->projs[pn_Load_X_except]) {
1063                         exchange(m->projs[pn_Load_X_except], new_Bad());
1064                         m->projs[pn_Load_X_except] = NULL;
1065                         m->flags &= ~FLAG_EXCEPTION;
1066                         env.changed = 1;
1067                 }
1068                 if (m->projs[pn_Load_X_regular]) {
1069                         exchange(m->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
1070                         m->projs[pn_Load_X_regular] = NULL;
1071                         env.changed = 1;
1072                 }
1073
1074                 if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT) {
1075                         if (ent->initializer) {
1076                                 /* new style initializer */
1077                                 value = find_compound_ent_value(ptr);
1078                         } else if (entity_has_compound_ent_values(ent)) {
1079                                 /* old style initializer */
1080                                 compound_graph_path *path = get_accessed_path(ptr);
1081
1082                                 if (path != NULL) {
1083                                         assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
1084
1085                                         value = get_compound_ent_value_by_path(ent, path);
1086                                         DB((dbg, LEVEL_1, "  Constant access at %F%F resulted in %+F\n", ent, path, value));
1087                                         free_compound_graph_path(path);
1088                                 }
1089                         }
1090                         if (value != NULL)
1091                                 value = can_replace_load_by_const(load, value);
1092                 }
1093
1094                 if (value != NULL) {
1095                         /* we completely replace the load by this value */
1096                         DB((dbg, LEVEL_1, "Replacing Load %+F by constant %+F\n", m->node, value));
1097                         mark_replace_load(m, value);
1098                         return;
1099                 }
1100         }
1101
1102         if (m->value.value != NULL && !(m->flags & FLAG_IGNORE)) {
1103                 /* only create an address if this node is NOT killed immediately or ignored */
1104                 m->value.id = register_address(ptr);
1105                 ++env.n_mem_ops;
1106         } else {
1107                 /* no user, KILL it */
1108                 mark_replace_load(m, NULL);
1109         }
1110 }  /* update_Load_memop */
1111
1112 /**
1113  * Update a memop for a Store.
1114  *
1115  * @param m  the memop
1116  */
1117 static void update_Store_memop(memop_t *m)
1118 {
1119         int     i;
1120         ir_node *store = m->node;
1121         ir_node *adr   = get_Store_ptr(store);
1122
1123         if (get_Store_volatility(store) == volatility_is_volatile) {
1124                 m->flags |= FLAG_IGNORE;
1125         } else {
1126                 /* only create an address if this node is NOT ignored */
1127                 m->value.id = register_address(adr);
1128                 ++env.n_mem_ops;
1129         }
1130
1131         m->value.address = adr;
1132
1133         for (i = get_irn_n_outs(store) - 1; i >= 0; --i) {
1134                 ir_node *proj = get_irn_out(store, i);
1135                 long    pn;
1136
1137                 /* beware of keep edges */
1138                 if (is_End(proj))
1139                         continue;
1140
1141                 pn = get_Proj_proj(proj);
1142                 m->projs[pn] = proj;
1143                 switch (pn) {
1144                 case pn_Store_X_except:
1145                         m->flags |= FLAG_EXCEPTION;
1146                         break;
1147                 case pn_Store_M:
1148                         m->mem = proj;
1149                         break;
1150                 case pn_Store_X_regular:
1151                         break;
1152                 default:
1153                         panic("Unsupported Proj from Store %+F", proj);
1154                 }
1155         }
1156         m->value.value = get_Store_value(store);
1157         m->value.mode  = get_irn_mode(m->value.value);
1158 }  /* update_Store_memop */
1159
1160 /**
1161  * Update a memop for a Call.
1162  *
1163  * @param m  the memop
1164  */
1165 static void update_Call_memop(memop_t *m)
1166 {
1167         ir_node  *call = m->node;
1168         unsigned prop  = get_Call_memory_properties(call);
1169         int      i;
1170
1171         if (prop & mtp_property_const) {
1172                 /* A constant call does NOT use memory at all, we
1173                    can kick it from the list. */
1174         } else if (prop & mtp_property_pure) {
1175                 /* pure calls READ memory */
1176                 m->flags = 0;
1177         } else
1178                 m->flags = FLAG_KILL_ALL;
1179
1180         for (i = get_irn_n_outs(call) - 1; i >= 0; --i) {
1181                 ir_node *proj = get_irn_out(call, i);
1182
1183                 /* beware of keep edges */
1184                 if (is_End(proj))
1185                         continue;
1186
1187                 switch (get_Proj_proj(proj)) {
1188                 case pn_Call_X_except:
1189                         m->flags |= FLAG_EXCEPTION;
1190                         break;
1191                 case pn_Call_M:
1192                         m->mem = proj;
1193                         break;
1194                 }
1195         }
1196 }  /* update_Call_memop */
1197
1198 /**
1199  * Update a memop for a Div/Mod/Quot/DivMod.
1200  *
1201  * @param m  the memop
1202  */
1203 static void update_DivOp_memop(memop_t *m)
1204 {
1205         ir_node *div = m->node;
1206         int     i;
1207
1208         for (i = get_irn_n_outs(div) - 1; i >= 0; --i) {
1209                 ir_node *proj = get_irn_out(div, i);
1210
1211                 /* beware of keep edges */
1212                 if (is_End(proj))
1213                         continue;
1214
1215                 switch (get_Proj_proj(proj)) {
1216                 case pn_Generic_X_except:
1217                         m->flags |= FLAG_EXCEPTION;
1218                         break;
1219                 case pn_Generic_M:
1220                         m->mem = proj;
1221                         break;
1222                 }
1223         }
1224 }  /* update_DivOp_memop */
1225
1226 /**
1227  * Update a memop for a Phi.
1228  *
1229  * @param m  the memop
1230  */
1231 static void update_Phi_memop(memop_t *m)
1232 {
1233         /* the Phi is its own mem */
1234         m->mem = m->node;
1235 }  /* update_Phi_memop */
1236
1237 /**
1238  * Memory walker: collect all memory ops and build topological lists.
1239  */
1240 static void collect_memops(ir_node *irn, void *ctx)
1241 {
1242         memop_t  *op;
1243         ir_node  *block;
1244         block_t  *entry;
1245
1246         (void) ctx;
1247         if (is_Proj(irn)) {
1248                 /* we can safely ignore ProjM's except the initial memory */
1249                 if (irn != get_irg_initial_mem(current_ir_graph))
1250                         return;
1251         }
1252
1253         op    = alloc_memop(irn);
1254         block = get_nodes_block(irn);
1255         entry = get_block_entry(block);
1256
1257         if (is_Phi(irn)) {
1258                 update_Phi_memop(op);
1259                 /* Phis must always be placed first */
1260                 op->next = entry->memop_forward;
1261                 entry->memop_forward = op;
1262                 if (entry->memop_backward == NULL)
1263                         entry->memop_backward = op;
1264         } else {
1265                 switch (get_irn_opcode(irn)) {
1266                 case iro_Load:
1267                         update_Load_memop(op);
1268                         break;
1269                 case iro_Store:
1270                         update_Store_memop(op);
1271                         break;
1272                 case iro_Call:
1273                         update_Call_memop(op);
1274                         break;
1275                 case iro_Sync:
1276                 case iro_Pin:
1277                         op->mem = irn;
1278                         break;
1279                 case iro_Proj:
1280                         /* initial memory */
1281                         op->mem = irn;
1282                         break;
1283                 case iro_Return:
1284                 case iro_End:
1285                         /* we can use those to find the memory edge */
1286                         break;
1287                 case iro_Div:
1288                 case iro_DivMod:
1289                 case iro_Quot:
1290                 case iro_Mod:
1291                         update_DivOp_memop(op);
1292                         break;
1293
1294                 case iro_Builtin:
1295                         /* TODO: handle some builtins */
1296                 default:
1297                         /* unsupported operation */
1298                         op->flags = FLAG_KILL_ALL;
1299                 }
1300
1301
1302                 /* all other should be placed last */
1303                 if (entry->memop_backward == NULL) {
1304                         entry->memop_forward = entry->memop_backward = op;
1305                 } else {
1306                         entry->memop_backward->next = op;
1307                         entry->memop_backward       = op;
1308                 }
1309         }
1310 }  /* collect_memops */
1311
1312 /**
1313  * Find an address in the current set.
1314  *
1315  * @param value  the value to be searched for
1316  *
1317  * @return a memop for the value or NULL if the value does
1318  *         not exists in the set or cannot be converted into
1319  *         not exist in the set or cannot be converted into
1320  */
1321 static memop_t *find_address(const value_t *value)
1322 {
1323         if (rbitset_is_set(env.curr_set, value->id)) {
1324                 memop_t *res = env.curr_id_2_memop[value->id];
1325
1326                 if (res->value.mode == value->mode)
1327                         return res;
1328                 /* allow hidden casts */
1329                 if (get_mode_arithmetic(res->value.mode) == irma_twos_complement &&
1330                     get_mode_arithmetic(value->mode) == irma_twos_complement &&
1331                     get_mode_size_bits(res->value.mode) == get_mode_size_bits(value->mode))
1332                         return res;
1333         }
1334         return NULL;
1335 }  /* find_address */
1336
1337 /**
1338  * Find an address in the avail_out set.
1339  *
1340  * @param bl     the block
1341  */
1342 static memop_t *find_address_avail(const block_t *bl, unsigned id, const ir_mode *mode)
1343 {
1344         if (rbitset_is_set(bl->avail_out, id)) {
1345                 memop_t *res = bl->id_2_memop_avail[id];
1346
1347                 if (res->value.mode == mode)
1348                         return res;
1349                 /* allow hidden casts */
1350                 if (get_mode_arithmetic(res->value.mode) == irma_twos_complement &&
1351                     get_mode_arithmetic(mode) == irma_twos_complement &&
1352                     get_mode_size_bits(res->value.mode) == get_mode_size_bits(mode))
1353                         return res;
1354         }
1355         return NULL;
1356 }  /* find_address_avail */
1357
1358 /**
1359  * Kill all addresses from the current set.
1360  */
1361 static void kill_all(void)
1362 {
1363         rbitset_clear_all(env.curr_set, env.rbs_size);
1364
1365         /* set a sentinel bit: rbitset_next() will always find a set bit, loops stop at rbs_size - 1 */
1366         rbitset_set(env.curr_set, env.rbs_size - 1);
1367 }  /* kill_all */
1368
1369 /**
1370  * Kill all memops in the current set that might alias the given Store value.
1371  *
1372  * @param value  the Store value
1373  */
1374 static void kill_memops(const value_t *value)
1375 {
1376         unsigned end = env.rbs_size - 1;
1377         unsigned pos;
1378
1379         for (pos = rbitset_next(env.curr_set, 0, 1); pos < end; pos = rbitset_next(env.curr_set, pos + 1, 1)) {
1380                 memop_t *op = env.curr_id_2_memop[pos];
1381
1382                 if (ir_no_alias != get_alias_relation(current_ir_graph, value->address, value->mode,
1383                                                           op->value.address, op->value.mode)) {
1384                         rbitset_clear(env.curr_set, pos);
1385                         env.curr_id_2_memop[pos] = NULL;
1386                         DB((dbg, LEVEL_2, "KILLING %+F because of possible alias address %+F\n", op->node, value->address));
1387                 }
1388         }
1389 }  /* kill_memops */
1390
1391 /**
1392  * Add the value of a memop to the current set.
1393  *
1394  * @param op  the memory op
1395  */
1396 static void add_memop(memop_t *op)
1397 {
1398         rbitset_set(env.curr_set, op->value.id);
1399         env.curr_id_2_memop[op->value.id] = op;
1400 }  /* add_memop */
1401
1402 /**
1403  * Add the value of a memop to the avail_out set.
1404  *
1405  * @param bl  the block
1406  * @param op  the memory op
1407  */
1408 static void add_memop_avail(block_t *bl, memop_t *op)
1409 {
1410         rbitset_set(bl->avail_out, op->value.id);
1411         bl->id_2_memop_avail[op->value.id] = op;
1412 }  /* add_memop_avail */
1413
1414 /**
1415  * Check if we can convert a value of one mode to another mode
1416  * without changing the representation of bits.
1417  *
1418  * @param from  the original mode
1419  * @param to    the destination mode
1420  */
1421 static int can_convert_to(const ir_mode *from, const ir_mode *to)
1422 {
1423         if (get_mode_arithmetic(from) == irma_twos_complement &&
1424             get_mode_arithmetic(to) == irma_twos_complement &&
1425             get_mode_size_bits(from) == get_mode_size_bits(to))
1426                 return 1;
1427         return 0;
1428 }  /* can_convert_to */
1429
1430 /**
1431  * Add a Conv to the requested mode if needed.
1432  *
1433  * @param irn   the IR-node to convert
1434  * @param mode  the destination mode
1435  *
1436  * @return the possible converted node or NULL
1437  *         if the conversion is not possible
1438  */
1439 static ir_node *conv_to(ir_node *irn, ir_mode *mode)
1440 {
1441         ir_mode *other = get_irn_mode(irn);
1442         if (other != mode) {
1443                 /* different modes: check if conversion is possible without changing the bits */
1444                 if (can_convert_to(other, mode)) {
1445                         ir_node *block = get_nodes_block(irn);
1446                         return new_r_Conv(block, irn, mode);
1447                 }
1448                 /* otherwise not possible ... yet */
1449                 return NULL;
1450         }
1451         return irn;
1452 }  /* conv_to */
1453
1454 /**
1455  * Update the address of a value if this address was a Load result
1456  * and that Load has now been killed.
1457  *
1458  * @param value  the value whose address is updated
1459  */
1460 static void update_address(value_t *value)
1461 {
1462         if (is_Proj(value->address)) {
1463                 ir_node *load = get_Proj_pred(value->address);
1464
1465                 if (is_Load(load)) {
1466                         const memop_t *op = get_irn_memop(load);
1467
1468                         if (op->flags & FLAG_KILLED_NODE)
1469                                 value->address = op->replace;
1470                 }
1471         }
1472 }  /* update_address */
1473
1474 /**
1475  * Do forward dataflow analysis on the given block, applying its
1476  * GEN and KILL effects to the current (avail) set.
1477  *
1478  * @param bl  the block
1479  */
1480 static void calc_gen_kill_avail(block_t *bl)
1481 {
1482         memop_t *op;
1483         ir_node *def;
1484
1485         for (op = bl->memop_forward; op != NULL; op = op->next) {
1486                 switch (get_irn_opcode(op->node)) {
1487                 case iro_Phi:
1488                         /* meet */
1489                         break;
1490                 case iro_Sync:
1491                         /* join */
1492                         break;
1493                 case iro_Load:
1494                         if (! (op->flags & (FLAG_KILLED_NODE|FLAG_IGNORE))) {
1495                                 /* do we have this already? */
1496                                 memop_t *other;
1497
1498                                 update_address(&op->value);
1499                                 other = find_address(&op->value);
1500                                 if (other != NULL && other != op) {
1501                                         def = conv_to(other->value.value, op->value.mode);
1502                                         if (def != NULL) {
1503 #ifdef DEBUG_libfirm
1504                                                 if (is_Store(other->node)) {
1505                                                         /* RAW */
1506                                                         DB((dbg, LEVEL_1, "RAW %+F <- %+F(%+F)\n", op->node, def, other->node));
1507                                                 } else {
1508                                                         /* RAR */
1509                                                         DB((dbg, LEVEL_1, "RAR %+F <- %+F(%+F)\n", op->node, def, other->node));
1510                                                 }
1511 #endif
1512                                                 mark_replace_load(op, def);
1513                                                 /* do NOT change the memop table */
1514                                                 continue;
1515                                         }
1516                                 }
1517                                 /* add this value */
1518                                 add_memop(op);
1519                         }
1520                         break;
1521                 case iro_Store:
1522                         if (! (op->flags & FLAG_KILLED_NODE)) {
1523                                 /* do we have this store already */
1524                                 memop_t *other;
1525
1526                                 update_address(&op->value);
1527                                 other = find_address(&op->value);
1528                                 if (other != NULL) {
1529                                         if (is_Store(other->node)) {
1530                                                 if (op != other && !(other->flags & FLAG_IGNORE) &&
1531                                                     get_nodes_block(other->node) == get_nodes_block(op->node)) {
1532                                                         /*
1533                                                          * For a WAW in the same block we can kick the first Store.
1534                                                          * This is a shortcut: we know that the second Store will be
1535                                                          * anticipated then in any case.
1536                                                          */
1537                                                         DB((dbg, LEVEL_1, "WAW %+F <- %+F\n", other->node, op->node));
1538                                                         mark_remove_store(other);
1539                                                         /* FIXME: a Load might get freed due to this killed Store */
1540                                                 }
1541                                         } else if (other->value.value == op->value.value && !(op->flags & FLAG_IGNORE)) {
1542                                                 /* WAR */
1543                                                 DB((dbg, LEVEL_1, "WAR %+F <- %+F\n", op->node, other->node));
1544                                                 mark_remove_store(op);
1545                                                 /* do NOT change the memop table */
1546                                                 continue;
1547                                         }
1548                                 }
1549                                 /* KILL all possible aliases */
1550                                 kill_memops(&op->value);
1551                                 /* add this value */
1552                                 add_memop(op);
1553                         }
1554                         break;
1555                 default:
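                        /* other memory ops flagged FLAG_KILL_ALL (e.g. Calls, CopyB) may modify
                           arbitrary memory: the complete set is killed */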
1556                         if (op->flags & FLAG_KILL_ALL)
1557                                 kill_all();
1558                 }
1559         }
1560 }  /* calc_gen_kill_avail */
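
/*
 * Note: this realizes only the block-local GEN/KILL part of the forward problem;
 * the meet over the predecessors (the Avail_in part) is computed in insert_Load()
 * before this function is re-run for a block.
 */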
1561
1562 #define BYTE_SIZE(x)  (((x) + 7) >> 3)
1563
1564 /**
1565  * Do forward dataflow analysis on a given block to calculate the avail_out set
1566  * for this block only.
1567  *
1568  * @param bl  the block
1569  */
1570 static void forward_avail(block_t *bl)
1571 {
1572         /* fill the data from the current block */
1573         env.curr_id_2_memop = bl->id_2_memop_avail;
1574         env.curr_set        = bl->avail_out;
1575
1576         calc_gen_kill_avail(bl);
1577         dump_curr(bl, "Avail_out");
1578 }  /* forward_avail */
1579
1580 /**
1581  * Do backward dataflow analysis on a given block to calculate the antic set
1582  * of Load addresses.
1583  *
1584  * @param bl  the block
1585  *
1586  * @return non-zero if the set has changed since the last iteration
1587  */
1588 static int backward_antic(block_t *bl)
1589 {
1590         memop_t *op;
1591         ir_node *block = bl->block;
1592         int     n = get_Block_n_cfg_outs(block);
1593
1594         if (n == 1) {
1595                 ir_node  *succ    = get_Block_cfg_out(block, 0);
1596                 block_t  *succ_bl = get_block_entry(succ);
1597                 int      pred_pos = get_Block_cfgpred_pos(succ, block);
1598                 unsigned end      = env.rbs_size - 1;
1599                 unsigned pos;
1600
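                /* start with the empty set: kill_all() leaves only the sentinel bit set */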
1601                 kill_all();
1602
1603                 if (bl->trans_results == NULL) {
1604                         /* allocate the translate cache */
1605                         bl->trans_results = OALLOCNZ(&env.obst, memop_t*, env.curr_adr_id);
1606                 }
1607
1608                 /* check for partly redundant values */
1609                 for (pos = rbitset_next(succ_bl->anticL_in, 0, 1);
1610                      pos < end;
1611                      pos = rbitset_next(succ_bl->anticL_in, pos + 1, 1)) {
1612                         /*
1613                          * do Phi-translation here: Note that at this point the nodes are
1614                          * not changed, so we can safely cache the results.
1615                          * However: Loads of Load results ARE bad, because we have no way
1616                          * to translate them yet ...
1617                          */
1618                         memop_t *op = bl->trans_results[pos];
1619                         if (op == NULL) {
1620                                 /* not yet translated */
1621                                 ir_node *adr, *trans_adr;
1622
1623                                 op  = succ_bl->id_2_memop_antic[pos];
1624                                 adr = op->value.address;
1625
1626                                 trans_adr = phi_translate(adr, succ, pred_pos);
1627                                 if (trans_adr != adr) {
1628                                         /* create a new entry for the translated one */
1629                                         memop_t *new_op;
1630
1631                                         new_op = alloc_memop(NULL);
1632                                         new_op->value.address = trans_adr;
1633                                         new_op->value.id      = register_address(trans_adr);
1634                                         new_op->value.mode    = op->value.mode;
1635                                         new_op->node          = op->node; /* we need the node to decide if Load/Store */
1636                                         new_op->flags         = op->flags;
1637
1638                                         bl->trans_results[pos] = new_op;
1639                                         op = new_op;
1640                                 }
1641                         }
1642                         env.curr_id_2_memop[op->value.id] = op;
1643                         rbitset_set(env.curr_set, op->value.id);
1644                 }
1645         } else if (n > 1) {
1646                 ir_node *succ    = get_Block_cfg_out(block, 0);
1647                 block_t *succ_bl = get_block_entry(succ);
1648                 int i;
1649
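                /* several successors: the anticipated set is the intersection of all
                   successors' anticL_in sets */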
1650                 rbitset_copy(env.curr_set, succ_bl->anticL_in, env.rbs_size);
1651                 memcpy(env.curr_id_2_memop, succ_bl->id_2_memop_antic, env.rbs_size * sizeof(env.curr_id_2_memop[0]));
1652
1653                 /* Hmm: probably we want to kill merges of Loads and Stores here */
1654                 for (i = n - 1; i > 0; --i) {
1655                         ir_node *succ    = get_Block_cfg_out(bl->block, i);
1656                         block_t *succ_bl = get_block_entry(succ);
1657
1658                         rbitset_and(env.curr_set, succ_bl->anticL_in, env.rbs_size);
1659                 }
1660         } else {
1661                 /* block ends with a noreturn call */
1662                 kill_all();
1663         }
1664
1665         dump_curr(bl, "AnticL_out");
1666
1667         for (op = bl->memop_backward; op != NULL; op = op->prev) {
1668                 switch (get_irn_opcode(op->node)) {
1669                 case iro_Phi:
1670                         /* meet */
1671                         break;
1672                 case iro_Sync:
1673                         /* join */
1674                         break;
1675                 case iro_Load:
1676                         if (! (op->flags & (FLAG_KILLED_NODE|FLAG_IGNORE))) {
1677                                 /* always add it */
1678                                 add_memop(op);
1679                         }
1680                         break;
1681                 case iro_Store:
1682                         if (! (op->flags & FLAG_KILLED_NODE)) {
1683                                 /* a Store: check which memops must be killed */
1684                                 kill_memops(&op->value);
1685                         }
1686                         break;
1687                 default:
1688                         if (op->flags & FLAG_KILL_ALL)
1689                                 kill_all();
1690                 }
1691         }
1692
1693         memcpy(bl->id_2_memop_antic, env.curr_id_2_memop, env.rbs_size * sizeof(env.curr_id_2_memop[0]));
1694         if (! rbitsets_equal(bl->anticL_in, env.curr_set, env.rbs_size)) {
1695                 /* changed */
1696                 rbitset_copy(bl->anticL_in, env.curr_set, env.rbs_size);
1697                 dump_curr(bl, "AnticL_in*");
1698                 return 1;
1699         }
1700         dump_curr(bl, "AnticL_in");
1701         return 0;
1702 }  /* backward_antic */
1703
1704 /**
1705  * Replace a Load memop by an already known value.
1706  *
1707  * @param op  the Load memop
1708  */
1709 static void replace_load(memop_t *op)
1710 {
1711         ir_node *load = op->node;
1712         ir_node *def  = skip_Id(op->replace);
1713         ir_node *proj;
1714         ir_mode *mode;
1715
1716         if (def != NULL)
1717                 DB((dbg, LEVEL_1, "Replacing %+F by definition %+F\n", load, is_Proj(def) ? get_Proj_pred(def) : def));
1718         else {
1719                 if (op->flags & FLAG_EXCEPTION) {
1720                         /* bad: this node is unused and executed for exception only */
1721                         DB((dbg, LEVEL_1, "Unused %+F executed for exception only ...\n", load));
1722                         return;
1723                 }
1724                 DB((dbg, LEVEL_1, "Killing unused %+F\n", load));
1725         }
1726
1727         if (op->mem != NULL) {
1728                 /* in rare cases a Load might have NO memory */
1729                 exchange(op->mem, get_Load_mem(load));
1730         }
1731         proj = op->projs[pn_Load_res];
1732         if (proj != NULL) {
1733                 mode = get_irn_mode(proj);
1734                 if (get_irn_mode(def) != mode) {
1735                         /* a hidden cast */
1736                         dbg_info *db    = get_irn_dbg_info(load);
1737                         ir_node  *block = get_nodes_block(proj);
1738                         def = new_rd_Conv(db, block, def, mode);
1739                 }
1740                 exchange(proj, def);
1741         }
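        /* the replaced Load cannot raise an exception anymore: its exception Proj
           becomes Bad and its regular control flow Proj a plain Jmp */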
1742         proj = op->projs[pn_Load_X_except];
1743         if (proj != NULL) {
1744                 exchange(proj, new_Bad());
1745         }
1746         proj = op->projs[pn_Load_X_regular];
1747         if (proj != NULL) {
1748                 exchange(proj, new_r_Jmp(get_nodes_block(load)));
1749         }
1750 }  /* replace_load */
1751
1752 /**
1753  * Remove a Store memop.
1754  *
1755  * @param op  the Store memop
1756  */
1757 static void remove_store(memop_t *op)
1758 {
1759         ir_node *store = op->node;
1760         ir_node *proj;
1761
1762         DB((dbg, LEVEL_1, "Removing %+F\n", store));
1763
1764         if (op->mem != NULL) {
1765                 /* in rare cases a Store might have no memory */
1766                 exchange(op->mem, get_Store_mem(store));
1767         }
1768         proj = op->projs[pn_Store_X_except];
1769         if (proj != NULL) {
1770                 exchange(proj, new_Bad());
1771         }
1772         proj = op->projs[pn_Store_X_regular];
1773         if (proj != NULL) {
1774                 exchange(proj, new_r_Jmp(get_nodes_block(store)));
1775         }
1776 }  /* remove_store */
1777
1778
1779 /**
1780  * Do all necessary replacements for a given block.
1781  *
1782  * @param bl  the block
1783  */
1784 static void do_replacements(block_t *bl)
1785 {
1786         memop_t *op;
1787
1788         for (op = bl->memop_forward; op != NULL; op = op->next) {
1789                 if (op->flags & FLAG_KILLED_NODE) {
1790                         switch (get_irn_opcode(op->node)) {
1791                         case iro_Load:
1792                                 replace_load(op);
1793                                 break;
1794                         case iro_Store:
1795                                 remove_store(op);
1796                                 break;
1797                         }
1798                 }
1799         }
1800 }  /* do_replacements */
1801
1802 /**
1803  * Calculate the Avail_out sets for all basic blocks.
1804  */
1805 static void calcAvail(void)
1806 {
1807         memop_t  **tmp_memop = env.curr_id_2_memop;
1808         unsigned *tmp_set    = env.curr_set;
1809         block_t  *bl;
1810
1811         /* calculate avail_out */
1812         DB((dbg, LEVEL_2, "Calculate Avail_out\n"));
1813
1814         /* iterate over all blocks in any order, skip the start block */
1815         for (bl = env.forward->forward_next; bl != NULL; bl = bl->forward_next) {
1816                 forward_avail(bl);
1817         }
1818
1819         /* restore the current sets */
1820         env.curr_id_2_memop = tmp_memop;
1821         env.curr_set        = tmp_set;
1822 }  /* calcAvail */
1823
1824 /**
1825  * Calculate the Antic_in sets for all basic blocks.
1826  */
1827 static void calcAntic(void)
1828 {
1829         int i, need_iter;
1830
1831         /* calculate antic_in */
1832         DB((dbg, LEVEL_2, "Calculate Antic_in\n"));
1833         i = 0;
1834         do {
1835                 block_t *bl;
1836
1837                 DB((dbg, LEVEL_2, "Iteration %d:\n=========\n", i));
1838
1839                 need_iter = 0;
1840
1841                 /* over all blocks in reverse post order */
1842                 for (bl = env.backward->backward_next; bl != NULL; bl = bl->backward_next) {
1843                         need_iter |= backward_antic(bl);
1844                 }
1845                 ++i;
1846         } while (need_iter);
1847         DB((dbg, LEVEL_2, "Get anticipated Load set after %d iterations\n", i));
1848 }  /* calcAntic */
1849
1850 /**
1851  * Return the node representing the last memory in a block.
1852  *
1853  * @param bl  the block
1854  */
1855 static ir_node *find_last_memory(block_t *bl)
1856 {
1857         for (;;) {
1858                 if (bl->memop_backward != NULL) {
1859                         return bl->memop_backward->mem;
1860                 }
1861                 /* if there is NO memory in this block, go to the dominator */
1862                 bl = get_block_entry(get_Block_idom(bl->block));
1863         }
1864 }  /* find_last_memory */
1865
1866 /**
1867  * Reroute all memory users of old memory
1868  * to a new memory IR-node.
1869  *
1870  * @param omem  the old memory IR-node
1871  * @param nmem  the new memory IR-node
1872  */
1873 static void reroute_all_mem_users(ir_node *omem, ir_node *nmem)
1874 {
1875         int i;
1876
1877         for (i = get_irn_n_outs(omem) - 1; i >= 0; --i) {
1878                 int     n_pos;
1879                 ir_node *user = get_irn_out_ex(omem, i, &n_pos);
1880
1881                 set_irn_n(user, n_pos, nmem);
1882         }
1883
1884         /* all edges that previously pointed to omem now point to nmem */
1885         nmem->out = omem->out;
1886 }  /* reroute_all_mem_users */
1887
1888 /**
1889  * Reroute memory users of old memory that are dominated by a given block
1890  * to a new memory IR-node.
1891  *
1892  * @param omem     the old memory IR-node
1893  * @param nmem     the new memory IR-node
1894  * @param pass_bl  the block the memory must pass
1895  */
1896 static void reroute_mem_through(ir_node *omem, ir_node *nmem, ir_node *pass_bl)
1897 {
1898         int             i, j, n = get_irn_n_outs(omem);
1899         ir_def_use_edge *edges = NEW_ARR_D(ir_def_use_edge, &env.obst, n + 1);
1900
1901         for (i = j = 0; i < n; ++i) {
1902                 int     n_pos;
1903                 ir_node *user   = get_irn_out_ex(omem, i, &n_pos);
1904                 ir_node *use_bl = get_nodes_block(user);
1905
1906
1907                 if (is_Phi(user)) {
1908                         use_bl = get_Block_cfgpred_block(use_bl, n_pos);
1909                 }
1910                 if (block_dominates(pass_bl, use_bl)) {
1911                         /* found a user that is dominated */
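                        /* edges[0].pos stores the number of entries, so the real entries
                           start at index 1 (hence the pre-increment of j) */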
1912                         ++j;
1913                         edges[j].pos = n_pos;
1914                         edges[j].use = user;
1915
1916                         set_irn_n(user, n_pos, nmem);
1917                 }
1918         }
1919
1920         /* Modify the out structure: we create a new out edge array on our
1921            temporary obstack here. This should be no problem, as we invalidate the edges
1922                    at the end anyway. */
1923         /* first entry is used for the length */
1924         edges[0].pos = j;
1925         nmem->out = edges;
1926 }  /* reroute_mem_through */
1927
1928 /**
1929  * insert Loads, making partly redundant Loads fully redundant
1930  */
1931 static int insert_Load(block_t *bl)
1932 {
1933         ir_node  *block = bl->block;
1934         int      i, n = get_Block_n_cfgpreds(block);
1935         unsigned end = env.rbs_size - 1;
1936         unsigned pos;
1937
1938         DB((dbg, LEVEL_3, "processing %+F\n", block));
1939
1940         if (n == 0) {
1941                 /* might still happen for an unreachable block (end for instance) */
1942                 return 0;
1943         }
1944
1945         if (n > 1) {
1946                 ir_node **ins;
1947                 int     pos;
1948
1949                 NEW_ARR_A(ir_node *, ins, n);
1950
1951                 rbitset_set_all(env.curr_set, env.rbs_size);
1952
1953                 /* More than one predecessor: calculate the intersection of all avail_out sets,
1954                    ignoring unevaluated blocks. These contribute Top anyway. */
1955                 for (i = n - 1; i >= 0; --i) {
1956                         ir_node *pred = skip_Proj(get_Block_cfgpred(block, i));
1957                         ir_node *blk  = get_nodes_block(pred);
1958                         block_t *pred_bl;
1959
1960                         pred_bl = get_block_entry(blk);
1961                         rbitset_and(env.curr_set, pred_bl->avail_out, env.rbs_size);
1962
1963                         if (is_Load(pred) || is_Store(pred)) {
1964                                 /* We reached this block by an exception from a Load or Store:
1965                                  * the memop creating the exception was NOT completed then, so kill it
1966                                  */
1967                                 memop_t *exc_op = get_irn_memop(pred);
1968                                 rbitset_clear(env.curr_set, exc_op->value.id);
1969                         }
1970
1971                 }
1972                 /*
1973                  * Ensure that all values are in the map: build Phi's if necessary:
1974                  * Note: the last bit is the sentinel and ALWAYS set, so start at rbs_size - 2.
1975                  */
1976                 for (pos = env.rbs_size - 2; pos >= 0; --pos) {
1977                         if (! rbitset_is_set(env.curr_set, pos))
1978                                 env.curr_id_2_memop[pos] = NULL;
1979                         else {
1980                                 ir_node *pred    = get_Block_cfgpred_block(bl->block, 0);
1981                                 block_t *pred_bl = get_block_entry(pred);
1982                                 int     need_phi = 0;
1983                                 memop_t *first   = NULL;
1984                                 ir_mode *mode    = NULL;
1985
1986                                 for (i = 0; i < n; ++i) {
1987                                         memop_t *mop;
1988
1989                                         pred    = get_Block_cfgpred_block(bl->block, i);
1990                                         pred_bl = get_block_entry(pred);
1991
1992                                         mop = pred_bl->id_2_memop_avail[pos];
1993                                         if (first == NULL) {
1994                                                 first = mop;
1995                                                 ins[0] = first->value.value;
1996                                                 mode = get_irn_mode(ins[0]);
1997
1998                                                 /* no Phi needed so far */
1999                                                 env.curr_id_2_memop[pos] = first;
2000                                         } else {
2001                                                 ins[i] = conv_to(mop->value.value, mode);
2002                                                 if (ins[i] != ins[0]) {
2003                                                         if (ins[i] == NULL) {
2004                                                                 /* conversion failed */
2005                                                                 env.curr_id_2_memop[pos] = NULL;
2006                                                                 rbitset_clear(env.curr_set, pos);
2007                                                                 break;
2008                                                         }
2009                                                         need_phi = 1;
2010                                                 }
2011                                         }
2012                                 }
2013                                 if (need_phi) {
2014                                 /* build a Phi */
2015                                         ir_node *phi = new_r_Phi(bl->block, n, ins, mode);
2016                                         memop_t *phiop = alloc_memop(phi);
2017
2018                                         phiop->value = first->value;
2019                                         phiop->value.value = phi;
2020
2021                                         /* no need to link it in, as it is a DATA phi */
2022
2023                                         env.curr_id_2_memop[pos] = phiop;
2024
2025                                         DB((dbg, LEVEL_3, "Created new %+F on merging value for address %+F\n", phi, first->value.address));
2026                                 }
2027                         }
2028                 }
2029         } else {
2030                 /* only one predecessor, simply copy the map */
2031                 ir_node *pred    = get_Block_cfgpred_block(bl->block, 0);
2032                 block_t *pred_bl = get_block_entry(pred);
2033
2034                 rbitset_copy(env.curr_set, pred_bl->avail_out, env.rbs_size);
2035
2036                 memcpy(env.curr_id_2_memop, pred_bl->id_2_memop_avail, env.rbs_size * sizeof(bl->id_2_memop_avail[0]));
2037         }
2038
2039         if (n > 1) {
2040                 /* check for partly redundant values */
2041                 for (pos = rbitset_next(bl->anticL_in, 0, 1);
2042                      pos < end;
2043                      pos = rbitset_next(bl->anticL_in, pos + 1, 1)) {
2044                         memop_t *op = bl->id_2_memop_antic[pos];
2045                         int     have_some, all_same;
2046                         ir_node *first;
2047
2048                         if (rbitset_is_set(env.curr_set, pos)) {
2049                                 /* already avail */
2050                                 continue;
2051                         }
2052
2053                         assert(is_Load(op->node));
2054
2055                         DB((dbg, LEVEL_3, "anticipated %+F\n", op->node));
2056
2057                         have_some  = 0;
2058                         all_same   = 1;
2059                         first      = NULL;
2060                         for (i = n - 1; i >= 0; --i) {
2061                                 ir_node *pred    = get_Block_cfgpred_block(block, i);
2062                                 block_t *pred_bl = get_block_entry(pred);
2063                                 ir_mode *mode    = op->value.mode;
2064                                 memop_t *e;
2065                                 ir_node *adr;
2066
2067                                 adr = phi_translate(op->value.address, block, i);
2068                                 DB((dbg, LEVEL_3, ".. using address %+F in pred %d\n", adr, i));
2069                                 e   = find_address_avail(pred_bl, register_address(adr), mode);
2070                                 if (e == NULL) {
2071                                         ir_node *ef_block = get_nodes_block(adr);
2072                                         if (! block_dominates(ef_block, pred)) {
2073                                                 /* cannot place a copy here */
2074                                                 have_some = 0;
2075                                                 DB((dbg, LEVEL_3, "%+F cannot be moved into predecessor %+F\n", op->node, pred));
2076                                                 break;
2077                                         }
2078                                         DB((dbg, LEVEL_3, "%+F is not available in predecessor %+F\n", op->node, pred));
2079                                         pred_bl->avail = NULL;
2080                                         all_same       = 0;
2081                                 } else {
2082                                         if (e->value.mode != mode && !can_convert_to(e->value.mode, mode)) {
2083                                                 /* cannot create a Phi due to different modes */
2084                                                 have_some = 0;
2085                                                 break;
2086                                         }
2087                                         pred_bl->avail = e;
2088                                         have_some      = 1;
2089                                         DB((dbg, LEVEL_3, "%+F is available for %+F in predecessor %+F\n", e->node, op->node, pred));
2090                                         if (first == NULL)
2091                                                 first = e->node;
2092                                         else if (first != e->node)
2093                                                 all_same = 0;
2094                                 }
2095                         }
2096                         if (have_some && !all_same) {
2097                                 ir_mode *mode = op->value.mode;
2098                                 ir_node **in, *phi;
2099                                 memop_t *phi_op;
2100
2101                                 NEW_ARR_A(ir_node *, in, n);
2102
2103                                 for (i = n - 1; i >= 0; --i) {
2104                                         ir_node *pred    = get_Block_cfgpred_block(block, i);
2105                                         block_t *pred_bl = get_block_entry(pred);
2106
2107                                         if (pred_bl->avail == NULL) {
2108                                                 /* create a new Load here to make it fully redundant */
2109                                                 dbg_info *db       = get_irn_dbg_info(op->node);
2110                                                 ir_node  *last_mem = find_last_memory(pred_bl);
2111                                                 ir_node  *load, *def, *adr;
2112                                                 memop_t  *new_op;
2113
2114                                                 assert(last_mem != NULL);
2115                                                 adr  = phi_translate(op->value.address, block, i);
2116                                                 load = new_rd_Load(db, pred, last_mem, adr, mode, cons_none);
2117                                                 def  = new_r_Proj(load, mode, pn_Load_res);
2118                                                 DB((dbg, LEVEL_1, "Created new %+F in %+F for partly redundant %+F\n", load, pred, op->node));
2119
2120                                                 new_op                = alloc_memop(load);
2121                                                 new_op->mem           = new_r_Proj(load, mode_M, pn_Load_M);
2122                                                 new_op->value.address = adr;
2123                                                 new_op->value.id      = op->value.id;
2124                                                 new_op->value.mode    = mode;
2125                                                 new_op->value.value   = def;
2126
2127                                                 new_op->projs[pn_Load_M]   = new_op->mem;
2128                                                 new_op->projs[pn_Load_res] = def;
2129
2130                                                 new_op->prev = pred_bl->memop_backward;
2131                                                 if (pred_bl->memop_backward != NULL)
2132                                                         pred_bl->memop_backward->next = new_op;
2133
2134                                                 pred_bl->memop_backward = new_op;
2135
2136                                                 if (pred_bl->memop_forward == NULL)
2137                                                         pred_bl->memop_forward = new_op;
2138
2139                                                 if (get_nodes_block(last_mem) == pred) {
2140                                                         /* We have added a new last memory op in the pred block.
2141                                                            If pred already had a last mem, reroute all memory
2142                                                            users. */
2143                                                         reroute_all_mem_users(last_mem, new_op->mem);
2144                                                 } else {
2145                                                         /* reroute only those memory edges going through the pred block */
2146                                                         reroute_mem_through(last_mem, new_op->mem, pred);
2147                                                 }
2148
2149                                                 /* we added this load at the end, so it will be avail anyway */
2150                                                 add_memop_avail(pred_bl, new_op);
2151                                                 pred_bl->avail = new_op;
2152                                         }
2153                                         in[i] = conv_to(pred_bl->avail->value.value, mode);
2154                                 }
2155                                 phi = new_r_Phi(block, n, in, mode);
2156                                 DB((dbg, LEVEL_1, "Created new %+F in %+F for now redundant %+F\n", phi, block, op->node));
2157
2158                                 phi_op = clone_memop_phi(op, phi);
2159                                 add_memop(phi_op);
2160                         }
2161                 }
2162         }
2163
2164         /* recalculate avail by gen and kill */
2165         calc_gen_kill_avail(bl);
2166
2167         /* always update the map after gen/kill, as values might have been changed due to RAR/WAR/WAW */
2168         memcpy(bl->id_2_memop_avail, env.curr_id_2_memop, env.rbs_size * sizeof(env.curr_id_2_memop[0]));
2169
2170         if (!rbitsets_equal(bl->avail_out, env.curr_set, env.rbs_size)) {
2171                 /* the avail set has changed */
2172                 rbitset_copy(bl->avail_out, env.curr_set, env.rbs_size);
2173                 dump_curr(bl, "Avail_out*");
2174                 return 1;
2175         }
2176         dump_curr(bl, "Avail_out");
2177         return 0;
2178 }  /* insert_Load */
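
/*
 * Rough illustration of what insert_Load() achieves (not taken from the original
 * sources):
 *
 *     if (c) { x = *p; ... }          if (c) { x = *p; ... } else { t = *p; }
 *     y = *p;                  -->    y = Phi(x, t);
 *
 * The Load of *p was only partly redundant (available on the then-path only).
 * A copy of it is inserted into the other predecessor, a Phi merges the available
 * values, and the now fully redundant Load is removed later in do_replacements().
 */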
2179
2180 /**
2181  * Insert Loads upwards.
2182  */
2183 static void insert_Loads_upwards(void)
2184 {
2185         int i, need_iter;
2186         block_t *bl;
2187
2188         /* recalculate antic_out and insert Loads */
2189         DB((dbg, LEVEL_2, "Inserting Loads\n"));
2190
2191         i = 0;
2192         do {
2193                 DB((dbg, LEVEL_2, "Iteration %d:\n=========\n", i));
2194
2195                 need_iter = 0;
2196
2197                 /* over all blocks in reverse post order, skip the start block */
2198                 for (bl = env.forward->forward_next; bl != NULL; bl = bl->forward_next) {
2199                         need_iter |= insert_Load(bl);
2200                 }
2201                 ++i;
2202         } while (need_iter);
2203
2204         DB((dbg, LEVEL_2, "Finished Load inserting after %d iterations\n", i));
2205 }  /* insert_Loads_upwards */
2206
2207 /**
2208  * Kill unreachable control flow.
2209  *
2210  * @param irg  the graph to operate on
2211  */
2212 static void kill_unreachable_blocks(ir_graph *irg)
2213 {
2214         block_t *bl;
2215         ir_node **ins;
2216         int     changed = 0;
2217
2218         NEW_ARR_A(ir_node *, ins, env.max_cfg_preds);
2219
2220         for (bl = env.forward; bl != NULL; bl = bl->forward_next) {
2221                 ir_node *block = bl->block;
2222                 int     i, j, k, n;
2223
2224                 assert(get_Block_mark(block));
2225
2226                 n = get_Block_n_cfgpreds(block);
2227
2228                 for (i = j = 0; i < n; ++i) {
2229                         ir_node *pred = get_Block_cfgpred(block, i);
2230                         ir_node *pred_bl;
2231
2232                         if (is_Bad(pred))
2233                                 continue;
2234
2235                         pred_bl = get_nodes_block(skip_Proj(pred));
2236                         if (! get_Block_mark(pred_bl))
2237                                 continue;
2238
2239                         ins[j++] = pred;
2240                 }
2241                 if (j != n) {
2242                         ir_node *phi, *next;
2243
2244                         /* some unreachable blocks detected */
2245                         changed = 1;
2246
2247                         DB((dbg, LEVEL_1, "Killing dead block predecessors on %+F\n", block));
2248
2249                         set_irn_in(block, j, ins);
2250
2251                         /* shorten all Phi nodes */
2252                         for (phi = get_Block_phis(block); phi != NULL; phi = next) {
2253                                 next = get_Phi_next(phi);
2254
2255                                 for (i = k = 0; i < n; ++i) {
2256                                         ir_node *pred = get_Block_cfgpred_block(block, i);
2257
2258                                         if (is_Bad(pred))
2259                                                 continue;
2260
2261                                         if (! get_Block_mark(pred))
2262                                                 continue;
2263
2264                                         ins[k++] = get_Phi_pred(phi, i);
2265                                 }
2266                                 if (k == 1)
2267                                         exchange(phi, ins[0]);
2268                                 else
2269                                         set_irn_in(phi, k, ins);
2270                         }
2271                 }
2272
2273         }
2274
2275         if (changed) {
2276                 /* kick keep alives */
2277                 ir_node *end = get_irg_end(irg);
2278                 int     i, j, n = get_End_n_keepalives(end);
2279
2280                 NEW_ARR_A(ir_node *, ins, n);
2281
2282                 for (i = j = 0; i < n; ++i) {
2283                         ir_node *ka = get_End_keepalive(end, i);
2284                         ir_node *ka_bl;
2285
2286                         if (is_Bad(ka))
2287                                 continue;
2288                         if (is_Block(ka))
2289                                 ka_bl = ka;
2290                         else
2291                                 ka_bl = get_nodes_block(skip_Proj(ka));
2292                         if (get_Block_mark(ka_bl))
2293                                 ins[j++] = ka;
2294                 }
2295                 if (j != n)
2296                         set_End_keepalives(end, j, ins);
2297
2298                 free_irg_outs(irg);
2299
2300                 /* this transformation does NOT invalidate the dominance information */
2301         }
2302 }  /* kill_unreachable_blocks */
2303
2304 int opt_ldst(ir_graph *irg)
2305 {
2306         block_t  *bl;
2307         ir_graph *rem = current_ir_graph;
2308
2309         current_ir_graph = irg;
2310
2311         FIRM_DBG_REGISTER(dbg, "firm.opt.ldst");
2312
2313         DB((dbg, LEVEL_1, "\nDoing Load/Store optimization on %+F\n", irg));
2314
2315         /* we need landing pads */
2316         remove_critical_cf_edges(irg);
2317
2318         if (get_opt_alias_analysis()) {
2319                 assure_irg_entity_usage_computed(irg);
2320                 assure_irp_globals_entity_usage_computed();
2321         }
2322
2323         obstack_init(&env.obst);
2324         ir_nodemap_init(&env.adr_map);
2325
2326         env.forward       = NULL;
2327         env.backward      = NULL;
2328         env.curr_adr_id   = 0;
2329         env.n_mem_ops     = 0;
2330         env.max_cfg_preds = 0;
2331         env.changed       = 0;
2332         env.start_bl      = get_irg_start_block(irg);
2333         env.end_bl        = get_irg_end_block(irg);
2334 #ifdef DEBUG_libfirm
2335         env.id_2_address  = NEW_ARR_F(ir_node *, 0);
2336 #endif
2337
2338         assure_irg_outs(irg);
2339
2340         ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_BLOCK_MARK);
2341
2342         /* first step: allocate block entries. Note that some blocks might be
2343            unreachable here. Using the normal walk ensures that ALL blocks are initialized. */
2344         irg_walk_graph(irg, prepare_blocks, link_phis, NULL);
2345
2346         /* produce an inverse post-order list for the CFG: this links only reachable
2347            blocks */
2348         irg_out_block_walk(get_irg_start_block(irg), NULL, inverse_post_order, NULL);
2349
2350         if (! get_Block_mark(env.end_bl)) {
2351                 /*
2352                  * The end block is NOT reachable due to endless loops
2353                  * or no_return calls.
2354                  * Place the end block last.
2355                  * env.backward points to the last block in the list for this purpose.
2356                  */
2357                 env.backward->forward_next = get_block_entry(env.end_bl);
2358
2359                 set_Block_mark(env.end_bl, 1);
2360         }
2361
2362         /* KILL unreachable blocks: these disturb the data flow analysis */
2363         kill_unreachable_blocks(irg);
2364
2365         assure_doms(irg);
2366
2367         /* second step: find and sort all memory ops */
2368         walk_memory_irg(irg, collect_memops, NULL, NULL);
2369
2370 #ifdef DEBUG_libfirm
2371         /* check that the backward map is correct */
2372         assert((unsigned)ARR_LEN(env.id_2_address) == env.curr_adr_id);
2373 #endif
2374
2375         if (env.n_mem_ops == 0) {
2376                 /* no memory ops */
2377                 goto end;
2378         }
2379
2380         /* create the backward links. */
2381         env.backward = NULL;
2382         irg_block_walk_graph(irg, NULL, collect_backward, NULL);
2383
2384         /* link the end block in */
2385         bl = get_block_entry(env.end_bl);
2386         bl->backward_next = env.backward;
2387         env.backward      = bl;
2388
2389         /* check that we really start with the start / end block */
2390         assert(env.forward->block  == env.start_bl);
2391         assert(env.backward->block == env.end_bl);
2392
2393         /* create address sets: for now, only the existing addresses are allowed plus one
2394            needed for the sentinel */
2395         env.rbs_size = env.curr_adr_id + 1;
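        /* the extra bit is the sentinel: it is always set, so rbitset_next() scans
           always find a set bit and can simply be stopped at rbs_size - 1 */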
2396
2397         /* create the current set */
2398         env.curr_set = rbitset_obstack_alloc(&env.obst, env.rbs_size);
2399         rbitset_set(env.curr_set, env.rbs_size - 1);
2400         env.curr_id_2_memop = NEW_ARR_D(memop_t *, &env.obst, env.rbs_size);
2401         memset(env.curr_id_2_memop, 0, env.rbs_size * sizeof(env.curr_id_2_memop[0]));
2402
2403         for (bl = env.forward; bl != NULL; bl = bl->forward_next) {
2404                 /* set sentinel bits */
2405                 bl->avail_out  = rbitset_obstack_alloc(&env.obst, env.rbs_size);
2406                 rbitset_set(bl->avail_out, env.rbs_size - 1);
2407
2408                 bl->id_2_memop_avail = NEW_ARR_D(memop_t *, &env.obst, env.rbs_size);
2409                 memset(bl->id_2_memop_avail, 0, env.rbs_size * sizeof(bl->id_2_memop_avail[0]));
2410
2411                 bl->anticL_in  = rbitset_obstack_alloc(&env.obst, env.rbs_size);
2412                 rbitset_set(bl->anticL_in, env.rbs_size - 1);
2413
2414                 bl->id_2_memop_antic = NEW_ARR_D(memop_t *, &env.obst, env.rbs_size);
2415                 memset(bl->id_2_memop_antic, 0, env.rbs_size * sizeof(bl->id_2_memop_antic[0]));
2416         }
2417
2418         (void) dump_block_list;
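        /* keep a reference to the debug helper to avoid an unused-function warning */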
2419
2420         calcAvail();
2421         calcAntic();
2422
2423         insert_Loads_upwards();
2424
2425         if (env.changed) {
2426                 /* over all blocks in reverse post order */
2427                 for (bl = env.forward; bl != NULL; bl = bl->forward_next) {
2428                         do_replacements(bl);
2429                 }
2430
2431                 /* not only invalidate but free the outs: we might have allocated new out
2432                    arrays on our obstack, which will be deleted below anyway. */
2433                 free_irg_outs(irg);
2434                 set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
2435         }
2436 end:
2437
2438         ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_BLOCK_MARK);
2439         ir_nodemap_destroy(&env.adr_map);
2440         obstack_free(&env.obst, NULL);
2441
2442 #ifdef DEBUG_libfirm
2443         DEL_ARR_F(env.id_2_address);
2444 #endif
2445
2446         current_ir_graph = rem;
2447         return env.changed != 0;
2448 }  /* opt_ldst */
2449
2450 ir_graph_pass_t *opt_ldst_pass(const char *name)
2451 {
2452         return def_graph_pass_ret(name ? name : "ldst_df", opt_ldst);
2453 }  /* opt_ldst_pass */