/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Dataflow driven Load/Store optimizations, uses some ideas from
 *          VanDrunen's LEPRE
 * @author  Michael Beck
 */
#include "iroptimize.h"
#include "irnodehashmap.h"
#include "raw_bitset.h"
/* maximum number of output Proj's */
#define MAX_PROJ ((long)pn_Load_max > (long)pn_Store_max ? (long)pn_Load_max : (long)pn_Store_max)
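
/*
 * Note: the projs[] array of a memop (see below) is indexed with the Proj
 * numbers of both Loads and Stores, so it must be sized for whichever
 * numbering is larger; hence the maximum of pn_Load_max and pn_Store_max.
 */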
/**
 * Mapping an address to a dense ID.
 */
typedef struct address_entry_t {
    unsigned id;    /**< The ID */
} address_entry;
/** memop flags */
enum {
    FLAG_KILL_ALL    = 1, /**< KILL all addresses */
    FLAG_KILLED_NODE = 2, /**< this node was killed */
    FLAG_EXCEPTION   = 4, /**< this node has exception flow */
    FLAG_IGNORE      = 8, /**< ignore this node (volatile or other) */
};
/**
 * A value: This represents a value stored at a given address in
 * memory. Do not confuse with values from value numbering.
 */
typedef struct value_t value_t;
struct value_t {
    ir_node  *address; /**< the address of this value */
    ir_node  *value;   /**< the value itself */
    ir_mode  *mode;    /**< the mode of the value */
    unsigned id;       /**< address id */
};
/**
 * A memop describes a memory-related operation.
 * These are Loads/Stores and all other ops that might modify
 * memory (Calls, CopyB) or cause exceptions.
 */
typedef struct memop_t memop_t;
struct memop_t {
    value_t  value;    /**< the value of this memop: only defined for Load/Store */
    ir_node  *node;    /**< the memory op itself */
    ir_node  *mem;     /**< the memory FROM this node */
    ir_node  *replace; /**< the replacement node if this memop is replaced */
    memop_t  *next;    /**< links to the next memory op in the block in forward order */
    memop_t  *prev;    /**< links to the previous memory op in the block in forward order */
    unsigned flags;    /**< memop flags */
    ir_node  *projs[MAX_PROJ+1]; /**< Projs of this memory op */
};
/**
 * Additional data for every basic block.
 */
typedef struct block_t block_t;
struct block_t {
    memop_t  *memop_forward;     /**< topologically sorted list of memory ops in this block */
    memop_t  *memop_backward;    /**< last memop in the list */
    unsigned *avail_out;         /**< out-set of available addresses */
    memop_t  **id_2_memop_avail; /**< maps avail address ids to memops */
    unsigned *anticL_in;         /**< in-set of anticipated Load addresses */
    memop_t  **id_2_memop_antic; /**< maps anticipated address ids to memops */
    ir_node  *block;             /**< the associated block */
    block_t  *forward_next;      /**< next block entry for forward iteration */
    block_t  *backward_next;     /**< next block entry for backward iteration */
    memop_t  *avail;             /**< used locally for the avail map */
    memop_t  **trans_results;    /**< used to cache translated nodes in the antic calculation */
};
/**
 * Metadata for this pass.
 */
typedef struct ldst_env_t {
    struct obstack   obst;              /**< obstack for temporary data */
    ir_nodehashmap_t adr_map;           /**< maps addresses to their address_entry */
    block_t          *forward;          /**< Inverse post-order list of all blocks Start->End */
    block_t          *backward;         /**< Inverse post-order list of all blocks End->Start */
    ir_node          *start_bl;         /**< start block of the current graph */
    ir_node          *end_bl;           /**< end block of the current graph */
    unsigned         *curr_set;         /**< current set of addresses */
    memop_t          **curr_id_2_memop; /**< current map of address ids to memops */
    unsigned         curr_adr_id;       /**< number for address mapping */
    unsigned         n_mem_ops;         /**< number of memory operations (Loads/Stores) */
    size_t           rbs_size;          /**< number of bits in all bitsets */
    int              max_cfg_preds;     /**< maximum number of block cfg predecessors */
    int              changed;           /**< Flags for changed graph state */
    ir_node          **id_2_address;    /**< maps an id to the used address */
} ldst_env;

/* the one and only environment */
static ldst_env env;
#ifdef DEBUG_libfirm

static firm_dbg_module_t *dbg;

/**
 * Dumps the block list.
 *
 * @param env  the ldst environment
 */
static void dump_block_list(ldst_env *env)
{
    block_t *entry;
    memop_t *op;

    for (entry = env->forward; entry != NULL; entry = entry->forward_next) {
        DB((dbg, LEVEL_2, "%+F {", entry->block));

        for (op = entry->memop_forward; op != NULL; op = op->next) {
            DB((dbg, LEVEL_2, "\n\t"));
            DB((dbg, LEVEL_2, "%+F", op->node));
            if ((op->flags & FLAG_KILL_ALL) == FLAG_KILL_ALL)
                DB((dbg, LEVEL_2, "X"));
            else if (op->flags & FLAG_KILLED_NODE)
                DB((dbg, LEVEL_2, "K"));
            DB((dbg, LEVEL_2, ", "));
        }
        DB((dbg, LEVEL_2, "\n}\n\n"));
    }
} /* dump_block_list */
/**
 * Dumps the current set.
 *
 * @param bl  current block
 * @param s   name of the set
 */
static void dump_curr(block_t *bl, const char *s)
{
    size_t end = env.rbs_size - 1;
    size_t pos;

    DB((dbg, LEVEL_2, "%s[%+F] = {", s, bl->block));
    for (pos = rbitset_next(env.curr_set, 0, 1); pos < end; pos = rbitset_next(env.curr_set, pos + 1, 1)) {
        memop_t *op = env.curr_id_2_memop[pos];

        DB((dbg, LEVEL_2, "\n\t"));
        DB((dbg, LEVEL_2, "<%+F, %+F>, ", op->value.address, op->value.value));
    }
    DB((dbg, LEVEL_2, "\n}\n"));
} /* dump_curr */
#else

static void dump_block_list(ldst_env *env)
{
    (void)env;
}

static void dump_curr(block_t *bl, const char *s)
{
    (void)bl;
    (void)s;
}
#endif /* DEBUG_libfirm */
/** Get the block entry for a block node */
static block_t *get_block_entry(const ir_node *block)
{
    assert(is_Block(block));

    return (block_t*)get_irn_link(block);
} /* get_block_entry */

/** Get the memop entry for a memory operation node */
static memop_t *get_irn_memop(const ir_node *irn)
{
    assert(! is_Block(irn));
    return (memop_t*)get_irn_link(irn);
} /* get_irn_memop */
/**
 * Walk over the memory edges from definition to users.
 * This ensures that even operations without a memory output are found.
 *
 * @param irn   start node
 * @param pre   pre walker function
 * @param post  post walker function
 * @param ctx   context parameter for the walker functions
 */
static void walk_memory(ir_node *irn, irg_walk_func *pre, irg_walk_func *post, void *ctx)
{
    ir_mode *mode;

    mark_irn_visited(irn);

    if (pre)
        pre(irn, ctx);

    mode = get_irn_mode(irn);
    if (mode == mode_M) {
        /* every successor uses memory */
        for (unsigned i = get_irn_n_outs(irn); i-- > 0; ) {
            ir_node *succ = get_irn_out(irn, i);

            if (! irn_visited(succ))
                walk_memory(succ, pre, post, ctx);
        }
    } else if (mode == mode_T) {
        /* only some Proj's use memory */
        for (unsigned i = get_irn_n_outs(irn); i-- > 0; ) {
            ir_node *proj = get_irn_out(irn, i);

            if (get_irn_mode(proj) == mode_M && ! irn_visited(proj))
                walk_memory(proj, pre, post, ctx);
        }
    }
    if (post)
        post(irn, ctx);
} /* walk_memory */
/**
 * Walks over all memory nodes of a graph.
 *
 * @param irg   the graph
 * @param pre   pre walker function
 * @param post  post walker function
 * @param ctx   context parameter for the walker functions
 */
static void walk_memory_irg(ir_graph *irg, irg_walk_func pre, irg_walk_func post, void *ctx)
{
    inc_irg_visited(irg);

    ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);

    /*
     * there are two possible sources for memory: initial_mem and nomem
     * we ignore nomem as this should NOT change the memory
     */
    walk_memory(get_irg_initial_mem(irg), pre, post, ctx);

    ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
} /* walk_memory_irg */
/**
 * Register an address and allocate a (dense, 0..n) ID for it.
 *
 * @param adr  the IR-node representing the address
 *
 * @return the allocated id
 */
static unsigned register_address(ir_node *adr)
{
    address_entry *entry;

    /* skip Confirms and Casts */
    for (;;) {
        if (is_Confirm(adr)) {
            adr = get_Confirm_value(adr);
        } else if (is_Cast(adr)) {
            adr = get_Cast_op(adr);
        } else
            break;
    }

    entry = ir_nodehashmap_get(address_entry, &env.adr_map, adr);
    if (entry == NULL) {
        /* new address */
        entry = OALLOC(&env.obst, address_entry);

        entry->id = env.curr_adr_id++;
        ir_nodehashmap_insert(&env.adr_map, adr, entry);

        DB((dbg, LEVEL_3, "ADDRESS %+F has ID %u\n", adr, entry->id));

        ARR_APP1(ir_node *, env.id_2_address, adr);
    }
    return entry->id;
} /* register_address */
/**
 * Translate an address through a Phi node into a given predecessor
 * block.
 *
 * @param address  the address
 * @param block    the block
 * @param pos      the position of the predecessor in block
 */
static ir_node *phi_translate(ir_node *address, const ir_node *block, int pos)
{
    if (is_Phi(address) && get_nodes_block(address) == block)
        address = get_Phi_pred(address, pos);
    return address;
} /* phi_translate */
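
/*
 * Illustrative example (not part of the pass): assume block B has two
 * predecessors P0 and P1 and contains the address Phi
 *
 *     adr = Phi(adr0, adr1)   (in B)
 *
 * Then phi_translate(adr, B, 0) yields adr0 and phi_translate(adr, B, 1)
 * yields adr1, i.e. the address as seen when entering B from the respective
 * predecessor. Any address not defined by a Phi in B passes through unchanged.
 */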
/**
 * Walker: allocate a block entry for every block
 * and register all potential addresses.
 */
static void prepare_blocks(ir_node *irn, void *ctx)
{
    (void)ctx;

    if (is_Block(irn)) {
        block_t *entry = OALLOC(&env.obst, block_t);
        int     n;

        entry->memop_forward    = NULL;
        entry->memop_backward   = NULL;
        entry->avail_out        = NULL;
        entry->id_2_memop_avail = NULL;
        entry->anticL_in        = NULL;
        entry->id_2_memop_antic = NULL;
        entry->block            = irn;
        entry->forward_next     = NULL;
        entry->backward_next    = NULL;
        entry->avail            = NULL;
        entry->trans_results    = NULL;
        set_irn_link(irn, entry);

        set_Block_phis(irn, NULL);

        /* use block marks to track unreachable blocks */
        set_Block_mark(irn, 0);

        n = get_Block_n_cfgpreds(irn);
        if (n > env.max_cfg_preds)
            env.max_cfg_preds = n;
    } else {
        ir_mode *mode = get_irn_mode(irn);

        if (mode_is_reference(mode)) {
            /*
             * Register ALL possible addresses: this is overkill yet, but
             * simpler than doing it for all possible translated addresses
             * (which would be sufficient at the moment).
             */
            (void)register_address(irn);
        }
    }
} /* prepare_blocks */
/**
 * Post-Walker, link in all Phi's.
 */
static void link_phis(ir_node *irn, void *ctx)
{
    (void)ctx;

    if (is_Phi(irn)) {
        ir_node *block = get_nodes_block(irn);
        add_Block_phi(block, irn);
    }
} /* link_phis */
/**
 * Block walker: creates the inverse post-order list for the CFG.
 */
static void inverse_post_order(ir_node *block, void *ctx)
{
    block_t *entry = get_block_entry(block);

    (void)ctx;

    /* mark this block as reachable from start */
    set_Block_mark(block, 1);

    /* create the list in inverse order */
    entry->forward_next = env.forward;
    env.forward         = entry;

    /* remember the first visited (last in list) entry, needed for later */
    if (env.backward == NULL)
        env.backward = entry;
} /* inverse_post_order */
/**
 * Block walker: create backward links for the memops of a block.
 */
static void collect_backward(ir_node *block, void *ctx)
{
    block_t *entry = get_block_entry(block);
    memop_t *last, *op;

    (void)ctx;

    /*
     * Do NOT link in the end block yet. We want it to be
     * the first in the list. This is NOT guaranteed by the walker
     * if we have endless loops.
     */
    if (block != env.end_bl) {
        entry->backward_next = env.backward;

        /* create the list in inverse order */
        env.backward = entry;
    }

    /* create backward links for all memory ops */
    last = NULL;
    for (op = entry->memop_forward; op != NULL; op = op->next) {
        op->prev = last;
        last     = op;
    }
    entry->memop_backward = last;
} /* collect_backward */
/**
 * Allocate a memop.
 *
 * @param irn  the IR-node representing the memop or NULL
 *             if this is a translated (virtual) memop
 *
 * @return the allocated memop
 */
static memop_t *alloc_memop(ir_node *irn)
{
    memop_t *m = OALLOC(&env.obst, memop_t);

    m->value.address = NULL;
    m->value.value   = NULL;
    m->value.mode    = NULL;

    m->node    = irn;
    m->mem     = NULL;
    m->replace = NULL;
    m->next    = NULL;
    m->flags   = 0;

    memset(m->projs, 0, sizeof(m->projs));

    if (irn != NULL)
        set_irn_link(irn, m);
    return m;
} /* alloc_memop */
/**
 * Create a memop for a Phi-replacement.
 *
 * @param op   the memop to clone
 * @param phi  the Phi-node representing the new value
 */
static memop_t *clone_memop_phi(memop_t *op, ir_node *phi)
{
    memop_t *m = OALLOC(&env.obst, memop_t);

    m->value       = op->value;
    m->value.value = phi;

    m->node    = phi;
    m->mem     = NULL;
    m->replace = NULL;
    m->next    = NULL;
    m->flags   = 0;

    set_irn_link(phi, m);
    return m;
} /* clone_memop_phi */
/**
 * Return the memory properties of a call node.
 *
 * @param call  the call node
 *
 * @return a bitset of mtp_property_const and mtp_property_pure
 */
static unsigned get_Call_memory_properties(ir_node *call)
{
    ir_type  *call_tp = get_Call_type(call);
    unsigned prop     = get_method_additional_properties(call_tp);

    /* check first the call type */
    if ((prop & (mtp_property_const|mtp_property_pure)) == 0) {
        /* try the called entity */
        ir_node *ptr = get_Call_ptr(call);

        if (is_SymConst_addr_ent(ptr)) {
            ir_entity *ent = get_SymConst_entity(ptr);

            prop = get_entity_additional_properties(ent);
        }
    }
    return prop & (mtp_property_const|mtp_property_pure);
} /* get_Call_memory_properties */
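
/*
 * For illustration: a function like
 *
 *     int sum(const int *arr, int n);   -- reads memory, no side effects
 *
 * would typically carry mtp_property_pure, while
 *
 *     int add(int a, int b);            -- touches no memory at all
 *
 * would carry mtp_property_const. Pure calls leave the tracked address set
 * alive; calls with neither property kill it (see update_Call_memop() below).
 */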
/**
 * Returns an entity if the address ptr points to a constant one.
 *
 * @param ptr  the address
 *
 * @return an entity or NULL
 */
static ir_entity *find_constant_entity(ir_node *ptr)
{
    for (;;) {
        if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
            return get_SymConst_entity(ptr);
        } else if (is_Sel(ptr)) {
            ir_entity *ent = get_Sel_entity(ptr);
            ir_type   *tp  = get_entity_owner(ent);

            /* Do not fiddle with polymorphism. */
            if (is_Class_type(get_entity_owner(ent)) &&
                ((get_entity_n_overwrites(ent) != 0) ||
                 (get_entity_n_overwrittenby(ent) != 0) ) )
                return NULL;

            if (is_Array_type(tp)) {
                /* check bounds */
                int i, n;

                for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
                    ir_node   *bound;
                    ir_tarval *tlower, *tupper;
                    ir_node   *index = get_Sel_index(ptr, i);
                    ir_tarval *tv    = computed_value(index);

                    /* check if the index is constant */
                    if (tv == tarval_bad)
                        return NULL;

                    bound  = get_array_lower_bound(tp, i);
                    tlower = computed_value(bound);
                    bound  = get_array_upper_bound(tp, i);
                    tupper = computed_value(bound);

                    if (tlower == tarval_bad || tupper == tarval_bad)
                        return NULL;

                    if (tarval_cmp(tv, tlower) == ir_relation_less)
                        return NULL;
                    if (tarval_cmp(tupper, tv) == ir_relation_less)
                        return NULL;

                    /* ok, bounds check finished */
                }
            }

            if (get_entity_linkage(ent) == IR_LINKAGE_CONSTANT)
                return ent;

            /* try next */
            ptr = get_Sel_ptr(ptr);
        } else if (is_Add(ptr)) {
            ir_node *l = get_Add_left(ptr);
            ir_node *r = get_Add_right(ptr);

            if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
                ptr = l;
            else if (get_irn_mode(r) == get_irn_mode(ptr) && is_Const(l))
                ptr = r;
            else
                return NULL;

            /* for now, we support only one addition, reassoc should fold all others */
            if (! is_SymConst(ptr) && !is_Sel(ptr))
                return NULL;
        } else if (is_Sub(ptr)) {
            ir_node *l = get_Sub_left(ptr);
            ir_node *r = get_Sub_right(ptr);

            if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
                ptr = l;
            else
                return NULL;
            /* for now, we support only one subtraction, reassoc should fold all others */
            if (! is_SymConst(ptr) && !is_Sel(ptr))
                return NULL;
        } else
            return NULL;
    }
} /* find_constant_entity */
/**
 * Return the Sel index of a Sel node from dimension dim.
 * The index must be constant.
 */
static long get_Sel_array_index_long(ir_node *n, int dim)
{
    ir_node *index = get_Sel_index(n, dim);
    assert(is_Const(index));
    return get_tarval_long(get_Const_tarval(index));
} /* get_Sel_array_index_long */
typedef struct path_entry {
    ir_entity         *ent;   /**< the entity of this path step */
    struct path_entry *next;  /**< the next step on the path */
    long              index;  /**< the (array or member) index of this step */
} path_entry;
static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
{
    path_entry       entry, *p;
    ir_entity        *ent, *field;
    ir_initializer_t *initializer;
    ir_tarval        *tv;
    ir_type          *tp;
    size_t           n;

    entry.next = next;
    if (is_SymConst(ptr)) {
        /* found the root */
        ent         = get_SymConst_entity(ptr);
        initializer = get_entity_initializer(ent);
        for (p = next; p != NULL;) {
            if (initializer->kind != IR_INITIALIZER_COMPOUND)
                return NULL;
            n  = get_initializer_compound_n_entries(initializer);
            tp = get_entity_type(ent);

            if (is_Array_type(tp)) {
                ent = get_array_element_entity(tp);
                if (ent != p->ent) {
                    /* a missing [0] */
                    if (0 >= n)
                        return NULL;
                    initializer = get_initializer_compound_value(initializer, 0);
                    continue;
                }
            }
            if (p->index >= (long)n)
                return NULL;
            initializer = get_initializer_compound_value(initializer, p->index);

            ent = p->ent;
            p   = p->next;
        }
        tp = get_entity_type(ent);
        while (is_Array_type(tp)) {
            ent = get_array_element_entity(tp);
            tp  = get_entity_type(ent);
            /* a missing [0] */
            n = get_initializer_compound_n_entries(initializer);
            if (0 >= n)
                return NULL;
            initializer = get_initializer_compound_value(initializer, 0);
        }

        switch (initializer->kind) {
        case IR_INITIALIZER_CONST:
            return get_initializer_const_value(initializer);
        case IR_INITIALIZER_TARVAL:
        case IR_INITIALIZER_NULL:
        default:
            return NULL;
        }
    } else if (is_Sel(ptr)) {
        entry.ent = field = get_Sel_entity(ptr);
        tp = get_entity_owner(field);
        if (is_Array_type(tp)) {
            assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
            entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
        } else {
            size_t i, n_members = get_compound_n_members(tp);
            for (i = 0; i < n_members; ++i) {
                if (get_compound_member(tp, i) == field)
                    break;
            }
            if (i >= n_members) {
                /* not found: should NOT happen */
                return NULL;
            }
            entry.index = i;
        }
        return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
    } else if (is_Add(ptr)) {
        ir_node  *l = get_Add_left(ptr);
        ir_node  *r = get_Add_right(ptr);
        ir_mode  *mode;
        unsigned pos;

        if (is_Const(r)) {
            ptr = l;
            tv  = get_Const_tarval(r);
        } else {
            ptr = r;
            tv  = get_Const_tarval(l);
        }
ptr_arith:
        mode = get_tarval_mode(tv);

        /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
        if (is_Sel(ptr)) {
            field = get_Sel_entity(ptr);
        } else {
            field = get_SymConst_entity(ptr);
        }

        /* count needed entries */
        pos = 0;
        for (ent = field;;) {
            tp = get_entity_type(ent);
            if (! is_Array_type(tp))
                break;
            ent = get_array_element_entity(tp);
            ++pos;
        }
        /* should be at least ONE entry */
        if (pos == 0)
            return NULL;

        /* allocate the right number of entries */
        NEW_ARR_A(path_entry, p, pos);

        /* fill them up */
        pos = 0;
        for (ent = field;;) {
            unsigned  size;
            ir_tarval *sz, *tv_index, *tlower, *tupper;
            long      index;
            ir_node   *bound;

            tp = get_entity_type(ent);
            if (! is_Array_type(tp))
                break;
            ent = get_array_element_entity(tp);
            p[pos].ent  = ent;
            p[pos].next = &p[pos + 1];

            size = get_type_size_bytes(get_entity_type(ent));
            sz   = new_tarval_from_long(size, mode);

            tv_index = tarval_div(tv, sz);
            tv       = tarval_mod(tv, sz);

            if (tv_index == tarval_bad || tv == tarval_bad)
                return NULL;

            assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
            bound  = get_array_lower_bound(tp, 0);
            tlower = computed_value(bound);
            bound  = get_array_upper_bound(tp, 0);
            tupper = computed_value(bound);

            if (tlower == tarval_bad || tupper == tarval_bad)
                return NULL;

            if (tarval_cmp(tv_index, tlower) == ir_relation_less)
                return NULL;
            if (tarval_cmp(tupper, tv_index) == ir_relation_less)
                return NULL;

            /* ok, bounds check finished */
            index = get_tarval_long(tv_index);
            p[pos].index = index;
            ++pos;
        }
        if (! tarval_is_null(tv)) {
            /* hmm, wrong access */
            return NULL;
        }
        p[pos - 1].next = next;
        return rec_find_compound_ent_value(ptr, p);
    } else if (is_Sub(ptr)) {
        ir_node *l = get_Sub_left(ptr);
        ir_node *r = get_Sub_right(ptr);

        ptr = l;
        tv  = get_Const_tarval(r);
        tv  = tarval_neg(tv);
        goto ptr_arith;
    }
    return NULL;
} /* rec_find_compound_ent_value */
static ir_node *find_compound_ent_value(ir_node *ptr)
{
    return rec_find_compound_ent_value(ptr, NULL);
} /* find_compound_ent_value */
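
/*
 * Worked example (illustrative): for a constant entity such as
 *
 *     static const struct { int a; int b[2]; } s = { 1, { 2, 3 } };
 *
 * a load from the address Sel(Sel(SymConst(&s), b), 1) is turned by
 * rec_find_compound_ent_value() into the path [b, 1], which is then looked
 * up in the entity's compound initializer, yielding the Const node 3.
 */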
/**
 * Mark a Load memop to be replaced by a definition.
 *
 * @param op   the Load memop
 * @param def  the definition
 */
static void mark_replace_load(memop_t *op, ir_node *def)
{
    op->replace = def;
    op->flags  |= FLAG_KILLED_NODE;
    env.changed = 1;
} /* mark_replace_load */
/**
 * Mark a Store memop to be removed.
 *
 * @param op  the Store memop
 */
static void mark_remove_store(memop_t *op)
{
    op->flags  |= FLAG_KILLED_NODE;
    env.changed = 1;
} /* mark_remove_store */
/**
 * Update a memop for a Load.
 *
 * @param m  the memop
 */
static void update_Load_memop(memop_t *m)
{
    ir_node   *load = m->node;
    ir_node   *ptr;
    ir_entity *ent;

    if (get_Load_volatility(load) == volatility_is_volatile)
        m->flags |= FLAG_IGNORE;

    ptr = get_Load_ptr(load);

    m->value.address = ptr;

    for (unsigned i = get_irn_n_outs(load); i-- > 0; ) {
        ir_node *proj = get_irn_out(load, i);
        long    pn;

        /* beware of keep edges */
        if (is_End(proj))
            continue;

        pn = get_Proj_proj(proj);
        m->projs[pn] = proj;
        switch (pn) {
        case pn_Load_res:
            m->value.value = proj;
            m->value.mode  = get_irn_mode(proj);
            break;
        case pn_Load_X_except:
            m->flags |= FLAG_EXCEPTION;
            break;
        case pn_Load_M:
            m->mem = proj;
            break;
        case pn_Load_X_regular:
            break;
        default:
            panic("Unsupported Proj from Load %+F", proj);
        }
    }

    /* check if we can determine the entity that will be loaded */
    ent = find_constant_entity(ptr);
    if (ent != NULL && get_entity_visibility(ent) != ir_visibility_external) {
        /* a static allocation that is not external: there should be NO exception
         * when loading even if we cannot replace the load itself. */
        ir_node *value = NULL;

        /* no exception, clear the m fields as it might be checked later again */
        if (m->projs[pn_Load_X_except]) {
            ir_graph *irg = get_irn_irg(ptr);
            exchange(m->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
            m->projs[pn_Load_X_except] = NULL;
            m->flags &= ~FLAG_EXCEPTION;
            env.changed = 1;
        }
        if (m->projs[pn_Load_X_regular]) {
            exchange(m->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
            m->projs[pn_Load_X_regular] = NULL;
            env.changed = 1;
        }

        if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT) {
            if (ent->initializer) {
                /* new style initializer */
                value = find_compound_ent_value(ptr);
            }
            if (value != NULL)
                value = can_replace_load_by_const(load, value);
        }

        if (value != NULL) {
            /* we completely replace the load by this value */
            DB((dbg, LEVEL_1, "Replacing Load %+F by constant %+F\n", m->node, value));
            mark_replace_load(m, value);
            return;
        }
    }

    if (m->value.value != NULL && !(m->flags & FLAG_IGNORE)) {
        /* only create an address if this node is NOT killed immediately or ignored */
        m->value.id = register_address(ptr);
        ++env.n_mem_ops;
    } else {
        /* no user, KILL it */
        mark_replace_load(m, NULL);
    }
} /* update_Load_memop */
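
/*
 * Example effect (illustrative): given
 *
 *     static const int tbl[4] = { 10, 20, 30, 40 };
 *     ... x = tbl[2];
 *
 * the Load address is resolved above via find_constant_entity() and
 * find_compound_ent_value(), so the Load is marked to be replaced by the
 * constant 30; its exception Projs are removed, because a load from a valid
 * static allocation cannot trap.
 */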
/**
 * Update a memop for a Store.
 *
 * @param m  the memop
 */
static void update_Store_memop(memop_t *m)
{
    ir_node *store = m->node;
    ir_node *adr   = get_Store_ptr(store);

    if (get_Store_volatility(store) == volatility_is_volatile) {
        m->flags |= FLAG_IGNORE;
    } else {
        /* only create an address if this node is NOT ignored */
        m->value.id = register_address(adr);
        ++env.n_mem_ops;
    }

    m->value.address = adr;

    for (unsigned i = get_irn_n_outs(store); i-- > 0; ) {
        ir_node *proj = get_irn_out(store, i);
        long    pn;

        /* beware of keep edges */
        if (is_End(proj))
            continue;

        pn = get_Proj_proj(proj);
        m->projs[pn] = proj;
        switch (pn) {
        case pn_Store_X_except:
            m->flags |= FLAG_EXCEPTION;
            break;
        case pn_Store_M:
            m->mem = proj;
            break;
        case pn_Store_X_regular:
            break;
        default:
            panic("Unsupported Proj from Store %+F", proj);
        }
    }
    m->value.value = get_Store_value(store);
    m->value.mode  = get_irn_mode(m->value.value);
} /* update_Store_memop */
/**
 * Update a memop for a Call.
 *
 * @param m  the memop
 */
static void update_Call_memop(memop_t *m)
{
    ir_node  *call = m->node;
    unsigned prop  = get_Call_memory_properties(call);

    if (prop & mtp_property_const) {
        /* A constant call does NOT use memory at all; we
           can kick it from the list. */
    } else if (prop & mtp_property_pure) {
        /* pure calls READ memory */
        m->flags = 0;
    } else
        m->flags = FLAG_KILL_ALL;

    for (unsigned i = get_irn_n_outs(call); i-- > 0; ) {
        ir_node *proj = get_irn_out(call, i);

        /* beware of keep edges */
        if (is_End(proj))
            continue;

        switch (get_Proj_proj(proj)) {
        case pn_Call_X_except:
            m->flags |= FLAG_EXCEPTION;
            break;
        case pn_Call_M:
            m->mem = proj;
            break;
        }
    }
} /* update_Call_memop */
/**
 * Update a memop for a Div.
 *
 * @param m  the memop
 */
static void update_Div_memop(memop_t *m)
{
    ir_node *div = m->node;

    for (unsigned i = get_irn_n_outs(div); i-- > 0; ) {
        ir_node *proj = get_irn_out(div, i);

        /* beware of keep edges */
        if (is_End(proj))
            continue;

        switch (get_Proj_proj(proj)) {
        case pn_Div_X_except:
            m->flags |= FLAG_EXCEPTION;
            break;
        case pn_Div_M:
            m->mem = proj;
            break;
        }
    }
} /* update_Div_memop */

/**
 * Update a memop for a Mod.
 *
 * @param m  the memop
 */
static void update_Mod_memop(memop_t *m)
{
    ir_node *mod = m->node;

    for (unsigned i = get_irn_n_outs(mod); i-- > 0; ) {
        ir_node *proj = get_irn_out(mod, i);

        /* beware of keep edges */
        if (is_End(proj))
            continue;

        switch (get_Proj_proj(proj)) {
        case pn_Mod_X_except:
            m->flags |= FLAG_EXCEPTION;
            break;
        case pn_Mod_M:
            m->mem = proj;
            break;
        }
    }
} /* update_Mod_memop */
/**
 * Update a memop for a Phi.
 *
 * @param m  the memop
 */
static void update_Phi_memop(memop_t *m)
{
    /* the Phi is its own mem */
    m->mem = m->node;
} /* update_Phi_memop */
/**
 * Memory walker: collect all memory ops and build topological lists.
 */
static void collect_memops(ir_node *irn, void *ctx)
{
    memop_t *op;
    ir_node *block;
    block_t *entry;

    (void)ctx;
    if (is_Proj(irn)) {
        /* we can safely ignore ProjM's except the initial memory */
        ir_graph *irg = get_irn_irg(irn);
        if (irn != get_irg_initial_mem(irg))
            return;
    }

    op    = alloc_memop(irn);
    block = get_nodes_block(irn);
    entry = get_block_entry(block);

    if (is_Phi(irn)) {
        update_Phi_memop(op);
        /* Phis must be always placed first */
        op->next = entry->memop_forward;
        entry->memop_forward = op;
        if (entry->memop_backward == NULL)
            entry->memop_backward = op;
    } else {
        switch (get_irn_opcode(irn)) {
        case iro_Load:
            update_Load_memop(op);
            break;
        case iro_Store:
            update_Store_memop(op);
            break;
        case iro_Call:
            update_Call_memop(op);
            break;
        case iro_Sync:
        case iro_Pin:
            op->mem = irn;
            break;
        case iro_Proj:
            /* initial memory */
            op->mem = irn;
            break;
        case iro_Return:
        case iro_End:
            /* we can use those to find the memory edge */
            break;
        case iro_Div:
            update_Div_memop(op);
            break;
        case iro_Mod:
            update_Mod_memop(op);
            break;

        case iro_Builtin:
            /* TODO: handle some builtins */
        default:
            /* unsupported operation */
            op->flags = FLAG_KILL_ALL;
        }

        /* all others should be placed last */
        if (entry->memop_backward == NULL) {
            entry->memop_forward = entry->memop_backward = op;
        } else {
            entry->memop_backward->next = op;
            entry->memop_backward = op;
        }
    }
} /* collect_memops */
/**
 * Find an address in the current set.
 *
 * @param value  the value to be searched for
 *
 * @return a memop for the value or NULL if the value does
 *         not exist in the set or cannot be converted into
 *         the requested mode
 */
static memop_t *find_address(const value_t *value)
{
    if (rbitset_is_set(env.curr_set, value->id)) {
        memop_t *res = env.curr_id_2_memop[value->id];

        if (res->value.mode == value->mode)
            return res;
        /* allow hidden casts */
        if (get_mode_arithmetic(res->value.mode) == irma_twos_complement &&
            get_mode_arithmetic(value->mode) == irma_twos_complement &&
            get_mode_size_bits(res->value.mode) == get_mode_size_bits(value->mode))
            return res;
    }
    return NULL;
} /* find_address */
/**
 * Find an address in the avail_out set.
 *
 * @param bl    the block
 * @param id    the address id
 * @param mode  the requested mode
 */
static memop_t *find_address_avail(const block_t *bl, unsigned id, const ir_mode *mode)
{
    if (rbitset_is_set(bl->avail_out, id)) {
        memop_t *res = bl->id_2_memop_avail[id];

        if (res->value.mode == mode)
            return res;
        /* allow hidden casts */
        if (get_mode_arithmetic(res->value.mode) == irma_twos_complement &&
            get_mode_arithmetic(mode) == irma_twos_complement &&
            get_mode_size_bits(res->value.mode) == get_mode_size_bits(mode))
            return res;
    }
    return NULL;
} /* find_address_avail */
/**
 * Kill all addresses from the current set.
 */
static void kill_all(void)
{
    rbitset_clear_all(env.curr_set, env.rbs_size);

    /* set sentinel */
    rbitset_set(env.curr_set, env.rbs_size - 1);
} /* kill_all */
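
/*
 * About the sentinel: the last bit of every set (index rbs_size - 1, see
 * opt_ldst() where rbs_size = curr_adr_id + 1) is always kept set. The
 * rbitset_next() scans below iterate only while pos < rbs_size - 1, so the
 * sentinel guarantees they always find a set bit and stop there; it also
 * survives rbitset_and() intersections, so even "kill all" leaves exactly
 * the sentinel.
 */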
/**
 * Kill memops that are not alias free due to a Store value from the current set.
 *
 * @param value  the Store value
 */
static void kill_memops(const value_t *value)
{
    size_t end = env.rbs_size - 1;
    size_t pos;

    for (pos = rbitset_next(env.curr_set, 0, 1); pos < end; pos = rbitset_next(env.curr_set, pos + 1, 1)) {
        memop_t *op = env.curr_id_2_memop[pos];

        if (ir_no_alias != get_alias_relation(value->address, value->mode,
                                              op->value.address, op->value.mode)) {
            rbitset_clear(env.curr_set, pos);
            env.curr_id_2_memop[pos] = NULL;
            DB((dbg, LEVEL_2, "KILLING %+F because of possible alias address %+F\n", op->node, value->address));
        }
    }
} /* kill_memops */
/**
 * Add the value of a memop to the current set.
 *
 * @param op  the memory op
 */
static void add_memop(memop_t *op)
{
    rbitset_set(env.curr_set, op->value.id);
    env.curr_id_2_memop[op->value.id] = op;
} /* add_memop */

/**
 * Add the value of a memop to the avail_out set.
 *
 * @param bl  the block
 * @param op  the memory op
 */
static void add_memop_avail(block_t *bl, memop_t *op)
{
    rbitset_set(bl->avail_out, op->value.id);
    bl->id_2_memop_avail[op->value.id] = op;
} /* add_memop_avail */
/**
 * Check, if we can convert a value of one mode to another mode
 * without changing the representation of bits.
 *
 * @param from  the original mode
 * @param to    the destination mode
 */
static int can_convert_to(const ir_mode *from, const ir_mode *to)
{
    if (get_mode_arithmetic(from) == irma_twos_complement &&
        get_mode_arithmetic(to) == irma_twos_complement &&
        get_mode_size_bits(from) == get_mode_size_bits(to))
        return 1;
    return 0;
} /* can_convert_to */
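
/*
 * Example (illustrative): a 32-bit signed and a 32-bit unsigned integer mode
 * (e.g. mode_Is and mode_Iu) are both two's complement of equal size, so a
 * value can be reinterpreted without changing any bits and can_convert_to()
 * returns 1. For mode_Is -> mode_Ls (different size) or mode_Is -> mode_F
 * (different arithmetic) it returns 0.
 */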
/**
 * Add a Conv to the requested mode if needed.
 *
 * @param irn   the IR-node to convert
 * @param mode  the destination mode
 *
 * @return the possibly converted node or NULL
 *         if the conversion is not possible
 */
static ir_node *conv_to(ir_node *irn, ir_mode *mode)
{
    ir_mode *other = get_irn_mode(irn);
    if (other != mode) {
        /* different modes: check if conversion is possible without changing the bits */
        if (can_convert_to(other, mode)) {
            ir_node *block = get_nodes_block(irn);
            return new_r_Conv(block, irn, mode);
        }
        /* otherwise not possible ... yet */
        return NULL;
    }
    return irn;
} /* conv_to */
/**
 * Update the address of a value if this address was a load result
 * and the load is killed now.
 *
 * @param value  the value whose address is updated
 */
static void update_address(value_t *value)
{
    if (is_Proj(value->address)) {
        ir_node *load = get_Proj_pred(value->address);

        if (is_Load(load)) {
            const memop_t *op = get_irn_memop(load);

            if (op->flags & FLAG_KILLED_NODE)
                value->address = op->replace;
        }
    }
} /* update_address */
/**
 * Do forward dataflow analysis on the given block and calculate the
 * GEN and KILL in the current (avail) set.
 *
 * @param bl  the block
 */
static void calc_gen_kill_avail(block_t *bl)
{
    memop_t *op;

    for (op = bl->memop_forward; op != NULL; op = op->next) {
        switch (get_irn_opcode(op->node)) {
        case iro_Phi:
            /* meet */
            break;
        case iro_Sync:
            /* join */
            break;
        case iro_Load:
            if (! (op->flags & (FLAG_KILLED_NODE|FLAG_IGNORE))) {
                /* do we have this already? */
                memop_t *other;

                update_address(&op->value);
                other = find_address(&op->value);
                if (other != NULL && other != op) {
                    ir_node *def = conv_to(other->value.value, op->value.mode);

                    if (def != NULL) {
#ifdef DEBUG_libfirm
                        if (is_Store(other->node)) {
                            /* RAW */
                            DB((dbg, LEVEL_1, "RAW %+F <- %+F(%+F)\n", op->node, def, other->node));
                        } else {
                            /* RAR */
                            DB((dbg, LEVEL_1, "RAR %+F <- %+F(%+F)\n", op->node, def, other->node));
                        }
#endif
                        mark_replace_load(op, def);
                        /* do NOT change the memop table */
                        continue;
                    }
                }
                /* add this value */
                add_memop(op);
            }
            break;
        case iro_Store:
            if (! (op->flags & FLAG_KILLED_NODE)) {
                /* do we have this store already? */
                memop_t *other;

                update_address(&op->value);
                other = find_address(&op->value);
                if (other != NULL) {
                    if (is_Store(other->node)) {
                        if (op != other && !(other->flags & FLAG_IGNORE) &&
                            get_nodes_block(other->node) == get_nodes_block(op->node)) {
                            /*
                             * A WAW in the same block: we can kick the first Store.
                             * This is a shortcut: we know that the second Store will
                             * be anticipated then in all cases.
                             */
                            DB((dbg, LEVEL_1, "WAW %+F <- %+F\n", other->node, op->node));
                            mark_remove_store(other);
                            /* FIXME: a Load might get freed due to this killed store */
                        }
                    } else if (other->value.value == op->value.value && !(op->flags & FLAG_IGNORE)) {
                        /* this Store writes back the value just loaded */
                        DB((dbg, LEVEL_1, "WAR %+F <- %+F\n", op->node, other->node));
                        mark_remove_store(op);
                        /* do NOT change the memop table */
                        continue;
                    }
                }
                /* KILL all possible aliases */
                kill_memops(&op->value);
                /* add this value */
                add_memop(op);
            }
            break;
        default:
            if (op->flags & FLAG_KILL_ALL)
                kill_all();
        }
    }
} /* calc_gen_kill_avail */
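
/*
 * Illustrative sketch of the redundancies handled above:
 *
 *     x = *p; ... y = *p;    RAR: the second Load reuses x
 *     *p = x; ... y = *p;    RAW: the Load is replaced by x
 *     *p = x; ... *p = y;    WAW: the first Store can be removed
 *     x = *p; ... *p = x;    the Store writes back the value just
 *                            loaded and is removed (logged as WAR)
 *
 * In each case "..." must not contain an operation that may alias p,
 * otherwise kill_memops()/kill_all() removes p's value from the set first.
 */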
#define BYTE_SIZE(x) (((x) + 7) >> 3)
/**
 * Do forward dataflow analysis on a given block to calculate the avail_out set
 * for this block only.
 *
 * @param bl  the block
 */
static void forward_avail(block_t *bl)
{
    /* fill the data from the current block */
    env.curr_id_2_memop = bl->id_2_memop_avail;
    env.curr_set        = bl->avail_out;

    calc_gen_kill_avail(bl);
    dump_curr(bl, "Avail_out");
} /* forward_avail */
/**
 * Do backward dataflow analysis on a given block to calculate the antic set
 * of Loaded addresses.
 *
 * @param bl  the block
 *
 * @return non-zero if the set has changed since the last iteration
 */
static int backward_antic(block_t *bl)
{
    memop_t *op;
    ir_node *block = bl->block;
    int     n = get_Block_n_cfg_outs(block);

    if (n == 1) {
        ir_node *succ     = get_Block_cfg_out(block, 0);
        block_t *succ_bl  = get_block_entry(succ);
        int     pred_pos  = get_Block_cfgpred_pos(succ, block);
        size_t  end       = env.rbs_size - 1;
        size_t  pos;

        kill_all();

        if (bl->trans_results == NULL) {
            /* allocate the translate cache */
            bl->trans_results = OALLOCNZ(&env.obst, memop_t*, env.curr_adr_id);
        }

        /* check for partly redundant values */
        for (pos = rbitset_next(succ_bl->anticL_in, 0, 1);
             pos < end;
             pos = rbitset_next(succ_bl->anticL_in, pos + 1, 1)) {
            /*
             * do Phi-translation here: Note that at this point the nodes are
             * not changed, so we can safely cache the results.
             * However: Loads of Load results ARE bad, because we have no way
             * to translate them yet ...
             */
            memop_t *op = bl->trans_results[pos];

            if (op == NULL) {
                /* not yet translated */
                ir_node *adr, *trans_adr;

                op  = succ_bl->id_2_memop_antic[pos];
                adr = op->value.address;

                trans_adr = phi_translate(adr, succ, pred_pos);
                if (trans_adr != adr) {
                    /* create a new entry for the translated one */
                    memop_t *new_op;

                    new_op = alloc_memop(NULL);
                    new_op->value.address = trans_adr;
                    new_op->value.id      = register_address(trans_adr);
                    new_op->value.mode    = op->value.mode;
                    new_op->node          = op->node; /* we need the node to decide if Load/Store */
                    new_op->flags         = op->flags;

                    bl->trans_results[pos] = new_op;
                    op = new_op;
                }
            }
            env.curr_id_2_memop[op->value.id] = op;
            rbitset_set(env.curr_set, op->value.id);
        }
    } else if (n > 1) {
        ir_node *succ    = get_Block_cfg_out(block, 0);
        block_t *succ_bl = get_block_entry(succ);
        int     i;

        rbitset_copy(env.curr_set, succ_bl->anticL_in, env.rbs_size);
        memcpy(env.curr_id_2_memop, succ_bl->id_2_memop_antic, env.rbs_size * sizeof(env.curr_id_2_memop[0]));

        /* Hmm: probably we want to kill merges of Loads and Stores here */
        for (i = n - 1; i > 0; --i) {
            ir_node *succ    = get_Block_cfg_out(bl->block, i);
            block_t *succ_bl = get_block_entry(succ);

            rbitset_and(env.curr_set, succ_bl->anticL_in, env.rbs_size);
        }
    } else {
        /* block ends with a noreturn call */
        kill_all();
    }

    dump_curr(bl, "AnticL_out");

    for (op = bl->memop_backward; op != NULL; op = op->prev) {
        switch (get_irn_opcode(op->node)) {
        case iro_Phi:
            /* meet */
            break;
        case iro_Sync:
            /* join */
            break;
        case iro_Load:
            if (! (op->flags & (FLAG_KILLED_NODE|FLAG_IGNORE))) {
                /* always add it */
                add_memop(op);
            }
            break;
        case iro_Store:
            if (! (op->flags & FLAG_KILLED_NODE)) {
                /* a Store: check which memops must be killed */
                kill_memops(&op->value);
            }
            break;
        default:
            if (op->flags & FLAG_KILL_ALL)
                kill_all();
        }
    }

    memcpy(bl->id_2_memop_antic, env.curr_id_2_memop, env.rbs_size * sizeof(env.curr_id_2_memop[0]));
    if (! rbitsets_equal(bl->anticL_in, env.curr_set, env.rbs_size)) {
        /* changed */
        rbitset_copy(bl->anticL_in, env.curr_set, env.rbs_size);
        dump_curr(bl, "AnticL_in*");
        return 1;
    }
    dump_curr(bl, "AnticL_in");
    return 0;
} /* backward_antic */
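
/*
 * In dataflow terms backward_antic() evaluates, per block B (a sketch):
 *
 *     AnticL_out(B) = /\ translate(AnticL_in(S))   over all successors S
 *     AnticL_in(B)  = (AnticL_out(B) with B's Stores/calls killed)
 *                     \/ Loads generated in B
 *
 * where /\ is set intersection, translate() is the Phi-translation into B
 * (only needed in the single-successor case) and the backward walk over B's
 * memops applies kill and gen. The result is the set of Load addresses that
 * are anticipated on entry of B.
 */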
/**
 * Replace a Load memop by an already known value.
 *
 * @param op  the Load memop
 */
static void replace_load(memop_t *op)
{
    ir_node *load = op->node;
    ir_node *def  = skip_Id(op->replace);
    ir_node *proj;
    ir_mode *mode;

    if (def != NULL) {
        DB((dbg, LEVEL_1, "Replacing %+F by definition %+F\n", load, is_Proj(def) ? get_Proj_pred(def) : def));
    } else {
        if (op->flags & FLAG_EXCEPTION) {
            /* bad: this node is unused and executed for exception only */
            DB((dbg, LEVEL_1, "Unused %+F executed for exception only ...\n", load));
            return;
        }
        DB((dbg, LEVEL_1, "Killing unused %+F\n", load));
    }

    if (op->mem != NULL) {
        /* in rare cases a Load might have NO memory */
        exchange(op->mem, get_Load_mem(load));
    }
    proj = op->projs[pn_Load_res];
    if (proj != NULL) {
        mode = get_irn_mode(proj);
        if (get_irn_mode(def) != mode) {
            /* a hidden cast */
            dbg_info *db    = get_irn_dbg_info(load);
            ir_node  *block = get_nodes_block(proj);
            def = new_rd_Conv(db, block, def, mode);
        }
        exchange(proj, def);
    }
    proj = op->projs[pn_Load_X_except];
    if (proj != NULL) {
        ir_graph *irg = get_irn_irg(load);
        exchange(proj, new_r_Bad(irg, mode_X));
    }
    proj = op->projs[pn_Load_X_regular];
    if (proj != NULL) {
        exchange(proj, new_r_Jmp(get_nodes_block(load)));
    }
} /* replace_load */
/**
 * Remove a Store memop.
 *
 * @param op  the Store memop
 */
static void remove_store(memop_t *op)
{
    ir_node *store = op->node;
    ir_node *proj;

    DB((dbg, LEVEL_1, "Removing %+F\n", store));

    if (op->mem != NULL) {
        /* in rare cases a Store might have no memory */
        exchange(op->mem, get_Store_mem(store));
    }
    proj = op->projs[pn_Store_X_except];
    if (proj != NULL) {
        ir_graph *irg = get_irn_irg(store);
        exchange(proj, new_r_Bad(irg, mode_X));
    }
    proj = op->projs[pn_Store_X_regular];
    if (proj != NULL) {
        exchange(proj, new_r_Jmp(get_nodes_block(store)));
    }
} /* remove_store */
/**
 * Do all necessary replacements for a given block.
 *
 * @param bl  the block
 */
static void do_replacements(block_t *bl)
{
    memop_t *op;

    for (op = bl->memop_forward; op != NULL; op = op->next) {
        if (op->flags & FLAG_KILLED_NODE) {
            switch (get_irn_opcode(op->node)) {
            case iro_Load:
                replace_load(op);
                break;
            case iro_Store:
                remove_store(op);
                break;
            }
        }
    }
} /* do_replacements */
/**
 * Calculate the Avail_out sets for all basic blocks.
 */
static void calcAvail(void)
{
    memop_t  **tmp_memop = env.curr_id_2_memop;
    unsigned *tmp_set    = env.curr_set;
    block_t  *bl;

    /* calculate avail_out */
    DB((dbg, LEVEL_2, "Calculate Avail_out\n"));

    /* iterate over all blocks in any order, skip the start block */
    for (bl = env.forward->forward_next; bl != NULL; bl = bl->forward_next) {
        forward_avail(bl);
    }

    /* restore the current sets */
    env.curr_id_2_memop = tmp_memop;
    env.curr_set        = tmp_set;
} /* calcAvail */
/**
 * Calculate the Antic_in sets for all basic blocks.
 */
static void calcAntic(void)
{
    block_t *bl;
    int     i, need_iter;

    /* calculate antic_out */
    DB((dbg, LEVEL_2, "Calculate Antic_in\n"));

    i = 0;
    do {
        DB((dbg, LEVEL_2, "Iteration %d:\n=========\n", i));

        need_iter = 0;

        /* over all blocks in reverse post order */
        for (bl = env.backward->backward_next; bl != NULL; bl = bl->backward_next) {
            need_iter |= backward_antic(bl);
        }
        ++i;
    } while (need_iter);
    DB((dbg, LEVEL_2, "Get anticipated Load set after %d iterations\n", i));
} /* calcAntic */
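
/*
 * Termination note: backward_antic() reports a change only when a block's
 * AnticL_in set differs from the previous round. Since the sets are drawn
 * from a finite universe of address ids and the transfer functions are
 * monotone, the iteration reaches a fixpoint after finitely many rounds.
 */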
/**
 * Return the node representing the last memory in a block.
 *
 * @param bl  the block
 */
static ir_node *find_last_memory(block_t *bl)
{
    for (;;) {
        if (bl->memop_backward != NULL) {
            return bl->memop_backward->mem;
        }
        /* if there is NO memory in this block, go to the dominator */
        bl = get_block_entry(get_Block_idom(bl->block));
    }
} /* find_last_memory */
/**
 * Reroute all memory users of old memory
 * to a new memory IR-node.
 *
 * @param omem  the old memory IR-node
 * @param nmem  the new memory IR-node
 */
static void reroute_all_mem_users(ir_node *omem, ir_node *nmem)
{
    for (unsigned i = get_irn_n_outs(omem); i-- > 0; ) {
        int     n_pos;
        ir_node *user = get_irn_out_ex(omem, i, &n_pos);

        set_irn_n(user, n_pos, nmem);
    }

    /* all edges previously pointing to omem now point to nmem */
    nmem->o.out = omem->o.out;
} /* reroute_all_mem_users */
/**
 * Reroute memory users of old memory that are dominated by a given block
 * to a new memory IR-node.
 *
 * @param omem     the old memory IR-node
 * @param nmem     the new memory IR-node
 * @param pass_bl  the block the memory must pass
 */
static void reroute_mem_through(ir_node *omem, ir_node *nmem, ir_node *pass_bl)
{
    unsigned         n        = get_irn_n_outs(omem);
    ir_def_use_edges *new_out = OALLOCF(&env.obst, ir_def_use_edges, edges, n);
    unsigned         j        = 0;

    for (unsigned i = 0; i < n; ++i) {
        int     n_pos;
        ir_node *user   = get_irn_out_ex(omem, i, &n_pos);
        ir_node *use_bl = get_nodes_block(user);

        if (is_Phi(user)) {
            use_bl = get_Block_cfgpred_block(use_bl, n_pos);
        }
        if (block_dominates(pass_bl, use_bl)) {
            /* found a user that is dominated */
            new_out->edges[j].pos = n_pos;
            new_out->edges[j].use = user;
            ++j;

            set_irn_n(user, n_pos, nmem);
        }
    }
    new_out->n_edges = j;

    /* Modify the out structure: we create a new out edge array on our
       temporary obstack here. This should be no problem, as we invalidate the
       edges at the end anyway. */
    nmem->o.out = new_out;
} /* reroute_mem_through */
/**
 * Insert Loads, making partly redundant Loads fully redundant.
 */
static int insert_Load(block_t *bl)
{
    ir_node *block = bl->block;
    int     i, n   = get_Block_n_cfgpreds(block);
    size_t  end    = env.rbs_size - 1;

    DB((dbg, LEVEL_3, "processing %+F\n", block));

    if (n == 0) {
        /* might still happen for an unreachable block (end for instance) */
        return 0;
    }

    if (n > 1) {
        ir_node **ins;
        size_t  pos;

        NEW_ARR_A(ir_node *, ins, n);

        rbitset_set_all(env.curr_set, env.rbs_size);

        /* More than one predecessor: calculate the join for all avail_outs, ignoring
           unevaluated blocks. These are Top anyway. */
        for (i = n - 1; i >= 0; --i) {
            ir_node *pred = skip_Proj(get_Block_cfgpred(block, i));
            ir_node *blk  = get_nodes_block(pred);
            block_t *pred_bl;

            pred_bl = get_block_entry(blk);
            rbitset_and(env.curr_set, pred_bl->avail_out, env.rbs_size);

            if (is_Load(pred) || is_Store(pred)) {
                /* We reached this block by an exception from a Load or Store:
                 * the memop creating the exception was NOT completed then; kill it.
                 */
                memop_t *exc_op = get_irn_memop(pred);
                rbitset_clear(env.curr_set, exc_op->value.id);
            }
        }
        /*
         * Ensure that all values are in the map: build Phi's if necessary.
         * Note: the last bit is the sentinel and ALWAYS set, so end with -2.
         */
        for (pos = 0; pos < env.rbs_size - 1; ++pos) {
            if (! rbitset_is_set(env.curr_set, pos))
                env.curr_id_2_memop[pos] = NULL;
            else {
                ir_node *pred    = get_Block_cfgpred_block(bl->block, 0);
                block_t *pred_bl = get_block_entry(pred);
                int     need_phi = 0;
                memop_t *mop;
                memop_t *first   = NULL;
                ir_mode *mode    = NULL;

                for (i = 0; i < n; ++i) {
                    pred    = get_Block_cfgpred_block(bl->block, i);
                    pred_bl = get_block_entry(pred);

                    mop = pred_bl->id_2_memop_avail[pos];
                    if (first == NULL) {
                        first  = mop;
                        ins[0] = first->value.value;
                        mode   = get_irn_mode(ins[0]);

                        /* no Phi needed so far */
                        env.curr_id_2_memop[pos] = first;
                    } else {
                        ins[i] = conv_to(mop->value.value, mode);
                        if (ins[i] != ins[0]) {
                            if (ins[i] == NULL) {
                                /* conversion failed */
                                env.curr_id_2_memop[pos] = NULL;
                                rbitset_clear(env.curr_set, pos);
                                break;
                            }
                            need_phi = 1;
                        }
                    }
                }
                if (need_phi) {
                    /* build a Phi */
                    ir_node *phi   = new_r_Phi(bl->block, n, ins, mode);
                    memop_t *phiop = alloc_memop(phi);

                    phiop->value       = first->value;
                    phiop->value.value = phi;

                    /* no need to link it in, as it is a DATA phi */

                    env.curr_id_2_memop[pos] = phiop;

                    DB((dbg, LEVEL_3, "Created new %+F on merging value for address %+F\n", phi, first->value.address));
                }
            }
        }
    } else {
        /* only one predecessor, simply copy the map */
        ir_node *pred    = get_Block_cfgpred_block(bl->block, 0);
        block_t *pred_bl = get_block_entry(pred);

        rbitset_copy(env.curr_set, pred_bl->avail_out, env.rbs_size);

        memcpy(env.curr_id_2_memop, pred_bl->id_2_memop_avail, env.rbs_size * sizeof(bl->id_2_memop_avail[0]));
    }

    if (n > 1) {
        size_t pos;

        /* check for partly redundant values */
        for (pos = rbitset_next(bl->anticL_in, 0, 1);
             pos < end;
             pos = rbitset_next(bl->anticL_in, pos + 1, 1)) {
            memop_t *op = bl->id_2_memop_antic[pos];
            int     have_some, all_same;
            ir_node *first;

            if (rbitset_is_set(env.curr_set, pos)) {
                /* already available */
                continue;
            }

            assert(is_Load(op->node));

            DB((dbg, LEVEL_3, "anticipated %+F\n", op->node));

            have_some = 0;
            all_same  = 1;
            first     = NULL;
            for (i = n - 1; i >= 0; --i) {
                ir_node *pred    = get_Block_cfgpred_block(block, i);
                block_t *pred_bl = get_block_entry(pred);
                ir_mode *mode    = op->value.mode;
                memop_t *e;
                ir_node *adr;

                adr = phi_translate(op->value.address, block, i);
                DB((dbg, LEVEL_3, ".. using address %+F in pred %d\n", adr, i));
                e = find_address_avail(pred_bl, register_address(adr), mode);
                if (e == NULL) {
                    ir_node *ef_block = get_nodes_block(adr);
                    if (! block_dominates(ef_block, pred)) {
                        /* cannot place a copy here */
                        have_some = 0;
                        DB((dbg, LEVEL_3, "%+F cannot be moved into predecessor %+F\n", op->node, pred));
                        break;
                    }
                    DB((dbg, LEVEL_3, "%+F is not available in predecessor %+F\n", op->node, pred));
                    pred_bl->avail = NULL;
                    all_same       = 0;
                } else {
                    if (e->value.mode != mode && !can_convert_to(e->value.mode, mode)) {
                        /* cannot create a Phi due to different modes */
                        have_some = 0;
                        break;
                    }
                    pred_bl->avail = e;
                    have_some      = 1;
                    DB((dbg, LEVEL_3, "%+F is available for %+F in predecessor %+F\n", e->node, op->node, pred));
                    if (first == NULL)
                        first = e->node;
                    else if (first != e->node)
                        all_same = 0;
                }
            }
            if (have_some && !all_same) {
                ir_mode *mode = op->value.mode;
                ir_node **in, *phi;
                memop_t *phi_op;

                NEW_ARR_A(ir_node *, in, n);

                for (i = n - 1; i >= 0; --i) {
                    ir_node *pred    = get_Block_cfgpred_block(block, i);
                    block_t *pred_bl = get_block_entry(pred);

                    if (pred_bl->avail == NULL) {
                        /* create a new Load here to make it fully redundant */
                        dbg_info *db       = get_irn_dbg_info(op->node);
                        ir_node  *last_mem = find_last_memory(pred_bl);
                        ir_node  *load, *def, *adr;
                        memop_t  *new_op;

                        assert(last_mem != NULL);
                        adr  = phi_translate(op->value.address, block, i);
                        load = new_rd_Load(db, pred, last_mem, adr, mode, cons_none);
                        def  = new_r_Proj(load, mode, pn_Load_res);
                        DB((dbg, LEVEL_1, "Created new %+F in %+F for partly redundant %+F\n", load, pred, op->node));

                        new_op                = alloc_memop(load);
                        new_op->mem           = new_r_Proj(load, mode_M, pn_Load_M);
                        new_op->value.address = adr;
                        new_op->value.id      = op->value.id;
                        new_op->value.mode    = mode;
                        new_op->value.value   = def;

                        new_op->projs[pn_Load_M]   = new_op->mem;
                        new_op->projs[pn_Load_res] = def;

                        new_op->prev = pred_bl->memop_backward;
                        if (pred_bl->memop_backward != NULL)
                            pred_bl->memop_backward->next = new_op;

                        pred_bl->memop_backward = new_op;

                        if (pred_bl->memop_forward == NULL)
                            pred_bl->memop_forward = new_op;

                        if (get_nodes_block(last_mem) == pred) {
                            /* We have added a new last memory op in pred block.
                               If pred already had a last mem, reroute all memory
                               users. */
                            reroute_all_mem_users(last_mem, new_op->mem);
                        } else {
                            /* reroute only those memory edges going through the pred block */
                            reroute_mem_through(last_mem, new_op->mem, pred);
                        }

                        /* we added this load at the end, so it will be avail anyway */
                        add_memop_avail(pred_bl, new_op);
                        pred_bl->avail = new_op;
                    }
                    in[i] = conv_to(pred_bl->avail->value.value, mode);
                }
                phi = new_r_Phi(block, n, in, mode);
                DB((dbg, LEVEL_1, "Created new %+F in %+F for now redundant %+F\n", phi, block, op->node));

                phi_op = clone_memop_phi(op, phi);
                add_memop(phi_op);
            }
        }
    }

    /* recalculate avail by gen and kill */
    calc_gen_kill_avail(bl);

    /* always update the map after gen/kill, as values might have been changed due to RAR/WAR/WAW */
    memcpy(bl->id_2_memop_avail, env.curr_id_2_memop, env.rbs_size * sizeof(env.curr_id_2_memop[0]));

    if (!rbitsets_equal(bl->avail_out, env.curr_set, env.rbs_size)) {
        /* the avail set has changed */
        rbitset_copy(bl->avail_out, env.curr_set, env.rbs_size);
        dump_curr(bl, "Avail_out*");
        return 1;
    }
    dump_curr(bl, "Avail_out");
    return 0;
} /* insert_Load */
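
/*
 * Illustrative example of the transformation (a sketch): in the diamond
 *
 *            A:
 *           /   \
 *     B: x = *p;  C: (no access to p)
 *           \   /
 *            D: y = *p;
 *
 * the Load in D is anticipated (AnticL_in) and available from B but not
 * from C, i.e. it is partly redundant. insert_Load() creates a new Load in
 * C (making the value fully redundant) and a Phi in D merging B's and C's
 * values; the Load in D is then replaced by that Phi in do_replacements().
 */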
/**
 * Insert Loads upwards.
 */
static void insert_Loads_upwards(void)
{
    block_t *bl;
    int     i, need_iter;

    /* recalculate antic_out and insert Loads */
    DB((dbg, LEVEL_2, "Inserting Loads\n"));

    i = 0;
    do {
        DB((dbg, LEVEL_2, "Iteration %d:\n=========\n", i));

        need_iter = 0;

        /* over all blocks in reverse post order, skip the start block */
        for (bl = env.forward->forward_next; bl != NULL; bl = bl->forward_next) {
            need_iter |= insert_Load(bl);
        }
        ++i;
    } while (need_iter);

    DB((dbg, LEVEL_2, "Finished Load inserting after %d iterations\n", i));
} /* insert_Loads_upwards */
void opt_ldst(ir_graph *irg)
{
    block_t *bl;

    FIRM_DBG_REGISTER(dbg, "firm.opt.ldst");

    DB((dbg, LEVEL_1, "\nDoing Load/Store optimization on %+F\n", irg));

    assure_irg_properties(irg,
        IR_GRAPH_PROPERTY_NO_CRITICAL_EDGES /* we need landing pads */
        | IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE
        | IR_GRAPH_PROPERTY_CONSISTENT_OUTS
        | IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE
        | IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE);

    if (get_opt_alias_analysis()) {
        assure_irp_globals_entity_usage_computed();
    }

    obstack_init(&env.obst);
    ir_nodehashmap_init(&env.adr_map);

    env.forward       = NULL;
    env.backward      = NULL;
    env.curr_adr_id   = 0;
    env.n_mem_ops     = 0;
    env.max_cfg_preds = 0;
    env.changed       = 0;
    env.start_bl      = get_irg_start_block(irg);
    env.end_bl        = get_irg_end_block(irg);
#ifdef DEBUG_libfirm
    env.id_2_address  = NEW_ARR_F(ir_node *, 0);
#endif

    ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_BLOCK_MARK);

    /* first step: allocate block entries. Note that some blocks might be
       unreachable here. Using the normal walk ensures that ALL blocks are initialized. */
    irg_walk_graph(irg, prepare_blocks, link_phis, NULL);

    /* produce an inverse post-order list for the CFG: this links only reachable
       blocks */
    irg_out_block_walk(get_irg_start_block(irg), NULL, inverse_post_order, NULL);

    if (! get_Block_mark(env.end_bl)) {
        /*
         * The end block is NOT reachable due to endless loops
         * or no_return calls.
         * Place the end block last.
         * env.backward points to the last block in the list for this purpose.
         */
        env.backward->forward_next = get_block_entry(env.end_bl);

        set_Block_mark(env.end_bl, 1);
    }

    /* second step: find and sort all memory ops */
    walk_memory_irg(irg, collect_memops, NULL, NULL);

#ifdef DEBUG_libfirm
    /* check that the backward map is correct */
    assert((unsigned)ARR_LEN(env.id_2_address) == env.curr_adr_id);
#endif

    if (env.n_mem_ops == 0) {
        /* no memory ops */
        goto no_changes;
    }

    /* create the backward links. */
    env.backward = NULL;
    irg_block_walk_graph(irg, NULL, collect_backward, NULL);

    /* link the end block in */
    bl = get_block_entry(env.end_bl);
    bl->backward_next = env.backward;
    env.backward      = bl;

    /* check that we really start with the start / end block */
    assert(env.forward->block  == env.start_bl);
    assert(env.backward->block == env.end_bl);

    /* create address sets: for now, only the existing addresses are allowed plus one
       needed for the sentinel */
    env.rbs_size = env.curr_adr_id + 1;

    /* create the current set */
    env.curr_set = rbitset_obstack_alloc(&env.obst, env.rbs_size);
    rbitset_set(env.curr_set, env.rbs_size - 1);
    env.curr_id_2_memop = NEW_ARR_D(memop_t *, &env.obst, env.rbs_size);
    memset(env.curr_id_2_memop, 0, env.rbs_size * sizeof(env.curr_id_2_memop[0]));

    for (bl = env.forward; bl != NULL; bl = bl->forward_next) {
        /* set sentinel bits */
        bl->avail_out = rbitset_obstack_alloc(&env.obst, env.rbs_size);
        rbitset_set(bl->avail_out, env.rbs_size - 1);

        bl->id_2_memop_avail = NEW_ARR_D(memop_t *, &env.obst, env.rbs_size);
        memset(bl->id_2_memop_avail, 0, env.rbs_size * sizeof(bl->id_2_memop_avail[0]));

        bl->anticL_in = rbitset_obstack_alloc(&env.obst, env.rbs_size);
        rbitset_set(bl->anticL_in, env.rbs_size - 1);

        bl->id_2_memop_antic = NEW_ARR_D(memop_t *, &env.obst, env.rbs_size);
        memset(bl->id_2_memop_antic, 0, env.rbs_size * sizeof(bl->id_2_memop_antic[0]));
    }

    (void) dump_block_list;

    calcAvail();
    calcAntic();

    insert_Loads_upwards();

    if (env.changed) {
        /* over all blocks in reverse post order */
        for (bl = env.forward; bl != NULL; bl = bl->forward_next) {
            do_replacements(bl);
        }

        /* not only invalidate but free them. We might allocate new out arrays
           on our obstack which would be deleted anyway. */
        confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_CONTROL_FLOW);
    } else {
no_changes:
        confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
    }

    ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_BLOCK_MARK);
    ir_nodehashmap_destroy(&env.adr_map);
    obstack_free(&env.obst, NULL);

#ifdef DEBUG_libfirm
    DEL_ARR_F(env.id_2_address);
#endif
} /* opt_ldst */
ir_graph_pass_t *opt_ldst_pass(const char *name)
{
    return def_graph_pass(name ? name : "ldst_df", opt_ldst);
} /* opt_ldst_pass */
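
/*
 * Usage sketch (illustrative, assuming the usual libFirm driver code):
 *
 *     ir_graph *irg = ...;
 *     opt_ldst(irg);                                // run directly on one graph
 *
 * or, scheduled through the pass manager:
 *
 *     ir_graph_pass_t *pass = opt_ldst_pass(NULL);  // default name "ldst_df"
 *     // add the pass to an ir_graph pass manager and run it
 */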