 * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 * @brief   Load/Store optimizations.
 * @author  Michael Beck
#include "iroptimize.h"

#include "irgraph_t.h"

#include "dbginfo_t.h"
#include "iropt_dbg.h"

#include "irnodehashmap.h"
/** The debug handle. */
DEBUG_ONLY(static firm_dbg_module_t *dbg;)

#define IMAX(a,b) ((a) > (b) ? (a) : (b))

#define MAX_PROJ IMAX(IMAX((long)pn_Load_max, (long)pn_Store_max), (long)pn_Call_max)

enum {
	DF_CHANGED = 1, /**< data flow changed */
	CF_CHANGED = 2, /**< control flow changed */
};
typedef struct walk_env_t {
	struct obstack obst;    /**< list of all stores */
	unsigned       changes; /**< a bitmask of graph changes */
} walk_env_t;
/** A Load/Store info. */
typedef struct ldst_info_t {
	ir_node  *projs[MAX_PROJ+1]; /**< list of Proj's of this node */
	ir_node  *exc_block;         /**< the exception block if available */
	int      exc_idx;            /**< predecessor index in the exception block */
	unsigned visited;            /**< visited counter for breaking loops */
} ldst_info_t;
 * flags for control flow.
 */
enum {
	BLOCK_HAS_COND = 1, /**< Block has conditional control flow */
	BLOCK_HAS_EXC  = 2  /**< Block has exceptional control flow */
};
typedef struct block_info_t {
	unsigned flags; /**< flags for the block */
} block_info_t;
/** the master visited flag for loop detection. */
static unsigned master_visited = 0;

#define INC_MASTER()       ++master_visited
#define MARK_NODE(info)    (info)->visited = master_visited
#define NODE_VISITED(info) (info)->visited >= master_visited
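
/*
 * Illustrative sketch of how these macros cooperate with the memory-chain
 * walkers below (see follow_Mem_chain()): the master counter is bumped once
 * per walk, every node on the chain is tagged via MARK_NODE(), and
 * NODE_VISITED() detects a cycle, e.g.:
 *
 *   INC_MASTER();
 *   for (pred = curr; pred != load; ) {
 *       ldst_info_t *pred_info = (ldst_info_t*)get_irn_link(pred);
 *       if (NODE_VISITED(pred_info))
 *           break;
 *       MARK_NODE(pred_info);
 *       ...
 *   }
 */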
 * get the Load/Store info of a node
 */
static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst)
{
	ldst_info_t *info = (ldst_info_t*)get_irn_link(node);

	if (info == NULL) {
		info = OALLOCZ(obst, ldst_info_t);
		set_irn_link(node, info);
	}
	return info;
} /* get_ldst_info */
 * get the Block info of a node
 */
static block_info_t *get_block_info(ir_node *node, struct obstack *obst)
{
	block_info_t *info = (block_info_t*)get_irn_link(node);

	if (info == NULL) {
		info = OALLOCZ(obst, block_info_t);
		set_irn_link(node, info);
	}
	return info;
} /* get_block_info */
 * update the projection info for a Load/Store
 */
static unsigned update_projs(ldst_info_t *info, ir_node *proj)
{
	long nr = get_Proj_proj(proj);

	assert(0 <= nr && nr <= MAX_PROJ && "Wrong proj from LoadStore");

	if (info->projs[nr]) {
		/* there is already one, do CSE */
		exchange(proj, info->projs[nr]);
		return DF_CHANGED;
	} else {
		info->projs[nr] = proj;
		return 0;
	}
} /* update_projs */
 * update the exception block info for a Load/Store node.
 *
 * @param info   the load/store info struct
 * @param block  the exception handler block for this load/store
 * @param pos    the control flow input of the block
 */
static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
{
	assert(info->exc_block == NULL && "more than one exception block found");

	info->exc_block = block;
	info->exc_idx   = pos;
	return 0;
} /* update_exc */
 * walker, collects all Load/Store/Proj nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
{
	walk_env_t  *wenv   = (walk_env_t *)env;
	unsigned     opcode = get_irn_opcode(node);
	ir_node     *pred, *blk, *pred_blk;
	ldst_info_t *ldst_info;

	if (opcode == iro_Proj) {
		pred   = get_Proj_pred(node);
		opcode = get_irn_opcode(pred);

		if (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call) {
			ldst_info = get_ldst_info(pred, &wenv->obst);

			wenv->changes |= update_projs(ldst_info, node);

			/*
			 * Place the Proj's in the same block as the
			 * predecessor Load. This is always ok and prevents
			 * "non-SSA" form after optimizations if the Proj
			 * is in a wrong block.
			 */
			blk      = get_nodes_block(node);
			pred_blk = get_nodes_block(pred);
			if (blk != pred_blk) {
				wenv->changes |= DF_CHANGED;
				set_nodes_block(node, pred_blk);
			}
		}
	} else if (opcode == iro_Block) {
		int i;

		for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
			ir_node      *pred_block, *proj;
			block_info_t *bl_info;
			int          is_exc = 0;

			pred = proj = get_Block_cfgpred(node, i);

			if (is_Proj(proj)) {
				pred   = get_Proj_pred(proj);
				is_exc = is_x_except_Proj(proj);
			}

			/* ignore Bad predecessors, they will be removed later */
			if (is_Bad(pred))
				continue;

			pred_block = get_nodes_block(pred);
			bl_info    = get_block_info(pred_block, &wenv->obst);

			if (is_fragile_op(pred) && is_exc)
				bl_info->flags |= BLOCK_HAS_EXC;
			else if (is_irn_forking(pred))
				bl_info->flags |= BLOCK_HAS_COND;

			opcode = get_irn_opcode(pred);
			if (is_exc && (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call)) {
				ldst_info = get_ldst_info(pred, &wenv->obst);

				wenv->changes |= update_exc(ldst_info, node, i);
			}
		}
	}
} /* collect_nodes */
 * Returns an entity if the address ptr points to a constant one.
 *
 * @param ptr  the address
 *
 * @return an entity or NULL
 */
static ir_entity *find_constant_entity(ir_node *ptr)
{
	for (;;) {
		if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
			return get_SymConst_entity(ptr);
		} else if (is_Sel(ptr)) {
			ir_entity *ent = get_Sel_entity(ptr);
			ir_type   *tp  = get_entity_owner(ent);

			/* Do not fiddle with polymorphism. */
			if (is_Class_type(get_entity_owner(ent)) &&
				((get_entity_n_overwrites(ent)    != 0) ||
				 (get_entity_n_overwrittenby(ent) != 0)   ) )
				return NULL;

			if (is_Array_type(tp)) {
				/* check bounds */
				int i, n;

				for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
					ir_node   *bound;
					ir_tarval *tlower, *tupper;
					ir_node   *index = get_Sel_index(ptr, i);
					ir_tarval *tv    = computed_value(index);

					/* check if the index is constant */
					if (tv == tarval_bad)
						return NULL;

					bound  = get_array_lower_bound(tp, i);
					tlower = computed_value(bound);
					bound  = get_array_upper_bound(tp, i);
					tupper = computed_value(bound);

					if (tlower == tarval_bad || tupper == tarval_bad)
						return NULL;

					if (tarval_cmp(tv, tlower) == ir_relation_less)
						return NULL;
					if (tarval_cmp(tupper, tv) == ir_relation_less)
						return NULL;

					/* ok, bounds check finished */
				}
			}

			if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT)
				return ent;

			/* try next */
			ptr = get_Sel_ptr(ptr);
		} else if (is_Add(ptr)) {
			ir_node *l = get_Add_left(ptr);
			ir_node *r = get_Add_right(ptr);

			if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
				ptr = l;
			else if (get_irn_mode(r) == get_irn_mode(ptr) && is_Const(l))
				ptr = r;
			else
				return NULL;

			/* for now, we support only one addition, reassoc should fold all others */
			if (! is_SymConst(ptr) && !is_Sel(ptr))
				return NULL;
		} else if (is_Sub(ptr)) {
			ir_node *l = get_Sub_left(ptr);
			ir_node *r = get_Sub_right(ptr);

			if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
				ptr = l;
			else
				return NULL;
			/* for now, we support only one subtraction, reassoc should fold all others */
			if (! is_SymConst(ptr) && !is_Sel(ptr))
				return NULL;
		} else
			return NULL;
	}
} /* find_constant_entity */
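
/*
 * Illustrative example: for a constant global such as
 *   static const int g[4] = { ... };
 * an access g[2] appears as Sel(SymConst(&g), Const 2); the walk above
 * returns the entity of g once the constant index has passed the bounds
 * check and g carries IR_LINKAGE_CONSTANT.
 */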
 * Return the Selection index of a Sel node from dimension dim
 */
static long get_Sel_array_index_long(ir_node *n, int dim)
{
	ir_node *index = get_Sel_index(n, dim);
	assert(is_Const(index));
	return get_tarval_long(get_Const_tarval(index));
} /* get_Sel_array_index_long */
typedef struct path_entry {
	ir_entity         *ent;   /**< the entity of this path element */
	struct path_entry *next;  /**< the remaining path */
	size_t            index;  /**< the index into ent */
} path_entry;
static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
{
	path_entry       entry, *p;
	ir_entity        *ent, *field;
	ir_initializer_t *initializer;
	ir_tarval        *tv;
	ir_type          *tp;
	size_t           n;

	entry.next = next;
	if (is_SymConst(ptr)) {
		/* found the root */
		ent         = get_SymConst_entity(ptr);
		initializer = get_entity_initializer(ent);
		for (p = next; p != NULL;) {
			if (initializer->kind != IR_INITIALIZER_COMPOUND)
				return NULL;
			n  = get_initializer_compound_n_entries(initializer);
			tp = get_entity_type(ent);

			if (is_Array_type(tp)) {
				ent = get_array_element_entity(tp);
				if (ent != p->ent) {
					/* a missing [0] */
					if (0 >= n)
						return NULL;
					initializer = get_initializer_compound_value(initializer, 0);
					continue;
				}
			}
			if (p->index >= n)
				return NULL;
			initializer = get_initializer_compound_value(initializer, p->index);

			ent = p->ent;
			p   = p->next;
		}
		tp = get_entity_type(ent);
		while (is_Array_type(tp)) {
			ent = get_array_element_entity(tp);
			tp  = get_entity_type(ent);
			/* a missing [0] */
			n = get_initializer_compound_n_entries(initializer);
			if (0 >= n)
				return NULL;
			initializer = get_initializer_compound_value(initializer, 0);
		}

		switch (initializer->kind) {
		case IR_INITIALIZER_CONST:
			return get_initializer_const_value(initializer);
		case IR_INITIALIZER_TARVAL:
		case IR_INITIALIZER_NULL:
		default:
			return NULL;
		}
	} else if (is_Sel(ptr)) {
		entry.ent = field = get_Sel_entity(ptr);
		tp = get_entity_owner(field);
		if (is_Array_type(tp)) {
			assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
			entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
		} else {
			size_t i, n_members = get_compound_n_members(tp);
			for (i = 0; i < n_members; ++i) {
				if (get_compound_member(tp, i) == field)
					break;
			}
			if (i >= n_members) {
				/* not found: should NOT happen */
				return NULL;
			}
			entry.index = i;
		}
		return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
	} else if (is_Add(ptr)) {
		ir_mode  *mode;
		unsigned pos;

		{
			ir_node *l = get_Add_left(ptr);
			ir_node *r = get_Add_right(ptr);

			if (is_Const(r)) {
				ptr = l;
				tv  = get_Const_tarval(r);
			} else {
				ptr = r;
				tv  = get_Const_tarval(l);
			}
		}
ptr_arith:
		mode = get_tarval_mode(tv);

		/* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
		if (is_Sel(ptr)) {
			field = get_Sel_entity(ptr);
		} else {
			field = get_SymConst_entity(ptr);
		}

		/* count needed entries */
		pos = 0;
		for (ent = field;;) {
			tp = get_entity_type(ent);
			if (! is_Array_type(tp))
				break;
			ent = get_array_element_entity(tp);
			++pos;
		}
		/* should be at least ONE entry */
		if (pos == 0)
			return NULL;

		/* allocate the right number of entries */
		NEW_ARR_A(path_entry, p, pos);

		/* fill them up */
		pos = 0;
		for (ent = field;;) {
			unsigned  size;
			ir_tarval *sz, *tv_index, *tlower, *tupper;
			long      index;
			ir_node   *bound;

			tp = get_entity_type(ent);
			if (! is_Array_type(tp))
				break;
			ent = get_array_element_entity(tp);
			p[pos].ent  = ent;
			p[pos].next = &p[pos + 1];

			size = get_type_size_bytes(get_entity_type(ent));
			sz   = new_tarval_from_long(size, mode);

			tv_index = tarval_div(tv, sz);
			tv       = tarval_mod(tv, sz);

			if (tv_index == tarval_bad || tv == tarval_bad)
				return NULL;

			assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
			bound  = get_array_lower_bound(tp, 0);
			tlower = computed_value(bound);
			bound  = get_array_upper_bound(tp, 0);
			tupper = computed_value(bound);

			if (tlower == tarval_bad || tupper == tarval_bad)
				return NULL;

			if (tarval_cmp(tv_index, tlower) == ir_relation_less)
				return NULL;
			if (tarval_cmp(tupper, tv_index) == ir_relation_less)
				return NULL;

			/* ok, bounds check finished */
			index = get_tarval_long(tv_index);
			p[pos].index = index;
			++pos;
		}
		if (! tarval_is_null(tv)) {
			/* hmm, wrong access */
			return NULL;
		}
		p[pos - 1].next = next;
		return rec_find_compound_ent_value(ptr, p);
	} else if (is_Sub(ptr)) {
		ir_node *l = get_Sub_left(ptr);
		ir_node *r = get_Sub_right(ptr);

		ptr = l;
		tv  = get_Const_tarval(r);
		tv  = tarval_neg(tv);
		goto ptr_arith;
	}
	return NULL;
} /* rec_find_compound_ent_value */

static ir_node *find_compound_ent_value(ir_node *ptr)
{
	return rec_find_compound_ent_value(ptr, NULL);
} /* find_compound_ent_value */
static void reduce_adr_usage(ir_node *ptr);
 * Update a Load that may have lost its users.
 */
static void handle_load_update(ir_node *load)
{
	ldst_info_t *info = (ldst_info_t*)get_irn_link(load);

	/* do NOT touch volatile loads for now */
	if (get_Load_volatility(load) == volatility_is_volatile)
		return;

	if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
		ir_node *ptr = get_Load_ptr(load);
		ir_node *mem = get_Load_mem(load);

		/* a Load whose value is neither used nor exception checked, remove it */
		exchange(info->projs[pn_Load_M], mem);
		if (info->projs[pn_Load_X_regular])
			exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
		kill_node(load);
		reduce_adr_usage(ptr);
	}
} /* handle_load_update */
 * A use of an address node has vanished. Check if this was a Proj
 * node and update the counters.
 */
static void reduce_adr_usage(ir_node *ptr)
{
	ir_node *pred;

	if (!is_Proj(ptr))
		return;
	if (get_irn_n_edges(ptr) > 0)
		return;

	/* this Proj is dead now */
	pred = get_Proj_pred(ptr);
	if (is_Load(pred)) {
		ldst_info_t *info = (ldst_info_t*)get_irn_link(pred);
		info->projs[get_Proj_proj(ptr)] = NULL;

		/* this node lost its result proj, handle that */
		handle_load_update(pred);
	}
} /* reduce_adr_usage */
 * Check, if an already existing value of mode old_mode can be converted
 * into the needed one new_mode without loss.
 */
static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode)
{
	unsigned old_size;
	unsigned new_size;

	if (old_mode == new_mode)
		return 1;

	old_size = get_mode_size_bits(old_mode);
	new_size = get_mode_size_bits(new_mode);

	/* if both modes are two's-complement ones, we can always convert the
	   Stored value into the needed one. (on big endian machines we currently
	   only support this for modes of same size) */
	if (old_size >= new_size &&
	    get_mode_arithmetic(old_mode) == irma_twos_complement &&
	    get_mode_arithmetic(new_mode) == irma_twos_complement &&
	    (!be_get_backend_param()->byte_order_big_endian
	     || old_size == new_size)) {
		return 1;
	}
	return 0;
} /* can_use_stored_value */
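
/*
 * Example (sketch): on a little-endian target a stored 32-bit
 * two's-complement value can satisfy a later 16-bit load from the same
 * address, because the 16 low bits start at the same byte; on a
 * big-endian target they sit at the far end, hence the equal-size
 * restriction above.
 */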
 * Check whether a Call is at least pure, i.e. it only reads memory.
 */
static unsigned is_Call_pure(ir_node *call)
{
	ir_type *call_tp = get_Call_type(call);
	unsigned prop    = get_method_additional_properties(call_tp);

	/* check first the call type */
	if ((prop & (mtp_property_const|mtp_property_pure)) == 0) {
		/* try the called entity */
		ir_node *ptr = get_Call_ptr(call);

		if (is_SymConst_addr_ent(ptr)) {
			ir_entity *ent = get_SymConst_entity(ptr);

			prop = get_entity_additional_properties(ent);
		}
	}
	return (prop & (mtp_property_const|mtp_property_pure)) != 0;
} /* is_Call_pure */
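
/*
 * Example: a Call to an entity marked mtp_property_pure reads but never
 * writes memory, so the memory-chain walkers below may simply skip over
 * it, exactly like a Load.
 */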
static ir_node *get_base_and_offset(ir_node *ptr, long *pOffset)
{
	ir_mode *mode  = get_irn_mode(ptr);
	long    offset = 0;

	/* TODO: long might not be enough, we should probably use some tarval thingy... */
	for (;;) {
		if (is_Add(ptr)) {
			ir_node *l = get_Add_left(ptr);
			ir_node *r = get_Add_right(ptr);

			if (get_irn_mode(l) != mode || !is_Const(r))
				break;

			offset += get_tarval_long(get_Const_tarval(r));
			ptr     = l;
		} else if (is_Sub(ptr)) {
			ir_node *l = get_Sub_left(ptr);
			ir_node *r = get_Sub_right(ptr);

			if (get_irn_mode(l) != mode || !is_Const(r))
				break;

			offset -= get_tarval_long(get_Const_tarval(r));
			ptr     = l;
		} else if (is_Sel(ptr)) {
			ir_entity *ent = get_Sel_entity(ptr);
			ir_type   *tp  = get_entity_owner(ent);

			if (is_Array_type(tp)) {
				int     size;
				ir_node *index;

				/* only one dimensional arrays yet */
				if (get_Sel_n_indexs(ptr) != 1)
					break;
				index = get_Sel_index(ptr, 0);
				if (! is_Const(index))
					break;

				tp = get_entity_type(ent);
				if (get_type_state(tp) != layout_fixed)
					break;

				size    = get_type_size_bytes(tp);
				offset += size * get_tarval_long(get_Const_tarval(index));
			} else {
				if (get_type_state(tp) != layout_fixed)
					break;
				offset += get_entity_offset(ent);
			}
			ptr = get_Sel_ptr(ptr);
		} else
			break;
	}

	*pOffset = offset;
	return ptr;
} /* get_base_and_offset */
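
/*
 * Example (sketch): for ptr = Add(Sel(frame, ent), Const 8) with a fixed
 * entity layout, the loop above walks both nodes and yields
 *   base     = frame
 *   *pOffset = get_entity_offset(ent) + 8.
 */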
static int try_load_after_store(ir_node *load,
		ir_node *load_base_ptr, long load_offset, ir_node *store)
{
	ldst_info_t *info;
	ir_node *store_ptr      = get_Store_ptr(store);
	long    store_offset;
	ir_node *store_base_ptr = get_base_and_offset(store_ptr, &store_offset);
	ir_node *store_value;
	ir_mode *store_mode;
	ir_node *load_ptr;
	ir_mode *load_mode;
	long    load_mode_len;
	long    store_mode_len;
	long    delta;
	int     res;

	if (load_base_ptr != store_base_ptr)
		return 0;

	load_mode      = get_Load_mode(load);
	load_mode_len  = get_mode_size_bytes(load_mode);
	store_mode     = get_irn_mode(get_Store_value(store));
	store_mode_len = get_mode_size_bytes(store_mode);
	delta          = load_offset - store_offset;
	store_value    = get_Store_value(store);

	if (delta != 0 || store_mode != load_mode) {
		/* TODO: implement for big-endian */
		if (delta < 0 || delta + load_mode_len > store_mode_len
				|| (be_get_backend_param()->byte_order_big_endian
				    && load_mode_len != store_mode_len))
			return 0;

		if (get_mode_arithmetic(store_mode) != irma_twos_complement ||
		    get_mode_arithmetic(load_mode)  != irma_twos_complement)
			return 0;

		/* produce a shift to adjust offset delta */
		if (delta > 0) {
			ir_node  *cnst;
			ir_graph *irg = get_irn_irg(load);

			cnst        = new_r_Const_long(irg, mode_Iu, delta * 8);
			store_value = new_r_Shr(get_nodes_block(load),
			                        store_value, cnst, store_mode);
		}

		/* add a Conv if needed */
		if (store_mode != load_mode) {
			store_value = new_r_Conv(get_nodes_block(load), store_value, load_mode);
		}
	}

	DBG_OPT_RAW(load, store_value);

	info = (ldst_info_t*)get_irn_link(load);
	if (info->projs[pn_Load_M])
		exchange(info->projs[pn_Load_M], get_Load_mem(load));

	res = 0;
	/* no exception */
	if (info->projs[pn_Load_X_except]) {
		ir_graph *irg = get_irn_irg(load);
		exchange( info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
		res |= CF_CHANGED;
	}
	if (info->projs[pn_Load_X_regular]) {
		exchange( info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
		res |= CF_CHANGED;
	}

	if (info->projs[pn_Load_res])
		exchange(info->projs[pn_Load_res], store_value);

	load_ptr = get_Load_ptr(load);
	kill_node(load);
	reduce_adr_usage(load_ptr);
	return res | DF_CHANGED;
} /* try_load_after_store */
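
/*
 * Example (illustrative, little-endian): after Store(p, 0x11223344:Is),
 * a Load(p + 2):Hs has delta = 2 and is replaced by
 *   Conv(Shr(0x11223344, 2 * 8), Hs) == 0x1122,
 * exactly the two bytes stored at p + 2.
 */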
 * Follow the memory chain as long as there are only Loads,
 * alias free Stores, and constant Calls and try to replace the
 * current Load by a previous one.
 * Note that in unreachable loops it might happen that we reach the
 * load again, or that we fall into a cycle.
 * We break such cycles using a special visited flag.
 *
 * INC_MASTER() must be called before diving in.
 */
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr)
{
	unsigned    res = 0;
	ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
	ir_node     *pred;
	ir_node     *ptr       = get_Load_ptr(load);
	ir_node     *mem       = get_Load_mem(load);
	ir_mode     *load_mode = get_Load_mode(load);

	for (pred = curr; load != pred; ) {
		ldst_info_t *pred_info = (ldst_info_t*)get_irn_link(pred);

		/*
		 * a Load immediately after a Store -- a read after write.
		 * We may remove the Load, if both Load & Store do not have an
		 * exception handler OR they are in the same Block. In the latter
		 * case the Load cannot throw an exception when the previous Store was
		 * quiet.
		 *
		 * Why do we need to check for a Store exception? If the Store cannot
		 * be executed (ROM) the exception handler might simply jump into
		 * the load Block :-(
		 * We could make it a little bit better if we would know that the
		 * exception handler of the Store jumps directly to the end...
		 */
		if (is_Store(pred) && ((pred_info->projs[pn_Store_X_except] == NULL
				&& info->projs[pn_Load_X_except] == NULL)
				|| get_nodes_block(load) == get_nodes_block(pred)))
		{
			long    load_offset;
			ir_node *base_ptr = get_base_and_offset(ptr, &load_offset);
			int     changes   = try_load_after_store(load, base_ptr, load_offset, pred);

			if (changes != 0)
				return res | changes;
		} else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
		           can_use_stored_value(get_Load_mode(pred), load_mode)) {
			/*
			 * a Load after a Load -- a read after read.
			 * We may remove the second Load, if it does not have an exception
			 * handler OR they are in the same Block. In the latter case
			 * the Load cannot throw an exception when the previous Load was
			 * quiet.
			 *
			 * Here, there is no need to check if the previous Load has an
			 * exception handler because they would have exactly the same
			 * exception...
			 *
			 * TODO: implement load-after-load with different mode for big
			 *       endian
			 */
			if (info->projs[pn_Load_X_except] == NULL
					|| get_nodes_block(load) == get_nodes_block(pred)) {
				ir_node *value;

				DBG_OPT_RAR(load, pred);

				/* the result is used */
				if (info->projs[pn_Load_res]) {
					if (pred_info->projs[pn_Load_res] == NULL) {
						/* create a new Proj again */
						pred_info->projs[pn_Load_res] = new_r_Proj(pred, get_Load_mode(pred), pn_Load_res);
					}
					value = pred_info->projs[pn_Load_res];

					/* add a Conv if needed */
					if (get_Load_mode(pred) != load_mode) {
						value = new_r_Conv(get_nodes_block(load), value, load_mode);
					}

					exchange(info->projs[pn_Load_res], value);
				}

				if (info->projs[pn_Load_M])
					exchange(info->projs[pn_Load_M], mem);

				/* no exception */
				if (info->projs[pn_Load_X_except]) {
					ir_graph *irg = get_irn_irg(load);
					exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
					res |= CF_CHANGED;
				}
				if (info->projs[pn_Load_X_regular]) {
					exchange( info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
					res |= CF_CHANGED;
				}

				kill_node(load);
				reduce_adr_usage(ptr);
				return res | DF_CHANGED;
			}
		}

		if (is_Store(pred)) {
			/* check if we can pass through this store */
			ir_alias_relation rel = get_alias_relation(
				get_Store_ptr(pred),
				get_irn_mode(get_Store_value(pred)),
				ptr, load_mode);
			/* if there might be an alias, we cannot pass this Store */
			if (rel != ir_no_alias)
				break;
			pred = skip_Proj(get_Store_mem(pred));
		} else if (is_Load(pred)) {
			pred = skip_Proj(get_Load_mem(pred));
		} else if (is_Call(pred)) {
			if (is_Call_pure(pred)) {
				/* The called graph is at least pure, so there are no Store's
				   in it. We can handle it like a Load and skip it. */
				pred = skip_Proj(get_Call_mem(pred));
			} else {
				/* there might be Store's in the graph, stop here */
				break;
			}
		} else {
			/* follow only Load chains */
			break;
		}

		/* check for cycles */
		if (NODE_VISITED(pred_info))
			break;
		MARK_NODE(pred_info);
	}

	if (is_Sync(pred)) {
		int i;

		/* handle all Sync predecessors */
		for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
			res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
		}
	}
	return res;
} /* follow_Mem_chain */
 * Check if we can replace the load by a given const from
 * the const code irg.
 */
ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c)
{
	ir_mode  *c_mode = get_irn_mode(c);
	ir_mode  *l_mode = get_Load_mode(load);
	ir_node  *block  = get_nodes_block(load);
	dbg_info *dbgi   = get_irn_dbg_info(load);
	ir_node  *res    = copy_const_value(dbgi, c, block);

	if (c_mode != l_mode) {
		/* check, if the mode matches OR can be easily converted into the needed one */
		if (is_reinterpret_cast(c_mode, l_mode)) {
			/* copy the value from the const code irg and cast it */
			res = new_rd_Conv(dbgi, block, res, l_mode);
		} else {
			return NULL;
		}
	}
	return res;
} /* can_replace_load_by_const */
/**
 * optimizes a Load
 *
 * @param load  the Load node
 */
static unsigned optimize_load(ir_node *load)
{
	ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
	ir_node     *mem, *ptr, *value;
	ir_entity   *ent;
	long        dummy;
	unsigned    res = 0;

	/* do NOT touch volatile loads for now */
	if (get_Load_volatility(load) == volatility_is_volatile)
		return 0;

	/* the address of the load to be optimized */
	ptr = get_Load_ptr(load);

	/* The mem of the Load. Must still be returned after optimization. */
	mem = get_Load_mem(load);

	if (info->projs[pn_Load_res] == NULL
			&& info->projs[pn_Load_X_except] == NULL) {
		/* the value is never used and we don't care about exceptions, remove */
		exchange(info->projs[pn_Load_M], mem);

		if (info->projs[pn_Load_X_regular]) {
			/* should not happen, but if it does, remove it */
			exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
			res |= CF_CHANGED;
		}
		kill_node(load);
		reduce_adr_usage(ptr);
		return res | DF_CHANGED;
	}

	value = NULL;
	/* check if we can determine the entity that will be loaded */
	ent = find_constant_entity(ptr);
	if (ent != NULL
			&& get_entity_visibility(ent) != ir_visibility_external) {
		/* a static allocation that is not external: there should be NO
		 * exception when loading even if we cannot replace the load itself.
		 */

		/* no exception, clear the info field as it might be checked later again */
		if (info->projs[pn_Load_X_except]) {
			ir_graph *irg = get_irn_irg(load);
			exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
			info->projs[pn_Load_X_except] = NULL;
			res |= CF_CHANGED;
		}
		if (info->projs[pn_Load_X_regular]) {
			exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
			info->projs[pn_Load_X_regular] = NULL;
			res |= CF_CHANGED;
		}

		if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT) {
			if (has_entity_initializer(ent)) {
				/* new style initializer */
				value = find_compound_ent_value(ptr);
			}
			if (value != NULL) {
				ir_graph *irg = get_irn_irg(load);
				value = can_replace_load_by_const(load, value);
				if (value != NULL && is_Sel(ptr)) {
					/* frontend has inserted masking operations after bitfield accesses,
					 * so we might have to shift the const. */
					unsigned char bit_offset = get_entity_offset_bits_remainder(get_Sel_entity(ptr));
					ir_tarval *tv_old    = get_Const_tarval(value);
					ir_tarval *tv_offset = new_tarval_from_long(bit_offset, mode_Bu);
					ir_tarval *tv_new    = tarval_shl(tv_old, tv_offset);
					value = new_r_Const(irg, tv_new);
				}
			}
		}
	}
	if (value != NULL) {
		/* we completely replace the load by this value */
		if (info->projs[pn_Load_X_except]) {
			ir_graph *irg = get_irn_irg(load);
			exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
			info->projs[pn_Load_X_except] = NULL;
			res |= CF_CHANGED;
		}
		if (info->projs[pn_Load_X_regular]) {
			exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
			info->projs[pn_Load_X_regular] = NULL;
			res |= CF_CHANGED;
		}
		if (info->projs[pn_Load_M]) {
			exchange(info->projs[pn_Load_M], mem);
			res |= DF_CHANGED;
		}
		if (info->projs[pn_Load_res]) {
			exchange(info->projs[pn_Load_res], value);
			res |= DF_CHANGED;
		}
		kill_node(load);
		reduce_adr_usage(ptr);
		return res;
	}

	/* Check, if the address of this load is used more than once.
	 * If not, this load cannot be removed in any case. */
	if (get_irn_n_edges(ptr) <= 1 && get_irn_n_edges(get_base_and_offset(ptr, &dummy)) <= 1)
		return res;

	/*
	 * follow the memory chain as long as there are only Loads
	 * and try to replace the current Load or Store by a previous one.
	 * Note that in unreachable loops it might happen that we reach the
	 * load again, or that we fall into a cycle.
	 * We break such cycles using a special visited flag.
	 */
	INC_MASTER();
	res = follow_Mem_chain(load, skip_Proj(mem));
	return res;
} /* optimize_load */
 * Check whether a value of mode new_mode would completely overwrite a value
 * of mode old_mode in memory.
 */
static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
{
	return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
} /* is_completely_overwritten */
 * Check whether small is a part of large (starting at same address).
 */
static int is_partially_same(ir_node *small, ir_node *large)
{
	ir_mode *sm = get_irn_mode(small);
	ir_mode *lm = get_irn_mode(large);

	/* FIXME: Check endianness */
	return is_Conv(small) && get_Conv_op(small) == large
	    && get_mode_size_bytes(sm) < get_mode_size_bytes(lm)
	    && get_mode_arithmetic(sm) == irma_twos_complement
	    && get_mode_arithmetic(lm) == irma_twos_complement;
} /* is_partially_same */
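
/*
 * Example (sketch): given a mode Is value x, the sequence
 *   Store(p, Conv(x):Hs); Store(p, x)
 * makes is_partially_same(Conv(x):Hs, x) true, so the first Store writes
 * a part of what the second writes to the same address and may be removed
 * below (modulo the endianness FIXME above).
 */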
 * follow the memory chain as long as there are only Loads and alias free Stores.
 *
 * INC_MASTER() must be called before diving in.
 */
static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr)
{
	unsigned    res  = 0;
	ldst_info_t *info = (ldst_info_t*)get_irn_link(store);
	ir_node     *pred;
	ir_node     *ptr   = get_Store_ptr(store);
	ir_node     *mem   = get_Store_mem(store);
	ir_node     *value = get_Store_value(store);
	ir_mode     *mode  = get_irn_mode(value);
	ir_node     *block = get_nodes_block(store);

	for (pred = curr; pred != store;) {
		ldst_info_t *pred_info = (ldst_info_t*)get_irn_link(pred);

		/*
		 * BEWARE: one might think that checking the modes is useless, because
		 * if the pointers are identical, they refer to the same object.
		 * This is only true in strongly typed languages, not in C, where the
		 * following is possible *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
		 * However, if the size of the mode that is written is bigger than or
		 * equal to the size of the old one, the old value is completely
		 * overwritten and can be killed ...
		 */
		if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
		    get_nodes_block(pred) == block) {
			/*
			 * a Store after a Store in the same Block -- a write after write.
			 */

			/*
			 * We may remove the first Store, if the old value is completely
			 * overwritten or the old value is a part of the new value,
			 * and if it does not have an exception handler.
			 *
			 * TODO: What if both have the same exception handler?
			 */
			if (get_Store_volatility(pred) != volatility_is_volatile
			        && !pred_info->projs[pn_Store_X_except]) {
				ir_node *predvalue = get_Store_value(pred);
				ir_mode *predmode  = get_irn_mode(predvalue);

				if (is_completely_overwritten(predmode, mode)
				        || is_partially_same(predvalue, value)) {
					DBG_OPT_WAW(pred, store);
					exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
					kill_node(pred);
					reduce_adr_usage(ptr);
					return DF_CHANGED;
				}
			}

			/*
			 * We may remove the Store, if the old value already contains
			 * the new value, and if it does not have an exception handler.
			 *
			 * TODO: What if both have the same exception handler?
			 */
			if (get_Store_volatility(store) != volatility_is_volatile
			        && !info->projs[pn_Store_X_except]) {
				ir_node *predvalue = get_Store_value(pred);

				if (is_partially_same(value, predvalue)) {
					DBG_OPT_WAW(pred, store);
					exchange(info->projs[pn_Store_M], mem);
					kill_node(store);
					reduce_adr_usage(ptr);
					return DF_CHANGED;
				}
			}
		} else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
		           value == pred_info->projs[pn_Load_res]) {
			/*
			 * a Store of a value just loaded from the same address
			 * -- a write after read.
			 * We may remove the Store, if it does not have an exception
			 * handler.
			 */
			if (! info->projs[pn_Store_X_except]) {
				DBG_OPT_WAR(store, pred);
				exchange(info->projs[pn_Store_M], mem);
				kill_node(store);
				reduce_adr_usage(ptr);
				return DF_CHANGED;
			}
		}

		if (is_Store(pred)) {
			/* check if we can pass through this store */
			ir_alias_relation rel = get_alias_relation(
				get_Store_ptr(pred),
				get_irn_mode(get_Store_value(pred)),
				ptr, mode);
			/* if there might be an alias, we cannot pass this Store */
			if (rel != ir_no_alias)
				break;
			pred = skip_Proj(get_Store_mem(pred));
		} else if (is_Load(pred)) {
			ir_alias_relation rel = get_alias_relation(
				get_Load_ptr(pred), get_Load_mode(pred),
				ptr, mode);
			if (rel != ir_no_alias)
				break;

			pred = skip_Proj(get_Load_mem(pred));
		} else {
			/* follow only Load chains */
			break;
		}

		/* check for cycles */
		if (NODE_VISITED(pred_info))
			break;
		MARK_NODE(pred_info);
	}

	if (is_Sync(pred)) {
		int i;

		/* handle all Sync predecessors */
		for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
			res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
		}
	}
	return res;
} /* follow_Mem_chain_for_Store */
/** find entity used as base for an address calculation */
static ir_entity *find_entity(ir_node *ptr)
{
	switch (get_irn_opcode(ptr)) {
	case iro_SymConst:
		return get_SymConst_entity(ptr);
	case iro_Sel: {
		ir_node *pred = get_Sel_ptr(ptr);
		if (get_irg_frame(get_irn_irg(ptr)) == pred)
			return get_Sel_entity(ptr);

		return find_entity(pred);
	}
	case iro_Sub:
	case iro_Add: {
		ir_node *left = get_binop_left(ptr);
		ir_node *right;
		if (mode_is_reference(get_irn_mode(left)))
			return find_entity(left);
		right = get_binop_right(ptr);
		if (mode_is_reference(get_irn_mode(right)))
			return find_entity(right);
		return NULL;
	}
	default:
		return NULL;
	}
} /* find_entity */
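
/*
 * Example: for ptr = Add(SymConst(&g), Const 4) the left operand has a
 * reference mode, so the recursion follows it and returns the entity g.
 */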
/**
 * optimize a Store
 *
 * @param store  the Store node
 */
static unsigned optimize_store(ir_node *store)
{
	ir_node   *ptr;
	ir_node   *mem;
	ir_entity *entity;

	if (get_Store_volatility(store) == volatility_is_volatile)
		return 0;

	ptr    = get_Store_ptr(store);
	entity = find_entity(ptr);

	/* a store to an entity which is never read is unnecessary */
	if (entity != NULL && !(get_entity_usage(entity) & ir_usage_read)) {
		ldst_info_t *info = (ldst_info_t*)get_irn_link(store);
		if (info->projs[pn_Store_X_except] == NULL) {
			DB((dbg, LEVEL_1, "  Killing useless %+F to never read entity %+F\n", store, entity));
			exchange(info->projs[pn_Store_M], get_Store_mem(store));
			kill_node(store);
			reduce_adr_usage(ptr);
			return DF_CHANGED;
		}
	}

	/* Check, if the address of this Store is used more than once.
	 * If not, this Store cannot be removed in any case. */
	if (get_irn_n_edges(ptr) <= 1)
		return 0;

	mem = get_Store_mem(store);

	/* follow the memory chain as long as there are only Loads */
	INC_MASTER();

	return follow_Mem_chain_for_Store(store, skip_Proj(mem));
} /* optimize_store */
/* check if a node has more than one real user. Keepalive edges do not count
 * as real users */
static bool has_multiple_users(const ir_node *node)
{
	unsigned real_users = 0;
	foreach_out_edge(node, edge) {
		ir_node *user = get_edge_src_irn(edge);
		if (!is_End(user))
			++real_users;
	}
	return real_users > 1;
}
 * walker, optimizes Phi after Stores to identical places:
 * Does the following optimization:
 * @verbatim
 *
 *   val1   val2   val3          val1  val2  val3
 *    |      |      |               \    |    /
 *  Store  Store  Store              \   |   /
 *      \    |    /                   PhiData
 *       \   |   /                       |
 *        \  |  /                      Store
 *          PhiM                         |
 *                                      PhiM
 *
 * @endverbatim
 * This reduces the number of stores and allows for predicated execution.
 * Moves Stores back to the end of a function which may be bad.
 *
 * This is only possible if the predecessor blocks have only one successor.
 */
static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
{
	int i, n;
	ir_node *store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
#ifdef DO_CACHEOPT
	ir_node *old_store;
#endif
	ir_mode *mode;
	ir_node **inM, **inD, **projMs;
	int *idx;
	dbg_info *db = NULL;
	ldst_info_t *info;
	block_info_t *bl_info;
	unsigned res = 0;

	/* Must be a memory Phi */
	if (get_irn_mode(phi) != mode_M)
		return 0;

	n = get_Phi_n_preds(phi);
	if (n <= 0)
		return 0;

	/* must be only one user */
	projM = get_Phi_pred(phi, 0);
	if (has_multiple_users(projM))
		return 0;

	store = skip_Proj(projM);
#ifdef DO_CACHEOPT
	old_store = store;
#endif
	if (!is_Store(store))
		return 0;

	block = get_nodes_block(store);

	/* check if the block is post dominated by Phi-block
	   and has no exception exit */
	bl_info = (block_info_t*)get_irn_link(block);
	if (bl_info->flags & BLOCK_HAS_EXC)
		return 0;

	phi_block = get_nodes_block(phi);
	if (! block_strictly_postdominates(phi_block, block))
		return 0;

	/* this is the address of the store */
	ptr  = get_Store_ptr(store);
	mode = get_irn_mode(get_Store_value(store));
	info = (ldst_info_t*)get_irn_link(store);
	exc  = info->exc_block;

	for (i = 1; i < n; ++i) {
		ir_node *pred = get_Phi_pred(phi, i);

		if (has_multiple_users(pred))
			return 0;

		pred = skip_Proj(pred);
		if (!is_Store(pred))
			return 0;

		if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
			return 0;

		info = (ldst_info_t*)get_irn_link(pred);

		/* check, if all stores have the same exception flow */
		if (exc != info->exc_block)
			return 0;

		block = get_nodes_block(pred);

		/* check if the block is post dominated by Phi-block
		   and has no exception exit. Note that block must be different from
		   Phi-block, else we would move a Store from the end of a block to its
		   successor. */
		bl_info = (block_info_t*)get_irn_link(block);
		if (bl_info->flags & BLOCK_HAS_EXC)
			return 0;
		if (block == phi_block || ! block_postdominates(phi_block, block))
			return 0;
	}

	/*
	 * ok, when we are here, we found all predecessors of a Phi that
	 * are Stores to the same address and size. That means whatever
	 * we do before we enter the block of the Phi, we do a Store.
	 * So, we can move the Store to the current block:
	 *
	 *   val1    val2    val3          val1  val2  val3
	 *    |       |       |               \    |    /
	 * | Str | | Str | | Str |            \   |   /
	 *      \     |     /                   PhiData
	 *       \    |    /                       |
	 *        \   |   /                       Str
	 *           PhiM                          |
	 *                                       PhiM
	 *
	 * Is only allowed if the predecessor blocks have only one successor.
	 */

	NEW_ARR_A(ir_node *, projMs, n);
	NEW_ARR_A(ir_node *, inM, n);
	NEW_ARR_A(ir_node *, inD, n);
	NEW_ARR_A(int, idx, n);

	/* Prepare: Collect all Store nodes.  We must do this
	   first because we otherwise may lose a store when exchanging its
	   memory Proj.
	 */
	for (i = n - 1; i >= 0; --i) {
		ir_node *store;

		projMs[i] = get_Phi_pred(phi, i);
		assert(is_Proj(projMs[i]));

		store = get_Proj_pred(projMs[i]);
		info  = (ldst_info_t*)get_irn_link(store);

		inM[i] = get_Store_mem(store);
		inD[i] = get_Store_value(store);
		idx[i] = info->exc_idx;
	}
	block = get_nodes_block(phi);

	/* second step: create a new memory Phi */
	phiM = new_rd_Phi(get_irn_dbg_info(phi), block, n, inM, mode_M);

	/* third step: create a new data Phi */
	phiD = new_rd_Phi(get_irn_dbg_info(phi), block, n, inD, mode);

	/* rewire memory and kill the node */
	for (i = n - 1; i >= 0; --i) {
		ir_node *proj = projMs[i];

		if (is_Proj(proj)) {
			ir_node *store = get_Proj_pred(proj);
			exchange(proj, inM[i]);
			kill_node(store);
		}
	}

	/* fourth step: create the Store */
	store = new_rd_Store(db, block, phiM, ptr, phiD, cons_none);
#ifdef DO_CACHEOPT
	co_set_irn_name(store, co_get_irn_ident(old_store));
#endif

	projM = new_rd_Proj(NULL, store, mode_M, pn_Store_M);

	info = get_ldst_info(store, &wenv->obst);
	info->projs[pn_Store_M] = projM;

	/* fifth step: repair exception flow */
	if (exc) {
		ir_node *projX = new_rd_Proj(NULL, store, mode_X, pn_Store_X_except);

		info->projs[pn_Store_X_except] = projX;
		info->exc_block                = exc;
		info->exc_idx                  = idx[0];

		for (i = 0; i < n; ++i) {
			set_Block_cfgpred(exc, idx[i], projX);
		}

		if (n > 1) {
			/* the exception block should be optimized as some inputs are identical now */
		}

		res |= CF_CHANGED;
	}

	/* sixth step: replace old Phi */
	exchange(phi, projM);

	return res | DF_CHANGED;
} /* optimize_phi */
static int optimize_conv_load(ir_node *conv)
{
	ir_node *op = get_Conv_op(conv);
	if (!is_Proj(op))
		return 0;
	if (has_multiple_users(op))
		return 0;
	/* shrink mode of load if possible. */
	ir_node *load = get_Proj_pred(op);
	if (!is_Load(load))
		return 0;

	/* only do it if we are the only user (otherwise the risk is too
	 * great that we end up with 2 loads instead of one). */
	ir_mode *mode      = get_irn_mode(conv);
	ir_mode *load_mode = get_Load_mode(load);
	int      bits_diff
		= get_mode_size_bits(load_mode) - get_mode_size_bits(mode);
	if (mode_is_float(load_mode) || mode_is_float(mode) || bits_diff < 0)
		return 0;

	if (be_get_backend_param()->byte_order_big_endian) {
		if (bits_diff % 8 != 0)
			return 0;
		ir_graph *irg   = get_irn_irg(conv);
		ir_node  *ptr   = get_Load_ptr(load);
		ir_mode  *mode  = get_irn_mode(ptr);
		ir_node  *delta = new_r_Const_long(irg, mode, bits_diff/8);
		ir_node  *block = get_nodes_block(load);
		ir_node  *add   = new_r_Add(block, ptr, delta, mode);
		set_Load_ptr(load, add);
	}
	set_Load_mode(load, mode);
	set_irn_mode(op, mode);
	exchange(conv, op);
	return 1;
} /* optimize_conv_load */
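
/*
 * Example (sketch): Conv(Proj_res(Load(p):Is)):Bs, where the Proj has no
 * other user, becomes Proj_res(Load(p):Bs) and the Conv disappears; on a
 * big-endian target the Load address is first advanced by the
 * bits_diff / 8 = 3 truncated bytes.
 */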
 * walker, do the optimizations
 */
static void do_load_store_optimize(ir_node *n, void *env)
{
	walk_env_t *wenv = (walk_env_t*)env;

	switch (get_irn_opcode(n)) {
	case iro_Load:
		wenv->changes |= optimize_load(n);
		break;
	case iro_Store:
		wenv->changes |= optimize_store(n);
		break;
	case iro_Phi:
		wenv->changes |= optimize_phi(n, wenv);
		break;
	case iro_Conv:
		wenv->changes |= optimize_conv_load(n);
		break;
	default:
		break;
	}
} /* do_load_store_optimize */
typedef struct scc {
	ir_node *head; /**< the head of the list */
} scc;
/** A node entry. */
typedef struct node_entry {
	unsigned DFSnum;   /**< the DFS number of this node */
	unsigned low;      /**< the low number of this node */
	int      in_stack; /**< flag, set if the node is on the stack */
	ir_node  *next;    /**< link to the next node in the same scc */
	scc      *pscc;    /**< the scc of this node */
	unsigned POnum;    /**< the post order number for blocks */
} node_entry;
1586 /** A loop entry. */
1587 typedef struct loop_env {
1588 ir_nodehashmap_t map;
1589 struct obstack obst;
1590 ir_node **stack; /**< the node stack */
1591 size_t tos; /**< tos index */
1592 unsigned nextDFSnum; /**< the current DFS number */
1593 unsigned POnum; /**< current post order number */
1595 unsigned changes; /**< a bitmask of graph changes */
 * Gets the node_entry of a node
 */
static node_entry *get_irn_ne(ir_node *irn, loop_env *env)
{
	node_entry *e = ir_nodehashmap_get(node_entry, &env->map, irn);

	if (e == NULL) {
		e = OALLOC(&env->obst, node_entry);
		memset(e, 0, sizeof(*e));
		ir_nodehashmap_insert(&env->map, irn, e);
	}
	return e;
} /* get_irn_ne */
 * Push a node onto the stack.
 *
 * @param env  the loop environment
 * @param n    the node to push
 */
static void push(loop_env *env, ir_node *n)
{
	node_entry *e;

	if (env->tos == ARR_LEN(env->stack)) {
		size_t nlen = ARR_LEN(env->stack) * 2;
		ARR_RESIZE(ir_node *, env->stack, nlen);
	}
	env->stack[env->tos++] = n;
	e = get_irn_ne(n, env);
	e->in_stack = 1;
} /* push */
 * pop a node from the stack
 *
 * @param env  the loop environment
 *
 * @return  The topmost node
 */
static ir_node *pop(loop_env *env)
{
	ir_node *n = env->stack[--env->tos];
	node_entry *e = get_irn_ne(n, env);

	e->in_stack = 0;
	return n;
} /* pop */
 * Check if irn is a region constant.
 * The block or irn must strictly dominate the header block.
 *
 * @param irn           the node to check
 * @param header_block  the header block of the induction variable
 */
static int is_rc(ir_node *irn, ir_node *header_block)
{
	ir_node *block = get_nodes_block(irn);

	return (block != header_block) && block_dominates(block, header_block);
} /* is_rc */
typedef struct phi_entry phi_entry;
struct phi_entry {
	ir_node   *phi;  /**< A phi with a region const memory. */
	int       pos;   /**< The position of the region const memory */
	ir_node   *load; /**< the newly created load for this phi */
	phi_entry *next;
};
 * An entry in the avail set.
 */
typedef struct avail_entry_t {
	ir_node *ptr;  /**< the address pointer */
	ir_mode *mode; /**< the load mode */
	ir_node *load; /**< the associated Load */
} avail_entry_t;
 * Compare two avail entries.
 */
static int cmp_avail_entry(const void *elt, const void *key, size_t size)
{
	const avail_entry_t *a = (const avail_entry_t*)elt;
	const avail_entry_t *b = (const avail_entry_t*)key;
	(void) size;

	return a->ptr != b->ptr || a->mode != b->mode;
} /* cmp_avail_entry */
 * Calculate the hash value of an avail entry.
 */
static unsigned hash_cache_entry(const avail_entry_t *entry)
{
	return get_irn_idx(entry->ptr) * 9 + hash_ptr(entry->mode);
} /* hash_cache_entry */
 * Move Loads out of loops if possible.
 *
 * @param pscc  the loop described by an SCC
 * @param env   the loop environment
 */
static void move_loads_out_of_loops(scc *pscc, loop_env *env)
{
	ir_node   *phi, *load, *next, *other, *next_other;
	int       j;
	phi_entry *phi_list = NULL;
	set       *avail;

	/* collect all outer memories */
	for (phi = pscc->head; phi != NULL; phi = next) {
		node_entry *ne = get_irn_ne(phi, env);
		next = ne->next;

		/* check all memory Phi's */
		if (! is_Phi(phi))
			continue;

		assert(get_irn_mode(phi) == mode_M && "DFS return non-memory Phi");

		for (j = get_irn_arity(phi) - 1; j >= 0; --j) {
			ir_node    *pred = get_irn_n(phi, j);
			node_entry *pe   = get_irn_ne(pred, env);

			if (pe->pscc != ne->pscc) {
				/* not in the same SCC, is region const */
				phi_entry *pe = OALLOC(&env->obst, phi_entry);

				pe->phi  = phi;
				pe->pos  = j;
				pe->next = phi_list;
				phi_list = pe;
			}
		}
	}
	/* no Phis no fun */
	assert(phi_list != NULL && "DFS found a loop without Phi");

	/* for now, we cannot handle more than one input (only reducible cf) */
	if (phi_list->next != NULL)
		return;

	avail = new_set(cmp_avail_entry, 8);

	for (load = pscc->head; load; load = next) {
		ir_mode *load_mode;
		node_entry *ne = get_irn_ne(load, env);
		next = ne->next;

		if (is_Load(load)) {
			ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
			ir_node     *ptr = get_Load_ptr(load);

			/* for now, we cannot handle Loads with exceptions */
			if (info->projs[pn_Load_res] == NULL || info->projs[pn_Load_X_regular] != NULL || info->projs[pn_Load_X_except] != NULL)
				continue;

			/* for now, we can only move Load(Global) */
			if (! is_SymConst_addr_ent(ptr))
				continue;
			load_mode = get_Load_mode(load);
			for (other = pscc->head; other != NULL; other = next_other) {
				node_entry *ne = get_irn_ne(other, env);
				next_other = ne->next;

				if (is_Store(other)) {
					ir_alias_relation rel = get_alias_relation(
						get_Store_ptr(other),
						get_irn_mode(get_Store_value(other)),
						ptr, load_mode);
					/* if there might be an alias, we cannot pass this Store */
					if (rel != ir_no_alias)
						break;
				}
				/* only Phis and pure Calls are allowed here, so ignore them */
			}
			if (other == NULL) {
				ldst_info_t *ninfo = NULL;
				phi_entry   *pe;
				dbg_info    *db;

				/* yep, no aliasing Store found, Load can be moved */
				DB((dbg, LEVEL_1, "  Found a Load that could be moved: %+F\n", load));

				db = get_irn_dbg_info(load);
				for (pe = phi_list; pe != NULL; pe = pe->next) {
					int     pos   = pe->pos;
					ir_node *phi  = pe->phi;
					ir_node *blk  = get_nodes_block(phi);
					ir_node *pred = get_Block_cfgpred_block(blk, pos);
					ir_node *irn, *mem;
					avail_entry_t entry, *res;

					entry.ptr  = ptr;
					entry.mode = load_mode;
					res = set_find(avail_entry_t, avail, &entry, sizeof(entry), hash_cache_entry(&entry));
					if (res != NULL) {
						irn = res->load;
					} else {
						irn = new_rd_Load(db, pred, get_Phi_pred(phi, pos), ptr, load_mode, cons_none);
						entry.load = irn;
						(void)set_insert(avail_entry_t, avail, &entry, sizeof(entry), hash_cache_entry(&entry));
						DB((dbg, LEVEL_1, "  Created %+F in %+F\n", irn, pred));
					}
					pe->load = irn;
					ninfo = get_ldst_info(irn, &env->obst);

					ninfo->projs[pn_Load_M] = mem = new_r_Proj(irn, mode_M, pn_Load_M);
					if (res == NULL) {
						/* irn is from cache, so do not set phi pred again.
						 * There might be other Loads between phi and irn already.
						 */
						set_Phi_pred(phi, pos, mem);
					}

					ninfo->projs[pn_Load_res] = new_r_Proj(irn, load_mode, pn_Load_res);
				}

				/* now kill the old Load */
				exchange(info->projs[pn_Load_M], get_Load_mem(load));
				exchange(info->projs[pn_Load_res], ninfo->projs[pn_Load_res]);

				env->changes |= DF_CHANGED;
			}
		}
	}
	del_set(avail);
} /* move_loads_out_of_loops */
 * Process a loop SCC.
 *
 * @param pscc  the SCC
 * @param env   the loop environment
 */
static void process_loop(scc *pscc, loop_env *env)
{
	ir_node *irn, *next, *header = NULL;
	node_entry *b, *h = NULL;
	int j, only_phi, num_outside, process = 0;
	ir_node *out_rc;

	/* find the header block for this scc */
	for (irn = pscc->head; irn; irn = next) {
		node_entry *e = get_irn_ne(irn, env);
		ir_node *block = get_nodes_block(irn);

		next = e->next;
		b = get_irn_ne(block, env);

		if (header != NULL) {
			if (h->POnum < b->POnum) {
				header = block;
				h      = b;
			}
		} else {
			header = block;
			h      = b;
		}
	}

	/* check if this scc contains only Phi, Load or Store nodes */
	only_phi    = 1;
	num_outside = 0;
	out_rc      = NULL;
	for (irn = pscc->head; irn; irn = next) {
		node_entry *e = get_irn_ne(irn, env);

		next = e->next;
		switch (get_irn_opcode(irn)) {
		case iro_Call:
			if (is_Call_pure(irn)) {
				/* pure calls can be treated like loads */
				only_phi = 0;
				break;
			}
			/* non-pure calls must be handled like may-alias Stores */
			return;
		case iro_CopyB:
			/* cannot handle CopyB yet */
			return;
		case iro_Load:
			process = 1;
			if (get_Load_volatility(irn) == volatility_is_volatile) {
				/* cannot handle loops with volatile Loads */
				return;
			}
			only_phi = 0;
			break;
		case iro_Store:
			if (get_Store_volatility(irn) == volatility_is_volatile) {
				/* cannot handle loops with volatile Stores */
				return;
			}
			only_phi = 0;
			break;
		default:
			only_phi = 0;
			break;
		case iro_Phi:
			for (j = get_irn_arity(irn) - 1; j >= 0; --j) {
				ir_node *pred  = get_irn_n(irn, j);
				node_entry *pe = get_irn_ne(pred, env);

				if (pe->pscc != e->pscc) {
					/* not in the same SCC, must be a region const */
					if (! is_rc(pred, header)) {
						/* not a memory loop */
						return;
					}
					if (out_rc == NULL) {
						/* first region constant */
						out_rc = pred;
						++num_outside;
					} else if (out_rc != pred) {
						/* another region constant */
						++num_outside;
					}
				}
			}
			break;
		}
	}
	if (! process)
		return;

	/* found a memory loop */
	DB((dbg, LEVEL_2, "  Found a memory loop:\n  "));
	if (only_phi && num_outside == 1) {
		/* a phi cycle with only one real predecessor can be collapsed */
		DB((dbg, LEVEL_2, "  Found an USELESS Phi cycle:\n  "));

		for (irn = pscc->head; irn; irn = next) {
			node_entry *e = get_irn_ne(irn, env);
			next = e->next;
			exchange(irn, out_rc);
		}
		env->changes |= DF_CHANGED;
		return;
	}

#ifdef DEBUG_libfirm
	for (irn = pscc->head; irn; irn = next) {
		node_entry *e = get_irn_ne(irn, env);
		next = e->next;
		DB((dbg, LEVEL_2, " %+F,", irn));
	}
	DB((dbg, LEVEL_2, "\n"));
#endif
	move_loads_out_of_loops(pscc, env);
} /* process_loop */
 * Process a SCC.
 *
 * @param pscc  the SCC
 * @param env   the loop environment
 */
static void process_scc(scc *pscc, loop_env *env)
{
	ir_node *head = pscc->head;
	node_entry *e = get_irn_ne(head, env);

#ifdef DEBUG_libfirm
	{
		ir_node *irn, *next;

		DB((dbg, LEVEL_4, " SCC at %p:\n ", pscc));
		for (irn = pscc->head; irn; irn = next) {
			node_entry *e = get_irn_ne(irn, env);

			next = e->next;

			DB((dbg, LEVEL_4, " %+F,", irn));
		}
		DB((dbg, LEVEL_4, "\n"));
	}
#endif

	if (e->next != NULL) {
		/* this SCC has more than one member */
		process_loop(pscc, env);
	}
} /* process_scc */
 * Do Tarjan's SCC algorithm and drive load/store optimization.
 *
 * @param irn  start at this node
 * @param env  the loop environment
 */
static void dfs(ir_node *irn, loop_env *env)
{
	int i, n;
	node_entry *node = get_irn_ne(irn, env);

	mark_irn_visited(irn);

	node->DFSnum = env->nextDFSnum++;
	node->low    = node->DFSnum;
	push(env, irn);

	/* handle preds */
	if (is_Phi(irn) || is_Sync(irn)) {
		n = get_irn_arity(irn);
		for (i = 0; i < n; ++i) {
			ir_node *pred = get_irn_n(irn, i);
			node_entry *o = get_irn_ne(pred, env);

			if (!irn_visited(pred)) {
				dfs(pred, env);
				node->low = MIN(node->low, o->low);
			}
			if (o->DFSnum < node->DFSnum && o->in_stack)
				node->low = MIN(o->DFSnum, node->low);
		}
	} else if (is_fragile_op(irn)) {
		ir_node *pred = get_memop_mem(irn);
		node_entry *o = get_irn_ne(pred, env);

		if (!irn_visited(pred)) {
			dfs(pred, env);
			node->low = MIN(node->low, o->low);
		}
		if (o->DFSnum < node->DFSnum && o->in_stack)
			node->low = MIN(o->DFSnum, node->low);
	} else if (is_Proj(irn)) {
		ir_node *pred = get_Proj_pred(irn);
		node_entry *o = get_irn_ne(pred, env);

		if (!irn_visited(pred)) {
			dfs(pred, env);
			node->low = MIN(node->low, o->low);
		}
		if (o->DFSnum < node->DFSnum && o->in_stack)
			node->low = MIN(o->DFSnum, node->low);
	} else {
		/* IGNORE predecessors */
	}

	if (node->low == node->DFSnum) {
		scc *pscc = OALLOC(&env->obst, scc);
		ir_node *x;

		pscc->head = NULL;
		do {
			node_entry *e;

			x = pop(env);
			e = get_irn_ne(x, env);
			e->pscc    = pscc;
			e->next    = pscc->head;
			pscc->head = x;
		} while (x != irn);

		process_scc(pscc, env);
	}
} /* dfs */
 * Do the DFS on the memory edges of a graph.
 *
 * @param irg  the graph to process
 * @param env  the loop environment
 */
static void do_dfs(ir_graph *irg, loop_env *env)
{
	ir_node *endblk, *end;
	int i;

	inc_irg_visited(irg);

	/* visit all memory nodes */
	endblk = get_irg_end_block(irg);
	for (i = get_Block_n_cfgpreds(endblk) - 1; i >= 0; --i) {
		ir_node *pred = get_Block_cfgpred(endblk, i);

		pred = skip_Proj(pred);
		if (is_Return(pred)) {
			dfs(get_Return_mem(pred), env);
		} else if (is_Raise(pred)) {
			dfs(get_Raise_mem(pred), env);
		} else if (is_fragile_op(pred)) {
			dfs(get_memop_mem(pred), env);
		} else if (is_Bad(pred)) {
			/* ignore non-optimized block predecessor */
		} else {
			assert(0 && "Unknown EndBlock predecessor");
		}
	}

	/* visit the keep-alives */
	end = get_irg_end(irg);
	for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
		ir_node *ka = get_End_keepalive(end, i);

		if (is_Phi(ka) && !irn_visited(ka))
			dfs(ka, env);
	}
} /* do_dfs */
 * Optimize Loads/Stores in loops.
 *
 * @param irg  the graph
 */
static int optimize_loops(ir_graph *irg)
{
	loop_env env;

	env.stack      = NEW_ARR_F(ir_node *, 128);
	env.tos        = 0;
	env.nextDFSnum = 0;
	env.POnum      = 0;
	env.changes    = 0;
	ir_nodehashmap_init(&env.map);
	obstack_init(&env.obst);

	/* calculate the SCC's and drive loop optimization. */
	do_dfs(irg, &env);

	DEL_ARR_F(env.stack);
	obstack_free(&env.obst, NULL);
	ir_nodehashmap_destroy(&env.map);

	return env.changes;
} /* optimize_loops */
 * do the load store optimization
 */
void optimize_load_store(ir_graph *irg)
{
	walk_env_t env;

	assure_irg_properties(irg,
		IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE
		| IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES
		| IR_GRAPH_PROPERTY_NO_CRITICAL_EDGES
		| IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
		| IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);

	FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");

	assert(get_irg_phase_state(irg) != phase_building);
	assert(get_irg_pinned(irg) != op_pin_state_floats &&
		"LoadStore optimization needs pinned graph");

	if (get_opt_alias_analysis()) {
		assure_irp_globals_entity_usage_computed();
	}

	obstack_init(&env.obst);
	env.changes = 0;

	/* init the links, then collect Loads/Stores/Proj's in lists */
	master_visited = 0;
	irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);

	/* now we have collected enough information, optimize */
	irg_walk_graph(irg, NULL, do_load_store_optimize, &env);

	env.changes |= optimize_loops(irg);

	obstack_free(&env.obst, NULL);

	confirm_irg_properties(irg,
		env.changes
		? env.changes & CF_CHANGED
			? IR_GRAPH_PROPERTIES_NONE
			: IR_GRAPH_PROPERTIES_CONTROL_FLOW
		: IR_GRAPH_PROPERTIES_ALL);
} /* optimize_load_store */
ir_graph_pass_t *optimize_load_store_pass(const char *name)
{
	return def_graph_pass(name ? name : "ldst", optimize_load_store);
} /* optimize_load_store_pass */