/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @brief   Load/Store optimizations.
 * @author  Michael Beck
 */
#include "iroptimize.h"
#include "irgraph_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "opt_polymorphy.h"
#include "irphase_t.h"
/** The debug handle. */
DEBUG_ONLY(static firm_dbg_module_t *dbg;)

#ifdef DO_CACHEOPT
#include "cacheopt/cachesim.h"
#endif

#define IMAX(a,b)   ((a) > (b) ? (a) : (b))

#define MAX_PROJ    IMAX(IMAX(pn_Load_max, pn_Store_max), pn_Call_max)
enum changes_t {
    DF_CHANGED = 1,  /**< data flow changed */
    CF_CHANGED = 2,  /**< control flow changed */
};

/** walker environment */
typedef struct _walk_env_t {
    struct obstack obst;    /**< obstack used to allocate per-node info */
    unsigned       changes; /**< a bitmask of graph changes */
} walk_env_t;
/** A Load/Store info. */
typedef struct _ldst_info_t {
    ir_node  *projs[MAX_PROJ]; /**< list of Proj's of this node */
    ir_node  *exc_block;       /**< the exception block if available */
    int      exc_idx;          /**< predecessor index in the exception block */
    unsigned visited;          /**< visited counter for breaking loops */
} ldst_info_t;

/**
 * flags for control flow.
 */
enum block_flags_t {
    BLOCK_HAS_COND = 1, /**< Block has conditional control flow */
    BLOCK_HAS_EXC  = 2  /**< Block has exceptional control flow */
};

/** A block info. */
typedef struct _block_info_t {
    unsigned flags; /**< flags for the block */
} block_info_t;
/** the master visited flag for loop detection. */
static unsigned master_visited = 0;

#define INC_MASTER()       ++master_visited
#define MARK_NODE(info)    (info)->visited = master_visited
#define NODE_VISITED(info) (info)->visited >= master_visited
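/*
 * Illustrative usage pattern (a sketch, not a verbatim quote of the walkers
 * below): incrementing the master counter invalidates all earlier marks at
 * once, so no per-node reset is needed between walks.
 *
 *     INC_MASTER();
 *     ...
 *     if (NODE_VISITED(pred_info))
 *         break;               // reached this node before: cycle, stop
 *     MARK_NODE(pred_info);    // remember that we passed this node
 */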
/**
 * get the Load/Store info of a node
 */
static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst) {
    ldst_info_t *info = get_irn_link(node);

    if (! info) {
        info = obstack_alloc(obst, sizeof(*info));
        memset(info, 0, sizeof(*info));
        set_irn_link(node, info);
    }
    return info;
} /* get_ldst_info */

/**
 * get the Block info of a node
 */
static block_info_t *get_block_info(ir_node *node, struct obstack *obst) {
    block_info_t *info = get_irn_link(node);

    if (! info) {
        info = obstack_alloc(obst, sizeof(*info));
        memset(info, 0, sizeof(*info));
        set_irn_link(node, info);
    }
    return info;
} /* get_block_info */
/**
 * update the projection info for a Load/Store
 */
static unsigned update_projs(ldst_info_t *info, ir_node *proj)
{
    long nr = get_Proj_proj(proj);

    assert(0 <= nr && nr <= MAX_PROJ && "Wrong proj from LoadStore");

    if (info->projs[nr]) {
        /* there is already one, do CSE */
        exchange(proj, info->projs[nr]);
        return DF_CHANGED;
    }
    else {
        info->projs[nr] = proj;
        return 0;
    }
} /* update_projs */
/**
 * update the exception block info for a Load/Store node.
 *
 * @param info   the load/store info struct
 * @param block  the exception handler block for this load/store
 * @param pos    the control flow input of the block
 */
static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
{
    assert(info->exc_block == NULL && "more than one exception block found");

    info->exc_block = block;
    info->exc_idx   = pos;
    return 0;
} /* update_exc */

/** Return the number of uses of an address node */
#define get_irn_n_uses(adr) get_irn_n_edges(adr)
/**
 * walker, collects all Load/Store/Proj nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
{
    ir_opcode   opcode = get_irn_opcode(node);
    ir_node     *pred, *blk, *pred_blk;
    ldst_info_t *ldst_info;
    walk_env_t  *wenv = env;

    if (opcode == iro_Proj) {
        pred   = get_Proj_pred(node);
        opcode = get_irn_opcode(pred);

        if (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call) {
            ldst_info = get_ldst_info(pred, &wenv->obst);

            wenv->changes |= update_projs(ldst_info, node);

            /*
             * Place the Proj in the same block as its predecessor
             * Load/Store/Call. This is always legal and prevents
             * "non-SSA" form after optimizations if the Proj
             * is in the wrong block.
             */
            blk      = get_nodes_block(node);
            pred_blk = get_nodes_block(pred);
            if (blk != pred_blk) {
                wenv->changes |= DF_CHANGED;
                set_nodes_block(node, pred_blk);
            }
        }
    } else if (opcode == iro_Block) {
        int i;

        for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
            ir_node      *pred_block, *proj;
            block_info_t *bl_info;
            int          is_exc = 0;

            pred = proj = get_Block_cfgpred(node, i);

            if (is_Proj(proj)) {
                pred   = get_Proj_pred(proj);
                is_exc = get_Proj_proj(proj) == pn_Generic_X_except;
            }

            /* ignore Bad predecessors, they will be removed later */
            if (is_Bad(pred))
                continue;

            pred_block = get_nodes_block(pred);
            bl_info    = get_block_info(pred_block, &wenv->obst);

            if (is_fragile_op(pred) && is_exc)
                bl_info->flags |= BLOCK_HAS_EXC;
            else if (is_irn_forking(pred))
                bl_info->flags |= BLOCK_HAS_COND;

            opcode = get_irn_opcode(pred);
            if (is_exc && (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call)) {
                ldst_info = get_ldst_info(pred, &wenv->obst);

                wenv->changes |= update_exc(ldst_info, node, i);
            }
        }
    }
} /* collect_nodes */
/**
 * Returns an entity if the address ptr points to a constant one.
 *
 * @param ptr  the address
 *
 * @return an entity or NULL
 */
static ir_entity *find_constant_entity(ir_node *ptr)
{
    for (;;) {
        if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
            ir_entity *ent = get_SymConst_entity(ptr);
            if (variability_constant == get_entity_variability(ent))
                return ent;
            return NULL;
        } else if (is_Sel(ptr)) {
            ir_entity *ent = get_Sel_entity(ptr);
            ir_type   *tp  = get_entity_owner(ent);

            /* Do not fiddle with polymorphism. */
            if (is_Class_type(get_entity_owner(ent)) &&
                ((get_entity_n_overwrites(ent)    != 0) ||
                 (get_entity_n_overwrittenby(ent) != 0)   ) )
                return NULL;

            if (is_Array_type(tp)) {
                /* check bounds */
                int i, n;

                for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
                    ir_node *bound;
                    tarval  *tlower, *tupper;
                    ir_node *index = get_Sel_index(ptr, i);
                    tarval  *tv    = computed_value(index);

                    /* check if the index is constant */
                    if (tv == tarval_bad)
                        return NULL;

                    bound  = get_array_lower_bound(tp, i);
                    tlower = computed_value(bound);
                    bound  = get_array_upper_bound(tp, i);
                    tupper = computed_value(bound);

                    if (tlower == tarval_bad || tupper == tarval_bad)
                        return NULL;

                    if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
                        return NULL;
                    if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
                        return NULL;

                    /* ok, bounds check finished */
                }
            }

            if (variability_constant == get_entity_variability(ent))
                return ent;

            /* try next */
            ptr = get_Sel_ptr(ptr);
        } else if (is_Add(ptr)) {
            ir_node *l = get_Add_left(ptr);
            ir_node *r = get_Add_right(ptr);

            if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
                ptr = l;
            else if (get_irn_mode(r) == get_irn_mode(ptr) && is_Const(l))
                ptr = r;
            else
                return NULL;

            /* for now, we support only one addition, reassoc should fold all others */
            if (! is_SymConst(ptr) && !is_Sel(ptr))
                return NULL;
        } else if (is_Sub(ptr)) {
            ir_node *l = get_Sub_left(ptr);
            ir_node *r = get_Sub_right(ptr);

            if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
                ptr = l;
            else
                return NULL;
            /* for now, we support only one subtraction, reassoc should fold all others */
            if (! is_SymConst(ptr) && !is_Sel(ptr))
                return NULL;
        } else
            return NULL;
    }
} /* find_constant_entity */
/**
 * Return the array index of a Sel node from dimension dim.
 */
static long get_Sel_array_index_long(ir_node *n, int dim) {
    ir_node *index = get_Sel_index(n, dim);
    assert(is_Const(index));
    return get_tarval_long(get_Const_tarval(index));
} /* get_Sel_array_index_long */
/**
 * Returns the accessed component graph path for a
 * node computing an address.
 *
 * @param ptr    the node computing the address
 * @param depth  current depth in steps upward from the root
 *               of the address
 */
static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
    compound_graph_path *res = NULL;
    ir_entity           *root, *field, *ent;
    int                 path_len, pos, idx;
    tarval              *tv;
    ir_type             *tp;

    if (is_SymConst(ptr)) {
        /* a SymConst. If the depth is 0, this is an access to a global
         * entity and we don't need a component path, else we know
         * at least its length.
         */
        assert(get_SymConst_kind(ptr) == symconst_addr_ent);
        root = get_SymConst_entity(ptr);
        res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
    } else if (is_Sel(ptr)) {
        /* it's a Sel, go up until we find the root */
        res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);
        if (res == NULL)
            return NULL;

        /* fill up the step in the path at the current position */
        field    = get_Sel_entity(ptr);
        path_len = get_compound_graph_path_length(res);
        pos      = path_len - depth - 1;
        set_compound_graph_path_node(res, pos, field);

        if (is_Array_type(get_entity_owner(field))) {
            assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
            set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
        }
    } else if (is_Add(ptr)) {
        ir_node *l = get_Add_left(ptr);
        ir_node *r = get_Add_right(ptr);
        ir_mode *mode;

        if (is_Const(r)) {
            ptr = l;
            tv  = get_Const_tarval(r);
        } else {
            ptr = r;
            tv  = get_Const_tarval(l);
        }
ptr_arith:
        mode = get_tarval_mode(tv);

        /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
        if (is_Sel(ptr)) {
            field = get_Sel_entity(ptr);
        } else {
            field = get_SymConst_entity(ptr);
        }

        /* count needed entries */
        idx = 0;
        for (ent = field;;) {
            tarval  *sz, *tv_index, *tlower, *tupper;
            ir_node *bound;
            int     size;

            tp = get_entity_type(ent);
            if (! is_Array_type(tp))
                break;
            ent  = get_array_element_entity(tp);
            size = get_type_size_bytes(get_entity_type(ent));
            sz   = new_tarval_from_long(size, mode);

            tv_index = tarval_div(tv, sz);
            tv       = tarval_mod(tv, sz);

            if (tv_index == tarval_bad || tv == tarval_bad)
                return NULL;

            assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
            bound  = get_array_lower_bound(tp, 0);
            tlower = computed_value(bound);
            bound  = get_array_upper_bound(tp, 0);
            tupper = computed_value(bound);

            if (tlower == tarval_bad || tupper == tarval_bad)
                return NULL;

            if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
                return NULL;
            if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
                return NULL;

            /* ok, bounds check finished */
            ++idx;
        }
        if (! tarval_is_null(tv)) {
            /* access to some struct/union member */
            return NULL;
        }

        /* should be at least ONE array */
        if (idx == 0)
            return NULL;

        res = rec_get_accessed_path(ptr, depth + idx);
        if (res == NULL)
            return NULL;

        path_len = get_compound_graph_path_length(res);
        pos      = path_len - depth - idx;

        for (ent = field;;) {
            tarval *sz, *tv_index;
            long   index;
            int    size;

            tp = get_entity_type(ent);
            if (! is_Array_type(tp))
                break;
            ent = get_array_element_entity(tp);
            set_compound_graph_path_node(res, pos, ent);

            size = get_type_size_bytes(get_entity_type(ent));
            sz   = new_tarval_from_long(size, mode);

            tv_index = tarval_div(tv, sz);
            tv       = tarval_mod(tv, sz);

            /* worked above, should work again */
            assert(tv_index != tarval_bad && tv != tarval_bad);

            /* bounds already checked above */
            index = get_tarval_long(tv_index);
            set_compound_graph_path_array_index(res, pos, index);
            ++pos;
        }
    } else if (is_Sub(ptr)) {
        ir_node *l = get_Sub_left(ptr);
        ir_node *r = get_Sub_right(ptr);

        ptr = l;
        tv  = get_Const_tarval(r);
        tv  = tarval_neg(tv);
        goto ptr_arith;
    }
    return res;
} /* rec_get_accessed_path */
/**
 * Returns an access path or NULL. The access path is only
 * valid if the graph is in phase_high and _no_ address computation is used.
 */
static compound_graph_path *get_accessed_path(ir_node *ptr) {
    return rec_get_accessed_path(ptr, 0);
} /* get_accessed_path */
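/*
 * Illustrative example (hypothetical entities): for a global
 *
 *     struct { int a[10]; } g;
 *
 * the address of g.a[3] is computed as Sel(SymConst(g), 3), and
 * get_accessed_path() yields a compound graph path of length 1:
 * [ (entity a, array index 3) ].
 */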
typedef struct path_entry {
    ir_entity         *ent;
    struct path_entry *next;
    long              index;
} path_entry;

static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) {
    path_entry       entry, *p;
    ir_entity        *ent, *field;
    ir_initializer_t *initializer;
    tarval           *tv;
    ir_type          *tp;
    unsigned         n;

    entry.next = next;
    if (is_SymConst(ptr)) {
        /* found the root */
        ent         = get_SymConst_entity(ptr);
        initializer = get_entity_initializer(ent);
        for (p = next; p != NULL;) {
            if (initializer->kind != IR_INITIALIZER_COMPOUND)
                return NULL;
            n  = get_initializer_compound_n_entries(initializer);
            tp = get_entity_type(ent);

            if (is_Array_type(tp)) {
                ent = get_array_element_entity(tp);
                if (ent != p->ent) {
                    /* a missing [0] */
                    if (n == 0)
                        return NULL;
                    initializer = get_initializer_compound_value(initializer, 0);
                    continue;
                }
            }
            if (p->index >= (long) n)
                return NULL;
            initializer = get_initializer_compound_value(initializer, p->index);

            ent = p->ent;
            p   = p->next;
        }
        tp = get_entity_type(ent);
        while (is_Array_type(tp)) {
            ent = get_array_element_entity(tp);
            tp  = get_entity_type(ent);
            /* a missing [0] */
            n = get_initializer_compound_n_entries(initializer);
            if (n == 0)
                return NULL;
            initializer = get_initializer_compound_value(initializer, 0);
        }

        switch (initializer->kind) {
        case IR_INITIALIZER_CONST:
            return get_initializer_const_value(initializer);
        case IR_INITIALIZER_TARVAL:
        case IR_INITIALIZER_NULL:
        default:
            return NULL;
        }
    } else if (is_Sel(ptr)) {
        entry.ent = field = get_Sel_entity(ptr);
        tp = get_entity_owner(field);
        if (is_Array_type(tp)) {
            assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
            entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
        } else {
            int i, n_members = get_compound_n_members(tp);
            for (i = 0; i < n_members; ++i) {
                if (get_compound_member(tp, i) == field)
                    break;
            }
            if (i >= n_members) {
                /* not found: should NOT happen */
                return NULL;
            }
            entry.index = i;
        }
        return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
    } else if (is_Add(ptr)) {
        ir_node  *l = get_Add_left(ptr);
        ir_node  *r = get_Add_right(ptr);
        ir_mode  *mode;
        unsigned pos;

        if (is_Const(r)) {
            ptr = l;
            tv  = get_Const_tarval(r);
        } else {
            ptr = r;
            tv  = get_Const_tarval(l);
        }
ptr_arith:
        mode = get_tarval_mode(tv);

        /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
        if (is_Sel(ptr)) {
            field = get_Sel_entity(ptr);
        } else {
            field = get_SymConst_entity(ptr);
        }

        /* count needed entries */
        pos = 0;
        for (ent = field;;) {
            tp = get_entity_type(ent);
            if (! is_Array_type(tp))
                break;
            ent = get_array_element_entity(tp);
            ++pos;
        }
        /* should be at least ONE entry */
        if (pos == 0)
            return NULL;

        /* allocate the right number of entries */
        NEW_ARR_A(path_entry, p, pos);

        /* fill them up */
        pos = 0;
        for (ent = field;;) {
            unsigned size;
            tarval   *sz, *tv_index, *tlower, *tupper;
            long     index;
            ir_node  *bound;

            tp = get_entity_type(ent);
            if (! is_Array_type(tp))
                break;
            ent = get_array_element_entity(tp);
            p[pos].ent  = ent;
            p[pos].next = &p[pos + 1];

            size = get_type_size_bytes(get_entity_type(ent));
            sz   = new_tarval_from_long(size, mode);

            tv_index = tarval_div(tv, sz);
            tv       = tarval_mod(tv, sz);

            if (tv_index == tarval_bad || tv == tarval_bad)
                return NULL;

            assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
            bound  = get_array_lower_bound(tp, 0);
            tlower = computed_value(bound);
            bound  = get_array_upper_bound(tp, 0);
            tupper = computed_value(bound);

            if (tlower == tarval_bad || tupper == tarval_bad)
                return NULL;

            if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
                return NULL;
            if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
                return NULL;

            /* ok, bounds check finished */
            index = get_tarval_long(tv_index);
            p[pos].index = index;
            ++pos;
        }
        if (! tarval_is_null(tv)) {
            /* hmm, wrong access */
            return NULL;
        }
        p[pos - 1].next = next;
        return rec_find_compound_ent_value(ptr, p);
    } else if (is_Sub(ptr)) {
        ir_node *l = get_Sub_left(ptr);
        ir_node *r = get_Sub_right(ptr);

        ptr = l;
        tv  = get_Const_tarval(r);
        tv  = tarval_neg(tv);
        goto ptr_arith;
    }
    return NULL;
} /* rec_find_compound_ent_value */

static ir_node *find_compound_ent_value(ir_node *ptr) {
    return rec_find_compound_ent_value(ptr, NULL);
} /* find_compound_ent_value */
static void reduce_adr_usage(ir_node *ptr);

/**
 * Update a Load that may have lost its users.
 */
static void handle_load_update(ir_node *load) {
    ldst_info_t *info = get_irn_link(load);

    /* do NOT touch volatile loads for now */
    if (get_Load_volatility(load) == volatility_is_volatile)
        return;

    if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
        ir_node *ptr = get_Load_ptr(load);
        ir_node *mem = get_Load_mem(load);

        /* a Load whose value is neither used nor exception-checked, remove it */
        exchange(info->projs[pn_Load_M], mem);
        if (info->projs[pn_Load_X_regular])
            exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
        kill_node(load);
        reduce_adr_usage(ptr);
    }
} /* handle_load_update */
/**
 * A use of an address node has vanished. Check if it was a Proj
 * node and update the counters.
 */
static void reduce_adr_usage(ir_node *ptr) {
    if (is_Proj(ptr)) {
        if (get_irn_n_edges(ptr) <= 0) {
            /* this Proj is dead now */
            ir_node *pred = get_Proj_pred(ptr);

            if (is_Load(pred)) {
                ldst_info_t *info = get_irn_link(pred);
                info->projs[get_Proj_proj(ptr)] = NULL;

                /* this node lost its result proj, handle that */
                handle_load_update(pred);
            }
        }
    }
} /* reduce_adr_usage */
/**
 * Check if an already existing value of mode old_mode can be converted
 * into the needed mode new_mode without loss.
 */
static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode) {
    if (old_mode == new_mode)
        return 1;

    /* if both modes are two's-complement ones, we can always convert the
       stored value into the needed one. */
    if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
          get_mode_arithmetic(old_mode) == irma_twos_complement &&
          get_mode_arithmetic(new_mode) == irma_twos_complement)
        return 1;
    return 0;
} /* can_use_stored_value */
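/*
 * Illustrative examples: a stored 32-bit two's-complement value can satisfy
 * a later 16-bit two's-complement load (the caller inserts a Conv if the
 * modes differ), whereas a 16-bit stored value can never satisfy a 32-bit
 * load, and integer/float mode pairs are rejected because their arithmetic
 * differs.
 */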
/**
 * Check whether a Call is at least pure, i.e. it only reads memory.
 */
static unsigned is_Call_pure(ir_node *call) {
    ir_type *call_tp = get_Call_type(call);
    unsigned prop    = get_method_additional_properties(call_tp);

    /* check first the call type */
    if ((prop & (mtp_property_const|mtp_property_pure)) == 0) {
        /* try the called entity */
        ir_node *ptr = get_Call_ptr(call);

        if (is_Global(ptr)) {
            ir_entity *ent = get_Global_entity(ptr);

            prop = get_entity_additional_properties(ent);
        }
    }
    return (prop & (mtp_property_const|mtp_property_pure)) != 0;
} /* is_Call_pure */
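/*
 * Note: "at least pure" means the call may read memory but writes none, so
 * the memory-chain walkers below may step over such a Call exactly like
 * over a Load.
 */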
static ir_node *get_base_ptr(ir_node *ptr)
{
    while (is_Add(ptr) && is_Const(get_Add_right(ptr))) {
        ptr = get_Add_left(ptr);
    }
    return ptr;
}

static long get_base_offset(ir_node *ptr)
{
    /* TODO: long might not be enough, we should probably use some tarval thingy... */
    long offset = 0;
    while (is_Add(ptr)) {
        ir_node *right = get_Add_right(ptr);
        if (!is_Const(right))
            break;
        offset += get_tarval_long(get_Const_tarval(right));
        ptr = get_Add_left(ptr);
    }
    return offset;
}
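/*
 * Illustrative example: for ptr = Add(Add(p, Const 8), Const 4),
 * get_base_ptr() returns p and get_base_offset() returns 12, i.e. the
 * address decomposes into base p plus byte offset 12.
 */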
static int try_load_store(ir_node *load,
                          ir_node *load_base_ptr, long load_offset, ir_node *store)
{
    ldst_info_t *info;
    ir_node *store_ptr      = get_Store_ptr(store);
    ir_node *store_base_ptr = get_base_ptr(store_ptr);
    ir_node *store_value;
    ir_mode *store_mode;
    ir_node *load_ptr;
    ir_mode *load_mode;
    long    store_offset    = get_base_offset(store_ptr);
    long    load_mode_len;
    long    store_mode_len;
    long    delta;
    int     res = 0;

    if (load_base_ptr != store_base_ptr)
        return 0;

    load_mode      = get_Load_mode(load);
    load_mode_len  = get_mode_size_bytes(load_mode);
    store_mode     = get_irn_mode(get_Store_value(store));
    store_mode_len = get_mode_size_bytes(store_mode);

    delta = load_offset - store_offset;
    if (delta < 0 || delta >= store_mode_len)
        return 0;

    if (store_mode_len - delta > load_mode_len)
        return 0;

    store_value = get_Store_value(store);
    DBG_OPT_RAW(load, store_value);

    /* produce a shift to adjust offset delta */
    if (delta > 0) {
        ir_node *cnst = new_r_Const_long(current_ir_graph,
                get_irg_start_block(current_ir_graph), mode_Iu, delta * 8);
        store_value = new_r_Shr(current_ir_graph, get_nodes_block(load),
                store_value, cnst, store_mode);
    }

    /* add a Conv if needed */
    if (store_mode != load_mode) {
        store_value = new_r_Conv(current_ir_graph, get_nodes_block(load),
                store_value, load_mode);
    }

    info = get_irn_link(load);
    if (info->projs[pn_Load_M])
        exchange(info->projs[pn_Load_M], get_Load_mem(load));

    /* no exception */
    if (info->projs[pn_Load_X_except]) {
        exchange(info->projs[pn_Load_X_except], new_Bad());
        res |= CF_CHANGED;
    }
    if (info->projs[pn_Load_X_regular]) {
        exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
        res |= CF_CHANGED;
    }

    if (info->projs[pn_Load_res])
        exchange(info->projs[pn_Load_res], store_value);

    load_ptr = get_Load_ptr(load);
    kill_node(load);
    reduce_adr_usage(load_ptr);
    return res | DF_CHANGED;
} /* try_load_store */
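/*
 * Illustrative example (hypothetical offsets): given
 *
 *     Store(p, v)        with a 32-bit value v
 *     x = Load(p + 2)    as a 16-bit load, so delta = 2
 *
 * the Load is replaced by Conv(Shr(v, 16)), avoiding the memory access.
 * Whether such a byte-offset shift extracts the right bytes depends on the
 * target's byte order; this sketch only shows the mechanics of the delta
 * shift above.
 */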
/**
 * Follow the memory chain as long as there are only Loads,
 * alias-free Stores, and constant Calls and try to replace the
 * current Load by a previous one.
 * Note that in unreachable loops it might happen that we reach the
 * Load again, or fall into a cycle.
 * We break such cycles using a special visited flag.
 *
 * INC_MASTER() must be called before diving into the chain.
 */
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
    unsigned    res = 0;
    ldst_info_t *info = get_irn_link(load);
    ir_node     *pred;
    ir_node     *ptr         = get_Load_ptr(load);
    ir_node     *mem         = get_Load_mem(load);
    ir_mode     *load_mode   = get_Load_mode(load);
    ir_node     *base_ptr    = get_base_ptr(ptr);
    long        load_offset  = get_base_offset(ptr);
    for (pred = curr; load != pred; ) {
        ldst_info_t *pred_info = get_irn_link(pred);

        /*
         * a Load immediately after a Store -- a read after write.
         * We may remove the Load, if both Load & Store do not have an
         * exception handler OR they are in the same MacroBlock. In the latter
         * case the Load cannot throw an exception when the previous Store was
         * quiet.
         *
         * Why do we need to check for the Store exception? If the Store cannot
         * be executed (ROM) the exception handler might simply jump into
         * the Load's MacroBlock :-(
         * We could make it a little bit better if we would know that the
         * exception handler of the Store jumps directly to the end...
         */
        if (is_Store(pred) && ((pred_info->projs[pn_Store_X_except] == NULL
                && info->projs[pn_Load_X_except] == NULL)
                || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)))
        {
            unsigned changes
                = try_load_store(load, base_ptr, load_offset, pred);
            if (changes != 0)
                return res | changes;
        } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
                   can_use_stored_value(get_Load_mode(pred), load_mode)) {
            /*
             * a Load after a Load -- a read after read.
             * We may remove the second Load, if it does not have an exception handler
             * OR they are in the same MacroBlock. In the latter case the Load cannot
             * throw an exception when the previous Load was quiet.
             *
             * Here, there is no need to check if the previous Load has an exception
             * handler because they would have exactly the same exception...
             */
            if (info->projs[pn_Load_X_except] == NULL || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)) {
                ir_node *value;

                DBG_OPT_RAR(load, pred);

                /* the result is used */
                if (info->projs[pn_Load_res]) {
                    if (pred_info->projs[pn_Load_res] == NULL) {
                        /* create a new Proj again */
                        pred_info->projs[pn_Load_res] = new_r_Proj(current_ir_graph, get_nodes_block(pred), pred, get_Load_mode(pred), pn_Load_res);
                    }
                    value = pred_info->projs[pn_Load_res];

                    /* add a Conv if needed */
                    if (get_Load_mode(pred) != load_mode) {
                        value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
                    }

                    exchange(info->projs[pn_Load_res], value);
                }

                if (info->projs[pn_Load_M])
                    exchange(info->projs[pn_Load_M], mem);

                /* no exception */
                if (info->projs[pn_Load_X_except]) {
                    exchange(info->projs[pn_Load_X_except], new_Bad());
                    res |= CF_CHANGED;
                }
                if (info->projs[pn_Load_X_regular]) {
                    exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
                    res |= CF_CHANGED;
                }

                kill_node(load);
                reduce_adr_usage(ptr);
                return res | DF_CHANGED;
            }
        }
        if (is_Store(pred)) {
            /* check if we can pass through this store */
            ir_alias_relation rel = get_alias_relation(
                current_ir_graph,
                get_Store_ptr(pred),
                get_irn_mode(get_Store_value(pred)),
                ptr, load_mode);
            /* if there might be an alias, we cannot pass this Store */
            if (rel != ir_no_alias)
                break;
            pred = skip_Proj(get_Store_mem(pred));
        } else if (is_Load(pred)) {
            pred = skip_Proj(get_Load_mem(pred));
        } else if (is_Call(pred)) {
            if (is_Call_pure(pred)) {
                /* The called graph is at least pure, so there are no Stores
                   in it. We can handle it like a Load and skip it. */
                pred = skip_Proj(get_Call_mem(pred));
            } else {
                /* there might be Stores in the graph, stop here */
                break;
            }
        } else {
            /* follow only Load chains */
            break;
        }

        /* check for cycles */
        if (NODE_VISITED(pred_info))
            break;
        MARK_NODE(pred_info);
    }

    if (is_Sync(pred)) {
        int i;

        /* handle all Sync predecessors */
        for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
            res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
        }
    }

    return res;
} /* follow_Mem_chain */
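/*
 * Illustrative chain walk (hypothetical node names): for a memory chain
 *
 *     curr = Store_b -> Load_a -> ... -> load
 *
 * we first try to forward Store_b's value into the Load; if that fails but
 * Store_b provably cannot alias, we skip to Load_a, and so on until the
 * chain leaves the Load/Store/pure-Call region or cycles back to the Load
 * itself.
 */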
/**
 * Check if we can replace the load by a given const from
 * the const code irg.
 */
ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c) {
    ir_mode *c_mode = get_irn_mode(c);
    ir_mode *l_mode = get_Load_mode(load);
    ir_node *res    = NULL;

    if (c_mode != l_mode) {
        /* check, if the mode matches OR can be easily converted into it */
        if (is_reinterpret_cast(c_mode, l_mode)) {
            /* we can safely cast */
            dbg_info *dbg   = get_irn_dbg_info(load);
            ir_node  *block = get_nodes_block(load);

            /* copy the value from the const code irg and cast it */
            res = copy_const_value(dbg, c);
            res = new_rd_Conv(dbg, current_ir_graph, block, res, l_mode);
        }
    } else {
        /* copy the value from the const code irg */
        res = copy_const_value(get_irn_dbg_info(load), c);
    }
    return res;
} /* can_replace_load_by_const */
/**
 * optimize a Load
 *
 * @param load  the Load node
 */
static unsigned optimize_load(ir_node *load)
{
    ldst_info_t *info = get_irn_link(load);
    ir_node     *mem, *ptr, *value;
    ir_entity   *ent;
    unsigned    res = 0;

    /* do NOT touch volatile loads for now */
    if (get_Load_volatility(load) == volatility_is_volatile)
        return 0;

    /* the address of the load to be optimized */
    ptr = get_Load_ptr(load);

    /*
     * Check if we can remove the exception from a Load:
     * This can be done, if the address is from a Sel(Alloc) and
     * the Sel type is a subtype of the allocated type.
     *
     * This optimizes some often used OO constructs,
     * like x = new O; x->t;
     */
    if (info->projs[pn_Load_X_except]) {
        ir_node *addr = ptr;

        /* find base address */
        while (is_Sel(addr))
            addr = get_Sel_ptr(addr);
        if (is_Alloc(skip_Proj(skip_Cast(addr)))) {
            /* simple case: a direct load after an Alloc. Firm's Alloc throws
             * an exception in case of out-of-memory. So, there is no way for an
             * exception in this Load.
             * This code is constructed by the "exception lowering" in the Jack compiler.
             */
            exchange(info->projs[pn_Load_X_except], new_Bad());
            info->projs[pn_Load_X_except] = NULL;
            exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
            info->projs[pn_Load_X_regular] = NULL;
            res |= CF_CHANGED;
        }
    }

    /* The mem of the Load. Must still be returned after optimization. */
    mem = get_Load_mem(load);

    if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
        /* a Load whose value is neither used nor exception-checked, remove it */
        exchange(info->projs[pn_Load_M], mem);

        if (info->projs[pn_Load_X_regular]) {
            /* should not happen, but if it does, remove it */
            exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
            res |= CF_CHANGED;
        }
        kill_node(load);
        reduce_adr_usage(ptr);
        return res | DF_CHANGED;
    }
    /* Load from a constant polymorphic field, where we can resolve
       polymorphism */
    value = transform_polymorph_Load(load);
    if (value == load) {
        value = NULL;
        /* check if we can determine the entity that will be loaded */
        ent = find_constant_entity(ptr);
        if (ent != NULL) {
            if ((allocation_static == get_entity_allocation(ent)) &&
                (visibility_external_allocated != get_entity_visibility(ent))) {
                /* a static allocation that is not external: there should be NO exception
                 * when loading even if we cannot replace the load itself. */

                /* no exception, clear the info field as it might be checked later again */
                if (info->projs[pn_Load_X_except]) {
                    exchange(info->projs[pn_Load_X_except], new_Bad());
                    info->projs[pn_Load_X_except] = NULL;
                    res |= CF_CHANGED;
                }
                if (info->projs[pn_Load_X_regular]) {
                    exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
                    info->projs[pn_Load_X_regular] = NULL;
                    res |= CF_CHANGED;
                }

                if (variability_constant == get_entity_variability(ent)) {
                    if (is_atomic_entity(ent)) {
                        /* Might not be atomic after lowering of Sels. In this
                           case we could also load, but it's more complicated. */
                        /* simpler case: we load the content of a constant value:
                         * replace it by the constant itself */
                        value = get_atomic_ent_value(ent);
                    } else {
                        if (ent->has_initializer) {
                            /* new style initializer */
                            value = find_compound_ent_value(ptr);
                        } else {
                            /* old style initializer */
                            compound_graph_path *path = get_accessed_path(ptr);

                            if (path != NULL) {
                                assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));

                                value = get_compound_ent_value_by_path(ent, path);
                                free_compound_graph_path(path);
                            }
                        }
                    }
                    if (value != NULL)
                        value = can_replace_load_by_const(load, value);
                }
            }
        }
    }
    if (value != NULL) {
        /* we completely replace the load by this value */
        if (info->projs[pn_Load_X_except]) {
            exchange(info->projs[pn_Load_X_except], new_Bad());
            info->projs[pn_Load_X_except] = NULL;
            res |= CF_CHANGED;
        }
        if (info->projs[pn_Load_X_regular]) {
            exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
            info->projs[pn_Load_X_regular] = NULL;
            res |= CF_CHANGED;
        }
        if (info->projs[pn_Load_M]) {
            exchange(info->projs[pn_Load_M], mem);
            res |= DF_CHANGED;
        }
        if (info->projs[pn_Load_res]) {
            exchange(info->projs[pn_Load_res], value);
            res |= DF_CHANGED;
        }
        kill_node(load);
        reduce_adr_usage(ptr);
        return res;
    }

    /* Check, if the address of this load is used more than once.
     * If not, this load cannot be removed in any case. */
    if (get_irn_n_uses(ptr) <= 1 && get_irn_n_uses(get_base_ptr(ptr)) <= 1)
        return res;

    /*
     * follow the memory chain as long as there are only Loads
     * and try to replace the current Load or Store by a previous one.
     * Note that in unreachable loops it might happen that we reach the
     * Load again, or fall into a cycle.
     * We break such cycles using a special visited flag.
     */
    INC_MASTER();
    res = follow_Mem_chain(load, skip_Proj(mem));
    return res;
} /* optimize_load */
/**
 * Check whether a value of mode new_mode would completely overwrite a value
 * of mode old_mode in memory.
 */
static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
{
    return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
} /* is_completely_overwritten */
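/*
 * Example: a later 32-bit Store to the address of an earlier 16-bit Store
 * overwrites it completely, so the earlier Store may be killed; the reverse
 * ordering does not.
 */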
/**
 * follow the memory chain as long as there are only Loads and alias-free Stores.
 *
 * INC_MASTER() must be called before diving into the chain.
 */
static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
    unsigned    res = 0;
    ldst_info_t *info = get_irn_link(store);
    ir_node     *pred;
    ir_node     *ptr   = get_Store_ptr(store);
    ir_node     *mem   = get_Store_mem(store);
    ir_node     *value = get_Store_value(store);
    ir_mode     *mode  = get_irn_mode(value);
    ir_node     *block = get_nodes_block(store);
    ir_node     *mblk  = get_Block_MacroBlock(block);
    for (pred = curr; pred != store;) {
        ldst_info_t *pred_info = get_irn_link(pred);

        /*
         * BEWARE: one might think that checking the modes is useless, because
         * if the pointers are identical, they refer to the same object.
         * This is only true in strongly typed languages, not in C where the
         * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
         * However, if the mode that is written has a size greater than or
         * equal to the old one, the old value is completely overwritten and
         * can be killed ...
         */
        if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
            get_nodes_MacroBlock(pred) == mblk &&
            is_completely_overwritten(get_irn_mode(get_Store_value(pred)), mode)) {
            /*
             * a Store after a Store in the same MacroBlock -- a write after write.
             * We may remove the first Store, if it does not have an exception handler.
             *
             * TODO: What, if both have the same exception handler ???
             */
            if (get_Store_volatility(pred) != volatility_is_volatile && !pred_info->projs[pn_Store_X_except]) {
                DBG_OPT_WAW(pred, store);
                exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
                kill_node(pred);
                reduce_adr_usage(ptr);
                return DF_CHANGED;
            }
        } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
                   value == pred_info->projs[pn_Load_res]) {
            /*
             * a Store of a value just loaded from the same address
             * -- a write after read.
             * We may remove the Store, if it does not have an exception
             * handler.
             */
            if (! info->projs[pn_Store_X_except]) {
                DBG_OPT_WAR(store, pred);
                exchange(info->projs[pn_Store_M], mem);
                kill_node(store);
                reduce_adr_usage(ptr);
                return DF_CHANGED;
            }
        }
        if (is_Store(pred)) {
            /* check if we can pass through this store */
            ir_alias_relation rel = get_alias_relation(
                current_ir_graph,
                get_Store_ptr(pred),
                get_irn_mode(get_Store_value(pred)),
                ptr, mode);
            /* if there might be an alias, we cannot pass this Store */
            if (rel != ir_no_alias)
                break;
            pred = skip_Proj(get_Store_mem(pred));
        } else if (is_Load(pred)) {
            ir_alias_relation rel = get_alias_relation(
                current_ir_graph, get_Load_ptr(pred), get_Load_mode(pred),
                ptr, mode);
            if (rel != ir_no_alias)
                break;

            pred = skip_Proj(get_Load_mem(pred));
        } else {
            /* follow only Load chains */
            break;
        }

        /* check for cycles */
        if (NODE_VISITED(pred_info))
            break;
        MARK_NODE(pred_info);
    }

    if (is_Sync(pred)) {
        int i;

        /* handle all Sync predecessors */
        for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
            res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
        }
    }
    return res;
} /* follow_Mem_chain_for_Store */
/**
 * optimize a Store
 *
 * @param store  the Store node
 */
static unsigned optimize_store(ir_node *store) {
    ir_node *ptr, *mem;

    if (get_Store_volatility(store) == volatility_is_volatile)
        return 0;

    ptr = get_Store_ptr(store);

    /* Check, if the address of this Store is used more than once.
     * If not, this Store cannot be removed in any case. */
    if (get_irn_n_uses(ptr) <= 1)
        return 0;

    mem = get_Store_mem(store);

    /* follow the memory chain as long as there are only Loads */
    INC_MASTER();

    return follow_Mem_chain_for_Store(store, skip_Proj(mem));
} /* optimize_store */
/**
 * walker, optimizes Phi after Stores to identical places:
 * Does the following optimization:
 *
 *   val1   val2   val3          val1  val2  val3
 *    |      |      |               \    |    /
 *  Store  Store  Store              \   |   /
 *     \     |     /                  PhiData
 *      \    |    /                      |
 *       \   |   /                     Store
 *        PhiM                           |
 *                                     PhiM
 *
 * This reduces the number of stores and allows for predicated execution.
 * Moves Stores back to the end of a function which may be bad.
 *
 * This is only possible if the predecessor blocks have only one successor.
 */
static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
{
    int i, n;
    ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
    ir_mode *mode;
    ir_node **inM, **inD, **projMs;
    int *idx;
    dbg_info *db = NULL;
    ldst_info_t *info;
    block_info_t *bl_info;
    unsigned res = 0;

    /* Must be a memory Phi */
    if (get_irn_mode(phi) != mode_M)
        return 0;

    n = get_Phi_n_preds(phi);
    if (n <= 0)
        return 0;

    /* must be only one user */
    projM = get_Phi_pred(phi, 0);
    if (get_irn_n_edges(projM) != 1)
        return 0;

    store = skip_Proj(projM);
    old_store = store;
    if (!is_Store(store))
        return 0;

    block = get_nodes_block(store);

    /* abort on dead blocks */
    if (is_Block_dead(block))
        return 0;

    /* check if the block is post-dominated by the Phi block
       and has no exception exit */
    bl_info = get_irn_link(block);
    if (bl_info->flags & BLOCK_HAS_EXC)
        return 0;

    phi_block = get_nodes_block(phi);
    if (! block_strictly_postdominates(phi_block, block))
        return 0;

    /* this is the address of the store */
    ptr  = get_Store_ptr(store);
    mode = get_irn_mode(get_Store_value(store));
    info = get_irn_link(store);
    exc  = info->exc_block;
    for (i = 1; i < n; ++i) {
        ir_node *pred = get_Phi_pred(phi, i);

        if (get_irn_n_edges(pred) != 1)
            return 0;

        pred = skip_Proj(pred);
        if (!is_Store(pred))
            return 0;

        if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
            return 0;

        info = get_irn_link(pred);

        /* check, if all stores have the same exception flow */
        if (exc != info->exc_block)
            return 0;

        /* abort on dead blocks */
        block = get_nodes_block(pred);
        if (is_Block_dead(block))
            return 0;

        /* check if the block is post-dominated by the Phi block
           and has no exception exit. Note that block must be different from
           the Phi block, else we would move a Store from the end of a block
           to its start... */
        bl_info = get_irn_link(block);
        if (bl_info->flags & BLOCK_HAS_EXC)
            return 0;
        if (block == phi_block || ! block_postdominates(phi_block, block))
            return 0;
    }
    /*
     * ok, when we are here, we found all predecessors of a Phi that
     * are Stores to the same address and size. That means whatever
     * we do before we enter the block of the Phi, we do a Store.
     * So, we can move the Store to the current block:
     *
     *   val1   val2   val3          val1  val2  val3
     *    |      |      |               \    |    /
     *  Store  Store  Store              \   |   /
     *     \     |     /                  PhiData
     *      \    |    /                      |
     *       \   |   /                     Store
     *        PhiM                           |
     *                                     PhiM
     *
     * Is only allowed if the predecessor blocks have only one successor.
     */

    NEW_ARR_A(ir_node *, projMs, n);
    NEW_ARR_A(ir_node *, inM, n);
    NEW_ARR_A(ir_node *, inD, n);
    NEW_ARR_A(int, idx, n);

    /* Prepare: Collect all Store nodes. We must do this
       first because we otherwise may lose a Store when exchanging its
       memory Proj. */
    for (i = n - 1; i >= 0; --i) {
        ir_node *store;

        projMs[i] = get_Phi_pred(phi, i);
        assert(is_Proj(projMs[i]));

        store = get_Proj_pred(projMs[i]);
        info  = get_irn_link(store);

        inM[i] = get_Store_mem(store);
        inD[i] = get_Store_value(store);
        idx[i] = info->exc_idx;
    }
    block = get_nodes_block(phi);

    /* second step: create a new memory Phi */
    phiM = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inM, mode_M);

    /* third step: create a new data Phi */
    phiD = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inD, mode);

    /* rewire memory and kill the node */
    for (i = n - 1; i >= 0; --i) {
        ir_node *proj = projMs[i];

        if (is_Proj(proj)) {
            ir_node *store = get_Proj_pred(proj);
            exchange(proj, inM[i]);
            kill_node(store);
        }
    }

    /* fourth step: create the Store */
    store = new_rd_Store(db, current_ir_graph, block, phiM, ptr, phiD);
#ifdef DO_CACHEOPT
    co_set_irn_name(store, co_get_irn_ident(old_store));
#endif

    projM = new_rd_Proj(NULL, current_ir_graph, block, store, mode_M, pn_Store_M);

    info = get_ldst_info(store, &wenv->obst);
    info->projs[pn_Store_M] = projM;

    /* fifth step: repair exception flow */
    if (exc) {
        ir_node *projX = new_rd_Proj(NULL, current_ir_graph, block, store, mode_X, pn_Store_X_except);

        info->projs[pn_Store_X_except] = projX;
        info->exc_block                = exc;
        info->exc_idx                  = idx[0];

        for (i = 0; i < n; ++i) {
            set_Block_cfgpred(exc, idx[i], projX);
        }

        /* the exception block should be optimized as some inputs are identical now */

        res |= CF_CHANGED;
    }

    /* sixth step: replace old Phi */
    exchange(phi, projM);

    return res | DF_CHANGED;
} /* optimize_phi */
/**
 * walker, do the optimizations
 */
static void do_load_store_optimize(ir_node *n, void *env) {
    walk_env_t *wenv = env;

    switch (get_irn_opcode(n)) {
    case iro_Load:
        wenv->changes |= optimize_load(n);
        break;
    case iro_Store:
        wenv->changes |= optimize_store(n);
        break;
    case iro_Phi:
        wenv->changes |= optimize_phi(n, wenv);
        break;
    default:
        ;
    }
} /* do_load_store_optimize */
/** A scc. */
typedef struct scc {
    ir_node *head; /**< the head of the list */
} scc;

/** A node entry. */
typedef struct node_entry {
    unsigned DFSnum;   /**< the DFS number of this node */
    unsigned low;      /**< the low number of this node */
    ir_node  *header;  /**< the header of this node */
    int      in_stack; /**< flag, set if the node is on the stack */
    ir_node  *next;    /**< link to the next node in the same scc */
    scc      *pscc;    /**< the scc of this node */
    unsigned POnum;    /**< the post order number for blocks */
} node_entry;

/** A loop entry. */
typedef struct loop_env {
    ir_phase ph;         /**< the phase object */
    ir_node  **stack;    /**< the node stack */
    int      tos;        /**< top-of-stack index */
    unsigned nextDFSnum; /**< the current DFS number */
    unsigned POnum;      /**< current post order number */

    unsigned changes;    /**< a bitmask of graph changes */
} loop_env;
/**
 * Gets the node_entry of a node.
 */
static node_entry *get_irn_ne(ir_node *irn, loop_env *env) {
    ir_phase   *ph = &env->ph;
    node_entry *e  = phase_get_irn_data(&env->ph, irn);

    if (! e) {
        e = phase_alloc(ph, sizeof(*e));
        memset(e, 0, sizeof(*e));
        phase_set_irn_data(ph, irn, e);
    }
    return e;
} /* get_irn_ne */
/**
 * Push a node onto the stack.
 *
 * @param env  the loop environment
 * @param n    the node to push
 */
static void push(loop_env *env, ir_node *n) {
    node_entry *e;

    if (env->tos == ARR_LEN(env->stack)) {
        int nlen = ARR_LEN(env->stack) * 2;
        ARR_RESIZE(ir_node *, env->stack, nlen);
    }
    env->stack[env->tos++] = n;
    e = get_irn_ne(n, env);
    e->in_stack = 1;
} /* push */

/**
 * Pop a node from the stack.
 *
 * @param env  the loop environment
 *
 * @return  The topmost node
 */
static ir_node *pop(loop_env *env) {
    ir_node    *n = env->stack[--env->tos];
    node_entry *e = get_irn_ne(n, env);

    e->in_stack = 0;
    return n;
} /* pop */
/**
 * Check if irn is a region constant.
 * The block of irn must strictly dominate the header block.
 *
 * @param irn           the node to check
 * @param header_block  the header block of the induction variable
 */
static int is_rc(ir_node *irn, ir_node *header_block) {
    ir_node *block = get_nodes_block(irn);

    return (block != header_block) && block_dominates(block, header_block);
} /* is_rc */
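/*
 * Example: a Const, or any node computed in a block that strictly dominates
 * the loop header, has the same value on every iteration and is therefore
 * treated as a region constant for that loop.
 */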
typedef struct phi_entry phi_entry;
struct phi_entry {
    ir_node   *phi;  /**< A phi with a region const memory. */
    int       pos;   /**< The position of the region const memory */
    ir_node   *load; /**< the newly created load for this phi */
    phi_entry *next;
};
/**
 * Move Loads out of loops if possible.
 *
 * @param pscc  the loop described by an SCC
 * @param env   the loop environment
 */
static void move_loads_out_of_loops(scc *pscc, loop_env *env) {
    ir_node   *phi, *load, *next, *other, *next_other;
    ir_entity *ent;
    int       j;
    phi_entry *phi_list = NULL;

    /* collect all outer memories */
    for (phi = pscc->head; phi != NULL; phi = next) {
        node_entry *ne = get_irn_ne(phi, env);
        next = ne->next;

        /* check all memory Phi's */
        if (! is_Phi(phi))
            continue;

        assert(get_irn_mode(phi) == mode_M && "DFS return non-memory Phi");

        for (j = get_irn_arity(phi) - 1; j >= 0; --j) {
            ir_node    *pred = get_irn_n(phi, j);
            node_entry *pe   = get_irn_ne(pred, env);

            if (pe->pscc != ne->pscc) {
                /* not in the same SCC, is region const */
                phi_entry *pe = phase_alloc(&env->ph, sizeof(*pe));

                pe->phi  = phi;
                pe->pos  = j;
                pe->next = phi_list;
                phi_list = pe;
            }
        }
    }
    /* no Phis no fun */
    assert(phi_list != NULL && "DFS found a loop without Phi");

    for (load = pscc->head; load; load = next) {
        ir_mode    *load_mode;
        node_entry *ne = get_irn_ne(load, env);
        next = ne->next;

        if (is_Load(load)) {
            ldst_info_t *info = get_irn_link(load);
            ir_node     *ptr  = get_Load_ptr(load);

            /* for now, we cannot handle Loads with exceptions */
            if (info->projs[pn_Load_res] == NULL || info->projs[pn_Load_X_regular] != NULL || info->projs[pn_Load_X_except] != NULL)
                continue;

            /* for now, we can only handle Load(Global) */
            if (! is_Global(ptr))
                continue;
            ent       = get_Global_entity(ptr);
            load_mode = get_Load_mode(load);
            for (other = pscc->head; other != NULL; other = next_other) {
                node_entry *ne = get_irn_ne(other, env);
                next_other = ne->next;

                if (is_Store(other)) {
                    ir_alias_relation rel = get_alias_relation(
                        current_ir_graph,
                        get_Store_ptr(other),
                        get_irn_mode(get_Store_value(other)),
                        ptr, load_mode);
                    /* if there might be an alias, we cannot pass this Store */
                    if (rel != ir_no_alias)
                        break;
                }
                /* only pure Calls are allowed here, so ignore them */
            }
            if (other == NULL) {
                ldst_info_t *ninfo;
                phi_entry   *pe;
                dbg_info    *db;

                /* for now, we cannot handle more than one input */
                if (phi_list->next != NULL)
                    return;

                /* yep, no aliasing Store found, Load can be moved */
                DB((dbg, LEVEL_1, "  Found a Load that could be moved: %+F\n", load));

                db = get_irn_dbg_info(load);
                for (pe = phi_list; pe != NULL; pe = pe->next) {
                    int     pos   = pe->pos;
                    ir_node *phi  = pe->phi;
                    ir_node *blk  = get_nodes_block(phi);
                    ir_node *pred = get_Block_cfgpred_block(blk, pos);
                    ir_node *irn, *mem;

                    pe->load = irn = new_rd_Load(db, current_ir_graph, pred, get_Phi_pred(phi, pos), ptr, load_mode);
                    ninfo = get_ldst_info(irn, phase_obst(&env->ph));

                    ninfo->projs[pn_Load_M] = mem = new_r_Proj(current_ir_graph, pred, irn, mode_M, pn_Load_M);
                    set_Phi_pred(phi, pos, mem);

                    ninfo->projs[pn_Load_res] = new_r_Proj(current_ir_graph, pred, irn, load_mode, pn_Load_res);

                    DB((dbg, LEVEL_1, "  Created %+F in %+F\n", irn, pred));
                }

                /* now kill the old Load */
                exchange(info->projs[pn_Load_M], get_Load_mem(load));
                exchange(info->projs[pn_Load_res], ninfo->projs[pn_Load_res]);

                env->changes |= DF_CHANGED;
            }
        }
    }
} /* move_loads_out_of_loops */
/**
 * Process a loop SCC.
 *
 * @param pscc  the SCC
 * @param env   the loop environment
 */
static void process_loop(scc *pscc, loop_env *env) {
    ir_node *irn, *next, *header = NULL;
    node_entry *b, *h = NULL;
    int j, only_phi, num_outside, process = 0;
    ir_node *out_rc;

    /* find the header block for this scc */
    for (irn = pscc->head; irn; irn = next) {
        node_entry *e = get_irn_ne(irn, env);
        ir_node *block = get_nodes_block(irn);

        next = e->next;
        b = get_irn_ne(block, env);

        if (header != NULL) {
            if (h->POnum < b->POnum) {
                header = block;
                h      = b;
            }
        } else {
            header = block;
            h      = b;
        }
    }

    /* check if this scc contains only Phi, Load or Store nodes */
    only_phi    = 1;
    num_outside = 0;
    out_rc      = NULL;
    for (irn = pscc->head; irn; irn = next) {
        node_entry *e = get_irn_ne(irn, env);

        next = e->next;
        switch (get_irn_opcode(irn)) {
        case iro_Call:
            if (is_Call_pure(irn)) {
                /* pure calls can be treated like loads */
                only_phi = 0;
                break;
            }
            /* non-pure calls must be handled like may-alias Stores */
            goto fail;
        case iro_CopyB:
            /* cannot handle CopyB yet */
            goto fail;
        case iro_Load:
            process = 1;
            if (get_Load_volatility(irn) == volatility_is_volatile) {
                /* cannot handle loops with volatile Loads */
                goto fail;
            }
            only_phi = 0;
            break;
        case iro_Store:
            if (get_Store_volatility(irn) == volatility_is_volatile) {
                /* cannot handle loops with volatile Stores */
                goto fail;
            }
            only_phi = 0;
            break;
        default:
            only_phi = 0;
            break;
        case iro_Phi:
            for (j = get_irn_arity(irn) - 1; j >= 0; --j) {
                ir_node *pred  = get_irn_n(irn, j);
                node_entry *pe = get_irn_ne(pred, env);

                if (pe->pscc != e->pscc) {
                    /* not in the same SCC, must be a region const */
                    if (! is_rc(pred, header)) {
                        /* not a memory loop */
                        goto fail;
                    }
                    if (out_rc == NULL) {
                        /* first region constant */
                        out_rc = pred;
                        ++num_outside;
                    } else if (out_rc != pred) {
                        /* another region constant */
                        ++num_outside;
                    }
                }
            }
            break;
        }
    }
    if (! process)
        goto fail;

    /* found a memory loop */
    DB((dbg, LEVEL_2, "  Found a memory loop:\n  "));
    if (only_phi && num_outside == 1) {
        /* a phi cycle with only one real predecessor can be collapsed */
        DB((dbg, LEVEL_2, "  Found a USELESS Phi cycle:\n  "));

        for (irn = pscc->head; irn; irn = next) {
            node_entry *e = get_irn_ne(irn, env);
            next = e->next;
            e->header = NULL;
            exchange(irn, out_rc);
        }
        env->changes |= DF_CHANGED;
        return;
    }

    /* set the header for every node in this scc */
    for (irn = pscc->head; irn; irn = next) {
        node_entry *e = get_irn_ne(irn, env);
        e->header = header;
        next = e->next;
        DB((dbg, LEVEL_2, " %+F,", irn));
    }
    DB((dbg, LEVEL_2, "\n"));

    move_loads_out_of_loops(pscc, env);

fail:
    ;
} /* process_loop */
/**
 * Process a SCC.
 *
 * @param pscc  the SCC
 * @param env   the loop environment
 */
static void process_scc(scc *pscc, loop_env *env) {
    ir_node *head = pscc->head;
    node_entry *e = get_irn_ne(head, env);

#ifdef DEBUG_libfirm
    {
        ir_node *irn, *next;

        DB((dbg, LEVEL_4, " SCC at %p:\n ", pscc));
        for (irn = pscc->head; irn; irn = next) {
            node_entry *e = get_irn_ne(irn, env);

            next = e->next;

            DB((dbg, LEVEL_4, " %+F,", irn));
        }
        DB((dbg, LEVEL_4, "\n"));
    }
#endif

    if (e->next != NULL) {
        /* this SCC has more than one member */
        process_loop(pscc, env);
    }
} /* process_scc */
/**
 * Do Tarjan's SCC algorithm and drive load/store optimization.
 *
 * @param irn  start at this node
 * @param env  the loop environment
 */
static void dfs(ir_node *irn, loop_env *env)
{
    int i, n;
    node_entry *node = get_irn_ne(irn, env);

    mark_irn_visited(irn);

    node->DFSnum = env->nextDFSnum++;
    node->low    = node->DFSnum;
    push(env, irn);

    /* handle preds */
    if (is_Phi(irn) || is_Sync(irn)) {
        n = get_irn_arity(irn);
        for (i = 0; i < n; ++i) {
            ir_node *pred = get_irn_n(irn, i);
            node_entry *o = get_irn_ne(pred, env);

            if (irn_not_visited(pred)) {
                dfs(pred, env);
                node->low = MIN(node->low, o->low);
            }
            if (o->DFSnum < node->DFSnum && o->in_stack)
                node->low = MIN(o->DFSnum, node->low);
        }
    } else if (is_fragile_op(irn)) {
        ir_node *pred = get_fragile_op_mem(irn);
        node_entry *o = get_irn_ne(pred, env);

        if (irn_not_visited(pred)) {
            dfs(pred, env);
            node->low = MIN(node->low, o->low);
        }
        if (o->DFSnum < node->DFSnum && o->in_stack)
            node->low = MIN(o->DFSnum, node->low);
    } else if (is_Proj(irn)) {
        ir_node *pred = get_Proj_pred(irn);
        node_entry *o = get_irn_ne(pred, env);

        if (irn_not_visited(pred)) {
            dfs(pred, env);
            node->low = MIN(node->low, o->low);
        }
        if (o->DFSnum < node->DFSnum && o->in_stack)
            node->low = MIN(o->DFSnum, node->low);
    }
    else {
        /* IGNORE predecessors */
    }

    if (node->low == node->DFSnum) {
        scc *pscc = phase_alloc(&env->ph, sizeof(*pscc));
        ir_node *x;

        pscc->head = NULL;
        do {
            node_entry *e;

            x = pop(env);
            e = get_irn_ne(x, env);
            e->pscc    = pscc;
            e->next    = pscc->head;
            pscc->head = x;
        } while (x != irn);

        process_scc(pscc, env);
    }
} /* dfs */
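/*
 * Illustrative run: for a memory cycle Phi -> Load -> Phi, the DFS assigns
 * DFS numbers 0 and 1; the back edge propagates low = 0 to both nodes, so
 * only the Phi satisfies low == DFSnum and pops both nodes off the stack
 * as one SCC, which process_scc() then examines.
 */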
/**
 * Do the DFS on the memory edges of a graph.
 *
 * @param irg  the graph to process
 * @param env  the loop environment
 */
static void do_dfs(ir_graph *irg, loop_env *env) {
    ir_graph *rem = current_ir_graph;
    ir_node  *endblk, *end;
    int      i;

    current_ir_graph = irg;
    inc_irg_visited(irg);

    /* visit all memory nodes */
    endblk = get_irg_end_block(irg);
    for (i = get_Block_n_cfgpreds(endblk) - 1; i >= 0; --i) {
        ir_node *pred = get_Block_cfgpred(endblk, i);

        pred = skip_Proj(pred);
        if (is_Return(pred))
            dfs(get_Return_mem(pred), env);
        else if (is_Raise(pred))
            dfs(get_Raise_mem(pred), env);
        else if (is_fragile_op(pred))
            dfs(get_fragile_op_mem(pred), env);
        else {
            assert(0 && "Unknown EndBlock predecessor");
        }
    }

    /* visit the keep-alives */
    end = get_irg_end(irg);
    for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
        ir_node *ka = get_End_keepalive(end, i);

        if (is_Phi(ka) && irn_not_visited(ka))
            dfs(ka, env);
    }
    current_ir_graph = rem;
} /* do_dfs */
/**
 * Initialize new phase data. We always do this explicitly, so return NULL here.
 */
static void *init_loop_data(ir_phase *ph, const ir_node *irn, void *data) {
    (void) ph;
    (void) irn;
    (void) data;
    return NULL;
} /* init_loop_data */

/**
 * Optimize Loads/Stores in loops.
 *
 * @param irg  the graph
 */
static int optimize_loops(ir_graph *irg) {
    loop_env env;

    env.stack      = NEW_ARR_F(ir_node *, 128);
    env.tos        = 0;
    env.nextDFSnum = 0;
    env.POnum      = 0;
    env.changes    = 0;
    phase_init(&env.ph, "ldstopt", irg, PHASE_DEFAULT_GROWTH, init_loop_data, NULL);

    /* calculate the SCCs and drive loop optimization. */
    do_dfs(irg, &env);

    DEL_ARR_F(env.stack);
    phase_free(&env.ph);

    return env.changes;
} /* optimize_loops */
/*
 * do the load/store optimization
 */
void optimize_load_store(ir_graph *irg) {
    walk_env_t env;

    FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");

    assert(get_irg_phase_state(irg) != phase_building);
    assert(get_irg_pinned(irg) != op_pin_state_floats &&
        "LoadStore optimization needs pinned graph");

    /* we need landing pads */
    remove_critical_cf_edges(irg);

    /* for Phi optimization post-dominators are needed ... */
    assure_postdoms(irg);

    if (get_opt_alias_analysis()) {
        assure_irg_address_taken_computed(irg);
        assure_irp_globals_address_taken_computed();
    }

    obstack_init(&env.obst);
    env.changes = 0;

    /* init the links, then collect Loads/Stores/Projs in lists */
    irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);

    /* now we have collected enough information, optimize */
    irg_walk_graph(irg, NULL, do_load_store_optimize, &env);

    env.changes |= optimize_loops(irg);

    obstack_free(&env.obst, NULL);

    /* Handle graph state */
    if (env.changes) {
        set_irg_outs_inconsistent(irg);
    }

    if (env.changes & CF_CHANGED) {
        /* is this really needed? Yes: control flow changed, blocks might
           have Bad() predecessors. */
        set_irg_doms_inconsistent(irg);
    }
} /* optimize_load_store */