/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Load/Store optimizations.
 * @author  Michael Beck
 */
#include "iroptimize.h"
#include "irgraph_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "irnodehashmap.h"
#include "opt_manage.h"
/** The debug handle. */
DEBUG_ONLY(static firm_dbg_module_t *dbg;)

#define IMAX(a,b)   ((a) > (b) ? (a) : (b))

#define MAX_PROJ    IMAX(IMAX((long)pn_Load_max, (long)pn_Store_max), (long)pn_Call_max)
enum changes_t {
	DF_CHANGED = 1,       /**< data flow changed */
	CF_CHANGED = 2,       /**< control flow changed */
};
/** walker environment */
typedef struct walk_env_t {
	struct obstack obst;          /**< list of all stores */
	unsigned       changes;       /**< a bitmask of graph changes */
} walk_env_t;
/** A Load/Store info. */
typedef struct ldst_info_t {
	ir_node  *projs[MAX_PROJ+1];  /**< list of Proj's of this node */
	ir_node  *exc_block;          /**< the exception block if available */
	int      exc_idx;             /**< predecessor index in the exception block */
	unsigned visited;             /**< visited counter for breaking loops */
} ldst_info_t;
/**
 * flags for control flow.
 */
enum block_flags_t {
	BLOCK_HAS_COND = 1, /**< Block has conditional control flow */
	BLOCK_HAS_EXC  = 2  /**< Block has exceptional control flow */
};

/**
 * a Block info.
 */
typedef struct block_info_t {
	unsigned flags;  /**< flags for the block */
} block_info_t;
/** the master visited flag for loop detection. */
static unsigned master_visited = 0;

#define INC_MASTER()       ++master_visited
#define MARK_NODE(info)    (info)->visited = master_visited
#define NODE_VISITED(info) (info)->visited >= master_visited
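/*
 * Illustrative note (not from the original sources): the master counter
 * makes "clearing" all visited flags an O(1) operation.  After
 * INC_MASTER(), every info->visited value written during an earlier walk
 * is smaller than master_visited, so NODE_VISITED() reads as false again:
 *
 *   INC_MASTER();
 *   ...
 *   if (NODE_VISITED(pred_info))   -- seen during THIS chain walk only
 *       break;
 *   MARK_NODE(pred_info);
 */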
/**
 * get the Load/Store info of a node
 */
static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst)
{
	ldst_info_t *info = (ldst_info_t*)get_irn_link(node);

	if (info == NULL) {
		info = OALLOCZ(obst, ldst_info_t);
		set_irn_link(node, info);
	}
	return info;
}  /* get_ldst_info */
/**
 * get the Block info of a node
 */
static block_info_t *get_block_info(ir_node *node, struct obstack *obst)
{
	block_info_t *info = (block_info_t*)get_irn_link(node);

	if (info == NULL) {
		info = OALLOCZ(obst, block_info_t);
		set_irn_link(node, info);
	}
	return info;
}  /* get_block_info */
/**
 * update the projection info for a Load/Store
 */
static unsigned update_projs(ldst_info_t *info, ir_node *proj)
{
	long nr = get_Proj_proj(proj);

	assert(0 <= nr && nr <= MAX_PROJ && "Wrong proj from LoadStore");

	if (info->projs[nr]) {
		/* there is already one, do CSE */
		exchange(proj, info->projs[nr]);
		return DF_CHANGED;
	}
	else {
		info->projs[nr] = proj;
		return 0;
	}
}  /* update_projs */
/**
 * update the exception block info for a Load/Store node.
 *
 * @param info   the load/store info struct
 * @param block  the exception handler block for this load/store
 * @param pos    the control flow input of the block
 */
static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
{
	assert(info->exc_block == NULL && "more than one exception block found");

	info->exc_block = block;
	info->exc_idx   = pos;
	return 0;
}  /* update_exc */

/** Return the number of uses of an address node */
#define get_irn_n_uses(adr) get_irn_n_edges(adr)
/**
 * walker, collects all Load/Store/Proj nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
{
	walk_env_t  *wenv   = (walk_env_t *)env;
	unsigned     opcode = get_irn_opcode(node);
	ir_node     *pred, *blk, *pred_blk;
	ldst_info_t *ldst_info;

	if (opcode == iro_Proj) {
		pred   = get_Proj_pred(node);
		opcode = get_irn_opcode(pred);

		if (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call) {
			ldst_info = get_ldst_info(pred, &wenv->obst);

			wenv->changes |= update_projs(ldst_info, node);

			/*
			 * Place the Proj's to the same block as the
			 * predecessor Load. This is always ok and prevents
			 * "non-SSA" form after optimizations if the Proj
			 * is in a wrong block.
			 */
			blk      = get_nodes_block(node);
			pred_blk = get_nodes_block(pred);
			if (blk != pred_blk) {
				wenv->changes |= DF_CHANGED;
				set_nodes_block(node, pred_blk);
			}
		}
	} else if (opcode == iro_Block) {
		int i;

		for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
			ir_node      *pred_block, *proj;
			block_info_t *bl_info;
			int          is_exc = 0;

			pred = proj = get_Block_cfgpred(node, i);

			if (is_Proj(proj)) {
				pred   = get_Proj_pred(proj);
				is_exc = is_x_except_Proj(proj);
			}

			/* ignore Bad predecessors, they will be removed later */
			if (is_Bad(pred))
				continue;

			pred_block = get_nodes_block(pred);
			bl_info    = get_block_info(pred_block, &wenv->obst);

			if (is_fragile_op(pred) && is_exc)
				bl_info->flags |= BLOCK_HAS_EXC;
			else if (is_irn_forking(pred))
				bl_info->flags |= BLOCK_HAS_COND;

			opcode = get_irn_opcode(pred);
			if (is_exc && (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call)) {
				ldst_info = get_ldst_info(pred, &wenv->obst);

				wenv->changes |= update_exc(ldst_info, node, i);
			}
		}
	}
}  /* collect_nodes */
/**
 * Returns an entity if the address ptr points to a constant one.
 *
 * @param ptr  the address
 *
 * @return an entity or NULL
 */
static ir_entity *find_constant_entity(ir_node *ptr)
{
	for (;;) {
		if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
			return get_SymConst_entity(ptr);
		} else if (is_Sel(ptr)) {
			ir_entity *ent = get_Sel_entity(ptr);
			ir_type   *tp  = get_entity_owner(ent);

			/* Do not fiddle with polymorphism. */
			if (is_Class_type(get_entity_owner(ent)) &&
				((get_entity_n_overwrites(ent)    != 0) ||
				 (get_entity_n_overwrittenby(ent) != 0)   ) )
				return NULL;

			if (is_Array_type(tp)) {
				/* check bounds */
				int i, n;

				for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
					ir_node   *bound;
					ir_tarval *tlower, *tupper;
					ir_node   *index = get_Sel_index(ptr, i);
					ir_tarval *tv    = computed_value(index);

					/* check if the index is constant */
					if (tv == tarval_bad)
						return NULL;

					bound  = get_array_lower_bound(tp, i);
					tlower = computed_value(bound);
					bound  = get_array_upper_bound(tp, i);
					tupper = computed_value(bound);

					if (tlower == tarval_bad || tupper == tarval_bad)
						return NULL;

					if (tarval_cmp(tv, tlower) == ir_relation_less)
						return NULL;
					if (tarval_cmp(tupper, tv) == ir_relation_less)
						return NULL;

					/* ok, bounds check finished */
				}
			}

			if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT)
				return ent;

			/* try next */
			ptr = get_Sel_ptr(ptr);
		} else if (is_Add(ptr)) {
			ir_node *l = get_Add_left(ptr);
			ir_node *r = get_Add_right(ptr);

			if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
				ptr = l;
			else if (get_irn_mode(r) == get_irn_mode(ptr) && is_Const(l))
				ptr = r;
			else
				return NULL;

			/* for now, we support only one addition, reassoc should fold all others */
			if (! is_SymConst(ptr) && !is_Sel(ptr))
				return NULL;
		} else if (is_Sub(ptr)) {
			ir_node *l = get_Sub_left(ptr);
			ir_node *r = get_Sub_right(ptr);

			if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
				ptr = l;
			else
				return NULL;
			/* for now, we support only one subtraction, reassoc should fold all others */
			if (! is_SymConst(ptr) && !is_Sel(ptr))
				return NULL;
		} else
			return NULL;
	}
}  /* find_constant_entity */
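/*
 * Illustrative examples (not from the original sources) of address shapes
 * accepted above, assuming a constant global entity g:
 *
 *   SymConst(&g)                         -> g
 *   Sel(SymConst(&g), member m)          -> g
 *   Add(Sel(SymConst(&g), m), Const 8)   -> g
 *
 * A non-constant or out-of-bounds array index, a polymorphic class member
 * or an entity without IR_LINKAGE_CONSTANT makes the function return NULL.
 */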
/**
 * Return the Selection index of a Sel node from dimension n
 */
static long get_Sel_array_index_long(ir_node *n, int dim)
{
	ir_node *index = get_Sel_index(n, dim);
	assert(is_Const(index));
	return get_tarval_long(get_Const_tarval(index));
}  /* get_Sel_array_index_long */
/**
 * Returns the accessed component graph path for a
 * node computing an address.
 *
 * @param ptr    the node computing the address
 * @param depth  current depth in steps upward from the root
 *               of the address
 */
static compound_graph_path *rec_get_accessed_path(ir_node *ptr, size_t depth)
{
	compound_graph_path *res = NULL;
	ir_entity           *root, *field, *ent;
	size_t              path_len, pos, idx;
	ir_tarval           *tv;
	ir_type             *tp;

	if (is_SymConst(ptr)) {
		/* a SymConst. If the depth is 0, this is an access to a global
		 * entity and we don't need a component path, else we know
		 * at least its length.
		 */
		assert(get_SymConst_kind(ptr) == symconst_addr_ent);
		root = get_SymConst_entity(ptr);
		res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
	} else if (is_Sel(ptr)) {
		/* it's a Sel, go up until we find the root */
		res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);
		if (res == NULL)
			return NULL;

		/* fill up the step in the path at the current position */
		field    = get_Sel_entity(ptr);
		path_len = get_compound_graph_path_length(res);
		pos      = path_len - depth - 1;
		set_compound_graph_path_node(res, pos, field);

		if (is_Array_type(get_entity_owner(field))) {
			assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
			set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
		}
	} else if (is_Add(ptr)) {
		ir_node   *l = get_Add_left(ptr);
		ir_node   *r = get_Add_right(ptr);
		ir_mode   *mode;
		ir_tarval *tmp;

		if (is_Const(r) && get_irn_mode(l) == get_irn_mode(ptr)) {
			ptr = l;
			tv  = get_Const_tarval(r);
		} else {
			ptr = r;
			tv  = get_Const_tarval(l);
		}
ptr_arith:
		mode = get_tarval_mode(tv);
		tmp  = tv;

		/* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
		if (is_Sel(ptr)) {
			field = get_Sel_entity(ptr);
		} else {
			field = get_SymConst_entity(ptr);
		}

		/* count the needed array dimensions */
		idx = 0;
		for (ent = field;;) {
			unsigned  size;
			ir_tarval *sz, *tv_index, *tlower, *tupper;
			ir_node   *bound;

			tp = get_entity_type(ent);
			if (! is_Array_type(tp))
				break;
			ent  = get_array_element_entity(tp);
			size = get_type_size_bytes(get_entity_type(ent));
			sz   = new_tarval_from_long(size, mode);

			tv_index = tarval_div(tmp, sz);
			tmp      = tarval_mod(tmp, sz);

			if (tv_index == tarval_bad || tmp == tarval_bad)
				return NULL;

			assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
			bound  = get_array_lower_bound(tp, 0);
			tlower = computed_value(bound);
			bound  = get_array_upper_bound(tp, 0);
			tupper = computed_value(bound);

			if (tlower == tarval_bad || tupper == tarval_bad)
				return NULL;

			if (tarval_cmp(tv_index, tlower) == ir_relation_less)
				return NULL;
			if (tarval_cmp(tupper, tv_index) == ir_relation_less)
				return NULL;

			/* ok, bounds check finished */
			++idx;
		}
		if (! tarval_is_null(tmp)) {
			/* access to some struct/union member */
			return NULL;
		}

		/* should be at least ONE array */
		if (idx == 0)
			return NULL;

		res = rec_get_accessed_path(ptr, depth + idx);
		if (res == NULL)
			return NULL;

		path_len = get_compound_graph_path_length(res);
		pos      = path_len - depth - idx;

		for (ent = field;;) {
			unsigned  size;
			ir_tarval *sz, *tv_index;
			long      index;

			tp = get_entity_type(ent);
			if (! is_Array_type(tp))
				break;
			ent = get_array_element_entity(tp);
			set_compound_graph_path_node(res, pos, ent);

			size = get_type_size_bytes(get_entity_type(ent));
			sz   = new_tarval_from_long(size, mode);

			tv_index = tarval_div(tv, sz);
			tv       = tarval_mod(tv, sz);

			/* worked above, should work again */
			assert(tv_index != tarval_bad && tv != tarval_bad);

			/* bounds already checked above */
			index = get_tarval_long(tv_index);
			set_compound_graph_path_array_index(res, pos, index);
			++pos;
		}
	} else if (is_Sub(ptr)) {
		ir_node *l = get_Sub_left(ptr);
		ir_node *r = get_Sub_right(ptr);

		ptr = l;
		tv  = get_Const_tarval(r);
		tv  = tarval_neg(tv);
		goto ptr_arith;
	}
	return res;
}  /* rec_get_accessed_path */
/**
 * Returns an access path or NULL.  The access path is only
 * valid if the graph is in phase_high and _no_ address computation is used.
 */
static compound_graph_path *get_accessed_path(ir_node *ptr)
{
	compound_graph_path *gr = rec_get_accessed_path(ptr, 0);
	return gr;
}  /* get_accessed_path */
typedef struct path_entry {
	ir_entity         *ent;
	struct path_entry *next;
	size_t            index;
} path_entry;

static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
{
	path_entry       entry, *p;
	ir_entity        *ent, *field;
	ir_initializer_t *initializer;
	ir_tarval        *tv;
	ir_type          *tp;
	size_t           n;

	entry.next = next;
	if (is_SymConst(ptr)) {
		/* found the root */
		ent         = get_SymConst_entity(ptr);
		initializer = get_entity_initializer(ent);
		for (p = next; p != NULL;) {
			if (initializer->kind != IR_INITIALIZER_COMPOUND)
				return NULL;
			n  = get_initializer_compound_n_entries(initializer);
			tp = get_entity_type(ent);

			if (is_Array_type(tp)) {
				ent = get_array_element_entity(tp);
				if (ent != p->ent) {
					/* a missing [0] */
					if (0 >= n)
						return NULL;
					initializer = get_initializer_compound_value(initializer, 0);
					continue;
				}
			}
			if (p->index >= n)
				return NULL;
			initializer = get_initializer_compound_value(initializer, p->index);

			ent = p->ent;
			p   = p->next;
		}
		tp = get_entity_type(ent);
		while (is_Array_type(tp)) {
			ent = get_array_element_entity(tp);
			tp  = get_entity_type(ent);
			/* a missing [0] */
			n = get_initializer_compound_n_entries(initializer);
			if (0 >= n)
				return NULL;
			initializer = get_initializer_compound_value(initializer, 0);
		}

		switch (initializer->kind) {
		case IR_INITIALIZER_CONST:
			return get_initializer_const_value(initializer);
		case IR_INITIALIZER_TARVAL:
		case IR_INITIALIZER_NULL:
		default:
			return NULL;
		}
	} else if (is_Sel(ptr)) {
		entry.ent = field = get_Sel_entity(ptr);
		tp = get_entity_owner(field);
		if (is_Array_type(tp)) {
			assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
			entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
		} else {
			size_t i, n_members = get_compound_n_members(tp);
			for (i = 0; i < n_members; ++i) {
				if (get_compound_member(tp, i) == field)
					break;
			}
			if (i >= n_members) {
				/* not found: should NOT happen */
				return NULL;
			}
			entry.index = i;
		}
		return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
	} else if (is_Add(ptr)) {
		ir_node *l = get_Add_left(ptr);
		ir_node *r = get_Add_right(ptr);
		ir_mode *mode;
		size_t  pos;

		if (is_Const(r)) {
			ptr = l;
			tv  = get_Const_tarval(r);
		} else {
			ptr = r;
			tv  = get_Const_tarval(l);
		}
ptr_arith:
		mode = get_tarval_mode(tv);

		/* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
		if (is_Sel(ptr)) {
			field = get_Sel_entity(ptr);
		} else {
			field = get_SymConst_entity(ptr);
		}

		/* count needed entries */
		pos = 0;
		for (ent = field;;) {
			tp = get_entity_type(ent);
			if (! is_Array_type(tp))
				break;
			ent = get_array_element_entity(tp);
			++pos;
		}
		/* should be at least ONE entry */
		if (pos == 0)
			return NULL;

		/* allocate the right number of entries */
		NEW_ARR_A(path_entry, p, pos);

		/* fill them up */
		pos = 0;
		for (ent = field;;) {
			unsigned  size;
			ir_tarval *sz, *tv_index, *tlower, *tupper;
			long      index;
			ir_node   *bound;

			tp = get_entity_type(ent);
			if (! is_Array_type(tp))
				break;
			ent = get_array_element_entity(tp);
			p[pos].ent  = ent;
			p[pos].next = &p[pos + 1];

			size = get_type_size_bytes(get_entity_type(ent));
			sz   = new_tarval_from_long(size, mode);

			tv_index = tarval_div(tv, sz);
			tv       = tarval_mod(tv, sz);

			if (tv_index == tarval_bad || tv == tarval_bad)
				return NULL;

			assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
			bound  = get_array_lower_bound(tp, 0);
			tlower = computed_value(bound);
			bound  = get_array_upper_bound(tp, 0);
			tupper = computed_value(bound);

			if (tlower == tarval_bad || tupper == tarval_bad)
				return NULL;

			if (tarval_cmp(tv_index, tlower) == ir_relation_less)
				return NULL;
			if (tarval_cmp(tupper, tv_index) == ir_relation_less)
				return NULL;

			/* ok, bounds check finished */
			index = get_tarval_long(tv_index);
			p[pos].index = index;
			++pos;
		}
		if (! tarval_is_null(tv)) {
			/* hmm, wrong access */
			return NULL;
		}
		p[pos - 1].next = next;
		return rec_find_compound_ent_value(ptr, p);
	} else if (is_Sub(ptr)) {
		ir_node *l = get_Sub_left(ptr);
		ir_node *r = get_Sub_right(ptr);

		ptr = l;
		tv  = get_Const_tarval(r);
		tv  = tarval_neg(tv);
		goto ptr_arith;
	}
	return NULL;
}  /* rec_find_compound_ent_value */

static ir_node *find_compound_ent_value(ir_node *ptr)
{
	return rec_find_compound_ent_value(ptr, NULL);
}  /* find_compound_ent_value */
static void reduce_adr_usage(ir_node *ptr);

/**
 * Update a Load that may have lost its users.
 */
static void handle_load_update(ir_node *load)
{
	ldst_info_t *info = (ldst_info_t*)get_irn_link(load);

	/* do NOT touch volatile loads for now */
	if (get_Load_volatility(load) == volatility_is_volatile)
		return;

	if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
		ir_node *ptr = get_Load_ptr(load);
		ir_node *mem = get_Load_mem(load);

		/* a Load whose value is neither used nor exception checked, remove it */
		exchange(info->projs[pn_Load_M], mem);
		if (info->projs[pn_Load_X_regular])
			exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
		kill_node(load);
		reduce_adr_usage(ptr);
	}
}  /* handle_load_update */

/**
 * A use of an address node has vanished. Check if this was a Proj
 * node and update the counters.
 */
static void reduce_adr_usage(ir_node *ptr)
{
	ir_node *pred;
	if (!is_Proj(ptr))
		return;
	if (get_irn_n_edges(ptr) > 0)
		return;

	/* this Proj is dead now */
	pred = get_Proj_pred(ptr);
	if (is_Load(pred)) {
		ldst_info_t *info = (ldst_info_t*)get_irn_link(pred);
		info->projs[get_Proj_proj(ptr)] = NULL;

		/* this node lost its result proj, handle that */
		handle_load_update(pred);
	}
}  /* reduce_adr_usage */
/**
 * Check, if an already existing value of mode old_mode can be converted
 * into the needed one new_mode without loss.
 */
static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode)
{
	unsigned old_size;
	unsigned new_size;

	if (old_mode == new_mode)
		return 1;

	old_size = get_mode_size_bits(old_mode);
	new_size = get_mode_size_bits(new_mode);

	/* if both modes are two's-complement ones, we can always convert the
	   stored value into the needed one. (on big endian machines we currently
	   only support this for modes of same size) */
	if (old_size >= new_size &&
	    get_mode_arithmetic(old_mode) == irma_twos_complement &&
	    get_mode_arithmetic(new_mode) == irma_twos_complement &&
	    (!be_get_backend_param()->byte_order_big_endian
	     || old_size == new_size)) {
		return 1;
	}
	return 0;
}  /* can_use_stored_value */
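/*
 * Example (illustrative, not from the original sources): with
 * two's-complement modes, a 32-bit stored value can satisfy a later 8-bit
 * load of the same address by a simple Conv:
 *
 *   Store.Is p, x        (32 bit)
 *   v = Load.Bu p        (8 bit)    =>    v = Conv Is->Bu (x)
 *
 * On big-endian targets this only holds for equal sizes, hence the
 * byte_order_big_endian check above.
 */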
/*
 * Check whether a Call is at least pure, i.e. only reads memory.
 */
static unsigned is_Call_pure(ir_node *call)
{
	ir_type *call_tp = get_Call_type(call);
	unsigned prop    = get_method_additional_properties(call_tp);

	/* check first the call type */
	if ((prop & (mtp_property_const|mtp_property_pure)) == 0) {
		/* try the called entity */
		ir_node *ptr = get_Call_ptr(call);

		if (is_SymConst_addr_ent(ptr)) {
			ir_entity *ent = get_SymConst_entity(ptr);

			prop = get_entity_additional_properties(ent);
		}
	}
	return (prop & (mtp_property_const|mtp_property_pure)) != 0;
}  /* is_Call_pure */
static ir_node *get_base_and_offset(ir_node *ptr, long *pOffset)
{
	ir_mode *mode  = get_irn_mode(ptr);
	long    offset = 0;

	/* TODO: long might not be enough, we should probably use some tarval thingy... */
	for (;;) {
		if (is_Add(ptr)) {
			ir_node *l = get_Add_left(ptr);
			ir_node *r = get_Add_right(ptr);

			if (get_irn_mode(l) != mode || !is_Const(r))
				break;

			offset += get_tarval_long(get_Const_tarval(r));
			ptr     = l;
		} else if (is_Sub(ptr)) {
			ir_node *l = get_Sub_left(ptr);
			ir_node *r = get_Sub_right(ptr);

			if (get_irn_mode(l) != mode || !is_Const(r))
				break;

			offset -= get_tarval_long(get_Const_tarval(r));
			ptr     = l;
		} else if (is_Sel(ptr)) {
			ir_entity *ent = get_Sel_entity(ptr);
			ir_type   *tp  = get_entity_owner(ent);

			if (is_Array_type(tp)) {
				int     size;
				ir_node *index;

				/* only one dimensional arrays yet */
				if (get_Sel_n_indexs(ptr) != 1)
					break;
				index = get_Sel_index(ptr, 0);
				if (! is_Const(index))
					break;

				tp = get_entity_type(ent);
				if (get_type_state(tp) != layout_fixed)
					break;

				size    = get_type_size_bytes(tp);
				offset += size * get_tarval_long(get_Const_tarval(index));
			} else {
				if (get_type_state(tp) != layout_fixed)
					break;
				offset += get_entity_offset(ent);
			}
			ptr = get_Sel_ptr(ptr);
		} else
			break;
	}

	*pOffset = offset;
	return ptr;
}  /* get_base_and_offset */
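/*
 * Illustrative example (not from the original sources): for a fixed-layout
 * struct member m at byte offset 4,
 *
 *   get_base_and_offset(Add(Sel(p, m), Const 2), &off)
 *
 * walks through the Add and the Sel and yields the base pointer p with
 * off == 6, i.e. the accumulated constant byte offset.
 */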
static int try_load_after_store(ir_node *load,
		ir_node *load_base_ptr, long load_offset, ir_node *store)
{
	ldst_info_t *info;
	ir_node *store_ptr      = get_Store_ptr(store);
	long     store_offset;
	ir_node *store_base_ptr = get_base_and_offset(store_ptr, &store_offset);
	ir_node *store_value;
	ir_mode *store_mode;
	ir_node *load_ptr;
	ir_mode *load_mode;
	long     load_mode_len;
	long     store_mode_len;
	long     delta;
	int      res;

	if (load_base_ptr != store_base_ptr)
		return 0;

	load_mode      = get_Load_mode(load);
	load_mode_len  = get_mode_size_bytes(load_mode);
	store_mode     = get_irn_mode(get_Store_value(store));
	store_mode_len = get_mode_size_bytes(store_mode);
	delta          = load_offset - store_offset;
	store_value    = get_Store_value(store);

	if (delta != 0 || store_mode != load_mode) {
		/* TODO: implement for big-endian */
		if (delta < 0 || delta + load_mode_len > store_mode_len
				|| (be_get_backend_param()->byte_order_big_endian
				    && load_mode_len != store_mode_len))
			return 0;

		if (get_mode_arithmetic(store_mode) != irma_twos_complement ||
		    get_mode_arithmetic(load_mode)  != irma_twos_complement)
			return 0;

		if (delta > 0) {
			/* produce a shift to adjust offset delta */
			ir_node  *cnst;
			ir_graph *irg = get_irn_irg(load);

			cnst        = new_r_Const_long(irg, mode_Iu, delta * 8);
			store_value = new_r_Shr(get_nodes_block(load),
			                        store_value, cnst, store_mode);
		}

		/* add a Conv if needed */
		if (store_mode != load_mode) {
			store_value = new_r_Conv(get_nodes_block(load), store_value, load_mode);
		}
	}
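	/*
	 * Worked example (illustrative, little-endian): after
	 *   Store.Is p, 0x11223344
	 * a Load.Bu from p+1 has delta == 1, so store_value becomes
	 *   Conv Is->Bu (Shr(0x11223344, 8))  ==  0x33.
	 */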
	DBG_OPT_RAW(load, store_value);

	info = (ldst_info_t*)get_irn_link(load);
	if (info->projs[pn_Load_M])
		exchange(info->projs[pn_Load_M], get_Load_mem(load));

	res = 0;
	/* no exception */
	if (info->projs[pn_Load_X_except]) {
		ir_graph *irg = get_irn_irg(load);
		exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
		res |= CF_CHANGED;
	}
	if (info->projs[pn_Load_X_regular]) {
		exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
		res |= CF_CHANGED;
	}

	if (info->projs[pn_Load_res])
		exchange(info->projs[pn_Load_res], store_value);

	load_ptr = get_Load_ptr(load);
	kill_node(load);
	reduce_adr_usage(load_ptr);
	return res | DF_CHANGED;
}  /* try_load_after_store */
/**
 * Follow the memory chain as long as there are only Loads,
 * alias free Stores, and constant Calls and try to replace the
 * current Load by a previous one.
 * Note that in unreachable loops it might happen that we reach the
 * load again, as well as we can fall into a cycle.
 * We break such cycles using a special visited flag.
 *
 * INC_MASTER() must be called before diving in
 */
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr)
{
	unsigned    res  = 0;
	ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
	ir_node     *pred;
	ir_node     *ptr       = get_Load_ptr(load);
	ir_node     *mem       = get_Load_mem(load);
	ir_mode     *load_mode = get_Load_mode(load);

	for (pred = curr; load != pred; ) {
		ldst_info_t *pred_info = (ldst_info_t*)get_irn_link(pred);

		/*
		 * a Load immediately after a Store -- a read after write.
		 * We may remove the Load, if both Load & Store do not have an
		 * exception handler OR they are in the same Block. In the latter
		 * case the Load cannot throw an exception when the previous Store was
		 * quiet.
		 *
		 * Why do we need to check for the Store exception? If the Store cannot
		 * be executed (ROM) the exception handler might simply jump into
		 * the load Block :-(
		 * We could make it a little bit better if we would know that the
		 * exception handler of the Store jumps directly to the end...
		 */
		if (is_Store(pred) && ((pred_info->projs[pn_Store_X_except] == NULL
				&& info->projs[pn_Load_X_except] == NULL)
				|| get_nodes_block(load) == get_nodes_block(pred)))
		{
			long    load_offset;
			ir_node *base_ptr = get_base_and_offset(ptr, &load_offset);
			int     changes   = try_load_after_store(load, base_ptr, load_offset, pred);

			if (changes != 0)
				return res | changes;
		} else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
		           can_use_stored_value(get_Load_mode(pred), load_mode)) {
			/*
			 * a Load after a Load -- a read after read.
			 * We may remove the second Load, if it does not have an exception
			 * handler OR they are in the same Block. In the latter case
			 * the Load cannot throw an exception when the previous Load was
			 * quiet.
			 *
			 * Here, there is no need to check if the previous Load has an
			 * exception handler because they would have exactly the same
			 * exception...
			 *
			 * TODO: implement load-after-load with different mode for big
			 *       endian
			 */
			if (info->projs[pn_Load_X_except] == NULL
					|| get_nodes_block(load) == get_nodes_block(pred)) {
				ir_node *value;

				DBG_OPT_RAR(load, pred);

				/* the result is used */
				if (info->projs[pn_Load_res]) {
					if (pred_info->projs[pn_Load_res] == NULL) {
						/* create a new Proj again */
						pred_info->projs[pn_Load_res] = new_r_Proj(pred, get_Load_mode(pred), pn_Load_res);
					}
					value = pred_info->projs[pn_Load_res];

					/* add a Conv if needed */
					if (get_Load_mode(pred) != load_mode) {
						value = new_r_Conv(get_nodes_block(load), value, load_mode);
					}

					exchange(info->projs[pn_Load_res], value);
				}

				if (info->projs[pn_Load_M])
					exchange(info->projs[pn_Load_M], mem);

				/* no exception */
				if (info->projs[pn_Load_X_except]) {
					ir_graph *irg = get_irn_irg(load);
					exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
					res |= CF_CHANGED;
				}
				if (info->projs[pn_Load_X_regular]) {
					exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
					res |= CF_CHANGED;
				}

				kill_node(load);
				reduce_adr_usage(ptr);
				return res | DF_CHANGED;
			}
		}

		if (is_Store(pred)) {
			/* check if we can pass through this store */
			ir_alias_relation rel = get_alias_relation(
				get_Store_ptr(pred),
				get_irn_mode(get_Store_value(pred)),
				ptr, load_mode);
			/* if there might be an alias, we cannot pass this Store */
			if (rel != ir_no_alias)
				break;
			pred = skip_Proj(get_Store_mem(pred));
		} else if (is_Load(pred)) {
			pred = skip_Proj(get_Load_mem(pred));
		} else if (is_Call(pred)) {
			if (is_Call_pure(pred)) {
				/* The called graph is at least pure, so there are no Store's
				   in it. We can handle it like a Load and skip it. */
				pred = skip_Proj(get_Call_mem(pred));
			} else {
				/* there might be Store's in the graph, stop here */
				break;
			}
		} else {
			/* follow only Load chains */
			break;
		}

		/* check for cycles */
		if (NODE_VISITED(pred_info))
			break;
		MARK_NODE(pred_info);
	}

	if (is_Sync(pred)) {
		int i;

		/* handle all Sync predecessors */
		for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
			res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
			if (res)
				return res;
		}
	}

	return res;
}  /* follow_Mem_chain */
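/*
 * Illustrative chain walk (not from the original sources): for
 *
 *   v1 = Load(p, mem)
 *   v2 = Load(p, Proj_M(v1))
 *
 * follow_Mem_chain() starts at v2's memory input, finds v1 (a read after
 * read) and replaces v2's result by v1's result Proj.  Aliasing Stores or
 * impure Calls on the way stop the walk; Sync nodes fork it.
 */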
/**
 * Check if we can replace the load by a given const from
 * the const code irg.
 */
ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c)
{
	ir_mode  *c_mode = get_irn_mode(c);
	ir_mode  *l_mode = get_Load_mode(load);
	ir_node  *block  = get_nodes_block(load);
	dbg_info *dbgi   = get_irn_dbg_info(load);
	ir_node  *res    = copy_const_value(dbgi, c, block);

	if (c_mode != l_mode) {
		/* check, if the mode matches OR can be easily converted into */
		if (is_reinterpret_cast(c_mode, l_mode)) {
			/* copy the value from the const code irg and cast it */
			res = new_rd_Conv(dbgi, block, res, l_mode);
		} else {
			return NULL;
		}
	}
	return res;
}  /* can_replace_load_by_const */
/**
 * optimize a Load
 *
 * @param load  the Load node
 */
static unsigned optimize_load(ir_node *load)
{
	ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
	ir_node     *mem, *ptr, *value;
	ir_entity   *ent;
	long        dummy;
	unsigned    res = 0;

	/* do NOT touch volatile loads for now */
	if (get_Load_volatility(load) == volatility_is_volatile)
		return 0;

	/* the address of the load to be optimized */
	ptr = get_Load_ptr(load);

	/* The mem of the Load. Must still be returned after optimization. */
	mem = get_Load_mem(load);

	if (info->projs[pn_Load_res] == NULL
			&& info->projs[pn_Load_X_except] == NULL) {
		/* the value is never used and we don't care about exceptions, remove */
		exchange(info->projs[pn_Load_M], mem);

		if (info->projs[pn_Load_X_regular]) {
			/* should not happen, but if it does, remove it */
			exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
			res |= CF_CHANGED;
		}
		kill_node(load);
		reduce_adr_usage(ptr);
		return res | DF_CHANGED;
	}

	value = NULL;
	/* check if we can determine the entity that will be loaded */
	ent = find_constant_entity(ptr);
	if (ent != NULL
			&& get_entity_visibility(ent) != ir_visibility_external) {
		/* a static allocation that is not external: there should be NO
		 * exception when loading even if we cannot replace the load itself.
		 */

		/* no exception, clear the info field as it might be checked later again */
		if (info->projs[pn_Load_X_except]) {
			ir_graph *irg = get_irn_irg(load);
			exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
			info->projs[pn_Load_X_except] = NULL;
			res |= CF_CHANGED;
		}
		if (info->projs[pn_Load_X_regular]) {
			exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
			info->projs[pn_Load_X_regular] = NULL;
			res |= CF_CHANGED;
		}

		if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT) {
			if (has_entity_initializer(ent)) {
				/* new style initializer */
				value = find_compound_ent_value(ptr);
			} else if (entity_has_compound_ent_values(ent)) {
				/* old style initializer */
				compound_graph_path *path = get_accessed_path(ptr);

				if (path != NULL) {
					assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));

					value = get_compound_ent_value_by_path(ent, path);
					DB((dbg, LEVEL_1, "  Constant access at %F%F resulted in %+F\n", ent, path, value));
					free_compound_graph_path(path);
				}
			}
			if (value != NULL) {
				ir_graph *irg = get_irn_irg(load);
				value = can_replace_load_by_const(load, value);
				if (value != NULL && is_Sel(ptr) &&
						!is_irg_state(irg, IR_GRAPH_STATE_IMPLICIT_BITFIELD_MASKING)) {
					/* frontend has inserted masking operations after bitfield accesses,
					 * so we might have to shift the const. */
					unsigned char bit_offset = get_entity_offset_bits_remainder(get_Sel_entity(ptr));
					ir_tarval *tv_old    = get_Const_tarval(value);
					ir_tarval *tv_offset = new_tarval_from_long(bit_offset, mode_Bu);
					ir_tarval *tv_new    = tarval_shl(tv_old, tv_offset);
					value = new_r_Const(irg, tv_new);
				}
			}
		}
	}
	if (value != NULL) {
		/* we completely replace the load by this value */
		if (info->projs[pn_Load_X_except]) {
			ir_graph *irg = get_irn_irg(load);
			exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
			info->projs[pn_Load_X_except] = NULL;
			res |= CF_CHANGED;
		}
		if (info->projs[pn_Load_X_regular]) {
			exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
			info->projs[pn_Load_X_regular] = NULL;
			res |= CF_CHANGED;
		}
		if (info->projs[pn_Load_M]) {
			exchange(info->projs[pn_Load_M], mem);
			res |= DF_CHANGED;
		}
		if (info->projs[pn_Load_res]) {
			exchange(info->projs[pn_Load_res], value);
			res |= DF_CHANGED;
		}
		kill_node(load);
		reduce_adr_usage(ptr);
		return res;
	}

	/* Check, if the address of this load is used more than once.
	 * If not, this load cannot be removed in any case. */
	if (get_irn_n_uses(ptr) <= 1 && get_irn_n_uses(get_base_and_offset(ptr, &dummy)) <= 1)
		return res;

	/*
	 * follow the memory chain as long as there are only Loads
	 * and try to replace the current Load or Store by a previous one.
	 * Note that in unreachable loops it might happen that we reach the
	 * load again, as well as we can fall into a cycle.
	 * We break such cycles using a special visited flag.
	 */
	INC_MASTER();
	res = follow_Mem_chain(load, skip_Proj(mem));
	return res;
}  /* optimize_load */
/**
 * Check whether a value of mode new_mode would completely overwrite a value
 * of mode old_mode in memory.
 */
static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
{
	return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
}  /* is_completely_overwritten */
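/* e.g. a later 32-bit Store completely overwrites an earlier 8-, 16- or
 * 32-bit Store to the same address, so the earlier one can die (sketch,
 * sizes in bits). */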
/**
 * Check whether small is a part of large (starting at same address).
 */
static int is_partially_same(ir_node *small, ir_node *large)
{
	ir_mode *sm = get_irn_mode(small);
	ir_mode *lm = get_irn_mode(large);

	/* FIXME: Check endianness */
	return is_Conv(small) && get_Conv_op(small) == large
	    && get_mode_size_bytes(sm) < get_mode_size_bytes(lm)
	    && get_mode_arithmetic(sm) == irma_twos_complement
	    && get_mode_arithmetic(lm) == irma_twos_complement;
}  /* is_partially_same */
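/* e.g. small == Conv Is->Hs (x) and large == x: the 16-bit value is the
 * leading bytes of the 32-bit one on little-endian layouts (sketch, see
 * the endianness FIXME above). */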
/**
 * follow the memory chain as long as there are only Loads and alias free Stores.
 *
 * INC_MASTER() must be called before diving in
 */
static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr)
{
	unsigned    res   = 0;
	ldst_info_t *info = (ldst_info_t*)get_irn_link(store);
	ir_node     *pred;
	ir_node     *ptr   = get_Store_ptr(store);
	ir_node     *mem   = get_Store_mem(store);
	ir_node     *value = get_Store_value(store);
	ir_mode     *mode  = get_irn_mode(value);
	ir_node     *block = get_nodes_block(store);

	for (pred = curr; pred != store;) {
		ldst_info_t *pred_info = (ldst_info_t*)get_irn_link(pred);

		/*
		 * BEWARE: one might think that checking the modes is useless, because
		 * if the pointers are identical, they refer to the same object.
		 * This is only true in strong typed languages, not in C where the following
		 * is possible *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
		 * However, if the size of the mode that is written is bigger or equal the
		 * size of the old one, the old value is completely overwritten and can be
		 * killed ...
		 */
		if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
		    get_nodes_block(pred) == block) {
			/*
			 * a Store after a Store in the same Block -- a write after write.
			 */

			/*
			 * We may remove the first Store, if the old value is completely
			 * overwritten or the old value is a part of the new value,
			 * and if it does not have an exception handler.
			 *
			 * TODO: What, if both have the same exception handler ???
			 */
			if (get_Store_volatility(pred) != volatility_is_volatile
			        && !pred_info->projs[pn_Store_X_except]) {
				ir_node *predvalue = get_Store_value(pred);
				ir_mode *predmode  = get_irn_mode(predvalue);

				if (is_completely_overwritten(predmode, mode)
				        || is_partially_same(predvalue, value)) {
					DBG_OPT_WAW(pred, store);
					exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
					kill_node(pred);
					reduce_adr_usage(ptr);
					return DF_CHANGED;
				}
			}

			/*
			 * We may remove the Store, if the old value already contains
			 * the new value, and if it does not have an exception handler.
			 *
			 * TODO: What, if both have the same exception handler ???
			 */
			if (get_Store_volatility(store) != volatility_is_volatile
			        && !info->projs[pn_Store_X_except]) {
				ir_node *predvalue = get_Store_value(pred);

				if (is_partially_same(value, predvalue)) {
					DBG_OPT_WAW(pred, store);
					exchange(info->projs[pn_Store_M], mem);
					kill_node(store);
					reduce_adr_usage(ptr);
					return DF_CHANGED;
				}
			}
		} else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
		           value == pred_info->projs[pn_Load_res]) {
			/*
			 * a Store of a value just loaded from the same address
			 * -- a write after read.
			 * We may remove the Store, if it does not have an exception
			 * handler.
			 */
			if (! info->projs[pn_Store_X_except]) {
				DBG_OPT_WAR(store, pred);
				exchange(info->projs[pn_Store_M], mem);
				kill_node(store);
				reduce_adr_usage(ptr);
				return DF_CHANGED;
			}
		}

		if (is_Store(pred)) {
			/* check if we can pass through this store */
			ir_alias_relation rel = get_alias_relation(
				get_Store_ptr(pred),
				get_irn_mode(get_Store_value(pred)),
				ptr, mode);
			/* if there might be an alias, we cannot pass this Store */
			if (rel != ir_no_alias)
				break;
			pred = skip_Proj(get_Store_mem(pred));
		} else if (is_Load(pred)) {
			ir_alias_relation rel = get_alias_relation(
				get_Load_ptr(pred), get_Load_mode(pred),
				ptr, mode);
			if (rel != ir_no_alias)
				break;

			pred = skip_Proj(get_Load_mem(pred));
		} else {
			/* follow only Load chains */
			break;
		}

		/* check for cycles */
		if (NODE_VISITED(pred_info))
			break;
		MARK_NODE(pred_info);
	}

	if (is_Sync(pred)) {
		int i;

		/* handle all Sync predecessors */
		for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
			res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
			if (res)
				break;
		}
	}
	return res;
}  /* follow_Mem_chain_for_Store */
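/*
 * Illustrative write-after-write collapse (not from the original sources):
 *
 *   M1 = Store(p, a, M0)
 *   M2 = Store(p, b, M1)    =>    M2 = Store(p, b, M0), first Store killed
 *
 * legal when b completely overwrites a (or a is a partial prefix of b)
 * and the first Store has no exception users.
 */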
/** find entity used as base for an address calculation */
static ir_entity *find_entity(ir_node *ptr)
{
	switch (get_irn_opcode(ptr)) {
	case iro_SymConst:
		return get_SymConst_entity(ptr);
	case iro_Sel: {
		ir_node *pred = get_Sel_ptr(ptr);
		if (get_irg_frame(get_irn_irg(ptr)) == pred)
			return get_Sel_entity(ptr);

		return find_entity(pred);
	}
	case iro_Sub:
	case iro_Add: {
		ir_node *left = get_binop_left(ptr);
		ir_node *right;
		if (mode_is_reference(get_irn_mode(left)))
			return find_entity(left);
		right = get_binop_right(ptr);
		if (mode_is_reference(get_irn_mode(right)))
			return find_entity(right);
		return NULL;
	}
	default:
		return NULL;
	}
}  /* find_entity */
/**
 * optimize a Store
 *
 * @param store  the Store node
 */
static unsigned optimize_store(ir_node *store)
{
	ir_node   *ptr;
	ir_node   *mem;
	ir_entity *entity;

	if (get_Store_volatility(store) == volatility_is_volatile)
		return 0;

	ptr    = get_Store_ptr(store);
	entity = find_entity(ptr);

	/* a store to an entity which is never read is unnecessary */
	if (entity != NULL && !(get_entity_usage(entity) & ir_usage_read)) {
		ldst_info_t *info = (ldst_info_t*)get_irn_link(store);
		if (info->projs[pn_Store_X_except] == NULL) {
			DB((dbg, LEVEL_1, "  Killing useless %+F to never read entity %+F\n", store, entity));
			exchange(info->projs[pn_Store_M], get_Store_mem(store));
			kill_node(store);
			reduce_adr_usage(ptr);
			return DF_CHANGED;
		}
	}

	/* Check, if the address of this Store is used more than once.
	 * If not, this Store cannot be removed in any case. */
	if (get_irn_n_uses(ptr) <= 1)
		return 0;

	mem = get_Store_mem(store);

	/* follow the memory chain as long as there are only Loads */
	INC_MASTER();

	return follow_Mem_chain_for_Store(store, skip_Proj(mem));
}  /* optimize_store */
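/*
 * Source-level sketch (illustrative, not from the original sources): given
 *
 *   static int counter;              -- entity never marked ir_usage_read
 *   void f(void) { counter = 42; }
 *
 * the Store to counter is removed here once entity usage analysis has
 * shown that the entity is never read.
 */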
/**
 * walker, optimizes Phi after Stores to identical places:
 * Does the following optimization:
 * @verbatim
 *
 *   val1   val2   val3          val1  val2  val3
 *     |      |      |               \    |    /
 *  Store  Store  Store               \   |   /
 *      \     |     /                  PhiData
 *       \    |    /                       |
 *        \   |   /                      Store
 *          PhiM                           |
 *                                        PhiM
 *
 * @endverbatim
 * This reduces the number of stores and allows for predicated execution.
 * Moves Stores back to the end of a function which may be bad.
 *
 * This is only possible if the predecessor blocks have only one successor.
 */
static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
{
	int i, n;
	ir_node *store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
#ifdef DO_CACHEOPT
	ir_node *old_store;
#endif
	ir_mode *mode;
	ir_node **inM, **inD, **projMs;
	int *idx;
	dbg_info *db = NULL;
	ldst_info_t *info;
	block_info_t *bl_info;
	unsigned res = 0;

	/* Must be a memory Phi */
	if (get_irn_mode(phi) != mode_M)
		return 0;

	n = get_Phi_n_preds(phi);
	if (n <= 0)
		return 0;

	/* must be only one user */
	projM = get_Phi_pred(phi, 0);
	if (get_irn_n_edges(projM) != 1)
		return 0;

	store = skip_Proj(projM);
#ifdef DO_CACHEOPT
	old_store = store;
#endif
	if (!is_Store(store))
		return 0;

	block = get_nodes_block(store);

	/* check if the block is post dominated by Phi-block
	   and has no exception exit */
	bl_info = (block_info_t*)get_irn_link(block);
	if (bl_info->flags & BLOCK_HAS_EXC)
		return 0;

	phi_block = get_nodes_block(phi);
	if (! block_strictly_postdominates(phi_block, block))
		return 0;

	/* this is the address of the store */
	ptr  = get_Store_ptr(store);
	mode = get_irn_mode(get_Store_value(store));
	info = (ldst_info_t*)get_irn_link(store);
	exc  = info->exc_block;

	for (i = 1; i < n; ++i) {
		ir_node *pred = get_Phi_pred(phi, i);

		if (get_irn_n_edges(pred) != 1)
			return 0;

		pred = skip_Proj(pred);
		if (!is_Store(pred))
			return 0;

		if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
			return 0;

		info = (ldst_info_t*)get_irn_link(pred);

		/* check, if all stores have the same exception flow */
		if (exc != info->exc_block)
			return 0;

		block = get_nodes_block(pred);

		/* check if the block is post dominated by Phi-block
		   and has no exception exit. Note that block must be different from
		   Phi-block, else we would move a Store from the end of a block to
		   its start... */
		bl_info = (block_info_t*)get_irn_link(block);
		if (bl_info->flags & BLOCK_HAS_EXC)
			return 0;
		if (block == phi_block || ! block_postdominates(phi_block, block))
			return 0;
	}

	/*
	 * ok, when we are here, we found all predecessors of a Phi that
	 * are Stores to the same address and size. That means whatever
	 * we do before we enter the block of the Phi, we do a Store.
	 * So, we can move the Store to the current block:
	 *
	 *   val1    val2    val3          val1  val2  val3
	 *    |       |       |               \    |    /
	 * | Str | | Str | | Str |             \   |   /
	 *      \     |     /                   PhiData
	 *       \    |    /                       |
	 *        \   |   /                       Str
	 *           PhiM                          |
	 *                                        PhiM
	 *
	 * Is only allowed if the predecessor blocks have only one successor.
	 */

	NEW_ARR_A(ir_node *, projMs, n);
	NEW_ARR_A(ir_node *, inM, n);
	NEW_ARR_A(ir_node *, inD, n);
	NEW_ARR_A(int, idx, n);

	/* Prepare: Collect all Store nodes.  We must do this
	   first because we otherwise may lose a store when exchanging its
	   memory Proj.
	 */
	for (i = n - 1; i >= 0; --i) {
		ir_node *store;

		projMs[i] = get_Phi_pred(phi, i);
		assert(is_Proj(projMs[i]));

		store = get_Proj_pred(projMs[i]);
		info  = (ldst_info_t*)get_irn_link(store);

		inM[i] = get_Store_mem(store);
		inD[i] = get_Store_value(store);
		idx[i] = info->exc_idx;
	}
	block = get_nodes_block(phi);

	/* second step: create a new memory Phi */
	phiM = new_rd_Phi(get_irn_dbg_info(phi), block, n, inM, mode_M);

	/* third step: create a new data Phi */
	phiD = new_rd_Phi(get_irn_dbg_info(phi), block, n, inD, mode);

	/* rewire memory and kill the node */
	for (i = n - 1; i >= 0; --i) {
		ir_node *proj = projMs[i];

		if (is_Proj(proj)) {
			ir_node *store = get_Proj_pred(proj);
			exchange(proj, inM[i]);
			kill_node(store);
		}
	}

	/* fourth step: create the Store */
	store = new_rd_Store(db, block, phiM, ptr, phiD, cons_none);
#ifdef DO_CACHEOPT
	co_set_irn_name(store, co_get_irn_ident(old_store));
#endif

	projM = new_rd_Proj(NULL, store, mode_M, pn_Store_M);

	info = get_ldst_info(store, &wenv->obst);
	info->projs[pn_Store_M] = projM;

	/* fifth step: repair exception flow */
	if (exc) {
		ir_node *projX = new_rd_Proj(NULL, store, mode_X, pn_Store_X_except);

		info->projs[pn_Store_X_except] = projX;
		info->exc_block                = exc;
		info->exc_idx                  = idx[0];

		for (i = 0; i < n; ++i) {
			set_Block_cfgpred(exc, idx[i], projX);
		}

		if (n > 1) {
			/* the exception block should be optimized as some inputs are identical now */
		}

		res |= CF_CHANGED;
	}

	/* sixth step: replace old Phi */
	exchange(phi, projM);

	return res | DF_CHANGED;
}  /* optimize_phi */
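/*
 * Source-level sketch of the Phi-of-Stores transformation above
 * (illustrative, not from the original sources):
 *
 *   if (c) *p = a; else *p = b;    =>    *p = c ? a : b;
 *
 * Two Stores and a memory Phi become a data Phi feeding one Store in the
 * join block, which enables predicated execution.
 */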
/**
 * walker, do the optimizations
 */
static void do_load_store_optimize(ir_node *n, void *env)
{
	walk_env_t *wenv = (walk_env_t*)env;

	switch (get_irn_opcode(n)) {
	case iro_Load:
		wenv->changes |= optimize_load(n);
		break;

	case iro_Store:
		wenv->changes |= optimize_store(n);
		break;

	case iro_Phi:
		wenv->changes |= optimize_phi(n, wenv);
		break;

	default:
		break;
	}
}  /* do_load_store_optimize */
/** An SCC. */
typedef struct scc {
	ir_node *head;          /**< the head of the list */
} scc;

/** A node entry. */
typedef struct node_entry {
	unsigned DFSnum;        /**< the DFS number of this node */
	unsigned low;           /**< the low number of this node */
	int      in_stack;      /**< flag, set if the node is on the stack */
	ir_node  *next;         /**< link to the next node in the same scc */
	scc      *pscc;         /**< the scc of this node */
	unsigned POnum;         /**< the post order number for blocks */
} node_entry;

/** A loop entry. */
typedef struct loop_env {
	ir_nodehashmap_t map;           /**< map node -> node_entry */
	struct obstack   obst;
	ir_node          **stack;       /**< the node stack */
	size_t           tos;           /**< tos index */
	unsigned         nextDFSnum;    /**< the current DFS number */
	unsigned         POnum;         /**< current post order number */

	unsigned         changes;       /**< a bitmask of graph changes */
} loop_env;
/**
 * Gets the node_entry of a node
 */
static node_entry *get_irn_ne(ir_node *irn, loop_env *env)
{
	node_entry *e = (node_entry*)ir_nodehashmap_get(&env->map, irn);

	if (e == NULL) {
		e = OALLOC(&env->obst, node_entry);
		memset(e, 0, sizeof(*e));
		ir_nodehashmap_insert(&env->map, irn, e);
	}
	return e;
}  /* get_irn_ne */
/**
 * Push a node onto the stack.
 *
 * @param env  the loop environment
 * @param n    the node to push
 */
static void push(loop_env *env, ir_node *n)
{
	node_entry *e;

	if (env->tos == ARR_LEN(env->stack)) {
		size_t nlen = ARR_LEN(env->stack) * 2;
		ARR_RESIZE(ir_node *, env->stack, nlen);
	}
	env->stack[env->tos++] = n;
	e = get_irn_ne(n, env);
	e->in_stack = 1;
}  /* push */
/**
 * pop a node from the stack
 *
 * @param env  the loop environment
 *
 * @return  The topmost node
 */
static ir_node *pop(loop_env *env)
{
	ir_node *n = env->stack[--env->tos];
	node_entry *e = get_irn_ne(n, env);

	e->in_stack = 0;
	return n;
}  /* pop */
/**
 * Check if irn is a region constant.
 * The block of irn must strictly dominate the header block.
 *
 * @param irn           the node to check
 * @param header_block  the header block of the induction variable
 */
static int is_rc(ir_node *irn, ir_node *header_block)
{
	ir_node *block = get_nodes_block(irn);

	return (block != header_block) && block_dominates(block, header_block);
}  /* is_rc */
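/* e.g. a Const or a SymConst address, or any node computed in a block
 * that strictly dominates the loop header, counts as a region constant
 * for that loop (sketch). */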
typedef struct phi_entry phi_entry;
struct phi_entry {
	ir_node   *phi;    /**< A phi with a region const memory. */
	int       pos;     /**< The position of the region const memory */
	ir_node   *load;   /**< the newly created load for this phi */
	phi_entry *next;
};

/**
 * An entry in the avail set.
 */
typedef struct avail_entry_t {
	ir_node *ptr;   /**< the address pointer */
	ir_mode *mode;  /**< the load mode */
	ir_node *load;  /**< the associated Load */
} avail_entry_t;
/**
 * Compare two avail entries.
 */
static int cmp_avail_entry(const void *elt, const void *key, size_t size)
{
	const avail_entry_t *a = (const avail_entry_t*)elt;
	const avail_entry_t *b = (const avail_entry_t*)key;
	(void) size;

	return a->ptr != b->ptr || a->mode != b->mode;
}  /* cmp_avail_entry */

/**
 * Calculate the hash value of an avail entry.
 */
static unsigned hash_cache_entry(const avail_entry_t *entry)
{
	return get_irn_idx(entry->ptr) * 9 + HASH_PTR(entry->mode);
}  /* hash_cache_entry */
/**
 * Move Loads out of loops if possible.
 *
 * @param pscc  the loop described by an SCC
 * @param env   the loop environment
 */
static void move_loads_out_of_loops(scc *pscc, loop_env *env)
{
	ir_node   *phi, *load, *next, *other, *next_other;
	int       j;
	phi_entry *phi_list = NULL;
	set       *avail;

	avail = new_set(cmp_avail_entry, 8);

	/* collect all outer memories */
	for (phi = pscc->head; phi != NULL; phi = next) {
		node_entry *ne = get_irn_ne(phi, env);
		next = ne->next;

		/* check all memory Phi's */
		if (! is_Phi(phi))
			continue;

		assert(get_irn_mode(phi) == mode_M && "DFS return non-memory Phi");

		for (j = get_irn_arity(phi) - 1; j >= 0; --j) {
			ir_node    *pred = get_irn_n(phi, j);
			node_entry *pe   = get_irn_ne(pred, env);

			if (pe->pscc != ne->pscc) {
				/* not in the same SCC, is region const */
				phi_entry *pe = OALLOC(&env->obst, phi_entry);

				pe->phi  = phi;
				pe->pos  = j;
				pe->next = phi_list;
				phi_list = pe;
			}
		}
	}
	/* no Phis no fun */
	assert(phi_list != NULL && "DFS found a loop without Phi");

	/* for now, we cannot handle more than one input (only reducible cf) */
	if (phi_list->next != NULL)
		return;

	for (load = pscc->head; load; load = next) {
		ir_mode *load_mode;
		node_entry *ne = get_irn_ne(load, env);
		next = ne->next;

		if (is_Load(load)) {
			ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
			ir_node     *ptr = get_Load_ptr(load);

			/* for now, we cannot handle Loads with exceptions */
			if (info->projs[pn_Load_res] == NULL || info->projs[pn_Load_X_regular] != NULL || info->projs[pn_Load_X_except] != NULL)
				continue;

			/* for now, we can only move Load(Global) */
			if (! is_SymConst_addr_ent(ptr))
				continue;
			load_mode = get_Load_mode(load);
			for (other = pscc->head; other != NULL; other = next_other) {
				node_entry *ne = get_irn_ne(other, env);
				next_other = ne->next;

				if (is_Store(other)) {
					ir_alias_relation rel = get_alias_relation(
						get_Store_ptr(other),
						get_irn_mode(get_Store_value(other)),
						ptr, load_mode);
					/* if there might be an alias, we cannot pass this Store */
					if (rel != ir_no_alias)
						break;
				}
				/* only Phis and pure Calls are allowed here, so ignore them */
			}
			if (other == NULL) {
				ldst_info_t *ninfo = NULL;
				phi_entry   *pe;
				dbg_info    *db;

				/* yep, no aliasing Store found, Load can be moved */
				DB((dbg, LEVEL_1, "  Found a Load that could be moved: %+F\n", load));

				db = get_irn_dbg_info(load);
				for (pe = phi_list; pe != NULL; pe = pe->next) {
					int     pos   = pe->pos;
					ir_node *phi  = pe->phi;
					ir_node *blk  = get_nodes_block(phi);
					ir_node *pred = get_Block_cfgpred_block(blk, pos);
					ir_node *irn, *mem;
					avail_entry_t entry, *res;

					entry.ptr  = ptr;
					entry.mode = load_mode;
					res = (avail_entry_t*)set_find(avail, &entry, sizeof(entry), hash_cache_entry(&entry));
					if (res != NULL) {
						irn = res->load;
					} else {
						irn = new_rd_Load(db, pred, get_Phi_pred(phi, pos), ptr, load_mode, cons_none);
						entry.load = irn;
						set_insert(avail, &entry, sizeof(entry), hash_cache_entry(&entry));
						DB((dbg, LEVEL_1, "  Created %+F in %+F\n", irn, pred));
					}
					pe->load = irn;
					ninfo = get_ldst_info(irn, &env->obst);

					ninfo->projs[pn_Load_M] = mem = new_r_Proj(irn, mode_M, pn_Load_M);
					if (res == NULL) {
						/* irn is from cache, so do not set phi pred again.
						 * There might be other Loads between phi and irn already.
						 */
						set_Phi_pred(phi, pos, mem);
					}

					ninfo->projs[pn_Load_res] = new_r_Proj(irn, load_mode, pn_Load_res);
				}

				/* now kill the old Load */
				exchange(info->projs[pn_Load_M], get_Load_mem(load));
				exchange(info->projs[pn_Load_res], ninfo->projs[pn_Load_res]);

				env->changes |= DF_CHANGED;
			}
		}
	}
	del_set(avail);
}  /* move_loads_out_of_loops */
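/*
 * Source-level sketch (illustrative, not from the original sources): for a
 * global g that no Store in the loop may alias,
 *
 *   while (cond) { x = g; ... }   =>   t = g; while (cond) { x = t; ... }
 *
 * the Load is re-created once in the block before the loop header and the
 * loop body uses its result Proj instead.
 */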
/**
 * Process a loop SCC.
 *
 * @param pscc  the SCC
 * @param env   the loop environment
 */
static void process_loop(scc *pscc, loop_env *env)
{
	ir_node *irn, *next, *header = NULL;
	node_entry *b, *h = NULL;
	int j, only_phi, num_outside, process = 0;
	ir_node *out_rc = NULL;

	/* find the header block for this scc */
	for (irn = pscc->head; irn; irn = next) {
		node_entry *e = get_irn_ne(irn, env);
		ir_node *block = get_nodes_block(irn);

		next = e->next;
		b = get_irn_ne(block, env);

		if (header != NULL) {
			if (h->POnum < b->POnum) {
				header = block;
				h      = b;
			}
		} else {
			header = block;
			h      = b;
		}
	}

	/* check if this scc contains only Phi, Loads or Stores nodes */
	only_phi    = 1;
	num_outside = 0;
	for (irn = pscc->head; irn; irn = next) {
		node_entry *e = get_irn_ne(irn, env);

		next = e->next;
		switch (get_irn_opcode(irn)) {
		case iro_Call:
			if (is_Call_pure(irn)) {
				/* pure calls can be treated like loads */
				only_phi = 0;
				break;
			}
			/* non-pure calls must be handled like may-alias Stores */
			goto fail;
		case iro_CopyB:
			/* cannot handle CopyB yet */
			goto fail;
		case iro_Load:
			process = 1;
			if (get_Load_volatility(irn) == volatility_is_volatile) {
				/* cannot handle loops with volatile Loads */
				goto fail;
			}
			only_phi = 0;
			break;
		case iro_Store:
			if (get_Store_volatility(irn) == volatility_is_volatile) {
				/* cannot handle loops with volatile Stores */
				goto fail;
			}
			only_phi = 0;
			break;
		default:
			break;
		}

		for (j = get_irn_arity(irn) - 1; j >= 0; --j) {
			ir_node *pred = get_irn_n(irn, j);
			node_entry *pe = get_irn_ne(pred, env);

			if (pe->pscc != e->pscc) {
				/* not in the same SCC, must be a region const */
				if (! is_rc(pred, header)) {
					/* not a memory loop */
					goto fail;
				}
				if (out_rc == NULL) {
					/* first region constant */
					out_rc = pred;
					++num_outside;
				} else if (out_rc != pred) {
					/* another region constant */
					++num_outside;
				}
			}
		}
	}

	/* found a memory loop */
	DB((dbg, LEVEL_2, "  Found a memory loop:\n  "));
	if (only_phi && num_outside == 1) {
		/* a phi cycle with only one real predecessor can be collapsed */
		DB((dbg, LEVEL_2, "  Found an USELESS Phi cycle:\n  "));

		for (irn = pscc->head; irn; irn = next) {
			node_entry *e = get_irn_ne(irn, env);
			next = e->next;
			exchange(irn, out_rc);
		}
		env->changes |= DF_CHANGED;
		return;
	}

#ifdef DEBUG_libfirm
	for (irn = pscc->head; irn; irn = next) {
		node_entry *e = get_irn_ne(irn, env);
		next = e->next;
		DB((dbg, LEVEL_2, " %+F,", irn));
	}
	DB((dbg, LEVEL_2, "\n"));
#endif
	if (process)
		move_loads_out_of_loops(pscc, env);

fail:
	;
}  /* process_loop */
/**
 * Process an SCC.
 *
 * @param pscc  the SCC
 * @param env   the loop environment
 */
static void process_scc(scc *pscc, loop_env *env)
{
	ir_node *head = pscc->head;
	node_entry *e = get_irn_ne(head, env);

#ifdef DEBUG_libfirm
	{
		ir_node *irn, *next;

		DB((dbg, LEVEL_4, " SCC at %p:\n ", pscc));
		for (irn = pscc->head; irn; irn = next) {
			node_entry *e = get_irn_ne(irn, env);

			next = e->next;

			DB((dbg, LEVEL_4, " %+F,", irn));
		}
		DB((dbg, LEVEL_4, "\n"));
	}
#endif

	if (e->next != NULL) {
		/* this SCC has more than one member */
		process_loop(pscc, env);
	}
}  /* process_scc */
/**
 * Do Tarjan's SCC algorithm and drive load/store optimization.
 *
 * @param irn  start at this node
 * @param env  the loop environment
 */
static void dfs(ir_node *irn, loop_env *env)
{
	int i, n;
	node_entry *node = get_irn_ne(irn, env);

	mark_irn_visited(irn);

	node->DFSnum = env->nextDFSnum++;
	node->low    = node->DFSnum;
	push(env, irn);

	/* handle preds */
	if (is_Phi(irn) || is_Sync(irn)) {
		n = get_irn_arity(irn);
		for (i = 0; i < n; ++i) {
			ir_node *pred = get_irn_n(irn, i);
			node_entry *o = get_irn_ne(pred, env);

			if (!irn_visited(pred)) {
				dfs(pred, env);
				node->low = MIN(node->low, o->low);
			}
			if (o->DFSnum < node->DFSnum && o->in_stack)
				node->low = MIN(o->DFSnum, node->low);
		}
	} else if (is_fragile_op(irn)) {
		ir_node *pred = get_fragile_op_mem(irn);
		node_entry *o = get_irn_ne(pred, env);

		if (!irn_visited(pred)) {
			dfs(pred, env);
			node->low = MIN(node->low, o->low);
		}
		if (o->DFSnum < node->DFSnum && o->in_stack)
			node->low = MIN(o->DFSnum, node->low);
	} else if (is_Proj(irn)) {
		ir_node *pred = get_Proj_pred(irn);
		node_entry *o = get_irn_ne(pred, env);

		if (!irn_visited(pred)) {
			dfs(pred, env);
			node->low = MIN(node->low, o->low);
		}
		if (o->DFSnum < node->DFSnum && o->in_stack)
			node->low = MIN(o->DFSnum, node->low);
	}
	else {
		/* IGNORE predecessors */
	}

	if (node->low == node->DFSnum) {
		scc *pscc = OALLOC(&env->obst, scc);
		ir_node *x;

		pscc->head = NULL;
		do {
			node_entry *e;

			x = pop(env);
			e = get_irn_ne(x, env);
			e->pscc    = pscc;
			e->next    = pscc->head;
			pscc->head = x;
		} while (x != irn);

		process_scc(pscc, env);
	}
}  /* dfs */
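/*
 * Illustrative note (not from the original sources): node->low ==
 * node->DFSnum identifies irn as the root of an SCC in Tarjan's scheme;
 * everything popped down to and including irn belongs to that SCC.  A
 * single Load forms a trivial SCC, while a Phi/Load cycle inside a loop
 * forms a non-trivial one that process_loop() then inspects.
 */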
/**
 * Do the DFS on the memory edges of a graph.
 *
 * @param irg  the graph to process
 * @param env  the loop environment
 */
static void do_dfs(ir_graph *irg, loop_env *env)
{
	ir_node *endblk, *end;
	int i;

	inc_irg_visited(irg);

	/* visit all memory nodes */
	endblk = get_irg_end_block(irg);
	for (i = get_Block_n_cfgpreds(endblk) - 1; i >= 0; --i) {
		ir_node *pred = get_Block_cfgpred(endblk, i);

		pred = skip_Proj(pred);
		if (is_Return(pred)) {
			dfs(get_Return_mem(pred), env);
		} else if (is_Raise(pred)) {
			dfs(get_Raise_mem(pred), env);
		} else if (is_fragile_op(pred)) {
			dfs(get_fragile_op_mem(pred), env);
		} else if (is_Bad(pred)) {
			/* ignore non-optimized block predecessor */
		} else {
			assert(0 && "Unknown EndBlock predecessor");
		}
	}

	/* visit the keep-alives */
	end = get_irg_end(irg);
	for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
		ir_node *ka = get_End_keepalive(end, i);

		if (is_Phi(ka) && !irn_visited(ka))
			dfs(ka, env);
	}
}  /* do_dfs */
/**
 * Optimize Loads/Stores in loops.
 *
 * @param irg  the graph
 */
static int optimize_loops(ir_graph *irg)
{
	loop_env env;

	env.stack      = NEW_ARR_F(ir_node *, 128);
	env.tos        = 0;
	env.nextDFSnum = 0;
	env.POnum      = 0;
	env.changes    = 0;
	ir_nodehashmap_init(&env.map);
	obstack_init(&env.obst);

	/* calculate the SCC's and drive loop optimization. */
	do_dfs(irg, &env);

	DEL_ARR_F(env.stack);
	obstack_free(&env.obst, NULL);
	ir_nodehashmap_destroy(&env.map);

	return env.changes;
}  /* optimize_loops */
/*
 * do the load store optimization
 */
static ir_graph_state_t do_loadstore_opt(ir_graph *irg)
{
	walk_env_t env;
	ir_graph_state_t res = 0;

	FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");

	assert(get_irg_phase_state(irg) != phase_building);
	assert(get_irg_pinned(irg) != op_pin_state_floats &&
		"LoadStore optimization needs pinned graph");

	if (get_opt_alias_analysis()) {
		assure_irp_globals_entity_usage_computed();
	}

	obstack_init(&env.obst);
	env.changes = 0;

	/* init the links, then collect Loads/Stores/Proj's in lists */
	master_visited = 0;
	irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);

	/* now we have collected enough information, optimize */
	irg_walk_graph(irg, NULL, do_load_store_optimize, &env);

	env.changes |= optimize_loops(irg);

	obstack_free(&env.obst, NULL);

	/* Handle graph state */
	if (env.changes) {
		edges_deactivate(irg);
	}

	if (!(env.changes & CF_CHANGED)) {
		res |= IR_GRAPH_STATE_CONSISTENT_DOMINANCE | IR_GRAPH_STATE_NO_BADS;
	}

	return res;
}  /* do_loadstore_opt */
static optdesc_t opt_loadstore = {
	"load-store",
	IR_GRAPH_STATE_NO_UNREACHABLE_CODE | IR_GRAPH_STATE_CONSISTENT_OUT_EDGES | IR_GRAPH_STATE_NO_CRITICAL_EDGES | IR_GRAPH_STATE_CONSISTENT_DOMINANCE | IR_GRAPH_STATE_CONSISTENT_ENTITY_USAGE,
	do_loadstore_opt,
};

int optimize_load_store(ir_graph *irg)
{
	perform_irg_optimization(irg, &opt_loadstore);
	return 1;
}  /* optimize_load_store */

ir_graph_pass_t *optimize_load_store_pass(const char *name)
{
	return def_graph_pass_ret(name ? name : "ldst", optimize_load_store);
}  /* optimize_load_store_pass */