 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.

 * @brief   Load/Store optimizations.
 * @author  Michael Beck
#include "iroptimize.h"
#include "irgraph_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "opt_polymorphy.h"
#include "irphase_t.h"

/** The debug handle. */
DEBUG_ONLY(static firm_dbg_module_t *dbg;)

#include "cacheopt/cachesim.h"

#define IMAX(a,b)   ((a) > (b) ? (a) : (b))

#define MAX_PROJ    IMAX(IMAX(pn_Load_max, pn_Store_max), pn_Call_max)
/** A bitmask of graph changes. */
enum changes_t {
    DF_CHANGED = 1,       /**< data flow changed */
    CF_CHANGED = 2,       /**< control flow changed */
};

/** walker environment */
typedef struct _walk_env_t {
    struct obstack obst;          /**< obstack used to allocate per-node info */
    unsigned       changes;       /**< a bitmask of graph changes */
} walk_env_t;

/** A Load/Store info. */
typedef struct _ldst_info_t {
    ir_node  *projs[MAX_PROJ+1];  /**< list of Proj's of this node */
    ir_node  *exc_block;          /**< the exception block if available */
    int      exc_idx;             /**< predecessor index in the exception block */
    unsigned visited;             /**< visited counter for breaking loops */
} ldst_info_t;
/**
 * flags for control flow.
 */
enum block_flags_t {
    BLOCK_HAS_COND = 1,  /**< Block has conditional control flow */
    BLOCK_HAS_EXC  = 2   /**< Block has exceptional control flow */
};

/** A block info. */
typedef struct _block_info_t {
    unsigned flags;  /**< flags for the block */
} block_info_t;

/** the master visited flag for loop detection. */
static unsigned master_visited = 0;

#define INC_MASTER()       ++master_visited
#define MARK_NODE(info)    (info)->visited = master_visited
#define NODE_VISITED(info) (info)->visited >= master_visited
/**
 * get the Load/Store info of a node
 */
static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst) {
    ldst_info_t *info = get_irn_link(node);

    if (info == NULL) {
        info = OALLOCZ(obst, ldst_info_t);
        set_irn_link(node, info);
    }
    return info;
}  /* get_ldst_info */

/**
 * get the Block info of a node
 */
static block_info_t *get_block_info(ir_node *node, struct obstack *obst) {
    block_info_t *info = get_irn_link(node);

    if (info == NULL) {
        info = OALLOCZ(obst, block_info_t);
        set_irn_link(node, info);
    }
    return info;
}  /* get_block_info */
/**
 * update the projection info for a Load/Store
 */
static unsigned update_projs(ldst_info_t *info, ir_node *proj)
{
    long nr = get_Proj_proj(proj);

    assert(0 <= nr && nr <= MAX_PROJ && "Wrong proj from LoadStore");

    if (info->projs[nr]) {
        /* there is already one, do CSE */
        exchange(proj, info->projs[nr]);
        return DF_CHANGED;
    } else {
        info->projs[nr] = proj;
        return 0;
    }
}  /* update_projs */
/**
 * update the exception block info for a Load/Store node.
 *
 * @param info   the load/store info struct
 * @param block  the exception handler block for this load/store
 * @param pos    the control flow input of the block
 */
static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
{
    assert(info->exc_block == NULL && "more than one exception block found");

    info->exc_block = block;
    info->exc_idx   = pos;
    return 0;
}  /* update_exc */

/** Return the number of uses of an address node */
#define get_irn_n_uses(adr) get_irn_n_edges(adr)
/**
 * walker, collects all Load/Store/Proj nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
{
    ir_opcode   opcode = get_irn_opcode(node);
    ir_node     *pred, *blk, *pred_blk;
    ldst_info_t *ldst_info;
    walk_env_t  *wenv = env;

    if (opcode == iro_Proj) {
        pred   = get_Proj_pred(node);
        opcode = get_irn_opcode(pred);

        if (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call) {
            ldst_info = get_ldst_info(pred, &wenv->obst);

            wenv->changes |= update_projs(ldst_info, node);

            /*
             * Place the Proj's to the same block as the
             * predecessor Load. This is always ok and prevents
             * "non-SSA" form after optimizations if the Proj
             * is in a wrong block.
             */
            blk      = get_nodes_block(node);
            pred_blk = get_nodes_block(pred);
            if (blk != pred_blk) {
                wenv->changes |= DF_CHANGED;
                set_nodes_block(node, pred_blk);
            }
        }
    } else if (opcode == iro_Block) {
        int i;

        for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
            ir_node      *pred_block, *proj;
            block_info_t *bl_info;
            int          is_exc = 0;

            pred = proj = get_Block_cfgpred(node, i);

            if (is_Proj(proj)) {
                pred   = get_Proj_pred(proj);
                is_exc = get_Proj_proj(proj) == pn_Generic_X_except;
            }

            /* ignore Bad predecessors, they will be removed later */
            if (is_Bad(pred))
                continue;

            pred_block = get_nodes_block(pred);
            bl_info    = get_block_info(pred_block, &wenv->obst);

            if (is_fragile_op(pred) && is_exc)
                bl_info->flags |= BLOCK_HAS_EXC;
            else if (is_irn_forking(pred))
                bl_info->flags |= BLOCK_HAS_COND;

            opcode = get_irn_opcode(pred);
            if (is_exc && (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call)) {
                ldst_info = get_ldst_info(pred, &wenv->obst);

                wenv->changes |= update_exc(ldst_info, node, i);
            }
        }
    }
}  /* collect_nodes */
/**
 * Returns an entity if the address ptr points to a constant one.
 *
 * @param ptr  the address
 *
 * @return an entity or NULL
 */
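/*
 * Example (a sketch, names invented): for an address computed as
 * Sel(SymConst(&A), 3) -- roughly &A.arr[3] in C terms -- the entity
 * of A is returned, provided the index is constant and lies within
 * the array bounds.
 */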
static ir_entity *find_constant_entity(ir_node *ptr)
{
    for (;;) {
        if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
            return get_SymConst_entity(ptr);
        } else if (is_Sel(ptr)) {
            ir_entity *ent = get_Sel_entity(ptr);
            ir_type   *tp  = get_entity_owner(ent);

            /* Do not fiddle with polymorphism. */
            if (is_Class_type(get_entity_owner(ent)) &&
                ((get_entity_n_overwrites(ent)    != 0) ||
                 (get_entity_n_overwrittenby(ent) != 0)   ) )
                return NULL;

            if (is_Array_type(tp)) {
                int i, n;

                /* check bounds */
                for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
                    ir_node *bound;
                    tarval  *tlower, *tupper;
                    ir_node *index = get_Sel_index(ptr, i);
                    tarval  *tv    = computed_value(index);

                    /* check if the index is constant */
                    if (tv == tarval_bad)
                        return NULL;

                    bound  = get_array_lower_bound(tp, i);
                    tlower = computed_value(bound);
                    bound  = get_array_upper_bound(tp, i);
                    tupper = computed_value(bound);

                    if (tlower == tarval_bad || tupper == tarval_bad)
                        return NULL;

                    if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
                        return NULL;
                    if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
                        return NULL;

                    /* ok, bounds check finished */
                }
            }

            if (variability_constant == get_entity_variability(ent))
                return ent;

            /* try next */
            ptr = get_Sel_ptr(ptr);
        } else if (is_Add(ptr)) {
            ir_node *l = get_Add_left(ptr);
            ir_node *r = get_Add_right(ptr);

            if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
                ptr = l;
            else if (get_irn_mode(r) == get_irn_mode(ptr) && is_Const(l))
                ptr = r;
            else
                return NULL;

            /* for now, we support only one addition, reassoc should fold all others */
            if (! is_SymConst(ptr) && !is_Sel(ptr))
                return NULL;
        } else if (is_Sub(ptr)) {
            ir_node *l = get_Sub_left(ptr);
            ir_node *r = get_Sub_right(ptr);

            if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
                ptr = l;
            else
                return NULL;

            /* for now, we support only one subtraction, reassoc should fold all others */
            if (! is_SymConst(ptr) && !is_Sel(ptr))
                return NULL;
        } else
            return NULL;
    }
}  /* find_constant_entity */
/**
 * Return the array index of a Sel node from dimension dim as a long.
 */
static long get_Sel_array_index_long(ir_node *n, int dim) {
    ir_node *index = get_Sel_index(n, dim);
    assert(is_Const(index));
    return get_tarval_long(get_Const_tarval(index));
}  /* get_Sel_array_index_long */
/**
 * Returns the accessed component graph path for a
 * node computing an address.
 *
 * @param ptr    the node computing the address
 * @param depth  current depth in steps upward from the root
 */
static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
    compound_graph_path *res = NULL;
    ir_entity           *root, *field, *ent;
    int                 path_len, pos, idx;
    tarval              *tv;
    ir_type             *tp;

    if (is_SymConst(ptr)) {
        /* a SymConst. If the depth is 0, this is an access to a global
         * entity and we don't need a component path, else we know
         * at least its length.
         */
        assert(get_SymConst_kind(ptr) == symconst_addr_ent);
        root = get_SymConst_entity(ptr);
        res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
    } else if (is_Sel(ptr)) {
        /* it's a Sel, go up until we find the root */
        res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);
        if (res == NULL)
            return NULL;

        /* fill up the step in the path at the current position */
        field    = get_Sel_entity(ptr);
        path_len = get_compound_graph_path_length(res);
        pos      = path_len - depth - 1;
        set_compound_graph_path_node(res, pos, field);

        if (is_Array_type(get_entity_owner(field))) {
            assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
            set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
        }
    } else if (is_Add(ptr)) {
        ir_node *l    = get_Add_left(ptr);
        ir_node *r    = get_Add_right(ptr);
        ir_mode *mode = get_irn_mode(ptr);
        tarval  *tmp;

        if (is_Const(r) && get_irn_mode(l) == mode) {
            ptr = l;
            tv  = get_Const_tarval(r);
        } else {
            ptr = r;
            tv  = get_Const_tarval(l);
        }
ptr_arith:
        mode = get_tarval_mode(tv);
        tmp  = tv;

        /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
        if (is_Sel(ptr)) {
            field = get_Sel_entity(ptr);
        } else {
            field = get_SymConst_entity(ptr);
        }

        idx = 0;
        for (ent = field;;) {
            unsigned size;
            tarval   *sz, *tv_index, *tlower, *tupper;
            ir_node  *bound;

            tp = get_entity_type(ent);
            if (! is_Array_type(tp))
                break;
            ent  = get_array_element_entity(tp);
            size = get_type_size_bytes(get_entity_type(ent));
            sz   = new_tarval_from_long(size, mode);

            tv_index = tarval_div(tmp, sz);
            tmp      = tarval_mod(tmp, sz);

            if (tv_index == tarval_bad || tmp == tarval_bad)
                return NULL;

            assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
            bound  = get_array_lower_bound(tp, 0);
            tlower = computed_value(bound);
            bound  = get_array_upper_bound(tp, 0);
            tupper = computed_value(bound);

            if (tlower == tarval_bad || tupper == tarval_bad)
                return NULL;

            if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
                return NULL;
            if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
                return NULL;

            /* ok, bounds check finished */
            ++idx;
        }
        if (! tarval_is_null(tmp)) {
            /* access to some struct/union member */
            return NULL;
        }

        /* should be at least ONE array */
        if (idx == 0)
            return NULL;

        res = rec_get_accessed_path(ptr, depth + idx);
        if (res == NULL)
            return NULL;

        path_len = get_compound_graph_path_length(res);
        pos      = path_len - depth - idx;

        for (ent = field;;) {
            unsigned size;
            tarval   *sz, *tv_index;
            long     index;

            tp = get_entity_type(ent);
            if (! is_Array_type(tp))
                break;
            ent = get_array_element_entity(tp);
            set_compound_graph_path_node(res, pos, ent);

            size = get_type_size_bytes(get_entity_type(ent));
            sz   = new_tarval_from_long(size, mode);

            tv_index = tarval_div(tv, sz);
            tv       = tarval_mod(tv, sz);

            /* worked above, should work again */
            assert(tv_index != tarval_bad && tv != tarval_bad);

            /* bounds already checked above */
            index = get_tarval_long(tv_index);
            set_compound_graph_path_array_index(res, pos, index);
            ++pos;
        }
    } else if (is_Sub(ptr)) {
        ir_node *l = get_Sub_left(ptr);
        ir_node *r = get_Sub_right(ptr);

        ptr = l;
        tv  = get_Const_tarval(r);
        tv  = tarval_neg(tv);
        goto ptr_arith;
    }
    return res;
}  /* rec_get_accessed_path */
/**
 * Returns an access path or NULL.  The access path is valid only
 * if the graph is in phase_high and _no_ address computation is used.
 */
static compound_graph_path *get_accessed_path(ir_node *ptr) {
    compound_graph_path *gr = rec_get_accessed_path(ptr, 0);
    return gr;
}  /* get_accessed_path */
typedef struct path_entry {
    ir_entity         *ent;   /**< the entity of this step */
    struct path_entry *next;  /**< link to the next (outer) step */
    long              index;  /**< the array index of this step */
} path_entry;
static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) {
    path_entry       entry, *p;
    ir_entity        *ent, *field;
    ir_initializer_t *initializer;
    tarval           *tv;
    ir_type          *tp;
    unsigned         n;

    entry.next = next;
    if (is_SymConst(ptr)) {
        /* found the root */
        ent         = get_SymConst_entity(ptr);
        initializer = get_entity_initializer(ent);
        for (p = next; p != NULL;) {
            if (initializer->kind != IR_INITIALIZER_COMPOUND)
                return NULL;
            n  = get_initializer_compound_n_entries(initializer);
            tp = get_entity_type(ent);

            if (is_Array_type(tp)) {
                ent = get_array_element_entity(tp);
                if (ent != p->ent) {
                    /* a missing [0]: retry with the first array element */
                    initializer = get_initializer_compound_value(initializer, 0);
                    continue;
                }
            }
            if (p->index >= (int) n)
                return NULL;
            initializer = get_initializer_compound_value(initializer, p->index);

            p = p->next;
        }

        /* follow a possible chain of trailing array accesses */
        tp = get_entity_type(ent);
        while (is_Array_type(tp)) {
            ent = get_array_element_entity(tp);
            tp  = get_entity_type(ent);
            n   = get_initializer_compound_n_entries(initializer);
            if (n == 0)
                return NULL;
            initializer = get_initializer_compound_value(initializer, 0);
        }

        switch (initializer->kind) {
        case IR_INITIALIZER_CONST:
            return get_initializer_const_value(initializer);
        case IR_INITIALIZER_TARVAL:
        case IR_INITIALIZER_NULL:
        default:
            return NULL;
        }
    } else if (is_Sel(ptr)) {
        entry.ent = field = get_Sel_entity(ptr);
        tp = get_entity_owner(field);
        if (is_Array_type(tp)) {
            assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
            entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
        } else {
            int i, n_members = get_compound_n_members(tp);

            for (i = 0; i < n_members; ++i) {
                if (get_compound_member(tp, i) == field)
                    break;
            }
            if (i >= n_members) {
                /* not found: should NOT happen */
                return NULL;
            }
            entry.index = i;
        }
        return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
    } else if (is_Add(ptr)) {
        ir_node  *l = get_Add_left(ptr);
        ir_node  *r = get_Add_right(ptr);
        ir_mode  *mode;
        unsigned pos;

        if (is_Const(r)) {
            ptr = l;
            tv  = get_Const_tarval(r);
        } else {
            ptr = r;
            tv  = get_Const_tarval(l);
        }
ptr_arith:
        mode = get_tarval_mode(tv);

        /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
        if (is_Sel(ptr)) {
            field = get_Sel_entity(ptr);
        } else {
            field = get_SymConst_entity(ptr);
        }

        /* count needed entries */
        pos = 0;
        for (ent = field;;) {
            tp = get_entity_type(ent);
            if (! is_Array_type(tp))
                break;
            ent = get_array_element_entity(tp);
            ++pos;
        }
        /* should be at least ONE entry */
        if (pos == 0)
            return NULL;

        /* allocate the right number of entries */
        NEW_ARR_A(path_entry, p, pos);

        /* fill them up */
        pos = 0;
        for (ent = field;;) {
            unsigned size;
            tarval   *sz, *tv_index, *tlower, *tupper;
            long     index;
            ir_node  *bound;

            tp = get_entity_type(ent);
            if (! is_Array_type(tp))
                break;
            ent = get_array_element_entity(tp);

            p[pos].ent  = ent;
            p[pos].next = &p[pos + 1];

            size = get_type_size_bytes(get_entity_type(ent));
            sz   = new_tarval_from_long(size, mode);

            tv_index = tarval_div(tv, sz);
            tv       = tarval_mod(tv, sz);

            if (tv_index == tarval_bad || tv == tarval_bad)
                return NULL;

            assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
            bound  = get_array_lower_bound(tp, 0);
            tlower = computed_value(bound);
            bound  = get_array_upper_bound(tp, 0);
            tupper = computed_value(bound);

            if (tlower == tarval_bad || tupper == tarval_bad)
                return NULL;

            if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
                return NULL;
            if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
                return NULL;

            /* ok, bounds check finished */
            index = get_tarval_long(tv_index);
            p[pos].index = index;
            ++pos;
        }
        if (! tarval_is_null(tv)) {
            /* hmm, wrong access */
            return NULL;
        }
        p[pos - 1].next = next;
        return rec_find_compound_ent_value(ptr, p);
    } else if (is_Sub(ptr)) {
        ir_node *l = get_Sub_left(ptr);
        ir_node *r = get_Sub_right(ptr);

        ptr = l;
        tv  = get_Const_tarval(r);
        tv  = tarval_neg(tv);
        goto ptr_arith;
    }
    return NULL;
}  /* rec_find_compound_ent_value */

static ir_node *find_compound_ent_value(ir_node *ptr) {
    return rec_find_compound_ent_value(ptr, NULL);
}  /* find_compound_ent_value */
static void reduce_adr_usage(ir_node *ptr);

/**
 * Update a Load that may have lost its users.
 */
static void handle_load_update(ir_node *load) {
    ldst_info_t *info = get_irn_link(load);

    /* do NOT touch volatile loads for now */
    if (get_Load_volatility(load) == volatility_is_volatile)
        return;

    if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
        ir_node *ptr = get_Load_ptr(load);
        ir_node *mem = get_Load_mem(load);

        /* a Load whose value is neither used nor exception checked, remove it */
        exchange(info->projs[pn_Load_M], mem);
        if (info->projs[pn_Load_X_regular])
            exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
        kill_node(load);
        reduce_adr_usage(ptr);
    }
}  /* handle_load_update */
/**
 * A use of an address node has vanished. Check if this was a Proj
 * node and update the counters.
 */
static void reduce_adr_usage(ir_node *ptr) {
    if (! is_Proj(ptr))
        return;
    if (get_irn_n_edges(ptr) <= 0) {
        /* this Proj is dead now */
        ir_node *pred = get_Proj_pred(ptr);

        if (is_Load(pred)) {
            ldst_info_t *info = get_irn_link(pred);
            info->projs[get_Proj_proj(ptr)] = NULL;

            /* this node lost its result proj, handle that */
            handle_load_update(pred);
        }
    }
}  /* reduce_adr_usage */
/**
 * Check if an already existing value of mode old_mode can be converted
 * into the needed mode new_mode without loss.
 */
static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode) {
    if (old_mode == new_mode)
        return 1;

    /* if both modes are two's complement ones, we can always convert the
       stored value into the needed one. */
    if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
          get_mode_arithmetic(old_mode) == irma_twos_complement &&
          get_mode_arithmetic(new_mode) == irma_twos_complement)
        return 1;
    return 0;
}  /* can_use_stored_value */
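/*
 * Example: a value stored with a 32-bit int mode and loaded back with
 * an 8-bit char mode can be reused (both are two's complement and
 * 32 >= 8); the reverse direction (char stored, int loaded) cannot.
 */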
/**
 * Check whether a Call is at least pure, i.e. does only read memory.
 */
static unsigned is_Call_pure(ir_node *call) {
    ir_type  *call_tp = get_Call_type(call);
    unsigned prop     = get_method_additional_properties(call_tp);

    /* check first the call type */
    if ((prop & (mtp_property_const|mtp_property_pure)) == 0) {
        /* try the called entity */
        ir_node *ptr = get_Call_ptr(call);

        if (is_Global(ptr)) {
            ir_entity *ent = get_Global_entity(ptr);

            prop = get_entity_additional_properties(ent);
        }
    }
    return (prop & (mtp_property_const|mtp_property_pure)) != 0;
}  /* is_Call_pure */
static ir_node *get_base_and_offset(ir_node *ptr, long *pOffset)
{
    ir_mode *mode   = get_irn_mode(ptr);
    long    offset  = 0;

    /* TODO: long might not be enough, we should probably use some tarval thingy... */
    for (;;) {
        if (is_Add(ptr)) {
            ir_node *l = get_Add_left(ptr);
            ir_node *r = get_Add_right(ptr);

            if (get_irn_mode(l) != mode || !is_Const(r))
                break;

            offset += get_tarval_long(get_Const_tarval(r));
            ptr     = l;
        } else if (is_Sub(ptr)) {
            ir_node *l = get_Sub_left(ptr);
            ir_node *r = get_Sub_right(ptr);

            if (get_irn_mode(l) != mode || !is_Const(r))
                break;

            offset -= get_tarval_long(get_Const_tarval(r));
            ptr     = l;
        } else if (is_Sel(ptr)) {
            ir_entity *ent = get_Sel_entity(ptr);
            ir_type   *tp  = get_entity_owner(ent);

            if (is_Array_type(tp)) {
                int     size;
                ir_node *index;

                /* only one-dimensional arrays yet */
                if (get_Sel_n_indexs(ptr) != 1)
                    break;
                index = get_Sel_index(ptr, 0);
                if (! is_Const(index))
                    break;

                tp = get_entity_type(ent);
                if (get_type_state(tp) != layout_fixed)
                    break;

                size    = get_type_size_bytes(tp);
                offset += size * get_tarval_long(get_Const_tarval(index));
            } else {
                if (get_type_state(tp) != layout_fixed)
                    break;
                offset += get_entity_offset(ent);
            }
            ptr = get_Sel_ptr(ptr);
        } else
            break;
    }

    *pOffset = offset;
    return ptr;
}  /* get_base_and_offset */
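/*
 * Example (a sketch, names invented): for an address chain like
 * (p + 8) - 4 over the same base pointer, get_base_and_offset()
 * returns p with *pOffset == 4; for a Sel into a fixed-layout
 * compound it adds the entity offset, for a constant-indexed
 * one-dimensional array the scaled element offset.
 */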
static int try_load_after_store(ir_node *load,
        ir_node *load_base_ptr, long load_offset, ir_node *store)
{
    ldst_info_t *info;
    ir_node *store_ptr      = get_Store_ptr(store);
    long    store_offset;
    ir_node *store_base_ptr = get_base_and_offset(store_ptr, &store_offset);
    ir_node *store_value;
    ir_mode *load_mode, *store_mode;
    long    load_mode_len, store_mode_len, delta;
    ir_node *load_ptr;
    int     res = 0;

    if (load_base_ptr != store_base_ptr)
        return 0;

    load_mode      = get_Load_mode(load);
    load_mode_len  = get_mode_size_bytes(load_mode);
    store_mode     = get_irn_mode(get_Store_value(store));
    store_mode_len = get_mode_size_bytes(store_mode);
    delta          = load_offset - store_offset;
    store_value    = get_Store_value(store);

    if (delta != 0 || store_mode != load_mode) {
        if (delta < 0 || delta + load_mode_len > store_mode_len)
            return 0;

        if (get_mode_arithmetic(store_mode) != irma_twos_complement ||
            get_mode_arithmetic(load_mode)  != irma_twos_complement)
            return 0;

        /* produce a shift to adjust offset delta */
        if (delta > 0) {
            ir_node *cnst;

            /* FIXME: only true for little endian */
            cnst        = new_Const_long(mode_Iu, delta * 8);
            store_value = new_r_Shr(get_nodes_block(load),
                                    store_value, cnst, store_mode);
        }

        /* add a Conv if needed */
        if (store_mode != load_mode) {
            store_value = new_r_Conv(get_nodes_block(load), store_value, load_mode);
        }
    }

    DBG_OPT_RAW(load, store_value);

    info = get_irn_link(load);
    if (info->projs[pn_Load_M])
        exchange(info->projs[pn_Load_M], get_Load_mem(load));

    /* no exception */
    if (info->projs[pn_Load_X_except]) {
        exchange( info->projs[pn_Load_X_except], new_Bad());
        res |= CF_CHANGED;
    }
    if (info->projs[pn_Load_X_regular]) {
        exchange( info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
        res |= CF_CHANGED;
    }

    if (info->projs[pn_Load_res])
        exchange(info->projs[pn_Load_res], store_value);

    load_ptr = get_Load_ptr(load);
    kill_node(load);
    reduce_adr_usage(load_ptr);
    return res | DF_CHANGED;
}  /* try_load_after_store */
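/*
 * A minimal sketch of the delta/shift handling above, assuming little
 * endian layout (cf. the FIXME) and invented names:
 *
 *     s.i = 0x11223344;          Store, 4 bytes at offset 0
 *     c   = *((char *)&s + 1);   Load, 1 byte at offset 1
 *
 * delta = 1, so the stored value is shifted right by delta * 8 = 8
 * bits and converted to the load mode, yielding c == 0x33.
 */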
/**
 * Follow the memory chain as long as there are only Loads,
 * alias-free Stores, and constant Calls and try to replace the
 * current Load by a previous one.
 * Note that in unreachable loops it might happen that we reach the
 * Load again, and we can also fall into a cycle.
 * We break such cycles using a special visited flag.
 *
 * INC_MASTER() must be called before diving in.
 */
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
    unsigned    res  = 0;
    ldst_info_t *info = get_irn_link(load);
    ir_node     *pred;
    ir_node     *ptr       = get_Load_ptr(load);
    ir_node     *mem       = get_Load_mem(load);
    ir_mode     *load_mode = get_Load_mode(load);

    for (pred = curr; load != pred; ) {
        ldst_info_t *pred_info = get_irn_link(pred);

        /*
         * a Load immediately after a Store -- a read after write.
         * We may remove the Load, if neither the Load nor the Store has an
         * exception handler OR they are in the same MacroBlock. In the latter
         * case the Load cannot throw an exception when the previous Store was
         * quiet.
         *
         * Why do we need to check for the Store exception? If the Store cannot
         * be executed (ROM) the exception handler might simply jump into
         * the load MacroBlock :-(
         * We could make it a little bit better if we would know that the
         * exception handler of the Store jumps directly to the end...
         */
        if (is_Store(pred) && ((pred_info->projs[pn_Store_X_except] == NULL
                && info->projs[pn_Load_X_except] == NULL)
                || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)))
        {
            long    load_offset;
            ir_node *base_ptr = get_base_and_offset(ptr, &load_offset);
            int     changes   = try_load_after_store(load, base_ptr, load_offset, pred);

            if (changes != 0)
                return res | changes;
        } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
                   can_use_stored_value(get_Load_mode(pred), load_mode)) {
            /*
             * a Load after a Load -- a read after read.
             * We may remove the second Load, if it does not have an exception
             * handler OR they are in the same MacroBlock. In the latter case
             * the Load cannot throw an exception when the previous Load was
             * quiet.
             *
             * Here, there is no need to check if the previous Load has an
             * exception handler because they would have exactly the same
             * exception...
             */
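            /*
             * In source terms (a sketch): x = *p; ... y = *p;
             * becomes x = *p; ... y = x; provided no potentially
             * aliasing Store occurs in between.
             */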
            if (info->projs[pn_Load_X_except] == NULL || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)) {
                ir_node *value;

                DBG_OPT_RAR(load, pred);

                /* the result is used */
                if (info->projs[pn_Load_res]) {
                    if (pred_info->projs[pn_Load_res] == NULL) {
                        /* create a new Proj again */
                        pred_info->projs[pn_Load_res] = new_r_Proj(get_nodes_block(pred), pred, get_Load_mode(pred), pn_Load_res);
                    }
                    value = pred_info->projs[pn_Load_res];

                    /* add a Conv if needed */
                    if (get_Load_mode(pred) != load_mode) {
                        value = new_r_Conv(get_nodes_block(load), value, load_mode);
                    }

                    exchange(info->projs[pn_Load_res], value);
                }

                if (info->projs[pn_Load_M])
                    exchange(info->projs[pn_Load_M], mem);

                /* no exception */
                if (info->projs[pn_Load_X_except]) {
                    exchange(info->projs[pn_Load_X_except], new_Bad());
                    res |= CF_CHANGED;
                }
                if (info->projs[pn_Load_X_regular]) {
                    exchange( info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                    res |= CF_CHANGED;
                }

                kill_node(load);
                reduce_adr_usage(ptr);
                return res | DF_CHANGED;
            }
        }

        if (is_Store(pred)) {
            /* check if we can pass through this store */
            ir_alias_relation rel = get_alias_relation(
                current_ir_graph,
                get_Store_ptr(pred),
                get_irn_mode(get_Store_value(pred)),
                ptr, load_mode);
            /* if there might be an alias, we cannot pass this Store */
            if (rel != ir_no_alias)
                break;
            pred = skip_Proj(get_Store_mem(pred));
        } else if (is_Load(pred)) {
            pred = skip_Proj(get_Load_mem(pred));
        } else if (is_Call(pred)) {
            if (is_Call_pure(pred)) {
                /* The called graph is at least pure, so there are no Store's
                   in it. We can handle it like a Load and skip it. */
                pred = skip_Proj(get_Call_mem(pred));
            } else {
                /* there might be Store's in the graph, stop here */
                break;
            }
        } else {
            /* follow only Load chains */
            break;
        }

        /* check for cycles */
        if (NODE_VISITED(pred_info))
            break;
        MARK_NODE(pred_info);
    }

    if (is_Sync(pred)) {
        int i;

        /* handle all Sync predecessors */
        for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
            res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
        }
    }
    return res;
}  /* follow_Mem_chain */
/**
 * Check if we can replace the load by a given const from
 * the const code irg.
 */
ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c) {
    ir_mode *c_mode = get_irn_mode(c);
    ir_mode *l_mode = get_Load_mode(load);
    ir_node *res    = NULL;

    if (c_mode != l_mode) {
        /* check, if the mode matches OR can be easily converted into */
        if (is_reinterpret_cast(c_mode, l_mode)) {
            /* we can safely cast */
            dbg_info *dbg   = get_irn_dbg_info(load);
            ir_node  *block = get_nodes_block(load);

            /* copy the value from the const code irg and cast it */
            res = copy_const_value(dbg, c);
            res = new_rd_Conv(dbg, block, res, l_mode);
        }
    } else {
        /* copy the value from the const code irg */
        res = copy_const_value(get_irn_dbg_info(load), c);
    }
    return res;
}  /* can_replace_load_by_const */
/**
 * optimize a Load
 *
 * @param load  the Load node
 */
static unsigned optimize_load(ir_node *load)
{
    ldst_info_t *info = get_irn_link(load);
    ir_node     *mem, *ptr, *value;
    ir_entity   *ent;
    long        dummy;
    unsigned    res = 0;

    /* do NOT touch volatile loads for now */
    if (get_Load_volatility(load) == volatility_is_volatile)
        return 0;

    /* the address of the load to be optimized */
    ptr = get_Load_ptr(load);

    /*
     * Check if we can remove the exception from a Load:
     * This can be done, if the address is from a Sel(Alloc) and
     * the Sel type is a subtype of the allocated type.
     *
     * This optimizes some often used OO constructs,
     * like x = new O; x->t;
     */
    if (info->projs[pn_Load_X_except]) {
        ir_node *addr = ptr;

        /* find base address */
        while (is_Sel(addr))
            addr = get_Sel_ptr(addr);
        if (is_Alloc(skip_Proj(skip_Cast(addr)))) {
            /* simple case: a direct load after an Alloc. A Firm Alloc throws
             * an exception in case of out-of-memory, so there is no way for an
             * exception in this load.
             * This code is constructed by the "exception lowering" in the Jack compiler.
             */
            exchange(info->projs[pn_Load_X_except], new_Bad());
            info->projs[pn_Load_X_except] = NULL;
            exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
            info->projs[pn_Load_X_regular] = NULL;
            res |= CF_CHANGED;
        }
    }

    /* The mem of the Load. Must still be returned after optimization. */
    mem = get_Load_mem(load);

    if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
        /* a Load whose value is neither used nor exception checked, remove it */
        exchange(info->projs[pn_Load_M], mem);

        if (info->projs[pn_Load_X_regular]) {
            /* should not happen, but if it does, remove it */
            exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
            res |= CF_CHANGED;
        }
        kill_node(load);
        reduce_adr_usage(ptr);
        return res | DF_CHANGED;
    }

    /* Load from a constant polymorphic field, where we can resolve
       polymorphism. */
    value = transform_polymorph_Load(load);
    if (value == load) {
        value = NULL;
        /* check if we can determine the entity that will be loaded */
        ent = find_constant_entity(ptr);
        if (ent != NULL &&
            allocation_static == get_entity_allocation(ent) &&
            visibility_external_allocated != get_entity_visibility(ent)) {
            /* a static allocation that is not external: there should be NO exception
             * when loading even if we cannot replace the load itself. */

            /* no exception, clear the info field as it might be checked later again */
            if (info->projs[pn_Load_X_except]) {
                exchange(info->projs[pn_Load_X_except], new_Bad());
                info->projs[pn_Load_X_except] = NULL;
                res |= CF_CHANGED;
            }
            if (info->projs[pn_Load_X_regular]) {
                exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                info->projs[pn_Load_X_regular] = NULL;
                res |= CF_CHANGED;
            }

            if (variability_constant == get_entity_variability(ent)) {
                if (is_atomic_entity(ent)) {
                    /* Might not be atomic after lowering of Sels. In this case we
                     * could also load, but it's more complicated. */
                    /* simpler case: we load the content of a constant value:
                     * replace it by the constant itself */
                    value = get_atomic_ent_value(ent);
                } else if (ent->has_initializer) {
                    /* new style initializer */
                    value = find_compound_ent_value(ptr);
                } else {
                    /* old style initializer */
                    compound_graph_path *path = get_accessed_path(ptr);

                    if (path != NULL) {
                        assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));

                        value = get_compound_ent_value_by_path(ent, path);
                        DB((dbg, LEVEL_1, "  Constant access at %F%F resulted in %+F\n", ent, path, value));
                        free_compound_graph_path(path);
                    }
                }
            }
            if (value != NULL)
                value = can_replace_load_by_const(load, value);
        }
    }
    if (value != NULL) {
        /* we completely replace the load by this value */
        if (info->projs[pn_Load_X_except]) {
            exchange(info->projs[pn_Load_X_except], new_Bad());
            info->projs[pn_Load_X_except] = NULL;
            res |= CF_CHANGED;
        }
        if (info->projs[pn_Load_X_regular]) {
            exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
            info->projs[pn_Load_X_regular] = NULL;
            res |= CF_CHANGED;
        }
        if (info->projs[pn_Load_M]) {
            exchange(info->projs[pn_Load_M], mem);
            res |= DF_CHANGED;
        }
        if (info->projs[pn_Load_res]) {
            exchange(info->projs[pn_Load_res], value);
            res |= DF_CHANGED;
        }
        kill_node(load);
        reduce_adr_usage(ptr);
        return res;
    }

    /* Check, if the address of this load is used more than once.
     * If not, this load cannot be removed in any case. */
    if (get_irn_n_uses(ptr) <= 1 && get_irn_n_uses(get_base_and_offset(ptr, &dummy)) <= 1)
        return res;

    /*
     * follow the memory chain as long as there are only Loads
     * and try to replace the current Load or Store by a previous one.
     * Note that in unreachable loops it might happen that we reach the
     * Load again, and we can also fall into a cycle.
     * We break such cycles using a special visited flag.
     */
    INC_MASTER();
    res = follow_Mem_chain(load, skip_Proj(mem));
    return res;
}  /* optimize_load */
/**
 * Check whether a value of mode new_mode would completely overwrite a value
 * of mode old_mode in memory.
 */
static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
{
    return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
}  /* is_completely_overwritten */
/**
 * Check whether small is a part of large (starting at same address).
 */
static int is_partially_same(ir_node *small, ir_node *large)
{
    ir_mode *sm = get_irn_mode(small);
    ir_mode *lm = get_irn_mode(large);

    /* FIXME: Check endianness */
    return is_Conv(small) && get_Conv_op(small) == large
        && get_mode_size_bytes(sm) < get_mode_size_bytes(lm)
        && get_mode_arithmetic(sm) == irma_twos_complement
        && get_mode_arithmetic(lm) == irma_twos_complement;
}  /* is_partially_same */
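/*
 * Example: after  *(int *)p = i;  *(char *)p = (char)i;  the second
 * Store writes a prefix of the value already in memory, so
 * is_partially_same((char)i, i) holds and the second Store is
 * redundant (subject to the endianness FIXME above).
 */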
/**
 * follow the memory chain as long as there are only Loads and alias-free
 * Stores.
 *
 * INC_MASTER() must be called before diving in.
 */
static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
    unsigned    res   = 0;
    ldst_info_t *info = get_irn_link(store);
    ir_node     *pred;
    ir_node     *ptr   = get_Store_ptr(store);
    ir_node     *mem   = get_Store_mem(store);
    ir_node     *value = get_Store_value(store);
    ir_mode     *mode  = get_irn_mode(value);
    ir_node     *block = get_nodes_block(store);
    ir_node     *mblk  = get_Block_MacroBlock(block);

    for (pred = curr; pred != store;) {
        ldst_info_t *pred_info = get_irn_link(pred);

        /*
         * BEWARE: one might think that checking the modes is useless, because
         * if the pointers are identical, they refer to the same object.
         * This is only true in strongly typed languages, not in C where the
         * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
         * However, if the size of the mode that is written is bigger than or
         * equal to the size of the old one, the old value is completely
         * overwritten and can be killed ...
         */
        if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
            get_nodes_MacroBlock(pred) == mblk) {
            /*
             * a Store after a Store in the same MacroBlock -- a write after write.
             */

            /*
             * We may remove the first Store, if the old value is completely
             * overwritten or the old value is a part of the new value,
             * and if it does not have an exception handler.
             *
             * TODO: What, if both have the same exception handler ???
             */
            if (get_Store_volatility(pred) != volatility_is_volatile
                    && !pred_info->projs[pn_Store_X_except]) {
                ir_node *predvalue = get_Store_value(pred);
                ir_mode *predmode  = get_irn_mode(predvalue);

                if (is_completely_overwritten(predmode, mode)
                        || is_partially_same(predvalue, value)) {
                    DBG_OPT_WAW(pred, store);
                    exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
                    kill_node(pred);
                    reduce_adr_usage(ptr);
                    return DF_CHANGED;
                }
            }

            /*
             * We may remove the Store, if the old value already contains
             * the new value, and if it does not have an exception handler.
             *
             * TODO: What, if both have the same exception handler ???
             */
            if (get_Store_volatility(store) != volatility_is_volatile
                    && !info->projs[pn_Store_X_except]) {
                ir_node *predvalue = get_Store_value(pred);

                if (is_partially_same(value, predvalue)) {
                    DBG_OPT_WAW(pred, store);
                    exchange(info->projs[pn_Store_M], mem);
                    kill_node(store);
                    reduce_adr_usage(ptr);
                    return DF_CHANGED;
                }
            }
        } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
                   value == pred_info->projs[pn_Load_res]) {
            /*
             * a Store of a value just loaded from the same address
             * -- a write after read.
             * We may remove the Store, if it does not have an exception
             * handler.
             */
            if (! info->projs[pn_Store_X_except]) {
                DBG_OPT_WAR(store, pred);
                exchange(info->projs[pn_Store_M], mem);
                kill_node(store);
                reduce_adr_usage(ptr);
                return DF_CHANGED;
            }
        }

        if (is_Store(pred)) {
            /* check if we can pass through this store */
            ir_alias_relation rel = get_alias_relation(
                current_ir_graph,
                get_Store_ptr(pred),
                get_irn_mode(get_Store_value(pred)),
                ptr, mode);
            /* if there might be an alias, we cannot pass this Store */
            if (rel != ir_no_alias)
                break;
            pred = skip_Proj(get_Store_mem(pred));
        } else if (is_Load(pred)) {
            ir_alias_relation rel = get_alias_relation(
                current_ir_graph, get_Load_ptr(pred), get_Load_mode(pred),
                ptr, mode);
            if (rel != ir_no_alias)
                break;

            pred = skip_Proj(get_Load_mem(pred));
        } else {
            /* follow only Load chains */
            break;
        }

        /* check for cycles */
        if (NODE_VISITED(pred_info))
            break;
        MARK_NODE(pred_info);
    }

    if (is_Sync(pred)) {
        int i;

        /* handle all Sync predecessors */
        for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
            res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
        }
    }
    return res;
}  /* follow_Mem_chain_for_Store */
/** find entity used as base for an address calculation */
static ir_entity *find_entity(ir_node *ptr)
{
    switch (get_irn_opcode(ptr)) {
    case iro_SymConst:
        return get_SymConst_entity(ptr);
    case iro_Sel: {
        ir_node *pred = get_Sel_ptr(ptr);
        if (get_irg_frame(get_irn_irg(ptr)) == pred)
            return get_Sel_entity(ptr);

        return find_entity(pred);
    }
    case iro_Sub:
    case iro_Add: {
        ir_node *left = get_binop_left(ptr);
        ir_node *right;

        if (mode_is_reference(get_irn_mode(left)))
            return find_entity(left);
        right = get_binop_right(ptr);
        if (mode_is_reference(get_irn_mode(right)))
            return find_entity(right);
        return NULL;
    }
    default:
        return NULL;
    }
}  /* find_entity */
/**
 * optimize a Store
 *
 * @param store  the Store node
 */
static unsigned optimize_store(ir_node *store) {
    ir_node   *ptr, *mem;
    ir_entity *entity;

    if (get_Store_volatility(store) == volatility_is_volatile)
        return 0;

    ptr    = get_Store_ptr(store);
    entity = find_entity(ptr);

    /* a Store to an entity which is never read is unnecessary */
    if (entity != NULL && !(get_entity_usage(entity) & ir_usage_read)) {
        ldst_info_t *info = get_irn_link(store);
        if (info->projs[pn_Store_X_except] == NULL) {
            DB((dbg, LEVEL_1, "  Killing useless %+F to never read entity %+F\n", store, entity));
            exchange(info->projs[pn_Store_M], get_Store_mem(store));
            kill_node(store);
            reduce_adr_usage(ptr);
            return DF_CHANGED;
        }
    }

    /* Check, if the address of this Store is used more than once.
     * If not, this Store cannot be removed in any case. */
    if (get_irn_n_uses(ptr) <= 1)
        return 0;

    mem = get_Store_mem(store);

    /* follow the memory chain as long as there are only Loads */
    INC_MASTER();

    return follow_Mem_chain_for_Store(store, skip_Proj(mem));
}  /* optimize_store */
/**
 * walker, optimizes Phi after Stores to identical places:
 * Does the following optimization:
 * @verbatim
 *
 *   val1   val2   val3          val1  val2  val3
 *    |      |      |               \    |    /
 *  Store  Store  Store              \   |   /
 *      \    |    /                   PhiData
 *       \   |   /                       |
 *        \  |  /                      Store
 *         PhiM
 *
 * @endverbatim
 * This reduces the number of stores and allows for predicated execution.
 * Moves Stores back to the end of a function, which may be bad.
 *
 * This is only possible if the predecessor blocks have only one successor.
 */
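/*
 * Source-level sketch of the effect (names invented):
 *
 *     if (c) *p = val1; else *p = val2;   ==>   *p = c ? val1 : val2;
 *
 * two Stores to the same address become one Store fed by a data Phi.
 */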
static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
{
    int i, n;
    ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
    ir_mode *mode;
    ir_node **inM, **inD, **projMs;
    int *idx;
    dbg_info *db = NULL;
    ldst_info_t *info;
    block_info_t *bl_info;
    unsigned res = 0;

    /* Must be a memory Phi */
    if (get_irn_mode(phi) != mode_M)
        return 0;

    n = get_Phi_n_preds(phi);
    if (n <= 0)
        return 0;

    /* must be only one user */
    projM = get_Phi_pred(phi, 0);
    if (get_irn_n_edges(projM) != 1)
        return 0;

    store = skip_Proj(projM);
    old_store = store;
    if (!is_Store(store))
        return 0;

    block = get_nodes_block(store);

    /* abort on dead blocks */
    if (is_Block_dead(block))
        return 0;

    /* check if the block is post dominated by Phi-block
       and has no exception exit */
    bl_info = get_irn_link(block);
    if (bl_info->flags & BLOCK_HAS_EXC)
        return 0;

    phi_block = get_nodes_block(phi);
    if (! block_strictly_postdominates(phi_block, block))
        return 0;

    /* this is the address of the store */
    ptr  = get_Store_ptr(store);
    mode = get_irn_mode(get_Store_value(store));
    info = get_irn_link(store);
    exc  = info->exc_block;

    for (i = 1; i < n; ++i) {
        ir_node *pred = get_Phi_pred(phi, i);

        if (get_irn_n_edges(pred) != 1)
            return 0;

        pred = skip_Proj(pred);
        if (!is_Store(pred))
            return 0;

        if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
            return 0;

        info = get_irn_link(pred);

        /* check, if all stores have the same exception flow */
        if (exc != info->exc_block)
            return 0;

        /* abort on dead blocks */
        block = get_nodes_block(pred);
        if (is_Block_dead(block))
            return 0;

        /* check if the block is post dominated by Phi-block
           and has no exception exit. Note that block must be different from
           Phi-block, else we would move a Store from the end of a block to its
           start... */
        bl_info = get_irn_link(block);
        if (bl_info->flags & BLOCK_HAS_EXC)
            return 0;
        if (block == phi_block || ! block_postdominates(phi_block, block))
            return 0;
    }

    /*
     * ok, when we are here, we found all predecessors of a Phi that
     * are Stores to the same address and size. That means whatever
     * we do before we enter the block of the Phi, we do a Store.
     * So, we can move the Store to the current block:
     *
     *   val1    val2    val3          val1  val2  val3
     *    |       |       |               \    |    /
     * | Str | | Str | | Str |             \   |   /
     *      \     |     /                   PhiData
     *       \    |    /                       |
     *        \   |   /                      Store
     *          PhiM
     *
     * Is only allowed if the predecessor blocks have only one successor.
     */

    NEW_ARR_A(ir_node *, projMs, n);
    NEW_ARR_A(ir_node *, inM, n);
    NEW_ARR_A(ir_node *, inD, n);
    NEW_ARR_A(int, idx, n);

    /* Prepare: Collect all Store nodes.  We must do this
       first because we otherwise may lose a store when exchanging its
       memory Proj.
     */
    for (i = n - 1; i >= 0; --i) {
        projMs[i] = get_Phi_pred(phi, i);
        assert(is_Proj(projMs[i]));

        store = get_Proj_pred(projMs[i]);
        info  = get_irn_link(store);

        inM[i] = get_Store_mem(store);
        inD[i] = get_Store_value(store);
        idx[i] = info->exc_idx;
    }
    block = get_nodes_block(phi);

    /* second step: create a new memory Phi */
    phiM = new_rd_Phi(get_irn_dbg_info(phi), block, n, inM, mode_M);

    /* third step: create a new data Phi */
    phiD = new_rd_Phi(get_irn_dbg_info(phi), block, n, inD, mode);

    /* rewire memory and kill the node */
    for (i = n - 1; i >= 0; --i) {
        ir_node *proj = projMs[i];

        if (is_Proj(proj)) {
            ir_node *store = get_Proj_pred(proj);
            exchange(proj, inM[i]);
            kill_node(store);
        }
    }

    /* fourth step: create the Store */
    store = new_rd_Store(db, block, phiM, ptr, phiD, 0);

    co_set_irn_name(store, co_get_irn_ident(old_store));

    projM = new_rd_Proj(NULL, block, store, mode_M, pn_Store_M);

    info = get_ldst_info(store, &wenv->obst);
    info->projs[pn_Store_M] = projM;

    /* fifth step: repair exception flow */
    if (exc) {
        ir_node *projX = new_rd_Proj(NULL, block, store, mode_X, pn_Store_X_except);

        info->projs[pn_Store_X_except] = projX;
        info->exc_block                = exc;
        info->exc_idx                  = idx[0];

        for (i = 0; i < n; ++i) {
            set_Block_cfgpred(exc, idx[i], projX);
        }

        /* the exception block should be optimized as some inputs are identical now */

        res |= CF_CHANGED;
    }

    /* sixth step: replace old Phi */
    exchange(phi, projM);

    return res | DF_CHANGED;
}  /* optimize_phi */
/**
 * walker, do the optimizations
 */
static void do_load_store_optimize(ir_node *n, void *env) {
    walk_env_t *wenv = env;

    switch (get_irn_opcode(n)) {
    case iro_Load:
        wenv->changes |= optimize_load(n);
        break;
    case iro_Store:
        wenv->changes |= optimize_store(n);
        break;
    case iro_Phi:
        wenv->changes |= optimize_phi(n, wenv);
        break;
    default:
        break;
    }
}  /* do_load_store_optimize */
/** An SCC. */
typedef struct scc {
    ir_node *head;      /**< the head of the list */
} scc;

/** A node entry. */
typedef struct node_entry {
    unsigned DFSnum;    /**< the DFS number of this node */
    unsigned low;       /**< the low number of this node */
    int      in_stack;  /**< flag, set if the node is on the stack */
    ir_node  *next;     /**< link to the next node in the same scc */
    scc      *pscc;     /**< the scc of this node */
    unsigned POnum;     /**< the post order number for blocks */
} node_entry;

/** A loop entry. */
typedef struct loop_env {
    ir_phase ph;           /**< the phase object */
    ir_node  **stack;      /**< the node stack */
    int      tos;          /**< tos index */
    unsigned nextDFSnum;   /**< the current DFS number */
    unsigned POnum;        /**< current post order number */

    unsigned changes;      /**< a bitmask of graph changes */
} loop_env;
/**
 * Gets the node_entry of a node
 */
static node_entry *get_irn_ne(ir_node *irn, loop_env *env) {
    ir_phase   *ph = &env->ph;
    node_entry *e  = phase_get_irn_data(&env->ph, irn);

    if (e == NULL) {
        e = phase_alloc(ph, sizeof(*e));
        memset(e, 0, sizeof(*e));
        phase_set_irn_data(ph, irn, e);
    }
    return e;
}  /* get_irn_ne */

/**
 * Push a node onto the stack.
 *
 * @param env  the loop environment
 * @param n    the node to push
 */
static void push(loop_env *env, ir_node *n) {
    node_entry *e;

    if (env->tos == ARR_LEN(env->stack)) {
        int nlen = ARR_LEN(env->stack) * 2;
        ARR_RESIZE(ir_node *, env->stack, nlen);
    }
    env->stack[env->tos++] = n;
    e = get_irn_ne(n, env);
    e->in_stack = 1;
}  /* push */

/**
 * pop a node from the stack
 *
 * @param env  the loop environment
 *
 * @return  The topmost node
 */
static ir_node *pop(loop_env *env) {
    ir_node *n = env->stack[--env->tos];
    node_entry *e = get_irn_ne(n, env);

    e->in_stack = 0;
    return n;
}  /* pop */
/**
 * Check if irn is a region constant.
 * The block of irn must strictly dominate the header block.
 *
 * @param irn           the node to check
 * @param header_block  the header block of the induction variable
 */
static int is_rc(ir_node *irn, ir_node *header_block) {
    ir_node *block = get_nodes_block(irn);

    return (block != header_block) && block_dominates(block, header_block);
}  /* is_rc */
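/* Example: a SymConst address or any value computed in a block that
 * strictly dominates the loop header qualifies as a region constant. */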
typedef struct phi_entry phi_entry;
struct phi_entry {
    ir_node   *phi;    /**< A phi with a region const memory. */
    int       pos;     /**< The position of the region const memory */
    ir_node   *load;   /**< the newly created load for this phi */
    phi_entry *next;
};
/**
 * An entry in the avail set.
 */
typedef struct avail_entry_t {
    ir_node *ptr;   /**< the address pointer */
    ir_mode *mode;  /**< the load mode */
    ir_node *load;  /**< the associated Load */
} avail_entry_t;

/**
 * Compare two avail entries.
 */
static int cmp_avail_entry(const void *elt, const void *key, size_t size) {
    const avail_entry_t *a = elt;
    const avail_entry_t *b = key;
    (void) size;

    return a->ptr != b->ptr || a->mode != b->mode;
}  /* cmp_avail_entry */

/**
 * Calculate the hash value of an avail entry.
 */
static unsigned hash_cache_entry(const avail_entry_t *entry) {
    return get_irn_idx(entry->ptr) * 9 + HASH_PTR(entry->mode);
}  /* hash_cache_entry */
/**
 * Move Loads out of loops if possible.
 *
 * @param pscc  the loop described by an SCC
 * @param env   the loop environment
 */
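/*
 * Source-level sketch (assuming g is a global that no Store in the
 * loop may alias, names invented):
 *
 *     while (c) { sum += g; }   ==>   t = g; while (c) { sum += t; }
 *
 * the Load of g executes once before the loop instead of once per
 * iteration.
 */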
static void move_loads_out_of_loops(scc *pscc, loop_env *env) {
    ir_node   *phi, *load, *next, *other, *next_other;
    ir_entity *ent;
    int       j;
    phi_entry *phi_list = NULL;
    set       *avail;

    avail = new_set(cmp_avail_entry, 8);

    /* collect all outer memories */
    for (phi = pscc->head; phi != NULL; phi = next) {
        node_entry *ne = get_irn_ne(phi, env);
        next = ne->next;

        /* check all memory Phi's */
        if (! is_Phi(phi))
            continue;

        assert(get_irn_mode(phi) == mode_M && "DFS found a non-memory Phi");

        for (j = get_irn_arity(phi) - 1; j >= 0; --j) {
            ir_node    *pred = get_irn_n(phi, j);
            node_entry *pe   = get_irn_ne(pred, env);

            if (pe->pscc != ne->pscc) {
                /* not in the same SCC, is region const */
                phi_entry *pe = phase_alloc(&env->ph, sizeof(*pe));

                pe->phi  = phi;
                pe->pos  = j;
                pe->next = phi_list;
                phi_list = pe;
            }
        }
    }
    /* no Phis no fun */
    assert(phi_list != NULL && "DFS found a loop without Phi");

    /* for now, we cannot handle more than one input (only reducible cf) */
    if (phi_list->next != NULL)
        return;

    for (load = pscc->head; load; load = next) {
        ir_mode *load_mode;
        node_entry *ne = get_irn_ne(load, env);
        next = ne->next;

        if (is_Load(load)) {
            ldst_info_t *info = get_irn_link(load);
            ir_node     *ptr = get_Load_ptr(load);

            /* for now, we cannot handle Loads with exceptions */
            if (info->projs[pn_Load_res] == NULL || info->projs[pn_Load_X_regular] != NULL || info->projs[pn_Load_X_except] != NULL)
                continue;

            /* for now, we can only move Load(Global) */
            if (! is_Global(ptr))
                continue;
            ent       = get_Global_entity(ptr);
            load_mode = get_Load_mode(load);
            for (other = pscc->head; other != NULL; other = next_other) {
                node_entry *ne = get_irn_ne(other, env);
                next_other = ne->next;

                if (is_Store(other)) {
                    ir_alias_relation rel = get_alias_relation(
                        current_ir_graph,
                        get_Store_ptr(other),
                        get_irn_mode(get_Store_value(other)),
                        ptr, load_mode);
                    /* if there might be an alias, we cannot pass this Store */
                    if (rel != ir_no_alias)
                        break;
                }
                /* only Phis and pure Calls are allowed here, so ignore them */
            }
            if (other == NULL) {
                ldst_info_t *ninfo;
                phi_entry   *pe;
                dbg_info    *db;

                /* yep, no aliasing Store found, Load can be moved */
                DB((dbg, LEVEL_1, "  Found a Load that could be moved: %+F\n", load));

                db = get_irn_dbg_info(load);
                for (pe = phi_list; pe != NULL; pe = pe->next) {
                    int     pos   = pe->pos;
                    ir_node *phi  = pe->phi;
                    ir_node *blk  = get_nodes_block(phi);
                    ir_node *pred = get_Block_cfgpred_block(blk, pos);
                    ir_node *irn, *mem;
                    avail_entry_t entry, *res;

                    entry.ptr  = ptr;
                    entry.mode = load_mode;
                    res = set_find(avail, &entry, sizeof(entry), hash_cache_entry(&entry));
                    if (res != NULL) {
                        irn = res->load;
                    } else {
                        irn = new_rd_Load(db, pred, get_Phi_pred(phi, pos), ptr, load_mode, 0);
                        entry.load = irn;
                        set_insert(avail, &entry, sizeof(entry), hash_cache_entry(&entry));
                        DB((dbg, LEVEL_1, "  Created %+F in %+F\n", irn, pred));
                    }
                    ninfo = get_ldst_info(irn, phase_obst(&env->ph));

                    ninfo->projs[pn_Load_M] = mem = new_r_Proj(pred, irn, mode_M, pn_Load_M);
                    set_Phi_pred(phi, pos, mem);

                    ninfo->projs[pn_Load_res] = new_r_Proj(pred, irn, load_mode, pn_Load_res);
                }

                /* now kill the old Load */
                exchange(info->projs[pn_Load_M], get_Load_mem(load));
                exchange(info->projs[pn_Load_res], ninfo->projs[pn_Load_res]);

                env->changes |= DF_CHANGED;
            }
        }
    }
    del_set(avail);
}  /* move_loads_out_of_loops */
/**
 * Process a loop SCC.
 *
 * @param pscc  the SCC
 * @param env   the loop environment
 */
static void process_loop(scc *pscc, loop_env *env) {
    ir_node *irn, *next, *header = NULL;
    node_entry *b, *h = NULL;
    int j, only_phi, num_outside, process = 0;
    ir_node *out_rc;

    /* find the header block for this scc */
    for (irn = pscc->head; irn; irn = next) {
        node_entry *e = get_irn_ne(irn, env);
        ir_node *block = get_nodes_block(irn);

        next = e->next;
        b = get_irn_ne(block, env);

        if (header != NULL) {
            if (h->POnum < b->POnum) {
                header = block;
                h      = b;
            }
        } else {
            header = block;
            h      = b;
        }
    }

    /* check if this scc contains only Phi, Load or Store nodes */
    only_phi    = 1;
    num_outside = 0;
    out_rc      = NULL;
    for (irn = pscc->head; irn; irn = next) {
        node_entry *e = get_irn_ne(irn, env);

        next = e->next;
        switch (get_irn_opcode(irn)) {
        case iro_Call:
            if (is_Call_pure(irn)) {
                /* pure calls can be treated like loads */
                only_phi = 0;
                break;
            }
            /* non-pure calls must be handled like may-alias Stores */
            goto fail;
        case iro_CopyB:
            /* cannot handle CopyB yet */
            goto fail;
        case iro_Load:
            if (get_Load_volatility(irn) == volatility_is_volatile) {
                /* cannot handle loops with volatile Loads */
                goto fail;
            }
            only_phi = 0;
            break;
        case iro_Store:
            if (get_Store_volatility(irn) == volatility_is_volatile) {
                /* cannot handle loops with volatile Stores */
                goto fail;
            }
            only_phi = 0;
            break;
        default:
            break;
        }

        for (j = get_irn_arity(irn) - 1; j >= 0; --j) {
            ir_node *pred = get_irn_n(irn, j);
            node_entry *pe = get_irn_ne(pred, env);

            if (pe->pscc != e->pscc) {
                /* not in the same SCC, must be a region const */
                if (! is_rc(pred, header)) {
                    /* not a memory loop */
                    goto fail;
                }
                if (out_rc == NULL) {
                    /* first region constant */
                    out_rc = pred;
                    ++num_outside;
                } else if (out_rc != pred) {
                    /* another region constant */
                    ++num_outside;
                }
            }
        }
    }

    /* found a memory loop */
    DB((dbg, LEVEL_2, "  Found a memory loop:\n  "));
    if (only_phi && num_outside == 1) {
        /* a phi cycle with only one real predecessor can be collapsed */
        DB((dbg, LEVEL_2, "  Found a USELESS Phi cycle:\n  "));

        for (irn = pscc->head; irn; irn = next) {
            node_entry *e = get_irn_ne(irn, env);
            next = e->next;
            exchange(irn, out_rc);
        }
        env->changes |= DF_CHANGED;
        return;
    }

#ifdef DEBUG_libfirm
    for (irn = pscc->head; irn; irn = next) {
        node_entry *e = get_irn_ne(irn, env);
        next = e->next;
        DB((dbg, LEVEL_2, " %+F,", irn));
    }
    DB((dbg, LEVEL_2, "\n"));
#endif
    move_loads_out_of_loops(pscc, env);

fail:
    ;
}  /* process_loop */
/**
 * Process an SCC.
 *
 * @param pscc  the SCC
 * @param env   the loop environment
 */
static void process_scc(scc *pscc, loop_env *env) {
    ir_node *head = pscc->head;
    node_entry *e = get_irn_ne(head, env);

#ifdef DEBUG_libfirm
    {
        ir_node *irn, *next;

        DB((dbg, LEVEL_4, " SCC at %p:\n ", pscc));
        for (irn = pscc->head; irn; irn = next) {
            node_entry *e = get_irn_ne(irn, env);

            next = e->next;

            DB((dbg, LEVEL_4, " %+F,", irn));
        }
        DB((dbg, LEVEL_4, "\n"));
    }
#endif

    if (e->next != NULL) {
        /* this SCC has more than one member */
        process_loop(pscc, env);
    }
}  /* process_scc */
/**
 * Do Tarjan's SCC algorithm and drive load/store optimization.
 *
 * @param irn  start at this node
 * @param env  the loop environment
 */
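/*
 * Reminder on the invariant used below: irn is the root of an SCC iff
 * node->low == node->DFSnum once all predecessors have been visited;
 * the SCC members are then popped from the stack up to and including
 * irn.
 */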
static void dfs(ir_node *irn, loop_env *env)
{
    int i, n;
    node_entry *node = get_irn_ne(irn, env);

    mark_irn_visited(irn);

    node->DFSnum = env->nextDFSnum++;
    node->low    = node->DFSnum;
    push(env, irn);

    /* handle the predecessors */
    if (is_Phi(irn) || is_Sync(irn)) {
        n = get_irn_arity(irn);
        for (i = 0; i < n; ++i) {
            ir_node *pred = get_irn_n(irn, i);
            node_entry *o = get_irn_ne(pred, env);

            if (!irn_visited(pred)) {
                dfs(pred, env);
                node->low = MIN(node->low, o->low);
            }
            if (o->DFSnum < node->DFSnum && o->in_stack)
                node->low = MIN(o->DFSnum, node->low);
        }
    } else if (is_fragile_op(irn)) {
        ir_node *pred = get_fragile_op_mem(irn);
        node_entry *o = get_irn_ne(pred, env);

        if (!irn_visited(pred)) {
            dfs(pred, env);
            node->low = MIN(node->low, o->low);
        }
        if (o->DFSnum < node->DFSnum && o->in_stack)
            node->low = MIN(o->DFSnum, node->low);
    } else if (is_Proj(irn)) {
        ir_node *pred = get_Proj_pred(irn);
        node_entry *o = get_irn_ne(pred, env);

        if (!irn_visited(pred)) {
            dfs(pred, env);
            node->low = MIN(node->low, o->low);
        }
        if (o->DFSnum < node->DFSnum && o->in_stack)
            node->low = MIN(o->DFSnum, node->low);
    } else {
        /* IGNORE predecessors */
    }

    if (node->low == node->DFSnum) {
        scc *pscc = phase_alloc(&env->ph, sizeof(*pscc));
        ir_node *x;

        pscc->head = NULL;
        do {
            node_entry *e;

            x = pop(env);
            e = get_irn_ne(x, env);
            e->pscc    = pscc;
            e->next    = pscc->head;
            pscc->head = x;
        } while (x != irn);

        process_scc(pscc, env);
    }
}  /* dfs */
/**
 * Do the DFS on the memory edges of a graph.
 *
 * @param irg  the graph to process
 * @param env  the loop environment
 */
static void do_dfs(ir_graph *irg, loop_env *env) {
    ir_graph *rem = current_ir_graph;
    ir_node  *endblk, *end;
    int      i;

    current_ir_graph = irg;
    inc_irg_visited(irg);

    /* visit all memory nodes */
    endblk = get_irg_end_block(irg);
    for (i = get_Block_n_cfgpreds(endblk) - 1; i >= 0; --i) {
        ir_node *pred = get_Block_cfgpred(endblk, i);

        pred = skip_Proj(pred);
        if (is_Return(pred))
            dfs(get_Return_mem(pred), env);
        else if (is_Raise(pred))
            dfs(get_Raise_mem(pred), env);
        else if (is_fragile_op(pred))
            dfs(get_fragile_op_mem(pred), env);
        else {
            assert(0 && "Unknown EndBlock predecessor");
        }
    }

    /* visit the keep-alives */
    end = get_irg_end(irg);
    for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
        ir_node *ka = get_End_keepalive(end, i);

        if (is_Phi(ka) && !irn_visited(ka))
            dfs(ka, env);
    }
    current_ir_graph = rem;
}  /* do_dfs */
/**
 * Initialize new phase data. We always do this explicitly, so return NULL here.
 */
static void *init_loop_data(ir_phase *ph, const ir_node *irn, void *data) {
    (void) ph;
    (void) irn;
    (void) data;
    return NULL;
}  /* init_loop_data */

/**
 * Optimize Loads/Stores in loops.
 *
 * @param irg  the graph
 */
static int optimize_loops(ir_graph *irg) {
    loop_env env;

    env.stack      = NEW_ARR_F(ir_node *, 128);
    env.tos        = 0;
    env.nextDFSnum = 0;
    env.POnum      = 0;
    env.changes    = 0;
    phase_init(&env.ph, "ldstopt", irg, PHASE_DEFAULT_GROWTH, init_loop_data, NULL);

    /* calculate the SCC's and drive loop optimization. */
    do_dfs(irg, &env);

    DEL_ARR_F(env.stack);
    phase_free(&env.ph);

    return env.changes;
}  /* optimize_loops */
/*
 * do the load store optimization
 */
int optimize_load_store(ir_graph *irg) {
    walk_env_t env;

    FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");

    assert(get_irg_phase_state(irg) != phase_building);
    assert(get_irg_pinned(irg) != op_pin_state_floats &&
        "LoadStore optimization needs pinned graph");

    /* we need landing pads */
    remove_critical_cf_edges(irg);

    /* for Phi optimization post-dominators are needed ... */
    assure_postdoms(irg);

    if (get_opt_alias_analysis()) {
        assure_irg_entity_usage_computed(irg);
        assure_irp_globals_entity_usage_computed();
    }

    obstack_init(&env.obst);
    env.changes = 0;

    /* init the links, then collect Loads/Stores/Proj's in lists */
    irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);

    /* now we have collected enough information, optimize */
    irg_walk_graph(irg, NULL, do_load_store_optimize, &env);

    env.changes |= optimize_loops(irg);

    obstack_free(&env.obst, NULL);

    /* Handle graph state */
    if (env.changes) {
        set_irg_outs_inconsistent(irg);
        set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
    }

    if (env.changes & CF_CHANGED) {
        /* is this really needed? Yes: control flow changed, blocks might
           have Bad() predecessors. */
        set_irg_doms_inconsistent(irg);
    }
    return env.changes != 0;
}  /* optimize_load_store */
ir_graph_pass_t *optimize_load_store_pass(const char *name)
{
    return def_graph_pass_ret(name ? name : "ldst", optimize_load_store);
}  /* optimize_load_store_pass */