/*
 * Copyright (C) 1995-2007 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License Agreement
 * provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @brief   Load/Store optimizations.
 * @author  Michael Beck
 */

#include "iroptimize.h"
#include "irgraph_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "opt_polymorphy.h"
#ifdef DO_CACHEOPT
#include "cacheopt/cachesim.h"
#endif

#define IMAX(a,b)   ((a) > (b) ? (a) : (b))

#define MAX_PROJ    IMAX(pn_Load_max, pn_Store_max)

enum {
    DF_CHANGED = 1,       /**< data flow changed */
    CF_CHANGED = 2        /**< control flow changed */
};

/**
 * walker environment
 */
typedef struct _walk_env_t {
    struct obstack obst;          /**< obstack used to allocate the per-node info */
    unsigned       changes;       /**< a bitmask of graph changes */
} walk_env_t;

/**
 * flags for Load/Store
 */
enum {
    LDST_VISITED = 1              /**< if set, this Load/Store is already visited */
};

/** A Load/Store info. */
typedef struct _ldst_info_t {
    ir_node  *projs[MAX_PROJ];    /**< list of Projs of this node */
    ir_node  *exc_block;          /**< the exception block if available */
    int      exc_idx;             /**< predecessor index in the exception block */
    unsigned flags;               /**< flags */
    unsigned visited;             /**< visited counter for breaking loops */
} ldst_info_t;

/**
 * flags for control flow.
 */
enum {
    BLOCK_HAS_COND = 1,      /**< Block has conditional control flow */
    BLOCK_HAS_EXC  = 2       /**< Block has exceptional control flow */
};

/** A Block info. */
typedef struct _block_info_t {
    unsigned flags;               /**< flags for the block */
} block_info_t;

/** the master visited flag for loop detection. */
static unsigned master_visited = 0;

#define INC_MASTER()       ++master_visited
#define MARK_NODE(info)    ((info)->visited = master_visited)
#define NODE_VISITED(info) ((info)->visited >= master_visited)
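
/*
 * Usage sketch (illustrative): each walk over a memory chain first bumps
 * the master counter and then marks every node it visits:
 *
 *     INC_MASTER();
 *     ...
 *     if (NODE_VISITED(pred_info))
 *         break;                  (node already seen in this walk: a cycle)
 *     MARK_NODE(pred_info);
 *
 * Nodes marked in earlier walks compare below master_visited, so the
 * per-node counters never have to be reset between walks.
 */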

/**
 * get the Load/Store info of a node
 */
static ldst_info_t *get_ldst_info(ir_node *node, walk_env_t *env) {
    ldst_info_t *info = get_irn_link(node);

    if (! info) {
        info = obstack_alloc(&env->obst, sizeof(*info));
        memset(info, 0, sizeof(*info));
        set_irn_link(node, info);
    }
    return info;
} /* get_ldst_info */

/**
 * get the Block info of a node
 */
static block_info_t *get_block_info(ir_node *node, walk_env_t *env) {
    block_info_t *info = get_irn_link(node);

    if (! info) {
        info = obstack_alloc(&env->obst, sizeof(*info));
        memset(info, 0, sizeof(*info));
        set_irn_link(node, info);
    }
    return info;
} /* get_block_info */

/**
 * update the projection info for a Load/Store
 */
static unsigned update_projs(ldst_info_t *info, ir_node *proj)
{
    long nr = get_Proj_proj(proj);

    assert(0 <= nr && nr < MAX_PROJ && "Wrong proj from LoadStore");

    if (info->projs[nr]) {
        /* there is already one, do CSE */
        exchange(proj, info->projs[nr]);
        return DF_CHANGED;
    }
    info->projs[nr] = proj;
    return 0;
} /* update_projs */

/**
 * update the exception block info for a Load/Store node.
 *
 * @param info   the load/store info struct
 * @param block  the exception handler block for this load/store
 * @param pos    the control flow input of the block
 */
static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
{
    assert(info->exc_block == NULL && "more than one exception block found");

    info->exc_block = block;
    info->exc_idx   = pos;
    return 0;
} /* update_exc */

/** Return the number of uses of an address node */
#define get_irn_n_uses(adr) get_irn_n_edges(adr)

/**
 * walker, collects all Load/Store/Proj nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
{
    ir_op       *op = get_irn_op(node);
    ir_node     *pred, *blk, *pred_blk;
    ldst_info_t *ldst_info;
    walk_env_t  *wenv = env;

    if (op == op_Proj) {
        ir_node *adr;

        pred = get_Proj_pred(node);
        op   = get_irn_op(pred);

        if (op == op_Load) {
            ldst_info = get_ldst_info(pred, wenv);

            wenv->changes |= update_projs(ldst_info, node);

            if ((ldst_info->flags & LDST_VISITED) == 0) {
                adr = get_Load_ptr(pred);
                ldst_info->flags |= LDST_VISITED;
            }

            /*
             * Place the Projs in the same block as the
             * predecessor Load. This is always legal and prevents
             * a "non-SSA" form after optimizations if the Proj
             * is in the wrong block.
             */
            blk      = get_nodes_block(node);
            pred_blk = get_nodes_block(pred);
            if (blk != pred_blk) {
                wenv->changes |= DF_CHANGED;
                set_nodes_block(node, pred_blk);
            }
        } else if (op == op_Store) {
            ldst_info = get_ldst_info(pred, wenv);

            wenv->changes |= update_projs(ldst_info, node);

            if ((ldst_info->flags & LDST_VISITED) == 0) {
                adr = get_Store_ptr(pred);
                ldst_info->flags |= LDST_VISITED;
            }

            /*
             * Place the Projs in the same block as the
             * predecessor Store. This is always legal and prevents
             * a "non-SSA" form after optimizations if the Proj
             * is in the wrong block.
             */
            blk      = get_nodes_block(node);
            pred_blk = get_nodes_block(pred);
            if (blk != pred_blk) {
                wenv->changes |= DF_CHANGED;
                set_nodes_block(node, pred_blk);
            }
        }
    } else if (op == op_Block) {
        int i;

        for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
            ir_node      *pred_block, *proj;
            block_info_t *bl_info;
            int          is_exc = 0;

            pred = proj = get_Block_cfgpred(node, i);

            if (is_Proj(proj)) {
                pred   = get_Proj_pred(proj);
                is_exc = get_Proj_proj(proj) == pn_Generic_X_except;
            }

            /* ignore Bad predecessors, they will be removed later */
            if (is_Bad(pred))
                continue;

            pred_block = get_nodes_block(pred);
            bl_info    = get_block_info(pred_block, wenv);

            if (is_fragile_op(pred) && is_exc)
                bl_info->flags |= BLOCK_HAS_EXC;
            else if (is_irn_forking(pred))
                bl_info->flags |= BLOCK_HAS_COND;

            if (is_exc && (get_irn_op(pred) == op_Load || get_irn_op(pred) == op_Store)) {
                ldst_info = get_ldst_info(pred, wenv);

                wenv->changes |= update_exc(ldst_info, node, i);
            }
        }
    }
} /* collect_nodes */

/**
 * Returns an entity if the address ptr points to a constant one.
 *
 * @param ptr  the address
 *
 * @return an entity or NULL
 */
static ir_entity *find_constant_entity(ir_node *ptr)
{
    for (;;) {
        ir_op *op = get_irn_op(ptr);

        if (op == op_SymConst && (get_SymConst_kind(ptr) == symconst_addr_ent)) {
            ir_entity *ent = get_SymConst_entity(ptr);
            if (variability_constant == get_entity_variability(ent))
                return ent;
            return NULL;
        } else if (op == op_Sel) {
            ir_entity *ent = get_Sel_entity(ptr);
            ir_type   *tp  = get_entity_owner(ent);

            /* Do not fiddle with polymorphism. */
            if (is_Class_type(get_entity_owner(ent)) &&
                ((get_entity_n_overwrites(ent)    != 0) ||
                 (get_entity_n_overwrittenby(ent) != 0)   ) )
                return NULL;

            if (is_Array_type(tp)) {
                /* check bounds */
                int i, n;

                for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
                    ir_node *bound;
                    tarval  *tlower, *tupper;
                    ir_node *index = get_Sel_index(ptr, i);
                    tarval  *tv    = computed_value(index);

                    /* check if the index is constant */
                    if (tv == tarval_bad)
                        return NULL;

                    bound  = get_array_lower_bound(tp, i);
                    tlower = computed_value(bound);
                    bound  = get_array_upper_bound(tp, i);
                    tupper = computed_value(bound);

                    if (tlower == tarval_bad || tupper == tarval_bad)
                        return NULL;

                    if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
                        return NULL;
                    if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
                        return NULL;

                    /* ok, bounds check finished */
                }
            }

            if (variability_constant == get_entity_variability(ent))
                return ent;

            ptr = get_Sel_ptr(ptr);
        } else
            return NULL;
    }
} /* find_constant_entity */
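
/*
 * Illustrative source-level example (hypothetical user code): a Load from
 *
 *     static const int tab[4] = { 1, 2, 3, 5 };
 *     ... tab[2] ...
 *
 * reaches find_constant_entity() as a Sel with a constant, in-bounds
 * index on a constant entity, so the entity for tab is returned and the
 * Load can later be folded to the constant 3.
 */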

/**
 * Return the array index of a Sel node at dimension dim as a long.
 */
static long get_Sel_array_index_long(ir_node *n, int dim) {
    ir_node *index = get_Sel_index(n, dim);
    assert(get_irn_op(index) == op_Const);
    return get_tarval_long(get_Const_tarval(index));
} /* get_Sel_array_index_long */

/**
 * Returns the accessed component graph path for a
 * node computing an address.
 *
 * @param ptr    the node computing the address
 * @param depth  current depth in steps upward from the root
 */
static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
    compound_graph_path *res = NULL;
    ir_entity           *root, *field;
    int                 path_len, pos;

    if (get_irn_op(ptr) == op_SymConst) {
        /* a SymConst. If the depth is 0, this is an access to a global
         * entity and we don't need a component path, else we know
         * at least its length.
         */
        assert(get_SymConst_kind(ptr) == symconst_addr_ent);
        root = get_SymConst_entity(ptr);
        res  = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
    } else {
        assert(get_irn_op(ptr) == op_Sel);
        /* it's a Sel, go up until we find the root */
        res = rec_get_accessed_path(get_Sel_ptr(ptr), depth + 1);

        /* fill up the step in the path at the current position */
        field    = get_Sel_entity(ptr);
        path_len = get_compound_graph_path_length(res);
        pos      = path_len - depth - 1;
        set_compound_graph_path_node(res, pos, field);

        if (is_Array_type(get_entity_owner(field))) {
            assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
            set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
        }
    }
    return res;
} /* rec_get_accessed_path */

/** Returns an access path or NULL.  The access path is only
 *  valid if the graph is in phase_high and _no_ address computation is used.
 */
static compound_graph_path *get_accessed_path(ir_node *ptr) {
    return rec_get_accessed_path(ptr, 0);
} /* get_accessed_path */
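
/*
 * Illustrative example (hypothetical types): for an address like
 *
 *     struct pt { int coord[2]; };
 *     static const struct pt p = { { 3, 4 } };
 *     ... p.coord[1] ...
 *
 * the Sel chain above the SymConst for p yields the component graph path
 * ".coord" with array index 1 at its last position, which
 * get_compound_ent_value_by_path() can then resolve to the constant 4.
 */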

static void reduce_adr_usage(ir_node *ptr);

/**
 * Update a Load that may have lost its uses.
 */
static void handle_load_update(ir_node *load) {
    ldst_info_t *info = get_irn_link(load);

    /* do NOT touch volatile loads for now */
    if (get_Load_volatility(load) == volatility_is_volatile)
        return;

    if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
        ir_node *ptr = get_Load_ptr(load);
        ir_node *mem = get_Load_mem(load);

        /* a Load whose value is neither used nor exception-checked, remove it */
        exchange(info->projs[pn_Load_M], mem);
        exchange(load, new_Bad());
        reduce_adr_usage(ptr);
    }
} /* handle_load_update */

/**
 * A use of an address node has vanished. Check if this was a Proj
 * node and update the counters.
 */
static void reduce_adr_usage(ir_node *ptr) {
    if (! is_Proj(ptr))
        return;
    if (get_irn_n_edges(ptr) <= 0) {
        /* this Proj is dead now */
        ir_node *pred = get_Proj_pred(ptr);

        if (get_irn_op(pred) == op_Load) {
            ldst_info_t *info = get_irn_link(pred);
            info->projs[get_Proj_proj(ptr)] = NULL;

            /* this node lost its result Proj, handle that */
            handle_load_update(pred);
        }
    }
} /* reduce_adr_usage */

/**
 * Check if an already existing value of mode old_mode can be converted
 * into the needed one new_mode without loss.
 */
static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode) {
    if (old_mode == new_mode)
        return 1;

    /* if both modes are two's complement ones, we can always convert the
       stored value into the needed one. */
    if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
        get_mode_arithmetic(old_mode) == irma_twos_complement &&
        get_mode_arithmetic(new_mode) == irma_twos_complement)
        return 1;
    return 0;
} /* can_use_stored_value */
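
/*
 * Illustrative example (hypothetical user code): after
 *
 *     *(int *)p = i;               -- Store with a 32-bit mode
 *     c = *(signed char *)p;       -- Load with an 8-bit mode
 *
 * can_use_stored_value() accepts the pair (32-bit, 8-bit) because both
 * modes are two's complement and the stored value is at least as wide;
 * the caller then inserts a Conv that narrows the stored value to the
 * mode of the Load.
 */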

/**
 * Follow the memory chain as long as there are only Loads
 * and alias-free Stores and try to replace the current Load or Store
 * by a previous one.
 * Note that in unreachable loops it might happen that we reach the
 * Load again, i.e. we can run into a cycle.
 * We break such cycles using a special visited flag.
 *
 * INC_MASTER() must be called before diving into the chain.
 */
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
    unsigned    res  = 0;
    ldst_info_t *info = get_irn_link(load);
    ir_node     *pred;
    ir_node     *ptr       = get_Load_ptr(load);
    ir_node     *mem       = get_Load_mem(load);
    ir_mode     *load_mode = get_Load_mode(load);
    for (pred = curr; load != pred; ) {
        ldst_info_t *pred_info = get_irn_link(pred);

        /*
         * BEWARE: one might think that checking the modes is useless, because
         * if the pointers are identical, they refer to the same object.
         * This is only true in strongly typed languages, not in C, where the
         * following is possible: a = *(ir_type1 *)p; b = *(ir_type2 *)p ...
         */
        if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
            can_use_stored_value(get_irn_mode(get_Store_value(pred)), load_mode)) {
            /*
             * a Load immediately after a Store -- a read after write.
             * We may remove the Load if neither the Load nor the Store has an
             * exception handler OR they are in the same block. In the latter
             * case the Load cannot throw an exception if the previous Store
             * was quiet.
             *
             * Why do we need to check for the Store exception? If the Store
             * cannot be executed (e.g. the memory is read-only), control
             * continues at its exception handler and the Store never took effect.
             * We could make it a little bit better if we knew that the
             * exception handler of the Store jumps directly to the end...
             */
            if ((pred_info->projs[pn_Store_X_except] == NULL && info->projs[pn_Load_X_except] == NULL) ||
                get_nodes_block(load) == get_nodes_block(pred)) {
                ir_node *value = get_Store_value(pred);

                DBG_OPT_RAW(load, value);

                /* add a Conv if needed */
                if (get_irn_mode(get_Store_value(pred)) != load_mode) {
                    value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
                }

                if (info->projs[pn_Load_M])
                    exchange(info->projs[pn_Load_M], mem);

                /* no exception */
                if (info->projs[pn_Load_X_except]) {
                    exchange(info->projs[pn_Load_X_except], new_Bad());
                    res |= CF_CHANGED;
                }

                if (info->projs[pn_Load_res])
                    exchange(info->projs[pn_Load_res], value);

                exchange(load, new_Bad());
                reduce_adr_usage(ptr);
                return res | DF_CHANGED;
            }
        } else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
                   can_use_stored_value(get_Load_mode(pred), load_mode)) {
            /*
             * a Load after a Load -- a read after read.
             * We may remove the second Load if it does not have an exception
             * handler OR they are in the same block. In the latter case the
             * Load cannot throw an exception if the previous Load was quiet.
             *
             * Here there is no need to check if the previous Load has an
             * exception handler, because both would raise exactly the same
             * exception...
             */
            if (info->projs[pn_Load_X_except] == NULL || get_nodes_block(load) == get_nodes_block(pred)) {
                ir_node *value;

                DBG_OPT_RAR(load, pred);

                /* the result is used */
                if (info->projs[pn_Load_res]) {
                    if (pred_info->projs[pn_Load_res] == NULL) {
                        /* create a new Proj again */
                        pred_info->projs[pn_Load_res] = new_r_Proj(current_ir_graph, get_nodes_block(pred), pred, get_Load_mode(pred), pn_Load_res);
                    }
                    value = pred_info->projs[pn_Load_res];

                    /* add a Conv if needed */
                    if (get_Load_mode(pred) != load_mode) {
                        value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
                    }

                    exchange(info->projs[pn_Load_res], value);
                }

                if (info->projs[pn_Load_M])
                    exchange(info->projs[pn_Load_M], mem);

                /* no exception */
                if (info->projs[pn_Load_X_except]) {
                    exchange(info->projs[pn_Load_X_except], new_Bad());
                    res |= CF_CHANGED;
                }

                exchange(load, new_Bad());
                reduce_adr_usage(ptr);
                return res | DF_CHANGED;
            }
        }
        if (get_irn_op(pred) == op_Store) {
            /* check if we can pass through this store */
            ir_alias_relation rel = get_alias_relation(
                current_ir_graph,
                get_Store_ptr(pred),
                get_irn_mode(get_Store_value(pred)),
                ptr, load_mode);
            /* if there might be an alias, we cannot pass this Store */
            if (rel != no_alias)
                break;
            pred = skip_Proj(get_Store_mem(pred));
        } else if (get_irn_op(pred) == op_Load) {
            pred = skip_Proj(get_Load_mem(pred));
        } else {
            /* follow only Load chains */
            break;
        }

        /* check for cycles */
        if (NODE_VISITED(pred_info))
            break;
        MARK_NODE(pred_info);
    }

    if (get_irn_op(pred) == op_Sync) {
        int i;

        /* handle all Sync predecessors */
        for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
            res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
        }
    }

    return res;
} /* follow_Mem_chain */
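
/*
 * Source-level illustration (hypothetical code) of the two patterns
 * follow_Mem_chain() removes:
 *
 *     *p = x; a = *p;     -- read after write: the Load is replaced by x
 *     a = *p; b = *p;     -- read after read: the second Load is replaced
 *                            by the result Proj of the first
 *
 * Stores in between stop the walk only if get_alias_relation() cannot
 * prove that their address does not alias p.
 */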

/**
 * optimizes a Load
 *
 * @param load  the Load node
 */
static unsigned optimize_load(ir_node *load)
{
    ldst_info_t *info = get_irn_link(load);
    ir_node     *mem, *ptr, *new_node;
    ir_entity   *ent;
    unsigned    res = 0;

    /* do NOT touch volatile loads for now */
    if (get_Load_volatility(load) == volatility_is_volatile)
        return 0;

    /* the address of the load to be optimized */
    ptr = get_Load_ptr(load);

    /*
     * Check if we can remove the exception from a Load:
     * This can be done if the address is from a Sel(Alloc) and
     * the Sel type is a subtype of the allocated type.
     *
     * This optimizes some often used OO constructs,
     * like x = new O; x->t;
     */
    if (info->projs[pn_Load_X_except]) {
        if (get_irn_op(ptr) == op_Sel) {
            ir_node *mem = get_Sel_mem(ptr);

            /* FIXME: works with the current FE, but better use the base */
            if (get_irn_op(skip_Proj(mem)) == op_Alloc) {
                /* ok, check the types */
                ir_entity *ent    = get_Sel_entity(ptr);
                ir_type   *s_type = get_entity_type(ent);
                ir_type   *a_type = get_Alloc_type(mem);

                if (is_SubClass_of(s_type, a_type)) {
                    /* ok, condition met: there can't be an exception because
                     * Alloc guarantees that enough memory was allocated */
                    exchange(info->projs[pn_Load_X_except], new_Bad());
                    info->projs[pn_Load_X_except] = NULL;
                    res |= CF_CHANGED;
                }
            }
        } else if ((get_irn_op(skip_Proj(ptr)) == op_Alloc) ||
                   ((get_irn_op(ptr) == op_Cast) && (get_irn_op(skip_Proj(get_Cast_op(ptr))) == op_Alloc))) {
            /* simple case: a direct load after an Alloc. Firm's Alloc throws
             * an exception in case of out-of-memory, so there is no way for an
             * exception in this Load.
             * This code is constructed by the "exception lowering" in the Jack compiler.
             */
            exchange(info->projs[pn_Load_X_except], new_Bad());
            info->projs[pn_Load_X_except] = NULL;
            res |= CF_CHANGED;
        }
    }

    /* The mem of the Load. Must still be returned after optimization. */
    mem = get_Load_mem(load);

    if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
        /* a Load whose value is neither used nor exception-checked, remove it */
        exchange(info->projs[pn_Load_M], mem);

        exchange(load, new_Bad());
        reduce_adr_usage(ptr);
        return res | DF_CHANGED;
    }

    /* Load from a constant polymorphic field, where we can resolve the
       polymorphism. */
    new_node = transform_node_Load(load);
    if (new_node != load) {
        if (info->projs[pn_Load_M]) {
            exchange(info->projs[pn_Load_M], mem);
            info->projs[pn_Load_M] = NULL;
        }
        if (info->projs[pn_Load_X_except]) {
            exchange(info->projs[pn_Load_X_except], new_Bad());
            info->projs[pn_Load_X_except] = NULL;
        }
        if (info->projs[pn_Load_res])
            exchange(info->projs[pn_Load_res], new_node);

        exchange(load, new_Bad());
        reduce_adr_usage(ptr);
        return res | DF_CHANGED;
    }

    /* check if we can determine the entity that will be loaded */
    ent = find_constant_entity(ptr);
    if (ent) {
        if ((allocation_static == get_entity_allocation(ent)) &&
            (visibility_external_allocated != get_entity_visibility(ent))) {
            /* a static allocation that is not external: there should be NO
             * exception when loading. */

            /* no exception, clear the info field as it might be checked later again */
            if (info->projs[pn_Load_X_except]) {
                exchange(info->projs[pn_Load_X_except], new_Bad());
                info->projs[pn_Load_X_except] = NULL;
                res |= CF_CHANGED;
            }

            if (variability_constant == get_entity_variability(ent)
                && is_atomic_entity(ent)) {
                /* Might not be atomic after lowering of Sels.  In this case
                 * we could also load, but it's more complicated. */
                /* simpler case: we load the content of a constant value:
                 * replace it by the constant itself */

                if (info->projs[pn_Load_M]) {
                    exchange(info->projs[pn_Load_M], mem);
                    res |= DF_CHANGED;
                }
                if (info->projs[pn_Load_res]) {
                    if (is_atomic_entity(ent)) {
                        ir_node *c = copy_const_value(get_irn_dbg_info(load), get_atomic_ent_value(ent));

                        DBG_OPT_RC(load, c);
                        exchange(info->projs[pn_Load_res], c);
                        res |= DF_CHANGED;
                    }
                }
                exchange(load, new_Bad());
                reduce_adr_usage(ptr);
                return res;
            } else if (variability_constant == get_entity_variability(ent)) {
                compound_graph_path *path = get_accessed_path(ptr);

                if (path) {
                    ir_node *c;

                    assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
#if 0
                    {
                        int j;
                        for (j = 0; j < get_compound_graph_path_length(path); ++j) {
                            ir_entity *node = get_compound_graph_path_node(path, j);
                            fprintf(stdout, ".%s", get_entity_name(node));
                            if (is_Array_type(get_entity_owner(node)))
                                fprintf(stdout, "[%d]", get_compound_graph_path_array_index(path, j));
                        }
                        printf("\n");
                    }
#endif

                    c = get_compound_ent_value_by_path(ent, path);
                    free_compound_graph_path(path);

                    /* printf("  cons: "); DDMN(c); */

                    if (info->projs[pn_Load_M]) {
                        exchange(info->projs[pn_Load_M], mem);
                        res |= DF_CHANGED;
                    }
                    if (info->projs[pn_Load_res]) {
                        exchange(info->projs[pn_Load_res], copy_const_value(get_irn_dbg_info(load), c));
                        res |= DF_CHANGED;
                    }
                    exchange(load, new_Bad());
                    reduce_adr_usage(ptr);
                    return res;
                } else {
                    /* We cannot determine a correct access path.  E.g., in jack,
                     * we load a byte from an object to generate an exception.
                     * Happens in a test program.
                     *
                     * printf(">>>>>>>>>>>>> Found access to constant entity %s in function %s\n", get_entity_name(ent),
                     *        get_entity_name(get_irg_entity(current_ir_graph)));
                     * printf("  load: "); DDMN(load);
                     * printf("  ptr:  "); DDMN(ptr);
                     */
                }
            }
        }
    }

    /* Check if the address of this load is used more than once.
     * If not, this load cannot be removed in any case. */
    if (get_irn_n_uses(ptr) <= 1)
        return res;

    /*
     * follow the memory chain as long as there are only Loads
     * and try to replace the current Load or Store by a previous one.
     * Note that in unreachable loops it might happen that we reach the
     * Load again, i.e. we can run into a cycle.
     * We break such cycles using a special visited flag.
     */
    INC_MASTER();
    res |= follow_Mem_chain(load, skip_Proj(mem));
    return res;
} /* optimize_load */

/**
 * Check whether a value of mode new_mode would completely overwrite a value
 * of mode old_mode in memory.
 */
static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
{
    return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
} /* is_completely_overwritten */
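
/*
 * Illustrative example (hypothetical user code): in
 *
 *     *(char *)p = c;     -- first Store, 8-bit mode
 *     *(int *)p  = i;     -- second Store, 32-bit mode
 *
 * the 32-bit Store completely overwrites the 8-bit one, so the first
 * Store is dead. In the reverse order nothing may be removed: the char
 * Store leaves 24 bits of the previously stored int live.
 */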

/**
 * follow the memory chain as long as there are only Loads and alias-free
 * Stores.
 *
 * INC_MASTER() must be called before diving into the chain.
 */
static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
    unsigned    res  = 0;
    ldst_info_t *info = get_irn_link(store);
    ir_node     *pred;
    ir_node     *ptr   = get_Store_ptr(store);
    ir_node     *mem   = get_Store_mem(store);
    ir_node     *value = get_Store_value(store);
    ir_mode     *mode  = get_irn_mode(value);
    ir_node     *block = get_nodes_block(store);
    for (pred = curr; pred != store;) {
        ldst_info_t *pred_info = get_irn_link(pred);

        /*
         * BEWARE: one might think that checking the modes is useless, because
         * if the pointers are identical, they refer to the same object.
         * This is only true in strongly typed languages, not in C, where the
         * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
         * However, if the mode that is written has a size greater than or
         * equal to the old one, the old value is completely overwritten and
         * can be killed ...
         */
        if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
            get_nodes_block(pred) == block &&
            is_completely_overwritten(get_irn_mode(get_Store_value(pred)), mode)) {
            /*
             * a Store after a Store in the same block -- a write after write.
             * We may remove the first Store if it does not have an exception handler.
             *
             * TODO: What if both have the same exception handler?
             */
            if (get_Store_volatility(pred) != volatility_is_volatile && !pred_info->projs[pn_Store_X_except]) {
                DBG_OPT_WAW(pred, store);
                exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
                exchange(pred, new_Bad());
                reduce_adr_usage(ptr);
                return DF_CHANGED;
            }
        } else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
                   value == pred_info->projs[pn_Load_res]) {
            /*
             * a Store of a value after a Load of the same value -- a write
             * after read. We may remove this Store if it does not have an
             * exception handler.
             */
            if (! info->projs[pn_Store_X_except]) {
                DBG_OPT_WAR(store, pred);
                exchange(info->projs[pn_Store_M], mem);
                exchange(store, new_Bad());
                reduce_adr_usage(ptr);
                return DF_CHANGED;
            }
        }
        if (get_irn_op(pred) == op_Store) {
            /* check if we can pass through this store */
            ir_alias_relation rel = get_alias_relation(
                current_ir_graph,
                get_Store_ptr(pred),
                get_irn_mode(get_Store_value(pred)),
                ptr, mode);
            /* if there might be an alias, we cannot pass this Store */
            if (rel != no_alias)
                break;
            pred = skip_Proj(get_Store_mem(pred));
        } else if (get_irn_op(pred) == op_Load) {
            pred = skip_Proj(get_Load_mem(pred));
        } else {
            /* follow only Load chains */
            break;
        }

        /* check for cycles */
        if (NODE_VISITED(pred_info))
            break;
        MARK_NODE(pred_info);
    }

    if (get_irn_op(pred) == op_Sync) {
        int i;

        /* handle all Sync predecessors */
        for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
            res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
        }
    }

    return res;
} /* follow_Mem_chain_for_Store */
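
/*
 * Source-level illustration (hypothetical code) of the two patterns
 * follow_Mem_chain_for_Store() removes:
 *
 *     *p = x; *p = y;     -- write after write: the first Store dies
 *                            (same block, value completely overwritten)
 *     x = *p; *p = x;     -- write after read: storing back the value
 *                            just loaded changes nothing, the Store dies
 */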

/**
 * optimizes a Store
 *
 * @param store  the Store node
 */
static unsigned optimize_store(ir_node *store) {
    ir_node *ptr, *mem;

    if (get_Store_volatility(store) == volatility_is_volatile)
        return 0;

    ptr = get_Store_ptr(store);

    /* Check if the address of this Store is used more than once.
     * If not, this Store cannot be removed in any case. */
    if (get_irn_n_uses(ptr) <= 1)
        return 0;

    mem = get_Store_mem(store);

    /* follow the memory chain as long as there are only Loads */
    INC_MASTER();
    return follow_Mem_chain_for_Store(store, skip_Proj(mem));
} /* optimize_store */

/**
 * walker, optimizes Phi after Stores to identical places:
 * Does the following optimization:
 * @verbatim
 *
 *   val1   val2   val3          val1  val2  val3
 *    |      |      |               \    |    /
 *  Store  Store  Store              \   |   /
 *      \    |    /                   PhiData
 *       \   |   /                       |
 *        \  |  /                      Store
 *          PhiM                         |
 *                                     PhiM
 *
 * @endverbatim
 * This reduces the number of Stores and allows for predicated execution.
 * It moves Stores back to the end of a function, which may be bad.
 *
 * This is only possible if the predecessor blocks have only one successor.
 */
static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
{
    int i, n;
    ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
    ir_mode *mode;
    ir_node **inM, **inD, **projMs;
    int *idx;
    dbg_info *db = NULL;
    ldst_info_t *info;
    block_info_t *bl_info;
    unsigned res = 0;

    /* Must be a memory Phi */
    if (get_irn_mode(phi) != mode_M)
        return 0;

    n = get_Phi_n_preds(phi);
    if (n <= 0)
        return 0;

    /* must be only one user */
    projM = get_Phi_pred(phi, 0);
    if (get_irn_n_edges(projM) != 1)
        return 0;

    store = skip_Proj(projM);
    old_store = store;
    if (get_irn_op(store) != op_Store)
        return 0;

    block = get_nodes_block(store);

    /* abort on dead blocks */
    if (is_Block_dead(block))
        return 0;

    /* check if the block is post dominated by Phi-block
       and has no exception exit */
    bl_info = get_irn_link(block);
    if (bl_info->flags & BLOCK_HAS_EXC)
        return 0;

    phi_block = get_nodes_block(phi);
    if (! block_strictly_postdominates(phi_block, block))
        return 0;

    /* this is the address of the store */
    ptr  = get_Store_ptr(store);
    mode = get_irn_mode(get_Store_value(store));
    info = get_irn_link(store);
    exc  = info->exc_block;
    for (i = 1; i < n; ++i) {
        ir_node *pred = get_Phi_pred(phi, i);

        if (get_irn_n_edges(pred) != 1)
            return 0;

        pred = skip_Proj(pred);
        if (get_irn_op(pred) != op_Store)
            return 0;

        if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
            return 0;

        info = get_irn_link(pred);

        /* check if all stores have the same exception flow */
        if (exc != info->exc_block)
            return 0;

        /* abort on dead blocks */
        block = get_nodes_block(pred);
        if (is_Block_dead(block))
            return 0;

        /* check if the block is post dominated by Phi-block
           and has no exception exit. Note that block must be different from
           Phi-block, else we would move a Store from the end of a block to its
           start... */
        bl_info = get_irn_link(block);
        if (bl_info->flags & BLOCK_HAS_EXC)
            return 0;
        if (block == phi_block || ! block_postdominates(phi_block, block))
            return 0;
    }
    /*
     * ok, when we are here, we found all predecessors of a Phi that
     * are Stores to the same address and size. That means whatever
     * we do before we enter the block of the Phi, we do a Store.
     * So, we can move the Store to the current block:
     *
     *   val1    val2    val3          val1  val2  val3
     *    |       |       |               \    |    /
     * | Str | | Str | | Str |             \   |   /
     *      \     |     /                   PhiData
     *       \    |    /                       |
     *        \   |   /                      Store
     *           PhiM                          |
     *                                       PhiM
     *
     * Is only allowed if the predecessor blocks have only one successor.
     */

    NEW_ARR_A(ir_node *, projMs, n);
    NEW_ARR_A(ir_node *, inM, n);
    NEW_ARR_A(ir_node *, inD, n);
    NEW_ARR_A(int, idx, n);

    /* Prepare: Collect all Store nodes.  We must do this
     * first because we otherwise may lose a Store when exchanging its
     * memory Proj.
     */
    for (i = n - 1; i >= 0; --i) {
        projMs[i] = get_Phi_pred(phi, i);
        assert(is_Proj(projMs[i]));

        store = get_Proj_pred(projMs[i]);
        info  = get_irn_link(store);

        inM[i] = get_Store_mem(store);
        inD[i] = get_Store_value(store);
        idx[i] = info->exc_idx;
    }
    block = get_nodes_block(phi);

    /* second step: create a new memory Phi */
    phiM = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inM, mode_M);

    /* third step: create a new data Phi */
    phiD = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inD, mode);

    /* rewire memory and kill the nodes */
    for (i = n - 1; i >= 0; --i) {
        ir_node *proj = projMs[i];

        if (is_Proj(proj)) {
            ir_node *store = get_Proj_pred(proj);
            exchange(proj, inM[i]);
            exchange(store, new_Bad());
        }
    }

    /* fourth step: create the Store */
    store = new_rd_Store(db, current_ir_graph, block, phiM, ptr, phiD);
#ifdef DO_CACHEOPT
    co_set_irn_name(store, co_get_irn_ident(old_store));
#endif

    projM = new_rd_Proj(NULL, current_ir_graph, block, store, mode_M, pn_Store_M);

    info = get_ldst_info(store, wenv);
    info->projs[pn_Store_M] = projM;

    /* fifth step: repair exception flow */
    if (exc) {
        ir_node *projX = new_rd_Proj(NULL, current_ir_graph, block, store, mode_X, pn_Store_X_except);

        info->projs[pn_Store_X_except] = projX;
        info->exc_block                = exc;
        info->exc_idx                  = idx[0];

        for (i = 0; i < n; ++i) {
            set_Block_cfgpred(exc, idx[i], projX);
        }

        /* the exception block should be optimized as some inputs are identical now */

        res |= CF_CHANGED;
    }

    /* sixth step: replace old Phi */
    exchange(phi, projM);

    return res | DF_CHANGED;
} /* optimize_phi */
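
/*
 * Source-level illustration (hypothetical code): for
 *
 *     if (c) *p = a; else *p = b;
 *
 * both predecessors of the join block end in a Store to p, and its
 * memory Phi merges exactly those Stores. The transformation builds the
 * equivalent of "*p = c ? a : b;": a data Phi for the value, a single
 * Store in the join block, and a new memory Phi feeding it.
 */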

/**
 * walker, do the optimizations
 */
static void do_load_store_optimize(ir_node *n, void *env) {
    walk_env_t *wenv = env;

    switch (get_irn_opcode(n)) {
    case iro_Load:
        wenv->changes |= optimize_load(n);
        break;

    case iro_Store:
        wenv->changes |= optimize_store(n);
        break;

    case iro_Phi:
        wenv->changes |= optimize_phi(n, wenv);
        break;

    default:
        break;
    }
} /* do_load_store_optimize */

/*
 * do the load store optimization
 */
void optimize_load_store(ir_graph *irg) {
    walk_env_t env;

    assert(get_irg_phase_state(irg) != phase_building);
    assert(get_irg_pinned(irg) != op_pin_state_floats &&
           "LoadStore optimization needs pinned graph");

    if (! get_opt_redundant_loadstore())
        return;

    edges_assure(irg);

    /* for Phi optimization post-dominators are needed ... */
    assure_postdoms(irg);

    if (get_opt_alias_analysis()) {
        assure_irg_address_taken_computed(irg);
        assure_irp_globals_address_taken_computed();
    }

    obstack_init(&env.obst);
    env.changes = 0;

    /* init the links, then collect Loads/Stores/Projs in lists */
    master_visited = 0;
    irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);

    /* now we have collected enough information, optimize */
    irg_walk_graph(irg, NULL, do_load_store_optimize, &env);

    obstack_free(&env.obst, NULL);

    /* Handle graph state */
    if (env.changes) {
        set_irg_outs_inconsistent(irg);
    }

    if (env.changes & CF_CHANGED) {
        /* is this really needed: Yes, control flow changed, blocks might
         * have Bad() predecessors. */
        set_irg_doms_inconsistent(irg);
    }
} /* optimize_load_store */
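
/*
 * Usage sketch (illustrative): the pass runs per graph, e.g. over all
 * graphs of the current program:
 *
 *     int i;
 *     for (i = get_irp_n_irgs() - 1; i >= 0; --i)
 *         optimize_load_store(get_irp_irg(i));
 *
 * The graph must be pinned and past the construction phase, as the
 * asserts above enforce.
 */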