 * File name:   ir/opt/ldstopt.c
 * Purpose:     load store optimizations
 * Copyright:   (c) 1998-2007 Universität Karlsruhe
 * Licence:     This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
#include "irgraph_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "opt_polymorphy.h"
#include "cacheopt/cachesim.h"
#define IMAX(a,b)   ((a) > (b) ? (a) : (b))

#define MAX_PROJ    IMAX(pn_Load_max, pn_Store_max)
enum {
	DF_CHANGED = 1,       /**< data flow changed */
	CF_CHANGED = 2,       /**< control flow changed */
};

/** walker environment */
typedef struct _walk_env_t {
	struct obstack obst;          /**< obstack to allocate the per-node info structs on */
	unsigned changes;             /**< a bitmask of graph changes */
} walk_env_t;
/**
 * Flags for a Load/Store info.
 */
enum {
	LDST_VISITED = 1              /**< if set, this Load/Store is already visited */
};
/** A Load/Store info. */
typedef struct _ldst_info_t {
	ir_node  *projs[MAX_PROJ];    /**< list of Proj's of this node */
	ir_node  *exc_block;          /**< the exception block if available */
	int      exc_idx;             /**< predecessor index in the exception block */
	unsigned flags;               /**< flags */
	unsigned visited;             /**< visited counter for breaking loops */
} ldst_info_t;
/**
 * Flags for control flow.
 */
enum {
	BLOCK_HAS_COND = 1,           /**< Block has conditional control flow */
	BLOCK_HAS_EXC  = 2            /**< Block has exceptional control flow */
};

/** A per-block info. */
typedef struct _block_info_t {
	unsigned flags;               /**< flags for the block */
} block_info_t;
/** The master visited flag for loop detection. */
static unsigned master_visited = 0;

#define INC_MASTER()       ++master_visited
#define MARK_NODE(info)    ((info)->visited = master_visited)
#define NODE_VISITED(info) ((info)->visited >= master_visited)
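/*
 * Usage sketch for the visited protocol above (illustrative only):
 * call INC_MASTER() once before each memory chain walk, then for every
 * visited node do
 *
 *     if (NODE_VISITED(pred_info))
 *         break;                 -- cycle detected, stop following
 *     MARK_NODE(pred_info);
 *
 * Incrementing master_visited invalidates the marks of all earlier walks
 * at once, so no per-node reset pass is needed between walks.
 */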
/**
 * Get the Load/Store info of a node.
 */
static ldst_info_t *get_ldst_info(ir_node *node, walk_env_t *env) {
	ldst_info_t *info = get_irn_link(node);

	if (! info) {
		info = obstack_alloc(&env->obst, sizeof(*info));
		memset(info, 0, sizeof(*info));
		set_irn_link(node, info);
	}
	return info;
}  /* get_ldst_info */
/**
 * Get the Block info of a node.
 */
static block_info_t *get_block_info(ir_node *node, walk_env_t *env) {
	block_info_t *info = get_irn_link(node);

	if (! info) {
		info = obstack_alloc(&env->obst, sizeof(*info));
		memset(info, 0, sizeof(*info));
		set_irn_link(node, info);
	}
	return info;
}  /* get_block_info */
/**
 * Update the projection info for a Load/Store.
 */
static unsigned update_projs(ldst_info_t *info, ir_node *proj)
{
	long nr = get_Proj_proj(proj);

	assert(0 <= nr && nr < MAX_PROJ && "Wrong proj from LoadStore");

	if (info->projs[nr]) {
		/* there is already one, do CSE */
		exchange(proj, info->projs[nr]);
		return DF_CHANGED;
	}
	info->projs[nr] = proj;
	return 0;
}  /* update_projs */
/**
 * Update the exception block info for a Load/Store node.
 *
 * @param info   the load/store info struct
 * @param block  the exception handler block for this load/store
 * @param pos    the control flow input of the block
 */
static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
{
	assert(info->exc_block == NULL && "more than one exception block found");

	info->exc_block = block;
	info->exc_idx   = pos;
	return 0;
}  /* update_exc */
/** Return the number of uses of an address node */
#define get_irn_n_uses(adr) get_irn_n_edges(adr)
/**
 * walker, collects all Load/Store/Proj nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
{
	ir_op       *op = get_irn_op(node);
	ir_node     *pred, *blk, *pred_blk;
	ldst_info_t *ldst_info;
	walk_env_t  *wenv = env;

	if (op == op_Proj) {
		ir_node *adr;

		pred = get_Proj_pred(node);
		op   = get_irn_op(pred);

		if (op == op_Load) {
			ldst_info = get_ldst_info(pred, wenv);

			wenv->changes |= update_projs(ldst_info, node);

			if ((ldst_info->flags & LDST_VISITED) == 0) {
				adr = get_Load_ptr(pred);
				ldst_info->flags |= LDST_VISITED;
			}

			/*
			 * Place the Proj's into the same block as the
			 * predecessor Load. This is always ok and prevents
			 * "non-SSA" form after optimizations if the Proj
			 * is in a wrong block.
			 */
			blk      = get_nodes_block(node);
			pred_blk = get_nodes_block(pred);
			if (blk != pred_blk) {
				wenv->changes |= DF_CHANGED;
				set_nodes_block(node, pred_blk);
			}
		} else if (op == op_Store) {
			ldst_info = get_ldst_info(pred, wenv);

			wenv->changes |= update_projs(ldst_info, node);

			if ((ldst_info->flags & LDST_VISITED) == 0) {
				adr = get_Store_ptr(pred);
				ldst_info->flags |= LDST_VISITED;
			}

			/*
			 * Place the Proj's into the same block as the
			 * predecessor Store. This is always ok and prevents
			 * "non-SSA" form after optimizations if the Proj
			 * is in a wrong block.
			 */
			blk      = get_nodes_block(node);
			pred_blk = get_nodes_block(pred);
			if (blk != pred_blk) {
				wenv->changes |= DF_CHANGED;
				set_nodes_block(node, pred_blk);
			}
		}
	} else if (op == op_Block) {
		int i;

		for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
			ir_node      *pred_block;
			block_info_t *bl_info;

			pred = skip_Proj(get_Block_cfgpred(node, i));

			/* ignore Bad predecessors, they will be removed later */
			if (is_Bad(pred))
				continue;

			pred_block = get_nodes_block(pred);
			bl_info    = get_block_info(pred_block, wenv);

			if (is_fragile_op(pred))
				bl_info->flags |= BLOCK_HAS_EXC;
			else if (is_irn_forking(pred))
				bl_info->flags |= BLOCK_HAS_COND;

			if (get_irn_op(pred) == op_Load || get_irn_op(pred) == op_Store) {
				ldst_info = get_ldst_info(pred, wenv);

				wenv->changes |= update_exc(ldst_info, node, i);
			}
		}
	}
}  /* collect_nodes */
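/*
 * Illustrative sketch (not code, just the shape of the collected data):
 * after collect_nodes ran, a Load with an exception edge has
 *
 *     info->projs[pn_Load_res]        - the unique result Proj (CSEd)
 *     info->projs[pn_Load_M]          - the unique memory Proj
 *     info->projs[pn_Load_X_except]   - the unique exception Proj
 *     info->exc_block / info->exc_idx - handler block and its cf input
 *
 * The optimizations below rely on this uniqueness.
 */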
/**
 * Returns an entity if the address ptr points to a constant one.
 *
 * @param ptr  the address
 *
 * @return an entity or NULL
 */
static ir_entity *find_constant_entity(ir_node *ptr)
{
	for (;;) {
		ir_op *op = get_irn_op(ptr);

		if (op == op_SymConst && (get_SymConst_kind(ptr) == symconst_addr_ent)) {
			ir_entity *ent = get_SymConst_entity(ptr);
			if (variability_constant == get_entity_variability(ent))
				return ent;
			return NULL;
		} else if (op == op_Sel) {
			ir_entity *ent = get_Sel_entity(ptr);
			ir_type   *tp  = get_entity_owner(ent);

			/* Do not fiddle with polymorphism. */
			if (is_Class_type(get_entity_owner(ent)) &&
			    ((get_entity_n_overwrites(ent)    != 0) ||
			     (get_entity_n_overwrittenby(ent) != 0)))
				return NULL;

			if (is_Array_type(tp)) {
				/* check bounds */
				int i, n;

				for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
					ir_node *bound;
					tarval  *tlower, *tupper;
					ir_node *index = get_Sel_index(ptr, i);
					tarval  *tv    = computed_value(index);

					/* check if the index is constant */
					if (tv == tarval_bad)
						return NULL;

					bound  = get_array_lower_bound(tp, i);
					tlower = computed_value(bound);
					bound  = get_array_upper_bound(tp, i);
					tupper = computed_value(bound);

					if (tlower == tarval_bad || tupper == tarval_bad)
						return NULL;

					if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
						return NULL;
					if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
						return NULL;

					/* ok, bounds check finished */
				}
			}

			if (variability_constant == get_entity_variability(ent))
				return ent;

			/* try next */
			ptr = get_Sel_ptr(ptr);
		} else
			return NULL;
	}
}  /* find_constant_entity */
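/*
 * Example (a sketch in C terms, not Firm API): for
 *
 *     static const int A[10] = { ... };
 *     ... = A[3];
 *
 * the address is Sel(SymConst(A), Const 3). The index is constant and
 * inside the array bounds, so the loop above accepts it and returns the
 * (constant) entity of A; the Load can later be folded to its
 * initializer value.
 */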
/**
 * Return the Selection index of a Sel node from dimension dim.
 */
static long get_Sel_array_index_long(ir_node *n, int dim) {
	ir_node *index = get_Sel_index(n, dim);
	assert(get_irn_op(index) == op_Const);
	return get_tarval_long(get_Const_tarval(index));
}  /* get_Sel_array_index_long */
/**
 * Returns the accessed component graph path for a
 * node computing an address.
 *
 * @param ptr    the node computing the address
 * @param depth  current depth in steps upward from the root
 */
static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
	compound_graph_path *res = NULL;
	ir_entity           *root, *field;
	int                 path_len, pos;

	if (get_irn_op(ptr) == op_SymConst) {
		/* a SymConst. If the depth is 0, this is an access to a global
		 * entity and we don't need a component path, else we know
		 * at least its length.
		 */
		assert(get_SymConst_kind(ptr) == symconst_addr_ent);
		root = get_SymConst_entity(ptr);
		res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
	} else {
		assert(get_irn_op(ptr) == op_Sel);
		/* it's a Sel, go up until we find the root */
		res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);

		/* fill up the step in the path at the current position */
		field    = get_Sel_entity(ptr);
		path_len = get_compound_graph_path_length(res);
		pos      = path_len - depth - 1;
		set_compound_graph_path_node(res, pos, field);

		if (is_Array_type(get_entity_owner(field))) {
			assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
			set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
		}
	}
	return res;
}  /* rec_get_accessed_path */
/** Returns an access path or NULL. The access path is only
 *  valid, if the graph is in phase_high and _no_ address computation is used.
 */
static compound_graph_path *get_accessed_path(ir_node *ptr) {
	return rec_get_accessed_path(ptr, 0);
}  /* get_accessed_path */
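/*
 * Example (sketch): for an access like s.a[3].b on a constant compound
 * entity s, the address is Sel(b, Sel(a, SymConst(s), Const 3)) and the
 * recursion builds the compound graph path ".a[3].b": the SymConst is
 * reached at depth 2, the path gets length 2, and each Sel fills the
 * entry at position path_len - depth - 1 on the way back down.
 */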
static void reduce_adr_usage(ir_node *ptr);
/**
 * Update a Load that may have lost its users.
 */
static void handle_load_update(ir_node *load) {
	ldst_info_t *info = get_irn_link(load);

	/* do NOT touch volatile loads for now */
	if (get_Load_volatility(load) == volatility_is_volatile)
		return;

	if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
		ir_node *ptr = get_Load_ptr(load);
		ir_node *mem = get_Load_mem(load);

		/* a Load whose value is neither used nor exception checked, remove it */
		exchange(info->projs[pn_Load_M], mem);
		exchange(load, new_Bad());
		reduce_adr_usage(ptr);
	}
}  /* handle_load_update */
/**
 * A use of an address node has vanished. Check if this was a Proj
 * node and update the counters.
 */
static void reduce_adr_usage(ir_node *ptr) {
	if (is_Proj(ptr)) {
		if (get_irn_n_edges(ptr) <= 0) {
			/* this Proj is dead now */
			ir_node *pred = get_Proj_pred(ptr);

			if (get_irn_op(pred) == op_Load) {
				ldst_info_t *info = get_irn_link(pred);
				info->projs[get_Proj_proj(ptr)] = NULL;

				/* this node lost its result proj, handle that */
				handle_load_update(pred);
			}
		}
	}
}  /* reduce_adr_usage */
/**
 * Check, if an already existing value of mode old_mode can be converted
 * into the needed one new_mode without loss.
 */
static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode) {
	if (old_mode == new_mode)
		return 1;

	/* if both modes are two's complement ones, we can always convert the
	   stored value into the needed one. */
	if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
	    get_mode_arithmetic(old_mode) == irma_twos_complement &&
	    get_mode_arithmetic(new_mode) == irma_twos_complement)
		return 1;
	return 0;
}  /* can_use_stored_value */
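/*
 * Example: a 32-bit two's complement store followed by an 8-bit two's
 * complement load of the same address passes this check (32 >= 8, both
 * irma_twos_complement); a Conv is inserted later where the modes differ.
 * A float store followed by an integer load fails it, because the bit
 * patterns are not convertible this way.
 */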
/**
 * Follow the memory chain as long as there are only Loads
 * and alias free Stores and try to replace the current Load or Store
 * by a previous one.
 * Note that in unreachable loops it might happen that we reach the
 * Load again, as well as we can fall into a cycle.
 * We break such cycles using a special visited flag.
 *
 * INC_MASTER() must be called before diving into the chain.
 */
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
	unsigned    res = 0;
	ldst_info_t *info = get_irn_link(load);
	ir_node     *pred;
	ir_node     *ptr       = get_Load_ptr(load);
	ir_node     *mem       = get_Load_mem(load);
	ir_mode     *load_mode = get_Load_mode(load);

	for (pred = curr; load != pred; ) {
		ldst_info_t *pred_info = get_irn_link(pred);

		/*
		 * BEWARE: one might think that checking the modes is useless, because
		 * if the pointers are identical, they refer to the same object.
		 * This is only true in strongly typed languages, not in C where the
		 * following is possible: a = *(ir_type1 *)p; b = *(ir_type2 *)p ...
		 */
		if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
		    can_use_stored_value(get_irn_mode(get_Store_value(pred)), load_mode)) {
			/*
			 * a Load immediately after a Store -- a read after write.
			 * We may remove the Load, if both Load & Store do not have an
			 * exception handler OR they are in the same block. In the latter
			 * case the Load cannot throw an exception when the previous Store
			 * was quiet.
			 *
			 * Why do we need to check for the Store exception? If the Store
			 * cannot be executed (ROM) the exception handler might simply
			 * jump into the Load block :-(
			 * We could make it a little bit better if we would know that the
			 * exception handler of the Store jumps directly to the end...
			 */
			if ((!pred_info->projs[pn_Store_X_except] && !info->projs[pn_Load_X_except]) ||
			    get_nodes_block(load) == get_nodes_block(pred)) {
				ir_node *value = get_Store_value(pred);

				DBG_OPT_RAW(load, value);

				/* add a Conv if needed */
				if (get_irn_mode(get_Store_value(pred)) != load_mode) {
					value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
				}

				if (info->projs[pn_Load_M])
					exchange(info->projs[pn_Load_M], mem);

				/* no exception */
				if (info->projs[pn_Load_X_except]) {
					exchange(info->projs[pn_Load_X_except], new_Bad());
					res |= CF_CHANGED;
				}

				if (info->projs[pn_Load_res])
					exchange(info->projs[pn_Load_res], value);

				exchange(load, new_Bad());
				reduce_adr_usage(ptr);
				return res | DF_CHANGED;
			}
		} else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
		           can_use_stored_value(get_Load_mode(pred), load_mode)) {
			/*
			 * a Load after a Load -- a read after read.
			 * We may remove the second Load, if it does not have an exception
			 * handler OR they are in the same block. In the latter case the
			 * Load cannot throw an exception when the previous Load was quiet.
			 *
			 * Here, there is no need to check if the previous Load has an
			 * exception handler because they would have exactly the same
			 * exception...
			 */
			if (! info->projs[pn_Load_X_except] || get_nodes_block(load) == get_nodes_block(pred)) {
				DBG_OPT_RAR(load, pred);

				if (pred_info->projs[pn_Load_res]) {
					ir_node *value = pred_info->projs[pn_Load_res];

					/* add a Conv if needed */
					if (get_Load_mode(pred) != load_mode) {
						value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
					}

					/* we need a data proj from the previous load for this optimization */
					if (info->projs[pn_Load_res])
						exchange(info->projs[pn_Load_res], value);

					if (info->projs[pn_Load_M])
						exchange(info->projs[pn_Load_M], mem);
				} else {
					if (info->projs[pn_Load_res]) {
						set_Proj_pred(info->projs[pn_Load_res], pred);
						set_nodes_block(info->projs[pn_Load_res], get_nodes_block(pred));
						pred_info->projs[pn_Load_res] = info->projs[pn_Load_res];
					}
					if (info->projs[pn_Load_M]) {
						/* Actually, this if should not be necessary. Construct the Loads
						   properly!!! */
						exchange(info->projs[pn_Load_M], mem);
					}
				}

				/* no exception */
				if (info->projs[pn_Load_X_except]) {
					exchange(info->projs[pn_Load_X_except], new_Bad());
					res |= CF_CHANGED;
				}

				exchange(load, new_Bad());
				reduce_adr_usage(ptr);
				return res | DF_CHANGED;
			}
		}

		if (get_irn_op(pred) == op_Store) {
			/* check if we can pass through this store */
			ir_alias_relation rel = get_alias_relation(
				current_ir_graph,
				get_Store_ptr(pred),
				get_irn_mode(get_Store_value(pred)),
				ptr, load_mode);
			/* if there might be an alias, we cannot pass this Store */
			if (rel != no_alias)
				break;
			pred = skip_Proj(get_Store_mem(pred));
		} else if (get_irn_op(pred) == op_Load) {
			pred = skip_Proj(get_Load_mem(pred));
		} else {
			/* follow only Load chains */
			break;
		}

		/* check for cycles */
		if (NODE_VISITED(pred_info))
			break;
		MARK_NODE(pred_info);
	}

	if (get_irn_op(pred) == op_Sync) {
		int i;

		/* handle all Sync predecessors */
		for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
			res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
		}
	}

	return res;
}  /* follow_Mem_chain */
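/*
 * The two patterns above in C terms (illustrative sketch only):
 *
 *     read after write:  *p = x; y = *p;    ==>  *p = x; y = x;
 *     read after read:   y1 = *p; y2 = *p;  ==>  y1 = *p; y2 = y1;
 *
 * In both cases the younger Load disappears: its result Proj is rerouted
 * to the known value and its memory Proj to the Load's input memory.
 */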
/**
 * optimize a Load
 *
 * @param load  the Load node
 */
static unsigned optimize_load(ir_node *load)
{
	ldst_info_t *info = get_irn_link(load);
	ir_node     *mem, *ptr, *new_node;
	ir_entity   *ent;
	unsigned    res = 0;

	/* do NOT touch volatile loads for now */
	if (get_Load_volatility(load) == volatility_is_volatile)
		return 0;

	/* the address of the load to be optimized */
	ptr = get_Load_ptr(load);

	/*
	 * Check if we can remove the exception from a Load:
	 * This can be done, if the address is from a Sel(Alloc) and
	 * the Sel type is a subtype of the allocated type.
	 *
	 * This optimizes some often used OO constructs,
	 * like x = new O; x->t;
	 */
	if (info->projs[pn_Load_X_except]) {
		if (get_irn_op(ptr) == op_Sel) {
			ir_node *mem = get_Sel_mem(ptr);

			/* FIXME: works with the current FE, but better use the base */
			if (get_irn_op(skip_Proj(mem)) == op_Alloc) {
				/* ok, check the types */
				ir_entity *ent    = get_Sel_entity(ptr);
				ir_type   *s_type = get_entity_type(ent);
				ir_type   *a_type = get_Alloc_type(mem);

				if (is_SubClass_of(s_type, a_type)) {
					/* ok, condition met: there can't be an exception because
					 * Alloc guarantees that enough memory was allocated */
					exchange(info->projs[pn_Load_X_except], new_Bad());
					info->projs[pn_Load_X_except] = NULL;
					res |= CF_CHANGED;
				}
			}
		} else if ((get_irn_op(skip_Proj(ptr)) == op_Alloc) ||
		           ((get_irn_op(ptr) == op_Cast) && (get_irn_op(skip_Proj(get_Cast_op(ptr))) == op_Alloc))) {
			/* simple case: a direct load after an Alloc. Firm Alloc throws
			 * an exception in case of out-of-memory. So, there is no way for an
			 * exception in this load.
			 * This code is constructed by the "exception lowering" in the Jack compiler.
			 */
			exchange(info->projs[pn_Load_X_except], new_Bad());
			info->projs[pn_Load_X_except] = NULL;
			res |= CF_CHANGED;
		}
	}

	/* The mem of the Load. Must still be returned after optimization. */
	mem = get_Load_mem(load);

	if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
		/* a Load whose value is neither used nor exception checked, remove it */
		exchange(info->projs[pn_Load_M], mem);

		exchange(load, new_Bad());
		reduce_adr_usage(ptr);
		return res | DF_CHANGED;
	}

	/* Load from a constant polymorphic field, where we can resolve
	   polymorphism. */
	new_node = transform_node_Load(load);
	if (new_node != load) {
		if (info->projs[pn_Load_M]) {
			exchange(info->projs[pn_Load_M], mem);
			info->projs[pn_Load_M] = NULL;
		}
		if (info->projs[pn_Load_X_except]) {
			exchange(info->projs[pn_Load_X_except], new_Bad());
			info->projs[pn_Load_X_except] = NULL;
		}
		if (info->projs[pn_Load_res])
			exchange(info->projs[pn_Load_res], new_node);

		exchange(load, new_Bad());
		reduce_adr_usage(ptr);
		return res | DF_CHANGED;
	}

	/* check if we can determine the entity that will be loaded */
	ent = find_constant_entity(ptr);
	if (ent) {
		if ((allocation_static == get_entity_allocation(ent)) &&
		    (visibility_external_allocated != get_entity_visibility(ent))) {
			/* a static allocation that is not external: there should be NO
			 * exception when loading. */

			/* no exception, clear the info field as it might be checked later again */
			if (info->projs[pn_Load_X_except]) {
				exchange(info->projs[pn_Load_X_except], new_Bad());
				info->projs[pn_Load_X_except] = NULL;
				res |= CF_CHANGED;
			}

			if (variability_constant == get_entity_variability(ent)
			    && is_atomic_entity(ent)) {
				/* Might not be atomic after lowering of Sels. In this case
				   we could also load, but it's more complicated. */
				/* simpler case: we load the content of a constant value:
				 * replace it by the constant itself */

				/* no memory */
				if (info->projs[pn_Load_M]) {
					exchange(info->projs[pn_Load_M], mem);
					info->projs[pn_Load_M] = NULL;
				}

				/* no result :-) */
				if (info->projs[pn_Load_res]) {
					if (is_atomic_entity(ent)) {
						ir_node *c = copy_const_value(get_irn_dbg_info(load), get_atomic_ent_value(ent));

						exchange(info->projs[pn_Load_res], c);
						info->projs[pn_Load_res] = NULL;
					}
				}
				exchange(load, new_Bad());
				reduce_adr_usage(ptr);
				return res | DF_CHANGED;
			} else if (variability_constant == get_entity_variability(ent)) {
				compound_graph_path *path = get_accessed_path(ptr);

				if (path) {
					ir_node *c;

					assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
					/*
					{
						int j;
						for (j = 0; j < get_compound_graph_path_length(path); ++j) {
							ir_entity *node = get_compound_graph_path_node(path, j);
							fprintf(stdout, ".%s", get_entity_name(node));
							if (is_Array_type(get_entity_owner(node)))
								fprintf(stdout, "[%d]", get_compound_graph_path_array_index(path, j));
						}
						printf("\n");
					}
					*/

					c = get_compound_ent_value_by_path(ent, path);
					free_compound_graph_path(path);

					/* printf("  cons: "); DDMN(c); */

					if (info->projs[pn_Load_M]) {
						exchange(info->projs[pn_Load_M], mem);
						info->projs[pn_Load_M] = NULL;
					}
					if (info->projs[pn_Load_res]) {
						exchange(info->projs[pn_Load_res], copy_const_value(get_irn_dbg_info(load), c));
						info->projs[pn_Load_res] = NULL;
					}
					exchange(load, new_Bad());
					reduce_adr_usage(ptr);
					return res | DF_CHANGED;
				} else {
					/* We can not determine a correct access path. E.g., in jack, we load
					   a byte from an object to generate an exception. Happens in test program
					   Reflectiontest.
					   printf(">>>>>>>>>>>>> Found access to constant entity %s in function %s\n",
					       get_entity_name(ent), get_entity_name(get_irg_entity(current_ir_graph)));
					   printf("  load: "); DDMN(load);
					   printf("  ptr:  "); DDMN(ptr);
					 */
				}
			}
		}
	}

	/* Check, if the address of this load is used more than once.
	 * If not, this load cannot be removed in any case. */
	if (get_irn_n_uses(ptr) <= 1)
		return res;

	/*
	 * follow the memory chain as long as there are only Loads
	 * and try to replace the current Load or Store by a previous one.
	 * Note that in unreachable loops it might happen that we reach the
	 * Load again, as well as we can fall into a cycle.
	 * We break such cycles using a special visited flag.
	 */
	INC_MASTER();
	res = follow_Mem_chain(load, skip_Proj(mem));
	return res;
}  /* optimize_load */
/**
 * Check whether a value of mode new_mode would completely overwrite a value
 * of mode old_mode in memory.
 */
static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
{
	return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
}  /* is_completely_overwritten */
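/*
 * Example: a later 32-bit store completely overwrites an earlier 8-bit
 * store to the same address (32 >= 8), so the earlier Store can be
 * killed; an 8-bit store does not overwrite a 32-bit one (8 < 32), so
 * the earlier Store must stay.
 */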
/**
 * follow the memory chain as long as there are only Loads and alias free Stores.
 *
 * INC_MASTER() must be called before diving into the chain.
 */
static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
	unsigned    res = 0;
	ldst_info_t *info = get_irn_link(store);
	ir_node     *pred;
	ir_node     *ptr   = get_Store_ptr(store);
	ir_node     *mem   = get_Store_mem(store);
	ir_node     *value = get_Store_value(store);
	ir_mode     *mode  = get_irn_mode(value);
	ir_node     *block = get_nodes_block(store);

	for (pred = curr; pred != store;) {
		ldst_info_t *pred_info = get_irn_link(pred);

		/*
		 * BEWARE: one might think that checking the modes is useless, because
		 * if the pointers are identical, they refer to the same object.
		 * This is only true in strongly typed languages, not in C where the
		 * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
		 * However, if the mode that is written has a bigger or equal size
		 * than the old one, the old value is completely overwritten and
		 * can be killed ...
		 */
		if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
		    get_nodes_block(pred) == block &&
		    is_completely_overwritten(get_irn_mode(get_Store_value(pred)), mode)) {
			/*
			 * a Store after a Store in the same block -- a write after write.
			 * We may remove the first Store, if it does not have an exception handler.
			 *
			 * TODO: What, if both have the same exception handler ???
			 */
			if (get_Store_volatility(pred) != volatility_is_volatile && !pred_info->projs[pn_Store_X_except]) {
				DBG_OPT_WAW(pred, store);
				exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
				exchange(pred, new_Bad());
				reduce_adr_usage(ptr);
				return DF_CHANGED;
			}
		} else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
		           value == pred_info->projs[pn_Load_res]) {
			/*
			 * a Store of a value just Loaded from the same address -- a write
			 * after read. We may remove this Store, if it does not have an
			 * exception handler.
			 */
			if (! info->projs[pn_Store_X_except]) {
				DBG_OPT_WAR(store, pred);
				exchange(info->projs[pn_Store_M], mem);
				exchange(store, new_Bad());
				reduce_adr_usage(ptr);
				return DF_CHANGED;
			}
		}

		if (get_irn_op(pred) == op_Store) {
			/* check if we can pass through this store */
			ir_alias_relation rel = get_alias_relation(
				current_ir_graph,
				get_Store_ptr(pred),
				get_irn_mode(get_Store_value(pred)),
				ptr, mode);
			/* if there might be an alias, we cannot pass this Store */
			if (rel != no_alias)
				break;
			pred = skip_Proj(get_Store_mem(pred));
		} else if (get_irn_op(pred) == op_Load) {
			pred = skip_Proj(get_Load_mem(pred));
		} else {
			/* follow only Load chains */
			break;
		}

		/* check for cycles */
		if (NODE_VISITED(pred_info))
			break;
		MARK_NODE(pred_info);
	}

	if (get_irn_op(pred) == op_Sync) {
		int i;

		/* handle all Sync predecessors */
		for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
			res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
		}
	}
	return res;
}  /* follow_Mem_chain_for_Store */
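/*
 * The two patterns above in C terms (illustrative sketch only):
 *
 *     write after write:  *p = a; *p = b;   ==>  *p = b;
 *     write after read:   x = *p; *p = x;   ==>  x = *p;
 *
 * The first form kills the older Store, the second one kills the current
 * (redundant) Store.
 */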
/**
 * optimize a Store
 *
 * @param store  the Store node
 */
static unsigned optimize_store(ir_node *store) {
	ir_node *ptr, *mem;

	if (get_Store_volatility(store) == volatility_is_volatile)
		return 0;

	ptr = get_Store_ptr(store);

	/* Check, if the address of this Store is used more than once.
	 * If not, this Store cannot be removed in any case. */
	if (get_irn_n_uses(ptr) <= 1)
		return 0;

	mem = get_Store_mem(store);

	/* follow the memory chain as long as there are only Loads */
	INC_MASTER();
	return follow_Mem_chain_for_Store(store, skip_Proj(mem));
}  /* optimize_store */
/**
 * walker, optimizes Phi after Stores to identical places:
 * Does the following optimization:
 *
 *   val1  val2  val3          val1  val2  val3
 *     |     |     |              \    |    /
 *   Store Store Store             \   |   /
 *      \    |    /                 PhiData
 *       \   |   /                     |
 *        \  |  /                    Store
 *          PhiM                       |
 *                                   PhiM
 *
 * This reduces the number of stores and allows for predicated execution.
 * Moves Stores back to the end of a function which may be bad.
 *
 * This is only possible if the predecessor blocks have only one successor.
 */
static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
{
	int i, n;
	ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
	ir_mode *mode;
	ir_node **inM, **inD, **stores;
	int *idx;
	dbg_info *db = NULL;
	ldst_info_t *info;
	block_info_t *bl_info;
	unsigned res = 0;

	/* Must be a memory Phi */
	if (get_irn_mode(phi) != mode_M)
		return 0;

	n = get_Phi_n_preds(phi);
	if (n <= 0)
		return 0;

	store = skip_Proj(get_Phi_pred(phi, 0));
	old_store = store;
	if (get_irn_op(store) != op_Store)
		return 0;

	block = get_nodes_block(store);

	/* abort on dead blocks */
	if (is_Block_dead(block))
		return 0;

	/* check if the block is post dominated by Phi-block
	   and has no exception exit */
	bl_info = get_irn_link(block);
	if (bl_info->flags & BLOCK_HAS_EXC)
		return 0;

	phi_block = get_nodes_block(phi);
	if (! block_postdominates(phi_block, block))
		return 0;

	/* this is the address of the store */
	ptr  = get_Store_ptr(store);
	mode = get_irn_mode(get_Store_value(store));
	info = get_irn_link(store);
	exc  = info->exc_block;

	for (i = 1; i < n; ++i) {
		ir_node *pred = skip_Proj(get_Phi_pred(phi, i));

		if (get_irn_op(pred) != op_Store)
			return 0;

		if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
			return 0;

		info = get_irn_link(pred);

		/* check, if all stores have the same exception flow */
		if (exc != info->exc_block)
			return 0;

		/* abort on dead blocks */
		block = get_nodes_block(pred);
		if (is_Block_dead(block))
			return 0;

		/* check if the block is post dominated by Phi-block
		   and has no exception exit. Note that block must be different from
		   Phi-block, else we would move a Store from the end of a block to its
		   start... */
		bl_info = get_irn_link(block);
		if (bl_info->flags & BLOCK_HAS_EXC)
			return 0;
		if (block == phi_block || ! block_postdominates(phi_block, block))
			return 0;
	}

	/*
	 * ok, when we are here, we found all predecessors of a Phi that
	 * are Stores to the same address and size. That means whatever
	 * we do before we enter the block of the Phi, we do a Store.
	 * So, we can move the Store to the current block:
	 *
	 *   val1  val2  val3          val1  val2  val3
	 *     |     |     |              \    |    /
	 *   | Str | | Str | | Str |      \   |   /
	 *      \    |    /               PhiData
	 *       \   |   /                   |
	 *        \  |  /                   Str
	 *          PhiM                     |
	 *                                 PhiM
	 *
	 * Is only allowed if the predecessor blocks have only one successor.
	 */
	NEW_ARR_A(ir_node *, stores, n);
	NEW_ARR_A(ir_node *, inM, n);
	NEW_ARR_A(ir_node *, inD, n);
	NEW_ARR_A(int, idx, n);

	/* Prepare: Collect all Store nodes. We must do this
	   first because we otherwise may lose a Store when exchanging its
	   memory Proj.
	 */
	for (i = 0; i < n; ++i)
		stores[i] = skip_Proj(get_Phi_pred(phi, i));

	/* Prepare: Skip the memory Proj: we need this in the case some Stores
	   are cascaded.
	   Beware: One Store might be included more than once in the stores[]
	   list, so we must prevent to do the exchange more than once.
	 */
	for (i = 0; i < n; ++i) {
		ir_node *store = stores[i];
		ir_node *proj_m;

		info   = get_irn_link(store);
		proj_m = info->projs[pn_Store_M];

		if (is_Proj(proj_m) && get_Proj_pred(proj_m) == store)
			exchange(proj_m, get_Store_mem(store));
	}

	/* first step: collect all inputs */
	for (i = 0; i < n; ++i) {
		ir_node *store = stores[i];
		info = get_irn_link(store);

		inM[i] = get_Store_mem(store);
		inD[i] = get_Store_value(store);
		idx[i] = info->exc_idx;
	}
	block = get_nodes_block(phi);

	/* second step: create a new memory Phi */
	phiM = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inM, mode_M);

	/* third step: create a new data Phi */
	phiD = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inD, mode);

	/* fourth step: create the Store */
	store = new_rd_Store(db, current_ir_graph, block, phiM, ptr, phiD);
#ifdef DO_CACHEOPT
	co_set_irn_name(store, co_get_irn_ident(old_store));
#endif

	projM = new_rd_Proj(NULL, current_ir_graph, block, store, mode_M, pn_Store_M);

	info = get_ldst_info(store, wenv);
	info->projs[pn_Store_M] = projM;

	/* fifth step: repair exception flow */
	if (exc) {
		ir_node *projX = new_rd_Proj(NULL, current_ir_graph, block, store, mode_X, pn_Store_X_except);

		info->projs[pn_Store_X_except] = projX;
		info->exc_block                = exc;
		info->exc_idx                  = idx[0];

		for (i = 0; i < n; ++i) {
			set_Block_cfgpred(exc, idx[i], projX);
		}

		/* the exception block should be optimized as some inputs are identical now */

		res |= CF_CHANGED;
	}

	/* sixth step: replace old Phi */
	exchange(phi, projM);

	return res | DF_CHANGED;
}  /* optimize_phi */
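/*
 * In C terms the Phi optimization above turns (sketch):
 *
 *     if (c) *p = val1; else *p = val2;
 *
 * into
 *
 *     *p = c ? val1 : val2;
 *
 * i.e. one Store fed by a data Phi instead of one Store per predecessor
 * block.
 */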
/**
 * walker, do the optimizations
 */
static void do_load_store_optimize(ir_node *n, void *env) {
	walk_env_t *wenv = env;

	switch (get_irn_opcode(n)) {
	case iro_Load:
		wenv->changes |= optimize_load(n);
		break;

	case iro_Store:
		wenv->changes |= optimize_store(n);
		break;

	case iro_Phi:
		wenv->changes |= optimize_phi(n, wenv);
		break;

	default:
		break;
	}
}  /* do_load_store_optimize */
/*
 * do the load store optimization
 */
void optimize_load_store(ir_graph *irg) {
	walk_env_t env;

	assert(get_irg_phase_state(irg) != phase_building);
	assert(get_irg_pinned(irg) != op_pin_state_floats &&
	       "LoadStore optimization needs pinned graph");

	if (! get_opt_redundant_loadstore())
		return;

	/* for Phi optimization post-dominators are needed ... */
	assure_postdoms(irg);

	if (get_opt_alias_analysis()) {
		assure_irg_address_taken_computed(irg);
		assure_irp_globals_address_taken_computed();
	}

	obstack_init(&env.obst);
	env.changes = 0;

	/* init the links, then collect Loads/Stores/Proj's in lists */
	irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);

	/* now we have collected enough information, optimize */
	irg_walk_graph(irg, NULL, do_load_store_optimize, &env);

	obstack_free(&env.obst, NULL);

	/* Handle graph state */
	if (env.changes) {
		if (get_irg_outs_state(irg) == outs_consistent)
			set_irg_outs_inconsistent(irg);
	}

	if (env.changes & CF_CHANGED) {
		/* is this really needed: Yes, control flow changed, block might
		   have Bad() predecessors. */
		set_irg_doms_inconsistent(irg);
	}
}  /* optimize_load_store */
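/*
 * Usage sketch (assumed driver code, not part of this file): run the
 * optimization over all graphs of the current program.
 *
 *     int i;
 *     for (i = get_irp_n_irgs() - 1; i >= 0; --i)
 *         optimize_load_store(get_irp_irg(i));
 */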