 * File name:   ir/opt/ldstopt.c
 * Purpose:     load store optimizations
 * Copyright:   (c) 1998-2004 Universität Karlsruhe
 * Licence:     This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
#include "irgraph_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "opt_polymorphy.h"
#include "cacheopt/cachesim.h"

#define IMAX(a,b)   ((a) > (b) ? (a) : (b))

#define MAX_PROJ    IMAX(pn_Load_max, pn_Store_max)

  DF_CHANGED = 1,       /**< data flow changed */
  CF_CHANGED = 2,       /**< control flow changed */
typedef struct _walk_env_t {
  struct obstack obst;          /**< list of all stores */
  unsigned changes;             /**< a bitmask of graph changes */

 * flags for Load/Store
  LDST_VISITED = 1              /**< if set, this Load/Store is already visited */
/** A Load/Store info. */
typedef struct _ldst_info_t {
  ir_node  *projs[MAX_PROJ];    /**< list of Proj's of this node */
  ir_node  *exc_block;          /**< the exception block if available */
  int      exc_idx;             /**< predecessor index in the exception block */
  unsigned flags;               /**< flags */
  unsigned visited;             /**< visited counter for breaking loops */

 * flags for control flow.
  BLOCK_HAS_COND = 1,           /**< Block has conditional control flow */
  BLOCK_HAS_EXC  = 2            /**< Block has exceptional control flow */

typedef struct _block_info_t {
  unsigned flags;               /**< flags for the block */
/** the master visited flag for loop detection. */
static unsigned master_visited = 0;

#define INC_MASTER()        ++master_visited
#define MARK_NODE(info)     (info)->visited = master_visited
#define NODE_VISITED(info)  (info)->visited >= master_visited
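
/*
 * Typical use of the cycle-breaking machinery above (illustrative sketch):
 * call INC_MASTER() once before walking a memory chain, then MARK_NODE()
 * on the info of every visited Load/Store and test NODE_VISITED() to stop
 * when a node is seen again within the same walk (see follow_Load_chain
 * below).
 */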
 * get the Load/Store info of a node
static ldst_info_t *get_ldst_info(ir_node *node, walk_env_t *env) {
  ldst_info_t *info = get_irn_link(node);

  info = obstack_alloc(&env->obst, sizeof(*info));
  memset(info, 0, sizeof(*info));

  set_irn_link(node, info);

 * get the Block info of a node
static block_info_t *get_block_info(ir_node *node, walk_env_t *env)
{
  block_info_t *info = get_irn_link(node);

  info = obstack_alloc(&env->obst, sizeof(*info));
  memset(info, 0, sizeof(*info));

  set_irn_link(node, info);
 * update the projection info for a Load/Store
static unsigned update_projs(ldst_info_t *info, ir_node *proj)
{
  long nr = get_Proj_proj(proj);

  assert(0 <= nr && nr <= MAX_PROJ && "Wrong proj from LoadStore");

  if (info->projs[nr]) {
    /* there is already one, do CSE */
    exchange(proj, info->projs[nr]);

  info->projs[nr] = proj;
 * update the exception block info for a Load/Store node.
 *
 * @param info   the load/store info struct
 * @param block  the exception handler block for this load/store
 * @param pos    the control flow input of the block
static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
{
  assert(info->exc_block == NULL && "more than one exception block found");

  info->exc_block = block;

/** Return the number of uses of an address node */
#define get_irn_n_uses(adr)     get_irn_n_edges(adr)
 * walker, collects all Load/Store/Proj nodes
 *
 * walks from Start -> End
static void collect_nodes(ir_node *node, void *env)
{
  ir_op       *op = get_irn_op(node);
  ir_node     *pred, *blk, *pred_blk;
  ldst_info_t *ldst_info;
  walk_env_t  *wenv = env;

    pred = get_Proj_pred(node);
    op   = get_irn_op(pred);

      ldst_info = get_ldst_info(pred, wenv);

      wenv->changes |= update_projs(ldst_info, node);

      if ((ldst_info->flags & LDST_VISITED) == 0) {
        adr = get_Load_ptr(pred);
        ldst_info->flags |= LDST_VISITED;

      /*
       * Place the Proj's in the same block as the
       * predecessor Load. This is always ok and prevents
       * "non-SSA" form after optimizations if the Proj
       * is in a wrong block.
       */
      blk      = get_nodes_block(node);
      pred_blk = get_nodes_block(pred);
      if (blk != pred_blk) {
        wenv->changes |= DF_CHANGED;
        set_nodes_block(node, pred_blk);
    }
    else if (op == op_Store) {
      ldst_info = get_ldst_info(pred, wenv);

      wenv->changes |= update_projs(ldst_info, node);

      if ((ldst_info->flags & LDST_VISITED) == 0) {
        adr = get_Store_ptr(pred);
        ldst_info->flags |= LDST_VISITED;

      /*
       * Place the Proj's in the same block as the
       * predecessor Store. This is always ok and prevents
       * "non-SSA" form after optimizations if the Proj
       * is in a wrong block.
       */
      blk      = get_nodes_block(node);
      pred_blk = get_nodes_block(pred);
      if (blk != pred_blk) {
        wenv->changes |= DF_CHANGED;
        set_nodes_block(node, pred_blk);
  }
  else if (op == op_Block) {
    for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
      block_info_t *bl_info;

      pred = skip_Proj(get_Block_cfgpred(node, i));

      /* ignore Bad predecessors, they will be removed later */

      pred_block = get_nodes_block(pred);
      bl_info    = get_block_info(pred_block, wenv);

      if (is_fragile_op(pred))
        bl_info->flags |= BLOCK_HAS_EXC;
      else if (is_irn_forking(pred))
        bl_info->flags |= BLOCK_HAS_COND;

      if (get_irn_op(pred) == op_Load || get_irn_op(pred) == op_Store) {
        ldst_info = get_ldst_info(pred, wenv);

        wenv->changes |= update_exc(ldst_info, node, i);
 * Returns an entity if the address ptr points to a constant one.
static entity *find_constant_entity(ir_node *ptr)
{
  ir_op *op = get_irn_op(ptr);

  if (op == op_SymConst && (get_SymConst_kind(ptr) == symconst_addr_ent)) {
    return get_SymConst_entity(ptr);
  }
  else if (op == op_Sel) {
    entity  *ent = get_Sel_entity(ptr);
    ir_type *tp  = get_entity_owner(ent);

    /* Do not fiddle with polymorphism. */
    if (is_Class_type(get_entity_owner(ent)) &&
        ((get_entity_n_overwrites(ent)    != 0) ||
         (get_entity_n_overwrittenby(ent) != 0)   ) )

    if (variability_constant == get_entity_variability(ent))

    if (is_Array_type(tp)) {
      for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
        tarval  *tlower, *tupper;
        ir_node *index = get_Sel_index(ptr, i);
        tarval  *tv    = computed_value(index);

        /* check if the index is constant */
        if (tv == tarval_bad)

        bound  = get_array_lower_bound(tp, i);
        tlower = computed_value(bound);
        bound  = get_array_upper_bound(tp, i);
        tupper = computed_value(bound);

        if (tlower == tarval_bad || tupper == tarval_bad)

        if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
        if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)

      /* ok, bounds check finished */

    ptr = get_Sel_ptr(ptr);
 * Return the Selection index of the Sel node n for dimension dim.
static long get_Sel_array_index_long(ir_node *n, int dim) {
  ir_node *index = get_Sel_index(n, dim);

  assert(get_irn_op(index) == op_Const);
  return get_tarval_long(get_Const_tarval(index));
 * Returns the accessed component graph path for a
 * node computing an address.
 *
 * @param ptr    the node computing the address
 * @param depth  current depth in steps upward from the root
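 *
 * Illustrative example (not from the original comments): for an address
 * built as Sel(Sel(SymConst &a, b), c), i.e. the access a.b.c, the
 * recursion reaches the SymConst at depth 2 and allocates a path of
 * length 2 that is then filled with the entities b and c on the way back.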
static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
  compound_graph_path *res = NULL;
  entity *root, *field;

  if (get_irn_op(ptr) == op_SymConst) {
    /* a SymConst. If the depth is 0, this is an access to a global
     * entity and we don't need a component path, else we know
     * at least its length.
     */
    assert(get_SymConst_kind(ptr) == symconst_addr_ent);
    root = get_SymConst_entity(ptr);
    res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);

    assert(get_irn_op(ptr) == op_Sel);
    /* it's a Sel, go up until we find the root */
    res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);

    /* fill up the step in the path at the current position */
    field    = get_Sel_entity(ptr);
    path_len = get_compound_graph_path_length(res);
    pos      = path_len - depth - 1;
    set_compound_graph_path_node(res, pos, field);

    if (is_Array_type(get_entity_owner(field))) {
      assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
      set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
/** Returns an access path or NULL. The access path is only
 *  valid if the graph is in phase_high and _no_ address computation is used.
 */
static compound_graph_path *get_accessed_path(ir_node *ptr) {
  return rec_get_accessed_path(ptr, 0);
}

static void reduce_adr_usage(ir_node *ptr);
 * Update a Load that may have lost its uses.
static void handle_load_update(ir_node *load) {
  ldst_info_t *info = get_irn_link(load);

  /* do NOT touch volatile loads for now */
  if (get_Load_volatility(load) == volatility_is_volatile)

  if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
    ir_node *ptr = get_Load_ptr(load);
    ir_node *mem = get_Load_mem(load);

    /* a Load whose value is neither used nor exception-checked, remove it */
    exchange(info->projs[pn_Load_M], mem);
    exchange(load, new_Bad());
    reduce_adr_usage(ptr);
 * A use of an address node has vanished. Check if this was a Proj
 * node and update the counters.
static void reduce_adr_usage(ir_node *ptr) {
  if (get_irn_n_edges(ptr) <= 0) {
    /* this Proj is dead now */
    ir_node *pred = get_Proj_pred(ptr);
    opcode  code  = get_irn_opcode(pred);

    if (code == iro_Load) {
      ldst_info_t *info = get_irn_link(pred);
      info->projs[get_Proj_proj(ptr)] = NULL;

      /* this node lost its result proj, handle that */
      handle_load_update(pred);
 * Follow the memory chain as long as there are only Loads
 * and try to replace the current Load or Store by a previous one.
 * Note that in unreachable loops it might happen that we reach the
 * Load again, and we can also fall into a cycle.
 * We break such cycles using a special visited flag.
 *
 * INC_MASTER() must be called before diving into the chain.
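 *
 * Typical call sequence (illustrative sketch, mirroring optimize_load below):
 *     INC_MASTER();
 *     res = follow_Load_chain(load, skip_Proj(get_Load_mem(load)));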
static unsigned follow_Load_chain(ir_node *load, ir_node *curr) {
  ldst_info_t *info = get_irn_link(load);

  ir_node *ptr       = get_Load_ptr(load);
  ir_node *mem       = get_Load_mem(load);
  ir_mode *load_mode = get_Load_mode(load);

  for (pred = curr; load != pred; pred = skip_Proj(get_Load_mem(pred))) {
    ldst_info_t *pred_info = get_irn_link(pred);
    /*
     * BEWARE: one might think that checking the modes is useless, because
     * if the pointers are identical, they refer to the same object.
     * This is only true in strongly typed languages, not in C where the
     * following is possible: a = *(ir_type1 *)p; b = *(ir_type2 *)p ...
     */
    if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
        get_irn_mode(get_Store_value(pred)) == load_mode) {
      /*
       * a Load immediately after a Store -- a read after write.
       * We may remove the Load, if neither the Load nor the Store has an
       * exception handler OR they are in the same block. In the latter case
       * the Load cannot throw an exception when the previous Store was quiet.
       *
       * Why do we need to check for a Store exception? If the Store cannot
       * be executed (ROM) the exception handler might simply jump into
       *
       * We could make it a little bit better if we would know that the
       * exception handler of the Store jumps directly to the end...
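       *
       * Source-level sketch of the pattern (illustrative only):
       *     *p = x;        // Store
       *     y  = *p;       // Load -- replaced by: y = x;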
       */
      if ((!pred_info->projs[pn_Store_X_except] && !info->projs[pn_Load_X_except]) ||
          get_nodes_block(load) == get_nodes_block(pred)) {
        ir_node *value = get_Store_value(pred);

        DBG_OPT_RAW(load, value);
        if (info->projs[pn_Load_M])
          exchange(info->projs[pn_Load_M], mem);

        if (info->projs[pn_Load_X_except]) {
          exchange(info->projs[pn_Load_X_except], new_Bad());

        if (info->projs[pn_Load_res])
          exchange(info->projs[pn_Load_res], value);

        exchange(load, new_Bad());
        reduce_adr_usage(ptr);
        return res | DF_CHANGED;
    else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
             get_Load_mode(pred) == load_mode) {
      /*
       * a Load after a Load -- a read after read.
       * We may remove the second Load, if it does not have an exception
       * handler OR they are in the same block. In the latter case the Load
       * cannot throw an exception when the previous Load was quiet.
       *
       * Here, there is no need to check if the previous Load has an exception
       * handler because they would have exactly the same exception...
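       *
       * Source-level sketch (illustrative only):
       *     a = *p;        // first Load
       *     b = *p;        // second Load -- replaced by: b = a;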
       */
      if (! info->projs[pn_Load_X_except] || get_nodes_block(load) == get_nodes_block(pred)) {
        DBG_OPT_RAR(load, pred);

        if (pred_info->projs[pn_Load_res]) {
          /* we need a data proj from the previous load for this optimization */
          if (info->projs[pn_Load_res])
            exchange(info->projs[pn_Load_res], pred_info->projs[pn_Load_res]);

          if (info->projs[pn_Load_M])
            exchange(info->projs[pn_Load_M], mem);

        if (info->projs[pn_Load_res]) {
          set_Proj_pred(info->projs[pn_Load_res], pred);
          set_nodes_block(info->projs[pn_Load_res], get_nodes_block(pred));
          pred_info->projs[pn_Load_res] = info->projs[pn_Load_res];

        if (info->projs[pn_Load_M]) {
          /* Actually, this if should not be necessary. Construct the Loads
          exchange(info->projs[pn_Load_M], mem);

        if (info->projs[pn_Load_X_except]) {
          exchange(info->projs[pn_Load_X_except], new_Bad());

        exchange(load, new_Bad());
        reduce_adr_usage(ptr);
        return res | DF_CHANGED;

    /* follow only Load chains */
    if (get_irn_op(pred) != op_Load)

    /* check for cycles */
    if (NODE_VISITED(pred_info))

    MARK_NODE(pred_info);

    if (get_irn_op(pred) == op_Sync) {
      /* handle all Sync predecessors */
      for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
        res |= follow_Load_chain(load, skip_Proj(get_Sync_pred(pred, i)));
static unsigned optimize_load(ir_node *load)
{
  ldst_info_t *info = get_irn_link(load);
  ir_node *mem, *ptr, *new_node;

  /* do NOT touch volatile loads for now */
  if (get_Load_volatility(load) == volatility_is_volatile)

  /* the address of the load to be optimized */
  ptr = get_Load_ptr(load);

  /*
   * Check if we can remove the exception from a Load:
   * This can be done, if the address is from a Sel(Alloc) and
   * the Sel type is a subtype of the allocated type.
   *
   * This optimizes some often used OO constructs,
   * like x = new O; x->t;
   */
  if (info->projs[pn_Load_X_except]) {
    ir_node *mem = get_Sel_mem(ptr);

    if (get_irn_op(skip_Proj(mem)) == op_Alloc) {
      /* ok, check the types */
      entity  *ent    = get_Sel_entity(ptr);
      ir_type *s_type = get_entity_type(ent);
      ir_type *a_type = get_Alloc_type(mem);

      if (is_SubClass_of(s_type, a_type)) {
        /* ok, condition met: there can't be an exception because
         * Alloc guarantees that enough memory was allocated */
        exchange(info->projs[pn_Load_X_except], new_Bad());
        info->projs[pn_Load_X_except] = NULL;

    else if ((get_irn_op(skip_Proj(ptr)) == op_Alloc) ||
             ((get_irn_op(ptr) == op_Cast) && (get_irn_op(skip_Proj(get_Cast_op(ptr))) == op_Alloc))) {
      /* simple case: a direct load after an Alloc. A Firm Alloc throws
       * an exception in case of out-of-memory, so there is no way for an
       * exception in this Load.
       * This code is constructed by the "exception lowering" in the Jack compiler.
       */
      exchange(info->projs[pn_Load_X_except], new_Bad());
      info->projs[pn_Load_X_except] = NULL;
  /* the mem of the Load. Must still be returned after optimization */
  mem = get_Load_mem(load);

  if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
    /* a Load whose value is neither used nor exception-checked, remove it */
    exchange(info->projs[pn_Load_M], mem);

    exchange(load, new_Bad());
    reduce_adr_usage(ptr);
    return res | DF_CHANGED;
  /* Load from a constant polymorphic field, where we can resolve
     polymorphism. */
  new_node = transform_node_Load(load);
  if (new_node != load) {
    if (info->projs[pn_Load_M]) {
      exchange(info->projs[pn_Load_M], mem);
      info->projs[pn_Load_M] = NULL;

    if (info->projs[pn_Load_X_except]) {
      exchange(info->projs[pn_Load_X_except], new_Bad());
      info->projs[pn_Load_X_except] = NULL;

    if (info->projs[pn_Load_res])
      exchange(info->projs[pn_Load_res], new_node);

    exchange(load, new_Bad());
    reduce_adr_usage(ptr);
    return res | DF_CHANGED;
  /* check if we can determine the entity that will be loaded */
  ent = find_constant_entity(ptr);
  if ((allocation_static == get_entity_allocation(ent)) &&
      (visibility_external_allocated != get_entity_visibility(ent))) {
    /* a static allocation that is not external: there should be NO exception */

    /* no exception, clear the info field as it might be checked later again */
    if (info->projs[pn_Load_X_except]) {
      exchange(info->projs[pn_Load_X_except], new_Bad());
      info->projs[pn_Load_X_except] = NULL;
  if (variability_constant == get_entity_variability(ent)
      && is_atomic_entity(ent)) {
    /* Might not be atomic after lowering of Sels. In this case
     * we could also load, but it's more complicated. */

    /* simpler case: we load the content of a constant value:
     * replace it by the constant itself
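     *
     * Source-level sketch (illustrative only):
     *     static const int answer = 42;
     *     x = answer;        // the Load is replaced by the constant 42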
     */
    if (info->projs[pn_Load_M]) {
      exchange(info->projs[pn_Load_M], mem);

    if (info->projs[pn_Load_res]) {
      if (is_atomic_entity(ent)) {
        ir_node *c = copy_const_value(get_irn_dbg_info(load), get_atomic_ent_value(ent));

        exchange(info->projs[pn_Load_res], c);

    exchange(load, new_Bad());
    reduce_adr_usage(ptr);
  else if (variability_constant == get_entity_variability(ent)) {
    compound_graph_path *path = get_accessed_path(ptr);

      assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));

      for (j = 0; j < get_compound_graph_path_length(path); ++j) {
        entity *node = get_compound_graph_path_node(path, j);
        fprintf(stdout, ".%s", get_entity_name(node));
        if (is_Array_type(get_entity_owner(node)))
          fprintf(stdout, "[%d]", get_compound_graph_path_array_index(path, j));

      c = get_compound_ent_value_by_path(ent, path);
      free_compound_graph_path(path);

      /* printf("  cons: "); DDMN(c); */

      if (info->projs[pn_Load_M]) {
        exchange(info->projs[pn_Load_M], mem);

      if (info->projs[pn_Load_res]) {
        exchange(info->projs[pn_Load_res], copy_const_value(get_irn_dbg_info(load), c));

      exchange(load, new_Bad());
      reduce_adr_usage(ptr);

      /* We cannot determine a correct access path. E.g., in jack, we load
         a byte from an object to generate an exception. Happens in test program
      printf(">>>>>>>>>>>>> Found access to constant entity %s in function %s\n", get_entity_name(ent),
             get_entity_name(get_irg_entity(current_ir_graph)));
      printf("  load: "); DDMN(load);
      printf("  ptr:  "); DDMN(ptr);
  /* Check if the address of this Load is used more than once.
   * If not, this Load cannot be removed in any case. */
  if (get_irn_n_uses(ptr) <= 1)

  /*
   * follow the memory chain as long as there are only Loads
   * and try to replace the current Load or Store by a previous one.
   * Note that in unreachable loops it might happen that we reach the
   * Load again, and we can also fall into a cycle.
   * We break such cycles using a special visited flag.
   */
  res = follow_Load_chain(load, skip_Proj(mem));
 * follow the memory chain as long as there are only Loads.
 *
 * INC_MASTER() must be called before diving into the chain.
static unsigned follow_Load_chain_for_Store(ir_node *store, ir_node *curr) {
  ldst_info_t *info = get_irn_link(store);

  ir_node *ptr   = get_Store_ptr(store);
  ir_node *mem   = get_Store_mem(store);
  ir_node *value = get_Store_value(store);
  ir_mode *mode  = get_irn_mode(value);
  ir_node *block = get_nodes_block(store);

  for (pred = curr; pred != store; pred = skip_Proj(get_Load_mem(pred))) {
    ldst_info_t *pred_info = get_irn_link(pred);

    /*
     * BEWARE: one might think that checking the modes is useless, because
     * if the pointers are identical, they refer to the same object.
     * This is only true in strongly typed languages, not in C where the
     * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
     */
    if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
        get_nodes_block(pred) == block && get_irn_mode(get_Store_value(pred)) == mode) {
      /*
       * a Store after a Store in the same block -- a write after write.
       * We may remove the first Store, if it does not have an exception handler.
       *
       * TODO: What if both have the same exception handler?
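       *
       * Source-level sketch (illustrative only):
       *     *p = a;        // first Store -- removed
       *     *p = b;        // second Store overwrites it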
       */
      if (get_Store_volatility(pred) != volatility_is_volatile && !pred_info->projs[pn_Store_X_except]) {
        DBG_OPT_WAW(pred, store);
        exchange( pred_info->projs[pn_Store_M], get_Store_mem(pred) );
        exchange(pred, new_Bad());
        reduce_adr_usage(ptr);
    else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
             value == pred_info->projs[pn_Load_res]) {
      /*
       * a Store of a value after a Load of the same value -- a write after read.
       * We may remove this Store, if it does not have an exception handler.
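       *
       * Source-level sketch (illustrative only):
       *     a  = *p;       // Load
       *     *p = a;        // Store of the same value -- removed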
       */
      if (! info->projs[pn_Store_X_except]) {
        DBG_OPT_WAR(store, pred);
        exchange( info->projs[pn_Store_M], mem );
        exchange(store, new_Bad());
        reduce_adr_usage(ptr);

    /* follow only Load chains */
    if (get_irn_op(pred) != op_Load)

    /* check for cycles */
    if (NODE_VISITED(pred_info))

    MARK_NODE(pred_info);

    if (get_irn_op(pred) == op_Sync) {
      /* handle all Sync predecessors */
      for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
        res |= follow_Load_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
static unsigned optimize_store(ir_node *store)
{
  if (get_Store_volatility(store) == volatility_is_volatile)

  ptr = get_Store_ptr(store);

  /* Check if the address of this Store is used more than once.
   * If not, this Store cannot be removed in any case. */
  if (get_irn_n_uses(ptr) <= 1)

  mem = get_Store_mem(store);

  /* follow the memory chain as long as there are only Loads */
  return follow_Load_chain_for_Store(store, skip_Proj(mem));
 * walker, optimizes Phi after Stores to identical places:
 * Does the following optimization:
 *
 *   val1   val2   val3          val1  val2  val3
 *    |      |      |               \    |    /
 *  Store  Store  Store              \   |   /
 *      \    |    /                    Phi
 *       \   |   /                      |
 *        \  |  /                     Store
 *          Phi
 *
 * This reduces the number of stores and allows for predicated execution.
 * Moves Stores back to the end of a function which may be bad.
 *
 * This is only possible if the predecessor blocks have only one successor.
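 *
 * Source-level sketch of the effect (illustrative only):
 *     if (c) *p = a; else *p = b;      ==>      *p = (c ? a : b);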
static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
{
  ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
  ir_node **inM, **inD, **stores;
  block_info_t *bl_info;

  /* Must be a memory Phi */
  if (get_irn_mode(phi) != mode_M)

  n = get_Phi_n_preds(phi);

  store = skip_Proj(get_Phi_pred(phi, 0));
  if (get_irn_op(store) != op_Store)

  block = get_nodes_block(store);

  /* abort on dead blocks */
  if (is_Block_dead(block))

  /* check if the block is post dominated by the Phi-block
     and has no exception exit */
  bl_info = get_irn_link(block);
  if (bl_info->flags & BLOCK_HAS_EXC)

  phi_block = get_nodes_block(phi);
  if (! block_postdominates(phi_block, block))

  /* this is the address of the store */
  ptr  = get_Store_ptr(store);
  mode = get_irn_mode(get_Store_value(store));
  info = get_irn_link(store);
  exc  = info->exc_block;

  for (i = 1; i < n; ++i) {
    ir_node *pred = skip_Proj(get_Phi_pred(phi, i));

    if (get_irn_op(pred) != op_Store)

    if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))

    info = get_irn_link(pred);

    /* check if all stores have the same exception flow */
    if (exc != info->exc_block)

    /* abort on dead blocks */
    block = get_nodes_block(pred);
    if (is_Block_dead(block))

    /* check if the block is post dominated by the Phi-block
       and has no exception exit */
    bl_info = get_irn_link(block);
    if (bl_info->flags & BLOCK_HAS_EXC)

    if (! block_postdominates(phi_block, block))
  /*
   * ok, when we are here, we found all predecessors of a Phi that
   * are Stores to the same address and size. That means whatever
   * we do before we enter the block of the Phi, we do a Store.
   * So, we can move the Store to the current block:
   *
   *   val1    val2    val3          val1  val2  val3
   *    |       |       |               \    |    /
   * | Str |  | Str |  | Str |           \   |   /
   *                                      Phi
   *                                       |
   *                                     Store
   *
   * This is only allowed if the predecessor blocks have only one successor.
   */
  NEW_ARR_A(ir_node *, stores, n);
  NEW_ARR_A(ir_node *, inM, n);
  NEW_ARR_A(ir_node *, inD, n);
  NEW_ARR_A(int, idx, n);

  /* Prepare: Collect all Store nodes. We must do this
     first because we otherwise may lose a store when exchanging its
     memory Proj. */
  for (i = 0; i < n; ++i)
    stores[i] = skip_Proj(get_Phi_pred(phi, i));

  /* Prepare: Skip the memory Proj: we need this in the case some stores
     Beware: One Store might be included more than once in the stores[]
     list, so we must prevent doing the exchange more than once. */
  for (i = 0; i < n; ++i) {
    ir_node *store = stores[i];

    info   = get_irn_link(store);
    proj_m = info->projs[pn_Store_M];

    if (is_Proj(proj_m) && get_Proj_pred(proj_m) == store)
      exchange(proj_m, get_Store_mem(store));

  /* first step: collect all inputs */
  for (i = 0; i < n; ++i) {
    ir_node *store = stores[i];
    info = get_irn_link(store);

    inM[i] = get_Store_mem(store);
    inD[i] = get_Store_value(store);
    idx[i] = info->exc_idx;

  block = get_nodes_block(phi);

  /* second step: create a new memory Phi */
  phiM = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inM, mode_M);

  /* third step: create a new data Phi */
  phiD = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inD, mode);

  /* fourth step: create the Store */
  store = new_rd_Store(db, current_ir_graph, block, phiM, ptr, phiD);

  co_set_irn_name(store, co_get_irn_ident(old_store));

  projM = new_rd_Proj(NULL, current_ir_graph, block, store, mode_M, pn_Store_M);

  info = get_ldst_info(store, wenv);
  info->projs[pn_Store_M] = projM;

  /* fifth step: repair exception flow */
    ir_node *projX = new_rd_Proj(NULL, current_ir_graph, block, store, mode_X, pn_Store_X_except);

    info->projs[pn_Store_X_except] = projX;
    info->exc_block = exc;
    info->exc_idx   = idx[0];

    for (i = 0; i < n; ++i) {
      set_Block_cfgpred(exc, idx[i], projX);

    /* the exception block should be optimized as some inputs are identical now */

  /* sixth step: replace old Phi */
  exchange(phi, projM);

  return res | DF_CHANGED;
 * walker, do the optimizations
static void do_load_store_optimize(ir_node *n, void *env)
{
  walk_env_t *wenv = env;

  switch (get_irn_opcode(n)) {

    wenv->changes |= optimize_load(n);

    wenv->changes |= optimize_store(n);

    wenv->changes |= optimize_phi(n, wenv);
 * do the load store optimization
void optimize_load_store(ir_graph *irg)
{
  assert(get_irg_phase_state(irg) != phase_building);
  assert(get_irg_pinned(irg) != op_pin_state_floats &&
         "LoadStore optimization needs pinned graph");

  if (! get_opt_redundant_loadstore())

  was_activ = edges_activated(irg);

  /* we need "fresh" edges */
  edges_deactivate(irg);
  edges_activate(irg);

  /* for Phi optimization post-dominators are needed ... */
  assure_postdoms(irg);

  obstack_init(&env.obst);

  /* init the links, then collect Loads/Stores/Proj's in lists */
  irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);

  /* now we have collected enough information, optimize */
  irg_walk_graph(irg, NULL, do_load_store_optimize, &env);

  obstack_free(&env.obst, NULL);

  /* Handle graph state */
  if (get_irg_outs_state(irg) == outs_consistent)
    set_irg_outs_inconsistent(irg);

  if (env.changes & CF_CHANGED) {
    /* is this really needed: Yes, control flow changed, blocks might
       have Bad() predecessors. */
    set_irg_doms_inconsistent(irg);

  edges_deactivate(irg);