3 * File name: ir/opt/ldstopt.c
4 * Purpose: load store optimizations
8 * Copyright: (c) 1998-2004 Universität Karlsruhe
9 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
26 #include "irgraph_t.h"
34 #include "dbginfo_t.h"
35 #include "iropt_dbg.h"
40 #include "opt_polymorphy.h"
43 #include "cacheopt/cachesim.h"
47 #define IMAX(a,b) ((a) > (b) ? (a) : (b))
49 #define MAX_PROJ IMAX(pn_Load_max, pn_Store_max)
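/* MAX_PROJ is the larger of the two Proj-number ranges, so one projs[]
 * array (see ldst_info_t below) is large enough to index the Projs of
 * both Load and Store nodes. */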
52 DF_CHANGED = 1, /**< data flow changed */
53 CF_CHANGED = 2, /**< control flow changed */
59 typedef struct _walk_env_t {
60 struct obstack obst; /**< list of all stores */
61 unsigned changes; /**< a bitmask of graph changes */
65 * flags for Load/Store
68 LDST_VISITED = 1 /**< if set, this Load/Store is already visited */
71 /** A Load/Store info. */
72 typedef struct _ldst_info_t {
73 ir_node *projs[MAX_PROJ]; /**< list of Proj's of this node */
74 ir_node *exc_block; /**< the exception block if available */
75 int exc_idx; /**< predecessor index in the exception block */
76 unsigned flags; /**< flags */
77 unsigned visited; /**< visited counter for breaking loops */
81 * flags for control flow.
84 BLOCK_HAS_COND = 1, /**< Block has conditional control flow */
85 BLOCK_HAS_EXC = 2 /**< Block has exceptional control flow */
91 typedef struct _block_info_t {
92 unsigned flags; /**< flags for the block */
95 /** the master visited flag for loop detection. */
96 static unsigned master_visited = 0;
98 #define INC_MASTER() ++master_visited
99 #define MARK_NODE(info) ((info)->visited = master_visited)
100 #define NODE_VISITED(info) ((info)->visited >= master_visited)
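/* Sketch of how the visited scheme is used by the chain walkers below:
 * bump the master counter once per query, then mark every node reached;
 * a node whose counter is already up to date was seen in this very
 * query, i.e. we ran into a cycle and must stop following the chain:
 *
 *   INC_MASTER();
 *   ...
 *   if (NODE_VISITED(pred_info))
 *       ...                          (cycle: stop)
 *   MARK_NODE(pred_info);
 */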
103 * get the Load/Store info of a node
105 static ldst_info_t *get_ldst_info(ir_node *node, walk_env_t *env) {
106 ldst_info_t *info = get_irn_link(node);
109 info = obstack_alloc(&env->obst, sizeof(*info));
111 memset(info, 0, sizeof(*info));
113 set_irn_link(node, info);
119 * get the Block info of a node
121 static block_info_t *get_block_info(ir_node *node, walk_env_t *env)
123 block_info_t *info = get_irn_link(node);
126 info = obstack_alloc(&env->obst, sizeof(*info));
128 memset(info, 0, sizeof(*info));
130 set_irn_link(node, info);
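/* Both info records live on the walk_env_t obstack and are attached to
 * their node via the generic link field, so no extra map is needed and
 * everything is released by the single obstack_free() at the end of
 * optimize_load_store(). */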
136 * update the projection info for a Load/Store
138 static unsigned update_projs(ldst_info_t *info, ir_node *proj)
140 long nr = get_Proj_proj(proj);
142 assert(0 <= nr && nr <= MAX_PROJ && "Wrong proj from LoadStore");
144 if (info->projs[nr]) {
145 /* there is already one, do CSE */
146 exchange(proj, info->projs[nr]);
150 info->projs[nr] = proj;
156 * update the exception block info for a Load/Store node.
158 * @param info the load/store info struct
159 * @param block the exception handler block for this load/store
160 * @param pos the control flow input of the block
162 static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
164 assert(info->exc_block == NULL && "more than one exception block found");
166 info->exc_block = block;
171 /** Return the number of uses of an address node */
172 #define get_irn_n_uses(adr) (unsigned)PTR_TO_INT(get_irn_link(adr))
173 /** Sets the number of uses of an address node */
174 #define set_irn_n_uses(adr, n) set_irn_link(adr, INT_TO_PTR(n))
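/* The link field of an address node is reused as a plain use counter:
 * the integer is stored in the pointer value itself (PTR_TO_INT /
 * INT_TO_PTR), so no allocation is needed. collect_nodes() increments
 * the counter for every Load/Store using the address, reduce_adr_usage()
 * decrements it whenever such a use disappears. */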
177 * walker, collects all Load/Store/Proj nodes
179 * walks from Start -> End
181 static void collect_nodes(ir_node *node, void *env)
183 ir_op *op = get_irn_op(node);
184 ir_node *pred, *blk, *pred_blk;
185 ldst_info_t *ldst_info;
186 walk_env_t *wenv = env;
192 pred = get_Proj_pred(node);
193 op = get_irn_op(pred);
196 ldst_info = get_ldst_info(pred, wenv);
198 wenv->changes |= update_projs(ldst_info, node);
200 if ((ldst_info->flags & LDST_VISITED) == 0) {
201 adr = get_Load_ptr(pred);
202 set_irn_n_uses(adr, get_irn_n_uses(adr) + 1);
204 ldst_info->flags |= LDST_VISITED;
208 * Place the Proj's in the same block as the
209 * predecessor Load. This is always ok and prevents
210 * "non-SSA" form after optimizations if the Proj
211 * is in a wrong block.
213 blk = get_nodes_block(node);
214 pred_blk = get_nodes_block(pred);
215 if (blk != pred_blk) {
216 wenv->changes |= DF_CHANGED;
217 set_nodes_block(node, pred_blk);
220 else if (op == op_Store) {
221 ldst_info = get_ldst_info(pred, wenv);
223 wenv->changes |= update_projs(ldst_info, node);
225 if ((ldst_info->flags & LDST_VISITED) == 0) {
226 adr = get_Store_ptr(pred);
227 set_irn_n_uses(adr, get_irn_n_uses(adr) + 1);
229 ldst_info->flags |= LDST_VISITED;
234 * Place the Proj's in the same block as the
234 * predecessor Store. This is always ok and prevents
235 * "non-SSA" form after optimizations if the Proj
236 * is in a wrong block.
238 blk = get_nodes_block(node);
239 pred_blk = get_nodes_block(pred);
240 if (blk != pred_blk) {
241 wenv->changes |= DF_CHANGED;
242 set_nodes_block(node, pred_blk);
246 else if (op == op_Block) { /* check, if it's an exception block */
249 for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
251 block_info_t *bl_info;
253 pred = skip_Proj(get_Block_cfgpred(node, i));
255 /* ignore Bad predecessors, they will be removed later */
259 pred_block = get_nodes_block(pred);
260 bl_info = get_block_info(pred_block, wenv);
262 if (is_fragile_op(pred))
263 bl_info->flags |= BLOCK_HAS_EXC;
264 else if (is_irn_forking(pred))
265 bl_info->flags |= BLOCK_HAS_COND;
267 if (get_irn_op(pred) == op_Load || get_irn_op(pred) == op_Store) {
268 ldst_info = get_ldst_info(pred, wenv);
270 wenv->changes |= update_exc(ldst_info, node, i);
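/* After this walk the bookkeeping for the optimization is in place:
 * Loads and Stores (reached through their Projs) carry an ldst_info_t
 * with their Projs and, if present, their exception block; predecessor
 * blocks carry a block_info_t with the BLOCK_HAS_COND/BLOCK_HAS_EXC
 * flags; and every Load/Store address carries its use count. */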
277 * Returns an entity if the address ptr points to a constant one.
279 static entity *find_constant_entity(ir_node *ptr)
282 ir_op *op = get_irn_op(ptr);
284 if (op == op_SymConst && (get_SymConst_kind(ptr) == symconst_addr_ent)) {
285 return get_SymConst_entity(ptr);
287 else if (op == op_Sel) {
288 entity *ent = get_Sel_entity(ptr);
289 ir_type *tp = get_entity_owner(ent);
291 /* Do not fiddle with polymorphism. */
292 if (is_Class_type(get_entity_owner(ent)) &&
293 ((get_entity_n_overwrites(ent) != 0) ||
294 (get_entity_n_overwrittenby(ent) != 0) ) )
297 if (variability_constant == get_entity_variability(ent))
300 if (is_Array_type(tp)) {
304 for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
306 tarval *tlower, *tupper;
307 ir_node *index = get_Sel_index(ptr, i);
308 tarval *tv = computed_value(index);
310 /* check if the index is constant */
311 if (tv == tarval_bad)
314 bound = get_array_lower_bound(tp, i);
315 tlower = computed_value(bound);
316 bound = get_array_upper_bound(tp, i);
317 tupper = computed_value(bound);
319 if (tlower == tarval_bad || tupper == tarval_bad)
322 if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
324 if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
327 /* ok, bounds check finished */
332 ptr = get_Sel_ptr(ptr);
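/* Source-level sketch of what find_constant_entity() accepts (the names
 * are illustrative only):
 *
 *   static const int tab[4] = { 1, 2, 3, 5 };
 *   ... tab[2] ...
 *
 * The Sel for tab[2] has a constant, in-bounds index and tab is a
 * constant entity, so optimize_load() can later fold the access to the
 * constant 3. Accesses with variable or out-of-range indexes are
 * rejected. */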
340 * Return the array index (as a long) of Sel node n at dimension dim.
342 static long get_Sel_array_index_long(ir_node *n, int dim) {
343 ir_node *index = get_Sel_index(n, dim);
344 assert(get_irn_op(index) == op_Const);
345 return get_tarval_long(get_Const_tarval(index));
349 * Returns the accessed component graph path for a
350 * node computing an address.
352 * @param ptr the node computing the address
353 * @param depth current depth in steps upward from the root
356 static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
357 compound_graph_path *res = NULL;
358 entity *root, *field;
361 if (get_irn_op(ptr) == op_SymConst) {
362 /* a SymConst. If the depth is 0, this is an access to a global
363 * entity and we don't need a component path, else we know
364 * at least its length.
366 assert(get_SymConst_kind(ptr) == symconst_addr_ent);
367 root = get_SymConst_entity(ptr);
368 res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
371 assert(get_irn_op(ptr) == op_Sel);
372 /* it's a Sel, go up until we find the root */
373 res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);
375 /* fill up the step in the path at the current position */
376 field = get_Sel_entity(ptr);
377 path_len = get_compound_graph_path_length(res);
378 pos = path_len - depth - 1;
379 set_compound_graph_path_node(res, pos, field);
381 if (is_Array_type(get_entity_owner(field))) {
382 assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
383 set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
389 /** Returns an access path or NULL. The access path is only
390 * valid if the graph is in phase_high and _no_ address computation is used.
392 static compound_graph_path *get_accessed_path(ir_node *ptr) {
393 return rec_get_accessed_path(ptr, 0);
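/* Illustration (sketch, illustrative names): for an address such as
 * &glob.arr[3].fld that is built purely from Sel nodes on top of a
 * SymConst, the resulting compound graph path describes the chain of
 * accessed entities and array indexes -- roughly ".arr[3].fld" in the
 * notation of the debug output in optimize_load() below. */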
397 static void reduce_adr_usage(ir_node *ptr);
400 * Update a Load that may have lost its usage.
402 static void handle_load_update(ir_node *load) {
403 ldst_info_t *info = get_irn_link(load);
405 /* do NOT touch volatile loads for now */
406 if (get_Load_volatility(load) == volatility_is_volatile)
409 if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
410 ir_node *ptr = get_Load_ptr(load);
411 ir_node *mem = get_Load_mem(load);
413 /* a Load whose value is neither used nor exception-checked; remove it */
414 exchange(info->projs[pn_Load_M], mem);
415 reduce_adr_usage(ptr);
420 * A use of an address node has vanished. Check if it was a Proj
421 * node and update the counters.
423 static void reduce_adr_usage(ir_node *ptr) {
424 int use_count = get_irn_n_uses(ptr);
--use_count; /* one use of ptr has just vanished */
426 assert(use_count >= 0);
427 set_irn_n_uses(ptr, use_count);
430 if (use_count <= 0) {
431 /* this Proj is now dead, update the Load/Store info */
432 ir_node *pred = get_Proj_pred(ptr);
433 opcode code = get_irn_opcode(pred);
435 if (code == iro_Load) {
436 ldst_info_t *info = get_irn_link(pred);
437 info->projs[get_Proj_proj(ptr)] = NULL;
439 /* this node lost its result Proj, handle that */
440 handle_load_update(pred);
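/* handle_load_update() and reduce_adr_usage() are mutually recursive on
 * purpose: removing a now-dead Load drops a use of its address, which in
 * turn may make another Proj -- and thus another Load -- dead as well. */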
447 * Follow the memory chain as long as there are only Loads
448 * and try to replace the current Load or Store by a previous one.
449 * Note that in unreachable loops it might happen that we reach the
450 * same Load again, or that we fall into a cycle.
451 * We break such cycles using a special visited flag.
453 * INC_MASTER() must be called before diving into
455 static unsigned follow_Load_chain(ir_node *load, ir_node *curr) {
457 ldst_info_t *info = get_irn_link(load);
459 ir_node *ptr = get_Load_ptr(load);
460 ir_node *mem = get_Load_mem(load);
461 ir_mode *load_mode = get_Load_mode(load);
463 for (pred = curr; load != pred; pred = skip_Proj(get_Load_mem(pred))) {
464 ldst_info_t *pred_info = get_irn_link(pred);
467 * BEWARE: one might think that checking the modes is useless, because
468 * if the pointers are identical, they refer to the same object.
469 * This is only true in strongly typed languages, not in C, where the
470 * following is possible: a = *(ir_type1 *)p; b = *(ir_type2 *)p ...
473 if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
474 get_irn_mode(get_Store_value(pred)) == load_mode) {
476 * a Load immediately after a Store -- a read after write.
477 * We may remove the Load if neither the Load nor the Store has an exception handler
478 * OR they are in the same block. In the latter case the Load cannot
479 * throw an exception when the previous Store was quiet.
481 * Why do we need to check for a Store exception? If the Store cannot
482 * be executed (ROM) the exception handler might simply jump into
484 * We could make it a little bit better if we knew that the exception
485 * handler of the Store jumps directly to the end...
487 if ((!pred_info->projs[pn_Store_X_except] && !info->projs[pn_Load_X_except]) ||
488 get_nodes_block(load) == get_nodes_block(pred)) {
489 ir_node *value = get_Store_value(pred);
491 DBG_OPT_RAW(load, value);
492 if (info->projs[pn_Load_M])
493 exchange(info->projs[pn_Load_M], mem);
496 if (info->projs[pn_Load_X_except]) {
497 exchange( info->projs[pn_Load_X_except], new_Bad());
501 if (info->projs[pn_Load_res])
502 exchange(info->projs[pn_Load_res], value);
504 reduce_adr_usage(ptr);
505 return res | DF_CHANGED;
508 else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
509 get_Load_mode(pred) == load_mode) {
511 * a Load after a Load -- a read after read.
512 * We may remove the second Load, if it does not have an exception handler
513 * OR they are in the same block. In the latter case the Load cannot
514 * throw an exception when the previous Load was quiet.
516 * Here, there is no need to check if the previous Load has an exception
517 * handler because they would have exactly the same exception...
519 if (! info->projs[pn_Load_X_except] || get_nodes_block(load) == get_nodes_block(pred)) {
520 DBG_OPT_RAR(load, pred);
522 if (pred_info->projs[pn_Load_res]) {
523 /* we need a data proj from the previous load for this optimization */
524 if (info->projs[pn_Load_res])
525 exchange(info->projs[pn_Load_res], pred_info->projs[pn_Load_res]);
527 if (info->projs[pn_Load_M])
528 exchange(info->projs[pn_Load_M], mem);
531 if (info->projs[pn_Load_res]) {
532 set_Proj_pred(info->projs[pn_Load_res], pred);
533 set_nodes_block(info->projs[pn_Load_res], get_nodes_block(pred));
534 pred_info->projs[pn_Load_res] = info->projs[pn_Load_res];
536 if (info->projs[pn_Load_M]) {
537 /* Actually, this if should not be necessary. Construct the Loads
539 exchange(info->projs[pn_Load_M], mem);
544 if (info->projs[pn_Load_X_except]) {
545 exchange(info->projs[pn_Load_X_except], new_Bad());
549 reduce_adr_usage(ptr);
550 return res | DF_CHANGED;
554 /* follow only Load chains */
555 if (get_irn_op(pred) != op_Load)
558 /* check for cycles */
559 if (NODE_VISITED(pred_info))
561 MARK_NODE(pred_info);
564 if (get_irn_op(pred) == op_Sync) {
567 /* handle all Sync predecessors */
568 for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
569 res |= follow_Load_chain(load, skip_Proj(get_Sync_pred(pred, i)));
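/* The two replacements above, sketched at the source level (p, x, r1, r2
 * are illustrative names):
 *
 *   *p = x;  r = *p;     becomes     *p = x;  r = x;     (read after write)
 *   r1 = *p; r2 = *p;    becomes     r1 = *p; r2 = r1;   (read after read)
 *
 * subject to the mode and exception-handler conditions checked above. */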
581 static unsigned optimize_load(ir_node *load)
583 ldst_info_t *info = get_irn_link(load);
584 ir_mode *load_mode = get_Load_mode(load);
585 ir_node *mem, *ptr, *new_node;
589 /* do NOT touch volatile loads for now */
590 if (get_Load_volatility(load) == volatility_is_volatile)
593 /* the address of the load to be optimized */
594 ptr = get_Load_ptr(load);
597 * Check if we can remove the exception from a Load:
598 * This can be done if the address is from a Sel(Alloc) and
599 * the Sel type is a subtype of the allocated type.
601 * This optimizes some often used OO constructs,
602 * like x = new O; x->t;
604 if (info->projs[pn_Load_X_except]) {
606 ir_node *mem = get_Sel_mem(ptr);
608 if (get_irn_op(skip_Proj(mem)) == op_Alloc) {
609 /* ok, check the types */
610 entity *ent = get_Sel_entity(ptr);
611 ir_type *s_type = get_entity_type(ent);
612 ir_type *a_type = get_Alloc_type(mem);
614 if (is_SubClass_of(s_type, a_type)) {
615 /* ok, condition met: there can't be an exception because
616 * Alloc guarantees that enough memory was allocated */
618 exchange(info->projs[pn_Load_X_except], new_Bad());
619 info->projs[pn_Load_X_except] = NULL;
624 else if ((get_irn_op(skip_Proj(ptr)) == op_Alloc) ||
625 ((get_irn_op(ptr) == op_Cast) && (get_irn_op(skip_Proj(get_Cast_op(ptr))) == op_Alloc))) {
626 /* simple case: a direct load after an Alloc. Firm's Alloc throws
627 * an exception in case of out-of-memory, so there is no way for an
628 * exception in this Load.
629 * This code is constructed by the "exception lowering" in the Jack compiler.
631 exchange(info->projs[pn_Load_X_except], new_Bad());
632 info->projs[pn_Load_X_except] = NULL;
637 /* the mem of the Load. Must still be returned after optimization */
638 mem = get_Load_mem(load);
640 if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
641 /* a Load whose value is neither used nor exception-checked; remove it */
642 exchange(info->projs[pn_Load_M], mem);
644 reduce_adr_usage(ptr);
645 return res | DF_CHANGED;
648 /* Load from a constant polymorphic field, where we can resolve
650 new_node = transform_node_Load(load);
651 if (new_node != load) {
652 if (info->projs[pn_Load_M]) {
653 exchange(info->projs[pn_Load_M], mem);
654 info->projs[pn_Load_M] = NULL;
656 if (info->projs[pn_Load_X_except]) {
657 exchange(info->projs[pn_Load_X_except], new_Bad());
658 info->projs[pn_Load_X_except] = NULL;
660 if (info->projs[pn_Load_res])
661 exchange(info->projs[pn_Load_res], new_node);
663 reduce_adr_usage(ptr);
664 return res | DF_CHANGED;
667 /* check if we can determine the entity that will be loaded */
668 ent = find_constant_entity(ptr);
670 if ((allocation_static == get_entity_allocation(ent)) &&
671 (visibility_external_allocated != get_entity_visibility(ent))) {
672 /* a static allocation that is not external: there should be NO exception
675 /* no exception, clear the info field as it might be checked later again */
676 if (info->projs[pn_Load_X_except]) {
677 exchange(info->projs[pn_Load_X_except], new_Bad());
678 info->projs[pn_Load_X_except] = NULL;
682 if (variability_constant == get_entity_variability(ent)
683 && is_atomic_entity(ent)) {
684 /* Might not be atomic after
685 lowering of Sels. In this
686 case we could also load, but
687 it's more complicated. */
688 /* simpler case: we load the content of a constant value:
689 * replace it by the constant itself
693 if (info->projs[pn_Load_M]) {
694 exchange(info->projs[pn_Load_M], mem);
698 if (info->projs[pn_Load_res]) {
699 if (is_atomic_entity(ent)) {
700 ir_node *c = copy_const_value(get_irn_dbg_info(load), get_atomic_ent_value(ent));
703 exchange(info->projs[pn_Load_res], c);
707 reduce_adr_usage(ptr);
710 else if (variability_constant == get_entity_variability(ent)) {
711 compound_graph_path *path = get_accessed_path(ptr);
716 assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
720 for (j = 0; j < get_compound_graph_path_length(path); ++j) {
721 entity *node = get_compound_graph_path_node(path, j);
722 fprintf(stdout, ".%s", get_entity_name(node));
723 if (is_Array_type(get_entity_owner(node)))
724 fprintf(stdout, "[%d]", get_compound_graph_path_array_index(path, j));
730 c = get_compound_ent_value_by_path(ent, path);
731 free_compound_graph_path(path);
733 /* printf(" cons: "); DDMN(c); */
735 if (info->projs[pn_Load_M]) {
736 exchange(info->projs[pn_Load_M], mem);
739 if (info->projs[pn_Load_res]) {
740 exchange(info->projs[pn_Load_res], copy_const_value(get_irn_dbg_info(load), c));
743 reduce_adr_usage(ptr);
747 /* We cannot determine a correct access path. E.g., in jack, we load
748 a byte from an object to generate an exception. Happens in test program
750 printf(">>>>>>>>>>>>> Found access to constant entity %s in function %s\n", get_entity_name(ent),
751 get_entity_name(get_irg_entity(current_ir_graph)));
752 printf(" load: "); DDMN(load);
753 printf(" ptr: "); DDMN(ptr);
760 /* Check, if the address of this load is used more than once.
761 * If not, this load cannot be removed in any case. */
762 if (get_irn_n_uses(ptr) <= 1)
766 * follow the memory chain as long as there are only Loads
767 * and try to replace the current Load or Store by a previous one.
768 * Note that in unreachable loops it might happen that we reach the
769 * same Load again, or that we fall into a cycle.
770 * We break such cycles using a special visited flag.
773 res = follow_Load_chain(load, skip_Proj(mem));
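/* Source-level sketch of the constant-entity cases handled above
 * (illustrative names; assumes the frontend marked the entities as
 * constant):
 *
 *   static const int answer = 42;          r = answer;   becomes   r = 42;
 *   static const int tab[4] = {1,2,3,5};   r = tab[2];   becomes   r = 3;
 *
 * The first case uses the atomic entity value directly, the second
 * resolves the value through the compound graph path of the access. */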
778 * follow the memory chain as long as there are only Loads.
780 * INC_MASTER() must be called before diving into
782 static unsigned follow_Load_chain_for_Store(ir_node *store, ir_node *curr) {
784 ldst_info_t *info = get_irn_link(store);
786 ir_node *ptr = get_Store_ptr(store);
787 ir_node *mem = get_Store_mem(store);
788 ir_node *value = get_Store_value(store);
789 ir_mode *mode = get_irn_mode(value);
790 ir_node *block = get_nodes_block(store);
792 for (pred = curr; pred != store; pred = skip_Proj(get_Load_mem(pred))) {
793 ldst_info_t *pred_info = get_irn_link(pred);
796 * BEWARE: one might think that checking the modes is useless, because
797 * if the pointers are identical, they refer to the same object.
798 * This is only true in strongly typed languages, not in C, where the
799 * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
801 if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
802 get_nodes_block(pred) == block && get_irn_mode(get_Store_value(pred)) == mode) {
804 * a Store after a Store in the same block -- a write after write.
805 * We may remove the first Store, if it does not have an exception handler.
807 * TODO: What if both have the same exception handler?
809 if (get_Store_volatility(pred) != volatility_is_volatile && !pred_info->projs[pn_Store_X_except]) {
810 DBG_OPT_WAW(pred, store);
811 exchange( pred_info->projs[pn_Store_M], get_Store_mem(pred) );
812 reduce_adr_usage(ptr);
816 else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
817 value == pred_info->projs[pn_Load_res]) {
819 * a Store of a value after a Load -- a write after read.
820 * We may remove the Store if it does not have an exception handler.
822 if (! info->projs[pn_Store_X_except]) {
823 DBG_OPT_WAR(store, pred);
824 exchange( info->projs[pn_Store_M], mem );
825 reduce_adr_usage(ptr);
830 /* follow only Load chains */
831 if (get_irn_op(pred) != op_Load)
834 /* check for cycles */
835 if (NODE_VISITED(pred_info))
837 MARK_NODE(pred_info);
840 if (get_irn_op(pred) == op_Sync) {
843 /* handle all Sync predecessors */
844 for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
845 res |= follow_Load_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
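/* The two replacements above, sketched at the source level (p, x, y, r
 * are illustrative names):
 *
 *   *p = x; ... *p = y;   becomes   ... *p = y;   (write after write, same block)
 *   r = *p; *p = r;       becomes   r = *p;       (write after read of the same value)
 */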
856 static unsigned optimize_store(ir_node *store)
858 ldst_info_t *info = get_irn_link(store);
861 if (get_Store_volatility(store) == volatility_is_volatile)
864 ptr = get_Store_ptr(store);
866 /* Check if the address of this Store is used more than once.
867 * If not, this Store cannot be removed in any case. */
868 if (get_irn_n_uses(ptr) <= 1)
871 mem = get_Store_mem(store);
873 /* follow the memory chain as long as there are only Loads */
875 return follow_Load_chain_for_Store(store, skip_Proj(mem));
879 * walker, optimizes Phi after Stores to identical places:
880 * Does the following optimization:
883 * val1 val2 val3 val1 val2 val3
892 * This reduces the number of stores and allows for predicated execution.
893 * Moves Stores back to the end of a function, which may be bad.
895 * This is only possible if the predecessor blocks have only one successor.
897 static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
900 ir_node *store, *old_store, *ptr, *block, *phiM, *phiD, *exc, *projM;
902 ir_node **inM, **inD;
906 block_info_t *bl_info;
909 /* Must be a memory Phi */
910 if (get_irn_mode(phi) != mode_M)
913 n = get_Phi_n_preds(phi);
917 store = skip_Proj(get_Phi_pred(phi, 0));
919 if (get_irn_op(store) != op_Store)
922 /* abort on dead blocks */
923 if (is_Block_dead(get_nodes_block(store)))
926 /* check if the block has only one successor */
927 bl_info = get_irn_link(get_nodes_block(store));
931 /* this is the address of the store */
932 ptr = get_Store_ptr(store);
933 mode = get_irn_mode(get_Store_value(store));
934 info = get_irn_link(store);
935 exc = info->exc_block;
937 for (i = 1; i < n; ++i) {
938 ir_node *pred = skip_Proj(get_Phi_pred(phi, i));
940 if (get_irn_op(pred) != op_Store)
943 if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
946 info = get_irn_link(pred);
948 /* check, if all stores have the same exception flow */
949 if (exc != info->exc_block)
952 /* abort on dead blocks */
953 if (is_Block_dead(get_nodes_block(pred)))
956 /* check if the block has only one successor */
957 bl_info = get_irn_link(get_nodes_block(pred));
963 * ok, if we get here we have found all predecessors of a Phi that
964 * are Stores to the same address and size. That means whatever
965 * we do before we enter the block of the Phi, we do a Store.
966 * So, we can move the Store to the current block:
968 * val1 val2 val3 val1 val2 val3
970 * | Str | | Str | | Str | \ | /
976 * This is only allowed if the predecessor blocks have only one successor.
979 /* first step: collect all inputs */
980 NEW_ARR_A(ir_node *, inM, n);
981 NEW_ARR_A(ir_node *, inD, n);
982 NEW_ARR_A(int, idx, n);
984 for (i = 0; i < n; ++i) {
985 ir_node *pred = skip_Proj(get_Phi_pred(phi, i));
986 info = get_irn_link(pred);
988 inM[i] = get_Store_mem(pred);
989 inD[i] = get_Store_value(pred);
990 idx[i] = info->exc_idx;
992 /* Should we replace the Proj after the Store here by
993 * the Store's memory? It would be safe but should not be needed,
994 * because we checked that all pred blocks have only one
995 * control flow successor.
998 block = get_nodes_block(phi);
1000 /* second step: create a new memory Phi */
1001 phiM = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inM, mode_M);
1003 /* third step: create a new data Phi */
1004 phiD = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inD, mode);
1006 /* fourth step: create the Store */
1007 store = new_rd_Store(db, current_ir_graph, block, phiM, ptr, phiD);
1009 co_set_irn_name(store, co_get_irn_ident(old_store));
1011 /* we replaced n uses by 1 */
1012 set_irn_n_uses(ptr, get_irn_n_uses(ptr) - n + 1);
1014 projM = new_rd_Proj(NULL, current_ir_graph, block, store, mode_M, pn_Store_M);
1016 info = get_ldst_info(store, wenv);
1017 info->projs[pn_Store_M] = projM;
1019 /* fifth step: repair the exception flow */
1021 ir_node *projX = new_rd_Proj(NULL, current_ir_graph, block, store, mode_X, pn_Store_X_except);
1023 info->projs[pn_Store_X_except] = projX;
1024 info->exc_block = exc;
1025 info->exc_idx = idx[0];
1027 for (i = 0; i < n; ++i) {
1028 set_Block_cfgpred(exc, idx[i], projX);
1032 /* the exception block should be optimized as some inputs are identical now */
1038 /* sixth step: replace old Phi */
1039 exchange(phi, projM);
1041 return res | DF_CHANGED;
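/* Source-level sketch of the effect of optimize_phi() (illustrative code;
 * the transformation itself happens on the memory Phi in the graph):
 *
 *   if (c) *p = a; else *p = b;     conceptually becomes     *p = c ? a : b;
 *
 * i.e. one Store fed by a data Phi replaces one Store per predecessor
 * block, enabling predicated execution as noted above. */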
1045 * walker, do the optimizations
1047 static void do_load_store_optimize(ir_node *n, void *env)
1049 walk_env_t *wenv = env;
1051 switch (get_irn_opcode(n)) {
1054 wenv->changes |= optimize_load(n);
1058 wenv->changes |= optimize_store(n);
1062 wenv->changes |= optimize_phi(n, wenv);
1070 * do the load store optimization
1072 void optimize_load_store(ir_graph *irg)
1076 assert(get_irg_phase_state(irg) != phase_building);
1077 assert(get_irg_pinned(irg) != op_pin_state_floats &&
1078 "LoadStore optimization needs pinned graph");
1080 if (! get_opt_redundant_loadstore())
1083 obstack_init(&env.obst);
1086 /* init the links, then collect Loads/Stores/Proj's in lists */
1088 irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);
1090 /* now we have collected enough information, optimize */
1091 irg_walk_graph(irg, NULL, do_load_store_optimize, &env);
1093 obstack_free(&env.obst, NULL);
1095 /* Handle graph state */
1097 if (get_irg_outs_state(irg) == outs_consistent)
1098 set_irg_outs_inconsistent(irg);
1101 if (env.changes & CF_CHANGED) {
1102 /* Is this really needed? Yes: control flow changed, blocks might
1103 have Bad() predecessors. */
1104 set_irg_doms_inconsistent(irg);