 * File name:   ir/opt/ldstopt.c
 * Purpose:     load store optimizations
 * Copyright:   (c) 1998-2007 Universität Karlsruhe
 * Licence:     This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
#include "irgraph_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "opt_polymorphy.h"
#include "cacheopt/cachesim.h"
#define IMAX(a,b)   ((a) > (b) ? (a) : (b))

#define MAX_PROJ    IMAX(pn_Load_max, pn_Store_max)

enum changes_t {
    DF_CHANGED = 1,       /**< data flow changed */
    CF_CHANGED = 2,       /**< control flow changed */
};
typedef struct _walk_env_t {
    struct obstack obst;          /**< obstack to allocate the per-node info structs on */
    unsigned changes;             /**< a bitmask of graph changes */
} walk_env_t;
 * flags for Load/Store
 */
enum ldst_flags_t {
    LDST_VISITED = 1              /**< if set, this Load/Store is already visited */
};
/** A Load/Store info. */
typedef struct _ldst_info_t {
    ir_node  *projs[MAX_PROJ];    /**< list of Proj's of this node */
    ir_node  *exc_block;          /**< the exception block if available */
    int      exc_idx;             /**< predecessor index in the exception block */
    unsigned flags;               /**< flags */
    unsigned visited;             /**< visited counter for breaking loops */
} ldst_info_t;
 * flags for control flow.
 */
enum block_flags_t {
    BLOCK_HAS_COND = 1,   /**< Block has conditional control flow */
    BLOCK_HAS_EXC  = 2    /**< Block has exceptional control flow */
};

/** A Block info. */
typedef struct _block_info_t {
    unsigned flags;       /**< flags for the block */
} block_info_t;
/** the master visited flag for loop detection. */
static unsigned master_visited = 0;

#define INC_MASTER()       ++master_visited
#define MARK_NODE(info)    (info)->visited = master_visited
#define NODE_VISITED(info) ((info)->visited >= master_visited)
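
/* A minimal usage sketch of the generation-counter scheme above (illustrative
 * only, not compiled): bumping master_visited invalidates all old marks at
 * once, so no per-node clearing is needed between walks. The variables used
 * here (load, curr, pred) stand for those of a memory-chain walker. */
#if 0
    INC_MASTER();                      /* start a fresh walk generation */
    for (pred = curr; pred != load; ) {
        ldst_info_t *pred_info = get_irn_link(pred);
        if (NODE_VISITED(pred_info))
            break;                     /* we closed a cycle: stop the walk */
        MARK_NODE(pred_info);
        /* ... advance pred along the memory chain ... */
    }
#endif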
/**
 * get the Load/Store info of a node
 */
static ldst_info_t *get_ldst_info(ir_node *node, walk_env_t *env) {
    ldst_info_t *info = get_irn_link(node);

    if (! info) {
        info = obstack_alloc(&env->obst, sizeof(*info));
        memset(info, 0, sizeof(*info));
        set_irn_link(node, info);
    }
    return info;
} /* get_ldst_info */
/**
 * get the Block info of a node
 */
static block_info_t *get_block_info(ir_node *node, walk_env_t *env) {
    block_info_t *info = get_irn_link(node);

    if (! info) {
        info = obstack_alloc(&env->obst, sizeof(*info));
        memset(info, 0, sizeof(*info));
        set_irn_link(node, info);
    }
    return info;
} /* get_block_info */
/**
 * update the projection info for a Load/Store
 */
static unsigned update_projs(ldst_info_t *info, ir_node *proj)
{
    long nr = get_Proj_proj(proj);

    assert(0 <= nr && nr <= MAX_PROJ && "Wrong proj from LoadStore");

    if (info->projs[nr]) {
        /* there is already one, do CSE */
        exchange(proj, info->projs[nr]);
        return DF_CHANGED;
    }
    else {
        info->projs[nr] = proj;
        return 0;
    }
} /* update_projs */
/**
 * update the exception block info for a Load/Store node.
 *
 * @param info   the load/store info struct
 * @param block  the exception handler block for this load/store
 * @param pos    the control flow input of the block
 */
static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
{
    assert(info->exc_block == NULL && "more than one exception block found");

    info->exc_block = block;
    info->exc_idx   = pos;
    return 0;
} /* update_exc */
/** Return the number of uses of an address node */
#define get_irn_n_uses(adr) get_irn_n_edges(adr)
/**
 * walker, collects all Load/Store/Proj nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
{
    ir_op       *op = get_irn_op(node);
    ir_node     *pred, *blk, *pred_blk;
    ldst_info_t *ldst_info;
    walk_env_t  *wenv = env;

    if (op == op_Proj) {
        ir_node *adr;

        pred = get_Proj_pred(node);
        op   = get_irn_op(pred);

        if (op == op_Load) {
            ldst_info = get_ldst_info(pred, wenv);

            wenv->changes |= update_projs(ldst_info, node);

            if ((ldst_info->flags & LDST_VISITED) == 0) {
                adr = get_Load_ptr(pred);
                ldst_info->flags |= LDST_VISITED;
            }

            /*
             * Place the Proj's in the same block as the
             * predecessor Load. This is always ok and prevents
             * "non-SSA" form after optimizations if the Proj
             * is in the wrong block.
             */
            blk      = get_nodes_block(node);
            pred_blk = get_nodes_block(pred);
            if (blk != pred_blk) {
                wenv->changes |= DF_CHANGED;
                set_nodes_block(node, pred_blk);
            }
        } else if (op == op_Store) {
            ldst_info = get_ldst_info(pred, wenv);

            wenv->changes |= update_projs(ldst_info, node);

            if ((ldst_info->flags & LDST_VISITED) == 0) {
                adr = get_Store_ptr(pred);
                ldst_info->flags |= LDST_VISITED;
            }

            /*
             * Place the Proj's in the same block as the
             * predecessor Store. This is always ok and prevents
             * "non-SSA" form after optimizations if the Proj
             * is in the wrong block.
             */
            blk      = get_nodes_block(node);
            pred_blk = get_nodes_block(pred);
            if (blk != pred_blk) {
                wenv->changes |= DF_CHANGED;
                set_nodes_block(node, pred_blk);
            }
        }
    } else if (op == op_Block) {
        int i;

        for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
            ir_node      *pred_block;
            block_info_t *bl_info;

            pred = skip_Proj(get_Block_cfgpred(node, i));

            /* ignore Bad predecessors, they will be removed later */
            if (is_Bad(pred))
                continue;

            pred_block = get_nodes_block(pred);
            bl_info    = get_block_info(pred_block, wenv);

            if (is_fragile_op(pred))
                bl_info->flags |= BLOCK_HAS_EXC;
            else if (is_irn_forking(pred))
                bl_info->flags |= BLOCK_HAS_COND;

            if (get_irn_op(pred) == op_Load || get_irn_op(pred) == op_Store) {
                ldst_info = get_ldst_info(pred, wenv);

                wenv->changes |= update_exc(ldst_info, node, i);
            }
        }
    }
} /* collect_nodes */
/**
 * Returns an entity if the address ptr points to a constant one.
 *
 * @param ptr  the address
 *
 * @return an entity or NULL
 */
static ir_entity *find_constant_entity(ir_node *ptr)
{
    for (;;) {
        ir_op *op = get_irn_op(ptr);

        if (op == op_SymConst && (get_SymConst_kind(ptr) == symconst_addr_ent)) {
            ir_entity *ent = get_SymConst_entity(ptr);
            if (variability_constant == get_entity_variability(ent))
                return ent;
            return NULL;
        } else if (op == op_Sel) {
            ir_entity *ent = get_Sel_entity(ptr);
            ir_type   *tp  = get_entity_owner(ent);

            /* Do not fiddle with polymorphism. */
            if (is_Class_type(get_entity_owner(ent)) &&
                ((get_entity_n_overwrites(ent)     != 0) ||
                 (get_entity_n_overwrittenby(ent)  != 0)   ) )
                return NULL;

            if (is_Array_type(tp)) {
                int i, n;

                /* check bounds: the access must be in the array bounds */
                for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
                    ir_node *bound;
                    tarval  *tlower, *tupper;
                    ir_node *index = get_Sel_index(ptr, i);
                    tarval  *tv    = computed_value(index);

                    /* check if the index is constant */
                    if (tv == tarval_bad)
                        return NULL;

                    bound  = get_array_lower_bound(tp, i);
                    tlower = computed_value(bound);
                    bound  = get_array_upper_bound(tp, i);
                    tupper = computed_value(bound);

                    if (tlower == tarval_bad || tupper == tarval_bad)
                        return NULL;

                    if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
                        return NULL;
                    if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
                        return NULL;

                    /* ok, bounds check finished */
                }
            }

            if (variability_constant == get_entity_variability(ent))
                return ent;

            /* try next */
            ptr = get_Sel_ptr(ptr);
        } else
            return NULL;
    }
} /* find_constant_entity */
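
/* Example (sketch) of what find_constant_entity() accepts: for C code like
 *
 *     static const int tbl[4] = { 1, 2, 3, 5 };
 *     ... tbl[2] ...
 *
 * the address is Sel(SymConst(tbl), Const 2); the index is constant and lies
 * within the array bounds, and tbl has variability_constant, so the entity
 * of tbl is returned. A non-constant or out-of-bounds index yields NULL. */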
/**
 * Return the Selection index of a Sel node from dimension dim.
 */
static long get_Sel_array_index_long(ir_node *n, int dim) {
    ir_node *index = get_Sel_index(n, dim);
    assert(get_irn_op(index) == op_Const);
    return get_tarval_long(get_Const_tarval(index));
} /* get_Sel_array_index_long */
/**
 * Returns the accessed component graph path for a
 * node computing an address.
 *
 * @param ptr    the node computing the address
 * @param depth  current depth in steps upward from the root
 */
static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
    compound_graph_path *res = NULL;
    ir_entity           *root, *field;
    int                 path_len, pos;

    if (get_irn_op(ptr) == op_SymConst) {
        /* a SymConst. If the depth is 0, this is an access to a global
         * entity and we don't need a component path, else we know
         * at least its length.
         */
        assert(get_SymConst_kind(ptr) == symconst_addr_ent);
        root = get_SymConst_entity(ptr);
        res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
    } else {
        assert(get_irn_op(ptr) == op_Sel);
        /* it's a Sel, go up until we find the root */
        res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);

        /* fill up the step in the path at the current position */
        field    = get_Sel_entity(ptr);
        path_len = get_compound_graph_path_length(res);
        pos      = path_len - depth - 1;
        set_compound_graph_path_node(res, pos, field);

        if (is_Array_type(get_entity_owner(field))) {
            assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
            set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
        }
    }
    return res;
} /* rec_get_accessed_path */
/** Returns an access path or NULL. The access path is only
 *  valid if the graph is in phase_high and _no_ address computation is used.
 */
static compound_graph_path *get_accessed_path(ir_node *ptr) {
    return rec_get_accessed_path(ptr, 0);
} /* get_accessed_path */
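
/* Example (sketch): for a high-level access like s.a[3].b the Sel chain is
 *
 *     Sel(Sel(Sel(SymConst(s), a), Const 3), b)
 *
 * and rec_get_accessed_path() yields the compound graph path (a, [3], b),
 * filled from the innermost Sel outwards using pos = path_len - depth - 1. */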
static void reduce_adr_usage(ir_node *ptr);

/**
 * Update a Load that may have lost its uses.
 */
static void handle_load_update(ir_node *load) {
    ldst_info_t *info = get_irn_link(load);

    /* do NOT touch volatile loads for now */
    if (get_Load_volatility(load) == volatility_is_volatile)
        return;

    if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
        ir_node *ptr = get_Load_ptr(load);
        ir_node *mem = get_Load_mem(load);

        /* a Load whose value is neither used nor exception checked, remove it */
        exchange(info->projs[pn_Load_M], mem);
        exchange(load, new_Bad());
        reduce_adr_usage(ptr);
    }
} /* handle_load_update */
/**
 * A use of an address node has vanished. Check if it was a Proj
 * node and update the counters.
 */
static void reduce_adr_usage(ir_node *ptr) {
    if (is_Proj(ptr)) {
        if (get_irn_n_edges(ptr) <= 0) {
            /* this Proj is dead now */
            ir_node *pred = get_Proj_pred(ptr);

            if (get_irn_op(pred) == op_Load) {
                ldst_info_t *info = get_irn_link(pred);
                info->projs[get_Proj_proj(ptr)] = NULL;

                /* this node lost its result Proj, handle that */
                handle_load_update(pred);
            }
        }
    }
} /* reduce_adr_usage */
/**
 * Check, if an already existing value of mode old_mode can be converted
 * into the needed one new_mode without loss.
 */
static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode) {
    if (old_mode == new_mode)
        return 1;

    /* if both modes are two's complement ones, we can always convert the
       Stored value into the needed one. */
    if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
        get_mode_arithmetic(old_mode) == irma_twos_complement &&
        get_mode_arithmetic(new_mode) == irma_twos_complement)
        return 1;
    return 0;
} /* can_use_stored_value */
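
/* Illustrative checks (sketch, not compiled; assumes the standard Firm
 * integer modes mode_Is = 32 bit signed and mode_Bs = 8 bit signed): */
#if 0
    can_use_stored_value(mode_Is, mode_Bs); /* 32 -> 8 bit: 1, the low bits suffice   */
    can_use_stored_value(mode_Bs, mode_Is); /* 8 -> 32 bit: 0, upper bits are missing */
    can_use_stored_value(mode_F,  mode_Is); /* float -> int: 0, not two's complement  */
#endif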
/**
 * Follow the memory chain as long as there are only Loads
 * and alias free Stores and try to replace the current Load or Store
 * by a previous one.
 * Note that in unreachable loops it might happen that we reach the
 * load again, or that we fall into a cycle.
 * We break such cycles using a special visited flag.
 *
 * INC_MASTER() must be called before diving into the chain.
 */
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
    unsigned    res = 0;
    ldst_info_t *info = get_irn_link(load);
    ir_node     *pred;
    ir_node     *ptr       = get_Load_ptr(load);
    ir_node     *mem       = get_Load_mem(load);
    ir_mode     *load_mode = get_Load_mode(load);

    for (pred = curr; load != pred; ) {
        ldst_info_t *pred_info = get_irn_link(pred);

        /*
         * BEWARE: one might think that checking the modes is useless, because
         * if the pointers are identical, they refer to the same object.
         * This is only true in strongly typed languages, not in C, where the
         * following is possible: a = *(ir_type1 *)p; b = *(ir_type2 *)p ...
         */
        if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
            can_use_stored_value(get_irn_mode(get_Store_value(pred)), load_mode)) {
            /*
             * a Load immediately after a Store -- a read after write.
             * We may remove the Load, if both Load & Store do not have an
             * exception handler OR they are in the same block. In the latter
             * case the Load cannot throw an exception when the previous Store
             * was quiet.
             *
             * Why do we need to check for a Store exception? If the Store
             * cannot be executed (ROM) the exception handler might simply
             * jump into the Load's block.
             * We could make it a little bit better if we would know that the
             * exception handler of the Store jumps directly to the end...
             */
            if ((pred_info->projs[pn_Store_X_except] == NULL && info->projs[pn_Load_X_except] == NULL) ||
                get_nodes_block(load) == get_nodes_block(pred)) {
                ir_node *value = get_Store_value(pred);

                DBG_OPT_RAW(load, value);

                /* add a Conv if needed */
                if (get_irn_mode(get_Store_value(pred)) != load_mode) {
                    value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
                }

                if (info->projs[pn_Load_M])
                    exchange(info->projs[pn_Load_M], mem);

                /* no exception */
                if (info->projs[pn_Load_X_except]) {
                    exchange(info->projs[pn_Load_X_except], new_Bad());
                    res |= CF_CHANGED;
                }

                if (info->projs[pn_Load_res])
                    exchange(info->projs[pn_Load_res], value);

                exchange(load, new_Bad());
                reduce_adr_usage(ptr);
                return res | DF_CHANGED;
            }
        } else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
                   can_use_stored_value(get_Load_mode(pred), load_mode)) {
            /*
             * a Load after a Load -- a read after read.
             * We may remove the second Load, if it does not have an exception
             * handler OR they are in the same block. In the latter case the
             * Load cannot throw an exception when the previous Load was quiet.
             *
             * Here, there is no need to check if the previous Load has an
             * exception handler because they would have exactly the same
             * exception...
             */
            if (info->projs[pn_Load_X_except] == NULL || get_nodes_block(load) == get_nodes_block(pred)) {
                ir_node *value;

                DBG_OPT_RAR(load, pred);

                /* the result is used */
                if (info->projs[pn_Load_res]) {
                    if (pred_info->projs[pn_Load_res] == NULL) {
                        /* create a new Proj again */
                        pred_info->projs[pn_Load_res] = new_r_Proj(current_ir_graph, get_nodes_block(pred), pred, get_Load_mode(pred), pn_Load_res);
                    }
                    value = pred_info->projs[pn_Load_res];

                    /* add a Conv if needed */
                    if (get_Load_mode(pred) != load_mode) {
                        value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
                    }

                    exchange(info->projs[pn_Load_res], value);
                }

                if (info->projs[pn_Load_M])
                    exchange(info->projs[pn_Load_M], mem);

                /* no exception */
                if (info->projs[pn_Load_X_except]) {
                    exchange(info->projs[pn_Load_X_except], new_Bad());
                    res |= CF_CHANGED;
                }

                exchange(load, new_Bad());
                reduce_adr_usage(ptr);
                return res | DF_CHANGED;
            }
        }

        if (get_irn_op(pred) == op_Store) {
            /* check if we can pass through this store */
            ir_alias_relation rel = get_alias_relation(
                current_ir_graph,
                get_Store_ptr(pred),
                get_irn_mode(get_Store_value(pred)),
                ptr, load_mode);
            /* if there might be an alias, we cannot pass this Store */
            if (rel != no_alias)
                break;
            pred = skip_Proj(get_Store_mem(pred));
        } else if (get_irn_op(pred) == op_Load) {
            pred = skip_Proj(get_Load_mem(pred));
        } else {
            /* follow only Load chains */
            break;
        }

        /* check for cycles */
        if (NODE_VISITED(pred_info))
            break;
        MARK_NODE(pred_info);
    }

    if (get_irn_op(pred) == op_Sync) {
        int i;

        /* handle all Sync predecessors */
        for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
            res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
        }
    }

    return res;
} /* follow_Mem_chain */
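
/* C-level picture of the two replacements performed above (sketch):
 *
 *     read after write:  *p = x;  y = *p;    ==>    *p = x;  y = x;
 *     read after read:   x = *p;  y = *p;    ==>    x = *p;  y = x;
 *
 * In both cases the Load disappears and its result Proj is rewired to the
 * stored value or the older Load's result (through a Conv if the modes
 * differ). */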
/**
 * optimize a Load
 *
 * @param load  the Load node
 */
static unsigned optimize_load(ir_node *load)
{
    ldst_info_t *info = get_irn_link(load);
    ir_node     *mem, *ptr, *new_node;
    ir_entity   *ent;
    unsigned    res = 0;

    /* do NOT touch volatile loads for now */
    if (get_Load_volatility(load) == volatility_is_volatile)
        return 0;

    /* the address of the load to be optimized */
    ptr = get_Load_ptr(load);

    /*
     * Check if we can remove the exception from a Load:
     * This can be done, if the address is from a Sel(Alloc) and
     * the Sel type is a subtype of the allocated type.
     *
     * This optimizes some often used OO constructs,
     * like x = new O; x->t;
     */
    if (info->projs[pn_Load_X_except]) {
        if (get_irn_op(ptr) == op_Sel) {
            ir_node *mem = get_Sel_mem(ptr);

            /* FIXME: works with the current FE, but better use the base */
            if (get_irn_op(skip_Proj(mem)) == op_Alloc) {
                /* ok, check the types */
                ir_entity *ent    = get_Sel_entity(ptr);
                ir_type   *s_type = get_entity_type(ent);
                ir_type   *a_type = get_Alloc_type(mem);

                if (is_SubClass_of(s_type, a_type)) {
                    /* ok, condition met: there can't be an exception because
                     * Alloc guarantees that enough memory was allocated */
                    exchange(info->projs[pn_Load_X_except], new_Bad());
                    info->projs[pn_Load_X_except] = NULL;
                    res |= CF_CHANGED;
                }
            }
        } else if ((get_irn_op(skip_Proj(ptr)) == op_Alloc) ||
                   ((get_irn_op(ptr) == op_Cast) && (get_irn_op(skip_Proj(get_Cast_op(ptr))) == op_Alloc))) {
            /* simple case: a direct load after an Alloc. A Firm Alloc throws
             * an exception in case of out-of-memory, so there is no way for an
             * exception in this Load.
             * This code is constructed by the "exception lowering" in the Jack compiler.
             */
            exchange(info->projs[pn_Load_X_except], new_Bad());
            info->projs[pn_Load_X_except] = NULL;
            res |= CF_CHANGED;
        }
    }

    /* The mem of the Load. Must still be returned after optimization. */
    mem = get_Load_mem(load);

    if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
        /* a Load whose value is neither used nor exception checked, remove it */
        exchange(info->projs[pn_Load_M], mem);

        exchange(load, new_Bad());
        reduce_adr_usage(ptr);
        return res | DF_CHANGED;
    }

    /* Load from a constant polymorphic field, where we can resolve
       polymorphism. */
    new_node = transform_node_Load(load);
    if (new_node != load) {
        if (info->projs[pn_Load_M]) {
            exchange(info->projs[pn_Load_M], mem);
            info->projs[pn_Load_M] = NULL;
        }
        if (info->projs[pn_Load_X_except]) {
            exchange(info->projs[pn_Load_X_except], new_Bad());
            info->projs[pn_Load_X_except] = NULL;
        }
        if (info->projs[pn_Load_res])
            exchange(info->projs[pn_Load_res], new_node);

        exchange(load, new_Bad());
        reduce_adr_usage(ptr);
        return res | DF_CHANGED;
    }

    /* check if we can determine the entity that will be loaded */
    ent = find_constant_entity(ptr);
    if (ent) {
        if ((allocation_static == get_entity_allocation(ent)) &&
            (visibility_external_allocated != get_entity_visibility(ent))) {
            /* a static allocation that is not external: there should be NO
             * exception when loading. */

            /* no exception, clear the info field as it might be checked later again */
            if (info->projs[pn_Load_X_except]) {
                exchange(info->projs[pn_Load_X_except], new_Bad());
                info->projs[pn_Load_X_except] = NULL;
                res |= CF_CHANGED;
            }

            if (variability_constant == get_entity_variability(ent)
                && is_atomic_entity(ent)) {
                /* Might not be atomic after lowering of Sels. In this
                   case we could also load, but it's more complicated. */
                /* simpler case: we load the content of a constant value:
                 * replace it by the constant itself */

                /* no memory */
                if (info->projs[pn_Load_M]) {
                    exchange(info->projs[pn_Load_M], mem);
                    res |= DF_CHANGED;
                }

                /* no result :-) */
                if (info->projs[pn_Load_res]) {
                    if (is_atomic_entity(ent)) {
                        ir_node *c = copy_const_value(get_irn_dbg_info(load), get_atomic_ent_value(ent));

                        exchange(info->projs[pn_Load_res], c);
                        res |= DF_CHANGED;
                    }
                }
                exchange(load, new_Bad());
                reduce_adr_usage(ptr);
                return res;
            } else if (variability_constant == get_entity_variability(ent)) {
                compound_graph_path *path = get_accessed_path(ptr);

                if (path) {
                    ir_node *c;
                    int     j;

                    assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));

                    /* debug output of the accessed path */
                    for (j = 0; j < get_compound_graph_path_length(path); ++j) {
                        ir_entity *node = get_compound_graph_path_node(path, j);
                        fprintf(stdout, ".%s", get_entity_name(node));
                        if (is_Array_type(get_entity_owner(node)))
                            fprintf(stdout, "[%d]", get_compound_graph_path_array_index(path, j));
                    }
                    fprintf(stdout, "\n");

                    c = get_compound_ent_value_by_path(ent, path);
                    free_compound_graph_path(path);

                    /* printf("  cons: "); DDMN(c); */

                    if (info->projs[pn_Load_M]) {
                        exchange(info->projs[pn_Load_M], mem);
                        res |= DF_CHANGED;
                    }
                    if (info->projs[pn_Load_res]) {
                        exchange(info->projs[pn_Load_res], copy_const_value(get_irn_dbg_info(load), c));
                        res |= DF_CHANGED;
                    }
                    exchange(load, new_Bad());
                    reduce_adr_usage(ptr);
                    return res;
                } else {
                    /* We cannot determine a correct access path. E.g., in jack, we load
                       a byte from an object to generate an exception. Happens in test programs.
                       printf(">>>>>>>>>>>>> Found access to constant entity %s in function %s\n", get_entity_name(ent),
                           get_entity_name(get_irg_entity(current_ir_graph)));
                       printf("  load: "); DDMN(load);
                       printf("  ptr:  "); DDMN(ptr);
                     */
                }
            }
        }
    }

    /* Check, if the address of this load is used more than once.
     * If not, this load cannot be removed in any case. */
    if (get_irn_n_uses(ptr) <= 1)
        return res;

    /*
     * follow the memory chain as long as there are only Loads
     * and try to replace the current Load or Store by a previous one.
     * Note that in unreachable loops it might happen that we reach the
     * load again, or that we fall into a cycle.
     * We break such cycles using a special visited flag.
     */
    INC_MASTER();
    res = follow_Mem_chain(load, skip_Proj(mem));
    return res;
} /* optimize_load */
/**
 * Check whether a value of mode new_mode would completely overwrite a value
 * of mode old_mode in memory.
 */
static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
{
    return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
} /* is_completely_overwritten */
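
/* Illustrative checks (sketch, not compiled; uses the standard Firm
 * integer modes mode_Bs = 8 bit signed and mode_Is = 32 bit signed): */
#if 0
    is_completely_overwritten(mode_Bs, mode_Is); /* 8-bit old, 32-bit new: 1 */
    is_completely_overwritten(mode_Is, mode_Bs); /* 32-bit old, 8-bit new: 0,
                                                    the upper bytes survive  */
#endif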
/**
 * follow the memory chain as long as there are only Loads and alias-free
 * Stores.
 *
 * INC_MASTER() must be called before diving into the chain.
 */
static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
    unsigned    res = 0;
    ldst_info_t *info = get_irn_link(store);
    ir_node     *pred;
    ir_node     *ptr   = get_Store_ptr(store);
    ir_node     *mem   = get_Store_mem(store);
    ir_node     *value = get_Store_value(store);
    ir_mode     *mode  = get_irn_mode(value);
    ir_node     *block = get_nodes_block(store);

    for (pred = curr; pred != store;) {
        ldst_info_t *pred_info = get_irn_link(pred);

        /*
         * BEWARE: one might think that checking the modes is useless, because
         * if the pointers are identical, they refer to the same object.
         * This is only true in strongly typed languages, not in C, where the
         * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
         * However, if the mode that is written has a size bigger than or equal
         * to the old one, the old value is completely overwritten and can be
         * killed ...
         */
        if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
            get_nodes_block(pred) == block &&
            is_completely_overwritten(get_irn_mode(get_Store_value(pred)), mode)) {
            /*
             * a Store after a Store in the same block -- a write after write.
             * We may remove the first Store, if it does not have an exception handler.
             *
             * TODO: What if both have the same exception handler ???
             */
            if (get_Store_volatility(pred) != volatility_is_volatile && !pred_info->projs[pn_Store_X_except]) {
                DBG_OPT_WAW(pred, store);
                exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
                exchange(pred, new_Bad());
                reduce_adr_usage(ptr);
                return DF_CHANGED;
            }
        } else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
                   value == pred_info->projs[pn_Load_res]) {
            /*
             * a Store of a value just Loaded -- a write after read.
             * We may remove the Store, if it does not have an exception handler.
             */
            if (! info->projs[pn_Store_X_except]) {
                DBG_OPT_WAR(store, pred);
                exchange(info->projs[pn_Store_M], mem);
                exchange(store, new_Bad());
                reduce_adr_usage(ptr);
                return DF_CHANGED;
            }
        }

        if (get_irn_op(pred) == op_Store) {
            /* check if we can pass through this store */
            ir_alias_relation rel = get_alias_relation(
                current_ir_graph,
                get_Store_ptr(pred),
                get_irn_mode(get_Store_value(pred)),
                ptr, mode);
            /* if there might be an alias, we cannot pass this Store */
            if (rel != no_alias)
                break;
            pred = skip_Proj(get_Store_mem(pred));
        } else if (get_irn_op(pred) == op_Load) {
            pred = skip_Proj(get_Load_mem(pred));
        } else {
            /* follow only Load chains */
            break;
        }

        /* check for cycles */
        if (NODE_VISITED(pred_info))
            break;
        MARK_NODE(pred_info);
    }

    if (get_irn_op(pred) == op_Sync) {
        int i;

        /* handle all Sync predecessors */
        for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
            res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
        }
    }

    return res;
} /* follow_Mem_chain_for_Store */
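
/* C-level picture of the two replacements performed above (sketch):
 *
 *     write after write:  *p = a;  *p = b;   ==>   *p = b;
 *     write after read:   a = *p;  *p = a;   ==>   a = *p;
 *
 * The first case kills the older Store (same block, value completely
 * overwritten), the second kills a Store that writes back an unchanged
 * value. */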
/**
 * optimize a Store
 *
 * @param store  the Store node
 */
static unsigned optimize_store(ir_node *store) {
    ir_node *ptr, *mem;

    if (get_Store_volatility(store) == volatility_is_volatile)
        return 0;

    ptr = get_Store_ptr(store);

    /* Check, if the address of this Store is used more than once.
     * If not, this Store cannot be removed in any case. */
    if (get_irn_n_uses(ptr) <= 1)
        return 0;

    mem = get_Store_mem(store);

    /* follow the memory chain as long as there are only Loads */
    INC_MASTER();
    return follow_Mem_chain_for_Store(store, skip_Proj(mem));
} /* optimize_store */
/**
 * walker, optimizes Phi after Stores to identical places:
 * Does the following optimization:
 *
 *   val1   val2   val3          val1  val2  val3
 *  Store  Store  Store            \    |    /
 *
 * This reduces the number of stores and allows for predicated execution.
 * Moves Stores back to the end of a function, which may be bad.
 *
 * This is only possible if the predecessor blocks have only one successor.
 */
static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
{
    int i, n;
    ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
    ir_mode *mode;
    ir_node **inM, **inD, **stores;
    int *idx;
    dbg_info *db = NULL;
    ldst_info_t *info;
    block_info_t *bl_info;
    unsigned res = 0;

    /* Must be a memory Phi */
    if (get_irn_mode(phi) != mode_M)
        return 0;

    n = get_Phi_n_preds(phi);
    if (n <= 0)
        return 0;

    store = skip_Proj(get_Phi_pred(phi, 0));
    old_store = store;
    if (get_irn_op(store) != op_Store)
        return 0;

    block = get_nodes_block(store);

    /* abort on dead blocks */
    if (is_Block_dead(block))
        return 0;

    /* check if the block is post-dominated by the Phi block
       and has no exception exit */
    bl_info = get_irn_link(block);
    if (bl_info->flags & BLOCK_HAS_EXC)
        return 0;

    phi_block = get_nodes_block(phi);
    if (! block_postdominates(phi_block, block))
        return 0;

    /* this is the address of the store */
    ptr  = get_Store_ptr(store);
    mode = get_irn_mode(get_Store_value(store));
    info = get_irn_link(store);
    exc  = info->exc_block;

    for (i = 1; i < n; ++i) {
        ir_node *pred = skip_Proj(get_Phi_pred(phi, i));

        if (get_irn_op(pred) != op_Store)
            return 0;

        if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
            return 0;

        info = get_irn_link(pred);

        /* check, if all stores have the same exception flow */
        if (exc != info->exc_block)
            return 0;

        /* abort on dead blocks */
        block = get_nodes_block(pred);
        if (is_Block_dead(block))
            return 0;

        /* check if the block is post-dominated by the Phi block
           and has no exception exit. Note that block must be different from
           the Phi block, else we would move a Store from the end of a block to its
           start... */
        bl_info = get_irn_link(block);
        if (bl_info->flags & BLOCK_HAS_EXC)
            return 0;
        if (block == phi_block || ! block_postdominates(phi_block, block))
            return 0;
    }

    /*
     * ok, when we are here, we found all predecessors of a Phi that
     * are Stores to the same address and size. That means whatever
     * we do before we enter the block of the Phi, we do a Store.
     * So, we can move the Store to the current block:
     *
     *   val1    val2    val3          val1  val2  val3
     *  | Str |  | Str |  | Str |        \    |    /
     *
     * This is only allowed if the predecessor blocks have only one successor.
     */

    NEW_ARR_A(ir_node *, stores, n);
    NEW_ARR_A(ir_node *, inM, n);
    NEW_ARR_A(ir_node *, inD, n);
    NEW_ARR_A(int, idx, n);

    /* Prepare: Collect all Store nodes. We must do this
       first because we otherwise may lose a Store when exchanging its
       memory Proj.
     */
    for (i = 0; i < n; ++i)
        stores[i] = skip_Proj(get_Phi_pred(phi, i));

    /* Prepare: Skip the memory Proj: we need this in the case some Stores
       are cascaded.
       Beware: One Store might be included more than once in the stores[]
       list, so we must avoid doing the exchange more than once.
     */
    for (i = 0; i < n; ++i) {
        ir_node *store = stores[i];
        ir_node *proj_m;

        info   = get_irn_link(store);
        proj_m = info->projs[pn_Store_M];

        if (is_Proj(proj_m) && get_Proj_pred(proj_m) == store)
            exchange(proj_m, get_Store_mem(store));
    }

    /* first step: collect all inputs */
    for (i = 0; i < n; ++i) {
        ir_node *store = stores[i];
        info = get_irn_link(store);

        inM[i] = get_Store_mem(store);
        inD[i] = get_Store_value(store);
        idx[i] = info->exc_idx;
    }
    block = get_nodes_block(phi);

    /* second step: create a new memory Phi */
    phiM = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inM, mode_M);

    /* third step: create a new data Phi */
    phiD = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inD, mode);

    /* fourth step: create the Store */
    store = new_rd_Store(db, current_ir_graph, block, phiM, ptr, phiD);
#ifdef DO_CACHEOPT
    co_set_irn_name(store, co_get_irn_ident(old_store));
#endif

    projM = new_rd_Proj(NULL, current_ir_graph, block, store, mode_M, pn_Store_M);

    info = get_ldst_info(store, wenv);
    info->projs[pn_Store_M] = projM;

    /* fifth step: repair exception flow */
    if (exc) {
        ir_node *projX = new_rd_Proj(NULL, current_ir_graph, block, store, mode_X, pn_Store_X_except);

        info->projs[pn_Store_X_except] = projX;
        info->exc_block                = exc;
        info->exc_idx                  = idx[0];

        for (i = 0; i < n; ++i) {
            set_Block_cfgpred(exc, idx[i], projX);
        }

        /* the exception block should be optimized as some inputs are identical now */

        res |= CF_CHANGED;
    }

    /* sixth step: replace old Phi */
    exchange(phi, projM);

    return res | DF_CHANGED;
} /* optimize_phi */
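
/* C-level picture of the Phi-after-Store optimization above (sketch):
 *
 *     if (cond) *p = a; else *p = b;    ==>    *p = cond ? a : b;
 *
 * The per-branch Stores are replaced by a data Phi feeding a single Store
 * in the post-dominating block, which enables predicated execution. */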
/**
 * walker, do the optimizations
 */
static void do_load_store_optimize(ir_node *n, void *env) {
    walk_env_t *wenv = env;

    switch (get_irn_opcode(n)) {
    case iro_Load:
        wenv->changes |= optimize_load(n);
        break;

    case iro_Store:
        wenv->changes |= optimize_store(n);
        break;

    case iro_Phi:
        wenv->changes |= optimize_phi(n, wenv);
        break;

    default:
        break;
    }
} /* do_load_store_optimize */
/*
 * do the load store optimization
 */
void optimize_load_store(ir_graph *irg) {
    walk_env_t env;

    assert(get_irg_phase_state(irg) != phase_building);
    assert(get_irg_pinned(irg) != op_pin_state_floats &&
        "LoadStore optimization needs pinned graph");

    if (! get_opt_redundant_loadstore())
        return;

    /* for Phi optimization post-dominators are needed ... */
    assure_postdoms(irg);

    if (get_opt_alias_analysis()) {
        assure_irg_address_taken_computed(irg);
        assure_irp_globals_address_taken_computed();
    }

    obstack_init(&env.obst);
    env.changes = 0;

    /* init the links, then collect Loads/Stores/Proj's in lists */
    master_visited = 0;
    irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);

    /* now we have collected enough information, optimize */
    irg_walk_graph(irg, NULL, do_load_store_optimize, &env);

    obstack_free(&env.obst, NULL);

    /* Handle graph state */
    if (env.changes) {
        if (get_irg_outs_state(irg) == outs_consistent)
            set_irg_outs_inconsistent(irg);
    }

    if (env.changes & CF_CHANGED) {
        /* Is this really needed? Yes: control flow changed, blocks might
           have Bad() predecessors. */
        set_irg_doms_inconsistent(irg);
    }
} /* optimize_load_store */
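
/* A minimal driver sketch (illustrative only, not compiled): running the
 * optimization on every graph of the program, assuming the usual irprog
 * accessors get_irp_n_irgs()/get_irp_irg(). */
#if 0
    {
        int i;
        for (i = get_irp_n_irgs() - 1; i >= 0; --i)
            optimize_load_store(get_irp_irg(i));
    }
#endif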