 * File name:   ir/opt/ldstopt.c
 * Purpose:     load store optimizations
 * Copyright:   (c) 1998-2004 Universität Karlsruhe
 * Licence:     This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
#include "irnode_t.h"
#include "irgraph_t.h"
#include "irmode_t.h"
#include "ircons_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "irflag_t.h"
#include "opt_polymorphy.h"
#include "cacheopt/cachesim.h"
#define IMAX(a,b)   ((a) > (b) ? (a) : (b))

#define MAX_PROJ    IMAX(pn_Load_max, pn_Store_max)
  DF_CHANGED = 1,       /**< data flow changed */
  CF_CHANGED = 2,       /**< control flow changed */
typedef struct _walk_env_t {
  struct obstack obst;          /**< list of all stores */
  unsigned changes;             /**< a bitmask of graph changes */
} walk_env_t;
/**
 * flags for Load/Store
 */
enum ldst_flags_t {
  LDST_VISITED = 1              /**< if set, this Load/Store is already visited */
};
typedef struct _ldst_info_t {
  ir_node  *projs[MAX_PROJ];    /**< list of Proj's of this node */
  ir_node  *exc_block;          /**< the exception block if available */
  int      exc_idx;             /**< predecessor index in the exception block */
  unsigned flags;               /**< flags */
} ldst_info_t;
/**
 * flags for control flow
 */
enum block_flags_t {
  BLOCK_HAS_COND = 1,           /**< Block has conditional control flow */
  BLOCK_HAS_EXC  = 2            /**< Block has exceptional control flow */
};
typedef struct _block_info_t {
  unsigned flags;               /**< flags for the block */
} block_info_t;
/**
 * get the Load/Store info of a node
 */
static ldst_info_t *get_ldst_info(ir_node *node, walk_env_t *env)
{
  ldst_info_t *info = get_irn_link(node);
  if (! info) {
    info = obstack_alloc(&env->obst, sizeof(*info));
    memset(info, 0, sizeof(*info));
    set_irn_link(node, info);
  }
  return info;
}
/**
 * get the Block info of a node
 */
static block_info_t *get_block_info(ir_node *node, walk_env_t *env)
{
  block_info_t *info = get_irn_link(node);
  if (! info) {
    info = obstack_alloc(&env->obst, sizeof(*info));
    memset(info, 0, sizeof(*info));
    set_irn_link(node, info);
  }
  return info;
}
/**
 * update the projection info for a Load/Store
 */
static unsigned update_projs(ldst_info_t *info, ir_node *proj)
{
  long nr = get_Proj_proj(proj);
  assert(0 <= nr && nr < MAX_PROJ && "Wrong proj from LoadStore");

  if (info->projs[nr]) {
    /* there is already one, do CSE */
    exchange(proj, info->projs[nr]);
    return DF_CHANGED;
  }
  info->projs[nr] = proj;
  return 0;
}
/**
 * update the exception block info for a Load/Store
 */
static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
{
  assert(info->exc_block == NULL && "more than one exception block found");

  info->exc_block = block;
  info->exc_idx   = pos;
  return 0;
}
#define get_irn_out_n(node)     (unsigned)PTR_TO_INT(get_irn_link(node))
#define set_irn_out_n(node, n)  set_irn_link(node, INT_TO_PTR(n))
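/*
 * Note: the link field of an address node is reused here as a plain usage
 * counter. Illustrative use (counting one more user of a Load's address):
 *
 *   ir_node *adr = get_Load_ptr(load);
 *   set_irn_out_n(adr, get_irn_out_n(adr) + 1);
 */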
/**
 * walker, collects all Load/Store/Proj nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
{
  ir_op       *op = get_irn_op(node);
  ir_node     *pred, *adr, *blk, *pred_blk;
  ldst_info_t *ldst_info;
  walk_env_t  *wenv = env;

  if (op == op_Proj) {
    pred = get_Proj_pred(node);
    op   = get_irn_op(pred);

    if (op == op_Load) {
      ldst_info = get_ldst_info(pred, wenv);

      wenv->changes |= update_projs(ldst_info, node);

      if ((ldst_info->flags & LDST_VISITED) == 0) {
        adr = get_Load_ptr(pred);
        set_irn_out_n(adr, get_irn_out_n(adr) + 1);

        ldst_info->flags |= LDST_VISITED;
      }

      /*
       * Place the Proj's to the same block as the
       * predecessor Load. This is always ok and prevents
       * "non-SSA" form after optimizations if the Proj
       * is in a wrong block.
       */
      blk      = get_nodes_block(node);
      pred_blk = get_nodes_block(pred);
      if (blk != pred_blk) {
        wenv->changes |= DF_CHANGED;
        set_nodes_block(node, pred_blk);
      }
    }
    else if (op == op_Store) {
      ldst_info = get_ldst_info(pred, wenv);

      wenv->changes |= update_projs(ldst_info, node);

      if ((ldst_info->flags & LDST_VISITED) == 0) {
        adr = get_Store_ptr(pred);
        set_irn_out_n(adr, get_irn_out_n(adr) + 1);

        ldst_info->flags |= LDST_VISITED;
      }

      /*
       * Place the Proj's to the same block as the
       * predecessor Store. This is always ok and prevents
       * "non-SSA" form after optimizations if the Proj
       * is in a wrong block.
       */
      blk      = get_nodes_block(node);
      pred_blk = get_nodes_block(pred);
      if (blk != pred_blk) {
        wenv->changes |= DF_CHANGED;
        set_nodes_block(node, pred_blk);
      }
    }
  }
  else if (op == op_Block) { /* check, if it's an exception block */
    int i, n;

    for (i = 0, n = get_Block_n_cfgpreds(node); i < n; ++i) {
      ir_node      *pred_block;
      block_info_t *bl_info;

      pred = skip_Proj(get_Block_cfgpred(node, i));

      /* ignore Bad predecessors, they will be removed later */
      if (is_Bad(pred))
        continue;

      pred_block = get_nodes_block(pred);
      bl_info    = get_block_info(pred_block, wenv);

      if (is_fragile_op(pred))
        bl_info->flags |= BLOCK_HAS_EXC;
      else if (is_forking_op(pred))
        bl_info->flags |= BLOCK_HAS_COND;

      if (get_irn_op(pred) == op_Load || get_irn_op(pred) == op_Store) {
        ldst_info = get_ldst_info(pred, wenv);

        wenv->changes |= update_exc(ldst_info, node, i);
      }
    }
  }
}
/**
 * Returns an entity if the address ptr points to a constant one.
 */
static entity *find_constant_entity(ir_node *ptr)
{
  for (;;) {
    ir_op *op = get_irn_op(ptr);

    if (op == op_SymConst && (get_SymConst_kind(ptr) == symconst_addr_ent)) {
      return get_SymConst_entity(ptr);
    }
    else if (op == op_Sel) {
      entity *ent = get_Sel_entity(ptr);
      type   *tp  = get_entity_owner(ent);

      /* Do not fiddle about polymorphy. */
      if (is_Class_type(get_entity_owner(ent)) &&
          ((get_entity_n_overwrites(ent)    != 0) ||
           (get_entity_n_overwrittenby(ent) != 0)   ))
        return NULL;

      if (variability_constant == get_entity_variability(ent))
        return ent;
      if (is_Array_type(tp)) {
        /* check bounds for all constant indices */
        int i, n;

        for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
          ir_node *bound;
          tarval  *tlower, *tupper;
          ir_node *index = get_Sel_index(ptr, i);
          tarval  *tv    = computed_value(index);

          /* check if the index is constant */
          if (tv == tarval_bad)
            return NULL;

          bound  = get_array_lower_bound(tp, i);
          tlower = computed_value(bound);
          bound  = get_array_upper_bound(tp, i);
          tupper = computed_value(bound);

          if (tlower == tarval_bad || tupper == tarval_bad)
            return NULL;

          /* the index must lie inside [lower, upper] */
          if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
            return NULL;
          if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
            return NULL;

          /* ok, bounds check finished */
        }
      }

      /* try the next Sel up the chain */
      ptr = get_Sel_ptr(ptr);
    }
    else
      return NULL;
  }
}
/**
 * Return the Selection index of a Sel node from dimension dim.
 */
static long get_Sel_array_index_long(ir_node *n, int dim) {
  ir_node *index = get_Sel_index(n, dim);
  assert(get_irn_op(index) == op_Const);
  return get_tarval_long(get_Const_tarval(index));
}
/**
 * Returns the accessed component graph path for a
 * node computing an address.
 *
 * @param ptr    the node computing the address
 * @param depth  current depth in steps upward from the root
 */
static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
  compound_graph_path *res = NULL;
  entity              *root, *field;
  int                  path_len, pos;
  if (get_irn_op(ptr) == op_SymConst) {
    /* a SymConst. If the depth is 0, this is an access to a global
     * entity and we don't need a component path, else we know
     * at least its length.
     */
    assert(get_SymConst_kind(ptr) == symconst_addr_ent);
    root = get_SymConst_entity(ptr);
    res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
  }
  else {
    assert(get_irn_op(ptr) == op_Sel);
    /* it's a Sel, go up until we find the root */
    res = rec_get_accessed_path(get_Sel_ptr(ptr), depth + 1);

    /* fill up the step in the path at the current position */
    field    = get_Sel_entity(ptr);
    path_len = get_compound_graph_path_length(res);
    pos      = path_len - depth - 1;
    set_compound_graph_path_node(res, pos, field);

    if (is_Array_type(get_entity_owner(field))) {
      assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
      set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
    }
  }
  return res;
}
/** Returns an access path or NULL. The access path is only
 *  valid if the graph is in phase_high and _no_ address computation is used.
 */
static compound_graph_path *get_accessed_path(ir_node *ptr) {
  return rec_get_accessed_path(ptr, 0);
}
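/*
 * Illustrative example (not from the original source): for a constant
 * global like
 *
 *   static const struct { int a[4]; } s = { { 1, 2, 3, 5 } };
 *
 * the address chain Sel(Sel(SymConst(s), a), 2) yields the compound
 * graph path "s.a[2]", which get_compound_ent_value_by_path() can use
 * below to fetch the constant value 3.
 */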
/**
 * optimizes a Load
 */
static unsigned optimize_load(ir_node *load)
{
  ldst_info_t *info = get_irn_link(load);
  ir_mode     *load_mode = get_Load_mode(load);
  ir_node     *pred, *mem, *ptr, *new_node;
  entity      *ent;
  unsigned    res = 0;
  /* do NOT touch volatile loads for now */
  if (get_Load_volatility(load) == volatility_is_volatile)
    return 0;

  /* the address of the load to be optimized */
  ptr = get_Load_ptr(load);
  /*
   * Check if we can remove the exception from a Load:
   * This can be done, if the address is from a Sel(Alloc) and
   * the Sel type is a subtype of the allocated type.
   *
   * This optimizes some often used OO constructs,
   * like x = new O; x->t;
   */
  if (info->projs[pn_Load_X_except]) {
    if (get_irn_op(ptr) == op_Sel) {
      ir_node *mem = get_Sel_mem(ptr);

      if (get_irn_op(skip_Proj(mem)) == op_Alloc) {
        /* ok, check the types */
        entity *ent    = get_Sel_entity(ptr);
        type   *s_type = get_entity_type(ent);
        type   *a_type = get_Alloc_type(mem);

        if (is_subclass_of(s_type, a_type)) {
          /* ok, condition met: there can't be an exception because
           * Alloc guarantees that enough memory was allocated */
          exchange(info->projs[pn_Load_X_except], new_Bad());
          info->projs[pn_Load_X_except] = NULL;
          res |= CF_CHANGED;
        }
      }
    }
    else if ((get_irn_op(skip_Proj(ptr)) == op_Alloc) ||
             ((get_irn_op(ptr) == op_Cast) && (get_irn_op(skip_Proj(get_Cast_op(ptr))) == op_Alloc))) {
      /* simple case: a direct load after an Alloc. Firm's Alloc throws
       * an exception in case of out-of-memory. So, there is no way for an
       * exception in this load.
       * This code is constructed by the "exception lowering" in the Jack compiler.
       */
      exchange(info->projs[pn_Load_X_except], new_Bad());
      info->projs[pn_Load_X_except] = NULL;
      res |= CF_CHANGED;
    }
  }
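  /*
   * Illustrative source pattern (Java-like, as emitted by Jack):
   *
   *   O x = new O();   // the Alloc already throws on out-of-memory,
   *   ... = x.t;       // so this Load cannot fault; its X_except
   *                    // Proj is redundant and replaced by Bad
   */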
  /* the mem of the Load. Must still be returned after optimization */
  mem = get_Load_mem(load);

  if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
    /* a Load whose value is neither used nor exception checked, remove it */
    exchange(info->projs[pn_Load_M], mem);
    return res | DF_CHANGED;
  }
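  /*
   * The case above covers, illustratively, a dead read such as
   *
   *   int x = *p;   // x is never used
   *
   * where only the memory Proj remains and is rerouted to the Load's
   * own memory input.
   */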
  /* Load from a constant polymorphic field, where we can resolve polymorphy */
  new_node = transform_node_Load(load);
  if (new_node != load) {
    if (info->projs[pn_Load_M]) {
      exchange(info->projs[pn_Load_M], mem);
      info->projs[pn_Load_M] = NULL;
    }
    if (info->projs[pn_Load_X_except]) {
      exchange(info->projs[pn_Load_X_except], new_Bad());
      info->projs[pn_Load_X_except] = NULL;
    }
    if (info->projs[pn_Load_res])
      exchange(info->projs[pn_Load_res], new_node);
    return res | DF_CHANGED;
  }
  /* check if we can determine the entity that will be loaded */
  ent = find_constant_entity(ptr);
  if (ent) {
    if ((allocation_static == get_entity_allocation(ent)) &&
        (visibility_external_allocated != get_entity_visibility(ent))) {
      /* a static allocation that is not external: there should be NO exception
       * when loading. */

      /* no exception, clear the info field as it might be checked later again */
      if (info->projs[pn_Load_X_except]) {
        exchange(info->projs[pn_Load_X_except], new_Bad());
        info->projs[pn_Load_X_except] = NULL;
        res |= CF_CHANGED;
      }
      if (variability_constant == get_entity_variability(ent)
          && is_atomic_entity(ent)) {
        /* Might not be atomic after lowering of Sels. In this case we
         * could also load, but it's more complicated. */
        /* simpler case: we load the content of a constant value:
         * replace it by the constant itself */

        /* no memory */
        if (info->projs[pn_Load_M]) {
          exchange(info->projs[pn_Load_M], mem);
          res |= DF_CHANGED;
        }

        if (info->projs[pn_Load_res]) {
          if (is_atomic_entity(ent)) {
            ir_node *c = copy_const_value(get_atomic_ent_value(ent));

            exchange(info->projs[pn_Load_res], c);
            return DF_CHANGED | res;
          }
        }
      }
      else if (variability_constant == get_entity_variability(ent)) {
        compound_graph_path *path = get_accessed_path(ptr);

        if (path) {
          ir_node *c;
          int j;

          assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path) - 1));

          /* dump the accessed path for debugging */
          for (j = 0; j < get_compound_graph_path_length(path); ++j) {
            entity *node = get_compound_graph_path_node(path, j);
            fprintf(stdout, ".%s", get_entity_name(node));
            if (is_Array_type(get_entity_owner(node)))
              fprintf(stdout, "[%d]", get_compound_graph_path_array_index(path, j));
          }

          c = get_compound_ent_value_by_path(ent, path);
          free_compound_graph_path(path);

          /* printf(" cons: "); DDMN(c); */

          if (info->projs[pn_Load_M]) {
            exchange(info->projs[pn_Load_M], mem);
            res |= DF_CHANGED;
          }
          if (info->projs[pn_Load_res]) {
            exchange(info->projs[pn_Load_res], copy_const_value(c));
            return res | DF_CHANGED;
          }
        }
        else {
          /* We cannot determine a correct access path. E.g., in jack, we load
             a byte from an object to generate an exception. Happens in a test
             program. */
          printf(">>>>>>>>>>>>> Found access to constant entity %s in function %s\n", get_entity_name(ent),
                 get_entity_name(get_irg_entity(current_ir_graph)));
          printf("  load: "); DDMN(load);
          printf("  ptr:  "); DDMN(ptr);
        }
      }
    }
  }
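  /*
   * Illustrative example (not from the original source) of the constant
   * folding above:
   *
   *   static const int tab[4] = { 1, 2, 3, 5 };
   *   ... = tab[2];    // Load(Sel(tab, 2)) is replaced by Const 3
   */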
  /* Check, if the address of this load is used more than once.
   * If not, this load cannot be removed in any case. */
  if (get_irn_out_n(ptr) <= 1)
    return res;
  /* follow the memory chain as long as there are only Loads
   * and try to replace the current Load or Store by a previous one
   */
  for (pred = skip_Proj(mem); ; pred = skip_Proj(get_Load_mem(pred))) {
    /*
     * BEWARE: one might think that checking the modes is useless, because
     * if the pointers are identical, they refer to the same object.
     * This is only true in strongly typed languages, not in C where the
     * following is possible: a = *(type1 *)p; b = *(type2 *)p ...
     */
    if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
        get_irn_mode(get_Store_value(pred)) == load_mode) {
      ldst_info_t *pred_info = get_irn_link(pred);

      /*
       * a Load immediately after a Store -- a read after write.
       * We may remove the Load, if both Load & Store do not have an exception
       * handler OR they are in the same block. In the latter case the Load
       * cannot throw an exception when the previous Store was quiet.
       *
       * Why do we need to check for a Store exception? If the Store cannot
       * be executed (ROM) the exception handler might simply jump into
       * the load block.
       * We could make it a little bit better if we would know that the
       * exception handler of the Store jumps directly to the end...
       */
      if ((!pred_info->projs[pn_Store_X_except] && !info->projs[pn_Load_X_except]) ||
          get_nodes_block(load) == get_nodes_block(pred)) {
        ir_node *value = get_Store_value(pred);

        DBG_OPT_RAW(load, value);
        if (info->projs[pn_Load_M])
          exchange(info->projs[pn_Load_M], mem);

        /* no exception */
        if (info->projs[pn_Load_X_except]) {
          exchange(info->projs[pn_Load_X_except], new_Bad());
          res |= CF_CHANGED;
        }

        if (info->projs[pn_Load_res])
          exchange(info->projs[pn_Load_res], value);

        return res | DF_CHANGED;
      }
    }
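    /*
     * Illustrative read-after-write pattern at the source level:
     *
     *   *p = v;
     *   x  = *p;    // may be rewritten to: x = v;
     */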
    else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
             get_Load_mode(pred) == load_mode) {
      /*
       * a Load after a Load -- a read after read.
       * We may remove the second Load, if it does not have an exception handler
       * OR they are in the same block. In the latter case the Load cannot
       * throw an exception when the previous Load was quiet.
       *
       * Here, there is no need to check if the previous Load has an exception
       * handler because they would have exactly the same exception...
       */
      if (! info->projs[pn_Load_X_except] || get_nodes_block(load) == get_nodes_block(pred)) {
        ldst_info_t *pred_info = get_irn_link(pred);

        DBG_OPT_RAR(load, pred);

        if (pred_info->projs[pn_Load_res]) {
          /* we need a data proj from the previous load for this optimization */
          if (info->projs[pn_Load_res])
            exchange(info->projs[pn_Load_res], pred_info->projs[pn_Load_res]);

          if (info->projs[pn_Load_M])
            exchange(info->projs[pn_Load_M], mem);
        }
        else {
          if (info->projs[pn_Load_res]) {
            set_Proj_pred(info->projs[pn_Load_res], pred);
            set_nodes_block(info->projs[pn_Load_res], get_nodes_block(pred));
            pred_info->projs[pn_Load_res] = info->projs[pn_Load_res];
          }
          if (info->projs[pn_Load_M]) {
            /* Actually, this if should not be necessary. Construct the Loads
               properly! */
            exchange(info->projs[pn_Load_M], mem);
          }
        }

        /* no exception */
        if (info->projs[pn_Load_X_except]) {
          exchange(info->projs[pn_Load_X_except], new_Bad());
          res |= CF_CHANGED;
        }

        return res | DF_CHANGED;
      }
    }
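    /*
     * Illustrative read-after-read pattern at the source level:
     *
     *   x = *p;
     *   y = *p;    // the second Load may reuse the first: y = x;
     */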
    /* follow only Load chains */
    if (get_irn_op(pred) != op_Load)
      break;
  }

  return res;
}
/**
 * optimizes a Store
 */
static unsigned optimize_store(ir_node *store)
{
  ldst_info_t *info = get_irn_link(store);
  ir_node     *pred, *mem, *ptr, *value, *block;
  ir_mode     *mode;

  if (get_Store_volatility(store) == volatility_is_volatile)
    return 0;
  /*
   * BEWARE: one might think that checking the modes is useless, because
   * if the pointers are identical, they refer to the same object.
   * This is only true in strongly typed languages, not in C where the
   * following is possible: *(type1 *)p = a; *(type2 *)p = b ...
   */
  ptr = get_Store_ptr(store);

  /* Check, if the address of this Store is used more than once.
   * If not, this Store cannot be removed in any case. */
  if (get_irn_out_n(ptr) <= 1)
    return 0;
  block = get_nodes_block(store);
  mem   = get_Store_mem(store);
  value = get_Store_value(store);
  mode  = get_irn_mode(value);
  /* follow the memory chain as long as there are only Loads */
  for (pred = skip_Proj(mem); ; pred = skip_Proj(get_Load_mem(pred))) {
    ldst_info_t *pred_info = get_irn_link(pred);

    if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
        get_nodes_block(pred) == block && get_irn_mode(get_Store_value(pred)) == mode) {
      /*
       * a Store after a Store in the same block -- a write after write.
       * We may remove the first Store, if it does not have an exception handler.
       *
       * TODO: What, if both have the same exception handler ???
       */
      if (get_Store_volatility(pred) != volatility_is_volatile && !pred_info->projs[pn_Store_X_except]) {
        DBG_OPT_WAW(pred, store);
        exchange( pred_info->projs[pn_Store_M], get_Store_mem(pred) );
        return DF_CHANGED;
      }
    }
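    /*
     * Illustrative write-after-write pattern at the source level:
     *
     *   *p = a;    // dead: overwritten below with no intervening read
     *   *p = b;    // only this Store survives
     */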
    else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
             value == pred_info->projs[pn_Load_res]) {
      /*
       * a Store of a value just Loaded from the same address -- a write after read.
       * We may remove this Store, if it does not have an exception handler.
       */
      if (! info->projs[pn_Store_X_except]) {
        DBG_OPT_WAR(store, pred);
        exchange( info->projs[pn_Store_M], mem );
        return DF_CHANGED;
      }
    }
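    /*
     * Illustrative write-after-read pattern at the source level:
     *
     *   x  = *p;
     *   *p = x;    // stores back the value just read -- redundant
     */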
    /* follow only Load chains */
    if (get_irn_op(pred) != op_Load)
      break;
  }

  return 0;
}
/**
 * walker, optimizes Phi after Stores:
 * Does the following optimization:
 *
 *   val1   val2   val3          val1  val2  val3
 *    |      |      |               \    |    /
 *   Str    Str    Str               \   |   /
 *     \     |     /                  PhiData
 *      \    |    /                      |
 *       \   |   /                      Str
 *         PhiM                          |
 *                                      PhiM
 *
 * This reduces the number of Stores and allows for predicated execution.
 * It moves Stores towards the end of a function, which may be bad.
 *
 * This is only allowed if the predecessor blocks have only one successor.
 */
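/*
 * Illustrative effect at the source level (not from the original text):
 *
 *   if (c) *p = a; else *p = b;      =>      *p = c ? a : b;
 */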
static unsigned optimize_phi(ir_node *phi, void *env)
{
  walk_env_t   *wenv = env;
  int          i, n;
  ir_node      *store, *old_store, *ptr, *block, *phiM, *phiD, *exc, *projM;
  ir_mode      *mode;
  ir_node      **inM, **inD;
  int          *idx;
  dbg_info     *db = NULL;
  ldst_info_t  *info;
  block_info_t *bl_info;
  unsigned     res = 0;
  /* Must be a memory Phi */
  if (get_irn_mode(phi) != mode_M)
    return 0;

  n = get_Phi_n_preds(phi);
  if (n <= 0)
    return 0;

  store = skip_Proj(get_Phi_pred(phi, 0));
  old_store = store;
  if (get_irn_op(store) != op_Store)
    return 0;

  /* abort on dead blocks */
  if (is_Block_dead(get_nodes_block(store)))
    return 0;
  /* check if the block has only one output */
  bl_info = get_irn_link(get_nodes_block(store));
  if (bl_info->flags)
    return 0;
  /* this is the address of the store */
  ptr  = get_Store_ptr(store);
  mode = get_irn_mode(get_Store_value(store));
  info = get_irn_link(store);
  exc  = info->exc_block;
  for (i = 1; i < n; ++i) {
    ir_node *pred = skip_Proj(get_Phi_pred(phi, i));

    if (get_irn_op(pred) != op_Store)
      return 0;

    if (mode != get_irn_mode(get_Store_value(pred)) || ptr != get_Store_ptr(pred))
      return 0;

    info = get_irn_link(pred);

    /* check, if all stores have the same exception flow */
    if (exc != info->exc_block)
      return 0;

    /* abort on dead blocks */
    if (is_Block_dead(get_nodes_block(pred)))
      return 0;

    /* check if the block has only one output */
    bl_info = get_irn_link(get_nodes_block(pred));
    if (bl_info->flags)
      return 0;
  }
  /*
   * ok, when we are here, we found all predecessors of a Phi that
   * are Stores to the same address. That means whatever we do before
   * we enter the block of the Phi, we do a Store.
   * So, we can move the Store to the current block:
   *
   *   val1   val2   val3          val1  val2  val3
   *    |      |      |               \    |    /
   * | Str | | Str | | Str |           \   |   /
   *    \      |      /                 PhiData
   *     \     |     /                     |
   *      \    |    /                     Str
   *         PhiM                          |
   *                                      PhiM
   *
   * This is only allowed if the predecessor blocks have only one successor.
   */
  /* first step: collect all inputs */
  NEW_ARR_A(ir_node *, inM, n);
  NEW_ARR_A(ir_node *, inD, n);
  NEW_ARR_A(int, idx, n);

  for (i = 0; i < n; ++i) {
    ir_node *pred = skip_Proj(get_Phi_pred(phi, i));
    info = get_irn_link(pred);

    inM[i] = get_Store_mem(pred);
    inD[i] = get_Store_value(pred);
    idx[i] = info->exc_idx;
  }
  block = get_nodes_block(phi);
  /* second step: create a new memory Phi */
  phiM = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inM, mode_M);

  /* third step: create a new data Phi */
  phiD = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inD, mode);

  /* fourth step: create the Store */
  store = new_rd_Store(db, current_ir_graph, block, phiM, ptr, phiD);
  co_set_irn_name(store, co_get_irn_ident(old_store));

  projM = new_rd_Proj(NULL, current_ir_graph, block, store, mode_M, pn_Store_M);

  info = get_ldst_info(store, wenv);
  info->projs[pn_Store_M] = projM;
  /* fifth step: repair exception flow */
  if (exc) {
    ir_node *projX = new_rd_Proj(NULL, current_ir_graph, block, store, mode_X, pn_Store_X_except);

    info->projs[pn_Store_X_except] = projX;
    info->exc_block = exc;
    info->exc_idx   = idx[0];

    for (i = 0; i < n; ++i) {
      set_Block_cfgpred(exc, idx[i], projX);
    }

    if (n > 1) {
      /* the exception block should be optimized as some inputs are identical now */
    }

    res |= CF_CHANGED;
  }
  /* sixth step: replace old Phi */
  exchange(phi, projM);

  return res | DF_CHANGED;
}
/**
 * walker, performs the actual optimizations on Load, Store and Phi nodes
 */
static void do_load_store_optimize(ir_node *n, void *env)
{
  walk_env_t *wenv = env;

  switch (get_irn_opcode(n)) {
  case iro_Load:
    wenv->changes |= optimize_load(n);
    break;
  case iro_Store:
    wenv->changes |= optimize_store(n);
    break;
  case iro_Phi:
    wenv->changes |= optimize_phi(n, env);
    break;
  default:
    break;
  }
}
/*
 * do the load store optimization
 */
void optimize_load_store(ir_graph *irg)
{
  walk_env_t env;

  assert(get_irg_phase_state(irg) != phase_building);
  assert(get_irg_pinned(irg) != op_pin_state_floats &&
         "LoadStore optimization needs pinned graph");

  if (! get_opt_redundant_LoadStore())
    return;

  obstack_init(&env.obst);
  env.changes = 0;

  /* init the links, then collect Loads/Stores/Proj's in lists */
  irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);

  /* now we have collected enough information, optimize */
  irg_walk_graph(irg, NULL, do_load_store_optimize, &env);

  obstack_free(&env.obst, NULL);
  /* Handle graph state */
  if (env.changes) {
    if (get_irg_outs_state(current_ir_graph) == outs_consistent)
      set_irg_outs_inconsistent(current_ir_graph);
  }

  if (env.changes & CF_CHANGED) {
    /* is this really needed: Yes, control flow changed, blocks might become Bad. */
    if (get_irg_dom_state(current_ir_graph) == dom_consistent)
      set_irg_dom_inconsistent(current_ir_graph);
  }
}
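/*
 * Typical usage (a minimal sketch, not from the original file; assumes a
 * fully constructed, pinned graph):
 *
 *   ir_graph *irg = get_irp_irg(0);
 *   optimize_load_store(irg);   // may invalidate out edges and dominance info
 */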