/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Load/Store optimizations.
 * @author  Michael Beck
 */
#include "iroptimize.h"
#include "irgraph_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "opt_polymorphy.h"
#include "irphase_t.h"
/** The debug handle. */
DEBUG_ONLY(static firm_dbg_module_t *dbg;)

#include "cacheopt/cachesim.h"
#define IMAX(a,b)   ((a) > (b) ? (a) : (b))

#define MAX_PROJ    IMAX(IMAX(pn_Load_max, pn_Store_max), pn_Call_max)

enum changes_t {
    DF_CHANGED = 1,       /**< data flow changed */
    CF_CHANGED = 2,       /**< control flow changed */
};
/** walker environment */
typedef struct _walk_env_t {
    struct obstack obst;          /**< list of all stores */
    unsigned       changes;       /**< a bitmask of graph changes */
} walk_env_t;
/** A Load/Store info. */
typedef struct _ldst_info_t {
    ir_node  *projs[MAX_PROJ+1];  /**< list of Proj's of this node; +1 because
                                       the assert below allows nr == MAX_PROJ */
    ir_node  *exc_block;          /**< the exception block if available */
    int      exc_idx;             /**< predecessor index in the exception block */
    unsigned visited;             /**< visited counter for breaking loops */
} ldst_info_t;
/**
 * flags for control flow.
 */
enum block_flags_t {
    BLOCK_HAS_COND = 1,  /**< Block has conditional control flow */
    BLOCK_HAS_EXC  = 2   /**< Block has exceptional control flow */
};

/** A BB info. */
typedef struct _block_info_t {
    unsigned flags;  /**< flags for the block */
} block_info_t;
/** the master visited flag for loop detection. */
static unsigned master_visited = 0;

#define INC_MASTER()       ++master_visited
#define MARK_NODE(info)    (info)->visited = master_visited
#define NODE_VISITED(info) ((info)->visited >= master_visited)
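/*
 * Illustrative usage of the visited-counter idiom above (a sketch, not
 * extra functionality): every walk over a memory chain starts a new
 * generation, so no per-node reset is ever needed:
 *
 *     INC_MASTER();                  // start a new chain walk
 *     ...
 *     if (NODE_VISITED(pred_info))   // already seen in this walk: cycle
 *         break;
 *     MARK_NODE(pred_info);          // tag as seen in this walk
 */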
/**
 * get the Load/Store info of a node
 */
static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst) {
    ldst_info_t *info = get_irn_link(node);

    if (! info) {
        info = obstack_alloc(obst, sizeof(*info));
        memset(info, 0, sizeof(*info));
        set_irn_link(node, info);
    }
    return info;
}  /* get_ldst_info */
/**
 * get the Block info of a node
 */
static block_info_t *get_block_info(ir_node *node, struct obstack *obst) {
    block_info_t *info = get_irn_link(node);

    if (! info) {
        info = obstack_alloc(obst, sizeof(*info));
        memset(info, 0, sizeof(*info));
        set_irn_link(node, info);
    }
    return info;
}  /* get_block_info */
/**
 * update the projection info for a Load/Store
 */
static unsigned update_projs(ldst_info_t *info, ir_node *proj)
{
    long nr = get_Proj_proj(proj);

    assert(0 <= nr && nr <= MAX_PROJ && "Wrong proj from LoadStore");

    if (info->projs[nr]) {
        /* there is already one, do CSE */
        exchange(proj, info->projs[nr]);
        return DF_CHANGED;
    }
    else {
        info->projs[nr] = proj;
        return 0;
    }
}  /* update_projs */
/**
 * update the exception block info for a Load/Store node.
 *
 * @param info   the load/store info struct
 * @param block  the exception handler block for this load/store
 * @param pos    the control flow input of the block
 */
static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
{
    assert(info->exc_block == NULL && "more than one exception block found");

    info->exc_block = block;
    info->exc_idx   = pos;
    return 0;
}  /* update_exc */
/** Return the number of uses of an address node */
#define get_irn_n_uses(adr) get_irn_n_edges(adr)
/**
 * walker, collects all Load/Store/Proj nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
{
    ir_opcode   opcode = get_irn_opcode(node);
    ir_node     *pred, *blk, *pred_blk;
    ldst_info_t *ldst_info;
    walk_env_t  *wenv = env;

    if (opcode == iro_Proj) {
        pred   = get_Proj_pred(node);
        opcode = get_irn_opcode(pred);

        if (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call) {
            ldst_info = get_ldst_info(pred, &wenv->obst);

            wenv->changes |= update_projs(ldst_info, node);

            /*
             * Place the Projs in the same block as their
             * predecessor Load/Store/Call. This is always legal and
             * prevents "non-SSA" form after optimizations if a Proj
             * is in the wrong block.
             */
            blk      = get_nodes_block(node);
            pred_blk = get_nodes_block(pred);
            if (blk != pred_blk) {
                wenv->changes |= DF_CHANGED;
                set_nodes_block(node, pred_blk);
            }
        }
    } else if (opcode == iro_Block) {
        int i;

        for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
            ir_node      *pred_block, *proj;
            block_info_t *bl_info;
            int          is_exc = 0;

            pred = proj = get_Block_cfgpred(node, i);

            if (is_Proj(proj)) {
                pred   = get_Proj_pred(proj);
                is_exc = get_Proj_proj(proj) == pn_Generic_X_except;
            }

            /* ignore Bad predecessors, they will be removed later */
            if (is_Bad(pred))
                continue;

            pred_block = get_nodes_block(pred);
            bl_info    = get_block_info(pred_block, &wenv->obst);

            if (is_fragile_op(pred) && is_exc)
                bl_info->flags |= BLOCK_HAS_EXC;
            else if (is_irn_forking(pred))
                bl_info->flags |= BLOCK_HAS_COND;

            opcode = get_irn_opcode(pred);
            if (is_exc && (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call)) {
                ldst_info = get_ldst_info(pred, &wenv->obst);

                wenv->changes |= update_exc(ldst_info, node, i);
            }
        }
    }
}  /* collect_nodes */
/**
 * Returns an entity if the address ptr points to a constant one.
 *
 * @param ptr  the address
 *
 * @return an entity or NULL
 */
static ir_entity *find_constant_entity(ir_node *ptr)
{
    for (;;) {
        if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
            ir_entity *ent = get_SymConst_entity(ptr);
            if (variability_constant == get_entity_variability(ent))
                return ent;
            return NULL;
        } else if (is_Sel(ptr)) {
            ir_entity *ent = get_Sel_entity(ptr);
            ir_type   *tp  = get_entity_owner(ent);

            /* Do not fiddle with polymorphism. */
            if (is_Class_type(get_entity_owner(ent)) &&
                ((get_entity_n_overwrites(ent)    != 0) ||
                 (get_entity_n_overwrittenby(ent) != 0)   ) )
                return NULL;

            if (is_Array_type(tp)) {
                /* check bounds */
                int i, n;

                for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
                    ir_node *bound;
                    tarval  *tlower, *tupper;
                    ir_node *index = get_Sel_index(ptr, i);
                    tarval  *tv    = computed_value(index);

                    /* check if the index is constant */
                    if (tv == tarval_bad)
                        return NULL;

                    bound  = get_array_lower_bound(tp, i);
                    tlower = computed_value(bound);
                    bound  = get_array_upper_bound(tp, i);
                    tupper = computed_value(bound);

                    if (tlower == tarval_bad || tupper == tarval_bad)
                        return NULL;

                    if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
                        return NULL;
                    if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
                        return NULL;

                    /* ok, bounds check finished */
                }
            }

            if (variability_constant == get_entity_variability(ent))
                return ent;

            /* try next */
            ptr = get_Sel_ptr(ptr);
        } else
            return NULL;
    }
}  /* find_constant_entity */
/**
 * Return the Selection index of a Sel node from dimension dim
 */
static long get_Sel_array_index_long(ir_node *n, int dim) {
    ir_node *index = get_Sel_index(n, dim);
    assert(is_Const(index));
    return get_tarval_long(get_Const_tarval(index));
}  /* get_Sel_array_index_long */
/**
 * Returns the accessed component graph path for a
 * node computing an address.
 *
 * @param ptr    the node computing the address
 * @param depth  current depth in steps upward from the root
 */
static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
    compound_graph_path *res = NULL;
    ir_entity           *root, *field;
    int                 path_len, pos;

    if (is_SymConst(ptr)) {
        /* a SymConst. If the depth is 0, this is an access to a global
         * entity and we don't need a component path, else we know
         * at least its length.
         */
        assert(get_SymConst_kind(ptr) == symconst_addr_ent);
        root = get_SymConst_entity(ptr);
        res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
    } else {
        assert(is_Sel(ptr));
        /* it's a Sel, go up until we find the root */
        res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);

        /* fill up the step in the path at the current position */
        field    = get_Sel_entity(ptr);
        path_len = get_compound_graph_path_length(res);
        pos      = path_len - depth - 1;
        set_compound_graph_path_node(res, pos, field);

        if (is_Array_type(get_entity_owner(field))) {
            assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
            set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
        }
    }
    return res;
}  /* rec_get_accessed_path */
/**
 * Returns an access path or NULL. The access path is only
 * valid if the graph is in phase_high and _no_ address computation is used.
 */
static compound_graph_path *get_accessed_path(ir_node *ptr) {
    return rec_get_accessed_path(ptr, 0);
}  /* get_accessed_path */
typedef struct path_entry {
    ir_entity         *ent;
    struct path_entry *next;
    long              index;
} path_entry;
static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) {
    path_entry       entry, *p;
    ir_entity        *ent, *field;
    ir_initializer_t *initializer;
    ir_type          *tp;
    unsigned         n;

    entry.next = next;
    if (is_Sel(ptr)) {
        entry.ent = field = get_Sel_entity(ptr);
        tp = get_entity_owner(field);
        if (is_Array_type(tp)) {
            assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
            entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
        } else {
            int i, n_members = get_compound_n_members(tp);
            for (i = 0; i < n_members; ++i) {
                if (get_compound_member(tp, i) == field)
                    break;
            }
            if (i >= n_members) {
                /* not found: should NOT happen */
                return NULL;
            }
            entry.index = i;
        }
        return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
    }

    assert(is_SymConst(ptr));
    ent         = get_SymConst_entity(ptr);
    initializer = get_entity_initializer(ent);
    for (p = next; p != NULL; p = p->next) {
        if (initializer->kind != IR_INITIALIZER_COMPOUND)
            return NULL;

        n = get_initializer_compound_n_entries(initializer);
        if ((unsigned) p->index >= n)
            return NULL;
        initializer = get_initializer_compound_value(initializer, p->index);
    }

    switch (initializer->kind) {
    case IR_INITIALIZER_CONST:
        return get_initializer_const_value(initializer);
    case IR_INITIALIZER_TARVAL:
    case IR_INITIALIZER_NULL:
    default:
        return NULL;
    }
}  /* rec_find_compound_ent_value */
static ir_node *find_compound_ent_value(ir_node *ptr) {
    return rec_find_compound_ent_value(ptr, NULL);
}  /* find_compound_ent_value */

static void reduce_adr_usage(ir_node *ptr);
/**
 * Update a Load that may have lost its usage.
 */
static void handle_load_update(ir_node *load) {
    ldst_info_t *info = get_irn_link(load);

    /* do NOT touch volatile loads for now */
    if (get_Load_volatility(load) == volatility_is_volatile)
        return;

    if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
        ir_node *ptr = get_Load_ptr(load);
        ir_node *mem = get_Load_mem(load);

        /* a Load whose value is neither used nor exception checked, remove it */
        exchange(info->projs[pn_Load_M], mem);
        if (info->projs[pn_Load_X_regular])
            exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
        kill_node(load);
        reduce_adr_usage(ptr);
    }
}  /* handle_load_update */
/**
 * A use of an address node has vanished. Check if it was a Proj
 * node and update the counters.
 */
static void reduce_adr_usage(ir_node *ptr) {
    if (is_Proj(ptr)) {
        if (get_irn_n_edges(ptr) <= 0) {
            /* this Proj is dead now */
            ir_node *pred = get_Proj_pred(ptr);

            if (is_Load(pred)) {
                ldst_info_t *info = get_irn_link(pred);
                info->projs[get_Proj_proj(ptr)] = NULL;

                /* this node lost its result proj, handle that */
                handle_load_update(pred);
            }
        }
    }
}  /* reduce_adr_usage */
/**
 * Check if an already existing value of mode old_mode can be converted
 * into the needed mode new_mode without loss.
 */
static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode) {
    if (old_mode == new_mode)
        return 1;

    /* if both modes are two's complement ones, we can always convert the
       stored value into the needed one. */
    if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
        get_mode_arithmetic(old_mode) == irma_twos_complement &&
        get_mode_arithmetic(new_mode) == irma_twos_complement)
        return 1;
    return 0;
}  /* can_use_stored_value */
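/*
 * Example (illustrative): with old_mode = mode_Is (32-bit int) and
 * new_mode = mode_Bs (8-bit int) this returns non-zero, because both are
 * two's complement and truncation loses nothing the narrower mode needs.
 * The reverse direction (8 bits stored, 32 bits loaded) is rejected.
 */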
/**
 * Check whether a Call is at least pure, i.e. it only reads memory.
 */
static unsigned is_Call_pure(ir_node *call) {
    ir_type *call_tp = get_Call_type(call);
    unsigned prop    = get_method_additional_properties(call_tp);

    /* check first the call type */
    if ((prop & (mtp_property_const|mtp_property_pure)) == 0) {
        /* try the called entity */
        ir_node *ptr = get_Call_ptr(call);

        if (is_Global(ptr)) {
            ir_entity *ent = get_Global_entity(ptr);

            prop = get_entity_additional_properties(ent);
        }
    }
    return (prop & (mtp_property_const|mtp_property_pure)) != 0;
}  /* is_Call_pure */
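/*
 * Illustrative consequence: a Call to an entity carrying mtp_property_pure
 * (or mtp_property_const) only reads memory, so the chain walkers below may
 * step over its memory effect exactly as they step over a Load.
 */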
/**
 * Follow the memory chain as long as there are only Loads,
 * alias free Stores, and constant Calls and try to replace the
 * current Load by a previous one.
 * Note that in unreachable loops it might happen that we reach the
 * Load again, and we can fall into a cycle.
 * We break such cycles using a special visited flag.
 *
 * INC_MASTER() must be called before diving into the chain.
 */
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
    unsigned    res = 0;
    ldst_info_t *info = get_irn_link(load);
    ir_node     *pred;
    ir_node     *ptr       = get_Load_ptr(load);
    ir_node     *mem       = get_Load_mem(load);
    ir_mode     *load_mode = get_Load_mode(load);

    for (pred = curr; load != pred; ) {
        ldst_info_t *pred_info = get_irn_link(pred);

        /*
         * BEWARE: one might think that checking the modes is useless, because
         * if the pointers are identical, they refer to the same object.
         * This is only true in strongly typed languages, not in C, where the
         * following is possible: a = *(ir_type1 *)p; b = *(ir_type2 *)p ...
         */
        if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
            can_use_stored_value(get_irn_mode(get_Store_value(pred)), load_mode)) {
            /*
             * a Load immediately after a Store -- a read after write.
             * We may remove the Load, if neither Load nor Store has an
             * exception handler OR they are in the same MacroBlock. In the
             * latter case the Load cannot throw an exception when the
             * previous Store was quiet.
             *
             * Why do we need to check for a Store exception? If the Store
             * cannot be executed (ROM) the exception handler might simply
             * jump into the load MacroBlock :-(
             * We could make it a little bit better if we would know that the
             * exception handler of the Store jumps directly to the end...
             */
            if ((pred_info->projs[pn_Store_X_except] == NULL && info->projs[pn_Load_X_except] == NULL) ||
                get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)) {
                ir_node *value = get_Store_value(pred);

                DBG_OPT_RAW(load, value);

                /* add a Conv if needed */
                if (get_irn_mode(get_Store_value(pred)) != load_mode) {
                    value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
                }

                if (info->projs[pn_Load_M])
                    exchange(info->projs[pn_Load_M], mem);

                /* no exception */
                if (info->projs[pn_Load_X_except]) {
                    exchange( info->projs[pn_Load_X_except], new_Bad());
                    res |= CF_CHANGED;
                }
                if (info->projs[pn_Load_X_regular]) {
                    exchange( info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
                    res |= CF_CHANGED;
                }

                if (info->projs[pn_Load_res])
                    exchange(info->projs[pn_Load_res], value);

                kill_node(load);
                reduce_adr_usage(ptr);
                return res | DF_CHANGED;
            }
        } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
                   can_use_stored_value(get_Load_mode(pred), load_mode)) {
            /*
             * a Load after a Load -- a read after read.
             * We may remove the second Load, if it does not have an exception
             * handler OR they are in the same MacroBlock. In the latter case
             * the Load cannot throw an exception when the previous Load was
             * quiet.
             *
             * Here, there is no need to check if the previous Load has an
             * exception handler because they would have exactly the same
             * exception...
             */
            if (info->projs[pn_Load_X_except] == NULL || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)) {
                ir_node *value;

                DBG_OPT_RAR(load, pred);

                /* the result is used */
                if (info->projs[pn_Load_res]) {
                    if (pred_info->projs[pn_Load_res] == NULL) {
                        /* create a new Proj again */
                        pred_info->projs[pn_Load_res] = new_r_Proj(current_ir_graph, get_nodes_block(pred), pred, get_Load_mode(pred), pn_Load_res);
                    }
                    value = pred_info->projs[pn_Load_res];

                    /* add a Conv if needed */
                    if (get_Load_mode(pred) != load_mode) {
                        value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
                    }

                    exchange(info->projs[pn_Load_res], value);
                }

                if (info->projs[pn_Load_M])
                    exchange(info->projs[pn_Load_M], mem);

                /* no exception */
                if (info->projs[pn_Load_X_except]) {
                    exchange(info->projs[pn_Load_X_except], new_Bad());
                    res |= CF_CHANGED;
                }
                if (info->projs[pn_Load_X_regular]) {
                    exchange( info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
                    res |= CF_CHANGED;
                }

                kill_node(load);
                reduce_adr_usage(ptr);
                return res | DF_CHANGED;
            }
        }
        if (is_Store(pred)) {
            /* check if we can pass through this store */
            ir_alias_relation rel = get_alias_relation(
                current_ir_graph,
                get_Store_ptr(pred),
                get_irn_mode(get_Store_value(pred)),
                ptr, load_mode);
            /* if there might be an alias, we cannot pass this Store */
            if (rel != ir_no_alias)
                break;
            pred = skip_Proj(get_Store_mem(pred));
        } else if (is_Load(pred)) {
            pred = skip_Proj(get_Load_mem(pred));
        } else if (is_Call(pred)) {
            if (is_Call_pure(pred)) {
                /* The called graph is at least pure, so there are no Stores
                   in it. We can handle it like a Load and skip it. */
                pred = skip_Proj(get_Call_mem(pred));
            } else {
                /* there might be Stores in the called graph, stop here */
                break;
            }
        } else {
            /* follow only Load chains */
            break;
        }

        /* check for cycles */
        if (NODE_VISITED(pred_info))
            break;
        MARK_NODE(pred_info);
    }

    if (is_Sync(pred)) {
        int i;

        /* handle all Sync predecessors */
        for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
            res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
            if (res)
                break;
        }
    }
    return res;
}  /* follow_Mem_chain */
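/*
 * Source-level intuition for the two replacements above (a sketch, not
 * generated code):
 *
 *     *p = v;            read after write: the Load yields v
 *     x  = *p;
 *
 *     x = *p;            read after read: the second Load yields x
 *     y = *p;
 *
 * A Conv is inserted when the Load mode differs from the reused value.
 */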
/**
 * optimize a Load
 *
 * @param load  the Load node
 */
static unsigned optimize_load(ir_node *load)
{
    ldst_info_t *info = get_irn_link(load);
    ir_node     *mem, *ptr, *new_node;
    ir_entity   *ent;
    unsigned    res = 0;

    /* do NOT touch volatile loads for now */
    if (get_Load_volatility(load) == volatility_is_volatile)
        return 0;

    /* the address of the load to be optimized */
    ptr = get_Load_ptr(load);
    /*
     * Check if we can remove the exception from a Load:
     * This can be done, if the address is from a Sel(Alloc) and
     * the Sel type is a subtype of the allocated type.
     *
     * This optimizes some often used OO constructs,
     * like x = new O; x->t;
     */
    if (info->projs[pn_Load_X_except]) {
        if (is_Sel(ptr)) {
            ir_node *mem = get_Sel_mem(ptr);

            /* FIXME: works with the current FE, but better use the base */
            if (is_Alloc(skip_Proj(mem))) {
                /* ok, check the types */
                ir_entity *ent    = get_Sel_entity(ptr);
                ir_type   *s_type = get_entity_type(ent);
                ir_type   *a_type = get_Alloc_type(mem);

                if (is_SubClass_of(s_type, a_type)) {
                    /* ok, condition met: there can't be an exception because
                     * Alloc guarantees that enough memory was allocated */
                    exchange(info->projs[pn_Load_X_except], new_Bad());
                    info->projs[pn_Load_X_except] = NULL;
                    exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
                    info->projs[pn_Load_X_regular] = NULL;
                    res |= CF_CHANGED;
                }
            }
        } else if (is_Alloc(skip_Proj(skip_Cast(ptr)))) {
            /* simple case: a direct load after an Alloc. A Firm Alloc throws
             * an exception in case of out-of-memory, so there is no way for
             * an exception in this Load.
             * This code is constructed by the "exception lowering" in the Jack compiler.
             */
            exchange(info->projs[pn_Load_X_except], new_Bad());
            info->projs[pn_Load_X_except] = NULL;
            exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
            info->projs[pn_Load_X_regular] = NULL;
            res |= CF_CHANGED;
        }
    }
    /* The mem of the Load. Must still be returned after optimization. */
    mem = get_Load_mem(load);

    if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
        /* a Load whose value is neither used nor exception checked, remove it */
        exchange(info->projs[pn_Load_M], mem);

        if (info->projs[pn_Load_X_regular]) {
            /* should not happen, but if it does, remove it */
            exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
            res |= CF_CHANGED;
        }
        kill_node(load);
        reduce_adr_usage(ptr);
        return res | DF_CHANGED;
    }
    /* Load from a constant polymorphic field, where we can resolve
       polymorphism. */
    new_node = transform_node_Load(load);
    if (new_node != load) {
        if (info->projs[pn_Load_M]) {
            exchange(info->projs[pn_Load_M], mem);
            info->projs[pn_Load_M] = NULL;
        }
        if (info->projs[pn_Load_X_except]) {
            exchange(info->projs[pn_Load_X_except], new_Bad());
            info->projs[pn_Load_X_except] = NULL;
            res |= CF_CHANGED;
        }
        if (info->projs[pn_Load_X_regular]) {
            exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
            info->projs[pn_Load_X_regular] = NULL;
            res |= CF_CHANGED;
        }
        if (info->projs[pn_Load_res])
            exchange(info->projs[pn_Load_res], new_node);

        kill_node(load);
        reduce_adr_usage(ptr);
        return res | DF_CHANGED;
    }
    /* check if we can determine the entity that will be loaded */
    ent = find_constant_entity(ptr);
    if (ent != NULL) {
        if ((allocation_static == get_entity_allocation(ent)) &&
            (visibility_external_allocated != get_entity_visibility(ent))) {
            /* a static allocation that is not external: there should be NO
             * exception when loading. */

            /* no exception, clear the info field as it might be checked later again */
            if (info->projs[pn_Load_X_except]) {
                exchange(info->projs[pn_Load_X_except], new_Bad());
                info->projs[pn_Load_X_except] = NULL;
                res |= CF_CHANGED;
            }
            if (info->projs[pn_Load_X_regular]) {
                exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
                info->projs[pn_Load_X_regular] = NULL;
                res |= CF_CHANGED;
            }

            if (variability_constant == get_entity_variability(ent)) {
                if (is_atomic_entity(ent)) {
                    /* Might not be atomic after lowering of Sels.  In this
                       case we could also load, but it's more complicated. */
                    /* simpler case: we load the content of a constant value:
                     * replace it by the constant itself */

                    /* no memory */
                    if (info->projs[pn_Load_M]) {
                        exchange(info->projs[pn_Load_M], mem);
                    }
                    /* no result :-) */
                    if (info->projs[pn_Load_res]) {
                        if (is_atomic_entity(ent)) {
                            ir_node *c = copy_const_value(get_irn_dbg_info(load), get_atomic_ent_value(ent));

                            exchange(info->projs[pn_Load_res], c);
                        }
                    }
                    kill_node(load);
                    reduce_adr_usage(ptr);
                    return res | DF_CHANGED;
                } else {
                    ir_node *c = NULL;
                    if (ent->has_initializer) {
                        /* new style initializer */
                        c = find_compound_ent_value(ptr);
                    } else {
                        /* old style initializer */
                        compound_graph_path *path = get_accessed_path(ptr);

                        if (path != NULL) {
                            assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));

                            c = get_compound_ent_value_by_path(ent, path);
                            free_compound_graph_path(path);
                        }
                    }
                    if (c != NULL) {
                        if (info->projs[pn_Load_M]) {
                            exchange(info->projs[pn_Load_M], mem);
                        }
                        if (info->projs[pn_Load_res]) {
                            exchange(info->projs[pn_Load_res], copy_const_value(get_irn_dbg_info(load), c));
                        }
                        kill_node(load);
                        reduce_adr_usage(ptr);
                        return res | DF_CHANGED;
                    } else {
                        /* We can not determine a correct access path. E.g., in jack, we load
                           a byte from an object to generate an exception. Happens in test program
                           Reflectiontest. */
                        printf(">>>>>>>>>>>>> Found access to constant entity %s in function %s\n", get_entity_name(ent),
                               get_entity_name(get_irg_entity(current_ir_graph)));
                        ir_printf("  load: %+F\n", load);
                        ir_printf("  ptr:  %+F\n", ptr);
                    }
                }
            }
        }
    }
    /* Check if the address of this load is used more than once.
     * If not, this load cannot be removed in any case. */
    if (get_irn_n_uses(ptr) <= 1)
        return res;

    /*
     * follow the memory chain as long as there are only Loads
     * and try to replace the current Load or Store by a previous one.
     * Note that in unreachable loops it might happen that we reach the
     * Load again, and we can fall into a cycle.
     * We break such cycles using a special visited flag.
     */
    INC_MASTER();
    res = follow_Mem_chain(load, skip_Proj(mem));
    return res;
}  /* optimize_load */
/**
 * Check whether a value of mode new_mode would completely overwrite a value
 * of mode old_mode in memory.
 */
static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
{
    return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
}  /* is_completely_overwritten */
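/*
 * Example (illustrative): a later 64-bit Store completely overwrites an
 * earlier 32-bit Store to the same address, but a later 32-bit Store does
 * not kill an earlier 64-bit one -- the upper bits of the old value survive.
 */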
/**
 * follow the memory chain as long as there are only Loads and alias free Stores.
 *
 * INC_MASTER() must be called before diving into the chain.
 */
static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
    unsigned    res = 0;
    ldst_info_t *info = get_irn_link(store);
    ir_node     *pred;
    ir_node     *ptr   = get_Store_ptr(store);
    ir_node     *mem   = get_Store_mem(store);
    ir_node     *value = get_Store_value(store);
    ir_mode     *mode  = get_irn_mode(value);
    ir_node     *block = get_nodes_block(store);
    ir_node     *mblk  = get_Block_MacroBlock(block);
    for (pred = curr; pred != store;) {
        ldst_info_t *pred_info = get_irn_link(pred);

        /*
         * BEWARE: one might think that checking the modes is useless, because
         * if the pointers are identical, they refer to the same object.
         * This is only true in strongly typed languages, not in C, where the
         * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
         * However, if the mode that is written has a size greater than or
         * equal to the old one's, the old value is completely overwritten and
         * can be killed ...
         */
        if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
            get_nodes_MacroBlock(pred) == mblk &&
            is_completely_overwritten(get_irn_mode(get_Store_value(pred)), mode)) {
            /*
             * a Store after a Store in the same MacroBlock -- a write after write.
             * We may remove the first Store, if it does not have an exception handler.
             *
             * TODO: What, if both have the same exception handler ???
             */
            if (get_Store_volatility(pred) != volatility_is_volatile && !pred_info->projs[pn_Store_X_except]) {
                DBG_OPT_WAW(pred, store);
                exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
                kill_node(pred);
                reduce_adr_usage(ptr);
                return DF_CHANGED;
            }
        } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
                   value == pred_info->projs[pn_Load_res]) {
            /*
             * a Store of a value just loaded from the same address
             * -- a write after read.
             * We may remove the Store, if it does not have an exception
             * handler.
             */
            if (! info->projs[pn_Store_X_except]) {
                DBG_OPT_WAR(store, pred);
                exchange(info->projs[pn_Store_M], mem);
                kill_node(store);
                reduce_adr_usage(ptr);
                return DF_CHANGED;
            }
        }
        if (is_Store(pred)) {
            /* check if we can pass through this store */
            ir_alias_relation rel = get_alias_relation(
                current_ir_graph,
                get_Store_ptr(pred),
                get_irn_mode(get_Store_value(pred)),
                ptr, mode);
            /* if there might be an alias, we cannot pass this Store */
            if (rel != ir_no_alias)
                break;
            pred = skip_Proj(get_Store_mem(pred));
        } else if (is_Load(pred)) {
            ir_alias_relation rel = get_alias_relation(
                current_ir_graph, get_Load_ptr(pred), get_Load_mode(pred),
                ptr, mode);
            if (rel != ir_no_alias)
                break;

            pred = skip_Proj(get_Load_mem(pred));
        } else {
            /* follow only Load chains */
            break;
        }

        /* check for cycles */
        if (NODE_VISITED(pred_info))
            break;
        MARK_NODE(pred_info);
    }

    if (is_Sync(pred)) {
        int i;

        /* handle all Sync predecessors */
        for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
            res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
            if (res)
                break;
        }
    }
    return res;
}  /* follow_Mem_chain_for_Store */
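/*
 * Source-level intuition for the two cases above (a sketch):
 *
 *     *p = a;            write after write: the first Store is dead
 *     *p = b;
 *
 *     x  = *p;           write after read: the Store writes back what
 *     *p = x;            is already there and is redundant
 */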
/**
 * optimize a Store
 *
 * @param store  the Store node
 */
static unsigned optimize_store(ir_node *store) {
    ir_node *ptr, *mem;

    if (get_Store_volatility(store) == volatility_is_volatile)
        return 0;

    ptr = get_Store_ptr(store);

    /* Check if the address of this Store is used more than once.
     * If not, this Store cannot be removed in any case. */
    if (get_irn_n_uses(ptr) <= 1)
        return 0;

    mem = get_Store_mem(store);

    /* follow the memory chain as long as there are only Loads */
    INC_MASTER();

    return follow_Mem_chain_for_Store(store, skip_Proj(mem));
}  /* optimize_store */
/**
 * walker, optimizes Phi after Stores to identical places:
 * Does the following optimization:
 * @verbatim
 *
 *   val1   val2   val3          val1  val2  val3
 *    |      |      |               \    |    /
 *  Store  Store  Store              \   |   /
 *      \    |    /                   PhiData
 *       \   |   /                       |
 *        \  |  /                      Store
 *          PhiM                         |
 *                                     PhiM
 *
 * @endverbatim
 * This reduces the number of stores and allows for predicated execution.
 * Moves Stores back to the end of a function which may be bad.
 *
 * This is only possible if the predecessor blocks have only one successor.
 */
static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
{
    int i, n;
    ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
    ir_mode *mode;
    ir_node **inM, **inD, **projMs;
    int *idx;
    dbg_info *db = NULL;
    ldst_info_t *info;
    block_info_t *bl_info;
    unsigned res = 0;
    /* Must be a memory Phi */
    if (get_irn_mode(phi) != mode_M)
        return 0;

    n = get_Phi_n_preds(phi);
    if (n <= 0)
        return 0;

    /* must have only one user */
    projM = get_Phi_pred(phi, 0);
    if (get_irn_n_edges(projM) != 1)
        return 0;

    store = skip_Proj(projM);
    old_store = store;
    if (!is_Store(store))
        return 0;

    block = get_nodes_block(store);

    /* abort on dead blocks */
    if (is_Block_dead(block))
        return 0;

    /* check if the block is post dominated by Phi-block
       and has no exception exit */
    bl_info = get_irn_link(block);
    if (bl_info->flags & BLOCK_HAS_EXC)
        return 0;

    phi_block = get_nodes_block(phi);
    if (! block_strictly_postdominates(phi_block, block))
        return 0;

    /* this is the address of the store */
    ptr  = get_Store_ptr(store);
    mode = get_irn_mode(get_Store_value(store));
    info = get_irn_link(store);
    exc  = info->exc_block;
    for (i = 1; i < n; ++i) {
        ir_node *pred = get_Phi_pred(phi, i);

        if (get_irn_n_edges(pred) != 1)
            return 0;

        pred = skip_Proj(pred);
        if (!is_Store(pred))
            return 0;

        if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
            return 0;

        info = get_irn_link(pred);

        /* check, if all stores have the same exception flow */
        if (exc != info->exc_block)
            return 0;

        /* abort on dead blocks */
        block = get_nodes_block(pred);
        if (is_Block_dead(block))
            return 0;

        /* check if the block is post dominated by Phi-block
           and has no exception exit. Note that block must be different from
           Phi-block, else we would move a Store from the end of a block to
           its start... */
        bl_info = get_irn_link(block);
        if (bl_info->flags & BLOCK_HAS_EXC)
            return 0;
        if (block == phi_block || ! block_postdominates(phi_block, block))
            return 0;
    }
    /*
     * ok, when we are here, we found all predecessors of a Phi that
     * are Stores to the same address and size. That means whatever
     * we do before we enter the block of the Phi, we do a Store.
     * So, we can move the Store to the current block:
     *
     *   val1   val2   val3          val1  val2  val3
     *    |      |      |               \    |    /
     *  | Str | | Str | | Str |         \   |   /
     *      \    |    /                  PhiData
     *       \   |   /                      |
     *        \  |  /                      Str
     *          PhiM                        |
     *                                    PhiM
     *
     * Is only allowed if the predecessor blocks have only one successor.
     */

    NEW_ARR_A(ir_node *, projMs, n);
    NEW_ARR_A(ir_node *, inM, n);
    NEW_ARR_A(ir_node *, inD, n);
    NEW_ARR_A(int, idx, n);
    /* Prepare: Collect all Store nodes.  We must do this
       first because we otherwise may lose a Store when exchanging its
       memory Proj.
     */
    for (i = n - 1; i >= 0; --i) {
        projMs[i] = get_Phi_pred(phi, i);
        assert(is_Proj(projMs[i]));

        store = get_Proj_pred(projMs[i]);
        info  = get_irn_link(store);

        inM[i] = get_Store_mem(store);
        inD[i] = get_Store_value(store);
        idx[i] = info->exc_idx;
    }
    block = get_nodes_block(phi);

    /* second step: create a new memory Phi */
    phiM = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inM, mode_M);

    /* third step: create a new data Phi */
    phiD = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inD, mode);
    /* rewire memory and kill the Stores */
    for (i = n - 1; i >= 0; --i) {
        ir_node *proj = projMs[i];

        if (is_Proj(proj)) {
            ir_node *store = get_Proj_pred(proj);
            exchange(proj, inM[i]);
            kill_node(store);
        }
    }

    /* fourth step: create the Store */
    store = new_rd_Store(db, current_ir_graph, block, phiM, ptr, phiD);
#ifdef DO_CACHEOPT
    co_set_irn_name(store, co_get_irn_ident(old_store));
#endif

    projM = new_rd_Proj(NULL, current_ir_graph, block, store, mode_M, pn_Store_M);

    info = get_ldst_info(store, &wenv->obst);
    info->projs[pn_Store_M] = projM;

    /* fifth step: repair exception flow */
    if (exc) {
        ir_node *projX = new_rd_Proj(NULL, current_ir_graph, block, store, mode_X, pn_Store_X_except);

        info->projs[pn_Store_X_except] = projX;
        info->exc_block                = exc;
        info->exc_idx                  = idx[0];

        for (i = 0; i < n; ++i) {
            set_Block_cfgpred(exc, idx[i], projX);
        }

        /* the exception block should be optimized as some inputs are identical now */

        res |= CF_CHANGED;
    }

    /* sixth step: replace old Phi */
    exchange(phi, projM);

    return res | DF_CHANGED;
}  /* optimize_phi */
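/*
 * Source-level intuition for optimize_phi() (a sketch): a diamond like
 *
 *     if (c) *p = v1; else *p = v2;
 *
 * is rewritten to the equivalent of
 *
 *     *p = c ? v1 : v2;
 *
 * i.e. one Store fed by a data Phi instead of one Store per predecessor.
 */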
/**
 * walker, do the optimizations
 */
static void do_load_store_optimize(ir_node *n, void *env) {
    walk_env_t *wenv = env;

    switch (get_irn_opcode(n)) {
    case iro_Load:
        wenv->changes |= optimize_load(n);
        break;

    case iro_Store:
        wenv->changes |= optimize_store(n);
        break;

    case iro_Phi:
        wenv->changes |= optimize_phi(n, wenv);
        break;

    default:
        ;
    }
}  /* do_load_store_optimize */
/** An SCC. */
typedef struct scc {
    ir_node *head;      /**< the head of the list */
} scc;

/** A node entry. */
typedef struct node_entry {
    unsigned DFSnum;    /**< the DFS number of this node */
    unsigned low;       /**< the low number of this node */
    ir_node  *header;   /**< the header of this node */
    int      in_stack;  /**< flag, set if the node is on the stack */
    ir_node  *next;     /**< link to the next node in the same scc */
    scc      *pscc;     /**< the scc of this node */
    unsigned POnum;     /**< the post order number for blocks */
} node_entry;

/** A loop entry. */
typedef struct loop_env {
    ir_phase ph;           /**< the phase object */
    ir_node  **stack;      /**< the node stack */
    int      tos;          /**< tos index */
    unsigned nextDFSnum;   /**< the current DFS number */
    unsigned POnum;        /**< current post order number */

    unsigned changes;      /**< a bitmask of graph changes */
} loop_env;
/**
 * Gets the node_entry of a node
 */
static node_entry *get_irn_ne(ir_node *irn, loop_env *env) {
    ir_phase   *ph = &env->ph;
    node_entry *e  = phase_get_irn_data(&env->ph, irn);

    if (! e) {
        e = phase_alloc(ph, sizeof(*e));
        memset(e, 0, sizeof(*e));
        phase_set_irn_data(ph, irn, e);
    }
    return e;
}  /* get_irn_ne */
/**
 * Push a node onto the stack.
 *
 * @param env  the loop environment
 * @param n    the node to push
 */
static void push(loop_env *env, ir_node *n) {
    node_entry *e;

    if (env->tos == ARR_LEN(env->stack)) {
        int nlen = ARR_LEN(env->stack) * 2;
        ARR_RESIZE(ir_node *, env->stack, nlen);
    }
    env->stack[env->tos++] = n;
    e = get_irn_ne(n, env);
    e->in_stack = 1;
}  /* push */
/**
 * pop a node from the stack
 *
 * @param env  the loop environment
 *
 * @return  The topmost node
 */
static ir_node *pop(loop_env *env) {
    ir_node *n = env->stack[--env->tos];
    node_entry *e = get_irn_ne(n, env);

    e->in_stack = 0;
    return n;
}  /* pop */
/**
 * Check if irn is a region constant.
 * The block of irn must strictly dominate the header block.
 *
 * @param irn           the node to check
 * @param header_block  the header block of the induction variable
 */
static int is_rc(ir_node *irn, ir_node *header_block) {
    ir_node *block = get_nodes_block(irn);

    return (block != header_block) && block_dominates(block, header_block);
}  /* is_rc */
typedef struct phi_entry phi_entry;
struct phi_entry {
    ir_node   *phi;    /**< A phi with a region const memory. */
    int       pos;     /**< The position of the region const memory */
    ir_node   *load;   /**< the newly created load for this phi */
    phi_entry *next;
};
/**
 * Move loads out of loops if possible.
 *
 * @param pscc  the loop described by an SCC
 * @param env   the loop environment
 */
static void move_loads_out_of_loops(scc *pscc, loop_env *env) {
    ir_node   *phi, *load, *next, *other, *next_other;
    ir_entity *ent;
    int       j;
    phi_entry *phi_list = NULL;

    /* collect all outer memories */
    for (phi = pscc->head; phi != NULL; phi = next) {
        node_entry *ne = get_irn_ne(phi, env);
        next = ne->next;

        /* check all memory Phi's */
        if (! is_Phi(phi))
            continue;

        assert(get_irn_mode(phi) == mode_M && "DFS returned non-memory Phi");

        for (j = get_irn_arity(phi) - 1; j >= 0; --j) {
            ir_node    *pred = get_irn_n(phi, j);
            node_entry *pe   = get_irn_ne(pred, env);

            if (pe->pscc != ne->pscc) {
                /* not in the same SCC, is region const */
                phi_entry *pe = phase_alloc(&env->ph, sizeof(*pe));

                pe->phi  = phi;
                pe->pos  = j;
                pe->next = phi_list;
                phi_list = pe;
            }
        }
    }
    /* no Phis no fun */
    assert(phi_list != NULL && "DFS found a loop without Phi");

    for (load = pscc->head; load; load = next) {
        ir_mode *load_mode;
        node_entry *ne = get_irn_ne(load, env);
        next = ne->next;

        if (is_Load(load)) {
            ldst_info_t *info = get_irn_link(load);
            ir_node *ptr = get_Load_ptr(load);

            /* for now, we cannot handle Loads with exceptions */
            if (info->projs[pn_Load_res] == NULL || info->projs[pn_Load_X_regular] != NULL || info->projs[pn_Load_X_except] != NULL)
                continue;

            /* for now, we can only handle Load(Global) */
            if (! is_Global(ptr))
                continue;
            ent = get_Global_entity(ptr);
            load_mode = get_Load_mode(load);
            for (other = pscc->head; other != NULL; other = next_other) {
                node_entry *ne = get_irn_ne(other, env);
                next_other = ne->next;

                if (is_Store(other)) {
                    ir_alias_relation rel = get_alias_relation(
                        current_ir_graph,
                        get_Store_ptr(other),
                        get_irn_mode(get_Store_value(other)),
                        ptr, load_mode);
                    /* if there might be an alias, we cannot pass this Store */
                    if (rel != ir_no_alias)
                        break;
                }
                /* only pure Calls are allowed here, so ignore them */
            }
            if (other == NULL) {
                ldst_info_t *ninfo;
                phi_entry   *pe;
                dbg_info    *db;

                /* for now, we cannot handle more than one input */
                if (phi_list->next != NULL)
                    return;

                /* yep, no aliasing Store found, Load can be moved */
                DB((dbg, LEVEL_1, "  Found a Load that could be moved: %+F\n", load));

                db = get_irn_dbg_info(load);
                for (pe = phi_list; pe != NULL; pe = pe->next) {
                    int     pos   = pe->pos;
                    ir_node *phi  = pe->phi;
                    ir_node *blk  = get_nodes_block(phi);
                    ir_node *pred = get_Block_cfgpred_block(blk, pos);
                    ir_node *irn, *mem;

                    pe->load = irn = new_rd_Load(db, current_ir_graph, pred, get_Phi_pred(phi, pos), ptr, load_mode);
                    ninfo = get_ldst_info(irn, phase_obst(&env->ph));

                    ninfo->projs[pn_Load_M] = mem = new_r_Proj(current_ir_graph, pred, irn, mode_M, pn_Load_M);
                    set_Phi_pred(phi, pos, mem);

                    ninfo->projs[pn_Load_res] = new_r_Proj(current_ir_graph, pred, irn, load_mode, pn_Load_res);

                    DB((dbg, LEVEL_1, "  Created %+F in %+F\n", irn, pred));
                }

                /* now kill the old Load */
                exchange(info->projs[pn_Load_M], get_Load_mem(load));
                exchange(info->projs[pn_Load_res], ninfo->projs[pn_Load_res]);

                env->changes |= DF_CHANGED;
            }
        }
    }
}  /* move_loads_out_of_loops */
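/*
 * Intuition (a sketch): for a global g that no Store inside the loop may
 * alias,
 *
 *     while (...) { ... x = g; ... }
 *
 * is effectively rewritten to
 *
 *     t = g;                          // Load recreated in the pre-block
 *     while (...) { ... x = t; ... }
 *
 * The new Load feeds the memory Phi's loop-entry input; the old Load's
 * Projs are rewired and the old Load dies.
 */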
/**
 * Process a loop SCC.
 *
 * @param pscc  the SCC
 * @param env   the loop environment
 */
static void process_loop(scc *pscc, loop_env *env) {
    ir_node *irn, *next, *header = NULL;
    node_entry *b, *h = NULL;
    int j, only_phi, num_outside, process = 0;
    ir_node *out_rc;

    /* find the header block for this scc */
    for (irn = pscc->head; irn; irn = next) {
        node_entry *e = get_irn_ne(irn, env);
        ir_node *block = get_nodes_block(irn);

        next = e->next;
        b = get_irn_ne(block, env);
        if (header != NULL) {
            if (h->POnum < b->POnum) {
                header = block;
                h      = b;
            }
        } else {
            header = block;
            h      = b;
        }
    }

    /* check if this scc contains only Phi, Load or Store nodes */
    only_phi    = 1;
    num_outside = 0;
    out_rc      = NULL;
    for (irn = pscc->head; irn; irn = next) {
        node_entry *e = get_irn_ne(irn, env);

        next = e->next;
        switch (get_irn_opcode(irn)) {
        case iro_Call:
            if (is_Call_pure(irn)) {
                /* pure calls can be treated like loads */
                only_phi = 0;
                break;
            }
            /* non-pure calls must be handled like may-alias Stores */
            goto fail;
        case iro_CopyB:
            /* cannot handle CopyB yet */
            goto fail;
        case iro_Load:
            process = 1;
            if (get_Load_volatility(irn) == volatility_is_volatile) {
                /* cannot handle loops with volatile Loads */
                goto fail;
            }
            only_phi = 0;
            break;
        case iro_Store:
            if (get_Store_volatility(irn) == volatility_is_volatile) {
                /* cannot handle loops with volatile Stores */
                goto fail;
            }
            only_phi = 0;
            break;
        case iro_Phi:
            for (j = get_irn_arity(irn) - 1; j >= 0; --j) {
                ir_node *pred  = get_irn_n(irn, j);
                node_entry *pe = get_irn_ne(pred, env);

                if (pe->pscc != e->pscc) {
                    /* not in the same SCC, must be a region const */
                    if (! is_rc(pred, header)) {
                        /* not a memory loop */
                        goto fail;
                    }
                    if (out_rc == NULL) {
                        out_rc = pred;
                        ++num_outside;
                    } else if (out_rc != pred) {
                        ++num_outside;
                    }
                }
            }
            break;
        default:
            only_phi = 0;
            break;
        }
    }
    if (! process)
        goto fail;

    /* found a memory loop */
    DB((dbg, LEVEL_2, "  Found a memory loop:\n  "));
    if (only_phi && num_outside == 1) {
        /* a phi cycle with only one real predecessor can be collapsed */
        DB((dbg, LEVEL_2, "  Found an USELESS Phi cycle:\n  "));
        for (irn = pscc->head; irn; irn = next) {
            node_entry *e = get_irn_ne(irn, env);
            next = e->next;
            exchange(irn, out_rc);
        }
        env->changes |= DF_CHANGED;
        return;
    }

    /* set the header for every node in this scc */
    for (irn = pscc->head; irn; irn = next) {
        node_entry *e = get_irn_ne(irn, env);
        e->header = header;
        next = e->next;
        DB((dbg, LEVEL_2, " %+F,", irn));
    }
    DB((dbg, LEVEL_2, "\n"));

    move_loads_out_of_loops(pscc, env);
fail:
    ;
}  /* process_loop */
/**
 * Process an SCC.
 *
 * @param pscc  the SCC
 * @param env   the loop environment
 */
static void process_scc(scc *pscc, loop_env *env) {
    ir_node *head = pscc->head;
    node_entry *e = get_irn_ne(head, env);

#ifdef DEBUG_libfirm
    {
        ir_node *irn, *next;

        DB((dbg, LEVEL_4, " SCC at %p:\n ", pscc));
        for (irn = pscc->head; irn; irn = next) {
            node_entry *e = get_irn_ne(irn, env);

            next = e->next;

            DB((dbg, LEVEL_4, " %+F,", irn));
        }
        DB((dbg, LEVEL_4, "\n"));
    }
#endif

    if (e->next != NULL) {
        /* this SCC has more than one member */
        process_loop(pscc, env);
    }
}  /* process_scc */
/**
 * Do Tarjan's SCC algorithm and drive load/store optimization.
 *
 * @param irn  start at this node
 * @param env  the loop environment
 */
static void dfs(ir_node *irn, loop_env *env)
{
    int i, n;
    node_entry *node = get_irn_ne(irn, env);

    mark_irn_visited(irn);

    node->DFSnum = env->nextDFSnum++;
    node->low    = node->DFSnum;
    push(env, irn);
    /* handle the predecessors */
    if (is_Phi(irn) || is_Sync(irn)) {
        n = get_irn_arity(irn);
        for (i = 0; i < n; ++i) {
            ir_node *pred = get_irn_n(irn, i);
            node_entry *o = get_irn_ne(pred, env);

            if (irn_not_visited(pred)) {
                dfs(pred, env);
                node->low = MIN(node->low, o->low);
            }
            if (o->DFSnum < node->DFSnum && o->in_stack)
                node->low = MIN(o->DFSnum, node->low);
        }
    } else if (is_fragile_op(irn)) {
        ir_node *pred = get_fragile_op_mem(irn);
        node_entry *o = get_irn_ne(pred, env);

        if (irn_not_visited(pred)) {
            dfs(pred, env);
            node->low = MIN(node->low, o->low);
        }
        if (o->DFSnum < node->DFSnum && o->in_stack)
            node->low = MIN(o->DFSnum, node->low);
    } else if (is_Proj(irn)) {
        ir_node *pred = get_Proj_pred(irn);
        node_entry *o = get_irn_ne(pred, env);

        if (irn_not_visited(pred)) {
            dfs(pred, env);
            node->low = MIN(node->low, o->low);
        }
        if (o->DFSnum < node->DFSnum && o->in_stack)
            node->low = MIN(o->DFSnum, node->low);
    } else {
        /* IGNORE predecessors */
    }

    if (node->low == node->DFSnum) {
        scc *pscc = phase_alloc(&env->ph, sizeof(*pscc));
        ir_node *x;

        pscc->head = NULL;
        do {
            node_entry *e;

            x = pop(env);
            e = get_irn_ne(x, env);
            e->pscc    = pscc;
            e->next    = pscc->head;
            pscc->head = x;
        } while (x != irn);

        process_scc(pscc, env);
    }
}  /* dfs */
/**
 * Do the DFS on the memory edges of a graph.
 *
 * @param irg  the graph to process
 * @param env  the loop environment
 */
static void do_dfs(ir_graph *irg, loop_env *env) {
    ir_graph *rem = current_ir_graph;
    ir_node  *endblk, *end;
    int      i;

    current_ir_graph = irg;
    inc_irg_visited(irg);

    /* visit all memory nodes */
    endblk = get_irg_end_block(irg);
    for (i = get_Block_n_cfgpreds(endblk) - 1; i >= 0; --i) {
        ir_node *pred = get_Block_cfgpred(endblk, i);

        pred = skip_Proj(pred);
        if (is_Return(pred))
            dfs(get_Return_mem(pred), env);
        else if (is_Raise(pred))
            dfs(get_Raise_mem(pred), env);
        else if (is_fragile_op(pred))
            dfs(get_fragile_op_mem(pred), env);
        else {
            assert(0 && "Unknown EndBlock predecessor");
        }
    }

    /* visit the keep-alives */
    end = get_irg_end(irg);
    for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
        ir_node *ka = get_End_keepalive(end, i);

        if (is_Phi(ka) && irn_not_visited(ka))
            dfs(ka, env);
    }
    current_ir_graph = rem;
}  /* do_dfs */
/**
 * Initialize new phase data. We always do this explicitly, so return NULL here.
 */
static void *init_loop_data(ir_phase *ph, const ir_node *irn, void *data) {
    (void) ph;
    (void) irn;
    (void) data;
    return NULL;
}  /* init_loop_data */
/**
 * Optimize Loads/Stores in loops.
 *
 * @param irg  the graph
 */
static int optimize_loops(ir_graph *irg) {
    loop_env env;

    env.stack      = NEW_ARR_F(ir_node *, 128);
    env.tos        = 0;
    env.nextDFSnum = 0;
    env.POnum      = 0;
    env.changes    = 0;
    phase_init(&env.ph, "ldstopt", irg, PHASE_DEFAULT_GROWTH, init_loop_data, NULL);

    /* calculate the SCC's and drive loop optimization. */
    do_dfs(irg, &env);

    DEL_ARR_F(env.stack);
    phase_free(&env.ph);

    return env.changes;
}  /* optimize_loops */
/*
 * do the load store optimization
 */
void optimize_load_store(ir_graph *irg) {
    walk_env_t env;

    FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");

    assert(get_irg_phase_state(irg) != phase_building);
    assert(get_irg_pinned(irg) != op_pin_state_floats &&
        "LoadStore optimization needs pinned graph");

    /* we need landing pads */
    remove_critical_cf_edges(irg);

    /* for Phi optimization post-dominators are needed ... */
    assure_postdoms(irg);

    if (get_opt_alias_analysis()) {
        assure_irg_address_taken_computed(irg);
        assure_irp_globals_address_taken_computed();
    }

    obstack_init(&env.obst);
    env.changes = 0;

    /* init the links, then collect Loads/Stores/Projs in lists */
    master_visited = 0;
    irg_walk_graph(irg, firm_clear_link, collect_nodes, &env);

    /* now we have collected enough information, optimize */
    irg_walk_graph(irg, NULL, do_load_store_optimize, &env);

    env.changes |= optimize_loops(irg);

    obstack_free(&env.obst, NULL);

    /* Handle graph state */
    if (env.changes) {
        set_irg_outs_inconsistent(irg);
    }

    if (env.changes & CF_CHANGED) {
        /* is this really needed: Yes, control flow changed, blocks might
           have Bad() predecessors. */
        set_irg_doms_inconsistent(irg);
    }
}  /* optimize_load_store */