X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fopt%2Fldstopt.c;h=ce87077cee99c0da417db2cbf5a88973f94542c6;hb=6d3394bf4a3f3c2868634837d70389115cdca589;hp=6ecabaae64760be85a57eeefde67a62edda6e09d;hpb=df8a7c32fa1de6a6c64b69f01e2748efb7ce1c11;p=libfirm diff --git a/ir/opt/ldstopt.c b/ir/opt/ldstopt.c index 6ecabaae6..ce87077ce 100644 --- a/ir/opt/ldstopt.c +++ b/ir/opt/ldstopt.c @@ -23,12 +23,9 @@ * @author Michael Beck * @version $Id$ */ -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif +#include "config.h" #include -#include #include "iroptimize.h" #include "irnode_t.h" @@ -43,13 +40,12 @@ #include "dbginfo_t.h" #include "iropt_dbg.h" #include "irflag_t.h" -#include "array.h" +#include "array_t.h" #include "irhooks.h" #include "iredges.h" #include "irtools.h" #include "opt_polymorphy.h" #include "irmemory.h" -#include "xmalloc.h" #include "irphase_t.h" #include "irgopt.h" #include "debug.h" @@ -258,10 +254,7 @@ static ir_entity *find_constant_entity(ir_node *ptr) { for (;;) { if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) { - ir_entity *ent = get_SymConst_entity(ptr); - if (variability_constant == get_entity_variability(ent)) - return ent; - return NULL; + return get_SymConst_entity(ptr); } else if (is_Sel(ptr)) { ir_entity *ent = get_Sel_entity(ptr); ir_type *tp = get_entity_owner(ent); @@ -365,7 +358,7 @@ static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) { if (is_SymConst(ptr)) { /* a SymConst. If the depth is 0, this is an access to a global * entity and we don't need a component path, else we know - * at least it's length. + * at least its length. */ assert(get_SymConst_kind(ptr) == symconst_addr_ent); root = get_SymConst_entity(ptr); @@ -387,11 +380,12 @@ static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) { set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0)); } } else if (is_Add(ptr)) { - ir_node *l = get_Add_left(ptr); - ir_node *r = get_Add_right(ptr); - ir_mode *mode; + ir_node *l = get_Add_left(ptr); + ir_node *r = get_Add_right(ptr); + ir_mode *mode = get_irn_mode(ptr); + tarval *tmp; - if (is_Const(r)) { + if (is_Const(r) && get_irn_mode(l) == mode) { ptr = l; tv = get_Const_tarval(r); } else { @@ -400,6 +394,7 @@ static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) { } ptr_arith: mode = get_tarval_mode(tv); + tmp = tv; /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */ if (is_Sel(ptr)) { @@ -420,10 +415,10 @@ ptr_arith: size = get_type_size_bytes(get_entity_type(ent)); sz = new_tarval_from_long(size, mode); - tv_index = tarval_div(tv, sz); - tv = tarval_mod(tv, sz); + tv_index = tarval_div(tmp, sz); + tmp = tarval_mod(tmp, sz); - if (tv_index == tarval_bad || tv == tarval_bad) + if (tv_index == tarval_bad || tmp == tarval_bad) return NULL; assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented"); @@ -443,7 +438,7 @@ ptr_arith: /* ok, bounds check finished */ ++idx; } - if (! tarval_is_null(tv)) { + if (! tarval_is_null(tmp)) { /* access to some struct/union member */ return NULL; } @@ -501,7 +496,8 @@ ptr_arith: * valid, if the graph is in phase_high and _no_ address computation is used. 
*/ static compound_graph_path *get_accessed_path(ir_node *ptr) { - return rec_get_accessed_path(ptr, 0); + compound_graph_path *gr = rec_get_accessed_path(ptr, 0); + return gr; } /* get_accessed_path */ typedef struct path_entry { @@ -539,7 +535,7 @@ static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) { continue; } } - if (p->index >= n) + if (p->index >= (int) n) return NULL; initializer = get_initializer_compound_value(initializer, p->index); @@ -692,7 +688,7 @@ static ir_node *find_compound_ent_value(ir_node *ptr) { static void reduce_adr_usage(ir_node *ptr); /** - * Update a Load that may lost it's usage. + * Update a Load that may have lost its users. */ static void handle_load_update(ir_node *load) { ldst_info_t *info = get_irn_link(load); @@ -705,7 +701,7 @@ static void handle_load_update(ir_node *load) { ir_node *ptr = get_Load_ptr(load); ir_node *mem = get_Load_mem(load); - /* a Load which value is neither used nor exception checked, remove it */ + /* a Load whose value is neither used nor exception checked, remove it */ exchange(info->projs[pn_Load_M], mem); if (info->projs[pn_Load_X_regular]) exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load))); @@ -715,7 +711,7 @@ static void handle_load_update(ir_node *load) { } /* handle_load_update */ /** - * A Use of an address node is vanished. Check if this was a Proj + * A use of an address node has vanished. Check if this was a Proj * node and update the counters. */ static void reduce_adr_usage(ir_node *ptr) { @@ -728,7 +724,7 @@ static void reduce_adr_usage(ir_node *ptr) { ldst_info_t *info = get_irn_link(pred); info->projs[get_Proj_proj(ptr)] = NULL; - /* this node lost it's result proj, handle that */ + /* this node lost its result proj, handle that */ handle_load_update(pred); } } @@ -773,41 +769,77 @@ static unsigned is_Call_pure(ir_node *call) { return (prop & (mtp_property_const|mtp_property_pure)) != 0; } /* is_Call_pure */ -static ir_node *get_base_ptr(ir_node *ptr) +static ir_node *get_base_and_offset(ir_node *ptr, long *pOffset) { - while (is_Add(ptr) && is_Const(get_Add_right(ptr))) { - ptr = get_Add_left(ptr); - } + ir_mode *mode = get_irn_mode(ptr); + long offset = 0; - return ptr; -} - -static long get_base_offset(ir_node *ptr) -{ /* TODO: long might not be enough, we should probably use some tarval thingy... */ - long offset = 0; - while (is_Add(ptr)) { - ir_node *right = get_Add_right(ptr); - if (!is_Const(right)) + for (;;) { + if (is_Add(ptr)) { + ir_node *l = get_Add_left(ptr); + ir_node *r = get_Add_right(ptr); + + if (get_irn_mode(l) != mode || !is_Const(r)) + break; + + offset += get_tarval_long(get_Const_tarval(r)); + ptr = l; + } else if (is_Sub(ptr)) { + ir_node *l = get_Sub_left(ptr); + ir_node *r = get_Sub_right(ptr); + + if (get_irn_mode(l) != mode || !is_Const(r)) + break; + + offset -= get_tarval_long(get_Const_tarval(r)); + ptr = l; + } else if (is_Sel(ptr)) { + ir_entity *ent = get_Sel_entity(ptr); + ir_type *tp = get_entity_owner(ent); + + if (is_Array_type(tp)) { + int size; + ir_node *index; + + /* only one dimensional arrays yet */ + if (get_Sel_n_indexs(ptr) != 1) + break; + index = get_Sel_index(ptr, 0); + if (! 
is_Const(index))
+					break;
+
+				tp = get_entity_type(ent);
+				if (get_type_state(tp) != layout_fixed)
+					break;
+
+				size    = get_type_size_bytes(tp);
+				offset += size * get_tarval_long(get_Const_tarval(index));
+			} else {
+				if (get_type_state(tp) != layout_fixed)
+					break;
+				offset += get_entity_offset(ent);
+			}
+			ptr = get_Sel_ptr(ptr);
+		} else
 			break;
-		offset += get_tarval_long(get_Const_tarval(right));
-		ptr = get_Add_left(ptr);
 	}
-	return offset;
+	*pOffset = offset;
+	return ptr;
 }
 
-static int try_load_store(ir_node *load,
+static int try_load_after_store(ir_node *load,
 		ir_node *load_base_ptr,
 		long load_offset,
 		ir_node *store)
 {
 	ldst_info_t *info;
 	ir_node *store_ptr      = get_Store_ptr(store);
-	ir_node *store_base_ptr = get_base_ptr(store_ptr);
+	long     store_offset;
+	ir_node *store_base_ptr = get_base_and_offset(store_ptr, &store_offset);
 	ir_node *store_value;
 	ir_mode *store_mode;
 	ir_node *load_ptr;
 	ir_mode *load_mode;
-	long     store_offset   = get_base_offset(store_ptr);
 	long     load_mode_len;
 	long     store_mode_len;
 	long     delta;
@@ -816,35 +848,41 @@
 	if (load_base_ptr != store_base_ptr)
 		return 0;
 
-	load_mode = get_Load_mode(load);
-	load_mode_len = get_mode_size_bytes(load_mode);
+	load_mode      = get_Load_mode(load);
+	load_mode_len  = get_mode_size_bytes(load_mode);
 	store_mode     = get_irn_mode(get_Store_value(store));
 	store_mode_len = get_mode_size_bytes(store_mode);
+	delta          = load_offset - store_offset;
+	store_value    = get_Store_value(store);
 
-	delta = load_offset - store_offset;
-	if (delta < 0 || delta >= store_mode_len)
-		return 0;
+	if (delta != 0 || store_mode != load_mode) {
+		if (delta < 0 || delta + load_mode_len > store_mode_len)
+			return 0;
 
-	if (store_mode_len - delta > load_mode_len)
-		return 0;
+		if (get_mode_arithmetic(store_mode) != irma_twos_complement ||
+			get_mode_arithmetic(load_mode) != irma_twos_complement)
+			return 0;
 
-	store_value = get_Store_value(store);
-	DBG_OPT_RAW(load, store_value);
-	/* produce a shift to adjust offset delta */
-	if (delta > 0) {
-		ir_node *cnst = new_r_Const_long(current_ir_graph,
-				get_irg_start_block(current_ir_graph), mode_Iu, delta * 8);
-		store_value = new_r_Shr(current_ir_graph, get_nodes_block(load),
-				store_value, cnst, store_mode);
-	}
+		/* produce a shift to adjust offset delta */
+		if (delta > 0) {
+			ir_node *cnst;
 
-	/* add an convert if needed */
-	if (store_mode != load_mode) {
-		store_value = new_r_Conv(current_ir_graph, get_nodes_block(load),
-				store_value, load_mode);
+			/* FIXME: only true for little endian */
+			cnst        = new_Const_long(mode_Iu, delta * 8);
+			store_value = new_r_Shr(current_ir_graph, get_nodes_block(load),
+					store_value, cnst, store_mode);
+		}
+
+		/* add a convert if needed */
+		if (store_mode != load_mode) {
+			store_value = new_r_Conv(current_ir_graph, get_nodes_block(load),
+					store_value, load_mode);
+		}
 	}
 
+	DBG_OPT_RAW(load, store_value);
+
 	info = get_irn_link(load);
 	if (info->projs[pn_Load_M])
 		exchange(info->projs[pn_Load_M], get_Load_mem(load));
@@ -880,14 +918,12 @@
  * INC_MASTER() must be called before diving into
  */
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
-	unsigned res = 0;
+	unsigned    res = 0;
 	ldst_info_t *info = get_irn_link(load);
-	ir_node *pred;
-	ir_node *ptr = get_Load_ptr(load);
-	ir_node *mem = get_Load_mem(load);
-	ir_mode *load_mode = get_Load_mode(load);
-	ir_node *base_ptr = get_base_ptr(ptr);
-	long load_offset = get_base_offset(ptr);
+	ir_node     *pred;
+	ir_node     *ptr = get_Load_ptr(load);
+	ir_node     *mem = 
get_Load_mem(load); + ir_mode *load_mode = get_Load_mode(load); for (pred = curr; load != pred; ) { ldst_info_t *pred_info = get_irn_link(pred); @@ -909,11 +945,12 @@ static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) { && info->projs[pn_Load_X_except] == NULL) || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred))) { - int changes - = try_load_store(load, base_ptr, load_offset, pred); - if (changes != 0) { + long load_offset; + ir_node *base_ptr = get_base_and_offset(ptr, &load_offset); + int changes = try_load_after_store(load, base_ptr, load_offset, pred); + + if (changes != 0) return res | changes; - } } else if (is_Load(pred) && get_Load_ptr(pred) == ptr && can_use_stored_value(get_Load_mode(pred), load_mode)) { /* @@ -1047,9 +1084,10 @@ ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c) { static unsigned optimize_load(ir_node *load) { ldst_info_t *info = get_irn_link(load); - ir_node *mem, *ptr, *value; - ir_entity *ent; - unsigned res = 0; + ir_node *mem, *ptr, *value; + ir_entity *ent; + long dummy; + unsigned res = 0; /* do NOT touch volatile loads for now */ if (get_Load_volatility(load) == volatility_is_volatile) @@ -1087,7 +1125,7 @@ static unsigned optimize_load(ir_node *load) } /* The mem of the Load. Must still be returned after optimization. */ - mem = get_Load_mem(load); + mem = get_Load_mem(load); if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) { /* a Load which value is neither used nor exception checked, remove it */ @@ -1110,53 +1148,48 @@ static unsigned optimize_load(ir_node *load) value = NULL; /* check if we can determine the entity that will be loaded */ ent = find_constant_entity(ptr); - if (ent != NULL) { - if ((allocation_static == get_entity_allocation(ent)) && - (visibility_external_allocated != get_entity_visibility(ent))) { - /* a static allocation that is not external: there should be NO exception - * when loading even if we cannot replace the load itself. */ - - /* no exception, clear the info field as it might be checked later again */ - if (info->projs[pn_Load_X_except]) { - exchange(info->projs[pn_Load_X_except], new_Bad()); - info->projs[pn_Load_X_except] = NULL; - res |= CF_CHANGED; - } - if (info->projs[pn_Load_X_regular]) { - exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load))); - info->projs[pn_Load_X_regular] = NULL; - res |= CF_CHANGED; - } + if (ent != NULL && + allocation_static == get_entity_allocation(ent) && + visibility_external_allocated != get_entity_visibility(ent)) { + /* a static allocation that is not external: there should be NO exception + * when loading even if we cannot replace the load itself. */ + + /* no exception, clear the info field as it might be checked later again */ + if (info->projs[pn_Load_X_except]) { + exchange(info->projs[pn_Load_X_except], new_Bad()); + info->projs[pn_Load_X_except] = NULL; + res |= CF_CHANGED; + } + if (info->projs[pn_Load_X_regular]) { + exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load))); + info->projs[pn_Load_X_regular] = NULL; + res |= CF_CHANGED; + } - if (variability_constant == get_entity_variability(ent)) { - if (is_atomic_entity(ent)) { - /* Might not be atomic after - lowering of Sels. In this - case we could also load, but - it's more complicated. 
*/
-					/* more simpler case: we load the content of a constant value:
-					 * replace it by the constant itself
-					 */
-					value = get_atomic_ent_value(ent);
-				} else {
-					if (ent->has_initializer) {
-						/* new style initializer */
-						value = find_compound_ent_value(ptr);
-					} else {
-						/* old style initializer */
-						compound_graph_path *path = get_accessed_path(ptr);
-
-						if (path != NULL) {
-							assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
-
-							value = get_compound_ent_value_by_path(ent, path);
-							free_compound_graph_path(path);
-						}
-					}
+		if (variability_constant == get_entity_variability(ent)) {
+			if (is_atomic_entity(ent)) {
+				/* Might not be atomic after lowering of Sels. In this case we
+				 * could also load, but it's more complicated. */
+				/* simpler case: we load the content of a constant value:
+				 * replace it by the constant itself */
+				value = get_atomic_ent_value(ent);
+			} else if (ent->has_initializer) {
+				/* new style initializer */
+				value = find_compound_ent_value(ptr);
+			} else {
+				/* old style initializer */
+				compound_graph_path *path = get_accessed_path(ptr);
+
+				if (path != NULL) {
+					assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
+
+					value = get_compound_ent_value_by_path(ent, path);
+					DB((dbg, LEVEL_1, "  Constant access at %F%F resulted in %+F\n", ent, path, value));
+					free_compound_graph_path(path);
 				}
-				if (value != NULL)
-					value = can_replace_load_by_const(load, value);
 			}
+			if (value != NULL)
+				value = can_replace_load_by_const(load, value);
 		}
 	}
 }
@@ -1186,8 +1219,8 @@ static unsigned optimize_load(ir_node *load)
 	}
 
 	/* Check, if the address of this load is used more than once.
-	 * If not, this load cannot be removed in any case. */
-	if (get_irn_n_uses(ptr) <= 1 && get_irn_n_uses(get_base_ptr(ptr)) <= 1)
+	 * If not, this load cannot be removed in any case. */
+	if (get_irn_n_uses(ptr) <= 1 && get_irn_n_uses(get_base_and_offset(ptr, &dummy)) <= 1)
 		return res;
 
 	/*
@@ -1211,6 +1244,21 @@ static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
 	return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
 }  /* is_completely_overwritten */
 
+/**
+ * Check whether small is a part of large (starting at same address).
+ */
+static int is_partially_same(ir_node *small, ir_node *large)
+{
+	ir_mode *sm = get_irn_mode(small);
+	ir_mode *lm = get_irn_mode(large);
+
+	/* FIXME: Check endianness */
+	return is_Conv(small) && get_Conv_op(small) == large
+	    && get_mode_size_bytes(sm) < get_mode_size_bytes(lm)
+	    && get_mode_arithmetic(sm) == irma_twos_complement
+	    && get_mode_arithmetic(lm) == irma_twos_complement;
+}  /* is_partially_same */
+
 /**
  * follow the memory chain as long as there are only Loads and alias-free Stores.
  *
@@ -1235,24 +1283,55 @@ static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
 		 * if the pointers are identical, they refer to the same object.
 		 * This is only true in strongly typed languages, not in C where the following
 		 * is possible *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
-		 * However, if the mode that is written have a bigger  or equal size the the old
-		 * one, the old value is completely overwritten and can be killed ...
+		 * However, if the size of the mode that is written is bigger than or equal to
+		 * the size of the old one, the old value is completely overwritten and can be
+		 * killed ... 
*/ if (is_Store(pred) && get_Store_ptr(pred) == ptr && - get_nodes_MacroBlock(pred) == mblk && - is_completely_overwritten(get_irn_mode(get_Store_value(pred)), mode)) { + get_nodes_MacroBlock(pred) == mblk) { /* * a Store after a Store in the same MacroBlock -- a write after write. - * We may remove the first Store, if it does not have an exception handler. + */ + + /* + * We may remove the first Store, if the old value is completely + * overwritten or the old value is a part of the new value, + * and if it does not have an exception handler. * * TODO: What, if both have the same exception handler ??? */ - if (get_Store_volatility(pred) != volatility_is_volatile && !pred_info->projs[pn_Store_X_except]) { - DBG_OPT_WAW(pred, store); - exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred)); - kill_node(pred); - reduce_adr_usage(ptr); - return DF_CHANGED; + if (get_Store_volatility(pred) != volatility_is_volatile + && !pred_info->projs[pn_Store_X_except]) { + ir_node *predvalue = get_Store_value(pred); + ir_mode *predmode = get_irn_mode(predvalue); + + if(is_completely_overwritten(predmode, mode) + || is_partially_same(predvalue, value)) { + DBG_OPT_WAW(pred, store); + exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred)); + kill_node(pred); + reduce_adr_usage(ptr); + return DF_CHANGED; + } + } + + /* + * We may remove the Store, if the old value already contains + * the new value, and if it does not have an exception handler. + * + * TODO: What, if both have the same exception handler ??? + */ + if (get_Store_volatility(store) != volatility_is_volatile + && !info->projs[pn_Store_X_except]) { + ir_node *predvalue = get_Store_value(pred); + + if(is_partially_same(value, predvalue)) { + DBG_OPT_WAW(pred, store); + exchange(info->projs[pn_Store_M], mem); + kill_node(store); + reduce_adr_usage(ptr); + return DF_CHANGED; + } } } else if (is_Load(pred) && get_Load_ptr(pred) == ptr && value == pred_info->projs[pn_Load_res]) { @@ -1314,18 +1393,61 @@ static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) { return res; } /* follow_Mem_chain_for_Store */ +/** find entity used as base for an address calculation */ +static ir_entity *find_entity(ir_node *ptr) +{ + switch(get_irn_opcode(ptr)) { + case iro_SymConst: + return get_SymConst_entity(ptr); + case iro_Sel: { + ir_node *pred = get_Sel_ptr(ptr); + if (get_irg_frame(get_irn_irg(ptr)) == pred) + return get_Sel_entity(ptr); + + return find_entity(pred); + } + case iro_Sub: + case iro_Add: { + ir_node *left = get_binop_left(ptr); + ir_node *right; + if (mode_is_reference(get_irn_mode(left))) + return find_entity(left); + right = get_binop_right(ptr); + if (mode_is_reference(get_irn_mode(right))) + return find_entity(right); + return NULL; + } + default: + return NULL; + } +} + /** * optimize a Store * * @param store the Store node */ static unsigned optimize_store(ir_node *store) { - ir_node *ptr, *mem; + ir_node *ptr; + ir_node *mem; + ir_entity *entity; if (get_Store_volatility(store) == volatility_is_volatile) return 0; - ptr = get_Store_ptr(store); + ptr = get_Store_ptr(store); + entity = find_entity(ptr); + + /* a store to an entity which is never read is unnecessary */ + if (entity != NULL && !(get_entity_usage(entity) & ir_usage_read)) { + ldst_info_t *info = get_irn_link(store); + if (info->projs[pn_Store_X_except] == NULL) { + exchange(info->projs[pn_Store_M], get_Store_mem(store)); + kill_node(store); + reduce_adr_usage(ptr); + return DF_CHANGED; + } + } /* Check, if the address of this Store is used more than 
once. * If not, this Store cannot be removed in any case. */ @@ -1505,7 +1627,7 @@ static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv) } /* fourth step: create the Store */ - store = new_rd_Store(db, current_ir_graph, block, phiM, ptr, phiD); + store = new_rd_Store(db, current_ir_graph, block, phiM, ptr, phiD, 0); #ifdef DO_CACHEOPT co_set_irn_name(store, co_get_irn_ident(old_store)); #endif @@ -1756,7 +1878,7 @@ static void move_loads_out_of_loops(scc *pscc, loop_env *env) { ir_node *pred = get_Block_cfgpred_block(blk, pos); ir_node *irn, *mem; - pe->load = irn = new_rd_Load(db, current_ir_graph, pred, get_Phi_pred(phi, pos), ptr, load_mode); + pe->load = irn = new_rd_Load(db, current_ir_graph, pred, get_Phi_pred(phi, pos), ptr, load_mode, 0); ninfo = get_ldst_info(irn, phase_obst(&env->ph)); ninfo->projs[pn_Load_M] = mem = new_r_Proj(current_ir_graph, pred, irn, mode_M, pn_Load_M); @@ -1959,7 +2081,7 @@ static void dfs(ir_node *irn, loop_env *env) ir_node *pred = get_irn_n(irn, i); node_entry *o = get_irn_ne(pred, env); - if (irn_not_visited(pred)) { + if (!irn_visited(pred)) { dfs(pred, env); node->low = MIN(node->low, o->low); } @@ -1970,7 +2092,7 @@ static void dfs(ir_node *irn, loop_env *env) ir_node *pred = get_fragile_op_mem(irn); node_entry *o = get_irn_ne(pred, env); - if (irn_not_visited(pred)) { + if (!irn_visited(pred)) { dfs(pred, env); node->low = MIN(node->low, o->low); } @@ -1980,7 +2102,7 @@ static void dfs(ir_node *irn, loop_env *env) ir_node *pred = get_Proj_pred(irn); node_entry *o = get_irn_ne(pred, env); - if (irn_not_visited(pred)) { + if (!irn_visited(pred)) { dfs(pred, env); node->low = MIN(node->low, o->low); } @@ -2046,7 +2168,7 @@ static void do_dfs(ir_graph *irg, loop_env *env) { for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) { ir_node *ka = get_End_keepalive(end, i); - if (is_Phi(ka) && irn_not_visited(ka)) + if (is_Phi(ka) && !irn_visited(ka)) dfs(ka, env); } current_ir_graph = rem; @@ -2089,7 +2211,7 @@ static int optimize_loops(ir_graph *irg) { /* * do the load store optimization */ -void optimize_load_store(ir_graph *irg) { +int optimize_load_store(ir_graph *irg) { walk_env_t env; FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt"); @@ -2107,8 +2229,8 @@ void optimize_load_store(ir_graph *irg) { assure_postdoms(irg); if (get_opt_alias_analysis()) { - assure_irg_address_taken_computed(irg); - assure_irp_globals_address_taken_computed(); + assure_irg_entity_usage_computed(irg); + assure_irp_globals_entity_usage_computed(); } obstack_init(&env.obst); @@ -2128,6 +2250,7 @@ void optimize_load_store(ir_graph *irg) { /* Handle graph state */ if (env.changes) { set_irg_outs_inconsistent(irg); + set_irg_entity_usage_state(irg, ir_entity_usage_not_computed); } if (env.changes & CF_CHANGED) { @@ -2135,4 +2258,5 @@ void optimize_load_store(ir_graph *irg) { have Bad() predecessors. */ set_irg_doms_inconsistent(irg); } + return env.changes != 0; } /* optimize_load_store */