X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fopt%2Fopt_ldst.c;h=2e0274b72e911da847b947762f09aff6e3983996;hb=7ad86c1baab2fdceae2aa610bb584b30538cc5c6;hp=dcf7513cc5e92c4cf553cb3bfeb59d4a787b6c74;hpb=ce6161a7e42a48f7422b7babcc64d8ace18e2687;p=libfirm diff --git a/ir/opt/opt_ldst.c b/ir/opt/opt_ldst.c index dcf7513cc..2e0274b72 100644 --- a/ir/opt/opt_ldst.c +++ b/ir/opt/opt_ldst.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved. + * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved. * * This file is part of libFirm. * @@ -22,7 +22,6 @@ * @brief Dataflow driven Load/Store optimizations, uses some ideas from * VanDrunen's LEPRE * @author Michael Beck - * @version $Id$ */ #include "config.h" @@ -38,7 +37,7 @@ #include "irgopt.h" #include "iropt.h" #include "iroptimize.h" -#include "irnodemap.h" +#include "irnodehashmap.h" #include "raw_bitset.h" #include "debug.h" #include "error.h" @@ -90,7 +89,7 @@ struct memop_t { memop_t *next; /**< links to the next memory op in the block in forward order. */ memop_t *prev; /**< links to the previous memory op in the block in forward order. */ unsigned flags; /**< memop flags */ - ir_node *projs[MAX_PROJ]; /**< Projs of this memory op */ + ir_node *projs[MAX_PROJ+1]; /**< Projs of this memory op */ }; /** @@ -115,17 +114,16 @@ struct block_t { * Metadata for this pass. */ typedef struct ldst_env_t { - struct obstack obst; /**< obstack for temporary data */ - ir_nodemap_t adr_map; /**< Map addresses to */ + struct obstack obst; /**< obstack for temporary data */ + ir_nodehashmap_t adr_map; /**< Map addresses to */ block_t *forward; /**< Inverse post-order list of all blocks Start->End */ block_t *backward; /**< Inverse post-order list of all blocks End->Start */ - ir_node *start_bl; /**< start block of the current graph */ ir_node *end_bl; /**< end block of the current graph */ unsigned *curr_set; /**< current set of addresses */ memop_t **curr_id_2_memop; /**< current map of address ids to memops */ unsigned curr_adr_id; /**< number for address mapping */ unsigned n_mem_ops; /**< number of memory operations (Loads/Stores) */ - unsigned rbs_size; /**< size of all bitsets in bytes */ + size_t rbs_size; /**< size of all bitsets in bytes */ int max_cfg_preds; /**< maximum number of block cfg predecessors */ int changed; /**< Flags for changed graph state */ #ifdef DEBUG_libfirm @@ -170,7 +168,7 @@ static void dump_block_list(ldst_env *env) } DB((dbg, LEVEL_2, "\n}\n\n")); } -} /* dump_block_list */ +} /** * Dumps the current set. @@ -180,9 +178,9 @@ static void dump_block_list(ldst_env *env) */ static void dump_curr(block_t *bl, const char *s) { - unsigned end = env.rbs_size - 1; - unsigned pos; - int i; + size_t end = env.rbs_size - 1; + size_t pos; + int i; DB((dbg, LEVEL_2, "%s[%+F] = {", s, bl->block)); i = 0; @@ -197,7 +195,7 @@ static void dump_curr(block_t *bl, const char *s) i = (i + 1) & 3; } DB((dbg, LEVEL_2, "\n}\n")); -} /* dump_curr */ +} #else static void dump_block_list(ldst_env *env) @@ -217,14 +215,14 @@ static block_t *get_block_entry(const ir_node *block) assert(is_Block(block)); return (block_t*)get_irn_link(block); -} /* get_block_entry */ +} /** Get the memop entry for a memory operation node */ static memop_t *get_irn_memop(const ir_node *irn) { assert(! is_Block(irn)); return (memop_t*)get_irn_link(irn); -} /* get_irn_memop */ +} /** * Walk over the memory edges from definition to users. 
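A note on the `projs[MAX_PROJ]` -> `projs[MAX_PROJ+1]` change in the hunks above: the array is indexed by proj *number*, and MAX_PROJ is the largest valid number rather than the element count, so an access at pn == MAX_PROJ used to land one slot past the end. A minimal standalone sketch of the sizing rule (plain C; the MAX_PROJ value of 3 is made up, nothing here is libfirm code):

/* An array indexed by proj number needs MAX_PROJ + 1 slots,
 * because MAX_PROJ is the largest valid index, not the count. */
#include <stdio.h>

#define MAX_PROJ 3                    /* hypothetical largest proj number */

int main(void)
{
    int projs[MAX_PROJ + 1] = { 0 };  /* valid indices: 0 .. MAX_PROJ */

    /* with the old size of MAX_PROJ, this store would write one
     * element past the end of the array: */
    projs[MAX_PROJ] = 1;

    printf("projs[%d] = %d\n", MAX_PROJ, projs[MAX_PROJ]);
    return 0;
}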
@@ -237,7 +235,6 @@ static memop_t *get_irn_memop(const ir_node *irn) */ static void walk_memory(ir_node *irn, irg_walk_func *pre, irg_walk_func *post, void *ctx) { - int i; ir_mode *mode; mark_irn_visited(irn); @@ -248,7 +245,7 @@ static void walk_memory(ir_node *irn, irg_walk_func *pre, irg_walk_func *post, v mode = get_irn_mode(irn); if (mode == mode_M) { /* every successor uses memory */ - for (i = get_irn_n_outs(irn) - 1; i >= 0; --i) { + for (unsigned i = get_irn_n_outs(irn); i-- > 0; ) { ir_node *succ = get_irn_out(irn, i); if (! irn_visited(succ)) @@ -256,7 +253,7 @@ static void walk_memory(ir_node *irn, irg_walk_func *pre, irg_walk_func *post, v } } else if (mode == mode_T) { /* only some Proj's uses memory */ - for (i = get_irn_n_outs(irn) - 1; i >= 0; --i) { + for (unsigned i = get_irn_n_outs(irn); i-- > 0; ) { ir_node *proj = get_irn_out(irn, i); if (get_irn_mode(proj) == mode_M && ! irn_visited(proj)) @@ -265,7 +262,7 @@ static void walk_memory(ir_node *irn, irg_walk_func *pre, irg_walk_func *post, v } if (post) post(irn, ctx); -} /* walk_memory */ +} /** * Walks over all memory nodes of a graph. @@ -288,7 +285,7 @@ static void walk_memory_irg(ir_graph *irg, irg_walk_func pre, irg_walk_func post walk_memory(get_irg_initial_mem(irg), pre, post, ctx); ir_free_resources(irg, IR_RESOURCE_IRN_VISITED); -} /* walk_memory_irg */ +} /** * Register an address and allocate a (sparse, 0..n) ID for it. @@ -301,25 +298,21 @@ static unsigned register_address(ir_node *adr) { address_entry *entry; - /* skip Confirms and Casts */ + /* skip Confirms */ restart: if (is_Confirm(adr)) { adr = get_Confirm_value(adr); goto restart; } - if (is_Cast(adr)) { - adr = get_Cast_op(adr); - goto restart; - } - entry = (address_entry*)ir_nodemap_get(&env.adr_map, adr); + entry = ir_nodehashmap_get(address_entry, &env.adr_map, adr); if (entry == NULL) { /* new address */ entry = OALLOC(&env.obst, address_entry); entry->id = env.curr_adr_id++; - ir_nodemap_insert(&env.adr_map, adr, entry); + ir_nodehashmap_insert(&env.adr_map, adr, entry); DB((dbg, LEVEL_3, "ADDRESS %+F has ID %u\n", adr, entry->id)); #ifdef DEBUG_libfirm @@ -327,7 +320,7 @@ restart: #endif } return entry->id; -} /* register_address */ +} /** @@ -343,7 +336,7 @@ static ir_node *phi_translate(ir_node *address, const ir_node *block, int pos) if (is_Phi(address) && get_nodes_block(address) == block) address = get_Phi_pred(address, pos); return address; -} /* phi_translate */ +} /** * Walker: allocate an block entry for every block @@ -390,7 +383,7 @@ static void prepare_blocks(ir_node *irn, void *ctx) (void)register_address(irn); } } -} /* prepare_blocks */ +} /** * Post-Walker, link in all Phi's @@ -403,7 +396,7 @@ static void link_phis(ir_node *irn, void *ctx) ir_node *block = get_nodes_block(irn); add_Block_phi(block, irn); } -} /* link_phis */ +} /** * Block walker: creates the inverse post-order list for the CFG. @@ -424,7 +417,7 @@ static void inverse_post_order(ir_node *block, void *ctx) /* remember the first visited (last in list) entry, needed for later */ if (env.backward == NULL) env.backward = entry; -} /* inverse_post_order */ +} /** * Block walker: create backward links for the memops of a block. @@ -455,7 +448,7 @@ static void collect_backward(ir_node *block, void *ctx) last = op; } entry->memop_backward = last; -} /* collect_backward */ +} /** * Allocate a memop. 
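The out-edge walks above are rewritten from `for (i = get_irn_n_outs(irn) - 1; i >= 0; --i)` to `for (unsigned i = get_irn_n_outs(irn); i-- > 0; )` because the count is unsigned: with an unsigned index, `i >= 0` is always true and the classic descending form never terminates, while the test-then-decrement form stops correctly (and is also safe for a count of zero). A standalone illustration of the idiom (plain C, not libfirm code):

/* Iterating an unsigned count downwards: `i >= 0` can never become
 * false for an unsigned i, so only the `i-- > 0` form terminates. */
#include <stdio.h>

static void visit(unsigned i) { printf("visit %u\n", i); }

int main(void)
{
    unsigned n = 3;

    /* BROKEN with an unsigned index -- loops forever (do not run):
     *   for (unsigned i = n - 1; i >= 0; --i) visit(i);
     */

    /* OK: tests first, then decrements; visits n-1 .. 0 and stops. */
    for (unsigned i = n; i-- > 0; )
        visit(i);

    return 0;
}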
@@ -484,7 +477,7 @@ static memop_t *alloc_memop(ir_node *irn) if (irn != NULL) set_irn_link(irn, m); return m; -} /* alloc_memop */ +} /** * Create a memop for a Phi-replacement. @@ -506,7 +499,7 @@ static memop_t *clone_memop_phi(memop_t *op, ir_node *phi) set_irn_link(phi, m); return m; -} /* clone_memop_phi */ +} /** * Return the memory properties of a call node. @@ -525,14 +518,14 @@ static unsigned get_Call_memory_properties(ir_node *call) /* try the called entity */ ir_node *ptr = get_Call_ptr(call); - if (is_Global(ptr)) { - ir_entity *ent = get_Global_entity(ptr); + if (is_SymConst_addr_ent(ptr)) { + ir_entity *ent = get_SymConst_entity(ptr); prop = get_entity_additional_properties(ent); } } return prop & (mtp_property_const|mtp_property_pure); -} /* get_Call_memory_properties */ +} /** * Returns an entity if the address ptr points to a constant one. @@ -578,9 +571,9 @@ static ir_entity *find_constant_entity(ir_node *ptr) if (tlower == tarval_bad || tupper == tarval_bad) return NULL; - if (tarval_cmp(tv, tlower) & pn_Cmp_Lt) + if (tarval_cmp(tv, tlower) == ir_relation_less) return NULL; - if (tarval_cmp(tupper, tv) & pn_Cmp_Lt) + if (tarval_cmp(tupper, tv) == ir_relation_less) return NULL; /* ok, bounds check finished */ @@ -620,7 +613,7 @@ static ir_entity *find_constant_entity(ir_node *ptr) } else return NULL; } -} /* find_constant_entity */ +} /** * Return the Selection index of a Sel node from dimension n @@ -628,178 +621,13 @@ static ir_entity *find_constant_entity(ir_node *ptr) static long get_Sel_array_index_long(ir_node *n, int dim) { ir_node *index = get_Sel_index(n, dim); - assert(is_Const(index)); return get_tarval_long(get_Const_tarval(index)); -} /* get_Sel_array_index_long */ - -/** - * Returns the accessed component graph path for an - * node computing an address. - * - * @param ptr the node computing the address - * @param depth current depth in steps upward from the root - * of the address - */ -static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) -{ - compound_graph_path *res = NULL; - ir_entity *root, *field, *ent; - int path_len, pos, idx; - ir_tarval *tv; - ir_type *tp; - - if (is_SymConst(ptr)) { - /* a SymConst. If the depth is 0, this is an access to a global - * entity and we don't need a component path, else we know - * at least its length. - */ - assert(get_SymConst_kind(ptr) == symconst_addr_ent); - root = get_SymConst_entity(ptr); - res = (depth == 0) ? 
NULL : new_compound_graph_path(get_entity_type(root), depth); - } else if (is_Sel(ptr)) { - /* it's a Sel, go up until we find the root */ - res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1); - if (res == NULL) - return NULL; - - /* fill up the step in the path at the current position */ - field = get_Sel_entity(ptr); - path_len = get_compound_graph_path_length(res); - pos = path_len - depth - 1; - set_compound_graph_path_node(res, pos, field); - - if (is_Array_type(get_entity_owner(field))) { - assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented"); - set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0)); - } - } else if (is_Add(ptr)) { - ir_mode *mode; - ir_tarval *tmp; - - { - ir_node *l = get_Add_left(ptr); - ir_node *r = get_Add_right(ptr); - if (is_Const(r) && get_irn_mode(l) == get_irn_mode(ptr)) { - ptr = l; - tv = get_Const_tarval(r); - } else { - ptr = r; - tv = get_Const_tarval(l); - } - } -ptr_arith: - mode = get_tarval_mode(tv); - tmp = tv; - - /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */ - if (is_Sel(ptr)) { - field = get_Sel_entity(ptr); - } else { - field = get_SymConst_entity(ptr); - } - idx = 0; - for (ent = field;;) { - unsigned size; - ir_tarval *sz, *tv_index, *tlower, *tupper; - ir_node *bound; - - tp = get_entity_type(ent); - if (! is_Array_type(tp)) - break; - ent = get_array_element_entity(tp); - size = get_type_size_bytes(get_entity_type(ent)); - sz = new_tarval_from_long(size, mode); - - tv_index = tarval_div(tmp, sz); - tmp = tarval_mod(tmp, sz); - - if (tv_index == tarval_bad || tmp == tarval_bad) - return NULL; - - assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented"); - bound = get_array_lower_bound(tp, 0); - tlower = computed_value(bound); - bound = get_array_upper_bound(tp, 0); - tupper = computed_value(bound); - - if (tlower == tarval_bad || tupper == tarval_bad) - return NULL; - - if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt) - return NULL; - if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt) - return NULL; - - /* ok, bounds check finished */ - ++idx; - } - if (! tarval_is_null(tmp)) { - /* access to some struct/union member */ - return NULL; - } - - /* should be at least ONE array */ - if (idx == 0) - return NULL; - - res = rec_get_accessed_path(ptr, depth + idx); - if (res == NULL) - return NULL; - - path_len = get_compound_graph_path_length(res); - pos = path_len - depth - idx; - - for (ent = field;;) { - unsigned size; - ir_tarval *sz, *tv_index; - long index; - - tp = get_entity_type(ent); - if (! is_Array_type(tp)) - break; - ent = get_array_element_entity(tp); - set_compound_graph_path_node(res, pos, ent); - - size = get_type_size_bytes(get_entity_type(ent)); - sz = new_tarval_from_long(size, mode); - - tv_index = tarval_div(tv, sz); - tv = tarval_mod(tv, sz); - - /* worked above, should work again */ - assert(tv_index != tarval_bad && tv != tarval_bad); - - /* bounds already checked above */ - index = get_tarval_long(tv_index); - set_compound_graph_path_array_index(res, pos, index); - ++pos; - } - } else if (is_Sub(ptr)) { - ir_node *l = get_Sub_left(ptr); - ir_node *r = get_Sub_right(ptr); - - ptr = l; - tv = get_Const_tarval(r); - tv = tarval_neg(tv); - goto ptr_arith; - } - return res; -} /* rec_get_accessed_path */ - -/** - * Returns an access path or NULL. The access path is only - * valid, if the graph is in phase_high and _no_ address computation is used. 
- */ -static compound_graph_path *get_accessed_path(ir_node *ptr) -{ - compound_graph_path *gr = rec_get_accessed_path(ptr, 0); - return gr; -} /* get_accessed_path */ +} typedef struct path_entry { ir_entity *ent; struct path_entry *next; - long index; + size_t index; } path_entry; static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) @@ -809,7 +637,7 @@ static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) ir_initializer_t *initializer; ir_tarval *tv; ir_type *tp; - unsigned n; + size_t n; entry.next = next; if (is_SymConst(ptr)) { @@ -832,7 +660,7 @@ static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) continue; } } - if (p->index >= (int) n) + if (p->index >= n) return NULL; initializer = get_initializer_compound_value(initializer, p->index); @@ -865,7 +693,7 @@ static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented"); entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0); } else { - int i, n_members = get_compound_n_members(tp); + size_t i, n_members = get_compound_n_members(tp); for (i = 0; i < n_members; ++i) { if (get_compound_member(tp, i) == field) break; @@ -923,7 +751,7 @@ ptr_arith: for (ent = field;;) { unsigned size; ir_tarval *sz, *tv_index, *tlower, *tupper; - long index; + size_t index; ir_node *bound; tp = get_entity_type(ent); @@ -951,9 +779,9 @@ ptr_arith: if (tlower == tarval_bad || tupper == tarval_bad) return NULL; - if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt) + if (tarval_cmp(tv_index, tlower) == ir_relation_less) return NULL; - if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt) + if (tarval_cmp(tupper, tv_index) == ir_relation_less) return NULL; /* ok, bounds check finished */ @@ -977,12 +805,12 @@ ptr_arith: goto ptr_arith; } return NULL; -} /* rec_find_compound_ent_value */ +} static ir_node *find_compound_ent_value(ir_node *ptr) { return rec_find_compound_ent_value(ptr, NULL); -} /* find_compound_ent_value */ +} /** * Mark a Load memop to be replace by a definition @@ -994,7 +822,7 @@ static void mark_replace_load(memop_t *op, ir_node *def) op->replace = def; op->flags |= FLAG_KILLED_NODE; env.changed = 1; -} /* mark_replace_load */ +} /** * Mark a Store memop to be removed. @@ -1005,7 +833,7 @@ static void mark_remove_store(memop_t *op) { op->flags |= FLAG_KILLED_NODE; env.changed = 1; -} /* mark_remove_store */ +} /** * Update a memop for a Load. 
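The bounds checks above move from `tarval_cmp(...) & pn_Cmp_Lt` to `tarval_cmp(...) == ir_relation_less`, following the pn_Cmp -> ir_relation API change. Both encodings treat a comparison result as flags; for integer tarvals the compare yields exactly one of less/equal/greater, so the mask test and the exact test agree and the mechanical rewrite preserves behaviour. A toy model of that argument (the rel_t enum and cmp() are invented for illustration, not the libfirm API):

/* Comparison results as bit-flags.  With a total order each compare
 * returns exactly one flag, so `(r & rel_less) != 0` and
 * `r == rel_less` always agree; only combined flags such as
 * rel_less|rel_equal could make the two tests differ. */
#include <stdio.h>

typedef enum { rel_equal = 1, rel_less = 2, rel_greater = 4 } rel_t;

static rel_t cmp(long a, long b)
{
    return a < b ? rel_less : a > b ? rel_greater : rel_equal;
}

int main(void)
{
    long pairs[3][2] = { {1, 2}, {2, 2}, {3, 2} };

    for (int i = 0; i < 3; ++i) {
        rel_t r = cmp(pairs[i][0], pairs[i][1]);
        printf("a=%ld b=%ld  mask=%d  exact=%d\n",
               pairs[i][0], pairs[i][1], (r & rel_less) != 0, r == rel_less);
    }
    return 0;
}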
@@ -1014,7 +842,6 @@ static void mark_remove_store(memop_t *op) */ static void update_Load_memop(memop_t *m) { - int i; ir_node *load = m->node; ir_node *ptr; ir_entity *ent; @@ -1026,7 +853,7 @@ static void update_Load_memop(memop_t *m) m->value.address = ptr; - for (i = get_irn_n_outs(load) - 1; i >= 0; --i) { + for (unsigned i = get_irn_n_outs(load); i-- > 0; ) { ir_node *proj = get_irn_out(load, i); long pn; @@ -1065,7 +892,7 @@ static void update_Load_memop(memop_t *m) /* no exception, clear the m fields as it might be checked later again */ if (m->projs[pn_Load_X_except]) { ir_graph *irg = get_irn_irg(ptr); - exchange(m->projs[pn_Load_X_except], new_r_Bad(irg)); + exchange(m->projs[pn_Load_X_except], new_r_Bad(irg, mode_X)); m->projs[pn_Load_X_except] = NULL; m->flags &= ~FLAG_EXCEPTION; env.changed = 1; @@ -1080,17 +907,6 @@ static void update_Load_memop(memop_t *m) if (ent->initializer) { /* new style initializer */ value = find_compound_ent_value(ptr); - } else if (entity_has_compound_ent_values(ent)) { - /* old style initializer */ - compound_graph_path *path = get_accessed_path(ptr); - - if (path != NULL) { - assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1)); - - value = get_compound_ent_value_by_path(ent, path); - DB((dbg, LEVEL_1, " Constant access at %F%F resulted in %+F\n", ent, path, value)); - free_compound_graph_path(path); - } } if (value != NULL) value = can_replace_load_by_const(load, value); @@ -1112,7 +928,7 @@ static void update_Load_memop(memop_t *m) /* no user, KILL it */ mark_replace_load(m, NULL); } -} /* update_Load_memop */ +} /** * Update a memop for a Store. @@ -1121,7 +937,6 @@ static void update_Load_memop(memop_t *m) */ static void update_Store_memop(memop_t *m) { - int i; ir_node *store = m->node; ir_node *adr = get_Store_ptr(store); @@ -1135,7 +950,7 @@ static void update_Store_memop(memop_t *m) m->value.address = adr; - for (i = get_irn_n_outs(store) - 1; i >= 0; --i) { + for (unsigned i = get_irn_n_outs(store); i-- > 0; ) { ir_node *proj = get_irn_out(store, i); long pn; @@ -1160,7 +975,7 @@ static void update_Store_memop(memop_t *m) } m->value.value = get_Store_value(store); m->value.mode = get_irn_mode(m->value.value); -} /* update_Store_memop */ +} /** * Update a memop for a Call. @@ -1171,7 +986,6 @@ static void update_Call_memop(memop_t *m) { ir_node *call = m->node; unsigned prop = get_Call_memory_properties(call); - int i; if (prop & mtp_property_const) { /* A constant call did NOT use memory at all, we @@ -1182,7 +996,7 @@ static void update_Call_memop(memop_t *m) } else m->flags = FLAG_KILL_ALL; - for (i = get_irn_n_outs(call) - 1; i >= 0; --i) { + for (unsigned i = get_irn_n_outs(call); i-- > 0; ) { ir_node *proj = get_irn_out(call, i); /* beware of keep edges */ @@ -1198,19 +1012,18 @@ static void update_Call_memop(memop_t *m) break; } } -} /* update_Call_memop */ +} /** - * Update a memop for a Div/Mod/Quot/DivMod. + * Update a memop for a Div/Mod. 
* * @param m the memop */ -static void update_DivOp_memop(memop_t *m) +static void update_Div_memop(memop_t *m) { ir_node *div = m->node; - int i; - for (i = get_irn_n_outs(div) - 1; i >= 0; --i) { + for (unsigned i = get_irn_n_outs(div); i-- > 0; ) { ir_node *proj = get_irn_out(div, i); /* beware of keep edges */ @@ -1218,15 +1031,37 @@ static void update_DivOp_memop(memop_t *m) continue; switch (get_Proj_proj(proj)) { - case pn_Generic_X_except: + case pn_Div_X_except: m->flags |= FLAG_EXCEPTION; break; - case pn_Generic_M: + case pn_Div_M: m->mem = proj; break; } } -} /* update_DivOp_memop */ +} + +static void update_Mod_memop(memop_t *m) +{ + ir_node *div = m->node; + + for (unsigned i = get_irn_n_outs(div); i-- > 0; ) { + ir_node *proj = get_irn_out(div, i); + + /* beware of keep edges */ + if (is_End(proj)) + continue; + + switch (get_Proj_proj(proj)) { + case pn_Mod_X_except: + m->flags |= FLAG_EXCEPTION; + break; + case pn_Mod_M: + m->mem = proj; + break; + } + } +} /** * Update a memop for a Phi. @@ -1235,9 +1070,9 @@ static void update_DivOp_memop(memop_t *m) */ static void update_Phi_memop(memop_t *m) { - /* the Phi is it's own mem */ + /* the Phi is its own mem */ m->mem = m->node; -} /* update_Phi_memop */ +} /** * Memory walker: collect all memory ops and build topological lists. @@ -1291,10 +1126,10 @@ static void collect_memops(ir_node *irn, void *ctx) /* we can those to find the memory edge */ break; case iro_Div: - case iro_DivMod: - case iro_Quot: + update_Div_memop(op); + break; case iro_Mod: - update_DivOp_memop(op); + update_Mod_memop(op); break; case iro_Builtin: @@ -1313,7 +1148,7 @@ static void collect_memops(ir_node *irn, void *ctx) entry->memop_backward = op; } } -} /* collect_memops */ +} /** * Find an address in the current set. @@ -1338,7 +1173,7 @@ static memop_t *find_address(const value_t *value) return res; } return NULL; -} /* find_address */ +} /** * Find an address in the avail_out set. @@ -1359,7 +1194,7 @@ static memop_t *find_address_avail(const block_t *bl, unsigned id, const ir_mode return res; } return NULL; -} /* find_address_avail */ +} /** * Kill all addresses from the current set. @@ -1370,7 +1205,7 @@ static void kill_all(void) /* set sentinel */ rbitset_set(env.curr_set, env.rbs_size - 1); -} /* kill_all */ +} /** * Kill memops that are not alias free due to a Store value from the current set. @@ -1379,8 +1214,8 @@ static void kill_all(void) */ static void kill_memops(const value_t *value) { - unsigned end = env.rbs_size - 1; - unsigned pos; + size_t end = env.rbs_size - 1; + size_t pos; for (pos = rbitset_next(env.curr_set, 0, 1); pos < end; pos = rbitset_next(env.curr_set, pos + 1, 1)) { memop_t *op = env.curr_id_2_memop[pos]; @@ -1392,7 +1227,7 @@ static void kill_memops(const value_t *value) DB((dbg, LEVEL_2, "KILLING %+F because of possible alias address %+F\n", op->node, value->address)); } } -} /* kill_memops */ +} /** * Add the value of a memop to the current set. @@ -1403,7 +1238,7 @@ static void add_memop(memop_t *op) { rbitset_set(env.curr_set, op->value.id); env.curr_id_2_memop[op->value.id] = op; -} /* add_memop */ +} /** * Add the value of a memop to the avail_out set. 
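A note on the sets manipulated above: every bitset in this pass (curr_set, avail_out, anticL_in) reserves its last bit, at rbs_size - 1, as an always-set sentinel. kill_all() re-sets it, and the scan loops such as the one in kill_memops() use `pos < end` with end = rbs_size - 1, so a next-set-bit search can never run off the end of the set. A reduced standalone model of the trick (next_set() is a stand-in for rbitset_next, not the real API):

/* Sentinel-terminated bitset scan: the last bit is always set, so
 * the search loop needs no separate length check. */
#include <stdio.h>

enum { SIZE = 16 };                 /* bits; SIZE-1 is the sentinel */

static unsigned next_set(const unsigned char *bits, unsigned from)
{
    while (!(bits[from / 8] & (1u << (from % 8))))
        ++from;                     /* sentinel guarantees termination */
    return from;
}

int main(void)
{
    unsigned char set[SIZE / 8] = { 0 };

    set[(SIZE - 1) / 8] |= 1u << ((SIZE - 1) % 8);  /* set the sentinel */
    set[0] |= 1u << 3;                              /* value id 3 is live */

    for (unsigned pos = next_set(set, 0); pos < SIZE - 1;
         pos = next_set(set, pos + 1)) {
        printf("id %u is in the set\n", pos);
    }
    return 0;
}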
@@ -1415,7 +1250,7 @@ static void add_memop_avail(block_t *bl, memop_t *op) { rbitset_set(bl->avail_out, op->value.id); bl->id_2_memop_avail[op->value.id] = op; -} /* add_memop_avail */ +} /** * Check, if we can convert a value of one mode to another mode @@ -1431,7 +1266,7 @@ static int can_convert_to(const ir_mode *from, const ir_mode *to) get_mode_size_bits(from) == get_mode_size_bits(to)) return 1; return 0; -} /* can_convert_to */ +} /** * Add a Conv to the requested mode if needed. @@ -1455,7 +1290,7 @@ static ir_node *conv_to(ir_node *irn, ir_mode *mode) return NULL; } return irn; -} /* conv_to */ +} /** * Update the address of an value if this address was a load result @@ -1475,7 +1310,7 @@ static void update_address(value_t *value) value->address = op->replace; } } -} /* update_address */ +} /** * Do forward dataflow analysis on the given block and calculate the @@ -1563,9 +1398,7 @@ static void calc_gen_kill_avail(block_t *bl) kill_all(); } } -} /* calc_gen_kill_avail */ - -#define BYTE_SIZE(x) (((x) + 7) >> 3) +} /** * Do forward dataflow analysis on a given block to calculate the avail_out set @@ -1581,7 +1414,7 @@ static void forward_avail(block_t *bl) calc_gen_kill_avail(bl); dump_curr(bl, "Avail_out"); -} /* forward_avail */ +} /** * Do backward dataflow analysis on a given block to calculate the antic set @@ -1601,8 +1434,8 @@ static int backward_antic(block_t *bl) ir_node *succ = get_Block_cfg_out(block, 0); block_t *succ_bl = get_block_entry(succ); int pred_pos = get_Block_cfgpred_pos(succ, block); - unsigned end = env.rbs_size - 1; - unsigned pos; + size_t end = env.rbs_size - 1; + size_t pos; kill_all(); @@ -1705,7 +1538,7 @@ static int backward_antic(block_t *bl) } dump_curr(bl, "AnticL_in"); return 0; -} /* backward_antic */ +} /** * Replace a Load memop by a already known value. @@ -1748,13 +1581,13 @@ static void replace_load(memop_t *op) proj = op->projs[pn_Load_X_except]; if (proj != NULL) { ir_graph *irg = get_irn_irg(load); - exchange(proj, new_r_Bad(irg)); + exchange(proj, new_r_Bad(irg, mode_X)); } proj = op->projs[pn_Load_X_regular]; if (proj != NULL) { exchange(proj, new_r_Jmp(get_nodes_block(load))); } -} /* replace_load */ +} /** * Remove a Store memop. @@ -1775,13 +1608,13 @@ static void remove_store(memop_t *op) proj = op->projs[pn_Store_X_except]; if (proj != NULL) { ir_graph *irg = get_irn_irg(store); - exchange(proj, new_r_Bad(irg)); + exchange(proj, new_r_Bad(irg, mode_X)); } proj = op->projs[pn_Store_X_regular]; if (proj != NULL) { exchange(proj, new_r_Jmp(get_nodes_block(store))); } -} /* remove_store */ +} /** @@ -1805,7 +1638,7 @@ static void do_replacements(block_t *bl) } } } -} /* do_replacements */ +} /** * Calculate the Avail_out sets for all basic blocks. @@ -1827,7 +1660,7 @@ static void calcAvail(void) /* restore the current sets */ env.curr_id_2_memop = tmp_memop; env.curr_set = tmp_set; -} /* calcAvail */ +} /** * Calculate the Antic_in sets for all basic blocks. @@ -1853,7 +1686,7 @@ static void calcAntic(void) ++i; } while (need_iter); DB((dbg, LEVEL_2, "Get anticipated Load set after %d iterations\n", i)); -} /* calcAntic */ +} /** * Return the node representing the last memory in a block. 
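calcAntic() above is a classic round-robin fixed point: backward_antic() is re-run over the inverse-post-order block list until no block's anticL_in set changes, with each block reporting a change by comparing the freshly computed set against the stored one. A toy version of the pattern, with one machine word standing in for a block's bitset (the three-block chain and all names here are invented, not libfirm code):

/* Backward dataflow to a fixed point: antic[b] = gen[b] | antic[succ[b]],
 * iterated until nothing changes any more. */
#include <stdio.h>

enum { N_BLOCKS = 3 };

int main(void)
{
    unsigned antic[N_BLOCKS] = { 0 };           /* anticL_in, one word each */
    const unsigned gen[N_BLOCKS] = { 0x1, 0x2, 0x4 };
    const int succ[N_BLOCKS] = { 1, 2, -1 };    /* single successor, -1 = exit */

    int need_iter, rounds = 0;
    do {
        need_iter = 0;
        for (int b = N_BLOCKS - 1; b >= 0; --b) {  /* walk blocks backward */
            unsigned out = succ[b] < 0 ? 0u : antic[succ[b]];
            unsigned in  = gen[b] | out;           /* no kill set in the toy */
            if (in != antic[b]) {                  /* did the set change? */
                antic[b] = in;
                need_iter = 1;
            }
        }
        ++rounds;
    } while (need_iter);

    printf("stable after %d rounds, antic[0] = 0x%x\n", rounds, antic[0]);
    return 0;
}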
@@ -1869,7 +1702,7 @@ static ir_node *find_last_memory(block_t *bl) /* if there is NO memory in this block, go to the dominator */ bl = get_block_entry(get_Block_idom(bl->block)); } -} /* find_last_memory */ +} /** * Reroute all memory users of old memory @@ -1880,9 +1713,7 @@ static ir_node *find_last_memory(block_t *bl) */ static void reroute_all_mem_users(ir_node *omem, ir_node *nmem) { - int i; - - for (i = get_irn_n_outs(omem) - 1; i >= 0; --i) { + for (unsigned i = get_irn_n_outs(omem); i-- > 0; ) { int n_pos; ir_node *user = get_irn_out_ex(omem, i, &n_pos); @@ -1890,8 +1721,8 @@ static void reroute_all_mem_users(ir_node *omem, ir_node *nmem) } /* all edges previously point to omem now point to nmem */ - nmem->out = omem->out; -} /* reroute_all_mem_users */ + nmem->o.out = omem->o.out; +} /** * Reroute memory users of old memory that are dominated by a given block @@ -1903,10 +1734,11 @@ static void reroute_all_mem_users(ir_node *omem, ir_node *nmem) */ static void reroute_mem_through(ir_node *omem, ir_node *nmem, ir_node *pass_bl) { - int i, j, n = get_irn_n_outs(omem); - ir_def_use_edge *edges = NEW_ARR_D(ir_def_use_edge, &env.obst, n + 1); + unsigned n = get_irn_n_outs(omem); + ir_def_use_edges *new_out = OALLOCF(&env.obst, ir_def_use_edges, edges, n); - for (i = j = 0; i < n; ++i) { + unsigned j = 0; + for (unsigned i = 0; i < n; ++i) { int n_pos; ir_node *user = get_irn_out_ex(omem, i, &n_pos); ir_node *use_bl = get_nodes_block(user); @@ -1917,21 +1749,21 @@ static void reroute_mem_through(ir_node *omem, ir_node *nmem, ir_node *pass_bl) } if (block_dominates(pass_bl, use_bl)) { /* found an user that is dominated */ + new_out->edges[j].pos = n_pos; + new_out->edges[j].use = user; ++j; - edges[j].pos = n_pos; - edges[j].use = user; set_irn_n(user, n_pos, nmem); } } + new_out->n_edges = j; /* Modify the out structure: we create a new out edge array on our - temporary obstack here. This should be no problem, as we invalidate the edges - at the end either. */ + temporary obstack here. This should be no problem, as we invalidate the + edges at the end either. */ /* first entry is used for the length */ - edges[0].pos = j; - nmem->out = edges; -} /* reroute_mem_through */ + nmem->o.out = new_out; +} /** * insert Loads, making partly redundant Loads fully redundant @@ -1940,8 +1772,7 @@ static int insert_Load(block_t *bl) { ir_node *block = bl->block; int i, n = get_Block_n_cfgpreds(block); - unsigned end = env.rbs_size - 1; - unsigned pos; + size_t end = env.rbs_size - 1; DB((dbg, LEVEL_3, "processing %+F\n", block)); @@ -1952,7 +1783,7 @@ static int insert_Load(block_t *bl) if (n > 1) { ir_node **ins; - int pos; + size_t pos; NEW_ARR_A(ir_node *, ins, n); @@ -1979,25 +1810,21 @@ static int insert_Load(block_t *bl) } /* * Ensure that all values are in the map: build Phi's if necessary: - * Note: the last bit is the sentinel and ALWAYS set, so start with -2. + * Note: the last bit is the sentinel and ALWAYS set, so end with -2. */ - for (pos = env.rbs_size - 2; pos >= 0; --pos) { + for (pos = 0; pos < env.rbs_size - 1; ++pos) { if (! 
rbitset_is_set(env.curr_set, pos)) env.curr_id_2_memop[pos] = NULL; else { - ir_node *pred = get_Block_cfgpred_block(bl->block, 0); - block_t *pred_bl = get_block_entry(pred); - int need_phi = 0; - memop_t *first = NULL; - ir_mode *mode = NULL; + int need_phi = 0; + memop_t *first = NULL; + ir_mode *mode = NULL; for (i = 0; i < n; ++i) { - memop_t *mop; - - pred = get_Block_cfgpred_block(bl->block, i); - pred_bl = get_block_entry(pred); + ir_node *pred = get_Block_cfgpred_block(bl->block, i); + block_t *pred_bl = get_block_entry(pred); - mop = pred_bl->id_2_memop_avail[pos]; + memop_t *mop = pred_bl->id_2_memop_avail[pos]; if (first == NULL) { first = mop; ins[0] = first->value.value; @@ -2045,6 +1872,8 @@ static int insert_Load(block_t *bl) } if (n > 1) { + size_t pos; + /* check for partly redundant values */ for (pos = rbitset_next(bl->anticL_in, 0, 1); pos < end; @@ -2183,7 +2012,7 @@ static int insert_Load(block_t *bl) } dump_curr(bl, "Avail_out"); return 0; -} /* insert_Load */ +} /** * Insert Loads upwards. @@ -2210,106 +2039,9 @@ static void insert_Loads_upwards(void) } while (need_iter); DB((dbg, LEVEL_2, "Finished Load inserting after %d iterations\n", i)); -} /* insert_Loads_upwards */ - -/** - * Kill unreachable control flow. - * - * @param irg the graph to operate on - */ -static void kill_unreachable_blocks(ir_graph *irg) -{ - block_t *bl; - ir_node **ins; - int changed = 0; - - NEW_ARR_A(ir_node *, ins, env.max_cfg_preds); - - for (bl = env.forward; bl != NULL; bl = bl->forward_next) { - ir_node *block = bl->block; - int i, j, k, n; - - assert(get_Block_mark(block)); - - n = get_Block_n_cfgpreds(block); - - for (i = j = 0; i < n; ++i) { - ir_node *pred = get_Block_cfgpred(block, i); - ir_node *pred_bl; - - if (is_Bad(pred)) - continue; - - pred_bl = get_nodes_block(skip_Proj(pred)); - if (! get_Block_mark(pred_bl)) - continue; - - ins[j++] = pred; - } - if (j != n) { - ir_node *phi, *next; - - /* some unreachable blocks detected */ - changed = 1; - - DB((dbg, LEVEL_1, "Killing dead block predecessors on %+F\n", block)); - - set_irn_in(block, j, ins); - - /* shorten all Phi nodes */ - for (phi = get_Block_phis(block); phi != NULL; phi = next) { - next = get_Phi_next(phi); - - for (i = k = 0; i < n; ++i) { - ir_node *pred = get_Block_cfgpred_block(block, i); - - if (is_Bad(pred)) - continue; - - if (! 
get_Block_mark(pred)) - continue; - - ins[k++] = get_Phi_pred(phi, i); - } - if (k == 1) - exchange(phi, ins[0]); - else - set_irn_in(phi, k, ins); - } - } - - } - - if (changed) { - /* kick keep alives */ - ir_node *end = get_irg_end(irg); - int i, j, n = get_End_n_keepalives(end); - - NEW_ARR_A(ir_node *, ins, n); - - for (i = j = 0; i < n; ++i) { - ir_node *ka = get_End_keepalive(end, i); - ir_node *ka_bl; - - if (is_Bad(ka)) - continue; - if (is_Block(ka)) - ka_bl = ka; - else - ka_bl = get_nodes_block(skip_Proj(ka)); - if (get_Block_mark(ka_bl)) - ins[j++] = ka; - } - if (j != n) - set_End_keepalives(end, j, ins); - - free_irg_outs(irg); - - /* this transformation do NOT invalidate the dominance */ - } -} /* kill_unreachable_blocks */ +} -int opt_ldst(ir_graph *irg) +void opt_ldst(ir_graph *irg) { block_t *bl; @@ -2317,16 +2049,19 @@ int opt_ldst(ir_graph *irg) DB((dbg, LEVEL_1, "\nDoing Load/Store optimization on %+F\n", irg)); - /* we need landing pads */ - remove_critical_cf_edges(irg); + assure_irg_properties(irg, + IR_GRAPH_PROPERTY_NO_CRITICAL_EDGES /* we need landing pads */ + | IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE + | IR_GRAPH_PROPERTY_CONSISTENT_OUTS + | IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE + | IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE); if (get_opt_alias_analysis()) { - assure_irg_entity_usage_computed(irg); assure_irp_globals_entity_usage_computed(); } obstack_init(&env.obst); - ir_nodemap_init(&env.adr_map); + ir_nodehashmap_init(&env.adr_map); env.forward = NULL; env.backward = NULL; @@ -2334,14 +2069,11 @@ int opt_ldst(ir_graph *irg) env.n_mem_ops = 0; env.max_cfg_preds = 0; env.changed = 0; - env.start_bl = get_irg_start_block(irg); env.end_bl = get_irg_end_block(irg); #ifdef DEBUG_libfirm env.id_2_address = NEW_ARR_F(ir_node *, 0); #endif - assure_irg_outs(irg); - ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_BLOCK_MARK); /* first step: allocate block entries. Note that some blocks might be @@ -2350,7 +2082,8 @@ int opt_ldst(ir_graph *irg) /* produce an inverse post-order list for the CFG: this links only reachable blocks */ - irg_out_block_walk(get_irg_start_block(irg), NULL, inverse_post_order, NULL); + ir_node *const start_block = get_irg_start_block(irg); + irg_out_block_walk(start_block, NULL, inverse_post_order, NULL); if (! get_Block_mark(env.end_bl)) { /* @@ -2364,11 +2097,6 @@ int opt_ldst(ir_graph *irg) set_Block_mark(env.end_bl, 1); } - /* KILL unreachable blocks: these disturb the data flow analysis */ - kill_unreachable_blocks(irg); - - assure_doms(irg); - /* second step: find and sort all memory ops */ walk_memory_irg(irg, collect_memops, NULL, NULL); @@ -2379,7 +2107,7 @@ int opt_ldst(ir_graph *irg) if (env.n_mem_ops == 0) { /* no memory ops */ - goto end; + goto no_changes; } /* create the backward links. 
*/ @@ -2392,7 +2120,7 @@ int opt_ldst(ir_graph *irg) env.backward = bl; /* check that we really start with the start / end block */ - assert(env.forward->block == env.start_bl); + assert(env.forward->block == start_block); assert(env.backward->block == env.end_bl); /* create address sets: for now, only the existing addresses are allowed plus one @@ -2402,22 +2130,19 @@ int opt_ldst(ir_graph *irg) /* create the current set */ env.curr_set = rbitset_obstack_alloc(&env.obst, env.rbs_size); rbitset_set(env.curr_set, env.rbs_size - 1); - env.curr_id_2_memop = NEW_ARR_D(memop_t *, &env.obst, env.rbs_size); - memset(env.curr_id_2_memop, 0, env.rbs_size * sizeof(env.curr_id_2_memop[0])); + env.curr_id_2_memop = NEW_ARR_DZ(memop_t*, &env.obst, env.rbs_size); for (bl = env.forward; bl != NULL; bl = bl->forward_next) { /* set sentinel bits */ bl->avail_out = rbitset_obstack_alloc(&env.obst, env.rbs_size); rbitset_set(bl->avail_out, env.rbs_size - 1); - bl->id_2_memop_avail = NEW_ARR_D(memop_t *, &env.obst, env.rbs_size); - memset(bl->id_2_memop_avail, 0, env.rbs_size * sizeof(bl->id_2_memop_avail[0])); + bl->id_2_memop_avail = NEW_ARR_DZ(memop_t*, &env.obst, env.rbs_size); bl->anticL_in = rbitset_obstack_alloc(&env.obst, env.rbs_size); rbitset_set(bl->anticL_in, env.rbs_size - 1); - bl->id_2_memop_antic = NEW_ARR_D(memop_t *, &env.obst, env.rbs_size); - memset(bl->id_2_memop_antic, 0, env.rbs_size * sizeof(bl->id_2_memop_antic[0])); + bl->id_2_memop_antic = NEW_ARR_DZ(memop_t*, &env.obst, env.rbs_size); } (void) dump_block_list; @@ -2435,23 +2160,22 @@ int opt_ldst(ir_graph *irg) /* not only invalidate but free them. We might allocate new out arrays on our obstack which will be deleted yet. */ - free_irg_outs(irg); - set_irg_entity_usage_state(irg, ir_entity_usage_not_computed); + confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_CONTROL_FLOW); + } else { +no_changes: + confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL); } -end: ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_BLOCK_MARK); - ir_nodemap_destroy(&env.adr_map); + ir_nodehashmap_destroy(&env.adr_map); obstack_free(&env.obst, NULL); #ifdef DEBUG_libfirm DEL_ARR_F(env.id_2_address); #endif - - return env.changed != 0; -} /* opt_ldst */ +} ir_graph_pass_t *opt_ldst_pass(const char *name) { - return def_graph_pass_ret(name ? name : "ldst_df", opt_ldst); -} /* opt_ldst_pass */ + return def_graph_pass(name ? name : "ldst_df", opt_ldst); +}