X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fopt%2Fopt_ldst.c;h=342dc0fe4e4dbaf140a89cf88283bb822dfd2092;hb=a1e9069afa4fa1e16e2d176bcd7905d6a1ed4677;hp=b2e43064e3f609bbd9cc6204f9438e894a5da2a0;hpb=cef83dae71c5bd9689d49572c08555d2acc0389a;p=libfirm

diff --git a/ir/opt/opt_ldst.c b/ir/opt/opt_ldst.c
index b2e43064e..342dc0fe4 100644
--- a/ir/opt/opt_ldst.c
+++ b/ir/opt/opt_ldst.c
@@ -42,6 +42,7 @@
 #include "raw_bitset.h"
 #include "debug.h"
 #include "error.h"
+#include "irpass.h"
 
 /* maximum number of output Proj's */
 #define MAX_PROJ (pn_Load_max > pn_Store_max ? pn_Load_max : pn_Store_max)
@@ -57,13 +58,10 @@ typedef struct address_entry_t {
  * Memop-flags.
  */
 enum memop_flags {
-	FLAG_KILL_LOADS  = 1,  /**< KILL all Loads */
-	FLAG_KILL_STORES = 2,  /**< KILL all Stores */
-	FLAG_KILLED_NODE = 4,  /**< this node was killed */
-	FLAG_EXCEPTION   = 8,  /**< this node has exception flow */
-	FLAG_IGNORE      = 16, /**< ignore this node (volatile or other) */
-	/** this memop KILLS all addresses */
-	FLAG_KILL_ALL    = FLAG_KILL_LOADS|FLAG_KILL_STORES
+	FLAG_KILL_ALL    = 1, /**< KILL all addresses */
+	FLAG_KILLED_NODE = 2, /**< this node was killed */
+	FLAG_EXCEPTION   = 4, /**< this node has exception flow */
+	FLAG_IGNORE      = 8, /**< ignore this node (volatile or other) */
 };
 
 /**
@@ -95,12 +93,6 @@ struct memop_t {
 	ir_node  *projs[MAX_PROJ]; /**< Projs of this memory op */
 };
 
-enum block_flags {
-	BLK_FLAG_NONE      = 0, /**< No flags */
-	BLK_FLAG_REACHABLE = 1, /**< This block can be reached. */
-	BLK_FLAG_EVALUATED = 2, /**< This block was evaluated once. */
-};
-
 /**
  * Additional data for every basic block.
  */
@@ -116,7 +108,7 @@ struct block_t {
 	block_t  *forward_next;   /**< next block entry for forward iteration */
 	block_t  *backward_next;  /**< next block entry for backward iteration */
 	memop_t  *avail;          /**< used locally for the avail map */
-	unsigned flags;           /**< additional flags */
+	memop_t  **trans_results; /**< used to cache translated nodes during the antic calculation. */
 };
 
 /**
@@ -134,22 +126,27 @@ typedef struct ldst_env_t {
 	unsigned        curr_adr_id;   /**< number for address mapping */
 	unsigned        n_mem_ops;     /**< number of memory operations (Loads/Stores) */
 	unsigned        rbs_size;      /**< size of all bitsets in bytes */
+	int             max_cfg_preds; /**< maximum number of block cfg predecessors */
 	int             changed;       /**< Flags for changed graph state */
+#ifdef DEBUG_libfirm
+	ir_node         **id_2_address; /**< maps an id to the used address */
+#endif
 } ldst_env;
 
+/* the one and only environment */
+static ldst_env env;
+
 #ifdef DEBUG_libfirm
 
 static firm_dbg_module_t *dbg;
 
-/* the one and only environment */
-static ldst_env env;
-
 /**
  * Dumps the block list.
  *
  * @param ldst environment
  */
-static void dump_block_list(ldst_env *env) {
+static void dump_block_list(ldst_env *env)
+{
 	block_t *entry;
 	memop_t *op;
 	int i;
@@ -161,20 +158,17 @@ static void dump_block_list(ldst_env *env) {
 		for (op = entry->memop_forward; op != NULL; op = op->next) {
 			if (i == 0) {
 				DB((dbg, LEVEL_2, "\n\t"));
-			} DB((dbg, LEVEL_2, "%+F", op->node));
+			}
+			DB((dbg, LEVEL_2, "%+F", op->node));
 			if ((op->flags & FLAG_KILL_ALL) == FLAG_KILL_ALL)
 				DB((dbg, LEVEL_2, "X"));
-			else if (op->flags & FLAG_KILL_LOADS)
-				DB((dbg, LEVEL_2, "L"));
-			else if (op->flags & FLAG_KILL_STORES)
-				DB((dbg, LEVEL_2, "S"));
 			DB((dbg, LEVEL_2, ", "));
 
 			i = (i + 1) & 3;
 		}
 		DB((dbg, LEVEL_2, "\n}\n\n"));
 	}
-}
+} /* dump_block_list */
 
 /**
 * Dumps the current set.
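
[editor's note, not part of the patch] The rewritten set handling below relies on a
sentinel bit: env.rbs_size is chosen as "number of address ids + 1" (see the hunk in
opt_ldst() near the end of this diff) and kill_all() keeps the last bit
(env.rbs_size - 1) permanently set, so every rbitset_next() scan is guaranteed to
terminate and the loop condition can be the more robust `pos < end` instead of the
old `pos != end`. A minimal sketch of the idiom, using only the raw_bitset.h API
already included above:

	unsigned end = env.rbs_size - 1; /* index of the sentinel bit */
	unsigned pos;

	/* visit every set bit except the sentinel */
	for (pos = rbitset_next(env.curr_set, 0, 1);
	     pos < end;
	     pos = rbitset_next(env.curr_set, pos + 1, 1)) {
		memop_t *op = env.curr_id_2_memop[pos];
		/* ... process op ... */
	}
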
@@ -182,42 +178,53 @@ static void dump_block_list(ldst_env *env) { * @param bl current block * @param s name of the set */ -static void dump_curr(block_t *bl, const char *s) { - unsigned pos = 0; +static void dump_curr(block_t *bl, const char *s) +{ unsigned end = env.rbs_size - 1; + unsigned pos; int i; DB((dbg, LEVEL_2, "%s[%+F] = {", s, bl->block)); i = 0; - for (pos = rbitset_next(env.curr_set, pos, 1); pos != end; pos = rbitset_next(env.curr_set, pos + 1, 1)) { + for (pos = rbitset_next(env.curr_set, 0, 1); pos < end; pos = rbitset_next(env.curr_set, pos + 1, 1)) { memop_t *op = env.curr_id_2_memop[pos]; if (i == 0) { DB((dbg, LEVEL_2, "\n\t")); } + DB((dbg, LEVEL_2, "<%+F, %+F>, ", op->value.address, op->value.value)); i = (i + 1) & 3; } DB((dbg, LEVEL_2, "\n}\n")); -} +} /* dump_curr */ #else -#define dump_block_list() -#define dump_curr(bl, s) +static void dump_block_list(ldst_env *env) +{ + (void) env; +} +static void dump_curr(block_t *bl, const char *s) +{ + (void) bl; + (void) s; +} #endif /* DEBUG_libfirm */ /** Get the block entry for a block node */ -static block_t *get_block_entry(const ir_node *block) { +static block_t *get_block_entry(const ir_node *block) +{ assert(is_Block(block)); return get_irn_link(block); -} +} /* get_block_entry */ /** Get the memop entry for a memory operation node */ -static memop_t *get_irn_memop(const ir_node *irn) { +static memop_t *get_irn_memop(const ir_node *irn) +{ assert(! is_Block(irn)); return get_irn_link(irn); -} +} /* get_irn_memop */ /** * Walk over the memory edges from definition to users. @@ -228,7 +235,8 @@ static memop_t *get_irn_memop(const ir_node *irn) { * @param post post walker function * @param ctx context parameter for the walker functions */ -static void walk_memory(ir_node *irn, irg_walk_func *pre, irg_walk_func *post, void *ctx) { +static void walk_memory(ir_node *irn, irg_walk_func *pre, irg_walk_func *post, void *ctx) +{ int i; ir_mode *mode; @@ -257,7 +265,7 @@ static void walk_memory(ir_node *irn, irg_walk_func *pre, irg_walk_func *post, v } if (post) post(irn, ctx); -} +} /* walk_memory */ /** * Walks over all memory nodes of a graph. @@ -267,7 +275,8 @@ static void walk_memory(ir_node *irn, irg_walk_func *pre, irg_walk_func *post, v * @param post post walker function * @param ctx context parameter for the walker functions */ -static void walk_memory_irg(ir_graph *irg, irg_walk_func pre, irg_walk_func post, void *ctx) { +static void walk_memory_irg(ir_graph *irg, irg_walk_func pre, irg_walk_func post, void *ctx) +{ inc_irg_visited(irg); ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED); @@ -279,61 +288,154 @@ static void walk_memory_irg(ir_graph *irg, irg_walk_func pre, irg_walk_func post walk_memory(get_irg_initial_mem(irg), pre, post, ctx); ir_free_resources(irg, IR_RESOURCE_IRN_VISITED); -} +} /* walk_memory_irg */ /** - * Block walker: allocate an block entry for every block. + * Register an address and allocate a (sparse, 0..n) ID for it. 
+ *
+ * @param adr the IR-node representing the address
+ *
+ * @return the allocated id
  */
-static void prepare_blocks(ir_node *block, void *ctx) {
-	block_t *entry = obstack_alloc(&env.obst, sizeof(*entry));
+static unsigned register_address(ir_node *adr)
+{
+	address_entry *entry;
 
+	/* skip Confirms and Casts */
+restart:
+	if (is_Confirm(adr)) {
+		adr = get_Confirm_value(adr);
+		goto restart;
+	}
+	if (is_Cast(adr)) {
+		adr = get_Cast_op(adr);
+		goto restart;
+	}
+
+	entry = ir_nodemap_get(&env.adr_map, adr);
+
+	if (entry == NULL) {
+		/* new address */
+		entry = OALLOC(&env.obst, address_entry);
+
+		entry->id = env.curr_adr_id++;
+		ir_nodemap_insert(&env.adr_map, adr, entry);
+
+		DB((dbg, LEVEL_3, "ADDRESS %+F has ID %u\n", adr, entry->id));
+#ifdef DEBUG_libfirm
+		ARR_APP1(ir_node *, env.id_2_address, adr);
+#endif
+	}
+	return entry->id;
+} /* register_address */
+
+
+/**
+ * Translate an address through a Phi node into a given predecessor
+ * block.
+ *
+ * @param address the address
+ * @param block   the block
+ * @param pos     the position of the predecessor in block
+ */
+static ir_node *phi_translate(ir_node *address, const ir_node *block, int pos)
+{
+	if (is_Phi(address) && get_nodes_block(address) == block)
+		address = get_Phi_pred(address, pos);
+	return address;
+} /* phi_translate */
+
+/**
+ * Walker: allocate a block entry for every block
+ * and register all potential addresses.
+ */
+static void prepare_blocks(ir_node *irn, void *ctx)
+{
 	(void)ctx;
 
-	entry->memop_forward    = NULL;
-	entry->memop_backward   = NULL;
-	entry->avail_out        = NULL;
-	entry->id_2_memop_avail = NULL;
-	entry->anticL_in        = NULL;
-	entry->id_2_memop_antic = NULL;
-	entry->block            = block;
-	entry->forward_next     = NULL;
-	entry->backward_next    = NULL;
-	entry->avail            = NULL;
-	entry->flags            = BLK_FLAG_NONE;
-	set_irn_link(block, entry);
-}
+	if (is_Block(irn)) {
+		block_t *entry = OALLOC(&env.obst, block_t);
+		int     n;
+
+		entry->memop_forward    = NULL;
+		entry->memop_backward   = NULL;
+		entry->avail_out        = NULL;
+		entry->id_2_memop_avail = NULL;
+		entry->anticL_in        = NULL;
+		entry->id_2_memop_antic = NULL;
+		entry->block            = irn;
+		entry->forward_next     = NULL;
+		entry->backward_next    = NULL;
+		entry->avail            = NULL;
+		entry->trans_results    = NULL;
+		set_irn_link(irn, entry);
+
+		set_Block_phis(irn, NULL);
+
+		/* use block marks to track unreachable blocks */
+		set_Block_mark(irn, 0);
+
+		n = get_Block_n_cfgpreds(irn);
+		if (n > env.max_cfg_preds)
+			env.max_cfg_preds = n;
+	} else {
+		ir_mode *mode = get_irn_mode(irn);
+
+		if (mode_is_reference(mode)) {
+			/*
+			 * Register ALL possible addresses: this is overkill so far, but
+			 * simpler than doing it only for the translated addresses
+			 * (which would be sufficient at the moment).
+			 */
+			(void)register_address(irn);
+		}
+	}
+} /* prepare_blocks */
+
+/**
+ * Post-Walker: link in all Phis.
+ */
+static void link_phis(ir_node *irn, void *ctx)
+{
+	(void)ctx;
+
+	if (is_Phi(irn)) {
+		ir_node *block = get_nodes_block(irn);
+		add_Block_phi(block, irn);
+	}
+} /* link_phis */
 
 /**
 * Block walker: creates the inverse post-order list for the CFG.
*/ -static void inverse_post_order(ir_node *block, void *ctx) { +static void inverse_post_order(ir_node *block, void *ctx) +{ block_t *entry = get_block_entry(block); (void)ctx; + /* mark this block IS reachable from start */ + set_Block_mark(block, 1); + /* create the list in inverse order */ entry->forward_next = env.forward; - entry->flags |= BLK_FLAG_REACHABLE; env.forward = entry; /* remember the first visited (last in list) entry, needed for later */ if (env.backward == NULL) env.backward = entry; -} +} /* inverse_post_order */ /** * Block walker: create backward links for the memops of a block. */ -static void collect_backward(ir_node *block, void *ctx) { +static void collect_backward(ir_node *block, void *ctx) +{ block_t *entry = get_block_entry(block); memop_t *last, *op; (void)ctx; - /* ignore unreachable blocks */ - if (!(entry->flags & BLK_FLAG_REACHABLE)) - return; - /* * Do NOT link in the end block yet. We want it to be * the first in the list. This is NOT guaranteed by the walker @@ -353,15 +455,19 @@ static void collect_backward(ir_node *block, void *ctx) { last = op; } entry->memop_backward = last; -} +} /* collect_backward */ /** * Allocate a memop. * - * @param irn the IR-node representing the memop + * @param irn the IR-node representing the memop or NULL + * if this is a translated (virtual) memop + * + * @return the allocated memop */ -static memop_t *alloc_memop(ir_node *irn) { - memop_t *m = obstack_alloc(&env.obst, sizeof(*m)); +static memop_t *alloc_memop(ir_node *irn) +{ + memop_t *m = OALLOC(&env.obst, memop_t); m->value.address = NULL; m->value.value = NULL; @@ -375,9 +481,10 @@ static memop_t *alloc_memop(ir_node *irn) { memset(m->projs, 0, sizeof(m->projs)); - set_irn_link(irn, m); + if (irn != NULL) + set_irn_link(irn, m); return m; -} +} /* alloc_memop */ /** * Create a memop for a Phi-replacement. @@ -385,8 +492,9 @@ static memop_t *alloc_memop(ir_node *irn) { * @param op the memop to clone * @param phi the Phi-node representing the new value */ -static memop_t *clone_memop_phi(memop_t *op, ir_node *phi) { - memop_t *m = obstack_alloc(&env.obst, sizeof(*m)); +static memop_t *clone_memop_phi(memop_t *op, ir_node *phi) +{ + memop_t *m = OALLOC(&env.obst, memop_t); m->value = op->value; m->value.value = phi; @@ -398,40 +506,7 @@ static memop_t *clone_memop_phi(memop_t *op, ir_node *phi) { set_irn_link(phi, m); return m; -} - -/** - * Register an address and allocate an ID for it. - * - * @param adr the IR-node representing the address - */ -static unsigned register_address(ir_node *adr) { - address_entry *entry; - - /* skip Confirms and Casts */ -restart: - if (is_Confirm(adr)) { - adr = get_Confirm_value(adr); - goto restart; - } - if (is_Cast(adr)) { - adr = get_Cast_op(adr); - goto restart; - } - - entry = ir_nodemap_get(&env.adr_map, adr); - - if (entry == NULL) { - /* new address */ - entry = obstack_alloc(&env.obst, sizeof(*entry)); - - entry->id = env.curr_adr_id++; - ir_nodemap_insert(&env.adr_map, adr, entry); - - DB((dbg, LEVEL_3, "ADDRESS %+F has ID %u\n", adr, entry->id)); - } - return entry->id; -} +} /* clone_memop_phi */ /** * Return the memory properties of a call node. 
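
[editor's note, not part of the patch] The phi_translate() helper introduced above is
the workhorse of the new anticipation code further down (backward_antic() and
insert_Load()): an address that is merged by a Phi in `block` is rewritten to the
operand that is live on the edge from predecessor `pos`; anything else passes through
unchanged. A hedged sketch of the semantics (the block and node names are
illustrative, not taken from the patch):

	/* Let block B have the predecessors P0 and P1, and let
	 *   adr = Phi(adr0, adr1)  be built in B.
	 * A Load from adr, moved into P1, must then use adr1: */
	ir_node *adr_in_p1 = phi_translate(adr, B, 1); /* == adr1 */

	/* a non-Phi address, or a Phi of another block, is returned as-is */
	assert(phi_translate(adr0, B, 1) == adr0);
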
@@ -440,7 +515,8 @@ restart: * * return a bitset of mtp_property_const and mtp_property_pure */ -static unsigned get_Call_memory_properties(ir_node *call) { +static unsigned get_Call_memory_properties(ir_node *call) +{ ir_type *call_tp = get_Call_type(call); unsigned prop = get_method_additional_properties(call_tp); @@ -456,7 +532,7 @@ static unsigned get_Call_memory_properties(ir_node *call) { } } return prop & (mtp_property_const|mtp_property_pure); -} +} /* get_Call_memory_properties */ /** * Returns an entity if the address ptr points to a constant one. @@ -511,7 +587,7 @@ static ir_entity *find_constant_entity(ir_node *ptr) } } - if (variability_constant == get_entity_variability(ent)) + if (get_entity_linkage(ent) == IR_LINKAGE_CONSTANT) return ent; /* try next */ @@ -534,7 +610,7 @@ static ir_entity *find_constant_entity(ir_node *ptr) ir_node *l = get_Sub_left(ptr); ir_node *r = get_Sub_right(ptr); - if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r)) + if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r)) ptr = l; else return NULL; @@ -549,7 +625,8 @@ static ir_entity *find_constant_entity(ir_node *ptr) /** * Return the Selection index of a Sel node from dimension n */ -static long get_Sel_array_index_long(ir_node *n, int dim) { +static long get_Sel_array_index_long(ir_node *n, int dim) +{ ir_node *index = get_Sel_index(n, dim); assert(is_Const(index)); return get_tarval_long(get_Const_tarval(index)); @@ -563,7 +640,8 @@ static long get_Sel_array_index_long(ir_node *n, int dim) { * @param depth current depth in steps upward from the root * of the address */ -static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) { +static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) +{ compound_graph_path *res = NULL; ir_entity *root, *field, *ent; int path_len, pos, idx; @@ -710,7 +788,8 @@ ptr_arith: * Returns an access path or NULL. The access path is only * valid, if the graph is in phase_high and _no_ address computation is used. 
*/ -static compound_graph_path *get_accessed_path(ir_node *ptr) { +static compound_graph_path *get_accessed_path(ir_node *ptr) +{ compound_graph_path *gr = rec_get_accessed_path(ptr, 0); return gr; } /* get_accessed_path */ @@ -721,7 +800,8 @@ typedef struct path_entry { long index; } path_entry; -static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) { +static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) +{ path_entry entry, *p; ir_entity *ent, *field; ir_initializer_t *initializer; @@ -895,7 +975,8 @@ ptr_arith: return NULL; } /* rec_find_compound_ent_value */ -static ir_node *find_compound_ent_value(ir_node *ptr) { +static ir_node *find_compound_ent_value(ir_node *ptr) +{ return rec_find_compound_ent_value(ptr, NULL); } /* find_compound_ent_value */ @@ -904,7 +985,8 @@ static ir_node *find_compound_ent_value(ir_node *ptr) { * * @param op the Load memop */ -static void mark_replace_load(memop_t *op, ir_node *def) { +static void mark_replace_load(memop_t *op, ir_node *def) +{ op->replace = def; op->flags |= FLAG_KILLED_NODE; env.changed = 1; @@ -915,7 +997,8 @@ static void mark_replace_load(memop_t *op, ir_node *def) { * * @param op the Store memop */ -static void mark_remove_store(memop_t *op) { +static void mark_remove_store(memop_t *op) +{ op->flags |= FLAG_KILLED_NODE; env.changed = 1; } /* mark_remove_store */ @@ -925,7 +1008,8 @@ static void mark_remove_store(memop_t *op) { * * @param m the memop */ -static void update_Load_memop(memop_t *m) { +static void update_Load_memop(memop_t *m) +{ int i; ir_node *load = m->node; ir_node *ptr; @@ -969,9 +1053,7 @@ static void update_Load_memop(memop_t *m) { /* check if we can determine the entity that will be loaded */ ent = find_constant_entity(ptr); - if (ent != NULL && - allocation_static == get_entity_allocation(ent) && - visibility_external_allocated != get_entity_visibility(ent)) { + if (ent != NULL && get_entity_visibility(ent) != ir_visibility_external) { /* a static allocation that is not external: there should be NO exception * when loading even if we cannot replace the load itself. */ ir_node *value = NULL; @@ -984,22 +1066,16 @@ static void update_Load_memop(memop_t *m) { env.changed = 1; } if (m->projs[pn_Load_X_regular]) { - exchange(m->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load))); + exchange(m->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load))); m->projs[pn_Load_X_regular] = NULL; env.changed = 1; } - if (variability_constant == get_entity_variability(ent)) { - if (is_atomic_entity(ent)) { - /* Might not be atomic after lowering of Sels. In this case we - * could also load, but it's more complicated. */ - /* more simpler case: we load the content of a constant value: - * replace it by the constant itself */ - value = get_atomic_ent_value(ent); - } else if (ent->has_initializer) { + if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT) { + if (ent->initializer) { /* new style initializer */ value = find_compound_ent_value(ptr); - } else { + } else if (entity_has_compound_ent_values(ent)) { /* old style initializer */ compound_graph_path *path = get_accessed_path(ptr); @@ -1029,16 +1105,17 @@ static void update_Load_memop(memop_t *m) { ++env.n_mem_ops; } else { /* no user, KILL it */ - m->flags |= FLAG_KILLED_NODE; + mark_replace_load(m, NULL); } -} +} /* update_Load_memop */ /** * Update a memop for a Store. 
* * @param m the memop */ -static void update_Store_memop(memop_t *m) { +static void update_Store_memop(memop_t *m) +{ int i; ir_node *store = m->node; ir_node *adr = get_Store_ptr(store); @@ -1078,14 +1155,15 @@ static void update_Store_memop(memop_t *m) { } m->value.value = get_Store_value(store); m->value.mode = get_irn_mode(m->value.value); -} +} /* update_Store_memop */ /** * Update a memop for a Call. * * @param m the memop */ -static void update_Call_memop(memop_t *m) { +static void update_Call_memop(memop_t *m) +{ ir_node *call = m->node; unsigned prop = get_Call_memory_properties(call); int i; @@ -1095,7 +1173,7 @@ static void update_Call_memop(memop_t *m) { can kick it from the list. */ } else if (prop & mtp_property_pure) { /* pure calls READ memory */ - m->flags = FLAG_KILL_STORES; + m->flags = 0; } else m->flags = FLAG_KILL_ALL; @@ -1110,19 +1188,20 @@ static void update_Call_memop(memop_t *m) { case pn_Call_X_except: m->flags |= FLAG_EXCEPTION; break; - case pn_Call_M_regular: + case pn_Call_M: m->mem = proj; break; } } -} +} /* update_Call_memop */ /** * Update a memop for a Div/Mod/Quot/DivMod. * * @param m the memop */ -static void update_DivOp_memop(memop_t *m) { +static void update_DivOp_memop(memop_t *m) +{ ir_node *div = m->node; int i; @@ -1137,27 +1216,29 @@ static void update_DivOp_memop(memop_t *m) { case pn_Generic_X_except: m->flags |= FLAG_EXCEPTION; break; - case pn_Generic_M_regular: + case pn_Generic_M: m->mem = proj; break; } } -} +} /* update_DivOp_memop */ /** * Update a memop for a Phi. * * @param m the memop */ -static void update_Phi_memop(memop_t *m) { +static void update_Phi_memop(memop_t *m) +{ /* the Phi is it's own mem */ m->mem = m->node; -} +} /* update_Phi_memop */ /** * Memory walker: collect all memory ops and build topological lists. */ -static void collect_memops(ir_node *irn, void *ctx) { +static void collect_memops(ir_node *irn, void *ctx) +{ memop_t *op; ir_node *block; block_t *entry; @@ -1226,14 +1307,19 @@ static void collect_memops(ir_node *irn, void *ctx) { entry->memop_backward = op; } } -} +} /* collect_memops */ /** * Find an address in the current set. * * @param value the value to be searched for + * + * @return a memop for the value or NULL if the value does + * not exists in the set or cannot be converted into + * the requested mode */ -static memop_t *find_address(const value_t *value) { +static memop_t *find_address(const value_t *value) +{ if (rbitset_is_set(env.curr_set, value->id)) { memop_t *res = env.curr_id_2_memop[value->id]; @@ -1246,121 +1332,72 @@ static memop_t *find_address(const value_t *value) { return res; } return NULL; -} +} /* find_address */ /** * Find an address in the avail_out set. 
* * @param bl the block - * @param value the value to be searched for */ -static memop_t *find_address_avail(const block_t *bl, const value_t *value) { - if (rbitset_is_set(bl->avail_out, value->id)) { - memop_t *res = bl->id_2_memop_avail[value->id]; +static memop_t *find_address_avail(const block_t *bl, unsigned id, const ir_mode *mode) +{ + if (rbitset_is_set(bl->avail_out, id)) { + memop_t *res = bl->id_2_memop_avail[id]; - if (res->value.mode == value->mode) + if (res->value.mode == mode) return res; /* allow hidden casts */ if (get_mode_arithmetic(res->value.mode) == irma_twos_complement && - get_mode_arithmetic(value->mode) == irma_twos_complement && - get_mode_size_bits(res->value.mode) == get_mode_size_bits(value->mode)) + get_mode_arithmetic(mode) == irma_twos_complement && + get_mode_size_bits(res->value.mode) == get_mode_size_bits(mode)) return res; } return NULL; -} - -/** - * Kill all Loads from the current set. - */ -static void kill_all_loads(void) { - unsigned pos = 0; - unsigned end = env.rbs_size - 1; - - for (pos = rbitset_next(env.curr_set, pos, 1); pos != end; pos = rbitset_next(env.curr_set, pos + 1, 1)) { - memop_t *op = env.curr_id_2_memop[pos]; - - if (! is_Store(op->node)) - rbitset_clear(env.curr_set, pos); - } -} - -/** - * Kill all Stores from the current set. - */ -static void kill_all_stores(void) { - unsigned pos = 0; - unsigned end = env.rbs_size - 1; - - for (pos = rbitset_next(env.curr_set, pos, 1); pos != end; pos = rbitset_next(env.curr_set, pos + 1, 1)) { - memop_t *op = env.curr_id_2_memop[pos]; - - if (is_Store(op->node)) - rbitset_clear(env.curr_set, pos); - } -} +} /* find_address_avail */ /** * Kill all addresses from the current set. */ -static void kill_all(void) { +static void kill_all(void) +{ rbitset_clear_all(env.curr_set, env.rbs_size); /* set sentinel */ rbitset_set(env.curr_set, env.rbs_size - 1); -} - - -/** - * Kill Stores that are not alias free due to a Load value from the current set. - * - * @param value the Load value - */ -static void kill_stores(const value_t *value) { - unsigned pos = 0; - unsigned end = env.rbs_size - 1; - - for (pos = rbitset_next(env.curr_set, pos, 1); pos != end; pos = rbitset_next(env.curr_set, pos + 1, 1)) { - memop_t *op = env.curr_id_2_memop[pos]; - - if (is_Store(op->node)) { - if (ir_no_alias != get_alias_relation(current_ir_graph, value->address, value->mode, - op->value.address, op->value.mode)) { - rbitset_clear(env.curr_set, pos); - env.curr_id_2_memop[pos] = NULL; - } - } - } -} +} /* kill_all */ /** * Kill memops that are not alias free due to a Store value from the current set. * * @param value the Store value */ -static void kill_memops(const value_t *value) { - unsigned pos = 0; +static void kill_memops(const value_t *value) +{ unsigned end = env.rbs_size - 1; + unsigned pos; - for (pos = rbitset_next(env.curr_set, pos, 1); pos != end; pos = rbitset_next(env.curr_set, pos + 1, 1)) { + for (pos = rbitset_next(env.curr_set, 0, 1); pos < end; pos = rbitset_next(env.curr_set, pos + 1, 1)) { memop_t *op = env.curr_id_2_memop[pos]; if (ir_no_alias != get_alias_relation(current_ir_graph, value->address, value->mode, op->value.address, op->value.mode)) { rbitset_clear(env.curr_set, pos); env.curr_id_2_memop[pos] = NULL; + DB((dbg, LEVEL_2, "KILLING %+F because of possible alias address %+F\n", op->node, value->address)); } } -} +} /* kill_memops */ /** * Add the value of a memop to the current set. 
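 * (editor's note, illustrative only: within one block's gen/kill computation,
 *  add_memop() and find_address() are inverses of each other while no kill
 *  intervenes, e.g.
 *      add_memop(op);
 *      assert(find_address(&op->value) == op);
 *  an entry only leaves the current set through kill_memops()/kill_all() or by
 *  being overwritten by a later memop with the same address id.)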
* * @param op the memory op */ -static void add_memop(memop_t *op) { +static void add_memop(memop_t *op) +{ rbitset_set(env.curr_set, op->value.id); env.curr_id_2_memop[op->value.id] = op; -} +} /* add_memop */ /** * Add the value of a memop to the avail_out set. @@ -1368,50 +1405,71 @@ static void add_memop(memop_t *op) { * @param bl the block * @param op the memory op */ -static void add_memop_avail(block_t *bl, memop_t *op) { +static void add_memop_avail(block_t *bl, memop_t *op) +{ rbitset_set(bl->avail_out, op->value.id); bl->id_2_memop_avail[op->value.id] = op; -} - -/** - * Update a value of a memop to the avail_out set. - * - * @param bl the block - * @param op the memory op - */ -static void update_memop_avail(block_t *bl, memop_t *op) { - if (rbitset_is_set(bl->avail_out, op->value.id)) - bl->id_2_memop_avail[op->value.id] = op; -} +} /* add_memop_avail */ /** * Check, if we can convert a value of one mode to another mode * without changing the representation of bits. + * + * @param from the original mode + * @param to the destination mode */ -static int can_convert_to(const ir_mode *from, const ir_mode *to) { +static int can_convert_to(const ir_mode *from, const ir_mode *to) +{ if (get_mode_arithmetic(from) == irma_twos_complement && get_mode_arithmetic(to) == irma_twos_complement && get_mode_size_bits(from) == get_mode_size_bits(to)) return 1; return 0; -} +} /* can_convert_to */ /** - * Add a Conv if needed. + * Add a Conv to the requested mode if needed. + * + * @param irn the IR-node to convert + * @param mode the destination mode + * + * @return the possible converted node or NULL + * if the conversion is not possible */ -static ir_node *conv_to(ir_node *irn, ir_mode *mode) { +static ir_node *conv_to(ir_node *irn, ir_mode *mode) +{ ir_mode *other = get_irn_mode(irn); if (other != mode) { /* different modes: check if conversion is possible without changing the bits */ if (can_convert_to(other, mode)) { ir_node *block = get_nodes_block(irn); - return new_r_Conv(current_ir_graph, block, irn, mode); + return new_r_Conv(block, irn, mode); } /* otherwise not possible ... yet */ return NULL; } return irn; -} +} /* conv_to */ + +/** + * Update the address of an value if this address was a load result + * and the load is killed now. + * + * @param value the value whose address is updated + */ +static void update_address(value_t *value) +{ + if (is_Proj(value->address)) { + ir_node *load = get_Proj_pred(value->address); + + if (is_Load(load)) { + const memop_t *op = get_irn_memop(load); + + if (op->flags & FLAG_KILLED_NODE) + value->address = op->replace; + } + } +} /* update_address */ /** * Do forward dataflow analysis on the given block and calculate the @@ -1419,7 +1477,8 @@ static ir_node *conv_to(ir_node *irn, ir_mode *mode) { * * @param bl the block */ -static void calc_gen_kill_avail(block_t *bl) { +static void calc_gen_kill_avail(block_t *bl) +{ memop_t *op; ir_node *def; @@ -1434,7 +1493,10 @@ static void calc_gen_kill_avail(block_t *bl) { case iro_Load: if (! (op->flags & (FLAG_KILLED_NODE|FLAG_IGNORE))) { /* do we have this already? 
*/ - memop_t *other = find_address(&op->value); + memop_t *other; + + update_address(&op->value); + other = find_address(&op->value); if (other != NULL && other != op) { def = conv_to(other->value.value, op->value.mode); if (def != NULL) { @@ -1448,65 +1510,54 @@ static void calc_gen_kill_avail(block_t *bl) { } #endif mark_replace_load(op, def); - } else { - /* overwrite it */ - add_memop(op); + /* do NOT change the memop table */ + continue; } - } else { - /* add this value */ - kill_stores(&op->value); - add_memop(op); } + /* add this value */ + add_memop(op); } break; case iro_Store: - if (! (op->flags & (FLAG_KILLED_NODE|FLAG_IGNORE))) { + if (! (op->flags & FLAG_KILLED_NODE)) { /* do we have this store already */ - memop_t *other = find_address(&op->value); + memop_t *other; + + update_address(&op->value); + other = find_address(&op->value); if (other != NULL) { if (is_Store(other->node)) { - if (op != other && get_nodes_block(other->node) == get_nodes_block(op->node)) { + if (op != other && !(other->flags & FLAG_IGNORE) && + get_nodes_block(other->node) == get_nodes_block(op->node)) { /* * A WAW in the same block we can kick the first store. * This is a shortcut: we know that the second Store will be anticipated * then in an case. */ - DB((dbg, LEVEL_1, "WAW %+F <- %+F\n", op->node, other->node)); + DB((dbg, LEVEL_1, "WAW %+F <- %+F\n", other->node, op->node)); mark_remove_store(other); /* FIXME: a Load might be get freed due to this killed store */ } - } else if (other->value.value == op->value.value) { + } else if (other->value.value == op->value.value && !(op->flags & FLAG_IGNORE)) { /* WAR */ DB((dbg, LEVEL_1, "WAR %+F <- %+F\n", op->node, other->node)); mark_remove_store(op); - } else { - /* we overwrite the value that was loaded */ - add_memop(op); + /* do NOT change the memop table */ + continue; } - } else { - /* add this value */ - kill_memops(&op->value); - add_memop(op); } + /* KILL all possible aliases */ + kill_memops(&op->value); + /* add this value */ + add_memop(op); } break; default: - switch (op->flags & (FLAG_KILL_LOADS|FLAG_KILL_STORES)) { - case FLAG_KILL_LOADS|FLAG_KILL_STORES: + if (op->flags & FLAG_KILL_ALL) kill_all(); - break; - case FLAG_KILL_LOADS: - kill_all_loads(); - break; - case FLAG_KILL_STORES: - kill_all_stores(); - break; - case 0: - break; - } } } -} +} /* calc_gen_kill_avail */ #define BYTE_SIZE(x) (((x) + 7) >> 3) @@ -1516,14 +1567,15 @@ static void calc_gen_kill_avail(block_t *bl) { * * @param block the block */ -static void forward_avail(block_t *bl) { +static void forward_avail(block_t *bl) +{ /* fill the data from the current block */ env.curr_id_2_memop = bl->id_2_memop_avail; env.curr_set = bl->avail_out; calc_gen_kill_avail(bl); dump_curr(bl, "Avail_out"); -} +} /* forward_avail */ /** * Do backward dataflow analysis on a given block to calculate the antic set @@ -1533,18 +1585,72 @@ static void forward_avail(block_t *bl) { * * @return non-zero if the set has changed since last iteration */ -static int backward_antic(block_t *bl) { +static int backward_antic(block_t *bl) +{ memop_t *op; - int n = get_Block_n_cfg_outs(bl->block); + ir_node *block = bl->block; + int n = get_Block_n_cfg_outs(block); + + if (n == 1) { + ir_node *succ = get_Block_cfg_out(block, 0); + block_t *succ_bl = get_block_entry(succ); + int pred_pos = get_Block_cfgpred_pos(succ, block); + unsigned end = env.rbs_size - 1; + unsigned pos; + + kill_all(); - if (n >= 1) { - ir_node *succ = get_Block_cfg_out(bl->block, 0); + if (bl->trans_results == NULL) { + /* allocate 
the translate cache */ + bl->trans_results = OALLOCNZ(&env.obst, memop_t*, env.curr_adr_id); + } + + /* check for partly redundant values */ + for (pos = rbitset_next(succ_bl->anticL_in, 0, 1); + pos < end; + pos = rbitset_next(succ_bl->anticL_in, pos + 1, 1)) { + /* + * do Phi-translation here: Note that at this point the nodes are + * not changed, so we can safely cache the results. + * However: Loads of Load results ARE bad, because we have no way + to translate them yet ... + */ + memop_t *op = bl->trans_results[pos]; + if (op == NULL) { + /* not yet translated */ + ir_node *adr, *trans_adr; + + op = succ_bl->id_2_memop_antic[pos]; + adr = op->value.address; + + trans_adr = phi_translate(adr, succ, pred_pos); + if (trans_adr != adr) { + /* create a new entry for the translated one */ + memop_t *new_op; + + new_op = alloc_memop(NULL); + new_op->value.address = trans_adr; + new_op->value.id = register_address(trans_adr); + new_op->value.mode = op->value.mode; + new_op->node = op->node; /* we need the node to decide if Load/Store */ + new_op->flags = op->flags; + + bl->trans_results[pos] = new_op; + op = new_op; + } + } + env.curr_id_2_memop[op->value.id] = op; + rbitset_set(env.curr_set, op->value.id); + } + } else if (n > 1) { + ir_node *succ = get_Block_cfg_out(block, 0); block_t *succ_bl = get_block_entry(succ); int i; - rbitset_cpy(env.curr_set, succ_bl->anticL_in, env.rbs_size); + rbitset_copy(env.curr_set, succ_bl->anticL_in, env.rbs_size); memcpy(env.curr_id_2_memop, succ_bl->id_2_memop_antic, env.rbs_size * sizeof(env.curr_id_2_memop[0])); + /* Hmm: probably we want kill merges of Loads ans Stores here */ for (i = n - 1; i > 0; --i) { ir_node *succ = get_Block_cfg_out(bl->block, i); block_t *succ_bl = get_block_entry(succ); @@ -1556,18 +1662,6 @@ static int backward_antic(block_t *bl) { kill_all(); } -#if 0 - /* cleanup: kill those Loads which address is not available */ - for (pos = rbitset_next(env.curr_set, pos, 1); pos != end; pos = rbitset_next(env.curr_set, pos + 1, 1)) { - memop_t *op = succ_bl->id_2_memop[pos]; - ir_node *ptr = get_Load_ptr(op->node); - ir_node *ptr_bl = get_nodes_block(ptr); - - if (!block_dominates(ptr_bl, bl->block)) - rbitset_clear(env.curr_set, pos); - } -#endif - dump_curr(bl, "AnticL_out"); for (op = bl->memop_backward; op != NULL; op = op->prev) { @@ -1585,45 +1679,35 @@ static int backward_antic(block_t *bl) { } break; case iro_Store: - if (! (op->flags & (FLAG_KILLED_NODE|FLAG_IGNORE))) { + if (! (op->flags & FLAG_KILLED_NODE)) { /* a Store: check which memops must be killed */ kill_memops(&op->value); } break; default: - switch (op->flags & (FLAG_KILL_LOADS|FLAG_KILL_STORES)) { - case FLAG_KILL_LOADS|FLAG_KILL_STORES: + if (op->flags & FLAG_KILL_ALL) kill_all(); - break; - case FLAG_KILL_LOADS: - kill_all_loads(); - break; - case FLAG_KILL_STORES: - /*kill_all_stores();*/ - break; - case 0: - break; - } } } memcpy(bl->id_2_memop_antic, env.curr_id_2_memop, env.rbs_size * sizeof(env.curr_id_2_memop[0])); - if (! rbitset_equal(bl->anticL_in, env.curr_set, env.rbs_size)) { + if (! rbitsets_equal(bl->anticL_in, env.curr_set, env.rbs_size)) { /* changed */ - rbitset_cpy(bl->anticL_in, env.curr_set, env.rbs_size); + rbitset_copy(bl->anticL_in, env.curr_set, env.rbs_size); dump_curr(bl, "AnticL_in*"); return 1; } dump_curr(bl, "AnticL_in"); return 0; -} +} /* backward_antic */ /** * Replace a Load memop by a already known value. 
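 * (editor's note, illustrative only: the replacement value may have a different
 *  but bit-compatible mode; in that case the code below inserts a hidden cast,
 *      def = new_rd_Conv(db, block, def, mode);
 *  mirroring the conv_to() helper used during the availability analysis.)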
* * @param op the Load memop */ -static void replace_load(memop_t *op) { +static void replace_load(memop_t *op) +{ ir_node *load = op->node; ir_node *def = skip_Id(op->replace); ir_node *proj; @@ -1651,7 +1735,7 @@ static void replace_load(memop_t *op) { /* a hidden cast */ dbg_info *db = get_irn_dbg_info(load); ir_node *block = get_nodes_block(proj); - def = new_rd_Conv(db, current_ir_graph, block, def, mode); + def = new_rd_Conv(db, block, def, mode); } exchange(proj, def); } @@ -1661,16 +1745,17 @@ static void replace_load(memop_t *op) { } proj = op->projs[pn_Load_X_regular]; if (proj != NULL) { - exchange(proj, new_r_Jmp(current_ir_graph, get_nodes_block(load))); + exchange(proj, new_r_Jmp(get_nodes_block(load))); } -} +} /* replace_load */ /** * Remove a Store memop. * * @param op the Store memop */ -static void remove_store(memop_t *op) { +static void remove_store(memop_t *op) +{ ir_node *store = op->node; ir_node *proj; @@ -1686,9 +1771,9 @@ static void remove_store(memop_t *op) { } proj = op->projs[pn_Store_X_regular]; if (proj != NULL) { - exchange(proj, new_r_Jmp(current_ir_graph, get_nodes_block(store))); + exchange(proj, new_r_Jmp(get_nodes_block(store))); } -} +} /* remove_store */ /** @@ -1696,7 +1781,8 @@ static void remove_store(memop_t *op) { * * @param bl the block */ -static void do_replacements(block_t *bl) { +static void do_replacements(block_t *bl) +{ memop_t *op; for (op = bl->memop_forward; op != NULL; op = op->next) { @@ -1711,12 +1797,13 @@ static void do_replacements(block_t *bl) { } } } -} +} /* do_replacements */ /** * Calculate the Avail_out sets for all basic blocks. */ -static void calcAvail(void) { +static void calcAvail(void) +{ memop_t **tmp_memop = env.curr_id_2_memop; unsigned *tmp_set = env.curr_set; block_t *bl; @@ -1732,12 +1819,13 @@ static void calcAvail(void) { /* restore the current sets */ env.curr_id_2_memop = tmp_memop; env.curr_set = tmp_set; -} +} /* calcAvail */ /** * Calculate the Antic_in sets for all basic blocks. */ -static void calcAntic(void) { +static void calcAntic(void) +{ int i, need_iter; /* calculate antic_out */ @@ -1757,29 +1845,15 @@ static void calcAntic(void) { ++i; } while (need_iter); DB((dbg, LEVEL_2, "Get anticipated Load set after %d iterations\n", i)); -} - -/** - * Return the node representing the last memory in a block. - * - * @param bl the block - */ -static ir_node *find_first_memory(block_t *bl) { - for (;;) { - if (bl->memop_forward != NULL) { - return bl->memop_forward->node; - } - /* if there is NO memory in this block, go to the post dominator */ - bl = get_block_entry(get_Block_ipostdom(bl->block)); - } -} +} /* calcAntic */ /** * Return the node representing the last memory in a block. 
* * @param bl the block */ -static ir_node *find_last_memory(block_t *bl) { +static ir_node *find_last_memory(block_t *bl) +{ for (;;) { if (bl->memop_backward != NULL) { return bl->memop_backward->mem; @@ -1787,7 +1861,7 @@ static ir_node *find_last_memory(block_t *bl) { /* if there is NO memory in this block, go to the dominator */ bl = get_block_entry(get_Block_idom(bl->block)); } -} +} /* find_last_memory */ /** * Reroute all memory users of old memory @@ -1796,7 +1870,8 @@ static ir_node *find_last_memory(block_t *bl) { * @param omem the old memory IR-node * @param nmem the new memory IR-node */ -static void reroute_all_mem_users(ir_node *omem, ir_node *nmem) { +static void reroute_all_mem_users(ir_node *omem, ir_node *nmem) +{ int i; for (i = get_irn_n_outs(omem) - 1; i >= 0; --i) { @@ -1808,7 +1883,7 @@ static void reroute_all_mem_users(ir_node *omem, ir_node *nmem) { /* all edges previously point to omem now point to nmem */ nmem->out = omem->out; -} +} /* reroute_all_mem_users */ /** * Reroute memory users of old memory that are dominated by a given block @@ -1818,7 +1893,8 @@ static void reroute_all_mem_users(ir_node *omem, ir_node *nmem) { * @param nmem the new memory IR-node * @param pass_bl the block the memory must pass */ -static void reroute_mem_through(ir_node *omem, ir_node *nmem, ir_node *pass_bl) { +static void reroute_mem_through(ir_node *omem, ir_node *nmem, ir_node *pass_bl) +{ int i, j, n = get_irn_n_outs(omem); ir_def_use_edge *edges = NEW_ARR_D(ir_def_use_edge, &env.obst, n + 1); @@ -1847,43 +1923,41 @@ static void reroute_mem_through(ir_node *omem, ir_node *nmem, ir_node *pass_bl) /* first entry is used for the length */ edges[0].pos = j; nmem->out = edges; -} +} /* reroute_mem_through */ /** * insert Loads, making partly redundant Loads fully redundant */ -static int insert_Load(block_t *bl) { +static int insert_Load(block_t *bl) +{ ir_node *block = bl->block; int i, n = get_Block_n_cfgpreds(block); - unsigned pos = 0; unsigned end = env.rbs_size - 1; - int res = 0; - ir_node *pred; - block_t *pred_bl; + unsigned pos; DB((dbg, LEVEL_3, "processing %+F\n", block)); if (n == 0) { /* might still happen for an unreachable block (end for instance) */ - return res; + return 0; } - pred = get_Block_cfgpred_block(bl->block, 0); - pred_bl = get_block_entry(pred); - - rbitset_cpy(env.curr_set, pred_bl->avail_out, env.rbs_size); - if (n > 1) { - int i, pos; ir_node **ins; + int pos; NEW_ARR_A(ir_node *, ins, n); - /* more than one predecessors, calculate the join for all avail_outs */ - for (i = n - 1; i > 0; --i) { - ir_node *pred = skip_Proj(get_Block_cfgpred(block, i)); - block_t *pred_bl = get_block_entry(get_nodes_block(pred)); + rbitset_set_all(env.curr_set, env.rbs_size); + /* More than one predecessors, calculate the join for all avail_outs ignoring unevaluated + Blocks. These put in Top anyway. 
*/ + for (i = n - 1; i >= 0; --i) { + ir_node *pred = skip_Proj(get_Block_cfgpred(block, i)); + ir_node *blk = get_nodes_block(pred); + block_t *pred_bl; + + pred_bl = get_block_entry(blk); rbitset_and(env.curr_set, pred_bl->avail_out, env.rbs_size); if (is_Load(pred) || is_Store(pred)) { @@ -1906,36 +1980,39 @@ static int insert_Load(block_t *bl) { ir_node *pred = get_Block_cfgpred_block(bl->block, 0); block_t *pred_bl = get_block_entry(pred); int need_phi = 0; - ir_mode *mode; - memop_t *first; + memop_t *first = NULL; + ir_mode *mode = NULL; - first = pred_bl->id_2_memop_avail[pos]; - ins[0] = first->value.value; - mode = get_irn_mode(ins[0]); + for (i = 0; i < n; ++i) { + memop_t *mop; - for (i = 1; i < n; ++i) { pred = get_Block_cfgpred_block(bl->block, i); pred_bl = get_block_entry(pred); - ins[i] = conv_to(pred_bl->id_2_memop_avail[pos]->value.value, mode); - if (ins[i] != ins[0]) { - need_phi = 1; - if (ins[i] == NULL) { - /* conversion failed */ - need_phi = 2; - break; + mop = pred_bl->id_2_memop_avail[pos]; + if (first == NULL) { + first = mop; + ins[0] = first->value.value; + mode = get_irn_mode(ins[0]); + + /* no Phi needed so far */ + env.curr_id_2_memop[pos] = first; + } else { + ins[i] = conv_to(mop->value.value, mode); + if (ins[i] != ins[0]) { + if (ins[i] == NULL) { + /* conversion failed */ + env.curr_id_2_memop[pos] = NULL; + rbitset_clear(env.curr_set, pos); + break; + } + need_phi = 1; } } - } - switch (need_phi) { - case 0: - /* no Phi needed */ - env.curr_id_2_memop[pos] = first; - break; - case 1: { + if (need_phi) { /* build a Phi */ - ir_node *phi = new_r_Phi(current_ir_graph, bl->block, n, ins, mode); + ir_node *phi = new_r_Phi(bl->block, n, ins, mode); memop_t *phiop = alloc_memop(phi); phiop->value = first->value; @@ -1946,169 +2023,165 @@ static int insert_Load(block_t *bl) { env.curr_id_2_memop[pos] = phiop; DB((dbg, LEVEL_3, "Created new %+F on merging value for address %+F\n", phi, first->value.address)); - break; - } - default: - /* not possible because of different modes, delete the entry */ - rbitset_clear(env.curr_set, pos); - break; } } } } else { /* only one predecessor, simply copy the map */ - memcpy(env.curr_id_2_memop, pred_bl->id_2_memop_avail, env.rbs_size * sizeof(bl->id_2_memop_avail[0])); - } - + ir_node *pred = get_Block_cfgpred_block(bl->block, 0); + block_t *pred_bl = get_block_entry(pred); - /* recalculate avail by gen and kill */ - calc_gen_kill_avail(bl); + rbitset_copy(env.curr_set, pred_bl->avail_out, env.rbs_size); - if (!rbitset_equal(bl->avail_out, env.curr_set, env.rbs_size)) { - /* the avail set has changed */ - rbitset_cpy(bl->avail_out, env.curr_set, env.rbs_size); - memcpy(bl->id_2_memop_avail, env.curr_id_2_memop, env.rbs_size * sizeof(env.curr_id_2_memop[0])); - dump_curr(bl, "Avail_out*"); - res = 1; + memcpy(env.curr_id_2_memop, pred_bl->id_2_memop_avail, env.rbs_size * sizeof(bl->id_2_memop_avail[0])); } - if (n <= 1) - return res; - - /* check for partly redundant values */ - for (pos = rbitset_next(bl->anticL_in, pos, 1); pos != end; pos = rbitset_next(bl->anticL_in, pos + 1, 1)) { - memop_t *op = bl->id_2_memop_antic[pos]; - int have_some, all_same; - ir_node *first; - - assert(is_Load(op->node)); - - if (op->flags & FLAG_KILLED_NODE) - continue; - DB((dbg, LEVEL_3, "anticipated %+F\n", op->node)); - - have_some = 0; - all_same = 1; - first = 0; - for (i = n - 1; i >= 0; --i) { - ir_node *pred = get_Block_cfgpred_block(block, i); - block_t *pred_bl = get_block_entry(pred); - memop_t *e = find_address_avail(pred_bl, 
&op->value); - ir_mode *mode = op->value.mode; - - if (e == NULL) { - ir_node *block = get_nodes_block(op->value.address); - if (! block_dominates(block, pred)) { - /* cannot place a copy here */ - have_some = 0; - break; - } - DB((dbg, LEVEL_3, "%+F is not available in predecessor %+F\n", op->node, pred)); - pred_bl->avail = NULL; - all_same = 0; - } else { - if (e->value.mode != mode && !can_convert_to(e->value.mode, mode)) { - /* cannot create a Phi due to different modes */ - have_some = 0; - break; - } - pred_bl->avail = e; - have_some = 1; - DB((dbg, LEVEL_3, "%+F is available for %+F in predecessor %+F\n", e->node, op->node, pred)); - if (first == NULL) - first = e->node; - else if (first != e->node) - all_same = 0; + if (n > 1) { + /* check for partly redundant values */ + for (pos = rbitset_next(bl->anticL_in, 0, 1); + pos < end; + pos = rbitset_next(bl->anticL_in, pos + 1, 1)) { + memop_t *op = bl->id_2_memop_antic[pos]; + int have_some, all_same; + ir_node *first; + + if (rbitset_is_set(env.curr_set, pos)) { + /* already avail */ + continue; } - } - if (have_some && !all_same) { - ir_mode *mode = op->value.mode; - ir_node **in, *phi; - NEW_ARR_A(ir_node *, in, n); + assert(is_Load(op->node)); + + DB((dbg, LEVEL_3, "anticipated %+F\n", op->node)); + have_some = 0; + all_same = 1; + first = 0; for (i = n - 1; i >= 0; --i) { ir_node *pred = get_Block_cfgpred_block(block, i); block_t *pred_bl = get_block_entry(pred); - - if (pred_bl->avail == NULL) { - /* create a new Load here and make to make it fully redundant */ - dbg_info *db = get_irn_dbg_info(op->node); - ir_node *last_mem = find_last_memory(pred_bl); - ir_node *load, *def; - memop_t *new_op; - - assert(last_mem != NULL); - load = new_rd_Load(db, current_ir_graph, pred, last_mem, op->value.address, mode, cons_none); - def = new_r_Proj(current_ir_graph, pred, load, mode, pn_Load_res); - DB((dbg, LEVEL_1, "Created new %+F in %+F for party redundant %+F\n", load, pred, op->node)); - - new_op = alloc_memop(load); - new_op->mem = new_r_Proj(current_ir_graph, pred, load, mode_M, pn_Load_M); - new_op->value.address = op->value.address; - new_op->value.id = op->value.id; - new_op->value.mode = mode; - new_op->value.value = def; - - new_op->projs[pn_Load_M] = new_op->mem; - new_op->projs[pn_Load_res] = def; - - new_op->prev = pred_bl->memop_backward; - pred_bl->memop_backward = new_op; - - if (pred_bl->memop_forward == NULL) - pred_bl->memop_forward = new_op; - - if (get_nodes_block(last_mem) == pred) { - /* We have add a new last memory op in pred block. - If pred had already a last mem, reroute all memory - users. */ - reroute_all_mem_users(last_mem, new_op->mem); - } else { - /* reroute only those memory going through the pre block */ - reroute_mem_through(last_mem, new_op->mem, pred); + ir_mode *mode = op->value.mode; + memop_t *e; + ir_node *adr; + + adr = phi_translate(op->value.address, block, i); + DB((dbg, LEVEL_3, ".. using address %+F in pred %d\n", adr, i)); + e = find_address_avail(pred_bl, register_address(adr), mode); + if (e == NULL) { + ir_node *ef_block = get_nodes_block(adr); + if (! 
block_dominates(ef_block, pred)) { + /* cannot place a copy here */ + have_some = 0; + DB((dbg, LEVEL_3, "%+F cannot be moved into predecessor %+F\n", op->node, pred)); + break; } - - /* we added this load at the end, so it will be avail anyway */ - add_memop_avail(pred_bl, new_op); - pred_bl->avail = new_op; + DB((dbg, LEVEL_3, "%+F is not available in predecessor %+F\n", op->node, pred)); + pred_bl->avail = NULL; + all_same = 0; + } else { + if (e->value.mode != mode && !can_convert_to(e->value.mode, mode)) { + /* cannot create a Phi due to different modes */ + have_some = 0; + break; + } + pred_bl->avail = e; + have_some = 1; + DB((dbg, LEVEL_3, "%+F is available for %+F in predecessor %+F\n", e->node, op->node, pred)); + if (first == NULL) + first = e->node; + else if (first != e->node) + all_same = 0; } - in[i] = conv_to(pred_bl->avail->value.value, mode); } - phi = new_r_Phi(current_ir_graph, block, n, in, mode); - DB((dbg, LEVEL_1, "Created new %+F in %+F for now redundant %+F\n", phi, block, op->node)); - - if (get_nodes_block(op->node) == block) { - /* The redundant node was in the current block: - In that case, DO NOT update avail_out. If it was NOT - avail although it is executed in this bLock, it is killed by a later - instruction. - */ - memop_t *phi_op = clone_memop_phi(op, phi); - - update_memop_avail(bl, phi_op); - - mark_replace_load(op, phi); - } else { - /* The redundant node is NOT in the current block and anticipated. */ - memop_t *phi_op = clone_memop_phi(op, phi); + if (have_some && !all_same) { + ir_mode *mode = op->value.mode; + ir_node **in, *phi; + memop_t *phi_op; + + NEW_ARR_A(ir_node *, in, n); + + for (i = n - 1; i >= 0; --i) { + ir_node *pred = get_Block_cfgpred_block(block, i); + block_t *pred_bl = get_block_entry(pred); + + if (pred_bl->avail == NULL) { + /* create a new Load here and make to make it fully redundant */ + dbg_info *db = get_irn_dbg_info(op->node); + ir_node *last_mem = find_last_memory(pred_bl); + ir_node *load, *def, *adr; + memop_t *new_op; + + assert(last_mem != NULL); + adr = phi_translate(op->value.address, block, i); + load = new_rd_Load(db, pred, last_mem, adr, mode, cons_none); + def = new_r_Proj(load, mode, pn_Load_res); + DB((dbg, LEVEL_1, "Created new %+F in %+F for party redundant %+F\n", load, pred, op->node)); + + new_op = alloc_memop(load); + new_op->mem = new_r_Proj(load, mode_M, pn_Load_M); + new_op->value.address = adr; + new_op->value.id = op->value.id; + new_op->value.mode = mode; + new_op->value.value = def; + + new_op->projs[pn_Load_M] = new_op->mem; + new_op->projs[pn_Load_res] = def; + + new_op->prev = pred_bl->memop_backward; + if (pred_bl->memop_backward != NULL) + pred_bl->memop_backward->next = new_op; + + pred_bl->memop_backward = new_op; + + if (pred_bl->memop_forward == NULL) + pred_bl->memop_forward = new_op; + + if (get_nodes_block(last_mem) == pred) { + /* We have add a new last memory op in pred block. + If pred had already a last mem, reroute all memory + users. 
*/ + reroute_all_mem_users(last_mem, new_op->mem); + } else { + /* reroute only those memory going through the pre block */ + reroute_mem_through(last_mem, new_op->mem, pred); + } - add_memop_avail(bl, phi_op); + /* we added this load at the end, so it will be avail anyway */ + add_memop_avail(pred_bl, new_op); + pred_bl->avail = new_op; + } + in[i] = conv_to(pred_bl->avail->value.value, mode); + } + phi = new_r_Phi(block, n, in, mode); + DB((dbg, LEVEL_1, "Created new %+F in %+F for now redundant %+F\n", phi, block, op->node)); - /* propagate it downwards */ - res = 1; + phi_op = clone_memop_phi(op, phi); + add_memop(phi_op); } - /* clear it so we do not found it the next iteration */ - rbitset_clear(bl->anticL_in, pos); } } - return res; -} + + /* recalculate avail by gen and kill */ + calc_gen_kill_avail(bl); + + /* always update the map after gen/kill, as values might have been changed due to RAR/WAR/WAW */ + memcpy(bl->id_2_memop_avail, env.curr_id_2_memop, env.rbs_size * sizeof(env.curr_id_2_memop[0])); + + if (!rbitsets_equal(bl->avail_out, env.curr_set, env.rbs_size)) { + /* the avail set has changed */ + rbitset_copy(bl->avail_out, env.curr_set, env.rbs_size); + dump_curr(bl, "Avail_out*"); + return 1; + } + dump_curr(bl, "Avail_out"); + return 0; +} /* insert_Load */ /** * Insert Loads upwards. */ -static void insert_Loads_upwards(void) { +static void insert_Loads_upwards(void) +{ int i, need_iter; block_t *bl; @@ -2129,24 +2202,119 @@ static void insert_Loads_upwards(void) { } while (need_iter); DB((dbg, LEVEL_2, "Finished Load inserting after %d iterations\n", i)); -} +} /* insert_Loads_upwards */ + +/** + * Kill unreachable control flow. + * + * @param irg the graph to operate on + */ +static void kill_unreachable_blocks(ir_graph *irg) +{ + block_t *bl; + ir_node **ins; + int changed = 0; + + NEW_ARR_A(ir_node *, ins, env.max_cfg_preds); + + for (bl = env.forward; bl != NULL; bl = bl->forward_next) { + ir_node *block = bl->block; + int i, j, k, n; + + assert(get_Block_mark(block)); + + n = get_Block_n_cfgpreds(block); + + for (i = j = 0; i < n; ++i) { + ir_node *pred = get_Block_cfgpred(block, i); + ir_node *pred_bl; + + if (is_Bad(pred)) + continue; + + pred_bl = get_nodes_block(skip_Proj(pred)); + if (! get_Block_mark(pred_bl)) + continue; + + ins[j++] = pred; + } + if (j != n) { + ir_node *phi, *next; + + /* some unreachable blocks detected */ + changed = 1; + + DB((dbg, LEVEL_1, "Killing dead block predecessors on %+F\n", block)); + + set_irn_in(block, j, ins); + + /* shorten all Phi nodes */ + for (phi = get_Block_phis(block); phi != NULL; phi = next) { + next = get_Phi_next(phi); + + for (i = k = 0; i < n; ++i) { + ir_node *pred = get_Block_cfgpred_block(block, i); + + if (is_Bad(pred)) + continue; + + if (! 
get_Block_mark(pred)) + continue; + + ins[k++] = get_Phi_pred(phi, i); + } + if (k == 1) + exchange(phi, ins[0]); + else + set_irn_in(phi, k, ins); + } + } + + } + + if (changed) { + /* kick keep alives */ + ir_node *end = get_irg_end(irg); + int i, j, n = get_End_n_keepalives(end); + + NEW_ARR_A(ir_node *, ins, n); + + for (i = j = 0; i < n; ++i) { + ir_node *ka = get_End_keepalive(end, i); + ir_node *ka_bl; -int opt_ldst(ir_graph *irg) { + if (is_Bad(ka)) + continue; + if (is_Block(ka)) + ka_bl = ka; + else + ka_bl = get_nodes_block(skip_Proj(ka)); + if (get_Block_mark(ka_bl)) + ins[j++] = ka; + } + if (j != n) + set_End_keepalives(end, j, ins); + + free_irg_outs(irg); + + /* this transformation do NOT invalidate the dominance */ + } +} /* kill_unreachable_blocks */ + +int opt_ldst(ir_graph *irg) +{ block_t *bl; ir_graph *rem = current_ir_graph; current_ir_graph = irg; FIRM_DBG_REGISTER(dbg, "firm.opt.ldst"); -// firm_dbg_set_mask(dbg, -1); DB((dbg, LEVEL_1, "\nDoing Load/Store optimization on %+F\n", irg)); /* we need landing pads */ remove_critical_cf_edges(irg); - dump_ir_block_graph(irg, "-XXX"); - if (get_opt_alias_analysis()) { assure_irg_entity_usage_computed(irg); assure_irp_globals_entity_usage_computed(); @@ -2155,43 +2323,55 @@ int opt_ldst(ir_graph *irg) { obstack_init(&env.obst); ir_nodemap_init(&env.adr_map); - env.forward = NULL; - env.backward = NULL; - env.curr_adr_id = 0; - env.n_mem_ops = 0; - env.changed = 0; - env.start_bl = get_irg_start_block(irg); - env.end_bl = get_irg_end_block(irg); + env.forward = NULL; + env.backward = NULL; + env.curr_adr_id = 0; + env.n_mem_ops = 0; + env.max_cfg_preds = 0; + env.changed = 0; + env.start_bl = get_irg_start_block(irg); + env.end_bl = get_irg_end_block(irg); +#ifdef DEBUG_libfirm + env.id_2_address = NEW_ARR_F(ir_node *, 0); +#endif - assure_doms(irg); assure_irg_outs(irg); - ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK); + ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_BLOCK_MARK); /* first step: allocate block entries. Note that some blocks might be - unreachable here. Using the normal walk ensures that ALL blocks are initialised. */ - irg_block_walk_graph(irg, NULL, prepare_blocks, NULL); + unreachable here. Using the normal walk ensures that ALL blocks are initialized. */ + irg_walk_graph(irg, prepare_blocks, link_phis, NULL); /* produce an inverse post-order list for the CFG: this links only reachable blocks */ irg_out_block_walk(get_irg_start_block(irg), NULL, inverse_post_order, NULL); - bl = get_block_entry(env.end_bl); - if (!(bl->flags & BLK_FLAG_REACHABLE)) { + if (! get_Block_mark(env.end_bl)) { /* * The end block is NOT reachable due to endless loops * or no_return calls. * Place the end block last. * env.backward points to the last block in the list for this purpose. 
*/ - env.backward->forward_next = bl; + env.backward->forward_next = get_block_entry(env.end_bl); - bl->flags |= BLK_FLAG_REACHABLE; + set_Block_mark(env.end_bl, 1); } + /* KILL unreachable blocks: these disturb the data flow analysis */ + kill_unreachable_blocks(irg); + + assure_doms(irg); + /* second step: find and sort all memory ops */ walk_memory_irg(irg, collect_memops, NULL, NULL); +#ifdef DEBUG_libfirm + /* check that the backward map is correct */ + assert((unsigned)ARR_LEN(env.id_2_address) == env.curr_adr_id); +#endif + if (env.n_mem_ops == 0) { /* no memory ops */ goto end; @@ -2212,7 +2392,7 @@ int opt_ldst(ir_graph *irg) { /* create address sets: for now, only the existing addresses are allowed plus one needed for the sentinel */ - env.rbs_size = env.n_mem_ops + 1; + env.rbs_size = env.curr_adr_id + 1; /* create the current set */ env.curr_set = rbitset_obstack_alloc(&env.obst, env.rbs_size); @@ -2235,7 +2415,7 @@ int opt_ldst(ir_graph *irg) { memset(bl->id_2_memop_antic, 0, env.rbs_size * sizeof(bl->id_2_memop_antic[0])); } -// dump_block_list(&env); + (void) dump_block_list; calcAvail(); calcAntic(); @@ -2255,13 +2435,19 @@ int opt_ldst(ir_graph *irg) { } end: - ir_free_resources(irg, IR_RESOURCE_IRN_LINK); + ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_BLOCK_MARK); ir_nodemap_destroy(&env.adr_map); obstack_free(&env.obst, NULL); - dump_ir_block_graph(irg, "-YYY"); +#ifdef DEBUG_libfirm + DEL_ARR_F(env.id_2_address); +#endif current_ir_graph = rem; - return env.changed != 0; -} +} /* opt_ldst */ + +ir_graph_pass_t *opt_ldst_pass(const char *name) +{ + return def_graph_pass_ret(name ? name : "ldst_df", opt_ldst); +} /* opt_ldst_pass */
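
[editor's note, not part of the patch] For orientation, the reworked pass now runs in
the following stages, all visible in the hunks above: critical-edge splitting,
block/address registration (prepare_blocks + link_phis), removal of unreachable
blocks (kill_unreachable_blocks), memop collection (collect_memops), the forward
Avail_out fixpoint (calcAvail), the backward AnticL_in fixpoint with Phi-translation
(calcAntic), load insertion (insert_Loads_upwards), and finally do_replacements().
A hedged caller-side sketch; `irg` is assumed to be any ir_graph handle the caller
already owns:

	/* run directly; the return value only reports whether the graph changed */
	int changed = opt_ldst(irg);

	/* or wrap it for pass management, using the irpass.h interface the
	 * patch includes at the top of the file */
	ir_graph_pass_t *pass = opt_ldst_pass(NULL); /* default name "ldst_df" */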