X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fopt%2Fopt_ldst.c;h=e330ff0092aaa7ff3ddb1e1f860df4f5a13008ce;hb=2e179fbd54125606903fc7e438d02a6d3aacc6eb;hp=b704abe2753fbbc0454dd42dead0a983dfb5bb5c;hpb=6730cf921d356d992d35526daf57f82af7ab0816;p=libfirm diff --git a/ir/opt/opt_ldst.c b/ir/opt/opt_ldst.c index b704abe27..e330ff009 100644 --- a/ir/opt/opt_ldst.c +++ b/ir/opt/opt_ldst.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved. + * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved. * * This file is part of libFirm. * @@ -38,14 +38,14 @@ #include "irgopt.h" #include "iropt.h" #include "iroptimize.h" -#include "irnodemap.h" +#include "irnodehashmap.h" #include "raw_bitset.h" #include "debug.h" #include "error.h" #include "irpass.h" /* maximum number of output Proj's */ -#define MAX_PROJ (pn_Load_max > pn_Store_max ? pn_Load_max : pn_Store_max) +#define MAX_PROJ ((long)pn_Load_max > (long)pn_Store_max ? (long)pn_Load_max : (long)pn_Store_max) /** * Mapping an address to an dense ID. @@ -90,7 +90,7 @@ struct memop_t { memop_t *next; /**< links to the next memory op in the block in forward order. */ memop_t *prev; /**< links to the previous memory op in the block in forward order. */ unsigned flags; /**< memop flags */ - ir_node *projs[MAX_PROJ]; /**< Projs of this memory op */ + ir_node *projs[MAX_PROJ+1]; /**< Projs of this memory op */ }; /** @@ -115,8 +115,8 @@ struct block_t { * Metadata for this pass. */ typedef struct ldst_env_t { - struct obstack obst; /**< obstack for temporary data */ - ir_nodemap_t adr_map; /**< Map addresses to */ + struct obstack obst; /**< obstack for temporary data */ + ir_nodehashmap_t adr_map; /**< Map addresses to */ block_t *forward; /**< Inverse post-order list of all blocks Start->End */ block_t *backward; /**< Inverse post-order list of all blocks End->Start */ ir_node *start_bl; /**< start block of the current graph */ @@ -125,7 +125,7 @@ typedef struct ldst_env_t { memop_t **curr_id_2_memop; /**< current map of address ids to memops */ unsigned curr_adr_id; /**< number for address mapping */ unsigned n_mem_ops; /**< number of memory operations (Loads/Stores) */ - unsigned rbs_size; /**< size of all bitsets in bytes */ + size_t rbs_size; /**< size of all bitsets in bytes */ int max_cfg_preds; /**< maximum number of block cfg predecessors */ int changed; /**< Flags for changed graph state */ #ifdef DEBUG_libfirm @@ -180,9 +180,9 @@ static void dump_block_list(ldst_env *env) */ static void dump_curr(block_t *bl, const char *s) { - unsigned end = env.rbs_size - 1; - unsigned pos; - int i; + size_t end = env.rbs_size - 1; + size_t pos; + int i; DB((dbg, LEVEL_2, "%s[%+F] = {", s, bl->block)); i = 0; @@ -216,14 +216,14 @@ static block_t *get_block_entry(const ir_node *block) { assert(is_Block(block)); - return get_irn_link(block); + return (block_t*)get_irn_link(block); } /* get_block_entry */ /** Get the memop entry for a memory operation node */ static memop_t *get_irn_memop(const ir_node *irn) { assert(! 
is_Block(irn)); - return get_irn_link(irn); + return (memop_t*)get_irn_link(irn); } /* get_irn_memop */ /** @@ -312,14 +312,14 @@ restart: goto restart; } - entry = ir_nodemap_get(&env.adr_map, adr); + entry = (address_entry*)ir_nodehashmap_get(&env.adr_map, adr); if (entry == NULL) { /* new address */ entry = OALLOC(&env.obst, address_entry); entry->id = env.curr_adr_id++; - ir_nodemap_insert(&env.adr_map, adr, entry); + ir_nodehashmap_insert(&env.adr_map, adr, entry); DB((dbg, LEVEL_3, "ADDRESS %+F has ID %u\n", adr, entry->id)); #ifdef DEBUG_libfirm @@ -525,8 +525,8 @@ static unsigned get_Call_memory_properties(ir_node *call) /* try the called entity */ ir_node *ptr = get_Call_ptr(call); - if (is_Global(ptr)) { - ir_entity *ent = get_Global_entity(ptr); + if (is_SymConst_addr_ent(ptr)) { + ir_entity *ent = get_SymConst_entity(ptr); prop = get_entity_additional_properties(ent); } @@ -561,10 +561,10 @@ static ir_entity *find_constant_entity(ir_node *ptr) int i, n; for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) { - ir_node *bound; - tarval *tlower, *tupper; - ir_node *index = get_Sel_index(ptr, i); - tarval *tv = computed_value(index); + ir_node *bound; + ir_tarval *tlower, *tupper; + ir_node *index = get_Sel_index(ptr, i); + ir_tarval *tv = computed_value(index); /* check if the index is constant */ if (tv == tarval_bad) @@ -578,9 +578,9 @@ static ir_entity *find_constant_entity(ir_node *ptr) if (tlower == tarval_bad || tupper == tarval_bad) return NULL; - if (tarval_cmp(tv, tlower) & pn_Cmp_Lt) + if (tarval_cmp(tv, tlower) == ir_relation_less) return NULL; - if (tarval_cmp(tupper, tv) & pn_Cmp_Lt) + if (tarval_cmp(tupper, tv) == ir_relation_less) return NULL; /* ok, bounds check finished */ @@ -640,12 +640,12 @@ static long get_Sel_array_index_long(ir_node *n, int dim) * @param depth current depth in steps upward from the root * of the address */ -static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) +static compound_graph_path *rec_get_accessed_path(ir_node *ptr, size_t depth) { compound_graph_path *res = NULL; ir_entity *root, *field, *ent; - int path_len, pos, idx; - tarval *tv; + size_t path_len, pos, idx; + ir_tarval *tv; ir_type *tp; if (is_SymConst(ptr)) { @@ -673,17 +673,19 @@ static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0)); } } else if (is_Add(ptr)) { - ir_node *l = get_Add_left(ptr); - ir_node *r = get_Add_right(ptr); - ir_mode *mode = get_irn_mode(ptr); - tarval *tmp; - - if (is_Const(r) && get_irn_mode(l) == mode) { - ptr = l; - tv = get_Const_tarval(r); - } else { - ptr = r; - tv = get_Const_tarval(l); + ir_mode *mode; + ir_tarval *tmp; + + { + ir_node *l = get_Add_left(ptr); + ir_node *r = get_Add_right(ptr); + if (is_Const(r) && get_irn_mode(l) == get_irn_mode(ptr)) { + ptr = l; + tv = get_Const_tarval(r); + } else { + ptr = r; + tv = get_Const_tarval(l); + } } ptr_arith: mode = get_tarval_mode(tv); @@ -697,9 +699,9 @@ ptr_arith: } idx = 0; for (ent = field;;) { - unsigned size; - tarval *sz, *tv_index, *tlower, *tupper; - ir_node *bound; + unsigned size; + ir_tarval *sz, *tv_index, *tlower, *tupper; + ir_node *bound; tp = get_entity_type(ent); if (! 
is_Array_type(tp)) @@ -723,9 +725,9 @@ ptr_arith: if (tlower == tarval_bad || tupper == tarval_bad) return NULL; - if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt) + if (tarval_cmp(tv_index, tlower) == ir_relation_less) return NULL; - if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt) + if (tarval_cmp(tupper, tv_index) == ir_relation_less) return NULL; /* ok, bounds check finished */ @@ -748,9 +750,9 @@ ptr_arith: pos = path_len - depth - idx; for (ent = field;;) { - unsigned size; - tarval *sz, *tv_index; - long index; + unsigned size; + ir_tarval *sz, *tv_index; + long index; tp = get_entity_type(ent); if (! is_Array_type(tp)) @@ -797,7 +799,7 @@ static compound_graph_path *get_accessed_path(ir_node *ptr) typedef struct path_entry { ir_entity *ent; struct path_entry *next; - long index; + size_t index; } path_entry; static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) @@ -805,9 +807,9 @@ static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) path_entry entry, *p; ir_entity *ent, *field; ir_initializer_t *initializer; - tarval *tv; + ir_tarval *tv; ir_type *tp; - unsigned n; + size_t n; entry.next = next; if (is_SymConst(ptr)) { @@ -830,7 +832,7 @@ static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) continue; } } - if (p->index >= (int) n) + if (p->index >= n) return NULL; initializer = get_initializer_compound_value(initializer, p->index); @@ -863,7 +865,7 @@ static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented"); entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0); } else { - int i, n_members = get_compound_n_members(tp); + size_t i, n_members = get_compound_n_members(tp); for (i = 0; i < n_members; ++i) { if (get_compound_member(tp, i) == field) break; @@ -876,17 +878,19 @@ static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) } return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry); } else if (is_Add(ptr)) { - ir_node *l = get_Add_left(ptr); - ir_node *r = get_Add_right(ptr); - ir_mode *mode; + ir_mode *mode; unsigned pos; - if (is_Const(r)) { - ptr = l; - tv = get_Const_tarval(r); - } else { - ptr = r; - tv = get_Const_tarval(l); + { + ir_node *l = get_Add_left(ptr); + ir_node *r = get_Add_right(ptr); + if (is_Const(r)) { + ptr = l; + tv = get_Const_tarval(r); + } else { + ptr = r; + tv = get_Const_tarval(l); + } } ptr_arith: mode = get_tarval_mode(tv); @@ -917,10 +921,10 @@ ptr_arith: /* fill them up */ pos = 0; for (ent = field;;) { - unsigned size; - tarval *sz, *tv_index, *tlower, *tupper; - long index; - ir_node *bound; + unsigned size; + ir_tarval *sz, *tv_index, *tlower, *tupper; + size_t index; + ir_node *bound; tp = get_entity_type(ent); if (! 
is_Array_type(tp)) @@ -947,9 +951,9 @@ ptr_arith: if (tlower == tarval_bad || tupper == tarval_bad) return NULL; - if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt) + if (tarval_cmp(tv_index, tlower) == ir_relation_less) return NULL; - if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt) + if (tarval_cmp(tupper, tv_index) == ir_relation_less) return NULL; /* ok, bounds check finished */ @@ -1061,7 +1065,7 @@ static void update_Load_memop(memop_t *m) /* no exception, clear the m fields as it might be checked later again */ if (m->projs[pn_Load_X_except]) { ir_graph *irg = get_irn_irg(ptr); - exchange(m->projs[pn_Load_X_except], new_r_Bad(irg)); + exchange(m->projs[pn_Load_X_except], new_r_Bad(irg, mode_X)); m->projs[pn_Load_X_except] = NULL; m->flags &= ~FLAG_EXCEPTION; env.changed = 1; @@ -1197,11 +1201,11 @@ static void update_Call_memop(memop_t *m) } /* update_Call_memop */ /** - * Update a memop for a Div/Mod/Quot/DivMod. + * Update a memop for a Div/Mod. * * @param m the memop */ -static void update_DivOp_memop(memop_t *m) +static void update_Div_memop(memop_t *m) { ir_node *div = m->node; int i; @@ -1214,15 +1218,38 @@ static void update_DivOp_memop(memop_t *m) continue; switch (get_Proj_proj(proj)) { - case pn_Generic_X_except: + case pn_Div_X_except: m->flags |= FLAG_EXCEPTION; break; - case pn_Generic_M: + case pn_Div_M: m->mem = proj; break; } } -} /* update_DivOp_memop */ +} + +static void update_Mod_memop(memop_t *m) +{ + ir_node *div = m->node; + int i; + + for (i = get_irn_n_outs(div) - 1; i >= 0; --i) { + ir_node *proj = get_irn_out(div, i); + + /* beware of keep edges */ + if (is_End(proj)) + continue; + + switch (get_Proj_proj(proj)) { + case pn_Mod_X_except: + m->flags |= FLAG_EXCEPTION; + break; + case pn_Mod_M: + m->mem = proj; + break; + } + } +} /** * Update a memop for a Phi. 
@@ -1231,7 +1258,7 @@ static void update_DivOp_memop(memop_t *m) */ static void update_Phi_memop(memop_t *m) { - /* the Phi is it's own mem */ + /* the Phi is its own mem */ m->mem = m->node; } /* update_Phi_memop */ @@ -1247,7 +1274,8 @@ static void collect_memops(ir_node *irn, void *ctx) (void) ctx; if (is_Proj(irn)) { /* we can safely ignore ProjM's except the initial memory */ - if (irn != get_irg_initial_mem(current_ir_graph)) + ir_graph *irg = get_irn_irg(irn); + if (irn != get_irg_initial_mem(irg)) return; } @@ -1286,10 +1314,10 @@ static void collect_memops(ir_node *irn, void *ctx) /* we can those to find the memory edge */ break; case iro_Div: - case iro_DivMod: - case iro_Quot: + update_Div_memop(op); + break; case iro_Mod: - update_DivOp_memop(op); + update_Mod_memop(op); break; case iro_Builtin: @@ -1374,13 +1402,13 @@ static void kill_all(void) */ static void kill_memops(const value_t *value) { - unsigned end = env.rbs_size - 1; - unsigned pos; + size_t end = env.rbs_size - 1; + size_t pos; for (pos = rbitset_next(env.curr_set, 0, 1); pos < end; pos = rbitset_next(env.curr_set, pos + 1, 1)) { memop_t *op = env.curr_id_2_memop[pos]; - if (ir_no_alias != get_alias_relation(current_ir_graph, value->address, value->mode, + if (ir_no_alias != get_alias_relation(value->address, value->mode, op->value.address, op->value.mode)) { rbitset_clear(env.curr_set, pos); env.curr_id_2_memop[pos] = NULL; @@ -1596,8 +1624,8 @@ static int backward_antic(block_t *bl) ir_node *succ = get_Block_cfg_out(block, 0); block_t *succ_bl = get_block_entry(succ); int pred_pos = get_Block_cfgpred_pos(succ, block); - unsigned end = env.rbs_size - 1; - unsigned pos; + size_t end = env.rbs_size - 1; + size_t pos; kill_all(); @@ -1743,7 +1771,7 @@ static void replace_load(memop_t *op) proj = op->projs[pn_Load_X_except]; if (proj != NULL) { ir_graph *irg = get_irn_irg(load); - exchange(proj, new_r_Bad(irg)); + exchange(proj, new_r_Bad(irg, mode_X)); } proj = op->projs[pn_Load_X_regular]; if (proj != NULL) { @@ -1770,7 +1798,7 @@ static void remove_store(memop_t *op) proj = op->projs[pn_Store_X_except]; if (proj != NULL) { ir_graph *irg = get_irn_irg(store); - exchange(proj, new_r_Bad(irg)); + exchange(proj, new_r_Bad(irg, mode_X)); } proj = op->projs[pn_Store_X_regular]; if (proj != NULL) { @@ -1935,8 +1963,7 @@ static int insert_Load(block_t *bl) { ir_node *block = bl->block; int i, n = get_Block_n_cfgpreds(block); - unsigned end = env.rbs_size - 1; - unsigned pos; + size_t end = env.rbs_size - 1; DB((dbg, LEVEL_3, "processing %+F\n", block)); @@ -1947,7 +1974,7 @@ static int insert_Load(block_t *bl) if (n > 1) { ir_node **ins; - int pos; + size_t pos; NEW_ARR_A(ir_node *, ins, n); @@ -1974,9 +2001,9 @@ static int insert_Load(block_t *bl) } /* * Ensure that all values are in the map: build Phi's if necessary: - * Note: the last bit is the sentinel and ALWAYS set, so start with -2. + * Note: the last bit is the sentinel and ALWAYS set, so end with -2. */ - for (pos = env.rbs_size - 2; pos >= 0; --pos) { + for (pos = 0; pos < env.rbs_size - 1; ++pos) { if (! 
rbitset_is_set(env.curr_set, pos)) env.curr_id_2_memop[pos] = NULL; else { @@ -2040,6 +2067,8 @@ static int insert_Load(block_t *bl) } if (n > 1) { + size_t pos; + /* check for partly redundant values */ for (pos = rbitset_next(bl->anticL_in, 0, 1); pos < end; @@ -2306,10 +2335,7 @@ static void kill_unreachable_blocks(ir_graph *irg) int opt_ldst(ir_graph *irg) { - block_t *bl; - ir_graph *rem = current_ir_graph; - - current_ir_graph = irg; + block_t *bl; FIRM_DBG_REGISTER(dbg, "firm.opt.ldst"); @@ -2324,7 +2350,7 @@ int opt_ldst(ir_graph *irg) } obstack_init(&env.obst); - ir_nodemap_init(&env.adr_map); + ir_nodehashmap_init(&env.adr_map); env.forward = NULL; env.backward = NULL; @@ -2434,19 +2460,18 @@ int opt_ldst(ir_graph *irg) /* not only invalidate but free them. We might allocate new out arrays on our obstack which will be deleted yet. */ free_irg_outs(irg); - set_irg_entity_usage_state(irg, ir_entity_usage_not_computed); + clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_ENTITY_USAGE); } end: ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_BLOCK_MARK); - ir_nodemap_destroy(&env.adr_map); + ir_nodehashmap_destroy(&env.adr_map); obstack_free(&env.obst, NULL); #ifdef DEBUG_libfirm DEL_ARR_F(env.id_2_address); #endif - current_ir_graph = rem; return env.changed != 0; } /* opt_ldst */
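
Note on the recurring bounds-check hunks above: the old code tested the result of tarval_cmp() as a bit set ("& pn_Cmp_Lt"), while the updated code compares the returned ir_relation directly ("== ir_relation_less"). A minimal sketch of the resulting idiom, assuming the usual libfirm tarval declarations; the helper name tv_in_array_bounds and the include are illustrative only and not part of the patch:

    /* assumed header providing ir_tarval, tarval_cmp, tarval_bad, ir_relation */
    #include "tv.h"

    /* Returns 1 if tv provably lies within [tlower, tupper], 0 otherwise
     * (unknown bounds count as "not provably inside"). */
    static int tv_in_array_bounds(ir_tarval *tv, ir_tarval *tlower, ir_tarval *tupper)
    {
        if (tlower == tarval_bad || tupper == tarval_bad)
            return 0;
        /* tv < lower bound? */
        if (tarval_cmp(tv, tlower) == ir_relation_less)
            return 0;
        /* upper bound < tv? */
        if (tarval_cmp(tupper, tv) == ir_relation_less)
            return 0;
        return 1;
    }

The same pattern occurs three times in this patch, in find_constant_entity(), rec_get_accessed_path() and rec_find_compound_ent_value().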