}
DB((dbg, LEVEL_2, "\n}\n\n"));
}
-} /* dump_block_list */
+}
/**
* Dumps the current set.
i = (i + 1) & 3;
}
DB((dbg, LEVEL_2, "\n}\n"));
-} /* dump_curr */
+}
#else
static void dump_block_list(ldst_env *env)
assert(is_Block(block));
return (block_t*)get_irn_link(block);
-} /* get_block_entry */
+}
/** Get the memop entry for a memory operation node */
static memop_t *get_irn_memop(const ir_node *irn)
{
assert(! is_Block(irn));
return (memop_t*)get_irn_link(irn);
-} /* get_irn_memop */
+}
/**
* Walk over the memory edges from definition to users.
*/
static void walk_memory(ir_node *irn, irg_walk_func *pre, irg_walk_func *post, void *ctx)
{
- int i;
ir_mode *mode;
mark_irn_visited(irn);
mode = get_irn_mode(irn);
if (mode == mode_M) {
/* every successor uses memory */
- for (i = get_irn_n_outs(irn) - 1; i >= 0; --i) {
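+ /* count downwards with an unsigned index: i-- > 0 tests before the
+ decrement, so the body sees n-1 .. 0 without underflow */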
+ for (unsigned i = get_irn_n_outs(irn); i-- > 0; ) {
ir_node *succ = get_irn_out(irn, i);
if (! irn_visited(succ))
walk_memory(succ, pre, post, ctx);
}
} else if (mode == mode_T) {
/* only some Projs use memory */
- for (i = get_irn_n_outs(irn) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(irn); i-- > 0; ) {
ir_node *proj = get_irn_out(irn, i);
if (get_irn_mode(proj) == mode_M && ! irn_visited(proj))
walk_memory(proj, pre, post, ctx);
}
if (post)
post(irn, ctx);
-} /* walk_memory */
+}
/**
* Walks over all memory nodes of a graph.
walk_memory(get_irg_initial_mem(irg), pre, post, ctx);
ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
-} /* walk_memory_irg */
+}
/**
* Register an address and allocate a (sparse, 0..n) ID for it.
{
address_entry *entry;
- /* skip Confirms and Casts */
+ /* skip Confirms */
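+ /* the skipped value may itself be a Confirm, hence the restart loop */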
restart:
if (is_Confirm(adr)) {
adr = get_Confirm_value(adr);
goto restart;
}
- if (is_Cast(adr)) {
- adr = get_Cast_op(adr);
- goto restart;
- }
- entry = (address_entry*)ir_nodehashmap_get(&env.adr_map, adr);
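+ /* ir_nodehashmap_get is now a macro taking the entry type as first
+ argument; it performs the cast that was previously explicit */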
+ entry = ir_nodehashmap_get(address_entry, &env.adr_map, adr);
if (entry == NULL) {
/* new address */
#endif
}
return entry->id;
-} /* register_address */
+}
/**
if (is_Phi(address) && get_nodes_block(address) == block)
address = get_Phi_pred(address, pos);
return address;
-} /* phi_translate */
+}
/**
* Walker: allocate a block entry for every block
(void)register_address(irn);
}
}
-} /* prepare_blocks */
+}
/**
* Post-Walker, link in all Phi's
ir_node *block = get_nodes_block(irn);
add_Block_phi(block, irn);
}
-} /* link_phis */
+}
/**
* Block walker: creates the inverse post-order list for the CFG.
/* remember the first visited (last in list) entry, needed for later */
if (env.backward == NULL)
env.backward = entry;
-} /* inverse_post_order */
+}
/**
* Block walker: create backward links for the memops of a block.
last = op;
}
entry->memop_backward = last;
-} /* collect_backward */
+}
/**
* Allocate a memop.
if (irn != NULL)
set_irn_link(irn, m);
return m;
-} /* alloc_memop */
+}
/**
* Create a memop for a Phi-replacement.
set_irn_link(phi, m);
return m;
-} /* clone_memop_phi */
+}
/**
* Return the memory properties of a call node.
}
}
return prop & (mtp_property_const|mtp_property_pure);
-} /* get_Call_memory_properties */
+}
/**
* Returns an entity if the address ptr points to a constant one.
} else
return NULL;
}
-} /* find_constant_entity */
+}
/**
* Return the Selection index of a Sel node from dimension n
static long get_Sel_array_index_long(ir_node *n, int dim)
{
ir_node *index = get_Sel_index(n, dim);
- assert(is_Const(index));
return get_tarval_long(get_Const_tarval(index));
-} /* get_Sel_array_index_long */
+}
typedef struct path_entry {
ir_entity *ent;
goto ptr_arith;
}
return NULL;
-} /* rec_find_compound_ent_value */
+}
static ir_node *find_compound_ent_value(ir_node *ptr)
{
return rec_find_compound_ent_value(ptr, NULL);
-} /* find_compound_ent_value */
+}
/**
* Mark a Load memop to be replaced by a definition
op->replace = def;
op->flags |= FLAG_KILLED_NODE;
env.changed = 1;
-} /* mark_replace_load */
+}
/**
* Mark a Store memop to be removed.
{
op->flags |= FLAG_KILLED_NODE;
env.changed = 1;
-} /* mark_remove_store */
+}
/**
* Update a memop for a Load.
*/
static void update_Load_memop(memop_t *m)
{
- int i;
ir_node *load = m->node;
ir_node *ptr;
ir_entity *ent;
m->value.address = ptr;
- for (i = get_irn_n_outs(load) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(load); i-- > 0; ) {
ir_node *proj = get_irn_out(load, i);
long pn;
/* no user, KILL it */
mark_replace_load(m, NULL);
}
-} /* update_Load_memop */
+}
/**
* Update a memop for a Store.
*/
static void update_Store_memop(memop_t *m)
{
- int i;
ir_node *store = m->node;
ir_node *adr = get_Store_ptr(store);
m->value.address = adr;
- for (i = get_irn_n_outs(store) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(store); i-- > 0; ) {
ir_node *proj = get_irn_out(store, i);
long pn;
}
m->value.value = get_Store_value(store);
m->value.mode = get_irn_mode(m->value.value);
-} /* update_Store_memop */
+}
/**
* Update a memop for a Call.
{
ir_node *call = m->node;
unsigned prop = get_Call_memory_properties(call);
- int i;
if (prop & mtp_property_const) {
/* A constant call did NOT use memory at all, we
} else
m->flags = FLAG_KILL_ALL;
- for (i = get_irn_n_outs(call) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(call); i-- > 0; ) {
ir_node *proj = get_irn_out(call, i);
/* beware of keep edges */
break;
}
}
-} /* update_Call_memop */
+}
/**
* Update a memop for a Div/Mod.
static void update_Div_memop(memop_t *m)
{
ir_node *div = m->node;
- int i;
- for (i = get_irn_n_outs(div) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(div); i-- > 0; ) {
ir_node *proj = get_irn_out(div, i);
/* beware of keep edges */
static void update_Mod_memop(memop_t *m)
{
ir_node *div = m->node;
- int i;
- for (i = get_irn_n_outs(div) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(div); i-- > 0; ) {
ir_node *proj = get_irn_out(div, i);
/* beware of keep edges */
{
/* the Phi is its own mem */
m->mem = m->node;
-} /* update_Phi_memop */
+}
/**
* Memory walker: collect all memory ops and build topological lists.
entry->memop_backward = op;
}
}
-} /* collect_memops */
+}
/**
* Find an address in the current set.
return res;
}
return NULL;
-} /* find_address */
+}
/**
* Find an address in the avail_out set.
return res;
}
return NULL;
-} /* find_address_avail */
+}
/**
* Kill all addresses from the current set.
/* set sentinel */
rbitset_set(env.curr_set, env.rbs_size - 1);
-} /* kill_all */
+}
/**
* Kill all memops from the current set that might alias a given Store's address.
DB((dbg, LEVEL_2, "KILLING %+F because of possible alias address %+F\n", op->node, value->address));
}
}
-} /* kill_memops */
+}
/**
* Add the value of a memop to the current set.
{
rbitset_set(env.curr_set, op->value.id);
env.curr_id_2_memop[op->value.id] = op;
-} /* add_memop */
+}
/**
* Add the value of a memop to the avail_out set.
{
rbitset_set(bl->avail_out, op->value.id);
bl->id_2_memop_avail[op->value.id] = op;
-} /* add_memop_avail */
+}
/**
* Check if we can convert a value of one mode to another mode
get_mode_size_bits(from) == get_mode_size_bits(to))
return 1;
return 0;
-} /* can_convert_to */
+}
/**
* Add a Conv to the requested mode if needed.
return NULL;
}
return irn;
-} /* conv_to */
+}
/**
* Update the address of a value if this address was a load result
value->address = op->replace;
}
}
-} /* update_address */
+}
/**
* Do forward dataflow analysis on the given block and calculate the
kill_all();
}
}
-} /* calc_gen_kill_avail */
-
-#define BYTE_SIZE(x) (((x) + 7) >> 3)
+}
/**
* Do forward dataflow analysis on a given block to calculate the avail_out set
calc_gen_kill_avail(bl);
dump_curr(bl, "Avail_out");
-} /* forward_avail */
+}
/**
* Do backward dataflow analysis on a given block to calculate the antic set
}
dump_curr(bl, "AnticL_in");
return 0;
-} /* backward_antic */
+}
/**
* Replace a Load memop by an already known value.
if (proj != NULL) {
exchange(proj, new_r_Jmp(get_nodes_block(load)));
}
-} /* replace_load */
+}
/**
* Remove a Store memop.
if (proj != NULL) {
exchange(proj, new_r_Jmp(get_nodes_block(store)));
}
-} /* remove_store */
+}
/**
}
}
}
-} /* do_replacements */
+}
/**
* Calculate the Avail_out sets for all basic blocks.
/* restore the current sets */
env.curr_id_2_memop = tmp_memop;
env.curr_set = tmp_set;
-} /* calcAvail */
+}
/**
* Calculate the Antic_in sets for all basic blocks.
++i;
} while (need_iter);
DB((dbg, LEVEL_2, "Get anticipated Load set after %d iterations\n", i));
-} /* calcAntic */
+}
/**
* Return the node representing the last memory in a block.
/* if there is NO memory in this block, go to the dominator */
bl = get_block_entry(get_Block_idom(bl->block));
}
-} /* find_last_memory */
+}
/**
* Reroute all memory users of old memory
*/
static void reroute_all_mem_users(ir_node *omem, ir_node *nmem)
{
- int i;
-
- for (i = get_irn_n_outs(omem) - 1; i >= 0; --i) {
+ for (unsigned i = get_irn_n_outs(omem); i-- > 0; ) {
int n_pos;
ir_node *user = get_irn_out_ex(omem, i, &n_pos);
}
/* all edges previously pointing to omem now point to nmem */
- nmem->out = omem->out;
-} /* reroute_all_mem_users */
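+ /* the out-edge array is now reached through the node's 'o' member */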
+ nmem->o.out = omem->o.out;
+}
/**
* Reroute memory users of old memory that are dominated by a given block
*/
static void reroute_mem_through(ir_node *omem, ir_node *nmem, ir_node *pass_bl)
{
- int i, j, n = get_irn_n_outs(omem);
- ir_def_use_edge *edges = NEW_ARR_D(ir_def_use_edge, &env.obst, n + 1);
+ unsigned n = get_irn_n_outs(omem);
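+ /* OALLOCF: obstack allocation with room for n entries in the trailing
+ flexible 'edges' array */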
+ ir_def_use_edges *new_out = OALLOCF(&env.obst, ir_def_use_edges, edges, n);
- for (i = j = 0; i < n; ++i) {
+ unsigned j = 0;
+ for (unsigned i = 0; i < n; ++i) {
int n_pos;
ir_node *user = get_irn_out_ex(omem, i, &n_pos);
ir_node *use_bl = get_nodes_block(user);
}
if (block_dominates(pass_bl, use_bl)) {
/* found a user that is dominated */
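+ /* edges are filled 0-based now; the count goes into n_edges below
+ instead of edges[0] */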
+ new_out->edges[j].pos = n_pos;
+ new_out->edges[j].use = user;
++j;
- edges[j].pos = n_pos;
- edges[j].use = user;
set_irn_n(user, n_pos, nmem);
}
}
+ new_out->n_edges = j;
/* Modify the out structure: we create a new out edge array on our
- temporary obstack here. This should be no problem, as we invalidate the edges
- at the end either. */
+ temporary obstack here. This should be no problem, as we invalidate the
+ edges at the end anyway. */
- /* first entry is used for the length */
- edges[0].pos = j;
- nmem->out = edges;
-} /* reroute_mem_through */
+ nmem->o.out = new_out;
+}
/**
* Insert Loads, making partly redundant Loads fully redundant
if (! rbitset_is_set(env.curr_set, pos))
env.curr_id_2_memop[pos] = NULL;
else {
- ir_node *pred = get_Block_cfgpred_block(bl->block, 0);
- block_t *pred_bl = get_block_entry(pred);
- int need_phi = 0;
- memop_t *first = NULL;
- ir_mode *mode = NULL;
+ int need_phi = 0;
+ memop_t *first = NULL;
+ ir_mode *mode = NULL;
for (i = 0; i < n; ++i) {
- memop_t *mop;
-
- pred = get_Block_cfgpred_block(bl->block, i);
- pred_bl = get_block_entry(pred);
+ ir_node *pred = get_Block_cfgpred_block(bl->block, i);
+ block_t *pred_bl = get_block_entry(pred);
- mop = pred_bl->id_2_memop_avail[pos];
+ memop_t *mop = pred_bl->id_2_memop_avail[pos];
if (first == NULL) {
first = mop;
ins[0] = first->value.value;
}
dump_curr(bl, "Avail_out");
return 0;
-} /* insert_Load */
+}
/**
* Insert Loads upwards.
} while (need_iter);
DB((dbg, LEVEL_2, "Finished Load inserting after %d iterations\n", i));
-} /* insert_Loads_upwards */
-
-/**
- * Kill unreachable control flow.
- *
- * @param irg the graph to operate on
- */
-static void kill_unreachable_blocks(ir_graph *irg)
-{
- block_t *bl;
- ir_node **ins;
- int changed = 0;
-
- NEW_ARR_A(ir_node *, ins, env.max_cfg_preds);
-
- for (bl = env.forward; bl != NULL; bl = bl->forward_next) {
- ir_node *block = bl->block;
- int i, j, k, n;
-
- assert(get_Block_mark(block));
-
- n = get_Block_n_cfgpreds(block);
-
- for (i = j = 0; i < n; ++i) {
- ir_node *pred = get_Block_cfgpred(block, i);
- ir_node *pred_bl;
-
- if (is_Bad(pred))
- continue;
-
- pred_bl = get_nodes_block(skip_Proj(pred));
- if (! get_Block_mark(pred_bl))
- continue;
-
- ins[j++] = pred;
- }
- if (j != n) {
- ir_node *phi, *next;
-
- /* some unreachable blocks detected */
- changed = 1;
-
- DB((dbg, LEVEL_1, "Killing dead block predecessors on %+F\n", block));
-
- set_irn_in(block, j, ins);
-
- /* shorten all Phi nodes */
- for (phi = get_Block_phis(block); phi != NULL; phi = next) {
- next = get_Phi_next(phi);
-
- for (i = k = 0; i < n; ++i) {
- ir_node *pred = get_Block_cfgpred_block(block, i);
-
- if (is_Bad(pred))
- continue;
-
- if (! get_Block_mark(pred))
- continue;
-
- ins[k++] = get_Phi_pred(phi, i);
- }
- if (k == 1)
- exchange(phi, ins[0]);
- else
- set_irn_in(phi, k, ins);
- }
- }
-
- }
-
- if (changed) {
- /* kick keep alives */
- ir_node *end = get_irg_end(irg);
- int i, j, n = get_End_n_keepalives(end);
-
- NEW_ARR_A(ir_node *, ins, n);
-
- for (i = j = 0; i < n; ++i) {
- ir_node *ka = get_End_keepalive(end, i);
- ir_node *ka_bl;
-
- if (is_Bad(ka))
- continue;
- if (is_Block(ka))
- ka_bl = ka;
- else
- ka_bl = get_nodes_block(skip_Proj(ka));
- if (get_Block_mark(ka_bl))
- ins[j++] = ka;
- }
- if (j != n)
- set_End_keepalives(end, j, ins);
-
- free_irg_outs(irg);
-
- /* this transformation do NOT invalidate the dominance */
- }
-} /* kill_unreachable_blocks */
+}
-int opt_ldst(ir_graph *irg)
+void opt_ldst(ir_graph *irg)
{
block_t *bl;
DB((dbg, LEVEL_1, "\nDoing Load/Store optimization on %+F\n", irg));
- /* we need landing pads */
- remove_critical_cf_edges(irg);
+ assure_irg_properties(irg,
+ IR_GRAPH_PROPERTY_NO_CRITICAL_EDGES /* we need landing pads */
+ | IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE
+ | IR_GRAPH_PROPERTY_CONSISTENT_OUTS
+ | IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE
+ | IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE);
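+ /* these properties subsume the explicit remove_critical_cf_edges,
+ assure_irg_outs, assure_doms and kill_unreachable_blocks calls that
+ this pass performed before */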
if (get_opt_alias_analysis()) {
- assure_irg_entity_usage_computed(irg);
assure_irp_globals_entity_usage_computed();
}
env.id_2_address = NEW_ARR_F(ir_node *, 0);
#endif
- assure_irg_outs(irg);
-
ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_BLOCK_MARK);
/* first step: allocate block entries. Note that some blocks might be
set_Block_mark(env.end_bl, 1);
}
- /* KILL unreachable blocks: these disturb the data flow analysis */
- kill_unreachable_blocks(irg);
-
- assure_doms(irg);
-
/* second step: find and sort all memory ops */
walk_memory_irg(irg, collect_memops, NULL, NULL);
if (env.n_mem_ops == 0) {
/* no memory ops */
- goto end;
+ goto no_changes;
}
/* create the backward links. */
/* create the current set */
env.curr_set = rbitset_obstack_alloc(&env.obst, env.rbs_size);
rbitset_set(env.curr_set, env.rbs_size - 1);
- env.curr_id_2_memop = NEW_ARR_D(memop_t *, &env.obst, env.rbs_size);
- memset(env.curr_id_2_memop, 0, env.rbs_size * sizeof(env.curr_id_2_memop[0]));
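+ /* NEW_ARR_DZ zero-initializes the array, folding the former memset
+ into the allocation */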
+ env.curr_id_2_memop = NEW_ARR_DZ(memop_t*, &env.obst, env.rbs_size);
for (bl = env.forward; bl != NULL; bl = bl->forward_next) {
/* set sentinel bits */
bl->avail_out = rbitset_obstack_alloc(&env.obst, env.rbs_size);
rbitset_set(bl->avail_out, env.rbs_size - 1);
- bl->id_2_memop_avail = NEW_ARR_D(memop_t *, &env.obst, env.rbs_size);
- memset(bl->id_2_memop_avail, 0, env.rbs_size * sizeof(bl->id_2_memop_avail[0]));
+ bl->id_2_memop_avail = NEW_ARR_DZ(memop_t*, &env.obst, env.rbs_size);
bl->anticL_in = rbitset_obstack_alloc(&env.obst, env.rbs_size);
rbitset_set(bl->anticL_in, env.rbs_size - 1);
- bl->id_2_memop_antic = NEW_ARR_D(memop_t *, &env.obst, env.rbs_size);
- memset(bl->id_2_memop_antic, 0, env.rbs_size * sizeof(bl->id_2_memop_antic[0]));
+ bl->id_2_memop_antic = NEW_ARR_DZ(memop_t*, &env.obst, env.rbs_size);
}
(void) dump_block_list;
/* not only invalidate but free them. We might allocate new out arrays
on our obstack which will be deleted anyway. */
- free_irg_outs(irg);
- clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
+ confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_CONTROL_FLOW);
+ } else {
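+ /* nothing was changed: all graph properties still hold */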
+no_changes:
+ confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
}
-end:
ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_BLOCK_MARK);
ir_nodehashmap_destroy(&env.adr_map);
#ifdef DEBUG_libfirm
DEL_ARR_F(env.id_2_address);
#endif
-
- return env.changed != 0;
-} /* opt_ldst */
+}
ir_graph_pass_t *opt_ldst_pass(const char *name)
{
- return def_graph_pass_ret(name ? name : "ldst_df", opt_ldst);
-} /* opt_ldst_pass */
+ return def_graph_pass(name ? name : "ldst_df", opt_ldst);
+}