* @file
* @brief Load/Store optimizations.
* @author Michael Beck
- * @version $Id$
*/
#include "config.h"
#include "ircons_t.h"
#include "irgmod.h"
#include "irgwalk.h"
+#include "irtools.h"
#include "tv_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "irhooks.h"
#include "iredges.h"
#include "irpass.h"
-#include "opt_polymorphy.h"
#include "irmemory.h"
-#include "irphase_t.h"
+#include "irnodehashmap.h"
#include "irgopt.h"
#include "set.h"
#include "be.h"
#include "debug.h"
+#include "opt_manage.h"
/** The debug handle. */
DEBUG_ONLY(static firm_dbg_module_t *dbg;)
/* try the called entity */
ir_node *ptr = get_Call_ptr(call);
- if (is_Global(ptr)) {
- ir_entity *ent = get_Global_entity(ptr);
+ if (is_SymConst_addr_ent(ptr)) {
+ ir_entity *ent = get_SymConst_entity(ptr);
prop = get_entity_additional_properties(ent);
}
return res | DF_CHANGED;
}
- /* Load from a constant polymorphic field, where we can resolve
- polymorphism. */
- value = transform_polymorph_Load(load);
- if (value == load) {
- value = NULL;
- /* check if we can determine the entity that will be loaded */
- ent = find_constant_entity(ptr);
- if (ent != NULL
- && get_entity_visibility(ent) != ir_visibility_external) {
- /* a static allocation that is not external: there should be NO
- * exception when loading even if we cannot replace the load itself.
- */
+ value = NULL;
+ /* check if we can determine the entity that will be loaded */
+ ent = find_constant_entity(ptr);
+ if (ent != NULL
+ && get_entity_visibility(ent) != ir_visibility_external) {
+ /* a static allocation that is not external: there should be NO
+ * exception when loading even if we cannot replace the load itself.
+ */
- /* no exception, clear the info field as it might be checked later again */
- if (info->projs[pn_Load_X_except]) {
- ir_graph *irg = get_irn_irg(load);
- exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
- info->projs[pn_Load_X_except] = NULL;
- res |= CF_CHANGED;
- }
- if (info->projs[pn_Load_X_regular]) {
- exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
- info->projs[pn_Load_X_regular] = NULL;
- res |= CF_CHANGED;
- }
+ /* no exception, clear the info field as it might be checked later again */
+ if (info->projs[pn_Load_X_except]) {
+ ir_graph *irg = get_irn_irg(load);
+ exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
+ info->projs[pn_Load_X_except] = NULL;
+ res |= CF_CHANGED;
+ }
+ if (info->projs[pn_Load_X_regular]) {
+ exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
+ info->projs[pn_Load_X_regular] = NULL;
+ res |= CF_CHANGED;
+ }
- if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT) {
- if (has_entity_initializer(ent)) {
- /* new style initializer */
- value = find_compound_ent_value(ptr);
- } else if (entity_has_compound_ent_values(ent)) {
- /* old style initializer */
- compound_graph_path *path = get_accessed_path(ptr);
+ if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT) {
+ if (has_entity_initializer(ent)) {
+ /* new style initializer */
+ value = find_compound_ent_value(ptr);
+ } else if (entity_has_compound_ent_values(ent)) {
+ /* old style initializer */
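+ /* reconstruct the access path (the chain of Sel accesses) leading to ptr */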
+ compound_graph_path *path = get_accessed_path(ptr);
- if (path != NULL) {
- assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
+ if (path != NULL) {
+ assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
- value = get_compound_ent_value_by_path(ent, path);
- DB((dbg, LEVEL_1, " Constant access at %F%F resulted in %+F\n", ent, path, value));
- free_compound_graph_path(path);
- }
+ value = get_compound_ent_value_by_path(ent, path);
+ DB((dbg, LEVEL_1, " Constant access at %F%F resulted in %+F\n", ent, path, value));
+ free_compound_graph_path(path);
}
- if (value != NULL) {
- ir_graph *irg = get_irn_irg(load);
- value = can_replace_load_by_const(load, value);
- if (value != NULL && is_Sel(ptr) &&
- !is_irg_state(irg, IR_GRAPH_STATE_IMPLICIT_BITFIELD_MASKING)) {
- /* frontend has inserted masking operations after bitfield accesses,
- * so we might have to shift the const. */
- unsigned char bit_offset = get_entity_offset_bits_remainder(get_Sel_entity(ptr));
- ir_tarval *tv_old = get_Const_tarval(value);
- ir_tarval *tv_offset = new_tarval_from_long(bit_offset, mode_Bu);
- ir_tarval *tv_new = tarval_shl(tv_old, tv_offset);
- value = new_r_Const(irg, tv_new);
- }
+ }
+ if (value != NULL) {
+ ir_graph *irg = get_irn_irg(load);
+ value = can_replace_load_by_const(load, value);
+ if (value != NULL && is_Sel(ptr) &&
+ !is_irg_state(irg, IR_GRAPH_STATE_IMPLICIT_BITFIELD_MASKING)) {
+ /* frontend has inserted masking operations after bitfield accesses,
+ * so we might have to shift the const. */
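+ /* e.g. a field value of 1 at bit offset 3 must appear as
+ * 1 << 3 = 8 in the loaded word; the frontend's masking code
+ * then extracts the field bits again. */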
+ unsigned char bit_offset = get_entity_offset_bits_remainder(get_Sel_entity(ptr));
+ ir_tarval *tv_old = get_Const_tarval(value);
+ ir_tarval *tv_offset = new_tarval_from_long(bit_offset, mode_Bu);
+ ir_tarval *tv_new = tarval_shl(tv_old, tv_offset);
+ value = new_r_Const(irg, tv_new);
}
}
}
/** A loop entry. */
typedef struct loop_env {
- ir_phase ph; /**< the phase object */
- ir_node **stack; /**< the node stack */
- size_t tos; /**< tos index */
- unsigned nextDFSnum; /**< the current DFS number */
- unsigned POnum; /**< current post order number */
-
- unsigned changes; /**< a bitmask of graph changes */
+ ir_nodehashmap_t map; /**< maps nodes to their node_entry */
+ struct obstack obst; /**< obstack for node_entry, phi_entry and scc allocations */
+ ir_node **stack; /**< the node stack */
+ size_t tos; /**< tos index */
+ unsigned nextDFSnum; /**< the current DFS number */
+ unsigned POnum; /**< current post order number */
+
+ unsigned changes; /**< a bitmask of graph changes */
} loop_env;
/**
*/
static node_entry *get_irn_ne(ir_node *irn, loop_env *env)
{
- ir_phase *ph = &env->ph;
- node_entry *e = (node_entry*)phase_get_irn_data(&env->ph, irn);
+ node_entry *e = (node_entry*)ir_nodehashmap_get(&env->map, irn);
- if (! e) {
- e = (node_entry*)phase_alloc(ph, sizeof(*e));
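+ /* no entry yet: allocate one on the obstack and register it in the map */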
+ if (e == NULL) {
+ e = OALLOC(&env->obst, node_entry);
memset(e, 0, sizeof(*e));
- phase_set_irn_data(ph, irn, e);
+ ir_nodehashmap_insert(&env->map, irn, e);
}
return e;
} /* get_irn_ne */
if (pe->pscc != ne->pscc) {
/* not in the same SCC: the predecessor value is region constant */
- phi_entry *pe = (phi_entry*)phase_alloc(&env->ph, sizeof(*pe));
+ phi_entry *pe = OALLOC(&env->obst, phi_entry);
pe->phi = phi;
pe->pos = j;
continue;
/* for now, we can only move Loads from direct entity addresses (SymConst) */
- if (! is_Global(ptr))
+ if (! is_SymConst_addr_ent(ptr))
continue;
load_mode = get_Load_mode(load);
for (other = pscc->head; other != NULL; other = next_other) {
DB((dbg, LEVEL_1, " Created %+F in %+F\n", irn, pred));
}
pe->load = irn;
- ninfo = get_ldst_info(irn, phase_obst(&env->ph));
+ ninfo = get_ldst_info(irn, &env->obst);
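+ /* record the new Load's memory Proj in its load/store info */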
ninfo->projs[pn_Load_M] = mem = new_r_Proj(irn, mode_M, pn_Load_M);
if (res == NULL) {
node->low = MIN(o->DFSnum, node->low);
}
} else if (is_fragile_op(irn)) {
- ir_node *pred = get_fragile_op_mem(irn);
+ ir_node *pred = get_memop_mem(irn);
node_entry *o = get_irn_ne(pred, env);
if (!irn_visited(pred)) {
}
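/* a node whose low link equals its own DFS number is the root of a strongly connected component */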
if (node->low == node->DFSnum) {
- scc *pscc = (scc*)phase_alloc(&env->ph, sizeof(*pscc));
+ scc *pscc = OALLOC(&env->obst, scc);
ir_node *x;
pscc->head = NULL;
} else if (is_Raise(pred)) {
dfs(get_Raise_mem(pred), env);
} else if (is_fragile_op(pred)) {
- dfs(get_fragile_op_mem(pred), env);
+ dfs(get_memop_mem(pred), env);
} else if (is_Bad(pred)) {
/* ignore non-optimized block predecessor */
} else {
env.nextDFSnum = 0;
env.POnum = 0;
env.changes = 0;
- phase_init(&env.ph, irg, phase_irn_init_default);
+ ir_nodehashmap_init(&env.map);
+ obstack_init(&env.obst);
/* calculate the SCC's and drive loop optimization. */
do_dfs(irg, &env);
DEL_ARR_F(env.stack);
- phase_deinit(&env.ph);
+ obstack_free(&env.obst, NULL);
+ ir_nodehashmap_destroy(&env.map);
return env.changes;
} /* optimize_loops */
/*
* do the load store optimization
*/
-int optimize_load_store(ir_graph *irg)
+static ir_graph_state_t do_loadstore_opt(ir_graph *irg)
{
walk_env_t env;
+ ir_graph_state_t res = 0;
FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");
assert(get_irg_pinned(irg) != op_pin_state_floats &&
"LoadStore optimization needs pinned graph");
- /* we need landing pads */
- remove_critical_cf_edges(irg);
-
- edges_assure(irg);
-
- /* for Phi optimization post-dominators are needed ... */
- assure_postdoms(irg);
-
if (get_opt_alias_analysis()) {
- assure_irg_entity_usage_computed(irg);
assure_irp_globals_entity_usage_computed();
}
/* Handle graph state */
if (env.changes) {
- set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
edges_deactivate(irg);
}
- if (env.changes & CF_CHANGED) {
- /* is this really needed: Yes, control flow changed, block might
- have Bad() predecessors. */
- set_irg_doms_inconsistent(irg);
+ if (!(env.changes & CF_CHANGED)) {
+ res |= IR_GRAPH_STATE_CONSISTENT_DOMINANCE | IR_GRAPH_STATE_NO_BADS;
}
- return env.changes != 0;
-} /* optimize_load_store */
+
+ return res;
+}
+
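+/* Optimization descriptor for the generic driver: it names the pass and
+ * lists the graph states that must hold before do_loadstore_opt() runs. */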
+static optdesc_t opt_loadstore = {
+ "load-store",
+ IR_GRAPH_STATE_NO_UNREACHABLE_CODE
+ | IR_GRAPH_STATE_CONSISTENT_OUT_EDGES
+ | IR_GRAPH_STATE_NO_CRITICAL_EDGES
+ | IR_GRAPH_STATE_CONSISTENT_DOMINANCE
+ | IR_GRAPH_STATE_CONSISTENT_ENTITY_USAGE,
+ do_loadstore_opt,
+};
+
+int optimize_load_store(ir_graph *irg)
+{
+ perform_irg_optimization(irg, &opt_loadstore);
+ return 1;
+}
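+/* Minimal usage sketch: run the optimization directly on an ir_graph,
+ * or wrap it into a pass via optimize_load_store_pass() below:
+ *
+ *     optimize_load_store(irg);
+ */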
ir_graph_pass_t *optimize_load_store_pass(const char *name)
{