* Called by get_alias_relation().
*/
typedef ir_alias_relation (*DISAMBIGUATOR_FUNC)(
- const ir_graph *irg,
const ir_node *adr1, const ir_mode *mode1,
const ir_node *adr2, const ir_mode *mode2);
/**
* Classify a base pointer.
*
- * @param irg the graph of the pointer
* @param irn the node representing the base address
 * @param ent  the base entity of the base address, if any
*/
-FIRM_API ir_storage_class_class_t classify_pointer(const ir_graph *irg,
- const ir_node *irn,
+FIRM_API ir_storage_class_class_t classify_pointer(const ir_node *irn,
const ir_entity *ent);
/**
* interrogated to detect the alias relation.
*/
FIRM_API ir_alias_relation get_alias_relation(
- const ir_graph *irg,
const ir_node *adr1, const ir_mode *mode1,
const ir_node *adr2, const ir_mode *mode2);
* @see get_alias_relation()
*/
FIRM_API ir_alias_relation get_alias_relation_ex(
- const ir_graph *irg,
const ir_node *adr1, const ir_mode *mode1,
const ir_node *adr2, const ir_mode *mode2);
return 0;
} /* is_malloc_Result */
-/**
- * Classify a base pointer.
- *
- * @param irg the graph of the pointer
- * @param irn the node representing the base address
- * @param ent the base entity of the base address iff any
- */
-ir_storage_class_class_t classify_pointer(const ir_graph *irg, const ir_node *irn, const ir_entity *ent)
+ir_storage_class_class_t classify_pointer(const ir_node *irn,
+ const ir_entity *ent)
{
+ ir_graph *irg = get_irn_irg(irn);
ir_storage_class_class_t res = ir_sc_pointer;
if (is_Global(irn)) {
ir_entity *entity = get_Global_entity(irn);
/**
* Determine the alias relation between two addresses.
*
- * @param irg the graph of both memory operations
 * @param adr1   pointer address of the first memory operation
 * @param mode1  the mode of the accessed data through adr1
 * @param adr2   pointer address of the second memory operation
* @return found memory relation
*/
static ir_alias_relation _get_alias_relation(
- const ir_graph *irg,
const ir_node *adr1, const ir_mode *mode1,
const ir_node *adr2, const ir_mode *mode2)
{
const ir_node *base2;
const ir_node *orig_adr1 = adr1;
const ir_node *orig_adr2 = adr2;
+ ir_graph *irg;
unsigned mode_size;
ir_storage_class_class_t class1, class2, mod1, mod2;
int have_const_offsets;
if (adr1 == adr2)
return ir_sure_alias;
+ irg = get_irn_irg(adr1);
options = get_irg_memory_disambiguator_options(irg);
/* The Armageddon switch */
return different_sel_offsets(adr1, adr2);
}
- mod1 = classify_pointer(irg, base1, ent1);
- mod2 = classify_pointer(irg, base2, ent2);
+ mod1 = classify_pointer(base1, ent1);
+ mod2 = classify_pointer(base2, ent2);
class1 = GET_BASE_SC(mod1);
class2 = GET_BASE_SC(mod2);
/* do we have a language specific memory disambiguator? */
if (language_disambuigator != NULL) {
- ir_alias_relation rel = language_disambuigator(irg, orig_adr1, mode1, orig_adr2, mode2);
+ ir_alias_relation rel = language_disambuigator(orig_adr1, mode1, orig_adr2, mode2);
if (rel != ir_may_alias)
return rel;
}
* Determine the alias relation between two addresses.
*/
ir_alias_relation get_alias_relation(
- const ir_graph *irg,
const ir_node *adr1, const ir_mode *mode1,
const ir_node *adr2, const ir_mode *mode2)
{
- ir_alias_relation rel = _get_alias_relation(irg, adr1, mode1, adr2, mode2);
+ ir_alias_relation rel = _get_alias_relation(adr1, mode1, adr2, mode2);
DB((dbg, LEVEL_1, "alias(%+F, %+F) = %s\n", adr1, adr2, get_ir_alias_relation_name(rel)));
return rel;
} /* get_alias_relation */
* Determine the alias relation between two addresses.
*/
ir_alias_relation get_alias_relation_ex(
- const ir_graph *irg,
const ir_node *adr1, const ir_mode *mode1,
const ir_node *adr2, const ir_mode *mode2)
{
if (entry != NULL)
return entry->result;
- key.result = get_alias_relation(irg, adr1, mode1, adr2, mode2);
+ key.result = get_alias_relation(adr1, mode1, adr2, mode2);
set_insert(result_cache, &key, sizeof(key), HASH_ENTRY(adr1, adr2));
return key.result;
/* still alias free */
ptr = find_base_adr(ptr, &ent);
- sc = GET_BASE_SC(classify_pointer(current_ir_graph, ptr, ent));
+ sc = GET_BASE_SC(classify_pointer(ptr, ent));
if (sc != ir_sc_localvar && sc != ir_sc_malloced) {
/* non-local memory access */
env->only_local_mem = 0;
env->changed = 1;
DB((dbg, LEVEL_1, "boolopt: %+F: fusing (ub %+F lb %+F)\n",
- current_ir_graph, upper_block, lower_block));
+ get_irn_irg(upper_block), upper_block, lower_block));
/* move all expressions on the path to lower/upper block */
move_nodes_to_block(get_Block_cfgpred(block, up_idx), upper_block);
int i, j, n, changed;
ir_node **in = NULL;
ir_node *cond, *end = get_irg_end(irg);
- ir_graph *rem = current_ir_graph;
plist_element_t *el;
merge_env env;
assert(get_irg_pinned(irg) != op_pin_state_floats &&
"Control flow optimization need a pinned graph");
- current_ir_graph = irg;
-
/* FIXME: control flow opt destroys block edges. So edges are deactivated here. Fix the edges! */
edges_deactivate(irg);
fprintf(stderr, "VERIFY_BAD in optimize_cf()\n");
}
}
-
- current_ir_graph = rem;
}
/* Creates an ir_graph pass for optimize_cf. */
int in_dead_block = is_Block_unreachable(curr_block);
int depth = 0;
ir_node *b = NULL; /* The block to place this node in */
+ ir_graph *irg = get_irn_irg(n);
assert(!is_Block(n));
if (is_irn_start_block_placed(n)) {
/* These nodes will not be placed by the loop below. */
- b = get_irg_start_block(current_ir_graph);
+ b = get_irg_start_block(irg);
depth = 1;
}
in the backend phase. */
if (depth == 1 &&
get_Block_dom_depth(get_nodes_block(n)) > 1 &&
- get_irg_phase_state(current_ir_graph) != phase_backend) {
- b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0);
- assert(b != get_irg_start_block(current_ir_graph));
+ get_irg_phase_state(irg) != phase_backend) {
+ b = get_Block_cfg_out(get_irg_start_block(irg), 0);
+ assert(b != get_irg_start_block(irg));
depth = 2;
}
}
*
* @param worklist a worklist, used for the algorithm, empty on in/output
*/
-static void place_early(waitq *worklist)
+static void place_early(ir_graph *irg, waitq *worklist)
{
assert(worklist);
- inc_irg_visited(current_ir_graph);
+ inc_irg_visited(irg);
/* this inits the worklist */
- place_floats_early(get_irg_end(current_ir_graph), worklist);
+ place_floats_early(get_irg_end(irg), worklist);
/* Work the content of the worklist. */
while (!waitq_empty(worklist)) {
if (!irn_visited(n))
place_floats_early(n, worklist);
}
- set_irg_pinned(current_ir_graph, op_pin_state_pinned);
+ set_irg_pinned(irg, op_pin_state_pinned);
}
/**
*
* @param worklist the worklist containing the nodes to place
*/
-static void place_late(waitq *worklist)
+static void place_late(ir_graph *irg, waitq *worklist)
{
assert(worklist);
- inc_irg_visited(current_ir_graph);
+ inc_irg_visited(irg);
/* This fills the worklist initially. */
- place_floats_late(get_irg_start_block(current_ir_graph), worklist);
+ place_floats_late(get_irg_start_block(irg), worklist);
/* And now empty the worklist again... */
while (!waitq_empty(worklist)) {
void place_code(ir_graph *irg)
{
waitq *worklist;
- ir_graph *rem = current_ir_graph;
- current_ir_graph = irg;
remove_critical_cf_edges(irg);
/* Handle graph state */
/* Place all floating nodes as early as possible. This guarantees
a legal code placement. */
worklist = new_waitq();
- place_early(worklist);
+ place_early(irg, worklist);
/* Note: place_early changes only blocks, no data edges. So, the
* data out edges are still valid, no need to recalculate them here. */
/* Now move the nodes down in the dominator tree. This reduces the
unnecessary executions of the node. */
- place_late(worklist);
+ place_late(irg, worklist);
set_irg_outs_inconsistent(irg);
set_irg_loopinfo_inconsistent(irg);
del_waitq(worklist);
- current_ir_graph = rem;
}
/**
}
/**
- * Copies the graph reachable from current_ir_graph->end to the obstack
- * in current_ir_graph and fixes the environment.
- * Then fixes the fields in current_ir_graph containing nodes of the
- * graph.
+ * Copies the graph reachable from the End node to the obstack
+ * in irg. Then fixes the fields containing nodes of the graph.
*
* @param copy_node_nr If non-zero, the node number will be copied
*/
*/
void dead_node_elimination(ir_graph *irg)
{
- ir_graph *rem;
struct obstack *graveyard_obst = NULL;
struct obstack *rebirth_obst = NULL;
/* inform statistics that we started a dead-node elimination run */
hook_dead_node_elim(irg, 1);
- /* Remember external state of current_ir_graph. */
- rem = current_ir_graph;
- current_ir_graph = irg;
-
assert(get_irg_phase_state(irg) != phase_building);
/* Handle graph state */
/* inform statistics that the run is over */
hook_dead_node_elim(irg, 0);
-
- current_ir_graph = rem;
}
ir_graph_pass_t *dead_node_elimination_pass(const char *name)
++ctx->n_calls_SymConst;
} else if (get_opt_closed_world() &&
is_Sel(ptr) &&
- get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
+ get_irg_callee_info_state(get_irn_irg(node)) == irg_callee_info_consistent) {
/* If all possible callees are const functions, we can remove the memory edge. */
int i, n_callees = get_Call_n_callees(call);
if (n_callees == 0) {
{
ir_node *call, *next, *mem, *proj;
int exc_changed = 0;
- ir_graph *rem = current_ir_graph;
-
- current_ir_graph = irg;
/* First step: fix all calls by removing their memory input and let
* them floating.
/* ... including exception edges */
set_irg_doms_inconsistent(irg);
}
- current_ir_graph = rem;
} /* fix_const_call_list */
/**
++ctx->n_calls_SymConst;
} else if (get_opt_closed_world() &&
is_Sel(ptr) &&
- get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
+ get_irg_callee_info_state(get_irn_irg(node)) == irg_callee_info_consistent) {
/* If all possible callees are nothrow functions, we can remove the exception edge. */
int i, n_callees = get_Call_n_callees(call);
if (n_callees == 0) {
{
ir_node *call, *next, *proj;
int exc_changed = 0;
- ir_graph *rem = current_ir_graph;
-
- current_ir_graph = irg;
/* First step: go through the list of calls and mark them. */
for (call = call_list; call; call = next) {
/* ... including exception edges */
set_irg_doms_inconsistent(irg);
}
- current_ir_graph = rem;
} /* fix_nothrow_call_list */
/* marking */
ir_entity *ent = get_SymConst_entity(ptr);
ir_graph *irg = get_entity_irg(ent);
- if (irg == current_ir_graph) {
+ if (irg == get_irn_irg(node)) {
/* A self-recursive call. The property did not depend on this call. */
} else if (irg == NULL) {
m = get_entity_additional_properties(ent) & (mtp_property_const|mtp_property_pure);
ir_node *end, *endbl;
int j;
unsigned prop = get_irg_additional_properties(irg);
- ir_graph *rem = current_ir_graph;
if (prop & mtp_property_const) {
/* already marked as a const function */
endbl = get_nodes_block(end);
prop = mtp_property_const;
- current_ir_graph = irg;
-
ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
inc_irg_visited(irg);
/* mark the initial mem: recursion of follow_mem() stops here */
SET_IRG_READY(irg);
CLEAR_IRG_BUSY(irg);
ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
- current_ir_graph = rem;
return prop;
} /* check_const_or_pure_function */
if (get_irg_visited(irg) < get_max_irg_visited()) {
set_irg_visited(irg, get_max_irg_visited());
}
- current_ir_graph = irg;
irg_walk_2(node, visit_node, NULL, NULL);
}
if (is_Store(pred)) {
/* check if we can pass through this store */
ir_alias_relation rel = get_alias_relation(
- current_ir_graph,
get_Store_ptr(pred),
get_irn_mode(get_Store_value(pred)),
ptr, load_mode);
if (is_Store(pred)) {
/* check if we can pass through this store */
ir_alias_relation rel = get_alias_relation(
- current_ir_graph,
get_Store_ptr(pred),
get_irn_mode(get_Store_value(pred)),
ptr, mode);
pred = skip_Proj(get_Store_mem(pred));
} else if (is_Load(pred)) {
ir_alias_relation rel = get_alias_relation(
- current_ir_graph, get_Load_ptr(pred), get_Load_mode(pred),
+ get_Load_ptr(pred), get_Load_mode(pred),
ptr, mode);
if (rel != ir_no_alias)
break;
if (is_Store(other)) {
ir_alias_relation rel = get_alias_relation(
- current_ir_graph,
get_Store_ptr(other),
get_irn_mode(get_Store_value(other)),
ptr, load_mode);
*/
static void do_dfs(ir_graph *irg, loop_env *env)
{
- ir_graph *rem = current_ir_graph;
ir_node *endblk, *end;
int i;
- current_ir_graph = irg;
inc_irg_visited(irg);
/* visit all memory nodes */
if (is_Phi(ka) && !irn_visited(ka))
dfs(ka, env);
}
- current_ir_graph = rem;
} /* do_dfs */
/**
}
/* collect all no-return blocks */
- end = get_irg_end(current_ir_graph);
+ end = get_irg_end(get_irn_irg(end_block));
for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
ir_node *ka = get_End_keepalive(end, i);
ir_node *block;
int i, k, n;
pred_t *preds;
- if (block == get_irg_end_block(current_ir_graph)) {
+ if (block == get_irg_end_block(get_irn_irg(block))) {
/* always create a partition for the end block */
partition_for_end_block(block, env);
return;
/* Combines congruent end blocks into one. */
int shape_blocks(ir_graph *irg)
{
- ir_graph *rem;
environment_t env;
partition_t *part;
block_t *bl;
int res, n;
- rem = current_ir_graph;
- current_ir_graph = irg;
-
/* register a debug mask */
FIRM_DBG_REGISTER(dbg, "firm.opt.blocks");
DEL_ARR_F(env.live_outs);
del_set(env.opcode2id_map);
obstack_free(&env.obst, NULL);
- current_ir_graph = rem;
return res;
} /* shape_blocks */
if (is_Global(n)) {
/* global references are never NULL */
return 1;
- } else if (n == get_irg_frame(current_ir_graph)) {
+ } else if (n == get_irg_frame(get_irn_irg(n))) {
/* local references are never NULL */
return 1;
} else if (is_Alloc(n)) {
(void) ctx;
if (is_Proj(irn)) {
/* we can safely ignore ProjM's except the initial memory */
- if (irn != get_irg_initial_mem(current_ir_graph))
+ ir_graph *irg = get_irn_irg(irn);
+ if (irn != get_irg_initial_mem(irg))
return;
}
for (pos = rbitset_next(env.curr_set, 0, 1); pos < end; pos = rbitset_next(env.curr_set, pos + 1, 1)) {
memop_t *op = env.curr_id_2_memop[pos];
- if (ir_no_alias != get_alias_relation(current_ir_graph, value->address, value->mode,
+ if (ir_no_alias != get_alias_relation(value->address, value->mode,
op->value.address, op->value.mode)) {
rbitset_clear(env.curr_set, pos);
env.curr_id_2_memop[pos] = NULL;
int opt_ldst(ir_graph *irg)
{
- block_t *bl;
- ir_graph *rem = current_ir_graph;
-
- current_ir_graph = irg;
+ block_t *bl;
FIRM_DBG_REGISTER(dbg, "firm.opt.ldst");
DEL_ARR_F(env.id_2_address);
#endif
- current_ir_graph = rem;
return env.changed != 0;
} /* opt_ldst */
if (result != irn) {
node_entry *e;
- hook_strength_red(current_ir_graph, irn);
+ hook_strength_red(get_irn_irg(irn), irn);
exchange(irn, result);
e = get_irn_ne(result, env);
if (e->pscc == NULL) {
*/
static void do_dfs(ir_graph *irg, iv_env *env)
{
- ir_graph *rem = current_ir_graph;
ir_node *end = get_irg_end(irg);
int i;
ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
- current_ir_graph = irg;
inc_irg_visited(irg);
/* visit all visible nodes */
}
ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
-
- current_ir_graph = rem;
} /* do_dfs */
/**
/* Remove any Phi cycles with only one real input. */
void remove_phi_cycles(ir_graph *irg)
{
- iv_env env;
- ir_graph *rem;
- int projs_moved;
-
- rem = current_ir_graph;
- current_ir_graph = irg;
+ iv_env env;
+ int projs_moved;
FIRM_DBG_REGISTER(dbg, "firm.opt.remove_phi");
DEL_ARR_F(env.stack);
obstack_free(&env.obst, NULL);
-
- current_ir_graph = rem;
} /* remove_phi_cycles */
ir_graph_pass_t *remove_phi_cycles_pass(const char *name)
void opt_osr(ir_graph *irg, unsigned flags)
{
iv_env env;
- ir_graph *rem;
int edges;
int projs_moved;
- rem = current_ir_graph;
- current_ir_graph = irg;
-
FIRM_DBG_REGISTER(dbg, "firm.opt.osr");
DB((dbg, LEVEL_1, "Doing Operator Strength Reduction for %+F\n", irg));
if (! edges)
edges_deactivate(irg);
-
- current_ir_graph = rem;
} /* opt_osr */
struct pass_t {
ir_node *org_ptr = pi->origin_ptr;
ir_mode *store_mode = get_irn_mode(get_Store_value(pred));
ir_node *store_ptr = get_Store_ptr(pred);
- if (get_alias_relation(current_ir_graph, org_ptr, org_mode, store_ptr, store_mode) == ir_no_alias) {
+ if (get_alias_relation(org_ptr, org_mode, store_ptr, store_mode) == ir_no_alias) {
ir_node *mem = get_Store_mem(pred);
ir_nodeset_insert(&pi->user_mem, irn);
parallelize_load(pi, mem);
ir_node *org_ptr = pi->origin_ptr;
ir_mode *load_mode = get_Load_mode(pred);
ir_node *load_ptr = get_Load_ptr(pred);
- if (get_alias_relation(current_ir_graph, org_ptr, org_mode, load_ptr, load_mode) == ir_no_alias) {
+ if (get_alias_relation(org_ptr, org_mode, load_ptr, load_mode) == ir_no_alias) {
ir_node *mem = get_Load_mem(pred);
ir_nodeset_insert(&pi->user_mem, irn);
parallelize_store(pi, mem);
ir_node *org_ptr = pi->origin_ptr;
ir_mode *store_mode = get_irn_mode(get_Store_value(pred));
ir_node *store_ptr = get_Store_ptr(pred);
- if (get_alias_relation(current_ir_graph, org_ptr, org_mode, store_ptr, store_mode) == ir_no_alias) {
+ if (get_alias_relation(org_ptr, org_mode, store_ptr, store_mode) == ir_no_alias) {
ir_node *mem;
ir_nodeset_insert(&pi->user_mem, irn);
n = ir_nodeset_size(&pi.user_mem);
if (n != 0) { /* nothing happened otherwise */
- ir_graph *irg = current_ir_graph;
+ ir_graph *irg = get_irn_irg(block);
ir_node *sync;
ir_node **in;
ir_nodeset_iterator_t iter;
set_irg_doms_inconsistent(irg);
set_irg_extblk_inconsistent(irg); /* may not be needed */
set_irg_outs_inconsistent(irg);
- set_irg_loopinfo_inconsistent(current_ir_graph);
+ set_irg_loopinfo_inconsistent(irg);
}
/* Create a graph pass. */
* @param modes A flexible array, containing all the modes of
* the value numbers.
*/
-static void do_scalar_replacements(pset *sels, int nvals, ir_mode **modes)
+static void do_scalar_replacements(ir_graph *irg, pset *sels, int nvals,
+ ir_mode **modes)
{
env_t env;
- ssa_cons_start(current_ir_graph, nvals);
+ ssa_cons_start(irg, nvals);
- env.nvals = nvals;
- env.modes = modes;
- env.sels = sels;
+ env.nvals = nvals;
+ env.modes = modes;
+ env.sels = sels;
/*
* second step: walk over the graph blockwise in topological order
* and fill the array as much as possible.
*/
- DB((dbg, SET_LEVEL_3, "Substituting Loads and Stores in %+F\n", current_ir_graph));
- irg_walk_blkwise_graph(current_ir_graph, NULL, topologic_walker, &env);
+ DB((dbg, SET_LEVEL_3, "Substituting Loads and Stores in %+F\n", irg));
+ irg_walk_blkwise_graph(irg, NULL, topologic_walker, &env);
- ssa_cons_finish(current_ir_graph);
+ ssa_cons_finish(irg);
}
/*
set *set_ent;
pset *sels;
ir_type *ent_type, *frame_tp;
- ir_graph *rem;
int res = 0;
- rem = current_ir_graph;
- current_ir_graph = irg;
-
/* Call algorithm that computes the out edges */
assure_irg_outs(irg);
/* If scalars were found. */
if (nvals > 0) {
- do_scalar_replacements(sels, nvals, modes);
+ do_scalar_replacements(irg, sels, nvals, modes);
foreach_set(set_ent, value) {
free_entity(value->ent);
ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
- current_ir_graph = rem;
return res;
}
/*
* the first block has the initial exec as cfg predecessor
*/
- if (node != get_irg_start_block(current_ir_graph)) {
+ if (node != get_irg_start_block(get_irn_irg(node))) {
for (i = 0; i < n_pred; ++i) {
if (get_Block_cfgpred(node, i) == data->proj_X) {
data->block = node;
int rem = get_optimize();
ir_entity *ent = get_irg_entity(irg);
ir_type *method_tp = get_entity_type(ent);
- ir_graph *old = current_ir_graph;
-
- current_ir_graph = irg;
assert(env->n_tail_calls > 0);
set_irg_doms_inconsistent(irg);
set_irg_outs_inconsistent(irg);
set_irg_extblk_inconsistent(irg);
- set_irg_loopinfo_state(current_ir_graph, loopinfo_cf_inconsistent);
+ set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);
set_trouts_inconsistent();
set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
exchange(p, bad);
}
}
- current_ir_graph = old;
}
/**
for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
irg = get_irp_irg(i);
- current_ir_graph = irg;
-
ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
if (opt_tail_rec_irg(irg))
++n_opt_applications;