if (get_opt_global_cse())
set_irg_pinned(irg, op_pin_state_floats);
- set_irg_outs_inconsistent(irg);
set_irg_doms_inconsistent(irg);
- set_irg_loopinfo_inconsistent(irg);
/* Clean the value_table in irg for the CSE. */
new_identities(irg);
current_ir_graph = rem;
}
+/**
+ * Put @p node onto @p waitq unless it is already queued.
+ *
+ * The node's link field doubles as the "already enqueued" marker:
+ * it is set to point at the queue while the node is waiting, so a
+ * node is never inserted twice.
+ */
+static void enqueue_node(ir_node *node, pdeq *waitq)
+{
+ if (get_irn_link(node) == waitq)
+ return;
+ pdeq_putr(waitq, node);
+ set_irn_link(node, waitq);
+}
+
/**
* Enqueue all users of a node to a wait queue.
* Handles mode_T nodes.
foreach_out_edge(n, edge) {
ir_node *succ = get_edge_src_irn(edge);
- if (get_irn_link(succ) != waitq) {
- pdeq_putr(waitq, succ);
- set_irn_link(succ, waitq);
- }
+ enqueue_node(succ, waitq);
if (get_irn_mode(succ) == mode_T) {
/* A mode_T node has Proj's. Because most optimizations
run on the Proj's we have to enqueue them also. */
/**
* Block-Walker: uses dominance depth to mark dead blocks.
*/
-static void kill_dead_blocks(ir_node *block, void *env)
+static void find_unreachable_blocks(ir_node *block, void *env)
{
pdeq *waitq = (pdeq*) env;
if (get_Block_dom_depth(block) < 0) {
- /*
- * Note that the new dominance code correctly handles
- * the End block, i.e. it is always reachable from Start
- */
ir_graph *irg = get_irn_irg(block);
+ ir_node *end = get_irg_end(irg);
+
+ /* A negative dom depth marks the block as unreachable from Start.
+ * Instead of exchanging it for Bad here (the old behaviour above),
+ * re-enqueue everything that will observe the change: each
+ * control-flow successor block, that block's Phi nodes (their
+ * predecessor corresponding to this edge goes away), and End,
+ * whose keep-alive list may reference the dead block. */
+ const ir_edge_t *edge;
+ foreach_block_succ(block, edge) {
+ const ir_edge_t *edge2;
+ ir_node *succ_block = get_edge_src_irn(edge);
+ enqueue_node(succ_block, waitq);
+ foreach_out_edge(succ_block, edge2) {
+ ir_node *succ = get_edge_src_irn(edge2);
+ if (is_Phi(succ))
+ enqueue_node(succ, waitq);
+ }
+ }
+ enqueue_node(end, waitq);
}
}
}
}
-static void clear_block_phis(ir_node *node, void *env) {
- (void) env;
- if (is_Block(node)) {
- set_Block_phis(node, NULL);
- }
-}
-
-static void collect_block_phis(ir_node *node, void *env) {
- (void) env;
- if (is_Phi(node)) {
- add_Block_phi(get_nodes_block(node), node);
- }
-}
-
-static int count_non_bads(ir_node *node) {
- int arity = get_irn_arity(node);
- int count = 0;
- int i;
- for (i=0; i<arity; ++i) {
- if (!is_Bad(get_irn_n(node, i)))
- count++;
- }
- return count;
-}
-
-static void block_remove_bads(ir_node *block, int *changed) {
- int i, j;
- ir_node **new_in;
- const int max = get_irn_arity(block);
- const int new_max = count_non_bads(block);
- assert (max >= new_max);
-
- if (is_Bad(block) || max == new_max) return;
-
- new_in = ALLOCAN(ir_node*, new_max);
- *changed = 1;
-
- assert (get_Block_dom_depth(block) >= 0);
-
- /* 1. Create a new block without Bad inputs */
- j = 0;
- for (i = 0; i < max; ++i) {
- ir_node *block_pred = get_irn_n(block, i);
- if (!is_Bad(block_pred)) {
- new_in[j++] = block_pred;
- }
- }
- assert (j == new_max);
-
- /* If the end block is unreachable, it might have zero predecessors. */
- ir_node *end_block = get_irg_end_block(get_irn_irg(block));
- if (new_max == 0 && block == end_block) {
- set_irn_in(block, new_max, new_in);
- return;
- }
-
- ir_node *new_block = new_r_Block(get_irn_irg(block), new_max, new_in);
-
- /* 2. Remove inputs on Phis, where the block input is Bad. */
- ir_node *phi = get_Block_phis(block);
- if (phi != NULL) {
- do {
- ir_node* next = get_Phi_next(phi);
- if (get_irn_arity(phi) != new_max) {
- j = 0;
- for (i = 0; i < max; ++i) {
- ir_node *block_pred = get_irn_n(block, i);
-
- if (!is_Bad(block_pred)) {
- ir_node *pred = get_irn_n(phi, i);
- new_in[j++] = pred;
- }
- }
- assert (j == new_max);
-
- ir_node *new_phi = new_r_Phi(new_block, new_max, new_in, get_irn_mode(phi));
- exchange(phi, new_phi);
- }
- phi = next;
- } while (phi != NULL);
- }
-
- exchange(block, new_block);
-}
-
-/* Remove Bad nodes from Phi and Block inputs.
- *
- * Precondition: No unreachable code.
- * Postcondition: No Bad nodes.
- */
-static int remove_Bads(ir_graph *irg) {
- int changed = 0;
- /* build phi list per block */
- irg_walk_graph(irg, clear_block_phis, collect_block_phis, NULL);
-
- /* actually remove Bads */
- irg_block_walk_graph(irg, NULL, (void (*)(struct ir_node *, void *)) block_remove_bads, &changed);
-
- return changed;
-}
-
/* Applies local optimizations to all nodes in the graph until fixpoint. */
int optimize_graph_df(ir_graph *irg)
{
set_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK);
/* invalidate info */
- set_irg_outs_inconsistent(irg);
set_irg_doms_inconsistent(irg);
- set_irg_loopinfo_inconsistent(irg);
ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
+ /* Calculate dominance so we can kill unreachable code */
+ assure_doms(irg);
+
/* walk over the graph, but don't touch keep-alives */
irg_walk_graph(irg, NULL, opt_walker, waitq);
* so if it's not empty, the graph has been changed */
changed = !pdeq_empty(waitq);
- do {
+ while (!pdeq_empty(waitq)) {
/* finish the wait queue */
while (! pdeq_empty(waitq)) {
ir_node *n = (ir_node*)pdeq_getl(waitq);
opt_walker(n, waitq);
}
- /* kill newly generated unreachable code */
- set_irg_outs_inconsistent(irg);
+ /* Calculate dominance so we can kill unreachable code */
compute_doms(irg);
- irg_block_walk_graph(irg, NULL, kill_dead_blocks, waitq);
- } while (! pdeq_empty(waitq));
+ irg_block_walk_graph(irg, NULL, find_unreachable_blocks, waitq);
+ }
+ set_irg_doms_inconsistent(irg);
del_pdeq(waitq);
if (! state)
edges_deactivate(irg);
+ if (remove_bads(irg)) {
+ edges_deactivate(irg);
+ }
+
/* Finally kill BAD and doublets from the keep alives.
Doing this AFTER edges where deactivated saves cycles */
end = get_irg_end(irg);
remove_End_Bads_and_doublets(end);
- if (remove_Bads(irg))
- edges_deactivate(irg);
-
clear_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK);
current_ir_graph = rem;