-void optimize_cf(ir_graph *irg) {
- int i, n;
- ir_node **in;
- ir_node *end = get_irg_end(irg);
- ir_graph *rem = current_ir_graph;
- irg_dom_state dom_state = get_irg_dom_state(current_ir_graph);
- current_ir_graph = irg;
-
- /* if the graph is not pinned, we cannot determine empty blocks */
- assert(get_irg_pinned(irg) != op_pin_state_floats &&
- "Control flow optimization need a pinned graph");
-
- /* Handle graph state */
- assert(get_irg_phase_state(irg) != phase_building);
- if (get_irg_outs_state(current_ir_graph) == outs_consistent)
- set_irg_outs_inconsistent(current_ir_graph);
- if (get_irg_dom_state(current_ir_graph) == dom_consistent)
- set_irg_dom_inconsistent(current_ir_graph);
-
- if (dom_state == dom_consistent && get_opt_optimize() && get_opt_unreachable_code()) {
- ir_node *end = get_irg_end(irg);
-
- /* we have dominance info, we can kill dead block */
- irg_block_walk_graph(irg, NULL, remove_dead_block_cf, NULL);
-
- /* fix the keep-alives */
- for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
- ir_node *ka = get_End_keepalive(end, i);
-
- if (is_Block(ka)) {
- if (get_Block_dom_depth(ka) == -1)
- set_End_keepalive(end, i, new_Bad());
- }
- else if (get_Block_dom_depth(get_nodes_block(ka)) == -1)
- set_End_keepalive(end, i, new_Bad());
- }
- }
-
- irg_block_walk_graph(current_ir_graph, NULL, remove_senseless_conds, NULL);
-
- /* Use block visited flag to mark non-empty blocks. */
- inc_irg_block_visited(irg);
- irg_walk(end, merge_blocks, collect_nodes, NULL);
-
- /* Optimize the standard code. */
- irg_block_walk(get_irg_end_block(irg), optimize_blocks, NULL, NULL);
-
- /* Walk all keep alives, optimize them if block, add to new in-array
- for end if useful. */
- in = NEW_ARR_F (ir_node *, 1);
- in[0] = get_nodes_block(end);
- inc_irg_visited(current_ir_graph);
-
- for (i = 0; i < get_End_n_keepalives(end); i++) {
- ir_node *ka = get_End_keepalive(end, i);
-
- if (irn_not_visited(ka)) {
- if ((get_irn_op(ka) == op_Block) && Block_not_block_visited(ka)) {
- set_irg_block_visited(current_ir_graph, /* Don't walk all the way to Start. */
- get_irg_block_visited(current_ir_graph)-1);
- irg_block_walk(ka, optimize_blocks, NULL, NULL);
- mark_irn_visited(ka);
- ARR_APP1 (ir_node *, in, ka);
- } else if (get_irn_op(ka) == op_Phi) {
- mark_irn_visited(ka);
- ARR_APP1 (ir_node *, in, ka);
- } else if (get_irn_op(ka) == op_IJmp) {
- mark_irn_visited(ka);
- ARR_APP1 (ir_node *, in, ka);
- }
- }
- }
- /* DEL_ARR_F(end->in); GL @@@ tut nicht ! */
- end->in = in;
-
-
- /* the verifier doesn't work yet with floating nodes */
- if (get_irg_pinned(irg) == op_pin_state_pinned) {
- /* after optimize_cf(), only Bad data flow may remain. */
- if (irg_vrfy_bads(irg, BAD_DF | BAD_BLOCK | TUPLE)) {
- dump_ir_block_graph(irg, "-vrfy-cf");
- dump_ir_graph(irg, "-vrfy-cf");
- fprintf(stderr, "VRFY_BAD in optimize_cf()\n");
- }
- }
-
- current_ir_graph = rem;
+/**
+ * Checks whether control flow predecessors i and j of @p block are both
+ * Projs of the same (non-switch) Cond.  If so, the condition is irrelevant
+ * for @p block: both outcomes end up here anyway.  One Proj is replaced by
+ * a plain Jmp in the Cond's block, the other by Bad.
+ *
+ * @param block  a block with at least two control flow predecessors
+ * @param i      index of the first predecessor edge
+ * @param j      index of the second predecessor edge, i != j
+ * @return true if the transformation was performed
+ */
+static bool optimize_pred_cond(ir_node *block, int i, int j)
+{
+	ir_node *projA, *projB, *cond, *pred_block, *jmp, *bad;
+	assert(i != j);
+
+	projA = get_Block_cfgpred(block, i);
+	if (!is_Proj(projA)) return false;
+	projB = get_Block_cfgpred(block, j);
+	if (!is_Proj(projB)) return false;
+	cond = get_Proj_pred(projA);
+	if (!is_Cond(cond)) return false;
+
+	/* both Projs must stem from the same Cond, and switch Conds have
+	 * more than two outcomes, so they are not handled here */
+	if (cond != get_Proj_pred(projB)) return false;
+	if (is_switch_Cond(cond)) return false;
+
+	/* cond should actually be a Jmp */
+	pred_block = get_nodes_block(cond);
+	jmp = new_r_Jmp(pred_block);
+	bad = new_r_Bad(get_irn_irg(block), mode_X);
+
+	assert(projA != projB);
+	exchange(projA, jmp);
+	exchange(projB, bad);
+	return true;
+}
+
+/**
+ * Walker callback: records per-block information in the ir_phase @p x:
+ *  - marks blocks that are the target of an unknown jump,
+ *  - marks blocks that contain Phi nodes,
+ *  - marks blocks that contain "real" operations, i.e. any node other
+ *    than Jmp, Cond, Cmp or Proj (those are pure control flow glue and
+ *    do not make a block non-empty).
+ */
+static void compute_block_info(ir_node *n, void *x)
+{
+	ir_phase *block_info = (ir_phase *)x;
+
+	if (is_Block(n)) {
+		int i, max = get_Block_n_cfgpreds(n);
+		for (i=0; i<max; i++) {
+			ir_node *pred = get_Block_cfgpred(n,i);
+			if (is_unknown_jump(pred)) {
+				set_is_unknown_jump_target(block_info, n);
+			}
+		}
+	} else if (is_Phi(n)) {
+		ir_node *block = get_nodes_block(n);
+		set_has_phis(block_info, block);
+	} else if (is_Jmp(n) || is_Cond(n) || is_Cmp(n) || is_Proj(n)) {
+		/* pure control flow / comparison glue: does not count as content */
+		/* ignore */
+	} else {
+		ir_node *block = get_nodes_block(n);
+		set_has_operations(block_info, block);
+	}
+}
+
+/** Environment for the Phi-ignoring control flow optimizations below. */
+typedef struct skip_env {
+	bool changed;   /**< set to true whenever the graph was modified */
+	ir_phase *phase;   /**< block info computed by compute_block_info() */
+} skip_env;
+
+/**
+ * Block walker: tries optimize_pred_cond() on every pair of control flow
+ * predecessors of @p b.  Blocks containing Phi nodes are skipped entirely,
+ * presumably because folding two predecessor edges would invalidate the
+ * corresponding Phi inputs -- TODO confirm.
+ *
+ * NOTE(review): the return value of optimize_pred_cond() is discarded and
+ * env->changed is not updated here; verify this is intended (env is only
+ * read for has_phis()).
+ */
+static void optimize_conds(ir_node *b, void *x)
+{
+	skip_env *env = (skip_env*)x;
+	int i, j;
+	int n_preds = get_Block_n_cfgpreds(b);
+
+	if (has_phis(env->phase,b)) return;
+
+	/* optimize Cond predecessors (might produce Bad predecessors) */
+	for (i = 0; i < n_preds; i++) {
+		for (j = i+1; j < n_preds; j++) {
+			optimize_pred_cond(b, i, j);
+		}
+	}
+}
+
+/**
+ * Block walker: for every Jmp predecessor of @p b whose source block is
+ * empty (contains no real operations), has exactly one predecessor of its
+ * own, and is not the target of an unknown jump, bypass that block by
+ * rerouting the edge directly to its single predecessor.  Sets
+ * env->changed when anything was rerouted.
+ */
+static void remove_empty_blocks(ir_node *b, void *x)
+{
+	skip_env *env = (skip_env*)x;
+	int i;
+	int n_preds = get_Block_n_cfgpreds(b);
+
+	for (i = 0; i < n_preds; ++i) {
+		ir_node *jmp, *jmp_block, *pred, *pred_block;
+
+		jmp = get_Block_cfgpred(b, i);
+		if (!is_Jmp(jmp)) continue;
+		if (is_unknown_jump(jmp)) continue;
+		jmp_block = get_nodes_block(jmp);
+		if (is_unknown_jump_target(env->phase, jmp_block)) continue;
+		if (has_operations(env->phase,jmp_block)) continue;
+		/* jmp_block is an empty block! */
+
+		if (get_Block_n_cfgpreds(jmp_block) != 1) continue;
+		pred = get_Block_cfgpred(jmp_block, 0);
+		/* skip the empty block: b's edge now comes from pred directly */
+		exchange(jmp, pred);
+		env->changed = true;
+
+		/* cleanup: jmp_block might have a Keep edge! */
+		pred_block = get_nodes_block(pred);
+		exchange(jmp_block, pred_block);
+	}
+}
+
+/**
+ * Some control flow optimizations which do not touch Phi nodes: repeats
+ * Cond-to-Jmp folding and empty-block removal until a fixed point is
+ * reached.  Invalidates dominance information whenever a round changed
+ * the graph.
+ */
+static void cfgopt_ignoring_phis(ir_graph *irg) {
+	ir_phase *block_info = new_phase(irg, NULL);
+	skip_env env = { false, block_info };
+
+	/* compute has_phis/has_operations/unknown-jump-target info per block */
+	irg_walk_graph(irg, compute_block_info, NULL, block_info);
+
+	for(;;) {
+		env.changed = false;
+
+		/* Conds => Jmp optimization; might produce empty blocks */
+		irg_block_walk_graph(irg, optimize_conds, NULL, &env);
+
+		/* Remove empty blocks */
+		irg_block_walk_graph(irg, remove_empty_blocks, NULL, &env);
+		if (env.changed) {
+			set_irg_doms_inconsistent(irg);
+			/* Removing blocks might enable more Cond optimizations */
+			continue;
+		} else {
+			break;
+		}
+	}
+
+	phase_free(block_info);
+}
+
+/**
+ * Optimizations of the control flow that also require changes of Phi nodes.
+ *
+ * Requires a pinned graph past the construction phase.  Deactivates edges
+ * (they are destroyed by this pass -- see FIXME below), first runs the
+ * Phi-ignoring optimizations, then loops collecting nodes and folding
+ * switch-Conds until no more changes occur, merges/optimizes blocks, and
+ * finally removes dead kept-alive Phi nodes from the End node.
+ */
+void optimize_cf(ir_graph *irg)
+{
+	int i, j, n;
+	ir_node **in = NULL;
+	ir_node *end = get_irg_end(irg);
+	ir_node *new_end;
+	merge_env env;
+
+	assert(get_irg_phase_state(irg) != phase_building);
+
+	/* if the graph is not pinned, we cannot determine empty blocks */
+	assert(get_irg_pinned(irg) != op_pin_state_floats &&
+	       "Control flow optimization need a pinned graph");
+
+	/* FIXME: control flow opt destroys block edges. So edges are deactivated
+	 * here. Fix the edges! */
+	edges_deactivate(irg);
+
+	cfgopt_ignoring_phis(irg);
+
+	/* we use the mark flag to mark removable blocks */
+	ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK);
+
+	/* The switch Cond optimization might expose unreachable code, so we loop */
+	for (;;) {
+		int length;
+		ir_node **switch_conds = NULL;
+		env.changed = false;
+		env.phis_moved = false;
+
+		assure_doms(irg);
+
+		/*
+		 * This pass collects all Phi nodes in a link list in the block
+		 * nodes. Further it performs simple control flow optimizations.
+		 * Finally it marks all blocks that do not contain useful
+		 * computations, i.e., these blocks might be removed.
+		 */
+		switch_conds = NEW_ARR_F(ir_node*, 0);
+		irg_walk(end, clear_link, collect_nodes, &switch_conds);
+
+		/* handle all collected switch-Conds */
+		length = ARR_LEN(switch_conds);
+		for (i = 0; i < length; ++i) {
+			ir_node *cond = switch_conds[i];
+			env.changed |= handle_switch_cond(cond);
+		}
+		DEL_ARR_F(switch_conds);
+
+		if (!env.changed) break;
+
+		/* folding a switch-Cond changed control flow: all derived info
+		 * is stale, recompute dominance on the next iteration */
+		set_irg_doms_inconsistent(irg);
+		set_irg_extblk_inconsistent(irg);
+		set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
+	}
+
+	/* assert due to collect_nodes:
+	 * 1. removable blocks are now marked as such
+	 * 2. phi lists are up to date
+	 */
+
+	/* Optimize the standard code.
+	 * It walks only over block nodes and adapts these and the Phi nodes in these
+	 * blocks, which it finds in a linked list computed before.
+	 * */
+	assure_doms(irg);
+	irg_block_walk_graph(irg, optimize_blocks, NULL, &env);
+
+	new_end = optimize_in_place(end);
+	if (new_end != end) {
+		set_irg_end(irg, new_end);
+		end = new_end;
+	}
+	remove_End_Bads_and_doublets(end);
+
+	ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK);
+
+	if (env.phis_moved) {
+		/* Bad: when we moved Phi's, we might produce dead Phi nodes
+		   that are kept-alive.
+		   Some other phases cannot cope with this, so kill them.
+		 */
+		n = get_End_n_keepalives(end);
+		if (n > 0) {
+			NEW_ARR_A(ir_node *, in, n);
+			assure_irg_outs(irg);
+
+			/* keep only Phis that still have a real user besides
+			 * themselves and the End node */
+			for (i = j = 0; i < n; ++i) {
+				ir_node *ka = get_End_keepalive(end, i);
+
+				if (is_Phi(ka)) {
+					int k;
+
+					for (k = get_irn_n_outs(ka) - 1; k >= 0; --k) {
+						ir_node *user = get_irn_out(ka, k);
+
+						if (user != ka && user != end) {
+							/* Is it a real user or just a self loop ? */
+							break;
+						}
+					}
+					if (k >= 0)
+						in[j++] = ka;
+				} else
+					in[j++] = ka;
+			}
+			if (j != n) {
+				set_End_keepalives(end, j, in);
+				env.changed = true;
+			}
+		}
+	}
+
+	if (env.changed) {
+		/* Handle graph state if was changed. */
+		set_irg_doms_inconsistent(irg);
+		set_irg_extblk_inconsistent(irg);
+		set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
+	}
+}
+
+/* Creates an ir_graph pass for optimize_cf. */
+ir_graph_pass_t *optimize_cf_pass(const char *name)
+{
+ return def_graph_pass(name ? name : "optimize_cf", optimize_cf);