+
+/**
+ * Environment for inlining irgs.
+ */
+typedef struct {
+ int n_nodes; /**< Number of nodes in the graph, not counting Proj, Tuple and Sync nodes; Start and End are uncounted as well. */
+ int n_nodes_orig; /**< for statistics */
+ eset *call_nodes; /**< All call nodes in this graph */
+ int n_call_nodes;
+ int n_call_nodes_orig; /**< for statistics */
+ int n_callers; /**< Number of known graphs that call this graph. */
+ int n_callers_orig; /**< for statistics */
+} inline_irg_env;
+
+static inline_irg_env *new_inline_irg_env(void) {
+ inline_irg_env *env = malloc(sizeof(inline_irg_env));
+ env->n_nodes = -2; /* uncount Start, End */
+ env->n_nodes_orig = -2; /* uncount Start, End */
+ env->call_nodes = eset_create();
+ env->n_call_nodes = 0;
+ env->n_call_nodes_orig = 0;
+ env->n_callers = 0;
+ env->n_callers_orig = 0;
+ return env;
+}
+
+static void free_inline_irg_env(inline_irg_env *env) {
+ eset_destroy(env->call_nodes);
+ free(env);
+}
+
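+/* Walker: counts the nodes of the enclosing graph and collects all Call
+   nodes in the graph's inline_irg_env. For each statically known callee
+   it also increments that callee's caller count. */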
+static void collect_calls2(ir_node *call, void *env) {
+ inline_irg_env *x = (inline_irg_env *)env;
+ ir_op *op = get_irn_op(call);
+ ir_graph *callee;
+
+ /* count nodes in irg */
+ if (op != op_Proj && op != op_Tuple && op != op_Sync) {
+ x->n_nodes++;
+ x->n_nodes_orig++;
+ }
+
+ if (op != op_Call) return;
+
+ /* collect all call nodes */
+ eset_insert(x->call_nodes, (void *)call);
+ x->n_call_nodes++;
+ x->n_call_nodes_orig++;
+
+ /* count all static callers */
+ callee = get_call_called_irg(call);
+ if (callee) {
+ ((inline_irg_env *)get_irg_link(callee))->n_callers++;
+ ((inline_irg_env *)get_irg_link(callee))->n_callers_orig++;
+ }
+}
+
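+/* Returns true if the graph contains no Call nodes, i.e. is a leaf
+   method. */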
+INLINE static int is_leave(ir_graph *irg) {
+ return (((inline_irg_env *)get_irg_link(irg))->n_call_nodes == 0);
+}
+
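+/* Returns true if the graph currently has fewer than size nodes. */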
+INLINE static int is_smaller(ir_graph *callee, int size) {
+ return (((inline_irg_env *)get_irg_link(callee))->n_nodes < size);
+}
+
+
+/* Inlines small leaf ("leave") methods at call sites where the called
+   address comes from a Const node that references the entity representing
+   the called method.
+   All three arguments are node-count limits:
+   - maxsize:   inlining into a caller stops once its graph has grown
+                beyond this many nodes;
+   - leavesize: maximum node count of leaf methods (methods without Call
+                nodes) inlined in the first, iterated pass;
+   - size:      maximum node count of arbitrary methods inlined in the
+                second pass. */
+void inline_leave_functions(int maxsize, int leavesize, int size) {
+ inline_irg_env *env;
+ int i, n_irgs = get_irp_n_irgs();
+ ir_graph *rem = current_ir_graph;
+ int did_inline = 1;
+
+ if (!(get_optimize() && get_opt_inline())) return;
+
+ /* extend all irgs by a temporary data structure for inlining. */
+ for (i = 0; i < n_irgs; ++i)
+ set_irg_link(get_irp_irg(i), new_inline_irg_env());
+
+ /* Precompute information in temporary data structure. */
+ for (i = 0; i < n_irgs; ++i) {
+ current_ir_graph = get_irp_irg(i);
+ assert(get_irg_phase_state(current_ir_graph) != phase_building);
+ assert(get_irg_callee_info_state(current_ir_graph) == irg_callee_info_none);
+
+ irg_walk(get_irg_end(current_ir_graph), NULL, collect_calls2,
+ get_irg_link(current_ir_graph));
+ }
+
+ /* And now inline.
+    Inline leaves recursively -- inlining can create new leaves. */
+ while (did_inline) {
+ did_inline = 0;
+ for (i = 0; i < n_irgs; ++i) {
+ ir_node *call;
+ eset *walkset;
+ int phiproj_computed = 0;
+
+ current_ir_graph = get_irp_irg(i);
+ env = (inline_irg_env *)get_irg_link(current_ir_graph);
+
+ /* We cannot walk a set and change it, nor remove elements from it,
+    so move the surviving calls into a fresh set. */
+ walkset = env->call_nodes;
+ env->call_nodes = eset_create();
+ for (call = eset_first(walkset); call; call = eset_next(walkset)) {
+ inline_irg_env *callee_env;
+ ir_graph *callee = get_call_called_irg(call);
+
+ if (env->n_nodes > maxsize) break;
+ if (callee && is_leave(callee) && is_smaller(callee, leavesize)) {
+ if (!phiproj_computed) {
+ phiproj_computed = 1;
+ collect_phiprojs(current_ir_graph);
+ }
+ callee_env = (inline_irg_env *)get_irg_link(callee);
+ inline_method(call, callee);
+ did_inline = 1;
+ env->n_call_nodes--;
+ eset_insert_all(env->call_nodes, callee_env->call_nodes);
+ env->n_call_nodes += callee_env->n_call_nodes;
+ env->n_nodes += callee_env->n_nodes;
+ callee_env->n_callers--;
+ } else {
+ eset_insert(env->call_nodes, call);
+ }
+ }
+ eset_destroy(walkset);
+ }
+ }
+
+ /* Second pass: inline remaining (non-leaf) small functions. */
+ for (i = 0; i < n_irgs; ++i) {
+ ir_node *call;
+ eset *walkset;
+ int phiproj_computed = 0;
+
+ current_ir_graph = get_irp_irg(i);
+ env = (inline_irg_env *)get_irg_link(current_ir_graph);
+
+ /* We cannot walk a set and change it, nor remove elements from it,
+    so move the surviving calls into a fresh set. */
+ walkset = env->call_nodes;
+ env->call_nodes = eset_create();
+ for (call = eset_first(walkset); call; call = eset_next(walkset)) {
+ inline_irg_env *callee_env;
+ ir_graph *callee = get_call_called_irg(call);
+
+ if (env->n_nodes > maxsize) break;
+ if (callee && is_smaller(callee, size)) {
+ if (!phiproj_computed) {
+ phiproj_computed = 1;
+ collect_phiprojs(current_ir_graph);
+ }
+ callee_env = (inline_irg_env *)get_irg_link(callee);
+ inline_method(call, callee);
+ env->n_call_nodes--;
+ eset_insert_all(env->call_nodes, callee_env->call_nodes);
+ env->n_call_nodes += callee_env->n_call_nodes;
+ env->n_nodes += callee_env->n_nodes;
+ callee_env->n_callers--;
+ } else {
+ eset_insert(env->call_nodes, call);
+ }
+ }
+ eset_destroy(walkset);
+ }
+
+ for (i = 0; i < n_irgs; ++i) {
+ current_ir_graph = get_irp_irg(i);
+#if 0
+ env = (inline_irg_env *)get_irg_link(current_ir_graph);
+ if ((env->n_call_nodes_orig != env->n_call_nodes) ||
+ (env->n_callers_orig != env->n_callers))
+ printf("Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
+ env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
+ env->n_callers_orig, env->n_callers,
+ get_entity_name(get_irg_entity(current_ir_graph)));
+#endif
+ free_inline_irg_env((inline_irg_env *)get_irg_link(current_ir_graph));
+ }
+
+ current_ir_graph = rem;
+}
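+
+/* A minimal usage sketch; the threshold values below are illustrative
+   only, not tuned recommendations:
+
+     inline_leave_functions(750, 16, 10);
+
+   This first inlines leaf methods with fewer than 16 nodes, then
+   arbitrary methods with fewer than 10 nodes, and stops growing any
+   caller once it exceeds 750 nodes. */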
+
+/********************************************************************/
+/* Code Placement. Pins all floating nodes to a block where they */
+/* will be executed only if needed. */
+/********************************************************************/
+
+/* Find the earliest correct block for N. --- Place N into the
+ same Block as its dominance-deepest Input. */
+static void
+place_floats_early(ir_node *n, pdeq *worklist)
+{
+ int i, start, irn_arity;
+
+ /* we must not run into an infinite loop */
+ assert (irn_not_visited(n));
+ mark_irn_visited(n);
+
+ /* Place floating nodes. */
+ if (get_op_pinned(get_irn_op(n)) == floats) {
+ int depth = 0;
+ ir_node *b = new_Bad(); /* The block to place this node in */
+
+ assert(get_irn_op(n) != op_Block);
+
+ if ((get_irn_op(n) == op_Const) ||
+ (get_irn_op(n) == op_SymConst) ||
+ (is_Bad(n)) ||
+ (get_irn_op(n) == op_Unknown)) {
+ /* These nodes will not be placed by the loop below. */
+ b = get_irg_start_block(current_ir_graph);
+ depth = 1;
+ }
+
+ /* find the block for this node. */
+ irn_arity = get_irn_arity(n);
+ for (i = 0; i < irn_arity; i++) {
+ ir_node *dep = get_irn_n(n, i);
+ ir_node *dep_block;
+ if ((irn_not_visited(dep)) &&
+ (get_op_pinned(get_irn_op(dep)) == floats)) {
+ place_floats_early(dep, worklist);
+ }
+ /* Because all loops contain at least one pinned node, now all
+ our inputs are either pinned or place_early has already
+ been finished on them. We do not have any unfinished inputs! */
+ dep_block = get_nodes_Block(dep);
+ if ((!is_Bad(dep_block)) &&
+ (get_Block_dom_depth(dep_block) > depth)) {
+ b = dep_block;
+ depth = get_Block_dom_depth(dep_block);
+ }
+ /* Avoid that the node is placed in the Start block: use the Start block's successor instead. */
+ if ((depth == 1) && (get_Block_dom_depth(get_nodes_Block(n)) > 1)) {
+ b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0);
+ assert(b != get_irg_start_block(current_ir_graph));
+ depth = 2;
+ }
+ }
+ set_nodes_Block(n, b);
+ }
+
+ /* Add predecessors of non-floating nodes to the worklist. */
+ start = (get_irn_op(n) == op_Block) ? 0 : -1;
+ irn_arity = get_irn_arity(n);
+ for (i = start; i < irn_arity; i++) {
+ ir_node *pred = get_irn_n(n, i);
+ if (irn_not_visited(pred)) {
+ pdeq_putr (worklist, pred);
+ }
+ }
+}
+
+/* Floating nodes form subgraphs that begin at nodes such as Const, Load,
+   Start and Call, and end at pinned nodes such as Store and Call.
+   place_early places all floating nodes reachable from its argument
+   through floating nodes and adds all subgraph beginnings at pinned
+   nodes to the worklist. */
+static INLINE void place_early(pdeq* worklist) {
+ assert(worklist);
+ inc_irg_visited(current_ir_graph);
+
+ /* this inits the worklist */
+ place_floats_early(get_irg_end(current_ir_graph), worklist);
+
+ /* Work the content of the worklist. */
+ while (!pdeq_empty (worklist)) {
+ ir_node *n = pdeq_getl (worklist);
+ if (irn_not_visited(n)) place_floats_early(n, worklist);
+ }
+
+ set_irg_outs_inconsistent(current_ir_graph);
+ current_ir_graph->pinned = pinned;
+}
+
+
+/* Deepest common dominance ancestor of dca and the block where consumer uses producer. */
+static ir_node *
+consumer_dom_dca (ir_node *dca, ir_node *consumer, ir_node *producer)
+{
+ ir_node *block = NULL;
+
+ /* Compute the latest block into which we can place a node so that it is
+ before consumer. */
+ if (get_irn_op(consumer) == op_Phi) {
+ /* our consumer is a Phi-node, the effective use is in all those
+ blocks through which the Phi-node reaches producer */
+ int i, irn_arity;
+ ir_node *phi_block = get_nodes_Block(consumer);
+ irn_arity = get_irn_arity(consumer);
+ for (i = 0; i < irn_arity; i++) {
+ if (get_irn_n(consumer, i) == producer) {
+ block = get_nodes_Block(get_Block_cfgpred(phi_block, i));
+ }
+ }
+ } else {
+ assert(is_no_Block(consumer));
+ block = get_nodes_Block(consumer);
+ }
+
+ /* Compute the deepest common ancestor of block and dca. */
+ assert(block);
+ if (!dca) return block;
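+ /* Standard lowest-common-ancestor walk in the dominator tree: first
+    bring both blocks to the same dominance depth, then ascend in
+    lockstep until they meet. */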
+ while (get_Block_dom_depth(block) > get_Block_dom_depth(dca))
+ block = get_Block_idom(block);
+ while (get_Block_dom_depth(dca) > get_Block_dom_depth(block))
+ dca = get_Block_idom(dca);
+ while (block != dca)
+ { block = get_Block_idom(block); dca = get_Block_idom(dca); }
+
+ return dca;
+}
+
+static INLINE int get_irn_loop_depth(ir_node *n) {
+ return get_loop_depth(get_irn_loop(n));
+}
+
+/* Move n to a block with less loop depth than its current block. The
+   new block must be dominated by early. */
+static void
+move_out_of_loops (ir_node *n, ir_node *early)
+{
+ ir_node *best, *dca;
+ assert(n && early);
+
+ /* Find the region deepest in the dominator tree dominating
+ dca with the least loop nesting depth, but still dominated
+ by our early placement. */
+ dca = get_nodes_Block(n);
+ best = dca;
+ while (dca != early) {
+ dca = get_Block_idom(dca);
+ if (!dca) break; /* should we put assert(dca)? */
+ if (get_irn_loop_depth(dca) < get_irn_loop_depth(best)) {
+ best = dca;
+ }
+ }
+ if (best != get_nodes_Block(n)) {
+ set_nodes_Block(n, best);
+ }
+}
+
+/* Find the latest legal block for N and place N into the
+ `optimal' Block between the latest and earliest legal block.
+ The `optimal' block is the dominance-deepest block of those
+ with the least loop-nesting-depth. This places N out of as many
+ loops as possible and then makes it as control dependent as
+ possible. */
+static void
+place_floats_late(ir_node *n, pdeq *worklist)
+{
+ int i;
+ ir_node *early;
+
+ assert (irn_not_visited(n)); /* no multiple placement */
+
+ /* no need to place block nodes, control nodes are already placed. */
+ if ((get_irn_op(n) != op_Block) &&
+ (!is_cfop(n)) &&
+ (get_irn_mode(n) != mode_X)) {
+ /* Remember the early placement of this node to move it
+    out of loops no further than that early placement. */
+ early = get_nodes_Block(n);
+ /* Ensure that all our users are placed, except the Phi-nodes.
+ --- Each data flow cycle contains at least one Phi-node. We
+ have to break the `user has to be placed before the
+ producer' dependence cycle and the Phi-nodes are the
+ place to do so, because we need to base our placement on the
+ final region of our users, which is OK with Phi-nodes, as they
+ are pinned, and they never have to be placed after a
+ producer of one of their inputs in the same block anyway. */
+ for (i = 0; i < get_irn_n_outs(n); i++) {
+ ir_node *succ = get_irn_out(n, i);
+ if (irn_not_visited(succ) && (get_irn_op(succ) != op_Phi))
+ place_floats_late(succ, worklist);
+ }
+
+ /* We have to determine the final block of this node... except for
+ constants. */
+ if ((get_op_pinned(get_irn_op(n)) == floats) &&
+ (get_irn_op(n) != op_Const) &&
+ (get_irn_op(n) != op_SymConst)) {
+ ir_node *dca = NULL; /* deepest common ancestor in the
+ dominator tree of all nodes'
+ blocks depending on us; our final
+ placement has to dominate DCA. */
+ for (i = 0; i < get_irn_n_outs(n); i++) {
+ dca = consumer_dom_dca (dca, get_irn_out(n, i), n);
+ }
+ set_nodes_Block(n, dca);
+
+ move_out_of_loops (n, early);
+ }
+ }
+
+ mark_irn_visited(n);
+
+ /* Add predecessors of all non-floating nodes to the worklist. (Those of
+    floating nodes are placed already and therefore marked.) */
+ for (i = 0; i < get_irn_n_outs(n); i++) {
+ if (irn_not_visited(get_irn_out(n, i))) {
+ pdeq_putr (worklist, get_irn_out(n, i));
+ }
+ }
+}
+
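+/* Counterpart of place_early: starting at the Start block, walks the out
+   edges and sinks every floating node to its latest legal block, moving
+   it out of loops where profitable. */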
+static INLINE void place_late(pdeq* worklist) {
+ assert(worklist);
+ inc_irg_visited(current_ir_graph);
+
+ /* This fills the worklist initially. */
+ place_floats_late(get_irg_start_block(current_ir_graph), worklist);
+ /* And now empty the worklist again... */
+ while (!pdeq_empty (worklist)) {
+ ir_node *n = pdeq_getl (worklist);
+ if (irn_not_visited(n)) place_floats_late(n, worklist);
+ }
+}
+
+void place_code(ir_graph *irg) {
+ pdeq* worklist;
+ ir_graph *rem = current_ir_graph;
+
+ /* Check the flags before switching current_ir_graph so that an early
+    return does not leave the global graph pointer changed. */
+ if (!(get_optimize() && get_opt_global_cse())) return;
+
+ current_ir_graph = irg;
+
+ /* Handle graph state */
+ assert(get_irg_phase_state(irg) != phase_building);
+ if (get_irg_dom_state(irg) != dom_consistent)
+ compute_doms(irg);
+
+ construct_backedges(irg);
+
+ /* Place all floating nodes as early as possible. This guarantees
+ a legal code placement. */
+ worklist = new_pdeq();
+ place_early(worklist);
+
+ /* place_early invalidates the outs, place_late needs them. */
+ compute_outs(irg);
+ /* Now move the nodes down in the dominator tree. This reduces
+    unnecessary executions of nodes. */
+ place_late(worklist);
+
+ set_irg_outs_inconsistent(current_ir_graph);
+ del_pdeq(worklist);
+ current_ir_graph = rem;
+}
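+
+/* place_code only runs with global CSE enabled. A hypothetical driver
+   might do:
+
+     set_opt_global_cse(1);
+     place_code(irg);
+*/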
+
+
+
+/********************************************************************/
+/* Control flow optimization. */
+/* Removes Bad control flow predecessors and empty blocks. A block */
+/* is empty if it contains only a Jmp node. */
+/* Blocks can only be removed if they are not needed for the */
+/* semantics of Phi nodes. */
+/********************************************************************/
+
+/* Removes Tuples from Block control flow predecessors.
+ Optimizes blocks with equivalent_node().
+ Replaces n by Bad if n is unreachable control flow. */
+static void merge_blocks(ir_node *n, void *env) {
+ int i;
+ set_irn_link(n, NULL);
+
+ if (get_irn_op(n) == op_Block) {
+ /* Remove Tuples */
+ for (i = 0; i < get_Block_n_cfgpreds(n); i++)
+ /* GL @@@: is this possible? if (get_opt_normalize()) -- added, all tests pass.
+    A different order of optimizations might cause problems. */
+ if (get_opt_normalize())
+ set_Block_cfgpred(n, i, skip_Tuple(get_Block_cfgpred(n, i)));
+ } else if (get_optimize() && (get_irn_mode(n) == mode_X)) {
+ /* We will soon visit a block. Optimize it before visiting! */
+ ir_node *b = get_nodes_Block(n);
+ ir_node *new_node = equivalent_node(b);
+ while (irn_not_visited(b) && (!is_Bad(new_node)) && (new_node != b)) {
+ /* We would have to run gigo() if new_node is Bad; instead we
+    handle that case directly below. */
+ assert(((b == new_node) ||
+ get_opt_control_flow_straightening() ||
+ get_opt_control_flow_weak_simplification()) &&
+ ("strange flag setting"));
+ exchange (b, new_node);
+ b = new_node;
+ new_node = equivalent_node(b);
+ }
+ /* GL @@@ get_opt_normalize added, 5.5.2003 */
+ if (is_Bad(new_node) && get_opt_normalize()) exchange(n, new_Bad());
+ }
+}
+
+/* Collects all Phi nodes in the link list of their block.
+   Marks a block "block_visited" if it contains a node other
+   than Jmp. */
+static void collect_nodes(ir_node *n, void *env) {
+ if (is_no_Block(n)) {
+ ir_node *b = get_nodes_Block(n);
+
+ if ((get_irn_op(n) == op_Phi)) {
+ /* Collect Phi nodes to compact ins along with block's ins. */
+ set_irn_link(n, get_irn_link(b));
+ set_irn_link(b, n);
+ } else if (get_irn_op(n) != op_Jmp) { /* Check for non-empty block. */
+ mark_Block_block_visited(b);
+ }
+ }
+}
+
+/* Returns true if pred is a predecessor block of b. */
+static int is_pred_of(ir_node *pred, ir_node *b) {
+ int i;
+ for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
+ ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
+ if (b_pred == pred) return 1;
+ }
+ return 0;
+}
+
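+/* Returns the number of control flow predecessors that position pos of
+   block b will contribute after merging: the predecessor count of the
+   empty predecessor block if it can be removed, else 1. Blocks that must
+   survive are marked via their block_visited flag. */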
+static int test_whether_dispensable(ir_node *b, int pos) {
+ int i, j, n_preds = 1;
+ int dispensable = 1;
+ ir_node *cfop = get_Block_cfgpred(b, pos);
+ ir_node *pred = get_nodes_Block(cfop);
+
+ if (get_Block_block_visited(pred) + 1
+ < get_irg_block_visited(current_ir_graph)) {
+ if (!get_optimize() || !get_opt_control_flow_strong_simplification()) {
+ /* Mark block so that it will not be removed. */
+ set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
+ return 1;
+ }
+ /* Seems to be empty. */
+ if (!get_irn_link(b)) {
+ /* There are no Phi nodes ==> dispensable. */
+ n_preds = get_Block_n_cfgpreds(pred);
+ } else {
+ /* b's pred blocks and pred's pred blocks must be pairwise disjoint.
+    Handle preds < pos as if they were already removed. */
+ for (i = 0; i < pos; i++) {
+ ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
+ if (get_Block_block_visited(b_pred) + 1
+ < get_irg_block_visited(current_ir_graph)) {
+ for (j = 0; j < get_Block_n_cfgpreds(b_pred); j++) {
+ ir_node *b_pred_pred = get_nodes_Block(get_Block_cfgpred(b_pred, j));
+ if (is_pred_of(b_pred_pred, pred)) dispensable = 0;
+ }
+ } else {
+ if (is_pred_of(b_pred, pred)) dispensable = 0;
+ }
+ }
+ for (i = pos +1; i < get_Block_n_cfgpreds(b); i++) {
+ ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
+ if (is_pred_of(b_pred, pred)) dispensable = 0;
+ }
+ if (!dispensable) {
+ set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
+ n_preds = 1;
+ } else {
+ n_preds = get_Block_n_cfgpreds(pred);
+ }
+ }
+ }
+
+ return n_preds;
+}
+
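+/* Called by the block walker. Merges b with its dispensable (empty and
+   not otherwise needed) predecessor blocks: fixes the in arrays of b's
+   Phi nodes, moves the Phi nodes of removed blocks into b, and finally
+   rebuilds b's own in array. */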
+static void optimize_blocks(ir_node *b, void *env) {
+ int i, j, k, max_preds, n_preds;
+ ir_node *pred, *phi;
+ ir_node **in;
+
+ /* Count the number of predecessors this block will have after it is
+    merged with its empty predecessor blocks. */
+ max_preds = 0;
+ for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
+ max_preds += test_whether_dispensable(b, i);
+ }
+ in = (ir_node **) malloc(max_preds * sizeof(ir_node *));
+
+ /** Fix the Phi nodes **/
+ phi = get_irn_link(b);
+ while (phi) {
+ assert(get_irn_op(phi) == op_Phi);
+ /* Find the new predecessors for the Phi */
+ n_preds = 0;
+ for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
+ pred = get_nodes_Block(get_Block_cfgpred(b, i));
+ if (is_Bad(get_Block_cfgpred(b, i))) {
+ /* Do nothing */
+ } else if (get_Block_block_visited(pred) +1
+ < get_irg_block_visited(current_ir_graph)) {
+ /* It's an empty block and not yet visited. */
+ ir_node *phi_pred = get_Phi_pred(phi, i);
+ for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
+ if (get_nodes_Block(phi_pred) == pred) {
+ assert(get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */
+ in[n_preds] = get_Phi_pred(phi_pred, j);
+ } else {
+ in[n_preds] = phi_pred;
+ }
+ n_preds++;
+ }
+ /* The Phi_pred node is replaced now if it is a Phi.
+    In loops the removed Phi node can apparently still be used
+    legally, so it has to be replaced by the new Phi. Furthermore the
+    old Phi node must be removed (by exchanging it or replacing it by
+    a Bad) so that it disappears from the keep_alives. Hence, if no
+    loop is involved, one should call exchange with new_Bad. */
+ if (get_nodes_Block(phi_pred) == pred) {
+ /* remove the Phi as it might be kept alive. Further there
+ might be other users. */
+ exchange(phi_pred, phi); /* works, but is semantically wrong! Why?? */
+ }
+ } else {
+ in[n_preds] = get_Phi_pred(phi, i);
+ n_preds ++;
+ }
+ }
+ /* Fix the node */
+ set_irn_in(phi, n_preds, in);
+
+ phi = get_irn_link(phi);
+ }
+
+ /* Move the Phi nodes of dispensable predecessor blocks into b. This
+    happens only on a merge between a loop backedge and a single loop
+    entry. */
+ for (k = 0; k < get_Block_n_cfgpreds(b); k++) {
+ pred = get_nodes_Block(get_Block_cfgpred(b, k));
+ if (get_Block_block_visited(pred) +1
+ < get_irg_block_visited(current_ir_graph)) {
+ phi = get_irn_link(pred);
+ while (phi) {
+ if (get_irn_op(phi) == op_Phi) {
+ set_nodes_Block(phi, b);
+
+ n_preds = 0;
+ for (i = 0; i < k; i++) {
+ pred = get_nodes_Block(get_Block_cfgpred(b, i));
+ if (is_Bad(get_Block_cfgpred(b, i))) {
+ /* Do nothing */
+ } else if (get_Block_block_visited(pred) +1
+ < get_irg_block_visited(current_ir_graph)) {
+ /* It's an empty block and not yet visited. */
+ for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
+ /* @@@ Here we need loop information! The control flow edge
+    must be a backedge! (At all four in[n_preds] = phi
+    assignments.) Still, it has worked so far!! */
+ in[n_preds] = phi;
+ n_preds++;
+ }
+ } else {
+ in[n_preds] = phi;
+ n_preds++;
+ }
+ }
+ for (i = 0; i < get_Phi_n_preds(phi); i++) {
+ in[n_preds] = get_Phi_pred(phi, i);
+ n_preds++;
+ }
+ for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
+ pred = get_nodes_Block(get_Block_cfgpred(b, i));
+ if (is_Bad(get_Block_cfgpred(b, i))) {
+ /* Do nothing */
+ } else if (get_Block_block_visited(pred) +1
+ < get_irg_block_visited(current_ir_graph)) {
+ /* It's an empty block and not yet visited. */
+ for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
+ in[n_preds] = phi;
+ n_preds++;
+ }
+ } else {
+ in[n_preds] = phi;
+ n_preds++;
+ }
+ }
+ set_irn_in(phi, n_preds, in);
+ }
+ phi = get_irn_link(phi);
+ }
+ }
+ }
+
+ /** Fix the block **/
+ n_preds = 0;
+ for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
+ pred = get_nodes_Block(get_Block_cfgpred(b, i));
+ if (is_Bad(get_Block_cfgpred(b, i))) {
+ /* Do nothing */
+ } else if (get_Block_block_visited(pred) +1
+ < get_irg_block_visited(current_ir_graph)) {
+ /* It's an empty block and not yet visited. */
+ assert(get_Block_n_cfgpreds(b) > 1);
+ /* Else it should be optimized by equivalent_node. */
+ for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
+ in[n_preds] = get_Block_cfgpred(pred, j);
+ n_preds++;
+ }
+ /* Remove block as it might be kept alive. */
+ exchange(pred, b/*new_Bad()*/);
+ } else {
+ in[n_preds] = get_Block_cfgpred(b, i);
+ n_preds ++;
+ }
+ }
+ set_irn_in(b, n_preds, in);
+ free(in);
+}
+
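+/* Control flow optimization for one graph: removes Bad control flow
+   predecessors and empty blocks and rebuilds the keep-alive list of the
+   End node. */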
+void optimize_cf(ir_graph *irg) {
+ int i;
+ ir_node **in;
+ ir_node *end = get_irg_end(irg);
+ ir_graph *rem = current_ir_graph;
+ current_ir_graph = irg;
+
+ /* Handle graph state */
+ assert(get_irg_phase_state(irg) != phase_building);
+ if (get_irg_outs_state(current_ir_graph) == outs_consistent)
+ set_irg_outs_inconsistent(current_ir_graph);
+ if (get_irg_dom_state(current_ir_graph) == dom_consistent)
+ set_irg_dom_inconsistent(current_ir_graph);
+
+ /* Use block visited flag to mark non-empty blocks. */
+ inc_irg_block_visited(irg);
+ irg_walk(end, merge_blocks, collect_nodes, NULL);
+
+ /* Optimize the standard code. */
+ irg_block_walk(get_irg_end_block(irg), optimize_blocks, NULL, NULL);
+
+ /* Walk all keep-alives, optimize the blocks among them, and add them
+    to End's new in-array if still useful. */
+ in = NEW_ARR_F (ir_node *, 1);
+ in[0] = get_nodes_Block(end);
+ inc_irg_visited(current_ir_graph);
+ for(i = 0; i < get_End_n_keepalives(end); i++) {
+ ir_node *ka = get_End_keepalive(end, i);
+ if (irn_not_visited(ka)) {
+ if ((get_irn_op(ka) == op_Block) && Block_not_block_visited(ka)) {
+ set_irg_block_visited(current_ir_graph, /* Don't walk all the way to Start. */
+ get_irg_block_visited(current_ir_graph)-1);
+ irg_block_walk(ka, optimize_blocks, NULL, NULL);
+ mark_irn_visited(ka);
+ ARR_APP1 (ir_node *, in, ka);
+ } else if (get_irn_op(ka) == op_Phi) {
+ mark_irn_visited(ka);
+ ARR_APP1 (ir_node *, in, ka);
+ }
+ }
+ }
+ /* DEL_ARR_F(end->in); GL @@@ does not work! */
+ end->in = in;
+
+ current_ir_graph = rem;
+}
+
+
+/**
+ * Called by walker of remove_critical_cf_edges.
+ *
+ * Inserts an empty block on each edge between a block with multiple
+ * successors and a block with multiple predecessors.
+ *
+ * @param n   IR node
+ * @param env Walker environment; unused, always NULL.
+ */
+static void walk_critical_cf_edges(ir_node *n, void *env) {
+ int arity, i;
+ ir_node *pre, *block, **in, *jmp;
+
+ /* Block has multiple predecessors */
+ if ((op_Block == get_irn_op(n)) &&
+ (get_irn_arity(n) > 1)) {
+ arity = get_irn_arity(n);
+
+ if (n == get_irg_end_block(current_ir_graph))
+ return; /* no need to add a block here */
+
+ for (i = 0; i < arity; i++) {
+ pre = get_irn_n(n, i);
+ /* Predecessor has multiple successors. Insert new flow edge */
+ if ((NULL != pre) &&
+ (op_Proj == get_irn_op(pre)) &&
+ op_Raise != get_irn_op(skip_Proj(pre))) {
+
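+ /* pre is a Proj of a multi-way control flow node (e.g. Cond), so
+    the edge from pre to n is critical. Split it by inserting an
+    empty block that contains only a Jmp. */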
+ /* set predecessor array for new block */
+ in = NEW_ARR_D (ir_node *, current_ir_graph->obst, 1);
+ /* set predecessor of new block */
+ in[0] = pre;
+ block = new_Block(1, in);
+ /* insert new jmp node to new block */
+ switch_block(block);
+ jmp = new_Jmp();
+ switch_block(n);
+ /* set successor of new block */
+ set_irn_n(n, i, jmp);
+
+ } /* predecessor has multiple successors */
+ } /* for all predecessors */
+ } /* n is a block */
+}
+
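+/* Splits every critical control flow edge in irg, provided the
+   corresponding optimization flag is enabled. */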
+void remove_critical_cf_edges(ir_graph *irg) {
+ if (get_opt_critical_edges())
+ irg_walk_graph(irg, NULL, walk_critical_cf_edges, NULL);
+}