- res_pred = xmalloc (n_res * sizeof(*res_pred));
- cf_pred = xmalloc (arity * sizeof(*cf_pred));
-
- set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */
-
- /* -- Preserve the keep-alive edges of the inlined graph. -- */
- irn_arity = get_irn_arity(end);
- for (i = 0; i < irn_arity; i++)
- add_End_keepalive(get_irg_end(current_ir_graph), get_irn_n(end, i));
-
- /* The new end node will die. We need not free it, as the in array is on the obstack:
- copy_node() only generated 'D' arrays. */
-
- /* -- Replace Return nodes by Jump nodes. -- */
- n_ret = 0;
- for (i = 0; i < arity; i++) {
- ir_node *ret;
- ret = get_irn_n(end_bl, i);
- if (is_Return(ret)) {
- cf_pred[n_ret] = new_r_Jmp(current_ir_graph, get_nodes_block(ret));
- n_ret++;
- }
- }
- set_irn_in(post_bl, n_ret, cf_pred);
-
- /* -- Build a Tuple for all results of the method.
- Add Phi nodes if there was more than one Return. -- */
- turn_into_tuple(post_call, 4);
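- /* The four Tuple predecessors correspond to pn_Call_M_regular,
- pn_Call_X_except, pn_Call_T_result and pn_Call_M_except; all of
- them are set below. */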
- /* First the Memory-Phi */
- n_ret = 0;
- for (i = 0; i < arity; i++) {
- ret = get_irn_n(end_bl, i);
- if (is_Return(ret)) {
- cf_pred[n_ret] = get_Return_mem(ret);
- n_ret++;
- }
- }
- phi = new_Phi(n_ret, cf_pred, mode_M);
- set_Tuple_pred(call, pn_Call_M_regular, phi);
- /* Conserve the Phi-list for further inlinings -- but it might be optimized away. */
- if (get_nodes_block(phi) == post_bl) {
- set_irn_link(phi, get_irn_link(post_bl));
- set_irn_link(post_bl, phi);
- }
- /* Now the real results */
- if (n_res > 0) {
- for (j = 0; j < n_res; j++) {
- n_ret = 0;
- for (i = 0; i < arity; i++) {
- ret = get_irn_n(end_bl, i);
- if (is_Return(ret)) {
- cf_pred[n_ret] = get_Return_res(ret, j);
- n_ret++;
- }
- }
- if (n_ret > 0)
- phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
- else
- phi = new_Bad();
- res_pred[j] = phi;
- /* Conserve the Phi-list for further inlinings -- but it might be optimized away. */
- if (get_nodes_block(phi) == post_bl) {
- set_irn_link(phi, get_irn_link(post_bl));
- set_irn_link(post_bl, phi);
- }
- }
- set_Tuple_pred(call, pn_Call_T_result, new_Tuple(n_res, res_pred));
- } else {
- set_Tuple_pred(call, pn_Call_T_result, new_Bad());
- }
- /* Finally the exception control flow.
- We have two (three) possible situations:
- First, the Call branches to an exception handler: we need to add a Phi node to
- collect the memory containing the exception objects. Further we need
- to add another block to get a correct representation of this Phi. To
- this block we add a Jmp that resolves into the X output of the Call
- when the Call is turned into a tuple.
- Second, the Call branches to End and the exception is not handled: just
- add all inlined exception branches to the End node.
- Third, there is no exception edge at all: handle as case two. */
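- /* exc_handling == 0 corresponds to the first situation (a handler exists);
- the else branch below covers the second and the third. */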
- if (exc_handling == 0) {
- n_exc = 0;
- for (i = 0; i < arity; i++) {
- ir_node *ret;
- ret = get_irn_n(end_bl, i);
- if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
- cf_pred[n_exc] = ret;
- n_exc++;
- }
- }
- if (n_exc > 0) {
- new_Block(n_exc, cf_pred); /* watch it: current_block is changed! */
- set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
- /* The Phi for the memories with the exception objects */
- n_exc = 0;
- for (i = 0; i < arity; i++) {
- ir_node *ret;
- ret = skip_Proj(get_irn_n(end_bl, i));
- if (is_Call(ret)) {
- cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_block(ret), ret, mode_M, pn_Call_M_except);
- n_exc++;
- } else if (is_fragile_op(ret)) {
- /* We rely on all fragile ops having their memory output at the same position. */
- cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_block(ret), ret, mode_M, 0);
- n_exc++;
- } else if (get_irn_op(ret) == op_Raise) {
- cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_block(ret), ret, mode_M, 1);
- n_exc++;
- }
- }
- set_Tuple_pred(call, pn_Call_M_except, new_Phi(n_exc, cf_pred, mode_M));
- } else {
- set_Tuple_pred(call, pn_Call_X_except, new_Bad());
- set_Tuple_pred(call, pn_Call_M_except, new_Bad());
- }
- } else {
- ir_node *main_end_bl;
- int main_end_bl_arity;
- ir_node **end_preds;
-
- /* assert(exc_handling == 1 || there are no exceptions); */
- n_exc = 0;
- for (i = 0; i < arity; i++) {
- ir_node *ret = get_irn_n(end_bl, i);
-
- if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
- cf_pred[n_exc] = ret;
- n_exc++;
- }
- }
- main_end_bl = get_irg_end_block(current_ir_graph);
- main_end_bl_arity = get_irn_arity(main_end_bl);
- end_preds = xmalloc ((n_exc + main_end_bl_arity) * sizeof(*end_preds));
-
- for (i = 0; i < main_end_bl_arity; ++i)
- end_preds[i] = get_irn_n(main_end_bl, i);
- for (i = 0; i < n_exc; ++i)
- end_preds[main_end_bl_arity + i] = cf_pred[i];
- set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
- set_Tuple_pred(call, pn_Call_X_except, new_Bad());
- set_Tuple_pred(call, pn_Call_M_except, new_Bad());
- free(end_preds);
- }
- free(res_pred);
- free(cf_pred);
-
-#if 0 /* old; superseded by a better, more correct, faster implementation. */
- if (n_exc > 0) {
- /* -- If the exception control flow from the inlined Call directly
- branched to the end block we now have the following control
- flow predecessor pattern: ProjX -> Tuple -> Jmp. We must
- remove the Jmp along with its empty block and add Jmp's
- predecessors as predecessors of this end block. No problem if
- there is no exception, because then Bad branches to End, which
- is fine. --
- @@@ can't we know this beforehand: by getting the Proj(1) from
- the Call link list and checking whether it goes to Proj. */
- /* find the problematic predecessor of the end block. */
- end_bl = get_irg_end_block(current_ir_graph);
- for (i = 0; i < get_Block_n_cfgpreds(end_bl); i++) {
- cf_op = get_Block_cfgpred(end_bl, i);
- if (get_irn_op(cf_op) == op_Proj) {
- cf_op = get_Proj_pred(cf_op);
- if ((get_irn_op(cf_op) == op_Tuple) && (cf_op == call)) {
- /* There may be unoptimized Tuples left over from earlier inlinings without exceptions */
- assert(get_Proj_proj(get_Block_cfgpred(end_bl, i)) == pn_Call_X_except);
- cf_op = get_Tuple_pred(cf_op, pn_Call_X_except);
- assert(get_irn_op(cf_op) == op_Jmp);
- break;
- }
- }
- }
- /* repair */
- if (i < get_Block_n_cfgpreds(end_bl)) {
- bl = get_nodes_block(cf_op);
- arity = get_Block_n_cfgpreds(end_bl) + get_Block_n_cfgpreds(bl) - 1;
- cf_pred = xmalloc (arity * sizeof(*cf_pred));
- for (j = 0; j < i; j++)
- cf_pred[j] = get_Block_cfgpred(end_bl, j);
- for (; j < i + get_Block_n_cfgpreds(bl); j++)
- cf_pred[j] = get_Block_cfgpred(bl, j-i);
- for (; j < arity; j++)
- cf_pred[j] = get_Block_cfgpred(end_bl, j - get_Block_n_cfgpreds(bl) + 1);
- set_irn_in(end_bl, arity, cf_pred);
- free(cf_pred);
- /* Remove the exception pred from post-call Tuple. */
- set_Tuple_pred(call, pn_Call_X_except, new_Bad());
- }
- }
-#endif
-
- /* -- Turn CSE back on. -- */
- set_optimize(rem_opt);
-
- return 1;
-}
-
-/********************************************************************/
-/* Apply inlining to small methods. */
-/********************************************************************/
-
-/* It makes no sense to inline too many calls in one procedure. Anyway,
- I didn't get a version with NEW_ARR_F to run. */
-#define MAX_INLINE 1024
-
-/**
- * environment for inlining small irgs
- */
-typedef struct _inline_env_t {
- int pos;
- ir_node *calls[MAX_INLINE];
-} inline_env_t;
-
-/**
- * Returns the irg called from a Call node. If the irg is not
- * known, NULL is returned.
- */
-static ir_graph *get_call_called_irg(ir_node *call) {
- ir_node *addr;
- ir_graph *called_irg = NULL;
-
- assert(is_Call(call));
-
- addr = get_Call_ptr(call);
- if ((get_irn_op(addr) == op_SymConst) && (get_SymConst_kind (addr) == symconst_addr_ent)) {
- called_irg = get_entity_irg(get_SymConst_entity(addr));
- }
-
- return called_irg;
-}
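-/* Example: for a direct call Call(SymConst(&f), ...) this returns the graph
- of f (or NULL if f has no known body); for an indirect call through a
- function pointer it returns NULL. */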
-
-static void collect_calls(ir_node *call, void *env) {
- ir_node *addr;
-
- if (! is_Call(call)) return;
-
- addr = get_Call_ptr(call);
-
- if (get_irn_op(addr) == op_SymConst) {
- if (get_SymConst_kind(addr) == symconst_addr_ent) {
- ir_graph *called_irg = get_entity_irg(get_SymConst_entity(addr));
- inline_env_t *ienv = (inline_env_t *)env;
- if (called_irg && ienv->pos < MAX_INLINE) {
- /* The Call node calls a locally defined method. Remember to inline. */
- ienv->calls[ienv->pos++] = call;
- }
- }
- }
-}
-
-/**
- * Inlines all small methods at call sites where the called address comes
- * from a SymConst node that references the entity representing the called
- * method.
- * The size argument is a rough measure for the code size of the method:
- * Methods where the obstack containing the firm graph is smaller than
- * size are inlined.
- */
-void inline_small_irgs(ir_graph *irg, int size) {
- int i;
- ir_graph *rem = current_ir_graph;
- inline_env_t env /* = {0, NULL}*/;
-
- if (!(get_opt_optimize() && get_opt_inline())) return;
-
- current_ir_graph = irg;
- /* Handle graph state */
- assert(get_irg_phase_state(current_ir_graph) != phase_building);
- free_callee_info(current_ir_graph);
-
- /* Find Call nodes to inline.
- (We cannot inline during a walk of the graph, as inlining the same
- method several times changes the visited flag of the walked graph:
- after the first inlining the visited flag of the callee equals that of
- the caller. With the next inlining both are increased.) */
- env.pos = 0;
- irg_walk(get_irg_end(irg), NULL, collect_calls, &env);
-
- if ((env.pos > 0) && (env.pos < MAX_INLINE)) {
- /* There are calls to inline */
- collect_phiprojs(irg);
- for (i = 0; i < env.pos; i++) {
- ir_graph *callee;
- callee = get_entity_irg(get_SymConst_entity(get_Call_ptr(env.calls[i])));
- if (((_obstack_memory_used(callee->obst) - (int)obstack_room(callee->obst)) < size) ||
- (get_irg_inline_property(callee) == irg_inline_forced)) {
- inline_method(env.calls[i], callee);
- }
- }
- }
-
- current_ir_graph = rem;
-}
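-/* A minimal usage sketch (hypothetical driver code; the obstack-size
- threshold of 5000 bytes is an arbitrary example value):
-
- int i;
- for (i = 0; i < get_irp_n_irgs(); ++i)
- inline_small_irgs(get_irp_irg(i), 5000);
-*/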
-
-/**
- * Environment for inlining irgs.
- */
-typedef struct {
- int n_nodes; /**< Nodes in graph except Proj, Tuple, Sync, Start, End */
- int n_nodes_orig; /**< for statistics */
- eset *call_nodes; /**< All call nodes in this graph */
- int n_call_nodes;
- int n_call_nodes_orig; /**< for statistics */
- int n_callers; /**< Number of known graphs that call this graph. */
- int n_callers_orig; /**< for statistics */
-} inline_irg_env;
-
-/**
- * Allocate a new environment for inlining.
- */
-static inline_irg_env *new_inline_irg_env(void) {
- inline_irg_env *env = xmalloc(sizeof(*env));
- env->n_nodes = -2; /* do not count Start, End */
- env->n_nodes_orig = -2; /* do not count Start, End */
- env->call_nodes = eset_create();
- env->n_call_nodes = 0;
- env->n_call_nodes_orig = 0;
- env->n_callers = 0;
- env->n_callers_orig = 0;
- return env;
-}
-
-/**
- * destroy an environment for inlining.
- */
-static void free_inline_irg_env(inline_irg_env *env) {
- eset_destroy(env->call_nodes);
- free(env);
-}
-
-/**
- * post-walker: collect all calls in the inline-environment
- * of a graph and sum some statistics.
- */
-static void collect_calls2(ir_node *call, void *env) {
- inline_irg_env *x = (inline_irg_env *)env;
- ir_op *op = get_irn_op(call);
- ir_graph *callee;
-
- /* count meaningful nodes in irg */
- if (op != op_Proj && op != op_Tuple && op != op_Sync) {
- x->n_nodes++;
- x->n_nodes_orig++;
- }
-
- if (op != op_Call) return;
-
- /* collect all call nodes */
- eset_insert(x->call_nodes, call);
- x->n_call_nodes++;
- x->n_call_nodes_orig++;
-
- /* count all static callers */
- callee = get_call_called_irg(call);
- if (callee) {
- inline_irg_env *callee_env = get_irg_link(callee);
- callee_env->n_callers++;
- callee_env->n_callers_orig++;
- }
-}
-
-/**
- * Returns TRUE if the number of Call nodes in the irg's environment is 0,
- * hence this irg is a leaf.
- */
-INLINE static int is_leave(ir_graph *irg) {
- return (((inline_irg_env *)get_irg_link(irg))->n_call_nodes == 0);
-}
-
-/**
- * Returns TRUE if the number of nodes in the irg's environment is smaller than size.
- */
-INLINE static int is_smaller(ir_graph *callee, int size) {
- return (((inline_irg_env *)get_irg_link(callee))->n_nodes < size);
-}
-
-
-/*
- * Inlines small leaf methods at call sites where the called address comes
- * from a SymConst node that references the entity representing the called
- * method.
- * The size arguments are rough measures for the code size, counted in IR
- * nodes: no graph is grown beyond maxsize nodes; leaf methods smaller than
- * leavesize and other methods smaller than size are inlined.
- */
-void inline_leave_functions(int maxsize, int leavesize, int size) {
- inline_irg_env *env;
- int i, n_irgs = get_irp_n_irgs();
- ir_graph *rem = current_ir_graph;
- int did_inline = 1;
-
- if (!(get_opt_optimize() && get_opt_inline())) return;
-
- /* extend all irgs by a temporary data structure for inlining. */
- for (i = 0; i < n_irgs; ++i)
- set_irg_link(get_irp_irg(i), new_inline_irg_env());
-
- /* Precompute information in temporary data structure. */
- for (i = 0; i < n_irgs; ++i) {
- current_ir_graph = get_irp_irg(i);
- assert(get_irg_phase_state(current_ir_graph) != phase_building);
- free_callee_info(current_ir_graph);
-
- irg_walk(get_irg_end(current_ir_graph), NULL, collect_calls2,
- get_irg_link(current_ir_graph));
- }
-
- /* -- and now inline. -- */
-
- /* Inline leaves recursively -- we might construct new leaves. */
- while (did_inline) {
- did_inline = 0;
-
- for (i = 0; i < n_irgs; ++i) {
- ir_node *call;
- int phiproj_computed = 0;
-
- current_ir_graph = get_irp_irg(i);
- env = (inline_irg_env *)get_irg_link(current_ir_graph);
-
- for (call = eset_first(env->call_nodes); call; call = eset_next(env->call_nodes)) {
- ir_graph *callee;
-
- if (get_irn_op(call) == op_Tuple) continue; /* We already have inlined this call. */
- callee = get_call_called_irg(call);
-
- if (env->n_nodes > maxsize) continue; /* could also break */
-
- if (callee && (is_leave(callee) && is_smaller(callee, leavesize))) {
- if (!phiproj_computed) {
- phiproj_computed = 1;
- collect_phiprojs(current_ir_graph);
- }
- did_inline = inline_method(call, callee);
-
- if (did_inline) {
- /* Do some statistics */
- inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
- env->n_call_nodes --;
- env->n_nodes += callee_env->n_nodes;
- callee_env->n_callers--;
- }
- }
- }
- }
- }
-
- /* inline other small functions. */
- for (i = 0; i < n_irgs; ++i) {
- ir_node *call;
- eset *walkset;
- int phiproj_computed = 0;
-
- current_ir_graph = get_irp_irg(i);
- env = (inline_irg_env *)get_irg_link(current_ir_graph);
-
- /* We cannot walk and change a set, nor remove elements from it,
- so we rebuild it. */
- walkset = env->call_nodes;
- env->call_nodes = eset_create();
- for (call = eset_first(walkset); call; call = eset_next(walkset)) {
- ir_graph *callee;
-
- if (get_irn_op(call) == op_Tuple) continue; /* We already inlined. */
- callee = get_call_called_irg(call);
-
- if (callee &&
- ((is_smaller(callee, size) && (env->n_nodes < maxsize)) || /* small function */
- (get_irg_inline_property(callee) == irg_inline_forced))) {
- if (!phiproj_computed) {
- phiproj_computed = 1;
- collect_phiprojs(current_ir_graph);
- }
- if (inline_method(call, callee)) {
- inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
- env->n_call_nodes--;
- eset_insert_all(env->call_nodes, callee_env->call_nodes); /* @@@ ??? These are the wrong nodes!? Not the copied ones. */
- env->n_call_nodes += callee_env->n_call_nodes;
- env->n_nodes += callee_env->n_nodes;
- callee_env->n_callers--;
- }
- } else {
- eset_insert(env->call_nodes, call);
- }
- }
- eset_destroy(walkset);
- }
-
- for (i = 0; i < n_irgs; ++i) {
- current_ir_graph = get_irp_irg(i);
-#if 0
- env = (inline_irg_env *)get_irg_link(current_ir_graph);
- if ((env->n_call_nodes_orig != env->n_call_nodes) ||
- (env->n_callers_orig != env->n_callers))
- printf("Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
- env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
- env->n_callers_orig, env->n_callers,
- get_entity_name(get_irg_entity(current_ir_graph)));
-#endif
- free_inline_irg_env((inline_irg_env *)get_irg_link(current_ir_graph));
- }
-
- current_ir_graph = rem;
-}
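-/* A minimal usage sketch (the thresholds are arbitrary example values,
- measured in IR nodes): let no graph grow beyond 500 nodes, inline
- leaves of up to 80 nodes and other small methods of up to 30 nodes:
-
- inline_leave_functions(500, 80, 30);
-*/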
-
-/*******************************************************************/
-/* Code Placement. Pins all floating nodes to a block where they */
-/* will be executed only if needed. */
-/*******************************************************************/
-
-/**
- * Returns non-zero if a block is not reachable from Start.
- *
- * @param block the block to test
- */
-static int
-is_Block_unreachable(ir_node *block) {
- return is_Block_dead(block) || get_Block_dom_depth(block) < 0;
-}
-
-/**
- * Find the earliest correct block for N. --- Place N into the
- * same Block as its dominance-deepest Input.
- *
- * We have to avoid calls to get_nodes_block() here
- * because the graph is floating.
- *
- * move_out_of_loops() expects that place_floats_early() has placed
- * all "living" nodes into a living block. That's why we must
- * move nodes in dead blocks with "live" successors into a valid
- * block.
- * We move them just into the same block as their successor (or,
- * in case of a Phi, into the effective use block). For Phi successors,
- * this may still be a dead block, but then there is no real use, as
- * the control flow will be dead later.
- */
-static void
-place_floats_early(ir_node *n, pdeq *worklist)
-{
- int i, irn_arity;
-
- /* we must not run into an infinite loop */
- assert(irn_not_visited(n));
- mark_irn_visited(n);
-
- /* Place floating nodes. */
- if (get_irn_pinned(n) == op_pin_state_floats) {
- ir_node *curr_block = get_irn_n(n, -1);
- int in_dead_block = is_Block_unreachable(curr_block);
- int depth = 0;
- ir_node *b = NULL; /* The block to place this node in */
-
- assert(get_irn_op(n) != op_Block);
-
- if ((get_irn_op(n) == op_Const) ||
- (get_irn_op(n) == op_SymConst) ||
- (is_Bad(n)) ||
- (get_irn_op(n) == op_Unknown)) {
- /* These nodes will not be placed by the loop below. */
- b = get_irg_start_block(current_ir_graph);
- depth = 1;
- }
-
- /* find the block for this node. */
- irn_arity = get_irn_arity(n);
- for (i = 0; i < irn_arity; i++) {
- ir_node *pred = get_irn_n(n, i);
- ir_node *pred_block;
-
- if ((irn_not_visited(pred))
- && (get_irn_pinned(pred) == op_pin_state_floats)) {
-
- /*
- * If the current node is NOT in a dead block, but one of its
- * predecessors is, we must move the predecessor to a live block.
- * Such a thing can happen if global CSE chose a node from a dead block.
- * We simply move it to our block.
- * Note that neither Phi nor End nodes are floating, so we don't
- * need to handle them here.
- */
- if (! in_dead_block) {
- if (get_irn_pinned(pred) == op_pin_state_floats &&
- is_Block_unreachable(get_irn_n(pred, -1)))
- set_nodes_block(pred, curr_block);
- }
- place_floats_early(pred, worklist);
- }
-
- /*
- * A node in the Bad block must stay in the Bad block,
- * so don't compute a new block for it.
- */
- if (in_dead_block)
- continue;
-
- /* Because every loop contains at least one op_pin_state_pinned node, all
- our inputs are now either op_pin_state_pinned or place_early() has already
- been finished on them. We do not have any unfinished inputs! */
- pred_block = get_irn_n(pred, -1);
- if ((!is_Block_dead(pred_block)) &&
- (get_Block_dom_depth(pred_block) > depth)) {
- b = pred_block;
- depth = get_Block_dom_depth(pred_block);
- }
- /* Avoid placing the node in the Start block. */
- if ((depth == 1) && (get_Block_dom_depth(get_irn_n(n, -1)) > 1)) {
- b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0);
- assert(b != get_irg_start_block(current_ir_graph));
- depth = 2;
- }
- }
- if (b)
- set_nodes_block(n, b);
- }
-
- /*
- * Add predecessors of non-floating nodes and non-floating predecessors
- * of floating nodes to the worklist, and fix their blocks if they are in a dead block.
- */
- irn_arity = get_irn_arity(n);
-
- if (get_irn_op(n) == op_End) {
- /*
- * Simplest case: End node. Predecessors are keep-alives,
- * no need to move them out of a dead block.
- */
- for (i = -1; i < irn_arity; ++i) {
- ir_node *pred = get_irn_n(n, i);
- if (irn_not_visited(pred))
- pdeq_putr(worklist, pred);
- }
- }
- else if (is_Block(n)) {
- /*
- * Blocks: Predecessors are control flow, no need to move
- * them out of a dead block.
- */
- for (i = irn_arity - 1; i >= 0; --i) {
- ir_node *pred = get_irn_n(n, i);
- if (irn_not_visited(pred))
- pdeq_putr(worklist, pred);
- }
- }
- else if (is_Phi(n)) {
- ir_node *pred;
- ir_node *curr_block = get_irn_n(n, -1);
- int in_dead_block = is_Block_unreachable(curr_block);
-
- /*
- * Phi nodes: move predecessors from dead blocks into the effective use
- * block of the Phi input if the Phi is not in a dead block.
- */
- pred = get_irn_n(n, -1);
- if (irn_not_visited(pred))
- pdeq_putr(worklist, pred);
-
- for (i = irn_arity - 1; i >= 0; --i) {
- ir_node *pred = get_irn_n(n, i);
-
- if (irn_not_visited(pred)) {
- if (! in_dead_block &&
- get_irn_pinned(pred) == op_pin_state_floats &&
- is_Block_unreachable(get_irn_n(pred, -1))) {
- set_nodes_block(pred, get_Block_cfgpred_block(curr_block, i));
- }
- pdeq_putr(worklist, pred);
- }
- }
- }
- else {
- ir_node *pred;
- ir_node *curr_block = get_irn_n(n, -1);
- int in_dead_block = is_Block_unreachable(curr_block);
-
- /*
- * All other nodes: move predecessors from dead blocks into this node's block.
- */
- pred = get_irn_n(n, -1);
- if (irn_not_visited(pred))
- pdeq_putr(worklist, pred);
-
- for (i = irn_arity - 1; i >= 0; --i) {
- ir_node *pred = get_irn_n(n, i);
-
- if (irn_not_visited(pred)) {
- if (! in_dead_block &&
- get_irn_pinned(pred) == op_pin_state_floats &&
- is_Block_unreachable(get_irn_n(pred, -1))) {
- set_nodes_block(pred, curr_block);
- }
- pdeq_putr(worklist, pred);
- }
- }
- }
-}
-
-/**
- * Floating nodes form subgraphs that begin at nodes such as Const, Load,
- * Start and Call and that end at op_pin_state_pinned nodes such as Store and
- * Call. place_early() places all floating nodes reachable from its argument
- * through floating nodes and adds all beginnings at op_pin_state_pinned
- * nodes to the worklist.
- */
-static INLINE void place_early(pdeq *worklist) {
- assert(worklist);
- inc_irg_visited(current_ir_graph);
-
- /* this inits the worklist */
- place_floats_early(get_irg_end(current_ir_graph), worklist);
-
- /* Work the content of the worklist. */
- while (!pdeq_empty(worklist)) {
- ir_node *n = pdeq_getl(worklist);
- if (irn_not_visited(n))
- place_floats_early(n, worklist);
- }
-
- set_irg_outs_inconsistent(current_ir_graph);
- set_irg_pinned(current_ir_graph, op_pin_state_pinned);
-}
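-/* Note: after place_early() all nodes have a block again, so the graph is
- marked op_pin_state_pinned; the out edges are marked inconsistent because
- the blocks of floating nodes may have changed. */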
-
-/**
- * Compute the deepest common ancestor of block and dca.
- */
-static ir_node *calc_dca(ir_node *dca, ir_node *block)
-{
- assert(block);
-
- /* we do not want to place nodes in dead blocks */
- if (is_Block_dead(block))
- return dca;
-
- /* We found a first legal placement. */
- if (!dca) return block;
-
- /* Find a placement that dominates both dca and block. */
- while (get_Block_dom_depth(block) > get_Block_dom_depth(dca))
- block = get_Block_idom(block);
-
- while (get_Block_dom_depth(dca) > get_Block_dom_depth(block)) {
- dca = get_Block_idom(dca);
- }
-
- while (block != dca) {
- block = get_Block_idom(block);
- dca = get_Block_idom(dca);
- }
-
- return dca;
-}
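-/* Example: in a diamond CFG A -> {B, C} -> D, calc_dca(B, C) first equalizes
- the dominator depths of both blocks and then walks both up in lockstep,
- returning the common dominator A. */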
-
-/** Deepest common dominance ancestor of DCA and the consumer block of PRODUCER.
- * I.e., the returned block is the deepest one in which we might place PRODUCER.
- * A data flow edge points from producer to consumer.
- */
-static ir_node *
-consumer_dom_dca(ir_node *dca, ir_node *consumer, ir_node *producer)
-{
- ir_node *block = NULL;
-
- /* Compute the latest block into which we can place a node so that it is
- before consumer. */
- if (get_irn_op(consumer) == op_Phi) {
- /* our consumer is a Phi-node, the effective use is in all those
- blocks through which the Phi-node reaches producer */
- int i, irn_arity;
- ir_node *phi_block = get_nodes_block(consumer);
- irn_arity = get_irn_arity(consumer);
-
- for (i = 0; i < irn_arity; i++) {
- if (get_irn_n(consumer, i) == producer) {
- ir_node *new_block = get_nodes_block(get_Block_cfgpred(phi_block, i));
-
- if (! is_Block_unreachable(new_block))
- block = calc_dca(block, new_block);
- }
- }
-
- if (! block)
- block = get_irn_n(producer, -1);
- }
- else {
- assert(is_no_Block(consumer));
- block = get_nodes_block(consumer);
- }
-
- /* Compute the deepest common ancestor of block and dca. */
- return calc_dca(dca, block);
-}
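-/* Example: if the consumer is a Phi in block D of a diamond A -> {B, C} -> D
- and the producer feeds the Phi operand belonging to the edge from B, the
- effective use is in B, so the producer may be placed in B or any dominator
- of B. */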
-
-/* FIXME: the name clashes here with the function from ana/field_temperature.c
- * please rename. */
-static INLINE int get_irn_loop_depth(ir_node *n) {
- return get_loop_depth(get_irn_loop(n));
-}
-
-/**
- * Move n to a block with less loop depth than its current block. The
- * new block must be dominated by early.
- *
- * @param n the node that should be moved
- * @param early the earliest block we can move n to
- */
-static void
-move_out_of_loops (ir_node *n, ir_node *early)
-{
- ir_node *best, *dca;
- assert(n && early);
-
- /* Find the region deepest in the dominator tree dominating
- dca with the least loop nesting depth, but still dominated
- by our early placement. */
- dca = get_nodes_block(n);
-
- best = dca;
- while (dca != early) {
- dca = get_Block_idom(dca);
- if (!dca || is_Bad(dca)) break; /* may be Bad if not reachable from Start */
- if (get_irn_loop_depth(dca) < get_irn_loop_depth(best)) {
- best = dca;
- }
- }
- if (best != get_nodes_block(n)) {
- /* debug output
- printf("Moving out of loop: "); DDMN(n);
- printf(" Outermost block: "); DDMN(early);
- printf(" Best block: "); DDMN(best);
- printf(" Innermost block: "); DDMN(get_nodes_block(n));
- */
- set_nodes_block(n, best);
- }
-}
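-/* Example: a loop-invariant computation whose latest placement lies inside a
- loop body is moved up the dominator tree, e.g. into the block before the
- loop header, as long as its early placement still dominates the new block. */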
-
-/**
- * Find the latest legal block for N and place N into the
- * `optimal' Block between the latest and earliest legal block.
- * The `optimal' block is the dominance-deepest block of those
- * with the least loop-nesting-depth. This places N out of as many
- * loops as possible and then makes it as control dependent as
- * possible.
- */
-static void
-place_floats_late(ir_node *n, pdeq *worklist)
-{
- int i;
- ir_node *early_blk;
-
- assert(irn_not_visited(n)); /* no multiple placement */
-
- mark_irn_visited(n);
-
- /* no need to place block nodes, control nodes are already placed. */
- if ((get_irn_op(n) != op_Block) &&
- (!is_cfop(n)) &&
- (get_irn_mode(n) != mode_X)) {
- /* Remember the early_blk placement of this node so we move it
- out of loops no further than early_blk. */
- early_blk = get_irn_n(n, -1);
-
- /*
- * BEWARE: Here we also get code that is live but
- * was in a dead block. If the node is live but, because
- * of CSE, in a dead block, we might still need it.
- */
-
- /* Assure that our users are all placed, except the Phi-nodes.
- --- Each data flow cycle contains at least one Phi-node. We
- have to break the `user has to be placed before the
- producer' dependence cycle and the Phi-nodes are the
- place to do so, because we need to base our placement on the
- final region of our users, which is OK with Phi-nodes, as they
- are op_pin_state_pinned, and they never have to be placed after a
- producer of one of their inputs in the same block anyway. */
- for (i = get_irn_n_outs(n) - 1; i >= 0; --i) {
- ir_node *succ = get_irn_out(n, i);
- if (irn_not_visited(succ) && (get_irn_op(succ) != op_Phi))
- place_floats_late(succ, worklist);
- }
-
- if (! is_Block_dead(early_blk)) {
- /* only move things that were not dead */
-
- /* We have to determine the final block of this node... except for
- constants. */
- if ((get_irn_pinned(n) == op_pin_state_floats) &&
- (get_irn_op(n) != op_Const) &&
- (get_irn_op(n) != op_SymConst)) {
- ir_node *dca = NULL; /* deepest common ancestor in the
- dominator tree of all nodes'
- blocks depending on us; our final
- placement has to dominate DCA. */
- for (i = get_irn_n_outs(n) - 1; i >= 0; --i) {
- ir_node *succ = get_irn_out(n, i);
- ir_node *succ_blk;
-
- if (get_irn_op(succ) == op_End) {
- /*
- * This consumer is the End node, a keep alive edge.
- * This is not a real consumer, so we ignore it
- */
- continue;
- }
-
- /* ignore if succ is in dead code */
- succ_blk = get_irn_n(succ, -1);
- if (is_Block_unreachable(succ_blk))
- continue;
- dca = consumer_dom_dca(dca, succ, n);
- }
- if (dca) {
- set_nodes_block(n, dca);
- move_out_of_loops(n, early_blk);
- }
- }
- }
- }
-
- /* Add all unvisited successors to the worklist. (Successors of floating
- nodes are placed already and therefore are marked.) */
- for (i = 0; i < get_irn_n_outs(n); i++) {
- ir_node *succ = get_irn_out(n, i);
- if (irn_not_visited(succ)) {
- pdeq_putr(worklist, succ);
- }
- }
-}
-
-static INLINE void place_late(pdeq *worklist) {
- assert(worklist);
- inc_irg_visited(current_ir_graph);
-
- /* This fills the worklist initially. */
- place_floats_late(get_irg_start_block(current_ir_graph), worklist);
-
- /* And now empty the worklist again... */
- while (!pdeq_empty(worklist)) {
- ir_node *n = pdeq_getl(worklist);
- if (irn_not_visited(n))
- place_floats_late(n, worklist);
- }