-/* We use the block_visited flag to mark that we have computed the
- number of useful predecessors for this block.
- Further we encode the new arity in this flag in the old blocks.
- Remembering the arity is useful, as it saves a lot of pointer
- accesses. This function is called for all Phi and Block nodes
- in a Block. */
+/**
+ * We use the block_visited flag to mark that we have computed the
+ * number of useful predecessors for this block.
+ * Further we encode the new arity in this flag in the old blocks.
+ * Remembering the arity is useful, as it saves a lot of pointer
+ * accesses. This function is called for all Phi and Block nodes
+ * in a Block.
+ */
-/* Copies the node to the new obstack. The Ins of the new node point to
- the predecessors on the old obstack. For block/phi nodes not all
- predecessors might be copied. n->link points to the new node.
- For Phi and Block nodes the function allocates in-arrays with an arity
- only for useful predecessors. The arity is determined by counting
- the non-bad predecessors of the block. */
+/**
+ * Copies the node to the new obstack. The Ins of the new node point to
+ * the predecessors on the old obstack. For block/phi nodes not all
+ * predecessors might be copied. n->link points to the new node.
+ * For Phi and Block nodes the function allocates in-arrays with an arity
+ * only for useful predecessors. The arity is determined by counting
+ * the non-bad predecessors of the block.
+ */
- set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
- /*if (is_backedge(n, i)) set_backedge(nn, j);*/
- j++;
+ set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
+ /*if (is_backedge(n, i)) set_backedge(nn, j);*/
+ j++;
- (get_Block_n_cfgpreds(nn) == 1) &&
- (get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp))
- exchange(nn, get_nodes_Block(get_Block_cfgpred(nn, 0)));
+ (get_Block_n_cfgpreds(nn) == 1) &&
+ (get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp)) {
+ ir_node *old = get_nodes_Block(get_Block_cfgpred(nn, 0));
+ if (nn == old) {
+        /* Jmp jumps into the block it is in -- handle this self cycle. */
+ assert(is_Bad(get_new_node(get_irg_bad(current_ir_graph))));
+ exchange(nn, get_new_node(get_irg_bad(current_ir_graph)));
+ } else {
+ exchange(nn, old);
+ }
+ }
- set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
- /*if (is_backedge(n, i)) set_backedge(nn, j);*/
- j++;
+ set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
+ /*if (is_backedge(n, i)) set_backedge(nn, j);*/
+ j++;
ir_node *ka; /* keep alive */
int i, irn_arity;
oe = get_irg_end(current_ir_graph);
/* copy the end node by hand, allocate dynamic in array! */
ne = new_ir_node(get_irn_dbg_info(oe),
/* copy the live nodes */
irg_walk(get_nodes_Block(oe), copy_node, copy_preds, NULL);
/* copy_preds for the end node ... */
set_nodes_Block(ne, get_new_node(get_nodes_Block(oe)));
/* First pick the block nodes that are not marked and walk them.  We must pick
   these first, as otherwise we will overlook blocks reachable from Phis. */
irn_arity = get_irn_arity(oe);
for (i = 0; i < irn_arity; i++) {
/* We must keep the block alive and copy everything reachable */
set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
irg_walk(ka, copy_node, copy_preds, NULL);
- /* We didn't copy the Phi yet. */
- set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
- irg_walk(ka, copy_node, copy_preds, NULL);
+ /* We didn't copy the Phi yet. */
+ set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
+ irg_walk(ka, copy_node, copy_preds, NULL);
- set_irn_link(get_irg_frame (current_ir_graph), NULL);
- set_irn_link(get_irg_globals(current_ir_graph), NULL);
- set_irn_link(get_irg_args (current_ir_graph), NULL);
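+  /* Clear the links of these nodes so we can check below whether they were
+     reached by the graph copy and copy them by hand if not. */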
+ set_irn_link(get_irg_frame (current_ir_graph), NULL);
+ set_irn_link(get_irg_globals (current_ir_graph), NULL);
+ set_irn_link(get_irg_args (current_ir_graph), NULL);
+ set_irn_link(get_irg_initial_mem(current_ir_graph), NULL);
if (get_irn_link(get_irg_args(current_ir_graph)) == NULL) {
copy_node (get_irg_args(current_ir_graph), NULL);
copy_preds(get_irg_args(current_ir_graph), NULL);
}
- get_new_node(get_irg_start_block(current_ir_graph)));
- set_irg_frame (current_ir_graph, get_new_node(get_irg_frame(current_ir_graph)));
- set_irg_globals(current_ir_graph, get_new_node(get_irg_globals(current_ir_graph)));
- set_irg_args (current_ir_graph, get_new_node(get_irg_args(current_ir_graph)));
+ get_new_node(get_irg_start_block(current_ir_graph)));
+ set_irg_frame (current_ir_graph, get_new_node(get_irg_frame(current_ir_graph)));
+ set_irg_globals (current_ir_graph, get_new_node(get_irg_globals(current_ir_graph)));
+ set_irg_initial_mem(current_ir_graph, get_new_node(get_irg_initial_mem(current_ir_graph)));
+ set_irg_args (current_ir_graph, get_new_node(get_irg_args(current_ir_graph)));
+
if (get_irn_link(get_irg_bad(current_ir_graph)) == NULL) {
copy_node(get_irg_bad(current_ir_graph), NULL);
copy_preds(get_irg_bad(current_ir_graph), NULL);
}
set_irg_bad(current_ir_graph, get_new_node(get_irg_bad(current_ir_graph)));
-/* Copies all reachable nodes to a new obstack. Removes bad inputs
- from block nodes and the corresponding inputs from Phi nodes.
- Merges single exit blocks with single entry blocks and removes
- 1-input Phis.
- Adds all new nodes to a new hash table for cse. Does not
- perform cse, so the hash table might contain common subexpressions. */
-/* Amroq call this emigrate() */
+/**
+ * Copies all reachable nodes to a new obstack. Removes bad inputs
+ * from block nodes and the corresponding inputs from Phi nodes.
+ * Merges single exit blocks with single entry blocks and removes
+ * 1-input Phis.
+ * Adds all new nodes to a new hash table for cse. Does not
+ * perform cse, so the hash table might contain common subexpressions.
+ */
-/* Relink bad predeseccors of a block and store the old in array to the
- link field. This function is called by relink_bad_predecessors().
- The array of link field starts with the block operand at position 0.
- If block has bad predecessors, create a new in array without bad preds.
- Otherwise let in array untouched. */
+/**
+ * Relink bad predecessors of a block and store the old in array in the
+ * link field. This function is called by relink_bad_predecessors().
+ * The array in the link field starts with the block operand at position 0.
+ * If the block has bad predecessors, create a new in array without bad preds.
+ * Otherwise leave the in array untouched.
+ */
static void relink_bad_block_predecessors(ir_node *n, void *env) {
ir_node **new_in, *irn;
int i, new_irn_n, old_irn_arity, new_irn_arity = 0;
-/* Relinks Bad predecesors from Bocks and Phis called by walker
- remove_bad_predecesors(). If n is a Block, call
- relink_bad_block_redecessors(). If n is a Phinode, call also the relinking
- function of Phi's Block. If this block has bad predecessors, relink preds
- of the Phinode. */
+/*
+ * Relinks Bad predecessors of Blocks and Phis; called by the walker in
+ * remove_bad_predecessors(). If n is a Block, call
+ * relink_bad_block_predecessors(). If n is a Phi node, also call the
+ * relinking function of the Phi's Block. If this block has bad
+ * predecessors, relink the preds of the Phi node.
+ */
static void relink_bad_predecessors(ir_node *n, void *env) {
ir_node *block, **old_in;
int i, old_irn_arity, new_irn_arity;
-/* Removes Bad Bad predecesors from Blocks and the corresponding
- inputs to Phi nodes as in dead_node_elimination but without
- copying the graph.
- On walking up set the link field to NULL, on walking down call
- relink_bad_predecessors() (This function stores the old in array
- to the link field and sets a new in array if arity of predecessors
- changes) */
+/**
+ * Removes Bad predecessors from Blocks and the corresponding
+ * inputs to Phi nodes as in dead_node_elimination but without
+ * copying the graph.
+ * On walking up set the link field to NULL, on walking down call
+ * relink_bad_predecessors() (this function stores the old in array
+ * in the link field and sets a new in array if the arity of the
+ * predecessors changes).
+ */
* inlining but not for dead node elimination.
*
* Copies the node by calling copy_node and then updates the entity if
* it's a local one. env must be a pointer of the frame type of the
* inlined procedure. The new entities must be in the link field of
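+
+/**
+ * Walker: if a node is a Proj with projection number pn_Start_P_value_arg_base,
+ * the graph takes the address of one of its value arguments; in that case
+ * reset the flag passed in env to 0.
+ */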
+static void find_addr(ir_node *node, void *env)
+{
+ if (get_irn_opcode(node) == iro_Proj) {
+ if (get_Proj_proj(node) == pn_Start_P_value_arg_base)
+ *(int *)env = 0;
+ }
+}
+
+/*
+ * currently, we cannot inline two cases:
+ * - call with compound arguments
+ * - graphs that take the address of a parameter
+ *
+ * check these conditions here
+ */
+static int can_inline(ir_node *call, ir_graph *called_graph)
+{
+ type *call_type = get_Call_type(call);
+ int params, ress, i, res;
+
+ assert(is_method_type(call_type));
+
+ params = get_method_n_params(call_type);
+ ress = get_method_n_ress(call_type);
+
+ /* check params */
+ for (i = 0; i < params; ++i) {
+ type *p_type = get_method_param_type(call_type, i);
+
+ if (is_compound_type(p_type))
+ return 0;
+ }
-void inline_method(ir_node *call, ir_graph *called_graph) {
+ /* check res */
+ for (i = 0; i < ress; ++i) {
+ type *r_type = get_method_res_type(call_type, i);
+
+ if (is_compound_type(r_type))
+ return 0;
+ }
+
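+  /* finally walk the called graph: find_addr() resets res to 0 if the
+     address of a value argument is taken anywhere in the graph */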
+ res = 1;
+ irg_walk_graph(called_graph, find_addr, NULL, &res);
+
+ return res;
+}
+
+int inline_method(ir_node *call, ir_graph *called_graph) {
ir_node **cf_pred;
ir_node *ret, *phi;
int arity, n_ret, n_exc, n_res, i, j, rem_opt, irn_arity;
+ irg_inline_property prop = get_irg_inline_property(called_graph);
+
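+  /* A forced graph is always inlined; otherwise bail out if optimization
+     or inlining is disabled, or inlining this graph is forbidden. */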
+  if ((prop != irg_inline_forced) &&
+      (!get_opt_optimize() || !get_opt_inline() || (prop == irg_inline_forbidden)))
+    return 0;
+
+ /*
+ * currently, we cannot inline two cases:
+ * - call with compound arguments
+ * - graphs that take the address of a parameter
+ */
+ if (! can_inline(call, called_graph))
+ return 0;
- in[0] = new_Jmp();
- in[1] = get_Call_mem(call);
- in[2] = get_irg_frame(current_ir_graph);
- in[3] = get_irg_globals(current_ir_graph);
- in[4] = new_Tuple (get_Call_n_params(call), get_Call_param_arr(call));
+ in[pn_Start_X_initial_exec] = new_Jmp();
+ in[pn_Start_M] = get_Call_mem(call);
+ in[pn_Start_P_frame_base] = get_irg_frame(current_ir_graph);
+ in[pn_Start_P_globals] = get_irg_globals(current_ir_graph);
+ in[pn_Start_T_args] = new_Tuple(get_Call_n_params(call), get_Call_param_arr(call));
+ /* in[pn_Start_P_value_arg_base] = ??? */
- set_irn_visited(get_irg_start(called_graph),
- get_irg_visited(current_ir_graph));
- set_irn_link(get_irg_start_block(called_graph),
- get_nodes_Block(pre_call));
- set_irn_visited(get_irg_start_block(called_graph),
- get_irg_visited(current_ir_graph));
+ set_irn_visited(get_irg_start(called_graph), get_irg_visited(current_ir_graph));
+ set_irn_link(get_irg_start_block(called_graph), get_nodes_Block(pre_call));
+ set_irn_visited(get_irg_start_block(called_graph), get_irg_visited(current_ir_graph));
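+  /* Pre-mark the Bad node of the called graph as copied and let its link
+     point to the caller's Bad node, so references to it are redirected
+     instead of the node being copied. */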
+ set_irn_link(get_irg_bad(called_graph), get_irg_bad(current_ir_graph));
+ set_irn_visited(get_irg_bad(called_graph), get_irg_visited(current_ir_graph));
entities. */
/* @@@ endless loops are not copied!! -- they should be, I think... */
irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
- -1: Block of Tuple.
- 0: Phi of all Memories of Return statements.
- 1: Jmp from new Block that merges the control flow from all exception
- predecessors of the old end block.
- 2: Tuple of all arguments.
- 3: Phi of Exception memories.
+ -1: Block of Tuple.
+ 0: Phi of all Memories of Return statements.
+ 1: Jmp from new Block that merges the control flow from all exception
+ predecessors of the old end block.
+ 2: Tuple of all arguments.
+ 3: Phi of Exception memories.
- ret = get_irn_n(end_bl, i);
- if (get_irn_op(ret) == op_Return) {
- cf_pred[n_ret] = get_Return_res(ret, j);
- n_ret++;
- }
+ ret = get_irn_n(end_bl, i);
+ if (get_irn_op(ret) == op_Return) {
+ cf_pred[n_ret] = get_Return_res(ret, j);
+ n_ret++;
+ }
}
phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
res_pred[j] = phi;
/* Conserve Phi-list for further inlinings -- but might be optimized */
if (get_nodes_Block(phi) == post_bl) {
- ir_node *ret;
- ret = skip_Proj(get_irn_n(end_bl, i));
- if (get_irn_op(ret) == op_Call) {
- cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 3);
- n_exc++;
- } else if (is_fragile_op(ret)) {
- /* We rely that all cfops have the memory output at the same position. */
- cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 0);
- n_exc++;
- } else if (get_irn_op(ret) == op_Raise) {
- cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 1);
- n_exc++;
- }
+ ir_node *ret;
+ ret = skip_Proj(get_irn_n(end_bl, i));
+ if (get_irn_op(ret) == op_Call) {
+ cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 3);
+ n_exc++;
+ } else if (is_fragile_op(ret)) {
+      /* We rely on all cfops having the memory output at the same position. */
+ cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 0);
+ n_exc++;
+ } else if (get_irn_op(ret) == op_Raise) {
+ cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 1);
+ n_exc++;
+ }
- set_Tuple_pred(call, 1, new_Bad());
- set_Tuple_pred(call, 3, new_Bad());
+ set_Tuple_pred(call, pn_Call_X_except, new_Bad());
+ set_Tuple_pred(call, pn_Call_M_except, new_Bad());
for (i = 0; i < n_exc; ++i)
end_preds[main_end_bl_arity + i] = cf_pred[i];
set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
- set_Tuple_pred(call, 1, new_Bad());
- set_Tuple_pred(call, 3, new_Bad());
+ set_Tuple_pred(call, pn_Call_X_except, new_Bad());
+ set_Tuple_pred(call, pn_Call_M_except, new_Bad());
- cf_op = get_Proj_pred(cf_op);
- if ((get_irn_op(cf_op) == op_Tuple) && (cf_op == call)) {
- // There are unoptimized tuples from inlineing before when no exc
- assert(get_Proj_proj(get_Block_cfgpred(end_bl, i)) == pn_Call_X_except);
- cf_op = get_Tuple_pred(cf_op, pn_Call_X_except);
- assert(get_irn_op(cf_op) == op_Jmp);
- break;
- }
+ cf_op = get_Proj_pred(cf_op);
+ if ((get_irn_op(cf_op) == op_Tuple) && (cf_op == call)) {
+      /* There are unoptimized Tuples from earlier inlinings when there was no exception. */
+ assert(get_Proj_proj(get_Block_cfgpred(end_bl, i)) == pn_Call_X_except);
+ cf_op = get_Tuple_pred(cf_op, pn_Call_X_except);
+ assert(get_irn_op(cf_op) == op_Jmp);
+ break;
+ }
-/* Inlines all small methods at call sites where the called address comes
- from a Const node that references the entity representing the called
- method.
- The size argument is a rough measure for the code size of the method:
- Methods where the obstack containing the firm graph is smaller than
- size are inlined. */
+/**
+ * Inlines all small methods at call sites where the called address comes
+ * from a Const node that references the entity representing the called
+ * method.
+ * The size argument is a rough measure for the code size of the method:
+ * Methods where the obstack containing the firm graph is smaller than
+ * size are inlined.
+ */
/* Find Call nodes to inline.
   (We cannot inline during a walk of the graph, as inlining the same
   method several times changes the visited flag of the walked graph:
   after the first inlining the visited flag of the callee equals that of
   the caller.  With the next inlining both are increased.) */
- int n_nodes; /* Nodes in graph except Id, Tuple, Proj, Start, End */
- int n_nodes_orig; /* for statistics */
- eset *call_nodes; /* All call nodes in this graph */
+ int n_nodes; /**< Nodes in graph except Id, Tuple, Proj, Start, End */
+ int n_nodes_orig; /**< for statistics */
+ eset *call_nodes; /**< All call nodes in this graph */
-/* Inlines small leave methods at call sites where the called address comes
- from a Const node that references the entity representing the called
- method.
- The size argument is a rough measure for the code size of the method:
- Methods where the obstack containing the firm graph is smaller than
- size are inlined. */
+/*
+ * Inlines small leave methods at call sites where the called address comes
+ * from a Const node that references the entity representing the called
+ * method.
+ * The size argument is a rough measure for the code size of the method:
+ * Methods where the obstack containing the firm graph is smaller than
+ * size are inlined.
+ */
void inline_leave_functions(int maxsize, int leavesize, int size) {
inline_irg_env *env;
int i, n_irgs = get_irp_n_irgs();
ir_graph *rem = current_ir_graph;
int did_inline = 1;
-// printf(" %s: Inlineing %s.\n", get_entity_name(get_irg_entity(current_ir_graph)),
-// get_entity_name(get_irg_entity(callee)));
- inline_method(call, callee);
- did_inline = 1;
- env->n_call_nodes--;
- eset_insert_all(env->call_nodes, callee_env->call_nodes);
- env->n_call_nodes += callee_env->n_call_nodes;
- env->n_nodes += callee_env->n_nodes;
- callee_env->n_callers--;
+/* printf(" %s: Inlineing %s.\n", get_entity_name(get_irg_entity(current_ir_graph)), */
+/* get_entity_name(get_irg_entity(callee))); */
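+        /* inline_method() returns non-zero only if the call was actually
+           inlined; update the inlining statistics only in that case. */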
+ if (inline_method(call, callee)) {
+ did_inline = 1;
+ env->n_call_nodes--;
+ eset_insert_all(env->call_nodes, callee_env->call_nodes);
+ env->n_call_nodes += callee_env->n_call_nodes;
+ env->n_nodes += callee_env->n_nodes;
+ callee_env->n_callers--;
+ }
-// printf(" %s: Inlineing %s.\n", get_entity_name(get_irg_entity(current_ir_graph)),
-// get_entity_name(get_irg_entity(callee)));
- inline_method(call, callee);
- did_inline = 1;
- env->n_call_nodes--;
- eset_insert_all(env->call_nodes, callee_env->call_nodes);
- env->n_call_nodes += callee_env->n_call_nodes;
- env->n_nodes += callee_env->n_nodes;
- callee_env->n_callers--;
+/* printf(" %s: Inlineing %s.\n", get_entity_name(get_irg_entity(current_ir_graph)), */
+/* get_entity_name(get_irg_entity(callee))); */
+ if (inline_method(call, callee)) {
+ did_inline = 1;
+ env->n_call_nodes--;
+ eset_insert_all(env->call_nodes, callee_env->call_nodes);
+ env->n_call_nodes += callee_env->n_call_nodes;
+ env->n_nodes += callee_env->n_nodes;
+ callee_env->n_callers--;
+ }
- env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
- env->n_callers_orig, env->n_callers,
- get_entity_name(get_irg_entity(current_ir_graph)));
+ env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
+ env->n_callers_orig, env->n_callers,
+ get_entity_name(get_irg_entity(current_ir_graph)));
-/********************************************************************/
-/* Code Placement. Pinns all floating nodes to a block where they */
-/* will be executed only if needed. */
-/********************************************************************/
-
-static pdeq *worklist; /* worklist of ir_node*s */
+/*******************************************************************/
+/* Code Placement. Pins all floating nodes to a block where they */
+/* will be executed only if needed. */
+/*******************************************************************/
/* Because all loops contain at least one pinned node, now all
our inputs are either pinned or place_early has already
been finished on them. We do not have any unfinished inputs! */
dep_block = get_nodes_Block(dep);
if ((!is_Bad(dep_block)) &&
-/* Floating nodes form subgraphs that begin at nodes as Const, Load,
- Start, Call and end at pinned nodes as Store, Call. Place_early
- places all floating nodes reachable from its argument through floating
- nodes and adds all beginnings at pinned nodes to the worklist. */
-static INLINE void place_early (void) {
+/**
+ * Floating nodes form subgraphs that begin at nodes such as Const, Load,
+ * Start, Call and that end at pinned nodes such as Store, Call. place_early()
+ * places all floating nodes reachable from its argument through floating
+ * nodes and adds all beginnings at pinned nodes to the worklist.
+ */
+static INLINE void place_early(pdeq* worklist) {
blocks through which the Phi-node reaches producer */
int i, irn_arity;
ir_node *phi_block = get_nodes_Block(consumer);
irn_arity = get_irn_arity(consumer);
for (i = 0; i < irn_arity; i++) {
if (get_irn_n(consumer, i) == producer) {
-/* Find the latest legal block for N and place N into the
- `optimal' Block between the latest and earliest legal block.
- The `optimal' block is the dominance-deepest block of those
- with the least loop-nesting-depth. This places N out of as many
- loops as possible and then makes it as controldependant as
- possible. */
+/**
+ * Find the latest legal block for N and place N into the
+ * `optimal' Block between the latest and earliest legal block.
+ * The `optimal' block is the dominance-deepest block of those
+ * with the least loop-nesting-depth. This places N out of as many
+ * loops as possible and then makes it as control dependent as
+ * possible.
+ */
place to do so, because we need to base our placement on the
final region of our users, which is OK with Phi-nodes, as they
are pinned, and they never have to be placed after a
- (get_irn_op(n) != op_Const) &&
- (get_irn_op(n) != op_SymConst)) {
- ir_node *dca = NULL; /* deepest common ancestor in the
- dominator tree of all nodes'
- blocks depending on us; our final
- placement has to dominate DCA. */
+ (get_irn_op(n) != op_Const) &&
+ (get_irn_op(n) != op_SymConst)) {
+ ir_node *dca = NULL; /* deepest common ancestor in the
+ dominator tree of all nodes'
+ blocks depending on us; our final
+ placement has to dominate DCA. */
- promote it directly below. */
- assert(((b == new) ||
- get_opt_control_flow_straightening() ||
- get_opt_control_flow_weak_simplification()) &&
- ("strange flag setting"));
- exchange (b, new);
- b = new;
- new = equivalent_node(b);
+ promote it directly below. */
+ assert(((b == new_node) ||
+ get_opt_control_flow_straightening() ||
+ get_opt_control_flow_weak_simplification()) &&
+ ("strange flag setting"));
+ exchange (b, new_node);
+ b = new_node;
+ new_node = equivalent_node(b);
- ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
- if (get_Block_block_visited(b_pred) + 1
- < get_irg_block_visited(current_ir_graph)) {
- for (j = 0; j < get_Block_n_cfgpreds(b_pred); j++) {
- ir_node *b_pred_pred = get_nodes_Block(get_Block_cfgpred(b_pred, j));
- if (is_pred_of(b_pred_pred, pred)) dispensable = 0;
- }
- } else {
- if (is_pred_of(b_pred, pred)) dispensable = 0;
- }
+ ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
+ if (get_Block_block_visited(b_pred) + 1
+ < get_irg_block_visited(current_ir_graph)) {
+ for (j = 0; j < get_Block_n_cfgpreds(b_pred); j++) {
+ ir_node *b_pred_pred = get_nodes_Block(get_Block_cfgpred(b_pred, j));
+ if (is_pred_of(b_pred_pred, pred)) dispensable = 0;
+ }
+ } else {
+ if (is_pred_of(b_pred, pred)) dispensable = 0;
+ }
printf(" working on "); DDMN(b);
for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
pred = get_nodes_Block(get_Block_cfgpred(b, i));
if (is_Bad(get_Block_cfgpred(b, i))) {
printf(" removing Bad %i\n ", i);
} else if (get_Block_block_visited(pred) +1
printf(" working on "); DDMN(b);
for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
pred = get_nodes_Block(get_Block_cfgpred(b, i));
if (is_Bad(get_Block_cfgpred(b, i))) {
printf(" removing Bad %i\n ", i);
} else if (get_Block_block_visited(pred) +1
for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
pred = get_nodes_Block(get_Block_cfgpred(b, i));
if (is_Bad(get_Block_cfgpred(b, i))) {
- < get_irg_block_visited(current_ir_graph)) {
- /* It's an empty block and not yet visited. */
- ir_node *phi_pred = get_Phi_pred(phi, i);
- for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
- if (get_nodes_Block(phi_pred) == pred) {
- assert(get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */
- in[n_preds] = get_Phi_pred(phi_pred, j);
- } else {
- in[n_preds] = phi_pred;
- }
- n_preds++;
- }
- /* The Phi_pred node is replaced now if it is a Phi.
- In Schleifen kann offenbar der entfernte Phi Knoten legal verwendet werden.
- Daher muss der Phiknoten durch den neuen ersetzt werden.
- Weiter muss der alte Phiknoten entfernt werden (durch ersetzen oder
- durch einen Bad) damit er aus den keep_alive verschwinden kann.
- Man sollte also, falls keine Schleife vorliegt, exchange mit new_Bad
- aufrufen. */
- if (get_nodes_Block(phi_pred) == pred) {
- /* remove the Phi as it might be kept alive. Further there
- might be other users. */
- exchange(phi_pred, phi); /* geht, ist aber doch semantisch falsch! Warum?? */
- }
+ < get_irg_block_visited(current_ir_graph)) {
+ /* It's an empty block and not yet visited. */
+ ir_node *phi_pred = get_Phi_pred(phi, i);
+ for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
+ if (get_nodes_Block(phi_pred) == pred) {
+ assert(get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */
+ in[n_preds] = get_Phi_pred(phi_pred, j);
- in[n_preds] = get_Phi_pred(phi, i);
- n_preds ++;
+ in[n_preds] = phi_pred;
+ }
+ n_preds++;
+ }
+          /* The Phi_pred node is replaced now if it is a Phi.
+             In loops the removed Phi node can apparently still be used legally,
+             so the Phi node must be replaced by the new one.
+             Furthermore, the old Phi node must be removed (by exchanging it or
+             replacing it with a Bad) so that it disappears from the keep-alives.
+             So, if there is no loop, one should call exchange with new_Bad. */
+ if (get_nodes_Block(phi_pred) == pred) {
+ /* remove the Phi as it might be kept alive. Further there
+ might be other users. */
+            exchange(phi_pred, phi); /* works, but is semantically wrong! Why?? */
+ }
+ } else {
+ in[n_preds] = get_Phi_pred(phi, i);
+ n_preds ++;
- if (get_irn_op(phi) == op_Phi) {
- set_nodes_Block(phi, b);
-
- n_preds = 0;
- for (i = 0; i < k; i++) {
- pred = get_nodes_Block(get_Block_cfgpred(b, i));
- if (is_Bad(get_Block_cfgpred(b, i))) {
- /* Do nothing */
- } else if (get_Block_block_visited(pred) +1
- < get_irg_block_visited(current_ir_graph)) {
- /* It's an empty block and not yet visited. */
- for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
- /* @@@ Hier brauche ich Schleifeninformation!!! Kontrollflusskante
- muss Rueckwaertskante sein! (An allen vier in[n_preds] = phi
- Anweisungen.) Trotzdem tuts bisher!! */
- in[n_preds] = phi;
- n_preds++;
- }
- } else {
- in[n_preds] = phi;
- n_preds++;
- }
- }
- for (i = 0; i < get_Phi_n_preds(phi); i++) {
- in[n_preds] = get_Phi_pred(phi, i);
- n_preds++;
- }
- for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
- pred = get_nodes_Block(get_Block_cfgpred(b, i));
- if (is_Bad(get_Block_cfgpred(b, i))) {
- /* Do nothing */
- } else if (get_Block_block_visited(pred) +1
- < get_irg_block_visited(current_ir_graph)) {
- /* It's an empty block and not yet visited. */
- for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
- in[n_preds] = phi;
- n_preds++;
- }
- } else {
- in[n_preds] = phi;
- n_preds++;
- }
- }
- set_irn_in(phi, n_preds, in);
- }
- phi = get_irn_link(phi);
+ if (get_irn_op(phi) == op_Phi) {
+ set_nodes_Block(phi, b);
+
+ n_preds = 0;
+ for (i = 0; i < k; i++) {
+ pred = get_nodes_Block(get_Block_cfgpred(b, i));
+ if (is_Bad(get_Block_cfgpred(b, i))) {
+ /* Do nothing */
+ } else if (get_Block_block_visited(pred) +1
+ < get_irg_block_visited(current_ir_graph)) {
+ /* It's an empty block and not yet visited. */
+ for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
+              /* @@@ Here I need loop information!!!  The control flow edge
+                 must be a backedge!  (At all four in[n_preds] = phi
+                 assignments.)  Nevertheless it has worked so far!! */
+ in[n_preds] = phi;
+ n_preds++;
+ }
+ } else {
+ in[n_preds] = phi;
+ n_preds++;
+ }
+ }
+ for (i = 0; i < get_Phi_n_preds(phi); i++) {
+ in[n_preds] = get_Phi_pred(phi, i);
+ n_preds++;
+ }
+ for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
+ pred = get_nodes_Block(get_Block_cfgpred(b, i));
+ if (is_Bad(get_Block_cfgpred(b, i))) {
+ /* Do nothing */
+ } else if (get_Block_block_visited(pred) +1
+ < get_irg_block_visited(current_ir_graph)) {
+ /* It's an empty block and not yet visited. */
+ for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
+ in[n_preds] = phi;
+ n_preds++;
+ }
+ } else {
+ in[n_preds] = phi;
+ n_preds++;
+ }
+ }
+ set_irn_in(phi, n_preds, in);
+ }
+ phi = get_irn_link(phi);
n_preds = 0;
for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
pred = get_nodes_Block(get_Block_cfgpred(b, i));
if (is_Bad(get_Block_cfgpred(b, i))) {
/* Do nothing */
} else if (get_Block_block_visited(pred) +1
/* It's an empty block and not yet visited. */
assert(get_Block_n_cfgpreds(b) > 1);
/* Else it should be optimized by equivalent_node. */
for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
- set_irg_block_visited(current_ir_graph, /* Don't walk all the way to Start. */
- get_irg_block_visited(current_ir_graph)-1);
- irg_block_walk(ka, optimize_blocks, NULL, NULL);
- mark_irn_visited(ka);
- ARR_APP1 (ir_node *, in, ka);
+ set_irg_block_visited(current_ir_graph, /* Don't walk all the way to Start. */
+ get_irg_block_visited(current_ir_graph)-1);
+ irg_block_walk(ka, optimize_blocks, NULL, NULL);
+ mark_irn_visited(ka);
+ ARR_APP1 (ir_node *, in, ka);
- (op_Proj == get_irn_op(pre)) &&
- op_Raise != get_irn_op(skip_Proj(pre))) {
-
- /* set predecessor array for new block */
- in = NEW_ARR_D (ir_node *, current_ir_graph->obst, 1);
- /* set predecessor of new block */
- in[0] = pre;
- block = new_Block(1, in);
- /* insert new jmp node to new block */
- switch_block(block);
- jmp = new_Jmp();
- switch_block(n);
- /* set sucessor of new block */
- set_irn_n(n, i, jmp);
-
- } /* predecessor has multiple sucessors */
+ (op_Proj == get_irn_op(pre)) &&
+ op_Raise != get_irn_op(skip_Proj(pre))) {
+
+ /* set predecessor array for new block */
+ in = NEW_ARR_D (ir_node *, current_ir_graph->obst, 1);
+ /* set predecessor of new block */
+ in[0] = pre;
+ block = new_Block(1, in);
+ /* insert new jmp node to new block */
+ switch_block(block);
+ jmp = new_Jmp();
+ switch_block(n);
+ /* set successor of new block */
+ set_irn_n(n, i, jmp);
+
+ } /* predecessor has multiple successors */