**
** Author: Christian Schaefer
**
-** dead node elemination
-** walks one time through the whole graph and copies it into another graph,
-** so unreachable nodes will be lost.
+** Optimizations for a whole ir graph, i.e., a procedure.
*/
+# include <assert.h>
+
# include "irgopt.h"
-# include "irnode.h"
+# include "irnode_t.h"
+# include "irgraph_t.h"
# include "iropt.h"
# include "irgwalk.h"
-# include "irgraph.h"
# include "ircons.h"
+# include "misc.h"
+# include "irgmod.h"
+
+/********************************************************************/
+/* apply optimizations of iropt to all nodes. */
+/********************************************************************/
void
optimize_in_place_wrapper (ir_node *n, void *env) {
  int i;
  /* Sketch, assuming optimize_in_place() from iropt: optimize every
     predecessor of n in place and re-wire it. */
  for (i = 0; i < get_irn_arity(n); i++)
    set_irn_n(n, i, optimize_in_place(get_irn_n(n, i)));
}
-
void
local_optimize_graph (ir_graph *irg) {
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  /* Walk the graph and optimize every node in place (sketch; assumes the
     irg_walk() protocol used in copy_graph below). */
  irg_walk(get_irg_end(irg), NULL, optimize_in_place_wrapper, NULL);
  current_ir_graph = rem;
}
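+
+/* Note on the walker (sketch, assuming irg_walk's usual pre/post order):
+   the wrapper runs as the post function, so all predecessors of a node
+   are already optimized when the node itself is visited; one walk over
+   the graph therefore suffices. */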
-/* Remeber the new node in the old node,
- by using a field that all nodes have. */
-void *
+/********************************************************************/
+/* Routines for dead node elimination / copying garbage collection */
+/* of the obstack. */
+/********************************************************************/
+
+/* Remember the new node in the old node by using a field all nodes have. */
+inline void
set_new_node (ir_node *old, ir_node *new)
{
- old->in[0] = new;
- return old;
+ old->link = new;
}
/* Get the new node, before the old node is forgotten. */
-ir_node *
+inline ir_node *
get_new_node (ir_node * n)
{
- return n->in[0];
+ return n->link;
}
+/* We use the block_visited flag to mark that we have computed the
+   number of useful predecessors for this block.
+   Furthermore we encode the new arity in this flag: it stores
+   get_irg_block_visited() plus the new arity.  Remembering the arity
+   saves a lot of pointer accesses, as this function is called once for
+   the Block node itself and once for every Phi node in the block. */
+inline int
+compute_new_arity(ir_node *b) {
+ int i, res;
+ int irg_v, block_v;
+
+ irg_v = get_irg_block_visited(current_ir_graph);
+ block_v = get_Block_block_visited(b);
+ if (block_v >= irg_v) {
+ /* we computed the number of preds for this block and saved it in the
+ block_v flag */
+ return block_v - irg_v;
+ } else {
+ /* compute the number of good predecessors */
+ res = get_irn_arity(b);
+ for (i = 0; i < get_irn_arity(b); i++)
+ if (get_irn_opcode(get_irn_n(b, i)) == iro_Bad) res--;
+ /* save it in the flag. */
+ set_Block_block_visited(b, irg_v + res);
+ return res;
+ }
+}
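+
+/* Worked example of the encoding above (illustrative only): assume
+   get_irg_block_visited(current_ir_graph) == 10 and a block b with three
+   control flow predecessors, one of them Bad.  The first call counts the
+   good predecessors and stores 10 + 2 in b's block_visited flag; every
+   later call for b just returns block_v - irg_v == 12 - 10 == 2 without
+   touching the predecessors again. */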
-/* Create this node on a new obstack. */
-void
+/* Copies the node to the new obstack. The Ins of the new node point to
+   the predecessors on the old obstack.  n->link points to the new node.
+   For Phi and Block nodes the function allocates in arrays with an arity
+   covering only the useful predecessors.  The arity is determined by
+   counting the non-Bad predecessors of the block. */
+inline void
copy_node (ir_node *n, void *env) {
- ir_node * res, a, b;
-
- if (is_binop(n)) {
- a = get_binop_left(n);
- b = get_binop_right(n);
- } else if (is_unop(n)) {
- a = get_unop_op(n);
+ ir_node *nn, *block;
+ int new_arity;
+
+ if (get_irn_opcode(n) == iro_Block) {
+ block = NULL;
+ new_arity = compute_new_arity(n);
+ } else {
+ block = get_nodes_Block(n);
+ if (get_irn_opcode(n) == iro_Phi) {
+ new_arity = compute_new_arity(block);
+ } else {
+ new_arity = get_irn_arity(n);
+ }
}
+ nn = new_ir_node(current_ir_graph,
+ block,
+ get_irn_op(n),
+ get_irn_mode(n),
+ new_arity,
+ get_irn_in(n));
+ copy_attrs(n, nn);
+ set_new_node(n, nn);
+}
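+
+/* Note (explanatory, no additional functionality): right after copy_node
+   the in array of nn is taken verbatim from get_irn_in(n), i.e., it still
+   points to nodes on the old obstack.  The second pass, copy_preds below,
+   replaces every predecessor p by its copy get_new_node(p). */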
- switch (get_irn_opcode(n)) {
- case iro_Block:
- int i;
- ir_node **in [get_Block_n_cfgpreds(n)];
- for (i=0; i <(get_Return_n_res(n)); i++) {
- in[i] = get_Block_cfgpred (n, i);
- }
- res = new_r_Block (current_ir_graph, get_Block_n_cfgpreds(n), in);
- set_new_node(n, res);
- break;
- case iro_Start:
- res = new_r_Start (current_ir_graph, get_new_node(n));
- set_new_node(n, res);
- break;
- case iro_End:
- res = new_r_End (current_ir_graph, get_new_node(n));
- set_new_node(n, res);
- break;
- case iro_Jmp:
- res = new_r_Jmp (current_ir_graph, get_new_node(n));
- set_new_node(n, res);
- break;
- case iro_Cond:
- res = new_r_Cond (current_ir_graph, get_new_node(n),
- get_Cond_selector(n));
- set_new_node(n, res);
- break;
- case iro_Return:
- {
- /*CS malloc*/
- int i;
- ir_node **in [get_Return_n_res(n)];
- for (i=0; i <(get_Return_n_res(n)); i++) {
- in[i] = get_Return_res (n, i);
- }
- res = new_r_Return (current_ir_graph, get_new_node(n),
- get_Return_mem (n), get_Return_n_res(n), in);
- set_new_node(n, res);
- }
- break;
- case iro_Raise:
- res = new_r_Raise (current_ir_graph, get_new_node(n),
- get_Raise_mem(n), get_Raise_exoptr(n));
- set_new_node(n, res);
- break;
- case iro_Const:
- res = new_r_Const (current_ir_graph, get_new_node(n),
- get_irn_mode(n), get_Const_tarval(n));
- set_new_node(n, res);
- break;
- case iro_SymConst:
- {
- if (get_SymConst_kind(n) == type_tag || get_SymConst_kind(n) == size)
- {
- res = new_r_Raise (current_ir_graph, get_new_node(n),
- get_SymConst_type(n), get_SymConst_kind (n));
- }
- else
- /* if get_SymConst_kind(n) == linkage_ptr_info */
- {
- res = new_r_Raise (current_ir_graph, get_new_node(n),
- get_SymConst_ptrinfo(n), get_SymConst_kind (n));
- }
- set_new_node(n, res);
- }
- break;
- case iro_Sel:
- {
- int i;
- ir_node **in [get_Sel_n_index(n)];
- for (i=0; i <(get_Sel_n_index(n)); i++) {
- in[i] = get_Sel_index (n, i);
+/* Copies the predecessors of the old node to the new node remembered in
+   n->link.  Spares the Bad predecessors of Phi and Block nodes. */
+inline void
+copy_preds (ir_node *n, void *env) {
+ ir_node *nn, *block/*, *on*/;
+ int i, j;
+
+ nn = get_new_node(n);
+
+ if (get_irn_opcode(n) == iro_Block) {
+ /* Don't copy Bad nodes. */
+ j = 0;
+ for (i = 0; i < get_irn_arity(n); i++)
+ if (get_irn_opcode(get_irn_n(n, i)) != iro_Bad) {
+ set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
+ j++;
}
- res = new_r_Sel (current_ir_graph, get_new_node(n),
- get_Sel_mem(n), get_Sel_ptr(n), get_Sel_n_index(n),
- in, get_Sel_entity(n));
- set_new_node(n, res);
- }
- break;
- case iro_Call:
- {
- int i;
- ir_node **in [get_Call_arity(n)];
- for (i=0; i <(get_Call_arity(n)); i++) {
- in[i] = get_Call_param (n, i);
+ /* repair the block visited flag from above misuse */
+ set_Block_block_visited(nn, 0);
+ /* Local optimization could not merge two subsequent blocks if
+       the in array contained Bads. Now it's possible. *
+ on = optimize_in_place(nn);
+ if (nn != on) exchange(nn, on);*/
+ } else if (get_irn_opcode(n) == iro_Phi) {
+    /* Don't copy a Phi input if the corresponding predecessor of the
+       block is Bad.  The Block itself should not be Bad. */
+ block = get_nodes_Block(n);
+ set_irn_n (nn, -1, get_new_node(block));
+ j = 0;
+ for (i = 0; i < get_irn_arity(n); i++)
+ if (get_irn_opcode(get_irn_n(block, i)) != iro_Bad) {
+ set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
+ j++;
}
- res = new_r_Call (current_ir_graph, get_new_node(n), get_Call_mem(n),
- get_Call_ptr(n), get_Call_arity(n),
- in, get_Call_type (n));
- set_new_node(n, res);
- }
- break;
- case iro_Add:
- res = new_r_Add (current_ir_graph, get_new_node(n),
- get_new_node(a),
- get_new_node(b), get_irn_mode(n));
- set_new_node(n, res);
- break;
- case iro_Sub:
- res = new_r_Sub (current_ir_graph, get_new_node(get_nodes_block(n)),
- get_new_node(a),
- get_new_node(b), get_irn_mode(n));
- set_new_node(n, res);
- break;
- case iro_Minus:
- res = new_r_Minus (current_ir_graph, get_new_node(n), get_new_node(a),
- get_irn_mode(n));
- set_new_node(n, res);
- break;
- case iro_Mul:
- break;
- case iro_Quot:
- break;
- case iro_DivMod:
- break;
- case iro_Div:
- break;
- case iro_Mod:
- break;
- case iro_Abs:
- break;
- case iro_And:
- break;
- case iro_Or:
- break;
- case iro_Eor:
- break;
- case iro_Not:
- break;
- case iro_Cmp:
- break;
- case iro_Shl:
- break;
- case iro_Shr:
- break;
- case iro_Shrs:
- break;
- case iro_Rot:
- break;
- case iro_Conv:
- break;
- case iro_Phi:
- break;
- case iro_Load:
- break;
- case iro_Store:
- break;
- case iro_Alloc:
- break;
- case iro_Free:
- break;
- case iro_Sync:
- break;
- case iro_Proj:
- break;
- case iro_Tuple:
- break;
- case iro_Id:
- break;
- case iro_Bad:
- break;
+ /* Compacting the Phi's ins might generate Phis with only one
+ predecessor. *
+ if (get_irn_arity(n) == 1)
+ exchange(n, get_irn_n(n, 0)); */
+ } else {
+ for (i = -1; i < get_irn_arity(n); i++)
+ set_irn_n (nn, i, get_new_node(get_irn_n(n, i)));
}
+}
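+
+/* Example of the compaction above (illustrative only): a block with the
+   control flow predecessors (Jmp, Bad, Jmp) is copied with arity 2, and a
+   Phi in that block with the ins (x, y, z) keeps only the copies of x and
+   z, i.e., exactly the inputs whose control flow predecessor is not Bad. */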
+/* Copies the graph reachable from current_ir_graph->end to the obstack
+ in current_ir_graph.
+ Then fixes the fields in current_ir_graph containing nodes of the
+ graph. */
+void
+copy_graph () {
+ /* Not all nodes remembered in current_ir_graph might be reachable
+ from the end node. Assure their link is set to NULL so that
+ we can test whether new nodes have been computed. */
+ set_irn_link(get_irg_frame (current_ir_graph), NULL);
+ set_irn_link(get_irg_globals(current_ir_graph), NULL);
+ set_irn_link(get_irg_args (current_ir_graph), NULL);
+
+  /* We use the block visited flag for removing Bads from the Blocks' in arrays. */
+ inc_irg_block_visited(current_ir_graph);
+
+ /* copy the graph */
+ irg_walk(get_irg_end(current_ir_graph), copy_node, copy_preds, NULL);
+
+ /* fix the fields in current_ir_graph */
+ set_irg_end (current_ir_graph, get_new_node(get_irg_end(current_ir_graph)));
+ set_irg_end_block (current_ir_graph, get_new_node(get_irg_end_block(current_ir_graph)));
+ if (get_irn_link(get_irg_frame(current_ir_graph)) == NULL)
+ irg_walk(get_irg_frame(current_ir_graph), copy_node, copy_preds, NULL);
+ if (get_irn_link(get_irg_globals(current_ir_graph)) == NULL)
+ irg_walk(get_irg_globals(current_ir_graph), copy_node, copy_preds, NULL);
+ if (get_irn_link(get_irg_args(current_ir_graph)) == NULL)
+ irg_walk(get_irg_args(current_ir_graph), copy_node, copy_preds, NULL);
+ set_irg_start (current_ir_graph, get_new_node(get_irg_start(current_ir_graph)));
+ set_irg_start_block(current_ir_graph,
+ get_new_node(get_irg_start_block(current_ir_graph)));
+ set_irg_frame (current_ir_graph, get_new_node(get_irg_frame(current_ir_graph)));
+ set_irg_globals(current_ir_graph, get_new_node(get_irg_globals(current_ir_graph)));
+ set_irg_args (current_ir_graph, get_new_node(get_irg_args(current_ir_graph)));
+ if (get_irn_link(get_irg_bad(current_ir_graph)) == NULL) {
+ copy_node(get_irg_bad(current_ir_graph), NULL);
+ copy_preds(get_irg_bad(current_ir_graph), NULL);
+ }
+ set_irg_bad(current_ir_graph, get_new_node(get_irg_bad(current_ir_graph)));
}
+/* Amroq calls this emigrate(). */
void
-dead_node_elemination(ir_graph *irg) {
- struct obstack *graveyard_obst;
- struct obstack *rebirth_obst;
+dead_node_elimination(ir_graph *irg) {
+ ir_graph *rem;
+ struct obstack *graveyard_obst = NULL;
+ struct obstack *rebirth_obst = NULL;
+
+ /* Remember external state of current_ir_graph. */
+ rem = current_ir_graph;
+ current_ir_graph = irg;
- /* A quiet place, where the old obstack can rest in peace,
- until it will be cremated. */
- graveyard_obst = irg->obst;
+ if (get_optimize() && get_opt_dead_node_elimination()) {
- /* A new obstack, where the reachable nodes will be copied to. */
- rebirth_obst = (struct obstack *) xmalloc (sizeof (struct obstack));
- current_ir_graph->obst = rebirth_obst;
- obstack_init (current_ir_graph->obst);
+ /* A quiet place, where the old obstack can rest in peace,
+ until it will be cremated. */
+ graveyard_obst = irg->obst;
- /* Walks the graph once, and at the recursive way do the copy thing.
- all reachable nodes will be copied to a new obstack. */
- irg_walk(irg->end, NULL, copy_node, NULL);
+ /* A new obstack, where the reachable nodes will be copied to. */
+ rebirth_obst = (struct obstack *) xmalloc (sizeof (struct obstack));
+ current_ir_graph->obst = rebirth_obst;
+ obstack_init (current_ir_graph->obst);
- /* Free memory from old unoptimized obstack */
- xfree (graveyard_obst);
+ /* Copy the graph from the old to the new obstack */
+ copy_graph();
+ /* Free memory from old unoptimized obstack */
+ obstack_free(graveyard_obst, 0); /* First empty the obstack ... */
+ xfree (graveyard_obst); /* ... then free it. */
+ }
+
+ current_ir_graph = rem;
}
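+
+/* Usage sketch (hypothetical caller, not part of this module): run the
+   local optimizations first, then reclaim the memory of the nodes that
+   became unreachable.  dead_node_elimination() is a no-op unless the
+   optimization flags tested above are set.
+
+     local_optimize_graph(irg);
+     dead_node_elimination(irg);
+*/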