ir_node *res;
res = new_r_Block (current_ir_graph, arity, in);
- current_ir_graph->current_block = res;
/* Create and initialize array for Phi-node construction. */
res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
                                       current_ir_graph->n_loc);
memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
res = optimize (res);
+ current_ir_graph->current_block = res;
+
irn_vrfy (res);
return res;
/* file to dump to */
static FILE *F;
+/* A compiler option to turn off edge labels */
int edge_label = 1;
+/* A global variable recording whether the Bad node has already been dumped. */
+int Bad_dumped;
+
/*******************************************************************/
/* routines to dump information about a single node */
/*******************************************************************/
assert(get_irn_n(n, i));
xfprintf (F, "edge: {sourcename: \"%p\" targetname: \"%p\"",
n, get_irn_n(n, i));
- fprintf (F, " label: \"%d\" ", i+1);
+ fprintf (F, " label: \"%d\" ", i);
print_edge_vcgattr(n, i);
fprintf (F, "}\n");
}
dump_node(n);
dump_ir_data_edges(n);
}
+ if (get_irn_op(n) == op_Bad)
+ Bad_dumped = 1;
}
void
}
}
+
+void
+dump_blockless_nodes (ir_node *n, void *env) {
+ if (is_no_Block(n) && get_irn_op(get_nodes_Block(n)) == op_Bad) {
+ dump_node(n);
+ dump_ir_data_edges(n);
+ dump_ir_block_edge(n);
+ }
+ if (get_irn_op(n) == op_Bad)
+ Bad_dumped = 1;
+}
+
void
dump_ir_block_graph (ir_graph *irg)
{
vcg_open (irg, "");
+ Bad_dumped = 0;
/* walk over the blocks in the graph */
irg_block_walk(irg->end, dump_ir_block, NULL, irg);
+ /* dump all nodes that are not in a Block */
+ irg_walk(irg->end, dump_blockless_nodes, NULL, NULL);
+
+ /* dump the Bad node */
+ if (!Bad_dumped)
+ dump_node(get_irg_bad(irg));
vcg_close();
current_ir_graph = rem;
}
/* 0 - don't do this optimization
   1 - let's see if there is a better graph */
-int opt_cse = 1;
-int opt_constant_folding = 1;
-int opt_dead_node_elimination = 1;
+int opt_cse = 1; /* Hash the nodes */
+int opt_constant_folding = 1; /* Evaluate operations */
+int opt_unreachable_code = 1; /* Bad node propagation */
+int opt_dead_node_elimination = 1; /* Reclaim memory */
int optimized = 1;
+int opt_inline = 1;
/* set the flags with set_flagname, get the flag with get_flagname */
return opt_constant_folding;
}
+inline void
+set_opt_unreachable_code(int value)
+{
+ opt_unreachable_code = value;
+}
+
+inline int
+get_opt_unreachable_code(void)
+{
+ return opt_unreachable_code;
+}
+
+
inline void
set_opt_dead_node_elimination (int value)
{
{
return optimized;
}
+
+
+void set_opt_inline (int value) {
+ opt_inline = value;
+}
+
+int get_opt_inline (void) {
+ return opt_inline;
+}
void set_opt_cse (int value);
int get_opt_cse (void);
+/* If opt_unreachable_code == 1, nodes (except Block, Phi and Tuple)
+   that have a Bad predecessor are replaced by the Bad node.
+ Default: opt_unreachable_code == 1. */
+inline void set_opt_unreachable_code(int value);
+inline int get_opt_unreachable_code(void);
+
/* If opt_dead_node_elimination == 1 deallocate all dead nodes
by copying the firm graph.
   Default: opt_dead_node_elimination == 0. @@@ 0 as long as it is buggy, else 1. */
void set_opt_dead_node_elimination (int value);
int get_opt_dead_node_elimination (void);
+/* If opt_inline == 1 the inlining transformation is performed. */
+void set_opt_inline (int value);
+int get_opt_inline (void);
+
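For illustration only, a minimal sketch of how a front end might set the switches declared above before building and optimizing graphs; the wrapper name is made up and the header name irflag.h is assumed.

/* Configure the optimization switches; the values shown are just an example. */
void configure_firm_optimizations (void) {
  set_opt_cse (1);                     /* hash nodes, enabling common subexpression elimination */
  set_opt_unreachable_code (1);        /* let Bad replace nodes with Bad predecessors */
  set_opt_dead_node_elimination (0);   /* keep off while it is marked buggy above */
  set_opt_inline (1);                  /* allow inline_method() to transform Calls */
}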
#endif
# include "irgraph_t.h"
# include "irgmod.h"
# include "array.h"
+# include "ircons.h"
/* Turns a node into a "useless" Tuple. The Tuple just forms a tuple
from several inputs.
old->in[0] = block;
old->in[1] = new;
}
+
+
+/**********************************************************************/
+/* Functionality for collect_phiprojs */
+/**********************************************************************/
+
+void
+clear_link (ir_node *n, void *env) {
+ set_irn_link(n, NULL);
+}
+
+void
+collect (ir_node *n, void *env) {
+ ir_node *pred;
+ if (get_irn_op(n) == op_Phi) {
+ set_irn_link(n, get_irn_link(get_nodes_Block(n)));
+ set_irn_link(get_nodes_Block(n), n);
+ }
+ if (get_irn_op(n) == op_Proj) {
+ pred = n;
+ while (get_irn_op(pred) == op_Proj)
+ pred = get_Proj_pred(pred);
+ set_irn_link(n, get_irn_link(pred));
+ set_irn_link(pred, n);
+ }
+}
+
+void collect_phiprojs(ir_graph *irg) {
+ ir_graph *rem;
+
+ /* Remember external state of current_ir_graph. */
+ rem = current_ir_graph;
+ current_ir_graph = irg;
+
+ irg_walk(get_irg_end(current_ir_graph), clear_link, collect, NULL);
+
+ current_ir_graph = rem;
+}
+
+
+/**********************************************************************/
+/* Functionality for part_block */
+/**********************************************************************/
+
+/* Moves node and all predecessors of node from from_bl to to_bl.
+ Does not move predecessors of Phi nodes (or block nodes). */
+
+void move (ir_node *node, ir_node *from_bl, ir_node *to_bl) {
+ int i;
+ ir_node *proj, *pred;
+
+ /* move this node */
+ set_nodes_Block(node, to_bl);
+
+ /* move its projs */
+ if (get_irn_mode(node) == mode_T) {
+ proj = get_irn_link(node);
+ while (proj) {
+ if (get_nodes_Block(proj) == from_bl)
+ set_nodes_Block(proj, to_bl);
+ proj = get_irn_link(proj);
+ }
+ }
+
+ /* recursion ... */
+ if (get_irn_op(node) == op_Phi) return;
+
+ for (i = 0; i < get_irn_arity(node); i++) {
+ pred = get_irn_n(node, i);
+ if (get_nodes_Block(pred) == from_bl)
+ move(pred, from_bl, to_bl);
+ }
+}
+
+void part_block(ir_node *node) {
+ ir_node *new_block;
+ ir_node *old_block;
+ ir_node *phi;
+
+ /* Transform the control flow */
+ old_block = get_nodes_Block(node);
+ new_block = new_Block(get_Block_n_cfgpreds(old_block),
+ get_Block_cfgpred_arr(old_block));
+ set_irg_current_block(current_ir_graph, new_block);
+ {
+ ir_node *in[1];
+ in[0] = new_Jmp();
+ set_irn_in(old_block, 1, in);
+ irn_vrfy(old_block);
+ }
+
+ /* move node and its predecessors to new_block */
+ move(node, old_block, new_block);
+
+ /* move Phi nodes to new_block */
+ phi = get_irn_link(old_block);
+ set_irn_link(new_block, phi);
+ set_irn_link(old_block, NULL);
+ while (phi) {
+ set_nodes_Block(phi, new_block);
+ phi = get_irn_link(phi);
+ }
+}
/* Turns a node into a "useless" Tuple. The Tuple node just forms a tuple
from several inputs. The predecessors of the tuple have to be
- set by hand.
+ set by hand. The block predecessor automatically remains the same.
This is useful if a node returning a tuple is removed, but the Projs
extracting values from the tuple are not available. */
void turn_into_tuple (ir_node *node, int arity);
current_ir_graph is set properly. */
inline void exchange (ir_node *old, ir_node *new);
+/* Walks over the passed ir graph and collects all Phi nodes as a
+ list built with the link field in their corresponding block.
+ Further it collects all Proj nodes in a list of the node producing
+ the tuple. In case of nested tuples the Projs are collected in the
+ node producing the outermost Tuple. */
+void collect_phiprojs(ir_graph *irg);
+
+/* Parts a block into two. This is useful to insert other blocks within a
+ given block.
+ Adds a new block (new_block) in the control flow before the block
+ (old_block) of node. Moves node and its predecessors from old_block to
+ new_block. Moves all Projs that depend on moved nodes and are in old_block
+ to new_block. Moves all Phi nodes from old_block to new_block. To achieve
+ this the routine assumes that all Phi nodes are in a list (using the link
+ field) in the link field of old_block. Further it assumes that all Proj nodes
+ are accessible by the link field of the nodes producing the Tuple. This
+ can be established by collect_phiprojs(). part_block conserves this property.
+ Adds a Jmp node to new_block that jumps to old_block.
+ Assumes that node is contained in current_ir_graph. Sets current_block in
+ this ir_graph to new_block. */
+void part_block(ir_node *node);
+
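A minimal usage sketch, not part of this change: collect_phiprojs() must establish the link lists before part_block() is applied. The wrapper name is made up; saving and restoring current_ir_graph is an assumption.

/* Split the block of node n, which must be contained in irg, just before n. */
void split_block_before (ir_graph *irg, ir_node *n) {
  ir_graph *rem = current_ir_graph;   /* part_block works on current_ir_graph */
  current_ir_graph = irg;

  collect_phiprojs(irg);              /* build the Phi and Proj link lists */
  part_block(n);                      /* n and its predecessors move to the new block,
                                         a Jmp to the old block is added */

  current_ir_graph = rem;
}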
#endif /* ifndef _IRGMOD_H_ */
# include "irgmod.h"
# include "pset.h"
+
+/* @@@ for testing */
+#include "irdump.h"
+
+/* Defined in iropt.c */
pset *new_identities (void);
void del_identities (pset *value_table);
-void add_identity (pset *value_table, ir_node *node);
-
+void add_identities (pset *value_table, ir_node *node);
+#if 0 /* Why is this defined here a second time?
+         Did it fail to link because of the typo ...ities vs. ...ity ?? */
/* To fill the hash table */
void
add_identity (pset *value_table, ir_node *n) {
/* identify_remember (value_table, n);*/
}
+#endif
/********************************************************************/
/* apply optimizations of iropt to all nodes. */
/********************************************************************/
+void init_link (ir_node *n, void *env) {
+ set_irn_link(n, NULL);
+}
+
void
optimize_in_place_wrapper (ir_node *n, void *env) {
int i;
irg->value_table = new_identities();
/* walk over the graph */
- irg_walk(irg->end, NULL, optimize_in_place_wrapper, NULL);
+ irg_walk(irg->end, init_link, optimize_in_place_wrapper, NULL);
current_ir_graph = rem;
}
}
/* Copies the node to the new obstack. The Ins of the new node point to
- the predecessors on the old obstack. n->link points to the new node.
+ the predecessors on the old obstack. For block/phi nodes not all
+ predecessors might be copied. n->link points to the new node.
For Phi and Block nodes the function allocates in-arrays with an arity
only for useful predecessors. The arity is determined by counting
the non-bad predecessors of the block. */
-inline void
+void
copy_node (ir_node *n, void *env) {
ir_node *nn, *block;
int new_arity;
frees e.g. the memory of the graph_arr allocated in new_immBlock. */
copy_attrs(n, nn);
set_new_node(n, nn);
+
+ /* printf("\n old node: "); DDMSG2(n);
+ printf(" new node: "); DDMSG2(nn); */
+
}
/* Copies new predecessors of old node to new node remembered in link.
Spare the Bad predecessors of Phi and Block nodes. */
-inline void
+void
copy_preds (ir_node *n, void *env) {
ir_node *nn, *block, *on;
int i, j;
nn = get_new_node(n);
+ /* printf("\n old node: "); DDMSG2(n);
+ printf(" new node: "); DDMSG2(nn);
+ printf(" arities: old: %d, new: %d\n", get_irn_arity(n), get_irn_arity(nn)); */
+
if (get_irn_opcode(n) == iro_Block) {
/* Don't copy Bad nodes. */
j = 0;
set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
j++;
}
- /* repair the block visited flag from above misuse */
+ /* repair the block visited flag from above misuse. Repair it in both
+ graphs so that the old one can still be used. */
set_Block_block_visited(nn, 0);
+ set_Block_block_visited(n, 0);
/* Local optimization could not merge two subsequent blocks if
- in array contained Bads. Now it's possible, but don't do it for
- the end block! */
- /* GL: this is inefficient!!
- if (n != current_ir_graph->end_block) on = optimize_in_place(nn);
- else on = nn;
- if (nn != on) exchange(nn, on);
- better: */
- if (n != current_ir_graph->end_block) {
- on = optimize_in_place(nn);
- if ((nn != on)) {
- if (get_irn_op(on) == op_Bad)
- /* if on is Bad it is the old Bad node. */
- on = get_new_node(on);
- exchange(nn, on);
- nn = on; /* For cse ... */
- }
- }
+ in array contained Bads. Now it's possible.
+ @@@ I removed the call to optimize_in_place as it requires
+ that the fields in ir_graph are set properly. */
+ if (get_Block_n_cfgpreds(nn) == 1
+ && get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp)
+ exchange(nn, get_nodes_Block(get_Block_cfgpred(nn, 0)));
} else if (get_irn_opcode(n) == iro_Phi) {
/* Don't copy node if corresponding predecessor in block is Bad.
The Block itself should not be Bad. */
set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
j++;
}
+ /* If the pre walker reached this Phi after the post walker visited the
+    block, block_visited is > 0. */
+ set_Block_block_visited(get_nodes_Block(n), 0);
/* Compacting the Phi's ins might generate Phis with only one
predecessor. */
if (get_irn_arity(n) == 1)
set_irn_n (nn, i, get_new_node(get_irn_n(n, i)));
}
/* Now the new node is complete. We can add it to the hash table for cse. */
- add_identity (current_ir_graph->value_table, nn);
+ /* add_identity (current_ir_graph->value_table, nn); */
+ add_identities (current_ir_graph->value_table, nn);
}
/* Copies the graph reachable from current_ir_graph->end to the obstack
graph. */
void
copy_graph () {
-
- ir_node *old, *new;
-
/* Not all nodes remembered in current_ir_graph might be reachable
from the end node. Assure their link is set to NULL, so that
we can test whether new nodes have been computed. */
current_ir_graph = rem;
}
+
+/**********************************************************************/
+/* Functionality for inlining */
+/**********************************************************************/
+
+void inline_method(ir_node *call, ir_graph *called_graph) {
+ ir_node *pre_call;
+ ir_node *post_call, *post_bl;
+ ir_node *in[5];
+ ir_node *end, *end_bl;
+ ir_node **res_pred;
+ ir_node **cf_pred;
+ ir_node *ret, *phi;
+ ir_node *cf_op, *bl;
+ int arity, n_ret, n_exc, n_res, i, j;
+
+ if (!get_opt_inline()) return;
+
+ /** Check preconditions **/
+ assert(get_irn_op(call) == op_Call);
+ assert(get_Call_type(call) == get_entity_type(get_irg_ent(called_graph)));
+ assert(get_type_tpop(get_Call_type(call)) == type_method);
+ if (called_graph == current_ir_graph) return;
+
+ /** Part the Call node into two nodes. Pre_call collects the parameters of
+ the procedure and later replaces the Start node of the called graph.
+ Post_call is the old Call node and collects the results of the called
+ graph. Both will end up being a tuple. **/
+ post_bl = get_nodes_Block(call);
+ set_irg_current_block(current_ir_graph, post_bl);
+ /* XxMxPxP from Start + the parameters of the Call */
+ in[0] = new_Jmp();
+ in[1] = get_Call_mem(call);
+ in[2] = get_irg_frame(current_ir_graph);
+ in[3] = get_irg_globals(current_ir_graph);
+ in[4] = new_Tuple (get_Call_n_params(call),
+ get_Call_param_arr(call));
+ pre_call = new_Tuple(5, in);
+ post_call = call;
+
+ /** Part the block of the Call node into two blocks.
+ The new block gets the ins of the old block, pre_call and all its
+ predecessors and all Phi nodes. **/
+ part_block(pre_call);
+
+ /** Prepare state for dead node elimination **/
+ /* Visited flags in calling irg must be >= flag in called irg.
+ Else walker and arity computation will not work. */
+ if (get_irg_visited(current_ir_graph) <= get_irg_visited(called_graph))
+ set_irg_visited(current_ir_graph, get_irg_visited(called_graph)+1); /***/
+ if (get_irg_block_visited(current_ir_graph)< get_irg_block_visited(called_graph))
+ set_irg_block_visited(current_ir_graph, get_irg_block_visited(called_graph));
+ /* Set pre_call as the new Start node in the link field of the Start node of
+    the called graph, and pre_call's block as the new block for the start
+    block of the called graph.
+ Further mark these nodes so that they are not visited by the
+ copying. */
+ set_irn_link(get_irg_start(called_graph), pre_call);
+ set_irn_visited(get_irg_start(called_graph),
+ get_irg_visited(current_ir_graph));/***/
+ set_irn_link(get_irg_start_block(called_graph),
+ get_nodes_Block(pre_call));
+ set_irn_visited(get_irg_start_block(called_graph),
+ get_irg_visited(current_ir_graph)); /***/
+
+ /* Initialize for compaction of in arrays */
+ inc_irg_block_visited(current_ir_graph);
+ /* Commented out for now:
+    set_Block_block_visited(get_irg_start_block(called_graph),
+                            get_irg_block_visited(current_ir_graph) + 1 + 1);
+    The extra +1 counts for the self edge. */
+
+ /*** Replicate local entities of the called_graph ***/
+ /* @@@ */
+
+ /* visited is greater than that of the called graph. With this trick visited will
+ remain unchanged so that an outer walker calling this inline will
+ not visit the inlined nodes. */
+ set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
+
+ /** Performing dead node elimination inlines the graph **/
+ /* Copies the nodes to the obstack of current_ir_graph. */
+ /* @@@ endless loops are not copied!! */
+ irg_walk(get_irg_end(called_graph), copy_node, copy_preds, NULL);
+
+ /* Repair called_graph */
+ set_irg_visited(called_graph, get_irg_visited(current_ir_graph));
+ set_irg_block_visited(called_graph, get_irg_block_visited(current_ir_graph));
+ set_Block_block_visited(get_irg_start_block(called_graph), 0);
+
+ /*** Merge the end of the inlined procedure with the call site ***/
+
+ /** Precompute some values **/
+ end_bl = get_new_node(get_irg_end_block(called_graph));
+ end = get_new_node(get_irg_end(called_graph));
+ arity = get_irn_arity(end_bl); /* arity = n_exc + n_ret */
+ n_res = get_method_n_res(get_Call_type(call));
+
+ res_pred = (ir_node **) malloc ((n_res) * sizeof (ir_node *));
+ cf_pred = (ir_node **) malloc (arity * sizeof (ir_node *));
+
+ set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */
+
+
+ /** Collect control flow from Return blocks to post_call's block. Replace
+     Return nodes by Jmp nodes. **/
+ n_ret = 0;
+ for (i = 0; i < arity; i++) {
+ ir_node *ret;
+ ret = get_irn_n(end_bl, i);
+ if (get_irn_op(ret) == op_Return) {
+ cf_pred[n_ret] = new_r_Jmp(current_ir_graph, get_nodes_Block(ret));
+ n_ret++;
+ }
+ }
+ set_irn_in(post_bl, n_ret, cf_pred);
+
+ /** Collect results from Return nodes to post_call. Post_call is
+ turned into a tuple. **/
+ turn_into_tuple(post_call, 4);
+ /* First the Memory-Phi */
+ n_ret = 0;
+ for (i = 0; i < arity; i++) {
+ ret = get_irn_n(end_bl, i);
+ if (get_irn_op(ret) == op_Return) {
+ cf_pred[n_ret] = get_Return_mem(ret);
+ n_ret++;
+ }
+ }
+ phi = new_Phi(n_ret, cf_pred, mode_M);
+ set_Tuple_pred(call, 0, phi);
+ set_irn_link(phi, get_irn_link(post_bl)); /* Conserve Phi-list for further inlinings */
+ set_irn_link(post_bl, phi);
+ /* Now the real results */
+ if (n_res > 0) {
+ for (j = 0; j < n_res; j++) {
+ n_ret = 0;
+ for (i = 0; i < arity; i++) {
+ ret = get_irn_n(end_bl, i);
+ if (get_irn_op(ret) == op_Return) {
+ cf_pred[n_ret] = get_Return_res(ret, j);
+ n_ret++;
+ }
+ }
+ phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
+ res_pred[j] = phi;
+ set_irn_link(phi, get_irn_link(post_bl)); /* Conserve Phi-list for further inlinings */
+ set_irn_link(post_bl, phi);
+ }
+ set_Tuple_pred(call, 2, new_Tuple(n_res, res_pred));
+ } else {
+ set_Tuple_pred(call, 2, new_Bad());
+ }
+ /* Finally the exception control flow. We need to add a Phi node to
+ collect the memory containing the exception objects. Further we need
+ to add another block to get a correct representation of this Phi. To
+ this block we add a Jmp that resolves into the X output of the Call
+ when the Call is turned into a tuple. */
+ n_exc = 0;
+ for (i = 0; i < arity; i++) {
+ ir_node *ret;
+ ret = get_irn_n(end_bl, i);
+ if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
+ cf_pred[n_exc] = ret;
+ n_exc++;
+ }
+ }
+ if (n_exc > 0) {
+ new_Block(n_exc, cf_pred); /* watch it: current_block is changed! */
+ set_Tuple_pred(call, 1, new_Jmp());
+ /* The Phi for the memories with the exception objects */
+ n_exc = 0;
+ for (i = 0; i < arity; i++) {
+ ir_node *ret;
+ ret = skip_Proj(get_irn_n(end_bl, i));
+ if (is_fragile_op(ret) || (get_irn_op(ret) == op_Raise)) {
+ cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 1);
+ n_exc++;
+ }
+ }
+ set_Tuple_pred(call, 3, new_Phi(n_exc, cf_pred, mode_M));
+ } else {
+ set_Tuple_pred(call, 1, new_Bad());
+ set_Tuple_pred(call, 3, new_Bad());
+ }
+ free(res_pred);
+ free(cf_pred);
+
+ /*** Correct the control flow to the end node.
+ If the exception control flow from the Call directly branched to the
+ end block we now have the following control flow predecessor pattern:
+ ProjX -> Tuple -> Jmp.
+ We must remove the Jmp along with its empty block and add Jmp's
+ predecessors as predecessors of this end block. ***/
+ /* find the problematic predecessor of the end block. */
+ end_bl = get_irg_end_block(current_ir_graph);
+ for (i = 0; i < get_Block_n_cfgpreds(end_bl); i++) {
+ cf_op = get_Block_cfgpred(end_bl, i);
+ if (get_irn_op(cf_op) == op_Proj) {
+ cf_op = get_Proj_pred(cf_op);
+ if (get_irn_op(cf_op) == op_Tuple) {
+ cf_op = get_Tuple_pred(cf_op, 1);
+ assert(get_irn_op(cf_op) == op_Jmp);
+ break;
+ }
+ }
+ }
+ /* repair */
+ if (i < get_Block_n_cfgpreds(end_bl)) {
+ bl = get_nodes_Block(cf_op);
+ arity = get_Block_n_cfgpreds(end_bl) + get_Block_n_cfgpreds(bl) - 1;
+ cf_pred = (ir_node **) malloc (arity * sizeof (ir_node *));
+ for (j = 0; j < i; j++)
+ cf_pred[j] = get_Block_cfgpred(end_bl, j);
+ for (j = j; j < i + get_Block_n_cfgpreds(bl); j++)
+ cf_pred[j] = get_Block_cfgpred(bl, j-i);
+ for (j = j; j < arity; j++)
+ cf_pred[j] = get_Block_cfgpred(end_bl, j-get_Block_n_cfgpreds(bl) +1);
+ set_irn_in(end_bl, arity, cf_pred);
+ free(cf_pred);
+ }
+}
development/debugging are not conserved by copying. */
void dead_node_elimination(ir_graph *irg);
+/* Inlines a method at the given call site.
+ Assumes that call is a Call node in current_ir_graph and that
+ the type in the Call node's type attribute is the same as the
+ type of the called graph.
+ Further it assumes that all Phi nodes in a block of current_ir_graph
+ are assembled in a "link" list in the link field of the corresponding
+ block nodes. Further assumes that all Proj nodes are in a "link" list
+ in the nodes producing the tuple. Conserves this feature for the old
+ nodes of the graph. This precondition can be established by a call to
+ collect_phiprojs(), see irgmod.h.
+ Called_graph must be unequal to current_ir_graph. Will not inline
+ if they are equal.
+ Sets visited masterflag in current_ir_graph to max of flag in current
+ and called graphs.
+ Removes the call node and splits the basic block the call node
+ belongs to. Inserts a copy of the called graph between these nodes.
+ It is recommended to call local_optimize_graph after inlining as this
+ function leaves a set of obscure Tuple nodes, e.g. a Proj-Tuple-Jmp
+ combination as control flow operation. */
+void inline_method(ir_node *call, ir_graph *called_graph);
+
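As an illustrative sketch only: inlining a single call site following the preconditions stated above. The wrapper name is made up, and calling local_optimize_graph() with the graph as argument is an assumption based on the recommendation in the comment.

/* Inline callee at the Call node call of irg and clean up afterwards. */
void inline_one_call (ir_graph *irg, ir_node *call, ir_graph *callee) {
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;

  collect_phiprojs(irg);          /* establish the Phi and Proj link lists, see irgmod.h */
  inline_method(call, callee);    /* does nothing if callee == irg or opt_inline == 0 */
  local_optimize_graph(irg);      /* fold the leftover Proj-Tuple-Jmp combinations */

  current_ir_graph = rem;
}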
# endif /* _IRGOPT_H_ */
{
int i;
+
assert(node);
-/* printf(" - "); DDMSG2(node); */
+ /* printf(" - "); DDMSG2(node); */
if (get_irn_visited(node) < get_irg_visited(current_ir_graph)) {
}
/***************************************************************************/
+
+/* Walks back from n until it finds a real cf op. */
+ir_node *get_cf_op(ir_node *n) {
+ ir_node *pred;
+
+ n = skip_nop(n);
+ n = skip_Tuple(n);
+ pred = skip_Proj(n);
+ if (!(is_cfop(pred) || is_fragile_op(pred) ||
+ (get_irn_op(pred) == op_Bad)))
+ n = get_cf_op(n);
+
+ return skip_Proj(n);
+}
+
void irg_block_walk_2(ir_node *node, void (pre)(ir_node*, void*),
void (post)(ir_node*, void*), void *env)
{
for(i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
/* find the corresponding predecessor block. */
- ir_node *pred = skip_Proj(get_Block_cfgpred(node, i));
+ ir_node *pred = get_cf_op(get_Block_cfgpred(node, i));
/* GL: I'm not sure whether this assert must go through. There
- could be Id chains?? */
- assert(is_cfop(pred) || is_fragile_op(pred));
+ could be Id chains?? Tuple/Proj .... */
+
+ assert(is_cfop(pred) || is_fragile_op(pred) ||
+ (get_irn_op(pred) == op_Bad));
pred = get_nodes_Block(pred);
if(get_irn_opcode(pred) == iro_Block) {
/* recursion */
return ((node)->in);
}
+inline void
+set_irn_in (ir_node *node, int arity, ir_node **in) {
+ assert(node);
+ if (arity != get_irn_arity(node)) {
+ ir_node *block = node->in[0];
+ node->in = NEW_ARR_D (ir_node *, current_ir_graph->obst, (arity+1));
+ node->in[0] = block;
+ }
+ memcpy (&node->in[1], in, sizeof (ir_node *) * arity);
+}
+
/* to iterate through the predecessors without touching the array */
/* To iterate over the operands iterate from 0 to i < get_irn_arity(),
   to iterate including the Block predecessor iterate from i = -1 to
}
}
+inline ir_node *
+skip_Tuple (ir_node *node) {
+ if ((node->op == op_Proj) && (get_irn_op(get_Proj_pred(node)) == op_Tuple))
+ return get_Tuple_pred(get_Proj_pred(node), get_Proj_proj(node));
+ return node;
+}
+
+
inline ir_node *
skip_nop (ir_node *node) {
/* don't assert node !!! */
|| (get_irn_opcode(node) == iro_Alloc)
|| (get_irn_opcode(node) == iro_Bad));
}
+
+
+/* Returns the memory operand of fragile operations. */
+ir_node *get_fragile_op_mem(ir_node *node) {
+ assert(node && is_fragile_op(node));
+
+ switch (get_irn_opcode (node)) {
+ case iro_Call :
+ case iro_Quot :
+ case iro_DivMod:
+ case iro_Div :
+ case iro_Mod :
+ case iro_Load :
+ case iro_Store :
+ case iro_Alloc :
+ return get_irn_n(node, 0);
+ case iro_Bad :
+ return node;
+ default:
+   assert(0 && "get_fragile_op_mem(): node is not a fragile op");
+   return NULL;
+ }
+}
int get_irn_arity (ir_node *node);
/* returns the array with the ins: */
inline ir_node **get_irn_in (ir_node *node);
+/* Replaces the old in array by a new one that will contain the ins given in
+   the parameters. It copies the array passed.
+   This function is necessary to adjust in arrays of blocks, calls and phis.
+   Assumes the current_ir_graph is set to the graph containing "node".
+   "in" must contain all predecessors except the block that are required for
+   the node's opcode. */
+inline void set_irn_in (ir_node *node, int arity,
+ ir_node **in);
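For illustration, a hypothetical helper that is not part of this change (assumes the usual headers and that bl belongs to current_ir_graph): appending one control flow predecessor to a block with set_irn_in().

/* Append the control flow predecessor jmp to the block bl. */
void add_block_cfgpred (ir_node *bl, ir_node *jmp) {
  int i, n = get_irn_arity(bl);
  ir_node **in = (ir_node **) malloc ((n + 1) * sizeof (ir_node *));

  for (i = 0; i < n; i++)
    in[i] = get_irn_n(bl, i);   /* keep the old predecessors */
  in[n] = jmp;                  /* the new one */

  set_irn_in(bl, n + 1, in);    /* set_irn_in copies the array */
  free(in);
  irn_vrfy(bl);
}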
/* to iterate through the predecessors without touching the array. No
order of predecessors guaranteed.
To iterate over the operands iterate from 0 to i < get_irn_arity(),
/* Get the opcode-enum of the node */
inline opcode get_irn_opcode (ir_node *node);
/* Get the ident for a string representation of the opcode */
-inline const char *get_irn_opname (ir_node *node);
inline ident *get_irn_opident (ir_node *node);
+/* Get the string representation of the opcode */
+inline const char *get_irn_opname (ir_node *node);
inline void set_irn_visited (ir_node *node, unsigned long visited);
inline unsigned long get_irn_visited (ir_node *node);
inline void set_irn_link (ir_node *node, ir_node *link);
inline ir_node *skip_Proj (ir_node *node);
/* returns operand of node if node is a Id */
inline ir_node *skip_nop (ir_node *node);
+/* returns corresponding operand of Tuple if node is a Proj from
+ a Tuple. */
+inline ir_node *skip_Tuple (ir_node *node);
/* returns true if node is a Bad node. */
inline int is_Bad (ir_node *node);
/* returns true if the node is not a Block */
/* Returns true if the operation can change the control flow because
of an exception. */
int is_fragile_op(ir_node *node);
+/* Returns the memory operand of fragile operations. */
+ir_node *get_fragile_op_mem(ir_node *node);
/*****/
#define DDMSG printf("%s(l.%i)\n", __FUNCTION__, __LINE__)
#define DDMSG1(X) printf("%s(l.%i) %s\n", __FUNCTION__, __LINE__, \
id_to_str(get_irn_opident(X)))
-#define DDMSG2(X) printf("%s(l.%i) %s: %ld\n", __FUNCTION__, __LINE__, \
- id_to_str(get_irn_opident(X)), get_irn_node_nr(X))
+#define DDMSG2(X) printf("%s(l.%i) %s%s: %ld\n", __FUNCTION__, __LINE__, \
+ id_to_str(get_irn_opident(X)), id_to_str(get_irn_modeident(X)), \
+ get_irn_node_nr(X))
#define DDMSG3(X) printf("%s(l.%i) %s: %p\n", __FUNCTION__, __LINE__, \
print_firm_kind(X), (X))
if it is not the Start block. */
/* !!! Beware, all Phi-nodes of n must have been optimized away.
This should be true, as the block is matured before optimize is called.
- But what about Phi-cycles with the Phi0/Id that could not be resolved? */
+ But what about Phi-cycles with the Phi0/Id that could not be resolved?
+ Remaining Phi nodes are just Ids. */
if (get_Block_n_cfgpreds(n) == 1
&& get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp) {
n = get_nodes_Block(get_Block_cfgpred(n, 0));
case iro_Load:
{
+#if 0 /* This is an illegal transformation: different nodes can
+         represent the same pointer value!! */
a = skip_Proj(get_Load_mem(n));
- b = skip_Proj(get_Load_ptr(n));
+ b = get_Load_ptr(n);
if (get_irn_op(a) == op_Store) {
if ( different_identity (b, get_Store_ptr(a))) {
} else if (( 0 /* ???didn't get cryptic test that returns 0 */ )) {
}
}
+#endif
}
break;
case iro_Store:
if ( get_Proj_proj(n) < get_Tuple_n_preds(a) ) {
n = get_Tuple_pred(a, get_Proj_proj(n));
} else {
- assert(0); /* This should not happen! (GL added this assert) */
+ assert(0); /* This should not happen! */
n = new_Bad();
}
} else if (get_irn_mode(n) == mode_X &&
del_pset (value_table);
}
-void
-add_identities (pset *value_table, ir_node *node) {
- identify_remember (value_table, node);
-}
-
/* Return the canonical node computing the same value as n.
Looks up the node in a hash table. */
static inline ir_node *
return o;
}
+void
+add_identities (pset *value_table, ir_node *node) {
+ identify_remember (value_table, node);
+}
+
/* garbage in, garbage out. If a node has a dead input, i.e., the
Bad node is input to the node, return the Bad node. */
static inline ir_node *
n = transform_node (n);
/* Remove nodes with dead (Bad) input. */
- n = gigo (n);
+ if (get_opt_unreachable_code())
+ n = gigo (n);
/* Now we can verify the node, as it has no dead inputs any more. */
irn_vrfy(n);
n = transform_node (n);
/* Remove nodes with dead (Bad) input. */
- n = gigo (n);
+ if (get_opt_unreachable_code())
+ n = gigo (n);
/* Now we can verify the node, as it has no dead inputs any more. */
irn_vrfy(n);
ir_node **in;
opcode = get_irn_opcode (n);
+
+ if (opcode != iro_Phi && opcode != iro_Block)
+ for (i = 0; i < get_irn_arity(n); i++)
+ if (get_irn_opcode(get_irn_n(n, i)) == iro_Bad)
+ return;
+
mymode = get_irn_mode (n);
in = get_irn_in (n);
void set_entity_ld_ident (entity *ent, ident *ld_ident);
/*
-char *get_entity_ld_name (entity *ent);
-void set_entity_ld_name (entity *ent, char *ld_name);
+char *get_entity_ld_name (entity *ent);
+void set_entity_ld_name (entity *ent, char *ld_name);
*/
-type *get_entity_owner (entity *ent);
+type *get_entity_owner (entity *ent);
/* Sets the owner field in entity to owner. */
-void set_entity_owner (entity *ent, type *owner);
-inline void assert_legal_owner_of_ent(type *owner);
+void set_entity_owner (entity *ent, type *owner);
+inline void assert_legal_owner_of_ent(type *owner);
type *get_entity_type (entity *ent);
void set_entity_type (entity *ent, type *type);