** by Goetz Lindenmaier
*/
+/* $Id$ */
+
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif
# include "common.h"
# include "irvrfy.h"
# include "irop.h"
-# include "iropt.h"
+# include "iropt_t.h"
# include "irgmod.h"
# include "array.h"
/* memset belongs to string.h */
res = optimize (res);
irn_vrfy (res);
+
+ /* Memory Phis in endless loops must be kept alive.
+ As we can't distinguish these easily we keep all of them alive. */
+ if ((res->op == op_Phi) && (mode == mode_M))
+ add_End_keepalive(irg->end, res);
return res;
}
ir_node *in[2] = {store, obj};
ir_node *res;
res = new_ir_node (irg, block, op_Raise, mode_T, 2, in);
-
res = optimize (res);
irn_vrfy (res);
return res;
new_End (void)
{
ir_node *res;
-
res = new_ir_node (current_ir_graph, current_ir_graph->current_block,
op_End, mode_X, -1, NULL);
-
res = optimize (res);
irn_vrfy (res);
create a proper Phi node, therefore a Phi0, i.e., a Phi without
predecessors is returned. This node is added to the linked list (field
"link") of the containing block to be completed when this block is
- matured. (Comlpletion will add a new Phi and turn the Phi0 into an Id
+ matured. (Completion will add a new Phi and turn the Phi0 into an Id
node.)
2. The value is already known in this block, graph_arr[pos] is set and we
known = res;
for (i=0; i < ins; ++i)
{
+ assert(in[i]);
+
if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
if (known==res)
} else {
res = optimize (res);
irn_vrfy (res);
+ /* Memory Phis in endless loops must be kept alive.
+       As we can't distinguish these easily we keep all of them alive. */
+ if ((res->op == op_Phi) && (mode == mode_M))
+ add_End_keepalive(irg->end, res);
}
return res;
inline ir_node *
get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
+#if PRECISE_EXC_CONTEXT
+static inline ir_node *
+phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
+
+/* Allocates a "fragment array" for the fragile op n: a copy of the current
+   block's value table (graph_arr) taken at the point n was constructed.
+   Slot 0 is replaced by the memory Proj of n, so the values visible after
+   n include the memory state n produces.
+   NOTE(review): relies on op_Call delivering memory at Proj position 3 and
+   every other fragile op at position 0 -- confirm against the op specs. */
+ir_node **
+new_frag_arr (ir_node *n) {
+  ir_node **arr;
+  int opt;
+  arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
+  memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
+         sizeof(ir_node *)*current_ir_graph->n_loc);
+  /* turn off optimization before allocating Proj nodes, as res isn't
+     finished yet. */
+  opt = get_optimize(); set_optimize(0);
+  /* Here we rely on the fact that all frag ops have Memory as first result! */
+  if (get_irn_op(n) == op_Call)
+    arr[0] = new_Proj(n, mode_M, 3);
+  else
+    arr[0] = new_Proj(n, mode_M, 0);
+  set_optimize(opt);
+  /* Remember n in the last slot of the block's value table so that
+     set_frag_value can propagate values into the fragment arrays of
+     later fragile ops in this block (see set_frag_value). */
+  current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
+  return arr;
+}
+
+/* Returns the fragment array attached to node n.  The array is stored in
+   a different member of the attribute union depending on the opcode of n,
+   so each opcode must be dispatched explicitly. */
+inline ir_node **
+get_frag_arr (ir_node *n) {
+  if (get_irn_op(n) == op_Call) {
+    return n->attr.call.frag_arr;
+  } else if (get_irn_op(n) == op_Alloc) {
+    return n->attr.a.frag_arr;
+  } else {
+    /* All remaining fragile ops (Quot, DivMod, Div, Mod, Load, Store)
+       keep the array directly in attr.frag_arr. */
+    return n->attr.frag_arr;
+  }
+}
+
+/* Records val at position pos in frag_arr unless a value is already
+   recorded there (set_frag_value never overwrites), then propagates
+   the value into the fragment array of the next fragile op remembered
+   in the last slot (n_loc - 1), if any.
+   Fixed: was declared to return ir_node * but contained no return
+   statement; no caller uses a result, so the type is now void. */
+inline void
+set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
+  if (!frag_arr[pos]) frag_arr[pos] = val;
+  /* Recurse through the chain of fragile ops in this block. */
+  if (frag_arr[current_ir_graph->n_loc - 1])
+    set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
+}
+
+/* Returns the value at position pos as visible after the fragile op cfOp:
+   the value recorded in cfOp's fragment array if one exists, otherwise a
+   value (re)constructed from the current state of block and cached in the
+   fragment arrays for later lookups.
+   Fixed: removed the unused local variable rem. */
+inline ir_node *
+get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
+  ir_node *res;
+  ir_node **frag_arr;
+
+  assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
+
+  frag_arr = get_frag_arr(cfOp);
+  res = frag_arr[pos];
+  if (!res) {
+    if (block->attr.block.graph_arr[pos]) {
+      /* There was a set_value after the cfOp and no get_value before that
+         set_value. We must build a Phi node now. */
+      if (block->attr.block.matured) {
+        /* Block is finished: build the real Phi immediately. */
+        int ins = get_irn_arity(block);
+        ir_node **nin;
+        NEW_ARR_A (ir_node *, nin, ins);
+        res = phi_merge(block, pos, mode, nin, ins);
+      } else {
+        /* Block still open: create a Phi0 placeholder and queue it on the
+           block's link list to be completed when the block matures. */
+        res = new_r_Phi0 (current_ir_graph, block, mode);
+        res->attr.phi0_pos = pos;
+        res->link = block->link;
+        block->link = res;
+      }
+      assert(res);
+      /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
+         but this should be better: (remove comment if this works) */
+      /* It's a Phi, we can write this into all graph_arrs with NULL */
+      set_frag_value(block->attr.block.graph_arr, pos, res);
+    } else {
+      res = get_r_value_internal(block, pos, mode);
+      set_frag_value(block->attr.block.graph_arr, pos, res);
+    }
+  }
+  return res;
+}
+#endif
+
/** This function allocates a dummy Phi node to break recursions,
computes the predecessors for the real phi node, and then
allocates and returns this node. The routine called to allocate the
static inline ir_node *
phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
{
- ir_node *prevBlock, *res, *phi0;
+ ir_node *prevBlock, *prevCfOp, *res, *phi0;
int i;
-
/* If this block has no value at pos create a Phi0 and remember it
- in graph_arr to break recursions. */
+ in graph_arr to break recursions.
+     Else we must not set graph_arr, as a later value is already remembered there. */
phi0 = NULL;
if (!block->attr.block.graph_arr[pos]) {
/* This is commented out as collapsing to Bads is no good idea.
Either we need an assert here, or we need to call a routine
that deals with this case as appropriate for the given language.
- Right now a self referencing Id is created which will crash irg_vryfy().
+ Right now a self referencing Id is created which will crash irg_vrfy().
Even if all variables are defined before use, it can happen that
we get to the start block, if a cond has been replaced by a tuple
if (block == get_irg_start_block(current_ir_graph)) {
block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
+ /* We don't need to care about exception ops in the start block.
+ There are none by definition. */
return block->attr.block.graph_arr[pos];
- } else {
+ } else {
phi0 = new_r_Phi0(current_ir_graph, block, mode);
block->attr.block.graph_arr[pos] = phi0;
+#if PRECISE_EXC_CONTEXT
+ /* Set graph_arr for fragile ops. Also here we should break recursion.
+     We could choose a cyclic path through a cfop.  But the recursion would
+ break at some point. */
+ set_frag_value(block->attr.block.graph_arr, pos, phi0);
+#endif
}
}
is in and there finds the operands of the Phi node by calling
get_r_value_internal. */
for (i = 1; i <= ins; ++i) {
- assert (block->in[i]);
- if (is_Bad(block->in[i])) {
+ prevCfOp = skip_Proj(block->in[i]);
+ assert (prevCfOp);
+ if (is_Bad(prevCfOp)) {
/* In case a Cond has been optimized we would get right to the start block
with an invalid definition. */
nin[i-1] = new_Bad();
prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
assert (prevBlock);
if (!is_Bad(prevBlock)) {
- nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
+#if PRECISE_EXC_CONTEXT
+ if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
+ assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
+ nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
+ } else
+#endif
+ nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
} else {
nin[i-1] = new_Bad();
}
if (phi0) {
exchange(phi0, res);
block->attr.block.graph_arr[pos] = res;
+ /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
+ only an optimization. */
}
return res;
ir_node *next;
assert (get_irn_opcode(block) == iro_Block);
+ // assert (!get_Block_matured(block) && "Block already matured");
if (!get_Block_matured(block)) {
- /* turn the dynamic in-array into a static one. */
+ /* An array for building the Phi nodes. */
ins = ARR_LEN (block->in)-1;
NEW_ARR_A (ir_node *, nin, ins);
- /* @@@ something is strange here... why isn't the array copied? */
+ /* shouldn't we delete this array at the end of the procedure? @@@ memory leak? */
/* Traverse a chain of Phi nodes attached to this block and mature
these, too. **/
we can not free the node on the obstack. Therefore we have to call
optimize_in_place.
Unfortunately the optimization does not change a lot, as all allocated
- nodes refer to the unoptimized node. */
- block = optimize_in_place(block);
+ nodes refer to the unoptimized node.
+ We can call _2, as global cse has no effect on blocks. */
+ block = optimize_in_place_2(block);
irn_vrfy(block);
}
}
ir_node *
new_Quot (ir_node *memop, ir_node *op1, ir_node *op2)
{
- return new_r_Quot (current_ir_graph, current_ir_graph->current_block,
+ ir_node *res;
+ res = new_r_Quot (current_ir_graph, current_ir_graph->current_block,
memop, op1, op2);
+#if PRECISE_EXC_CONTEXT
+ if ((current_ir_graph->phase_state == phase_building) &&
+ (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
+ res->attr.frag_arr = new_frag_arr(res);
+#endif
+
+ return res;
}
ir_node *
new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2)
{
- return new_r_DivMod (current_ir_graph, current_ir_graph->current_block,
+ ir_node *res;
+ res = new_r_DivMod (current_ir_graph, current_ir_graph->current_block,
memop, op1, op2);
+#if PRECISE_EXC_CONTEXT
+ if ((current_ir_graph->phase_state == phase_building) &&
+ (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
+ res->attr.frag_arr = new_frag_arr(res);
+#endif
+
+ return res;
}
ir_node *
new_Div (ir_node *memop, ir_node *op1, ir_node *op2)
{
- return new_r_Div (current_ir_graph, current_ir_graph->current_block,
+ ir_node *res;
+ res = new_r_Div (current_ir_graph, current_ir_graph->current_block,
memop, op1, op2);
+#if PRECISE_EXC_CONTEXT
+ if ((current_ir_graph->phase_state == phase_building) &&
+ (get_irn_op(res) == op_Div)) /* Could be optimized away. */
+ res->attr.frag_arr = new_frag_arr(res);
+#endif
+
+ return res;
}
ir_node *
new_Mod (ir_node *memop, ir_node *op1, ir_node *op2)
{
- return new_r_Mod (current_ir_graph, current_ir_graph->current_block,
+ ir_node *res;
+ res = new_r_Mod (current_ir_graph, current_ir_graph->current_block,
memop, op1, op2);
+#if PRECISE_EXC_CONTEXT
+ if ((current_ir_graph->phase_state == phase_building) &&
+ (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
+ res->attr.frag_arr = new_frag_arr(res);
+#endif
+
+ return res;
}
ir_node *
new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
type *type)
{
- return new_r_Call (current_ir_graph, current_ir_graph->current_block,
+ ir_node *res;
+ res = new_r_Call (current_ir_graph, current_ir_graph->current_block,
store, callee, arity, in, type);
+#if PRECISE_EXC_CONTEXT
+ if ((current_ir_graph->phase_state == phase_building) &&
+ (get_irn_op(res) == op_Call)) /* Could be optimized away. */
+ res->attr.call.frag_arr = new_frag_arr(res);
+#endif
+
+ return res;
}
ir_node *
ir_node *
new_Load (ir_node *store, ir_node *addr)
{
- return new_r_Load (current_ir_graph, current_ir_graph->current_block,
+ ir_node *res;
+ res = new_r_Load (current_ir_graph, current_ir_graph->current_block,
store, addr);
+#if PRECISE_EXC_CONTEXT
+ if ((current_ir_graph->phase_state == phase_building) &&
+ (get_irn_op(res) == op_Load)) /* Could be optimized away. */
+ res->attr.frag_arr = new_frag_arr(res);
+#endif
+
+ return res;
}
ir_node *
new_Store (ir_node *store, ir_node *addr, ir_node *val)
{
- return new_r_Store (current_ir_graph, current_ir_graph->current_block,
+ ir_node *res;
+ res = new_r_Store (current_ir_graph, current_ir_graph->current_block,
store, addr, val);
+#if PRECISE_EXC_CONTEXT
+ if ((current_ir_graph->phase_state == phase_building) &&
+ (get_irn_op(res) == op_Store)) /* Could be optimized away. */
+ res->attr.frag_arr = new_frag_arr(res);
+#endif
+
+ return res;
}
ir_node *
new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
where_alloc where)
{
- return new_r_Alloc (current_ir_graph, current_ir_graph->current_block,
+ ir_node *res;
+ res = new_r_Alloc (current_ir_graph, current_ir_graph->current_block,
store, size, alloc_type, where);
+#if PRECISE_EXC_CONTEXT
+ if ((current_ir_graph->phase_state == phase_building) &&
+ (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
+ res->attr.a.frag_arr = new_frag_arr(res);
+#endif
+
+ return res;
}
ir_node *
ir_node *new_immBlock (void) {
ir_node *res;
+ assert(get_irg_phase_state (current_ir_graph) == phase_building);
/* creates a new dynamic in-array as length of in is -1 */
res = new_ir_node (current_ir_graph, NULL, op_Block, mode_R, -1, NULL);
current_ir_graph->current_block = res;
ir_node *
get_value (int pos, ir_mode *mode)
{
+ assert(get_irg_phase_state (current_ir_graph) == phase_building);
inc_irg_visited(current_ir_graph);
return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
}
inline void
set_value (int pos, ir_node *value)
{
+ assert(get_irg_phase_state (current_ir_graph) == phase_building);
current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
}
inline ir_node *
get_store (void)
{
+ assert(get_irg_phase_state (current_ir_graph) == phase_building);
/* GL: one could call get_value instead */
inc_irg_visited(current_ir_graph);
return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
inline void
set_store (ir_node *store)
{
+ assert(get_irg_phase_state (current_ir_graph) == phase_building);
/* GL: one could call set_value instead */
current_ir_graph->current_block->attr.block.graph_arr[0] = store;
}
+/* Marks node ka as live by adding it to the keep-alive inputs of the
+   current graph's End node, so it is preserved even if it is not
+   reachable from End (e.g. code in endless loops). */
+inline void
+keep_alive (ir_node *ka)
+{
+  add_End_keepalive(current_ir_graph->end, ka);
+}
+
+/** Useful access routines **/
+/* Returns the current block of the current graph.  To set the current
+   block use switch_block().
+   Fixed: declare the parameter list as (void) -- an empty list is an
+   old-style declaration with unchecked arguments. */
+ir_node *get_cur_block(void) {
+  return get_irg_current_block(current_ir_graph);
+}
+
+/* Returns the frame type of the current graph.
+   Fixed: declare the parameter list as (void) -- an empty list is an
+   old-style declaration with unchecked arguments. */
+type *get_cur_frame_type(void) {
+  return get_irg_frame_type(current_ir_graph);
+}
+
+
/* ********************************************************************* */
/* initialize */
init_cons (void)
{
}
+
+/* To be called once for each graph when construction is finished: leaves
+   the phase_building state.  After this, the construction-only routines
+   (get_value, set_value, get_store, set_store, new_immBlock -- see their
+   asserts on get_irg_phase_state) may no longer be used on this graph. */
+void
+finalize_cons (ir_graph *irg) {
+  irg->phase_state = phase_high;
+}