** by Goetz Lindenmaier
*/
+/* $Id$ */
+
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif
# include "common.h"
# include "irvrfy.h"
# include "irop.h"
-# include "iropt.h"
+# include "iropt_t.h"
# include "irgmod.h"
# include "array.h"
/* memset belongs to string.h */
res = optimize (res);
irn_vrfy (res);
+
+ /* Memory Phis in endless loops must be kept alive.
+ As we can't distinguish these easily we keep all of them alive. */
+ if ((res->op == op_Phi) && (mode == mode_M))
+ add_End_keepalive(irg->end, res);
return res;
}
ir_node *in[2] = {store, obj};
ir_node *res;
res = new_ir_node (irg, block, op_Raise, mode_T, 2, in);
-
res = optimize (res);
irn_vrfy (res);
return res;
new_End (void)
{
ir_node *res;
-
res = new_ir_node (current_ir_graph, current_ir_graph->current_block,
op_End, mode_X, -1, NULL);
-
res = optimize (res);
irn_vrfy (res);
known = res;
for (i=0; i < ins; ++i)
{
+ assert(in[i]);
+
if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
if (known==res)
} else {
res = optimize (res);
irn_vrfy (res);
+ /* Memory Phis in endless loops must be kept alive.
+     As we can't distinguish these easily we keep all of them alive. */
+ if ((res->op == op_Phi) && (mode == mode_M))
+ add_End_keepalive(irg->end, res);
}
return res;
static inline ir_node *
phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
-inline ir_node **
+ir_node **
new_frag_arr (ir_node *n) {
ir_node **arr;
+ int opt;
arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
sizeof(ir_node *)*current_ir_graph->n_loc);
+ /* turn off optimization before allocating Proj nodes, as res isn't
+ finished yet. */
+ opt = get_optimize(); set_optimize(0);
/* Here we rely on the fact that all frag ops have Memory as first result! */
if (get_irn_op(n) == op_Call)
arr[0] = new_Proj(n, mode_M, 3);
else
arr[0] = new_Proj(n, mode_M, 0);
+ set_optimize(opt);
current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
return arr;
}
ir_node **rem;
ir_node **frag_arr;
- DDMSG2(cfOp);
- assert(is_fragile_op(cfOp));
+ assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
frag_arr = get_frag_arr(cfOp);
res = frag_arr[pos];
int ins = get_irn_arity(block);
ir_node **nin;
NEW_ARR_A (ir_node *, nin, ins);
- phi_merge(block, pos, mode, nin, ins);
+ res = phi_merge(block, pos, mode, nin, ins);
} else {
res = new_r_Phi0 (current_ir_graph, block, mode);
res->attr.phi0_pos = pos;
res->link = block->link;
block->link = res;
}
- set_frag_value(frag_arr, pos, res);
+ assert(res);
+ /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
+ but this should be better: (remove comment if this works) */
+ /* It's a Phi, we can write this into all graph_arrs with NULL */
+ set_frag_value(block->attr.block.graph_arr, pos, res);
} else {
res = get_r_value_internal(block, pos, mode);
+ set_frag_value(block->attr.block.graph_arr, pos, res);
}
}
return res;
ir_node *prevBlock, *prevCfOp, *res, *phi0;
int i;
-
/* If this block has no value at pos create a Phi0 and remember it
in graph_arr to break recursions.
Else we may not set graph_arr as there a later value is remembered. */
/* We don't need to care about exception ops in the start block.
There are none by definition. */
return block->attr.block.graph_arr[pos];
- } else {
+ } else {
phi0 = new_r_Phi0(current_ir_graph, block, mode);
block->attr.block.graph_arr[pos] = phi0;
#if PRECISE_EXC_CONTEXT
- /* Set graph_arr for fragile ops. Also here we should break recursion. */
+ /* Set graph_arr for fragile ops. Also here we should break recursion.
+     We could choose a cyclic path through a cfop. But the recursion would
+ break at some point. */
set_frag_value(block->attr.block.graph_arr, pos, phi0);
#endif
}
assert (prevBlock);
if (!is_Bad(prevBlock)) {
#if PRECISE_EXC_CONTEXT
- if (is_fragile_op(prevCfOp))
+ if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
+ assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
- else
+ } else
#endif
nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
} else {
ir_node *next;
assert (get_irn_opcode(block) == iro_Block);
+ // assert (!get_Block_matured(block) && "Block already matured");
if (!get_Block_matured(block)) {
- /* turn the dynamic in-array into a static one. */
+ /* An array for building the Phi nodes. */
ins = ARR_LEN (block->in)-1;
NEW_ARR_A (ir_node *, nin, ins);
- /* @@@ something is strange here... why isn't the array copied? */
+ /* shouldn't we delete this array at the end of the procedure? @@@ memory leak? */
/* Traverse a chain of Phi nodes attached to this block and mature
these, too. **/
we can not free the node on the obstack. Therefore we have to call
optimize_in_place.
Unfortunately the optimization does not change a lot, as all allocated
- nodes refer to the unoptimized node. */
- block = optimize_in_place(block);
+ nodes refer to the unoptimized node.
+ We can call _2, as global cse has no effect on blocks. */
+ block = optimize_in_place_2(block);
irn_vrfy(block);
}
}
res = new_r_Quot (current_ir_graph, current_ir_graph->current_block,
memop, op1, op2);
#if PRECISE_EXC_CONTEXT
- res->attr.frag_arr = new_frag_arr(res);
+ if ((current_ir_graph->phase_state == phase_building) &&
+ (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
+ res->attr.frag_arr = new_frag_arr(res);
#endif
return res;
res = new_r_DivMod (current_ir_graph, current_ir_graph->current_block,
memop, op1, op2);
#if PRECISE_EXC_CONTEXT
- res->attr.frag_arr = new_frag_arr(res);
+ if ((current_ir_graph->phase_state == phase_building) &&
+ (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
+ res->attr.frag_arr = new_frag_arr(res);
#endif
return res;
res = new_r_Div (current_ir_graph, current_ir_graph->current_block,
memop, op1, op2);
#if PRECISE_EXC_CONTEXT
- res->attr.frag_arr = new_frag_arr(res);
+ if ((current_ir_graph->phase_state == phase_building) &&
+ (get_irn_op(res) == op_Div)) /* Could be optimized away. */
+ res->attr.frag_arr = new_frag_arr(res);
#endif
return res;
res = new_r_Mod (current_ir_graph, current_ir_graph->current_block,
memop, op1, op2);
#if PRECISE_EXC_CONTEXT
- res->attr.frag_arr = new_frag_arr(res);
+ if ((current_ir_graph->phase_state == phase_building) &&
+ (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
+ res->attr.frag_arr = new_frag_arr(res);
#endif
return res;
res = new_r_Call (current_ir_graph, current_ir_graph->current_block,
store, callee, arity, in, type);
#if PRECISE_EXC_CONTEXT
- res->attr.call.frag_arr = new_frag_arr(res);
+ if ((current_ir_graph->phase_state == phase_building) &&
+ (get_irn_op(res) == op_Call)) /* Could be optimized away. */
+ res->attr.call.frag_arr = new_frag_arr(res);
#endif
return res;
res = new_r_Load (current_ir_graph, current_ir_graph->current_block,
store, addr);
#if PRECISE_EXC_CONTEXT
- res->attr.frag_arr = new_frag_arr(res);
+ if ((current_ir_graph->phase_state == phase_building) &&
+ (get_irn_op(res) == op_Load)) /* Could be optimized away. */
+ res->attr.frag_arr = new_frag_arr(res);
#endif
return res;
res = new_r_Store (current_ir_graph, current_ir_graph->current_block,
store, addr, val);
#if PRECISE_EXC_CONTEXT
- res->attr.frag_arr = new_frag_arr(res);
+ if ((current_ir_graph->phase_state == phase_building) &&
+ (get_irn_op(res) == op_Store)) /* Could be optimized away. */
+ res->attr.frag_arr = new_frag_arr(res);
#endif
return res;
res = new_r_Alloc (current_ir_graph, current_ir_graph->current_block,
store, size, alloc_type, where);
#if PRECISE_EXC_CONTEXT
- res->attr.a.frag_arr = new_frag_arr(res);
+ if ((current_ir_graph->phase_state == phase_building) &&
+ (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
+ res->attr.a.frag_arr = new_frag_arr(res);
#endif
return res;
ir_node *new_immBlock (void) {
ir_node *res;
+ assert(get_irg_phase_state (current_ir_graph) == phase_building);
/* creates a new dynamic in-array as length of in is -1 */
res = new_ir_node (current_ir_graph, NULL, op_Block, mode_R, -1, NULL);
current_ir_graph->current_block = res;
+/* Returns the value of local variable pos in the current block, as a
+   node of mode mode.  pos is shifted by one because slot 0 of
+   graph_arr holds the store (see get_store/set_store).  Only legal
+   while the graph is under construction (phase_building). */
ir_node *
get_value (int pos, ir_mode *mode)
{
+  assert(get_irg_phase_state (current_ir_graph) == phase_building);
inc_irg_visited(current_ir_graph);
return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
}
+/* Remembers value as the current value of local variable pos in the
+   current block.  pos is shifted by one because slot 0 of graph_arr
+   holds the store.  Only legal during construction (phase_building). */
inline void
set_value (int pos, ir_node *value)
{
+  assert(get_irg_phase_state (current_ir_graph) == phase_building);
current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
}
inline ir_node *
get_store (void)
{
+ assert(get_irg_phase_state (current_ir_graph) == phase_building);
/* GL: one could call get_value instead */
inc_irg_visited(current_ir_graph);
return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
+/* Sets the current store (memory state), kept in slot 0 of the current
+   block's graph_arr.  Only legal during construction (phase_building). */
inline void
set_store (ir_node *store)
{
+  assert(get_irg_phase_state (current_ir_graph) == phase_building);
/* GL: one could call set_value instead */
current_ir_graph->current_block->attr.block.graph_arr[0] = store;
}
+/* Marks node ka as alive by adding it to the keep-alive set of the
+   current graph's End node, so that optimizations do not remove it. */
+inline void
+keep_alive (ir_node *ka)
+{
+  add_End_keepalive(current_ir_graph->end, ka);
+}
+
+/** Useful access routines **/
+/* Returns the current block of the current graph.  To set the current
+   block use switch_block(). */
+ir_node *get_cur_block(void) {
+  return get_irg_current_block(current_ir_graph);
+}
+
+/* Returns the frame type of the current graph. */
+type *get_cur_frame_type(void) {
+  return get_irg_frame_type(current_ir_graph);
+}
+
+
/* ********************************************************************* */
/* initialize */
init_cons (void)
{
}
+
+/* Call once for each graph when construction is finished: switches the
+   graph's phase_state from phase_building to phase_high, so later use
+   of the construction interface is caught by its assertions. */
+void
+finalize_cons (ir_graph *irg) {
+  irg->phase_state = phase_high;
+}