ir_node *in[2] = {store, obj};
ir_node *res;
res = new_ir_node (irg, block, op_Raise, mode_T, 2, in);
-
res = optimize (res);
irn_vrfy (res);
return res;
known = res;
for (i=0; i < ins; ++i)
{
+ assert(in[i]);
+
if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
if (known==res)
/* Forward declaration.  Presumably computes, for the value at position
   <pos> with mode <mode>, the predecessor values over all <ins>
   control-flow predecessors of <block> (collected in <nin>) and builds
   the resulting Phi node — NOTE(review): body not visible here, confirm
   against the definition. */
static inline ir_node *
phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
-inline ir_node **
+ir_node **
new_frag_arr (ir_node *n) {
ir_node **arr;
+ int opt;
arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
sizeof(ir_node *)*current_ir_graph->n_loc);
+ /* turn off optimization before allocating Proj nodes, as res isn't
+ finished yet. */
+ opt = get_optimize(); set_optimize(0);
/* Here we rely on the fact that all frag ops have Memory as first result! */
if (get_irn_op(n) == op_Call)
arr[0] = new_Proj(n, mode_M, 3);
else
arr[0] = new_Proj(n, mode_M, 0);
+ set_optimize(opt);
current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
return arr;
}
ir_node **rem;
ir_node **frag_arr;
- assert(is_fragile_op(cfOp));
+ assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
frag_arr = get_frag_arr(cfOp);
res = frag_arr[pos];
int ins = get_irn_arity(block);
ir_node **nin;
NEW_ARR_A (ir_node *, nin, ins);
- phi_merge(block, pos, mode, nin, ins);
+ res = phi_merge(block, pos, mode, nin, ins);
} else {
res = new_r_Phi0 (current_ir_graph, block, mode);
res->attr.phi0_pos = pos;
res->link = block->link;
block->link = res;
}
- set_frag_value(frag_arr, pos, res);
+ assert(res);
+ /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
+ but this should be better: (remove comment if this works) */
+ /* It's a Phi, we can write this into all graph_arrs with NULL */
+ set_frag_value(block->attr.block.graph_arr, pos, res);
} else {
res = get_r_value_internal(block, pos, mode);
+ set_frag_value(block->attr.block.graph_arr, pos, res);
}
}
return res;
ir_node *prevBlock, *prevCfOp, *res, *phi0;
int i;
-
/* If this block has no value at pos create a Phi0 and remember it
in graph_arr to break recursions.
Else we may not set graph_arr as there a later value is remembered. */
/* We don't need to care about exception ops in the start block.
There are none by definition. */
return block->attr.block.graph_arr[pos];
- } else {
+ } else {
phi0 = new_r_Phi0(current_ir_graph, block, mode);
block->attr.block.graph_arr[pos] = phi0;
#if PRECISE_EXC_CONTEXT
- /* Set graph_arr for fragile ops. Also here we should break recursion. */
+ /* Set graph_arr for fragile ops. Also here we should break recursion.
+ We could choose a cyclic path through an cfop. But the recursion would
+ break at some point. */
set_frag_value(block->attr.block.graph_arr, pos, phi0);
#endif
}
assert (prevBlock);
if (!is_Bad(prevBlock)) {
#if PRECISE_EXC_CONTEXT
- if (is_fragile_op(prevCfOp))
+ if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
+ assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
- else
+ } else
#endif
nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
} else {
ir_node *next;
assert (get_irn_opcode(block) == iro_Block);
+ // assert (!get_Block_matured(block) && "Block already matured");
if (!get_Block_matured(block)) {
- /* turn the dynamic in-array into a static one. */
+ /* An array for building the Phi nodes. */
ins = ARR_LEN (block->in)-1;
NEW_ARR_A (ir_node *, nin, ins);
- /* @@@ something is strange here... why isn't the array copied? */
+ /* shouldn't we delete this array at the end of the procedure? @@@ memory leak? */
/* Traverse a chain of Phi nodes attached to this block and mature
these, too. **/
res = new_r_Quot (current_ir_graph, current_ir_graph->current_block,
memop, op1, op2);
#if PRECISE_EXC_CONTEXT
- res->attr.frag_arr = new_frag_arr(res);
+ if ((current_ir_graph->phase_state == phase_building) &&
+ (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
+ res->attr.frag_arr = new_frag_arr(res);
#endif
return res;
res = new_r_DivMod (current_ir_graph, current_ir_graph->current_block,
memop, op1, op2);
#if PRECISE_EXC_CONTEXT
- res->attr.frag_arr = new_frag_arr(res);
+ if ((current_ir_graph->phase_state == phase_building) &&
+ (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
+ res->attr.frag_arr = new_frag_arr(res);
#endif
return res;
res = new_r_Div (current_ir_graph, current_ir_graph->current_block,
memop, op1, op2);
#if PRECISE_EXC_CONTEXT
- res->attr.frag_arr = new_frag_arr(res);
+ if ((current_ir_graph->phase_state == phase_building) &&
+ (get_irn_op(res) == op_Div)) /* Could be optimized away. */
+ res->attr.frag_arr = new_frag_arr(res);
#endif
return res;
res = new_r_Mod (current_ir_graph, current_ir_graph->current_block,
memop, op1, op2);
#if PRECISE_EXC_CONTEXT
- res->attr.frag_arr = new_frag_arr(res);
+ if ((current_ir_graph->phase_state == phase_building) &&
+ (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
+ res->attr.frag_arr = new_frag_arr(res);
#endif
return res;
res = new_r_Call (current_ir_graph, current_ir_graph->current_block,
store, callee, arity, in, type);
#if PRECISE_EXC_CONTEXT
- res->attr.call.frag_arr = new_frag_arr(res);
+ if ((current_ir_graph->phase_state == phase_building) &&
+ (get_irn_op(res) == op_Call)) /* Could be optimized away. */
+ res->attr.call.frag_arr = new_frag_arr(res);
#endif
return res;
res = new_r_Load (current_ir_graph, current_ir_graph->current_block,
store, addr);
#if PRECISE_EXC_CONTEXT
- res->attr.frag_arr = new_frag_arr(res);
+ if ((current_ir_graph->phase_state == phase_building) &&
+ (get_irn_op(res) == op_Load)) /* Could be optimized away. */
+ res->attr.frag_arr = new_frag_arr(res);
#endif
return res;
res = new_r_Store (current_ir_graph, current_ir_graph->current_block,
store, addr, val);
#if PRECISE_EXC_CONTEXT
- res->attr.frag_arr = new_frag_arr(res);
+ if ((current_ir_graph->phase_state == phase_building) &&
+ (get_irn_op(res) == op_Store)) /* Could be optimized away. */
+ res->attr.frag_arr = new_frag_arr(res);
#endif
return res;
res = new_r_Alloc (current_ir_graph, current_ir_graph->current_block,
store, size, alloc_type, where);
#if PRECISE_EXC_CONTEXT
- res->attr.a.frag_arr = new_frag_arr(res);
+ if ((current_ir_graph->phase_state == phase_building) &&
+ (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
+ res->attr.a.frag_arr = new_frag_arr(res);
#endif
return res;