* Some projection numbers must be always equal to support automatic phi construction
*/
enum pn_generic {
- pn_Generic_M_regular = 0, /**< The memory result. */
+ pn_Generic_M = 0, /**< The memory result. */
pn_Generic_X_regular = 1, /**< Execution result if no exception occurred. */
pn_Generic_X_except = 2, /**< The control flow result branching to the exception handler */
pn_Generic_other = 3 /**< First free projection number */
* Projection numbers for result of Call node: use for Proj nodes!
*/
typedef enum {
- pn_Call_M_regular = pn_Generic_M_regular, /**< The memory result. */
+ pn_Call_M = pn_Generic_M, /**< The memory result. */
pn_Call_X_regular = pn_Generic_X_regular, /**< The control flow result when no exception occurs. */
pn_Call_X_except = pn_Generic_X_except, /**< The control flow result branching to the exception handler. */
pn_Call_T_result = pn_Generic_other, /**< The tuple containing all (0, 1, 2, ...) results. */
- pn_Call_M_except, /**< The memory result in case the called method terminated with
- an exception. */
pn_Call_P_value_res_base, /**< A pointer to the memory region containing copied results
passed by value (for compound result types). */
pn_Call_max /**< number of projections from a Call */
} pn_Call; /* Projection numbers for Call. */
-#define pn_Call_M pn_Call_M_regular
/** Retrieve the memory input of a Call. */
ir_node *get_Call_mem(const ir_node *node);
* Projection numbers for result of Builtin node: use for Proj nodes!
*/
typedef enum {
- pn_Builtin_M = pn_Generic_M_regular, /**< The memory result. */
- pn_Builtin_1_result = pn_Generic_other, /**< first result. */
- pn_Builtin_max /**< number of projections from a Builtin */
+ pn_Builtin_M = pn_Generic_M, /**< The memory result. */
+ pn_Builtin_1_result = pn_Generic_other, /**< first result. */
+ pn_Builtin_max /**< number of projections from a Builtin */
} pn_Builtin; /* Projection numbers for Builtin. */
ir_node *get_Builtin_mem(const ir_node *node);
* Projection numbers for Quot: use for Proj nodes!
*/
typedef enum {
- pn_Quot_M = pn_Generic_M_regular, /**< Memory result. */
+ pn_Quot_M = pn_Generic_M, /**< Memory result. */
pn_Quot_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
pn_Quot_X_except = pn_Generic_X_except, /**< Execution result if exception occurred. */
pn_Quot_res = pn_Generic_other, /**< Result of computation. */
* Projection numbers for DivMod: use for Proj nodes!
*/
typedef enum {
- pn_DivMod_M = pn_Generic_M_regular, /**< Memory result. */
+ pn_DivMod_M = pn_Generic_M, /**< Memory result. */
pn_DivMod_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
pn_DivMod_X_except = pn_Generic_X_except, /**< Execution result if exception occurred. */
pn_DivMod_res_div = pn_Generic_other, /**< Result of computation a / b. */
* Projection numbers for Div: use for Proj nodes!
*/
typedef enum {
- pn_Div_M = pn_Generic_M_regular, /**< Memory result. */
+ pn_Div_M = pn_Generic_M, /**< Memory result. */
pn_Div_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
pn_Div_X_except = pn_Generic_X_except, /**< Execution result if exception occurred. */
pn_Div_res = pn_Generic_other, /**< Result of computation. */
* Projection numbers for Mod: use for Proj nodes!
*/
typedef enum {
- pn_Mod_M = pn_Generic_M_regular, /**< Memory result. */
+ pn_Mod_M = pn_Generic_M, /**< Memory result. */
pn_Mod_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
pn_Mod_X_except = pn_Generic_X_except, /**< Execution result if exception occurred. */
pn_Mod_res = pn_Generic_other, /**< Result of computation. */
* Projection numbers for Load: use for Proj nodes!
*/
typedef enum {
- pn_Load_M = pn_Generic_M_regular, /**< Memory result. */
+ pn_Load_M = pn_Generic_M, /**< Memory result. */
pn_Load_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
pn_Load_X_except = pn_Generic_X_except, /**< Execution result if exception occurred. */
pn_Load_res = pn_Generic_other, /**< Result of load operation. */
* Projection numbers for Store: use for Proj nodes!
*/
typedef enum {
- pn_Store_M = pn_Generic_M_regular, /**< Memory result. */
+ pn_Store_M = pn_Generic_M, /**< Memory result. */
pn_Store_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
pn_Store_X_except = pn_Generic_X_except, /**< Execution result if exception occurred. */
pn_Store_max = pn_Generic_other /**< number of projections from a Store */
* Projection numbers for Alloc: use for Proj nodes!
*/
typedef enum {
- pn_Alloc_M = pn_Generic_M_regular, /**< Memory result. */
+ pn_Alloc_M = pn_Generic_M, /**< Memory result. */
pn_Alloc_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
pn_Alloc_X_except = pn_Generic_X_except, /**< Execution result if exception occurred. */
pn_Alloc_res = pn_Generic_other, /**< Result of allocation. */
* Projection numbers for result of CopyB node: use for Proj nodes!
*/
typedef enum {
- pn_CopyB_M_regular = pn_Generic_M_regular, /**< The memory result. */
+ pn_CopyB_M_regular = pn_Generic_M, /**< The memory result. */
pn_CopyB_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
pn_CopyB_X_except = pn_Generic_X_except, /**< The control flow result branching to the exception handler */
- pn_CopyB_M_except = pn_Generic_other, /**< The memory result in case the runtime function terminated with
- an exception */
- pn_CopyB_max /**< number of projections from a CopyB */
+ pn_CopyB_max = pn_Generic_other /**< number of projections from a CopyB */
} pn_CopyB; /* Projection numbers for CopyB. */
#define pn_CopyB_M pn_CopyB_M_regular
* Projection numbers for result of InstOf node: use for Proj nodes!
*/
typedef enum {
- pn_InstOf_M_regular = pn_Generic_M_regular, /**< The memory result. */
+ pn_InstOf_M_regular = pn_Generic_M, /**< The memory result. */
pn_InstOf_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
pn_InstOf_X_except = pn_Generic_X_except, /**< The control flow result branching to the exception handler */
pn_InstOf_res = pn_Generic_other, /**< The checked object pointer. */
- pn_InstOf_M_except, /**< The memory result in case the runtime function terminated with
- an exception */
pn_InstOf_max /**< number of projections from an InstOf */
} pn_InstOf;
#define pn_InstOf_M pn_InstOf_M_regular
* Projection numbers for Raise.
*/
typedef enum {
- pn_Raise_M = pn_Generic_M_regular, /**< The Memory result. */
+ pn_Raise_M = pn_Generic_M, /**< The Memory result. */
pn_Raise_X = pn_Generic_X_regular, /**< The control flow to the exception handler. */
pn_Raise_max /**< number of projections from a Raise */
} pn_Raise; /* Projection numbers for Raise. */
* Projection numbers for result of Bound node: use for Proj nodes!
*/
typedef enum {
- pn_Bound_M = pn_Generic_M_regular, /**< The memory result. */
+ pn_Bound_M = pn_Generic_M, /**< The memory result. */
pn_Bound_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
pn_Bound_X_except = pn_Generic_X_except, /**< The control flow result branching to the exception handler */
pn_Bound_res = pn_Generic_other, /**< The checked index. */
* Projection numbers for result of be_Call node: use for Proj nodes!
*/
typedef enum {
- pn_be_Call_M_regular = pn_Call_M_regular, /**< The memory result of a be_Call. */
+ pn_be_Call_M_regular = pn_Call_M, /**< The memory result of a be_Call. */
pn_be_Call_sp = pn_Call_max,
- pn_be_Call_first_res /**< The first result proj number of a be_Call. */
+ pn_be_Call_first_res /**< The first result proj number of a be_Call. */
} pn_be_Call;
/**
/* should not happen here */
edges_reroute(proj, bad, irg);
break;
- case pn_Call_M_except:
+ case pn_Call_M:
/* should not happen here */
edges_reroute(proj, nomem, irg);
break;
}
turn_into_tuple(call, pn_Call_max);
- set_Tuple_pred(call, pn_Call_M_regular, nomem);
/*
* Beware:
* We do not check here if this call really has exception and regular Proj's.
jmp = new_r_Jmp(block);
set_opt_cse(old_cse);
+ set_Tuple_pred(call, pn_Call_M, nomem);
set_Tuple_pred(call, pn_Call_X_regular, jmp);
set_Tuple_pred(call, pn_Call_X_except, bad);
set_Tuple_pred(call, pn_Call_T_result, res);
- set_Tuple_pred(call, pn_Call_M_except, nomem);
set_Tuple_pred(call, pn_Call_P_value_res_base, bad);
}
}
callee = new_rd_SymConst_addr_ent(env->dbg, env->irg, mode_P_code, method_ent, method_type);
call = new_rd_Call(env->dbg, env->block, memory, callee, 1, in, method_type);
call_results = new_rd_Proj(env->dbg, env->block, call, mode_T, pn_Call_T_result);
- memory = new_rd_Proj(env->dbg, env->block, call, mode_M, pn_Call_M_regular);
+ memory = new_rd_Proj(env->dbg, env->block, call, mode_M, pn_Call_M);
return new_rd_Proj(env->dbg, env->block, call_results, to_mode, 0);
}
adr = new_r_SymConst(irg, mode_P_code, sym, symconst_addr_ent);
call = new_r_Call(first_block, get_irg_no_mem(irg), adr, 0, NULL, get_entity_type(ent));
- new_mem = new_r_Proj(first_block, call, mode_M, pn_Call_M_regular);
+ new_mem = new_r_Proj(first_block, call, mode_M, pn_Call_M);
initial_mem = get_irg_initial_mem(irg);
edges_reroute(initial_mem, new_mem, irg);
opt = get_opt_optimize(); set_optimize(0);
/* Here we rely on the fact that all frag ops have Memory as first result! */
if (is_Call(n)) {
- arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
+ arr[0] = new_Proj(n, mode_M, pn_Call_M);
} else if (is_CopyB(n)) {
- arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
+ arr[0] = new_Proj(n, mode_M, pn_CopyB_M);
} else {
assert((pn_Quot_M == pn_DivMod_M) &&
(pn_Quot_M == pn_Div_M) &&
/** the lookup table for Proj(Call) names */
static const pns_lookup_t call_lut[] = {
#define X(a) { pn_Call_##a, #a }
- X(M_regular),
+ X(M),
X(X_regular),
X(X_except),
X(T_result),
- X(M_except),
X(P_value_res_base)
#undef X
};
X(M),
X(X_regular),
X(X_except),
- X(M_except)
#undef X
};
X(X_regular),
X(X_except),
X(res),
- X(M_except),
#undef X
};
case iro_Alloc :
case iro_Bound :
case iro_CopyB :
- return get_irn_n(node, pn_Generic_M_regular);
+ return get_irn_n(node, pn_Generic_M);
case iro_Bad :
case iro_Unknown:
return node;
DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP);
break;
- case pn_CopyB_M_except:
case pn_CopyB_X_except:
DBG_OPT_EXC_REM(proj);
proj = get_irg_bad(current_ir_graph);
DBG_OPT_EXC_REM(proj);
proj = new_r_Jmp(get_nodes_block(copyb));
break;
- case pn_CopyB_M_except:
case pn_CopyB_X_except:
DBG_OPT_EXC_REM(proj);
proj = get_irg_bad(get_irn_irg(proj));
ins[3] = new_r_Const_long(irg, mode_Iu, n_blocks);
call = new_r_Call(bb, get_irg_initial_mem(irg), symconst, 4, ins, init_type);
- ret = new_r_Return(bb, new_r_Proj(bb, call, mode_M, pn_Call_M_regular), 0, NULL);
+ ret = new_r_Return(bb, new_r_Proj(bb, call, mode_M, pn_Call_M), 0, NULL);
mature_immBlock(bb);
add_immBlock_pred(get_irg_end_block(irg), ret);
ASSERT_AND_RET_DBG(
(
- (proj == pn_InstOf_M_regular && mode == mode_M) ||
+ (proj == pn_InstOf_M && mode == mode_M) ||
(proj == pn_InstOf_X_regular && mode == mode_X) ||
(proj == pn_InstOf_X_except && mode == mode_X) ||
- (proj == pn_InstOf_res && mode_is_reference(mode)) ||
- (proj == pn_InstOf_M_except && mode == mode_M)
+ (proj == pn_InstOf_res && mode_is_reference(mode))
),
"wrong Proj from InstOf", 0,
show_proj_failure(p);
ASSERT_AND_RET_DBG(
(
- (proj == pn_Call_M_regular && mode == mode_M) ||
+ (proj == pn_Call_M && mode == mode_M) ||
(proj == pn_Call_X_regular && mode == mode_X) ||
(proj == pn_Call_X_except && mode == mode_X) ||
(proj == pn_Call_T_result && mode == mode_T) ||
- (proj == pn_Call_M_except && mode == mode_M) ||
(proj == pn_Call_P_value_res_base && mode_is_reference(mode))
),
"wrong Proj from Call", 0,
ASSERT_AND_RET(
!is_NoMem(get_Call_mem(n)),
"Exception Proj from FunctionCall", 0);
- else if (proj == pn_Call_M_regular || proj == pn_Call_M_except)
+ else if (proj == pn_Call_M)
ASSERT_AND_RET(
(!is_NoMem(get_Call_mem(n)) || 1),
"Memory Proj from FunctionCall", 0);
ASSERT_AND_RET_DBG(
(
- (proj == pn_CopyB_M_regular && mode == mode_M) ||
+ (proj == pn_CopyB_M && mode == mode_M) ||
(proj == pn_CopyB_X_regular && mode == mode_X) ||
- (proj == pn_CopyB_X_except && mode == mode_X) ||
- (proj == pn_CopyB_M_except && mode == mode_M)
+ (proj == pn_CopyB_X_except && mode == mode_X)
),
"wrong Proj from CopyB", 0,
show_proj_failure(p);
/* get rid of the CopyB */
turn_into_tuple(p, pn_CopyB_max);
- set_Tuple_pred(p, pn_CopyB_M_regular, mem);
- set_Tuple_pred(p, pn_CopyB_M_except, get_irg_bad(irg));
+ set_Tuple_pred(p, pn_CopyB_M, mem);
set_Tuple_pred(p, pn_CopyB_X_regular, new_r_Jmp(blk));
set_Tuple_pred(p, pn_CopyB_X_except, get_irg_bad(irg));
++n_args;
pred,
tp
);
- mem = new_r_Proj(bl, copy, mode_M, pn_CopyB_M_regular);
+ mem = new_r_Proj(bl, copy, mode_M, pn_CopyB_M);
}
}
if (lp->flags & LF_RETURN_HIDDEN) {
}
turn_into_tuple(irn, pn_CopyB_max);
- set_Tuple_pred(irn, pn_CopyB_M_regular, mem);
+ set_Tuple_pred(irn, pn_CopyB_M, mem);
set_Tuple_pred(irn, pn_CopyB_X_regular, get_irg_bad(irg));
set_Tuple_pred(irn, pn_CopyB_X_except, get_irg_bad(irg));
- set_Tuple_pred(irn, pn_CopyB_M_except, get_irg_bad(irg));
}
/**
case pn_Div_M: /* Memory result. */
/* reroute to the call */
set_Proj_pred(proj, call);
- set_Proj_proj(proj, pn_Call_M_except);
+ set_Proj_proj(proj, pn_Call_M);
break;
case pn_Div_X_except: /* Execution result if exception occurred. */
/* reroute to the call */
case pn_Mod_M: /* Memory result. */
/* reroute to the call */
set_Proj_pred(proj, call);
- set_Proj_proj(proj, pn_Call_M_except);
+ set_Proj_proj(proj, pn_Call_M);
break;
case pn_Mod_X_except: /* Execution result if exception occurred. */
/* reroute to the call */
case pn_DivMod_M: /* Memory result. */
/* reroute to the first call */
set_Proj_pred(proj, callDiv ? callDiv : (callMod ? callMod : mem));
- set_Proj_proj(proj, pn_Call_M_except);
+ set_Proj_proj(proj, pn_Call_M);
break;
case pn_DivMod_X_except: /* Execution result if exception occurred. */
/* reroute to the first call */
irn = new_r_Tuple(block, 1, &irn);
turn_into_tuple(call, pn_Call_max);
- set_Tuple_pred(call, pn_Call_M_regular, mem);
+ set_Tuple_pred(call, pn_Call_M, mem);
set_Tuple_pred(call, pn_Call_X_regular, reg_jmp);
set_Tuple_pred(call, pn_Call_X_except, exc_jmp);
set_Tuple_pred(call, pn_Call_T_result, irn);
- set_Tuple_pred(call, pn_Call_M_except, mem);
set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
} /* replace_call */
for (i = 0; i < n_proj; ++i)
set_Tuple_pred(node, i, new_r_Bad(irg));
if (rt->mem_proj_nr >= 0)
- set_Tuple_pred(node, rt->mem_proj_nr, new_r_Proj(bl, call, mode_M, pn_Call_M_regular));
+ set_Tuple_pred(node, rt->mem_proj_nr, new_r_Proj(bl, call, mode_M, pn_Call_M));
if (!is_NoMem(mem)) {
/* Exceptions can only be handled with real memory */
if (rt->regular_proj_nr >= 0)
if (rt->exc_proj_nr >= 0)
set_Tuple_pred(node, rt->exc_proj_nr, new_r_Proj(bl, call, mode_X, pn_Call_X_except));
if (rt->exc_mem_proj_nr >= 0)
- set_Tuple_pred(node, rt->mem_proj_nr, new_r_Proj(bl, call, mode_M, pn_Call_M_except));
+ set_Tuple_pred(node, rt->mem_proj_nr, new_r_Proj(bl, call, mode_M, pn_Call_M));
}
if (rt->res_proj_nr >= 0)
mem = get_Call_mem(call);
blk = get_nodes_block(call);
turn_into_tuple(call, pn_Call_max);
- set_Tuple_pred(call, pn_Call_M_regular, mem);
- set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
- set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg));
- set_Tuple_pred(call, pn_Call_M_except, mem);
+ set_Tuple_pred(call, pn_Call_M, mem);
+ set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(blk));
+ set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
+ set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg));
set_Tuple_pred(call, pn_Call_P_value_res_base, new_r_Bad(irg));
++env->nr_deads;
/* collect the Proj's in the Proj list */
switch (get_Proj_proj(node)) {
- case pn_Call_M_regular:
+ case pn_Call_M:
case pn_Call_X_except:
case pn_Call_X_regular:
- case pn_Call_M_except:
set_irn_link(node, ctx->proj_list);
ctx->proj_list = node;
break;
assert(get_irn_mode(mem) == mode_M);
switch (get_Proj_proj(proj)) {
- case pn_Call_M_regular: {
+ case pn_Call_M: {
/* in dead code there might be cycles where proj == mem */
if (proj != mem)
exchange(proj, mem);
break;
}
case pn_Call_X_except:
- case pn_Call_M_except:
exc_changed = 1;
exchange(proj, get_irg_bad(irg));
break;
/* collect the Proj's in the Proj list */
switch (get_Proj_proj(node)) {
- case pn_Call_M_regular:
+ case pn_Call_M:
case pn_Call_X_except:
case pn_Call_X_regular:
- case pn_Call_M_except:
set_irn_link(node, ctx->proj_list);
ctx->proj_list = node;
break;
/* kill any exception flow */
switch (get_Proj_proj(proj)) {
case pn_Call_X_except:
- case pn_Call_M_except:
exc_changed = 1;
exchange(proj, get_irg_bad(irg));
break;
}
/** How a Call's exception control flow is represented at the call site. */
enum exc_mode {
	exc_handler,   /**< There is a handler. */
	exc_no_handler /**< Exception handling not represented. */
};
/* Inlines a method at the given call site. */
ir_node **args_in;
ir_node *ret, *phi;
int arity, n_ret, n_exc, n_res, i, n, j, rem_opt, irn_arity, n_params;
+ int n_mem_phi;
enum exc_mode exc_handling;
ir_type *called_frame, *curr_frame, *mtp, *ctp;
ir_entity *ent;
for the Call node, or do we branch directly to End on an exception?
exc_handling:
0 There is a handler.
- 1 Branches to End.
2 Exception handling not represented in Firm. -- */
{
- ir_node *proj, *Mproj = NULL, *Xproj = NULL;
+ ir_node *Xproj = NULL;
+ ir_node *proj;
for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
long proj_nr = get_Proj_proj(proj);
if (proj_nr == pn_Call_X_except) Xproj = proj;
- if (proj_nr == pn_Call_M_except) Mproj = proj;
}
- if (Mproj) { assert(Xproj); exc_handling = exc_handler; } /* Mproj */
- else if (Xproj) { exc_handling = exc_to_end; } /* !Mproj && Xproj */
- else { exc_handling = exc_no_handler; } /* !Mproj && !Xproj */
+ exc_handling = Xproj != NULL ? exc_handler : exc_no_handler;
}
/* create the argument tuple */
Add Phi node if there was more than one Return. -- */
turn_into_tuple(post_call, pn_Call_max);
/* First the Memory-Phi */
- n_ret = 0;
+ n_mem_phi = 0;
for (i = 0; i < arity; i++) {
ret = get_Block_cfgpred(end_bl, i);
if (is_Return(ret)) {
- cf_pred[n_ret] = get_Return_mem(ret);
- n_ret++;
+ cf_pred[n_mem_phi++] = get_Return_mem(ret);
+ }
+ /* memory output for some exceptions is directly connected to End */
+ if (is_Call(ret)) {
+ cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 3);
+ } else if (is_fragile_op(ret)) {
+ /* We rely that all cfops have the memory output at the same position. */
+ cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 0);
+ } else if (is_Raise(ret)) {
+ cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 1);
}
}
- phi = new_Phi(n_ret, cf_pred, mode_M);
- set_Tuple_pred(call, pn_Call_M_regular, phi);
+ phi = new_Phi(n_mem_phi, cf_pred, mode_M);
+ set_Tuple_pred(call, pn_Call_M, phi);
/* Conserve Phi-list for further inlinings -- but might be optimized */
if (get_nodes_block(phi) == post_bl) {
set_irn_link(phi, get_irn_link(post_bl));
set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
/* Finally the exception control flow.
- We have two (three) possible situations:
- First if the Call branches to an exception handler: We need to add a Phi node to
+ We have two possible situations:
+ First if the Call branches to an exception handler:
+ We need to add a Phi node to
collect the memory containing the exception objects. Further we need
to add another block to get a correct representation of this Phi. To
this block we add a Jmp that resolves into the X output of the Call
when the Call is turned into a tuple.
- Second the Call branches to End, the exception is not handled. Just
- add all inlined exception branches to the End node.
- Third: there is no Exception edge at all. Handle as case two. */
+ Second: There is no exception edge. Just add all inlined exception
+ branches to the End node.
+ */
if (exc_handling == exc_handler) {
n_exc = 0;
for (i = 0; i < arity; i++) {
if (n_exc > 0) {
ir_node *block = new_Block(n_exc, cf_pred);
set_cur_block(block);
-
set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
- /* The Phi for the memories with the exception objects */
- n_exc = 0;
- for (i = 0; i < arity; i++) {
- ir_node *ret;
- ret = skip_Proj(get_Block_cfgpred(end_bl, i));
- if (is_Call(ret)) {
- cf_pred[n_exc] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 3);
- n_exc++;
- } else if (is_fragile_op(ret)) {
- /* We rely that all cfops have the memory output at the same position. */
- cf_pred[n_exc] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 0);
- n_exc++;
- } else if (is_Raise(ret)) {
- cf_pred[n_exc] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 1);
- n_exc++;
- }
- }
- set_Tuple_pred(call, pn_Call_M_except, new_Phi(n_exc, cf_pred, mode_M));
} else {
set_Tuple_pred(call, pn_Call_X_except, new_Bad());
- set_Tuple_pred(call, pn_Call_M_except, new_Bad());
}
} else {
ir_node *main_end_bl;
end_preds[main_end_bl_arity + i] = cf_pred[i];
set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
set_Tuple_pred(call, pn_Call_X_except, new_Bad());
- set_Tuple_pred(call, pn_Call_M_except, new_Bad());
free(end_preds);
}
free(res_pred);
case pn_Call_X_except:
m->flags |= FLAG_EXCEPTION;
break;
- case pn_Call_M_regular:
+ case pn_Call_M:
m->mem = proj;
break;
}
case pn_Generic_X_except:
m->flags |= FLAG_EXCEPTION;
break;
- case pn_Generic_M_regular:
+ case pn_Generic_M:
m->mem = proj;
break;
}
set_Tuple_pred(call, pn_Call_X_regular, jmp);
set_Tuple_pred(call, pn_Call_X_except, bad);
set_Tuple_pred(call, pn_Call_T_result, tuple);
- set_Tuple_pred(call, pn_Call_M_except, mem);
set_Tuple_pred(call, pn_Call_P_value_res_base, bad);
for (i = 0; i < env->n_ress; ++i) {
}
''')
+# not used - as we have the pn_ declarations in libfirm/irnode.h where they
+# contain informative comments
+# {% for node in nodes %}
+# {% if node.outs %}
+# typedef enum {
+# {%- for out in node.outs %}
+# pn_{{node.name}}_{{out}},
+# {%- endfor %}
+# pn_{{node.name}}_max
+# } pn_{{node.name}};
+# {% endif %}
+# {% endfor %}
+
irnode_h_template = env.from_string('''
/* Warning: automatically generated code */
class Builtin(Op):
ins = [ "mem" ]
arity = "variable"
- outs = [ "M_regular", "X_regular", "X_except", "T_result", "M_except", "P_value_res_base" ]
+ outs = [ "M", "X_regular", "X_except", "T_result", "P_value_res_base" ]
flags = [ "uses_memory" ]
attrs = [
dict(
class Call(Op):
ins = [ "mem", "ptr" ]
arity = "variable"
- outs = [ "M_regular", "X_regular", "X_except", "T_result", "M_except", "P_value_res_base" ]
+ outs = [ "M", "X_regular", "X_except", "T_result", "P_value_res_base" ]
flags = [ "fragile", "uses_memory" ]
attrs = [
dict(
class InstOf(Op):
ins = [ "store", "obj" ]
- outs = [ "M", "X_regular", "X_except", "res", "M_except" ]
+ outs = [ "M", "X_regular", "X_except", "res" ]
flags = [ "highlevel" ]
attrs = [
dict(