#define BINOP_Right_Low 2
#define BINOP_Right_High 3
+/**
+ * Replace a call by a tuple of l_res, h_res.
+ */
static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph *irg, ir_node *block) {
ir_node *res, *in[2];
turn_into_tuple(call, pn_Call_max);
set_Tuple_pred(call, pn_Call_M_regular, get_irg_no_mem(irg));
+ set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(irg, block));
set_Tuple_pred(call, pn_Call_X_except, get_irg_bad(irg));
set_Tuple_pred(call, pn_Call_T_result, res);
set_Tuple_pred(call, pn_Call_M_except, get_irg_no_mem(irg));
* This is useful if a node returning a tuple is removed, but the Projs
* extracting values from the tuple are not available.
*/
-void turn_into_tuple (ir_node *node, int arity)
+void turn_into_tuple(ir_node *node, int arity)
{
assert(node);
set_irn_op(node, op_Tuple);
/** Exchanges two nodes by conserving edges leaving old (i.e.,
pointers pointing to old). Turns the old node into an Id. */
-void exchange (ir_node *old, ir_node *nw);
+void exchange(ir_node *old, ir_node *nw);
/** Turns a node into a "useless" Tuple.
*
* @param node The node to be turned into a tuple.
* @param arity The number of values formed into a Tuple.
*/
-void turn_into_tuple (ir_node *node, int arity);
+void turn_into_tuple(ir_node *node, int arity);
/** Walks over the passed ir graph and collects all Phi nodes as a
* list built with the link field in their corresponding block.
*/
void part_block(ir_node *node);
-#endif
+#endif /* FIRM_IR_IRGMOD_H */
arity = get_irn_arity(end_bl); /* arity = n_exc + n_ret */
n_res = get_method_n_ress(get_Call_type(call));
- res_pred = xmalloc (n_res * sizeof(*res_pred));
- cf_pred = xmalloc (arity * sizeof(*res_pred));
+ res_pred = xmalloc(n_res * sizeof(*res_pred));
+ /* sizeof must refer to the object being allocated (was *res_pred; both are
+    ir_node* arrays, but the idiom sizeof(*cf_pred) keeps it correct if the
+    types ever diverge). */
+ cf_pred = xmalloc(arity * sizeof(*cf_pred));
set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */
/* -- Build a Tuple for all results of the method.
Add Phi node if there was more than one Return. -- */
- turn_into_tuple(post_call, 4);
+ turn_into_tuple(post_call, 4); /* FIXME: is the 4 correct here? pn_Call now has more projections. */
/* First the Memory-Phi */
n_ret = 0;
for (i = 0; i < arity; i++) {
/**
* Copy all dependencies from a node to another.
- * @param tgt The node which sould be enriched.
+ * @param tgt The node which should be enriched.
* @param src The node whose dependencies shall be copied.
*/
void add_irn_deps(ir_node *tgt, ir_node *src);
* predecessors are removed, the node has the same predecessors in
* both views.
* @@@ Maybe better: arity is zero if no cg preds. */
-void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in);
-void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred);
+void set_Block_cg_cfgpred_arr(ir_node *node, int arity, ir_node **in);
+void set_Block_cg_cfgpred(ir_node *node, int pos, ir_node *pred);
/* @@@ not supported */
-ir_node **get_Block_cg_cfgpred_arr(ir_node * node);
+ir_node **get_Block_cg_cfgpred_arr(ir_node *node);
/** Returns the number of interprocedural predecessors. 0 if none. */
-int get_Block_cg_n_cfgpreds(ir_node * node);
+int get_Block_cg_n_cfgpreds(ir_node *node);
/** Return the interprocedural predecessor at position pos. */
-ir_node *get_Block_cg_cfgpred(ir_node * node, int pos);
-/* frees the memory. */
-void remove_Block_cg_cfgpred_arr(ir_node * node);
+ir_node *get_Block_cg_cfgpred(ir_node *node, int pos);
+/** Frees the memory allocated for interprocedural predecessors. */
+void remove_Block_cg_cfgpred_arr(ir_node *node);
-/** returns the extended basic block a block belongs to */
+/** Returns the extended basic block a block belongs to. */
ir_extblk *get_Block_extbb(const ir_node *block);
-/** sets the extended basic block a block belongs to */
+/** Sets the extended basic block a block belongs to. */
void set_Block_extbb(ir_node *block, ir_extblk *extblk);
/** Return the number of Keep alive node. */
ir_node *get_Return_res(ir_node *node, int pos);
void set_Return_res(ir_node *node, int pos, ir_node *res);
+/**
+ * Possible classes for constant classification.
+ */
typedef enum {
CNST_NULL = 0, /**< The node is a const(0). */
CNST_ONE = +1, /**< The node is a const(1). */
*/
typedef enum {
pn_Call_M_regular = 0, /**< The memory result. */
- pn_Call_X_except = 1, /**< The control flow result branching to the exception handler */
- pn_Call_T_result = 2, /**< The tuple containing all (0, 1, 2, ...) results */
- pn_Call_M_except = 3, /**< The memory result in case the called method terminated with
- an exception */
- pn_Call_P_value_res_base = 4,/**< A pointer to the memory region containing copied results
+ pn_Call_X_regular = 1, /**< The control flow result when no exception occurs. */
+ pn_Call_X_except = 2, /**< The control flow result branching to the exception handler. */
+ pn_Call_T_result = 3, /**< The tuple containing all (0, 1, 2, ...) results. */
+ pn_Call_M_except = 4, /**< The memory result in case the called method terminated with
+ an exception. */
+ pn_Call_P_value_res_base = 5,/**< A pointer to the memory region containing copied results
passed by value (for compound result types). */
- pn_Call_max = 5 /**< number of projections from a Call */
+ pn_Call_max = 6 /**< number of projections from a Call */
} pn_Call; /* Projection numbers for Call. */
#define pn_Call_M pn_Call_M_regular
* Projection numbers for Quot: use for Proj nodes!
*/
typedef enum {
- pn_Quot_M, /**< Memory result. */
+ pn_Quot_M, /**< Memory result. */
+ pn_Quot_X_regular, /**< Execution result if no exception occurred. */
pn_Quot_X_except, /**< Execution result if exception occurred. */
pn_Quot_res, /**< Result of computation. */
pn_Quot_max /**< number of projections from a Quot */
* Projection numbers for DivMod: use for Proj nodes!
*/
typedef enum {
- pn_DivMod_M, /**< Memory result. */
+ pn_DivMod_M, /**< Memory result. */
+ pn_DivMod_X_regular, /**< Execution result if no exception occurred. */
pn_DivMod_X_except, /**< Execution result if exception occurred. */
pn_DivMod_res_div, /**< Result of computation a / b. */
pn_DivMod_res_mod, /**< Result of computation a % b. */
* Projection numbers for Div: use for Proj nodes!
*/
typedef enum {
- pn_Div_M, /**< Memory result. */
+ pn_Div_M, /**< Memory result. */
+ pn_Div_X_regular, /**< Execution result if no exception occurred. */
pn_Div_X_except, /**< Execution result if exception occurred. */
pn_Div_res, /**< Result of computation. */
pn_Div_max /**< number of projections from a Div */
*/
typedef enum {
pn_Mod_M, /**< Memory result. */
+ pn_Mod_X_regular, /**< Execution result if no exception occurred. */
pn_Mod_X_except, /**< Execution result if exception occurred. */
pn_Mod_res, /**< Result of computation. */
pn_Mod_max /**< number of projections from a Mod */
* Projection numbers for Load: use for Proj nodes!
*/
typedef enum {
- pn_Load_M, /**< Memory result. */
+ pn_Load_M, /**< Memory result. */
+ pn_Load_X_regular, /**< Execution result if no exception occurred. */
pn_Load_X_except, /**< Execution result if exception occurred. */
pn_Load_res, /**< Result of load operation. */
pn_Load_max /**< number of projections from a Load */
* Projection numbers for Store: use for Proj nodes!
*/
typedef enum {
- pn_Store_M, /**< Memory result. */
+ pn_Store_M, /**< Memory result. */
+ pn_Store_X_regular, /**< Execution result if no exception occurred. */
pn_Store_X_except, /**< Execution result if exception occurred. */
pn_Store_max /**< number of projections from a Store */
} pn_Store; /* Projection numbers for Store. */
*/
typedef enum {
pn_Alloc_M, /**< Memory result. */
+ pn_Alloc_X_regular, /**< Execution result if no exception occurred. */
pn_Alloc_X_except, /**< Execution result if exception occurred. */
pn_Alloc_res, /**< Result of allocation. */
pn_Alloc_max /**< number of projections from an Alloc */
*/
typedef enum {
pn_CopyB_M_regular = 0, /**< The memory result. */
- pn_CopyB_X_except = 1, /**< The control flow result branching to the exception handler */
- pn_CopyB_M_except = 2, /**< The memory result in case the runtime function terminated with
+ pn_CopyB_X_regular = 1, /**< Execution result if no exception occurred. */
+ pn_CopyB_X_except = 2, /**< The control flow result branching to the exception handler */
+ pn_CopyB_M_except = 3, /**< The memory result in case the runtime function terminated with
an exception */
- pn_CopyB_max = 3 /**< number of projections from a CopyB */
+ pn_CopyB_max = 4 /**< number of projections from a CopyB */
} pn_CopyB; /* Projection numbers for CopyB. */
#define pn_CopyB_M pn_CopyB_M_regular
*/
typedef enum {
pn_InstOf_M_regular = 0, /**< The memory result. */
- pn_InstOf_X_except = 1, /**< The control flow result branching to the exception handler */
- pn_InstOf_res = 2, /**< The checked object pointer. */
- pn_InstOf_M_except = 3, /**< The memory result in case the runtime function terminated with
+ pn_InstOf_X_regular = 1, /**< Execution result if no exception occurred. */
+ pn_InstOf_X_except = 2, /**< The control flow result branching to the exception handler */
+ pn_InstOf_res = 3, /**< The checked object pointer. */
+ pn_InstOf_M_except = 4, /**< The memory result in case the runtime function terminated with
an exception */
- pn_InstOf_max = 4 /**< number of projections from an InstOf */
+ pn_InstOf_max = 5 /**< number of projections from an InstOf */
} pn_InstOf;
#define pn_InstOf_M pn_InstOf_M_regular
*/
typedef enum {
pn_Bound_M = 0, /**< The memory result. */
- pn_Bound_X_except = 1, /**< The control flow result branching to the exception handler */
- pn_Bound_res = 2, /**< The checked index. */
- pn_Bound_max = 3 /**< number of projections from a Bound */
+ pn_Bound_X_regular = 1, /**< Execution result if no exception occurred. */
+ pn_Bound_X_except = 2, /**< The control flow result branching to the exception handler */
+ pn_Bound_res = 3, /**< The checked index. */
+ pn_Bound_max = 4 /**< number of projections from a Bound */
} pn_Bound;
/** Returns the memory input of a Bound operation. */
if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */
/* Turn Div into a tuple (mem, bad, a) */
ir_node *mem = get_Div_mem(n);
+ ir_node *blk = get_nodes_block(n);
turn_into_tuple(n, pn_Div_max);
- set_Tuple_pred(n, pn_Div_M, mem);
- set_Tuple_pred(n, pn_Div_X_except, new_Bad()); /* no exception */
- set_Tuple_pred(n, pn_Div_res, a);
+ set_Tuple_pred(n, pn_Div_M, mem);
+ set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(current_ir_graph, blk));
+ set_Tuple_pred(n, pn_Div_X_except, new_Bad()); /* no exception */
+ set_Tuple_pred(n, pn_Div_res, a);
}
return n;
} /* equivalent_node_Div */
if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* Quot(x, 1) == x */
/* Turn Quot into a tuple (mem, bad, a) */
ir_node *mem = get_Quot_mem(n);
+ ir_node *blk = get_nodes_block(n);
turn_into_tuple(n, pn_Quot_max);
- set_Tuple_pred(n, pn_Quot_M, mem);
- set_Tuple_pred(n, pn_Quot_X_except, new_Bad()); /* no exception */
- set_Tuple_pred(n, pn_Quot_res, a);
+ set_Tuple_pred(n, pn_Quot_M, mem);
+ set_Tuple_pred(n, pn_Quot_X_regular, new_r_Jmp(current_ir_graph, blk));
+ set_Tuple_pred(n, pn_Quot_X_except, new_Bad()); /* no exception */
+ set_Tuple_pred(n, pn_Quot_res, a);
}
return n;
} /* equivalent_node_Quot */
* Optimize a / 1 = a.
*/
static ir_node *equivalent_node_DivMod(ir_node *n) {
- ir_node *a = get_DivMod_left(n);
ir_node *b = get_DivMod_right(n);
/* Div is not commutative. */
if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */
/* Turn DivMod into a tuple (mem, bad, a, 0) */
+ ir_node *a = get_DivMod_left(n);
ir_node *mem = get_Div_mem(n);
- ir_mode *mode = get_irn_mode(b);
+ ir_node *blk = get_nodes_block(n);
+ ir_mode *mode = get_DivMod_resmode(n);
turn_into_tuple(n, pn_DivMod_max);
- set_Tuple_pred(n, pn_DivMod_M, mem);
- set_Tuple_pred(n, pn_DivMod_X_except, new_Bad()); /* no exception */
- set_Tuple_pred(n, pn_DivMod_res_div, a);
- set_Tuple_pred(n, pn_DivMod_res_mod, new_Const(mode, get_mode_null(mode)));
+ set_Tuple_pred(n, pn_DivMod_M, mem);
+ set_Tuple_pred(n, pn_DivMod_X_regular, new_r_Jmp(current_ir_graph, blk));
+ set_Tuple_pred(n, pn_DivMod_X_except, new_Bad()); /* no exception */
+ set_Tuple_pred(n, pn_DivMod_res_div, a);
+ set_Tuple_pred(n, pn_DivMod_res_mod, new_Const(mode, get_mode_null(mode)));
}
return n;
} /* equivalent_node_DivMod */
if (a == b) {
/* Turn CopyB into a tuple (mem, bad, bad) */
ir_node *mem = get_CopyB_mem(n);
+ ir_node *blk = get_nodes_block(n);
turn_into_tuple(n, pn_CopyB_max);
set_Tuple_pred(n, pn_CopyB_M, mem);
+ set_Tuple_pred(n, pn_CopyB_X_regular, new_r_Jmp(current_ir_graph, blk));
set_Tuple_pred(n, pn_CopyB_X_except, new_Bad()); /* no exception */
set_Tuple_pred(n, pn_CopyB_M_except, new_Bad());
}
if (ret_tuple) {
/* Turn Bound into a tuple (mem, bad, idx) */
ir_node *mem = get_Bound_mem(n);
+ ir_node *blk = get_nodes_block(n);
turn_into_tuple(n, pn_Bound_max);
- set_Tuple_pred(n, pn_Bound_M, mem);
- set_Tuple_pred(n, pn_Bound_X_except, new_Bad()); /* no exception */
- set_Tuple_pred(n, pn_Bound_res, idx);
+ set_Tuple_pred(n, pn_Bound_M, mem);
+ set_Tuple_pred(n, pn_Bound_X_regular, new_r_Jmp(current_ir_graph, blk)); /* normal control flow */
+ set_Tuple_pred(n, pn_Bound_X_except, new_Bad()); /* no exception */
+ set_Tuple_pred(n, pn_Bound_res, idx);
}
return n;
} /* equivalent_node_Bound */
if (value != n) {
/* Turn Div into a tuple (mem, bad, value) */
ir_node *mem = get_Div_mem(n);
+ ir_node *blk = get_nodes_block(n);
turn_into_tuple(n, pn_Div_max);
- set_Tuple_pred(n, pn_Div_M, mem);
- set_Tuple_pred(n, pn_Div_X_except, new_Bad());
- set_Tuple_pred(n, pn_Div_res, value);
+ set_Tuple_pred(n, pn_Div_M, mem);
+ set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(current_ir_graph, blk));
+ set_Tuple_pred(n, pn_Div_X_except, new_Bad());
+ set_Tuple_pred(n, pn_Div_res, value);
}
return n;
} /* transform_node_Div */
if (value != n) {
/* Turn Mod into a tuple (mem, bad, value) */
ir_node *mem = get_Mod_mem(n);
+ ir_node *blk = get_nodes_block(n);
turn_into_tuple(n, pn_Mod_max);
- set_Tuple_pred(n, pn_Mod_M, mem);
- set_Tuple_pred(n, pn_Mod_X_except, new_Bad());
- set_Tuple_pred(n, pn_Mod_res, value);
+ set_Tuple_pred(n, pn_Mod_M, mem);
+ set_Tuple_pred(n, pn_Mod_X_regular, new_r_Jmp(current_ir_graph, blk));
+ set_Tuple_pred(n, pn_Mod_X_except, new_Bad());
+ set_Tuple_pred(n, pn_Mod_res, value);
}
return n;
} /* transform_node_Mod */
if (evaluated) { /* replace by tuple */
ir_node *mem = get_DivMod_mem(n);
+ ir_node *blk = get_nodes_block(n);
turn_into_tuple(n, pn_DivMod_max);
- set_Tuple_pred(n, pn_DivMod_M, mem);
- set_Tuple_pred(n, pn_DivMod_X_except, new_Bad()); /* no exception */
- set_Tuple_pred(n, pn_DivMod_res_div, a);
+ set_Tuple_pred(n, pn_DivMod_M, mem);
+ set_Tuple_pred(n, pn_DivMod_X_regular, new_r_Jmp(current_ir_graph, blk));
+ set_Tuple_pred(n, pn_DivMod_X_except, new_Bad()); /* no exception */
+ set_Tuple_pred(n, pn_DivMod_res_div, a);
set_Tuple_pred(n, pn_DivMod_res_mod, b);
}
*/
static void transform_allocs(ir_graph *irg, walk_env_t *env)
{
- ir_node *alloc, *next, *mem, *sel, *size;
+ ir_node *alloc, *next, *mem, *sel, *size, *blk;
ir_type *ftp, *atp, *tp;
ir_entity *ent;
char name[128];
DBG((dbgHandle, LEVEL_1, "%+F allocation of %+F unused, deleted.\n", irg, alloc));
mem = get_Alloc_mem(alloc);
+ blk = get_nodes_block(alloc);
turn_into_tuple(alloc, pn_Alloc_max);
set_Tuple_pred(alloc, pn_Alloc_M, mem);
+ set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(irg, blk));
set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg));
++env->nr_deads;
turn_into_tuple(alloc, pn_Alloc_max);
set_Tuple_pred(alloc, pn_Alloc_M, mem);
+ set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(irg, blk));
set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg));
set_Tuple_pred(alloc, pn_Alloc_res, sel);
*/
static void transform_alloc_calls(ir_graph *irg, walk_env_t *env)
{
- ir_node *call, *next, *mem, *size;
+ ir_node *call, *next, *mem, *size, *blk;
ir_type *ftp, *atp, *tp;
/* kill all dead allocs */
DBG((dbgHandle, LEVEL_1, "%+F allocation of %+F unused, deleted.\n", irg, call));
mem = get_Call_mem(call);
+ blk = get_nodes_block(call);
turn_into_tuple(call, pn_Call_max);
set_Tuple_pred(call, pn_Call_M_regular, mem);
+ set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(irg, blk));
set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg));
set_Tuple_pred(call, pn_Call_M_except, mem);
val = new_d_Conv(get_irn_dbg_info(node), val, mode);
turn_into_tuple(node, pn_Load_max);
- set_Tuple_pred(node, pn_Load_M, mem);
- set_Tuple_pred(node, pn_Load_res, val);
- set_Tuple_pred(node, pn_Load_X_except, new_Bad());
+ set_Tuple_pred(node, pn_Load_M, mem);
+ set_Tuple_pred(node, pn_Load_res, val);
+ set_Tuple_pred(node, pn_Load_X_regular, new_r_Jmp(current_ir_graph, block));
+ set_Tuple_pred(node, pn_Load_X_except, new_Bad());
} else {
l = obstack_alloc(&env->obst, sizeof(*l));
l->node = node;
value_arr[vnum] = val;
mem = get_Store_mem(node);
+ block = get_nodes_block(node);
turn_into_tuple(node, pn_Store_max);
- set_Tuple_pred(node, pn_Store_M, mem);
- set_Tuple_pred(node, pn_Store_X_except, new_Bad());
+ set_Tuple_pred(node, pn_Store_M, mem);
+ set_Tuple_pred(node, pn_Store_X_regular, new_r_Jmp(current_ir_graph, block));
+ set_Tuple_pred(node, pn_Store_X_except, new_Bad());
} else if (op == op_Phi && get_irn_mode(node) == mode_M) {
/*
* found a memory Phi: Here, we must create new Phi nodes
val = new_Unknown(env->modes[l->vnum]);
}
- mem = get_Load_mem(load);
/* Beware: A Load can contain a hidden conversion in Firm.
Handle this here. */
mode = get_Load_mode(load);
if (mode != get_irn_mode(val))
val = new_d_Conv(get_irn_dbg_info(load), val, mode);
+ mem = get_Load_mem(load);
+
turn_into_tuple(load, pn_Load_max);
set_Tuple_pred(load, pn_Load_M, mem);
set_Tuple_pred(load, pn_Load_res, val);
+ /* BUG fix: the added Jmp is the regular (non-exception) control flow
+    successor, so it must be pn_Load_X_regular — pn_Load_X_except was set
+    twice here, and X_regular would have been left dangling.  Matches the
+    identical Load-replacement pattern used elsewhere in this pass. */
+ set_Tuple_pred(load, pn_Load_X_regular, new_r_Jmp(current_ir_graph, block));
set_Tuple_pred(load, pn_Load_X_except, new_Bad());
}
}