(proj == pn_Start_X_initial_exec && mode == mode_X) ||
(proj == pn_Start_M && mode == mode_M) ||
(proj == pn_Start_P_frame_base && mode_is_reference(mode)) ||
- (proj == pn_Start_P_globals && mode_is_reference(mode)) ||
(proj == pn_Start_P_tls && mode_is_reference(mode)) ||
(proj == pn_Start_T_args && mode == mode_T) ||
(proj == pn_Start_P_value_arg_base && mode_is_reference(mode)) ||
ASSERT_AND_RET(is_Block(mb) || is_Bad(mb), "Block node with wrong MacroBlock", 0);
if (is_Block(mb) && mb != n) {
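/* mb != n: this block is a partBlock, its MacroBlock header is a different block */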
+ ir_node *pred;
+
/* Blocks with more than one predecessor must be header blocks */
ASSERT_AND_RET(get_Block_n_cfgpreds(n) == 1, "partBlock with more than one predecessor", 0);
+ if (get_irg_phase_state(irg) != phase_backend) {
+ pred = get_Block_cfgpred(n, 0);
+ if (is_Proj(pred)) {
+ /* the predecessor MUST be a regular Proj */
+ ir_node *frag_op = get_Proj_pred(pred);
+ ASSERT_AND_RET(
+ is_fragile_op(frag_op) && get_Proj_proj(pred) == pn_Generic_X_regular,
+ "partBlock with non-regular predecessor", 0);
+ } else {
+ /* We also allow Jmps as predecessors of partBlocks. This can happen when fragile
+ nodes are optimized during construction. It does not violate our dominance
+ assumption, so we accept it here. */
+ ASSERT_AND_RET(is_Jmp(pred) || is_Bad(pred),
+ "partBlock with non-regular predecessor", 0);
+ }
+ } else {
+ /* relaxed in the backend: Bound nodes may already have been lowered into conditional jumps */
+ }
}
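/* every control flow predecessor of a Block must be Bad or produce mode_X */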
for (i = get_Block_n_cfgpreds(n) - 1; i >= 0; --i) {
ir_node *pred = get_Block_cfgpred(n, i);
ASSERT_AND_RET(
- (
- is_Bad(pred) ||
- is_Unknown(pred) ||
- (get_irn_mode(pred) == mode_X)
- ),
- "Block node", 0);
+ is_Bad(pred) || (get_irn_mode(pred) == mode_X),
+ "Block node must have a mode_X predecessor", 0);
}
if (n == get_irg_end_block(irg) && get_irg_phase_state(irg) != phase_backend)
- /* End block may only have Return, Raise or fragile ops as preds. */
+ /* End block may only have Return, Raise or fragile ops as preds. */
for (i = get_Block_n_cfgpreds(n) - 1; i >= 0; --i) {
ir_node *pred = skip_Proj(get_Block_cfgpred(n, i));
if (is_Proj(pred) || is_Tuple(pred))
#define verify_node_Shrs verify_node_Shift
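/* Shrs shares the generic shift check (see verify_node_Shift) */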
/**
- * verify a Rot node
+ * verify a Rotl node
*/
-static int verify_node_Rot(ir_node *n, ir_graph *irg) {
+static int verify_node_Rotl(ir_node *n, ir_graph *irg) {
ir_mode *mymode = get_irn_mode(n);
- ir_mode *op1mode = get_irn_mode(get_Rot_left(n));
- ir_mode *op2mode = get_irn_mode(get_Rot_right(n));
+ ir_mode *op1mode = get_irn_mode(get_Rotl_left(n));
+ ir_mode *op2mode = get_irn_mode(get_Rotl_right(n));
(void) irg;
ASSERT_AND_RET_DBG(
- /* Rot: BB x int x int --> int */
+ /* Rotl: BB x int x int --> int */
mode_is_int(op1mode) &&
mode_is_int(op2mode) &&
mymode == op1mode,
- "Rot node", 0,
- show_binop_failure(n, "/* Rot: BB x int x int --> int */");
+ "Rotl node", 0,
+ show_binop_failure(n, "/* Rotl: BB x int x int --> int */");
);
return 1;
}
!mode_is_signed(op2mode) &&
mymode == mode_T,
"Alloc node", 0,
- show_binop_failure(n, "/* Alloc: BB x M x int_u --> M x X x ref */");
+ show_node_failure(n);
);
return 1;
}
CASE(Shl);
CASE(Shr);
CASE(Shrs);
- CASE(Rot);
+ CASE(Rotl);
CASE(Conv);
CASE(Cast);
CASE(Phi);