/* Blocks with more than one predecessor must be header blocks */
ASSERT_AND_RET(get_Block_n_cfgpreds(n) == 1, "partBlock with more than one predecessor", 0);
- pred = get_Block_cfgpred(n, 0);
- if (is_Proj(pred)) {
- /* the predecessor MUST be a regular Proj */
- ir_node *frag_op = get_Proj_pred(pred);
- ASSERT_AND_RET(is_fragile_op(frag_op) && get_Proj_proj(pred) == pn_Generic_X_regular,
- "partBlock with non-regular predecessor", 0);
+ if (get_irg_phase_state(irg) != phase_backend) {
+ pred = get_Block_cfgpred(n, 0);
+ if (is_Proj(pred)) {
+ /* the predecessor MUST be a regular Proj */
+ ir_node *frag_op = get_Proj_pred(pred);
+ ASSERT_AND_RET(
+ is_fragile_op(frag_op) && get_Proj_proj(pred) == pn_Generic_X_regular,
+ "partBlock with non-regular predecessor", 0);
+ } else {
+ /* We allow Jmps to be predecessors of partBlocks. This can happen due to optimization
+ of fragile nodes during construction. It does not violate our assumption of dominance,
+ so we allow it. */
+ ASSERT_AND_RET(is_Jmp(pred) || is_Bad(pred),
+ "partBlock with non-regular predecessor", 0);
+ }
} else {
- /* We allow Jmps to be predecessors of partBlocks. This can happen due to optimization
- of fragile nodes during construction. It does not violate our assumption of dominance
- so let it. */
- ASSERT_AND_RET(is_Jmp(pred) || is_Bad(pred),
- "partBlock with non-regular predecessor", 0);
+ /* relax this check in the backend: Bound nodes have probably been lowered into conditional jumps */
}
}
(mymode ==op1mode && mymode == op2mode && mode_is_data(op1mode)) ||
/* Pointer Sub: BB x ref x int --> ref */
(op1mode == mymode && mode_is_int(op2mode) && mode_is_reference(mymode)) ||
- /* Pointer Sub: BB x int x ref --> ref */
- (mode_is_int(op1mode) && op2mode == mymode && mode_is_reference(mymode)) ||
/* Pointer Sub: BB x ref x ref --> int */
(op1mode == op2mode && mode_is_reference(op2mode) && mode_is_int(mymode))
),
"Sub node", 0,
show_binop_failure(n, "/* common Sub: BB x numP x numP --> numP */ |\n"
"/* Pointer Sub: BB x ref x int --> ref */ |\n"
- "/* Pointer Sub: BB x int x ref --> ref */ |\n"
"/* Pointer Sub: BB x ref x ref --> int */" );
);
return 1;
#define verify_node_Shrs verify_node_Shift
/**
- * verify a Rot node
+ * verify a Rotl node
*/
-static int verify_node_Rot(ir_node *n, ir_graph *irg) {
+static int verify_node_Rotl(ir_node *n, ir_graph *irg) {
ir_mode *mymode = get_irn_mode(n);
- ir_mode *op1mode = get_irn_mode(get_Rot_left(n));
- ir_mode *op2mode = get_irn_mode(get_Rot_right(n));
+ ir_mode *op1mode = get_irn_mode(get_Rotl_left(n));
+ ir_mode *op2mode = get_irn_mode(get_Rotl_right(n));
(void) irg;
ASSERT_AND_RET_DBG(
- /* Rot: BB x int x int --> int */
+ /* Rotl: BB x int x int --> int */
mode_is_int(op1mode) &&
mode_is_int(op2mode) &&
mymode == op1mode,
- "Rot node", 0,
- show_binop_failure(n, "/* Rot: BB x int x int --> int */");
+ "Rotl node", 0,
+ show_binop_failure(n, "/* Rotl: BB x int x int --> int */");
);
return 1;
}
!mode_is_signed(op2mode) &&
mymode == mode_T,
"Alloc node", 0,
- show_binop_failure(n, "/* Alloc: BB x M x int_u --> M x X x ref */");
+ show_node_failure(n);
);
return 1;
}
CASE(Shl);
CASE(Shr);
CASE(Shrs);
- CASE(Rot);
+ CASE(Rotl);
CASE(Conv);
CASE(Cast);
CASE(Phi);