X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firvrfy.c;h=12d7313a6f79c6f137f751a9172b4ef6d57e7f8b;hb=b87f2c1918a831bd5ed1068f4658cb24542b021d;hp=468903b79c2127c959eb795756d66cd3fe1c987b;hpb=fa0cff1844c34ba8b24db7b6b3a35f9091773071;p=libfirm

diff --git a/ir/ir/irvrfy.c b/ir/ir/irvrfy.c
index 468903b79..12d7313a6 100644
--- a/ir/ir/irvrfy.c
+++ b/ir/ir/irvrfy.c
@@ -304,7 +304,6 @@ static int verify_node_Proj_Start(ir_node *n, ir_node *p) {
 			(proj == pn_Start_X_initial_exec && mode == mode_X) ||
 			(proj == pn_Start_M && mode == mode_M) ||
 			(proj == pn_Start_P_frame_base && mode_is_reference(mode)) ||
-			(proj == pn_Start_P_globals && mode_is_reference(mode)) ||
 			(proj == pn_Start_P_tls && mode_is_reference(mode)) ||
 			(proj == pn_Start_T_args && mode == mode_T) ||
 			(proj == pn_Start_P_value_arg_base && mode_is_reference(mode)) ||
@@ -869,18 +868,23 @@ static int verify_node_Block(ir_node *n, ir_graph *irg) {
 			/* Blocks with more than one predecessor must be header blocks */
 			ASSERT_AND_RET(get_Block_n_cfgpreds(n) == 1,
 				"partBlock with more than one predecessor", 0);
-			pred = get_Block_cfgpred(n, 0);
-			if (is_Proj(pred)) {
-				/* the predecessor MUST be a regular Proj */
-				ir_node *frag_op = get_Proj_pred(pred);
-				ASSERT_AND_RET(is_fragile_op(frag_op) && get_Proj_proj(pred) == pn_Generic_X_regular,
-					"partBlock with non-regular predecessor", 0);
+			if (get_irg_phase_state(irg) != phase_backend) {
+				pred = get_Block_cfgpred(n, 0);
+				if (is_Proj(pred)) {
+					/* the predecessor MUST be a regular Proj */
+					ir_node *frag_op = get_Proj_pred(pred);
+					ASSERT_AND_RET(
+						is_fragile_op(frag_op) && get_Proj_proj(pred) == pn_Generic_X_regular,
+						"partBlock with non-regular predecessor", 0);
+				} else {
+					/* We allow Jmps to be predecessors of partBlocks. This can happen due to optimization
+					   of fragile nodes during construction. It does not violate our assumption of dominance
+					   so let it. */
+					ASSERT_AND_RET(is_Jmp(pred) || is_Bad(pred),
+						"partBlock with non-regular predecessor", 0);
+				}
 			} else {
-				/* We allow Jmps to be predecessors of partBlocks. This can happen due to optimization
-				   of fragile nodes during construction. It does not violate our assumption of dominance
-				   so let it. */
-				ASSERT_AND_RET(is_Jmp(pred) || is_Bad(pred),
-					"partBlock with non-regular predecessor", 0);
+				/* relax in backend: Bound nodes are probably lowered into conditional jumps */
 			}
 		}
 
@@ -1275,15 +1279,12 @@ static int verify_node_Sub(ir_node *n, ir_graph *irg) {
 			(mymode ==op1mode && mymode == op2mode && mode_is_data(op1mode)) ||
 			/* Pointer Sub: BB x ref x int --> ref */
 			(op1mode == mymode && mode_is_int(op2mode) && mode_is_reference(mymode)) ||
-			/* Pointer Sub: BB x int x ref --> ref */
-			(mode_is_int(op1mode) && op2mode == mymode && mode_is_reference(mymode)) ||
 			/* Pointer Sub: BB x ref x ref --> int */
 			(op1mode == op2mode && mode_is_reference(op2mode) && mode_is_int(mymode))
 		),
 		"Sub node", 0,
 		show_binop_failure(n, "/* common Sub: BB x numP x numP --> numP */ |\n"
 			"/* Pointer Sub: BB x ref x int --> ref */ |\n"
-			"/* Pointer Sub: BB x int x ref --> ref */ |\n"
 			"/* Pointer Sub: BB x ref x ref --> int */" );
 	);
 	return 1;
@@ -1539,21 +1540,21 @@ static int verify_node_Shift(ir_node *n, ir_graph *irg) {
 #define verify_node_Shrs verify_node_Shift
 
 /**
- * verify a Rot node
+ * verify a Rotl node
  */
-static int verify_node_Rot(ir_node *n, ir_graph *irg) {
+static int verify_node_Rotl(ir_node *n, ir_graph *irg) {
 	ir_mode *mymode  = get_irn_mode(n);
-	ir_mode *op1mode = get_irn_mode(get_Rot_left(n));
-	ir_mode *op2mode = get_irn_mode(get_Rot_right(n));
+	ir_mode *op1mode = get_irn_mode(get_Rotl_left(n));
+	ir_mode *op2mode = get_irn_mode(get_Rotl_right(n));
 	(void) irg;
 
 	ASSERT_AND_RET_DBG(
-		/* Rot: BB x int x int --> int */
+		/* Rotl: BB x int x int --> int */
 		mode_is_int(op1mode) &&
 		mode_is_int(op2mode) &&
 		mymode == op1mode,
-		"Rot node", 0,
-		show_binop_failure(n, "/* Rot: BB x int x int --> int */");
+		"Rotl node", 0,
+		show_binop_failure(n, "/* Rotl: BB x int x int --> int */");
 	);
 	return 1;
 }
@@ -1750,7 +1751,7 @@ static int verify_node_Alloc(ir_node *n, ir_graph *irg) {
 		!mode_is_signed(op2mode) &&
 		mymode == mode_T,
 		"Alloc node", 0,
-		show_binop_failure(n, "/* Alloc: BB x M x int_u --> M x X x ref */");
+		show_node_failure(n);
 	);
 	return 1;
 }
@@ -2251,7 +2252,7 @@ void firm_set_default_verifyer(ir_opcode code, ir_op_ops *ops) {
 	CASE(Shl);
 	CASE(Shr);
 	CASE(Shrs);
-	CASE(Rot);
+	CASE(Rotl);
 	CASE(Conv);
 	CASE(Cast);
 	CASE(Phi);