/*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
*
* This file is part of libFirm.
*
#endif /* #ifndef NDEBUG */
-/** If the address is Sel or SymConst, return the entity. */
+/**
+ * If the address is Sel or SymConst, return the entity.
+ *
+ * @param ptr the node representing the address
+ */
static ir_entity *get_ptr_entity(ir_node *ptr) {
if (get_irn_op(ptr) == op_Sel) {
return get_Sel_entity(ptr);
- } else if ((get_irn_op(ptr) == op_SymConst) && (get_SymConst_kind(ptr) == symconst_addr_ent)) {
+ } else if (is_SymConst_addr_ent(ptr)) {
return get_SymConst_entity(ptr);
}
return NULL;
(proj == pn_Start_X_initial_exec && mode == mode_X) ||
(proj == pn_Start_M && mode == mode_M) ||
(proj == pn_Start_P_frame_base && mode_is_reference(mode)) ||
- (proj == pn_Start_P_globals && mode_is_reference(mode)) ||
(proj == pn_Start_P_tls && mode_is_reference(mode)) ||
(proj == pn_Start_T_args && mode == mode_T) ||
(proj == pn_Start_P_value_arg_base && mode_is_reference(mode)) ||
"wrong Proj from Call", 0,
show_proj_failure(p);
);
+ /* if we have exception flow, we must have a real Memory input */
if (proj == pn_Call_X_regular)
ASSERT_AND_RET(
get_irn_op(get_Call_mem(n)) != op_NoMem,
"wrong Proj from Cmp", 0,
show_proj_failure(p);
);
+ ASSERT_AND_RET_DBG(
+ (mode_is_float(get_irn_mode(get_Cmp_left(n))) || !(proj & pn_Cmp_Uo)),
+ "unordered Proj for non-float Cmp", 0,
+ show_proj_failure(p);
+ );
return 1;
}
ASSERT_AND_RET(is_Block(mb) || is_Bad(mb), "Block node with wrong MacroBlock", 0);
if (is_Block(mb) && mb != n) {
+ ir_node *pred;
+
/* Blocks with more than one predecessor must be header blocks */
ASSERT_AND_RET(get_Block_n_cfgpreds(n) == 1, "partBlock with more than one predecessor", 0);
+ if (get_irg_phase_state(irg) != phase_backend) {
+ pred = get_Block_cfgpred(n, 0);
+ if (is_Proj(pred)) {
+ /* the predecessor MUST be a regular Proj */
+ ir_node *frag_op = get_Proj_pred(pred);
+ ASSERT_AND_RET(
+ is_fragile_op(frag_op) && get_Proj_proj(pred) == pn_Generic_X_regular,
+ "partBlock with non-regular predecessor", 0);
+ } else {
+ /* We allow Jmps to be predecessors of partBlocks. This can happen due to optimization
of fragile nodes during construction. It does not violate our dominance
assumption, so we allow it. */
+ ASSERT_AND_RET(is_Jmp(pred) || is_Bad(pred),
+ "partBlock with non-regular predecessor", 0);
+ }
+ } else {
+ /* relax in backend: Bound nodes are probably lowered into conditional jumps */
+ }
}
for (i = get_Block_n_cfgpreds(n) - 1; i >= 0; --i) {
ir_node *pred = get_Block_cfgpred(n, i);
ASSERT_AND_RET(
- (
- is_Bad(pred) ||
- is_Unknown(pred) ||
- (get_irn_mode(pred) == mode_X)
- ),
- "Block node", 0);
+ is_Bad(pred) || (get_irn_mode(pred) == mode_X),
+ "Block node must have a mode_X predecessor", 0);
}
if (n == get_irg_end_block(irg) && get_irg_phase_state(irg) != phase_backend)
- /* End block may only have Return, Raise or fragile ops as preds. */
+ /* End block may only have Return, Raise or fragile ops as preds. */
for (i = get_Block_n_cfgpreds(n) - 1; i >= 0; --i) {
ir_node *pred = skip_Proj(get_Block_cfgpred(n, i));
if (is_Proj(pred) || is_Tuple(pred))
get_Call_n_params(n) >= get_method_n_params(mt),
"Number of args for Call doesn't match number of args in variadic type.",
0,
- fprintf(stderr, "Call has %d params, method %s type %d\n",
- get_Call_n_params(n), get_type_name(mt), get_method_n_params(mt));
+ ir_fprintf(stderr, "Call %+F has %d params, method %s type %d\n",
+ n, get_Call_n_params(n), get_type_name(mt), get_method_n_params(mt));
);
} else {
- ASSERT_AND_RET(
+ ASSERT_AND_RET_DBG(
get_Call_n_params(n) == get_method_n_params(mt),
"Number of args for Call doesn't match number of args in non variadic type.",
- 0);
+ 0,
+ ir_fprintf(stderr, "Call %+F has %d params, method %s type %d\n",
+ n, get_Call_n_params(n), get_type_name(mt), get_method_n_params(mt));
+ );
}
for (i = 0; i < get_method_n_params(mt); i++) {
#define verify_node_Shrs verify_node_Shift
/**
- * verify a Rot node
+ * verify a Rotl node
*/
-static int verify_node_Rot(ir_node *n, ir_graph *irg) {
+static int verify_node_Rotl(ir_node *n, ir_graph *irg) {
ir_mode *mymode = get_irn_mode(n);
- ir_mode *op1mode = get_irn_mode(get_Rot_left(n));
- ir_mode *op2mode = get_irn_mode(get_Rot_right(n));
+ ir_mode *op1mode = get_irn_mode(get_Rotl_left(n));
+ ir_mode *op2mode = get_irn_mode(get_Rotl_right(n));
(void) irg;
ASSERT_AND_RET_DBG(
- /* Rot: BB x int x int --> int */
+ /* Rotl: BB x int x int --> int */
mode_is_int(op1mode) &&
mode_is_int(op2mode) &&
mymode == op1mode,
- "Rot node", 0,
- show_binop_failure(n, "/* Rot: BB x int x int --> int */");
+ "Rotl node", 0,
+ show_binop_failure(n, "/* Rotl: BB x int x int --> int */");
);
return 1;
}
!mode_is_signed(op2mode) &&
mymode == mode_T,
"Alloc node", 0,
- show_binop_failure(n, "/* Alloc: BB x M x int_u --> M x X x ref */");
+ show_node_failure(n);
);
return 1;
}
rem = current_ir_graph;
current_ir_graph = irg;
- last_irg_error = NULL;
+
+#ifndef NDEBUG
+ last_irg_error = NULL;
+#endif /* NDEBUG */
assert(get_irg_pinned(irg) == op_pin_state_pinned && "Verification need pinned graph");
CASE(Shl);
CASE(Shr);
CASE(Shrs);
- CASE(Rot);
+ CASE(Rotl);
CASE(Conv);
CASE(Cast);
CASE(Phi);