* If optimize == 0 no optimizations are performed at all.
* Default: optimize == 1.
*/
-void set_optimize (int value);
+void set_optimize(int value);
int get_optimize(void);
/** Enables/Disables constant folding optimization.
* - simplification of tests ( !(a < b) ==> (a >= b))
* Default: opt_constant_folding == 1.
*/
-void set_opt_constant_folding (int value);
-
-/** Enables/Disables output of information about loop unrolling.
- */
-void set_opt_loop_unrolling_verbose (int value);
+void set_opt_constant_folding(int value);
/** Enables/Disables common subexpression elimination.
*
* If opt_cse == 1 perform common subexpression elimination.
* Default: opt_cse == 1.
*/
-void set_opt_cse (int value);
+void set_opt_cse(int value);
/** Returns constant folding optimization setting. */
int get_opt_cse(void);
* right after a call to local_optimize with global cse turned on.
* Default: opt_global_cse == 0.
*/
-void set_opt_global_cse (int value);
+void set_opt_global_cse(int value);
/** Enables/Disables strength reduction.
*
*
* Default: opt_strength_red = 1;
*/
-void set_opt_strength_red (int value);
-
-/** Enables/Disables output of information about strength reduction.
- */
-void set_opt_strength_red_verbose (int value);
+void set_opt_strength_red(int value);
/** Enables/Disables unreachable code elimination.
*
* If the flag is turned on Sel nodes can be replaced by Const nodes representing
* the address of a function.
*/
-void set_opt_dyn_meth_dispatch (int value);
-int get_opt_dyn_meth_dispatch (void);
+void set_opt_dyn_meth_dispatch(int value);
+int get_opt_dyn_meth_dispatch(void);
/** Enable/Disable type optimization of cast nodes.
*
* Controls the optimizations in tropt.h. Default: on.
*/
-void set_opt_optimize_class_casts (int value);
-void set_opt_optimize_class_casts_verbose (int value);
+void set_opt_optimize_class_casts(int value);
/** Restricts the behavior of cast optimization.
*
* Enable/Disable scalar replacement optimization.
*/
void set_opt_scalar_replacement(int value);
-void set_opt_scalar_replacement_verbose(int value);
/**
* Enable/Disable Null exception in Load and Store nodes only.
*
* @note ATTENTION: not all such transformations are guarded by a flag.
*/
-void set_opt_normalize (int value);
+void set_opt_normalize(int value);
/** Enable/Disable precise exception context.
*
#include "irgwalk.h"
#include "ident.h"
#include "trouts.h"
+#include "debug.h"
#include "error.h"
-#define VERBOSE_UNKNOWN_TYPE(s) printf s
+DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL);
static ir_type *phi_cycle_type = NULL;
*/
static void precompute_pointer_types(void) {
#if 0
-  int i;
-  set_type_link(firm_unknown_type, firm_unknown_type);
-  set_type_link(firm_none_type, firm_unknown_type);
-
-  for (i = get_irp_n_types() - 1; i >= 0; --i)
-    set_type_link(get_irp_type(i), (void *)firm_unknown_type);
-
-  for (i = get_irp_n_types() - 1; i >= 0; --i) {
-    ir_type *tp = get_irp_type(i);
-    if (is_Pointer_type(tp))
-      set_type_link(get_pointer_points_to_type(tp), (void *)tp);
-  }
+	int i;
+	set_type_link(firm_unknown_type, firm_unknown_type);
+	set_type_link(firm_none_type, firm_unknown_type);
+
+	for (i = get_irp_n_types() - 1; i >= 0; --i)
+		set_type_link(get_irp_type(i), (void *)firm_unknown_type);
+
+	for (i = get_irp_n_types() - 1; i >= 0; --i) {
+		ir_type *tp = get_irp_type(i);
+		if (is_Pointer_type(tp))
+			set_type_link(get_pointer_points_to_type(tp), (void *)tp);
+	}
#else
-  compute_trouts();
+	/* Let the trout (type out-edge) module record the pointer-to relations. */
+	compute_trouts();
#endif
}
*/
static ir_type *find_pointer_type_to (ir_type *tp) {
#if 0
-  return (ir_type *)get_type_link(tp);
+	return (ir_type *)get_type_link(tp);
#else
-  if (get_type_n_pointertypes_to(tp) > 0)
-    return get_type_pointertype_to(tp, 0);
-  else
-    return firm_unknown_type;
+	/* Consult trout info; fall back to the unknown type when no
+	   pointer type to tp has been recorded. */
+	if (get_type_n_pointertypes_to(tp) > 0)
+		return get_type_pointertype_to(tp, 0);
+	else
+		return firm_unknown_type;
#endif
}
* If a type cannot be determined, return @p firm_none_type.
*/
static ir_type *find_type_for_Proj(ir_node *n) {
- ir_type *tp;
-
- /* Avoid nested Tuples. */
- ir_node *pred = skip_Tuple(get_Proj_pred(n));
- ir_mode *m = get_irn_mode(n);
-
- if (m == mode_T ||
- m == mode_BB ||
- m == mode_X ||
- m == mode_M ||
- m == mode_b )
- return firm_none_type;
-
- switch (get_irn_opcode(pred)) {
- case iro_Proj: {
- ir_node *pred_pred;
- /* Deal with Start / Call here: we need to know the Proj Nr. */
- assert(get_irn_mode(pred) == mode_T);
- pred_pred = get_Proj_pred(pred);
- if (get_irn_op(pred_pred) == op_Start) {
- ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
- tp = get_method_param_type(mtp, get_Proj_proj(n));
- } else if (get_irn_op(pred_pred) == op_Call) {
- ir_type *mtp = get_Call_type(pred_pred);
- tp = get_method_res_type(mtp, get_Proj_proj(n));
- } else if (get_irn_op(pred_pred) == op_Tuple) {
- panic("Encountered nested Tuple");
- } else {
- VERBOSE_UNKNOWN_TYPE(("Proj %ld from Proj from ??: unknown type\n", get_irn_node_nr(n)));
- tp = firm_unknown_type;
- }
- } break;
- case iro_Start: {
- /* frame pointer, globals and tls */
- switch (get_Proj_proj(n)) {
- case pn_Start_P_frame_base:
- tp = find_pointer_type_to(get_irg_frame_type(get_irn_irg(pred)));
- break;
- case pn_Start_P_globals:
- tp = find_pointer_type_to(get_glob_type());
- break;
- case pn_Start_P_tls:
- tp = find_pointer_type_to(get_tls_type());
- break;
- case pn_Start_P_value_arg_base:
- VERBOSE_UNKNOWN_TYPE(("Value arg base proj %ld from Start: unknown type\n", get_irn_node_nr(n)));
- tp = firm_unknown_type; /* find_pointer_type_to(get....(get_entity_type(get_irg_entity(get_irn_irg(pred))))); */
- break;
- default:
- VERBOSE_UNKNOWN_TYPE(("Proj %ld %ld from Start: unknown type\n", get_Proj_proj(n), get_irn_node_nr(n)));
- tp = firm_unknown_type;
- }
- } break;
- case iro_Call: {
- /* value args pointer */
- if (get_Proj_proj(n) == pn_Call_P_value_res_base) {
- VERBOSE_UNKNOWN_TYPE(("Value res base Proj %ld from Call: unknown type\n", get_irn_node_nr(n)));
- tp = firm_unknown_type; /* find_pointer_type_to(get....get_Call_type(pred)); */
- } else {
- VERBOSE_UNKNOWN_TYPE(("Proj %ld %ld from Call: unknown type\n", get_Proj_proj(n), get_irn_node_nr(n)));
- tp = firm_unknown_type;
- }
- } break;
- case iro_Tuple: {
- tp = compute_irn_type(get_Tuple_pred(pred, get_Proj_proj(n)));
- } break;
- default:
- tp = compute_irn_type(pred);
- }
-
- return tp;
+ ir_type *tp;
+
+ /* Avoid nested Tuples. */
+ ir_node *pred = skip_Tuple(get_Proj_pred(n));
+ ir_mode *m = get_irn_mode(n);
+
+ if (m == mode_T ||
+ m == mode_BB ||
+ m == mode_X ||
+ m == mode_M ||
+ m == mode_b )
+ return firm_none_type;
+
+ switch (get_irn_opcode(pred)) {
+ case iro_Proj: {
+ ir_node *pred_pred;
+ /* Deal with Start / Call here: we need to know the Proj Nr. */
+ assert(get_irn_mode(pred) == mode_T);
+ pred_pred = get_Proj_pred(pred);
+ if (get_irn_op(pred_pred) == op_Start) {
+ ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
+ tp = get_method_param_type(mtp, get_Proj_proj(n));
+ } else if (get_irn_op(pred_pred) == op_Call) {
+ ir_type *mtp = get_Call_type(pred_pred);
+ tp = get_method_res_type(mtp, get_Proj_proj(n));
+ } else if (get_irn_op(pred_pred) == op_Tuple) {
+ panic("Encountered nested Tuple");
+ } else {
+ DB((dbg, SET_LEVEL_1, "Proj %ld from Proj from ??: unknown type\n", get_irn_node_nr(n)));
+ tp = firm_unknown_type;
+ }
+ break;
+ }
+ case iro_Start:
+ /* frame pointer, globals and tls */
+ switch (get_Proj_proj(n)) {
+ case pn_Start_P_frame_base:
+ tp = find_pointer_type_to(get_irg_frame_type(get_irn_irg(pred)));
+ break;
+ case pn_Start_P_globals:
+ tp = find_pointer_type_to(get_glob_type());
+ break;
+ case pn_Start_P_tls:
+ tp = find_pointer_type_to(get_tls_type());
+ break;
+ case pn_Start_P_value_arg_base:
+ DB((dbg, SET_LEVEL_1, "Value arg base proj %ld from Start: unknown type\n", get_irn_node_nr(n)));
+ tp = firm_unknown_type; /* find_pointer_type_to(get....(get_entity_type(get_irg_entity(get_irn_irg(pred))))); */
+ break;
+ default:
+ DB((dbg, SET_LEVEL_1, "Proj %ld %ld from Start: unknown type\n", get_Proj_proj(n), get_irn_node_nr(n)));
+ tp = firm_unknown_type;
+ }
+ break;
+ case iro_Call:
+ /* value args pointer */
+ if (get_Proj_proj(n) == pn_Call_P_value_res_base) {
+ DB((dbg, SET_LEVEL_1, "Value res base Proj %ld from Call: unknown type\n", get_irn_node_nr(n)));
+ tp = firm_unknown_type; /* find_pointer_type_to(get....get_Call_type(pred)); */
+ } else {
+ DB((dbg, SET_LEVEL_1, "Proj %ld %ld from Call: unknown type\n", get_Proj_proj(n), get_irn_node_nr(n)));
+ tp = firm_unknown_type;
+ }
+ break;
+ case iro_Tuple:
+ tp = compute_irn_type(get_Tuple_pred(pred, get_Proj_proj(n)));
+ break;
+ default:
+ tp = compute_irn_type(pred);
+ }
+
+ return tp;
}
/**
* If a type cannot be determined, return @p firm_none_type.
*/
static ir_type *find_type_for_node(ir_node *n) {
- ir_type *tp = firm_unknown_type;
- ir_type *tp1 = NULL, *tp2 = NULL;
- ir_node *a = NULL, *b = NULL;
-
- /* DDMN(n); */
-
- if (is_unop(n)) {
- a = get_unop_op(n);
- tp1 = compute_irn_type(a);
- }
- if (is_binop(n)) {
- a = get_binop_left(n);
- b = get_binop_right(n);
- tp1 = compute_irn_type(a);
- tp2 = compute_irn_type(b);
- }
-
- switch (get_irn_opcode(n)) {
-
- case iro_InstOf: {
- assert(0 && "op_InstOf not supported");
- } break;
-
- /* has no type */
- case iro_Return: {
- /* Check returned type. */
- /*
- int i;
- ir_type *meth_type = get_entity_type(get_irg_entity(current_ir_graph));
- for (i = 0; i < get_method_n_ress(meth_type); i++) {
- ir_type *res_type = get_method_res_type(meth_type, i);
- ir_type *ana_res_type = get_irn_type(get_Return_res(n, i));
- if (ana_res_type == firm_unknown_type) continue;
- if (res_type != ana_res_type && "return value has wrong type") {
- DDMN(n);
- assert(res_type == ana_res_type && "return value has wrong type");
- }
- }
- */
- }
- case iro_Block:
- case iro_Start:
- case iro_End:
- case iro_Jmp:
- case iro_Cond:
- case iro_Raise:
- case iro_Call:
- case iro_Cmp:
- case iro_Store:
- case iro_Free:
- case iro_Sync:
- case iro_Tuple:
- case iro_Bad:
- case iro_NoMem:
- case iro_Break:
- case iro_CallBegin:
- case iro_EndReg:
- case iro_EndExcept:
- break;
-
- /* compute the type */
- case iro_Const: tp = get_Const_type(n); break;
- case iro_SymConst:
- tp = get_SymConst_value_type(n); break;
- case iro_Sel:
- tp = find_pointer_type_to(get_entity_type(get_Sel_entity(n))); break;
- /* asymmetric binops */
- case iro_Shl:
- case iro_Shr:
- case iro_Shrs:
- case iro_Rot:
- tp = tp1; break;
- case iro_Cast:
- tp = get_Cast_type(n); break;
- case iro_Phi: {
- int i;
- int n_preds = get_Phi_n_preds(n);
-
- if (n_preds == 0)
- break;
-
- /* initialize this Phi */
- set_irn_typeinfo_type(n, phi_cycle_type);
-
- /* find a first real type */
- for (i = 0; i < n_preds; ++i) {
- tp1 = compute_irn_type(get_Phi_pred(n, i));
- assert(tp1 != initial_type);
- if ((tp1 != phi_cycle_type) && (tp1 != firm_none_type))
- break;
- }
-
- /* find a second real type */
- tp2 = tp1;
- for (; (i < n_preds); ++i) {
- tp2 = compute_irn_type(get_Phi_pred(n, i));
- if ((tp2 == phi_cycle_type) || (tp2 == firm_none_type)) {
- tp2 = tp1;
- continue;
- }
- if (tp2 != tp1) break;
- }
-
- /* printf("Types in Phi %s and %s \n", get_type_name(tp1), get_type_name(tp2)); */
-
- if (tp1 == tp2) { tp = tp1; break; }
-
- if (get_firm_verbosity() > 55) { // Do not commit 55! should be 1.
- VERBOSE_UNKNOWN_TYPE(("Phi %ld with two different types: %s, %s: unknown type.\n", get_irn_node_nr(n),
- get_type_name(tp1), get_type_name(tp2)));
- }
- tp = firm_unknown_type; /* Test for supertypes? */
- } break;
- case iro_Load: {
- ir_node *a = get_Load_ptr(n);
- if (is_Sel(a))
- tp = get_entity_type(get_Sel_entity(a));
- else if (is_Pointer_type(compute_irn_type(a))) {
- tp = get_pointer_points_to_type(get_irn_typeinfo_type(a));
- if (is_Array_type(tp))
- tp = get_array_element_type(tp);
- } else {
- VERBOSE_UNKNOWN_TYPE(("Load %ld with typeless address. result: unknown type\n", get_irn_node_nr(n)));
- }
- } break;
- case iro_Alloc:
- tp = find_pointer_type_to(get_Alloc_type(n)); break;
- case iro_Proj:
- tp = find_type_for_Proj(n); break;
- case iro_Id:
- tp = compute_irn_type(get_Id_pred(n)); break;
- case iro_Unknown:
- tp = firm_unknown_type; break;
- case iro_Filter:
- assert(0 && "Filter not implemented"); break;
-
- /* catch special cases with fallthrough to binop/unop cases in default. */
- case iro_Sub: {
- if (mode_is_int(get_irn_mode(n)) &&
- mode_is_reference(get_irn_mode(a)) &&
- mode_is_reference(get_irn_mode(b)) ) {
- VERBOSE_UNKNOWN_TYPE(("Sub %ld ptr - ptr = int: unknown type\n", get_irn_node_nr(n)));
- tp = firm_unknown_type; break;
- }
- } /* fall through to Add. */
- case iro_Add: {
- if (mode_is_reference(get_irn_mode(n)) &&
- mode_is_reference(get_irn_mode(a)) &&
- mode_is_int(get_irn_mode(b)) ) {
- tp = tp1; break;
- }
- if (mode_is_reference(get_irn_mode(n)) &&
- mode_is_int(get_irn_mode(a)) &&
- mode_is_reference(get_irn_mode(b)) ) {
- tp = tp2; break;
- }
- goto default_code;
- } break;
- case iro_Mul: {
- if (get_irn_mode(n) != get_irn_mode(a)) {
- VERBOSE_UNKNOWN_TYPE(("Mul %ld int1 * int1 = int2: unknown type\n", get_irn_node_nr(n)));
- tp = firm_unknown_type; break;
- }
- goto default_code;
- } break;
- case iro_Mux: {
- a = get_Mux_true(n);
- b = get_Mux_false(n);
- tp1 = compute_irn_type(a);
- tp2 = compute_irn_type(b);
- if (tp1 == tp2)
- tp = tp1;
- } break;
- case iro_Psi: {
- int i, n_conds = get_Psi_n_conds(n);
- tp1 = compute_irn_type(get_Psi_default(n));
-
- for (i = 0; i < n_conds; ++i) {
- tp2 = compute_irn_type(get_Psi_val(n, i));
- if (tp2 != tp1)
- break;
- }
- if (tp1 == tp2)
- tp = tp1;
- } break;
- case iro_Bound:
- tp = compute_irn_type(get_Bound_index(n));
- break;
- case iro_Confirm:
- tp = compute_irn_type(get_Confirm_value(n));
- break;
- case iro_Conv:
- /* Conv is a unop, but changing the mode implies
- changing the type. */
- break;
-
- default:
+ ir_type *tp = firm_unknown_type;
+ ir_type *tp1 = NULL, *tp2 = NULL;
+ ir_node *a = NULL, *b = NULL;
+
+ if (is_unop(n)) {
+ a = get_unop_op(n);
+ tp1 = compute_irn_type(a);
+ }
+ if (is_binop(n)) {
+ a = get_binop_left(n);
+ b = get_binop_right(n);
+ tp1 = compute_irn_type(a);
+ tp2 = compute_irn_type(b);
+ }
+
+ switch (get_irn_opcode(n)) {
+
+ case iro_InstOf: {
+ assert(0 && "op_InstOf not supported");
+ } break;
+
+ /* has no type */
+ case iro_Return: {
+ /* Check returned type. */
+ /*
+ int i;
+ ir_type *meth_type = get_entity_type(get_irg_entity(current_ir_graph));
+ for (i = 0; i < get_method_n_ress(meth_type); i++) {
+ ir_type *res_type = get_method_res_type(meth_type, i);
+ ir_type *ana_res_type = get_irn_type(get_Return_res(n, i));
+ if (ana_res_type == firm_unknown_type) continue;
+ if (res_type != ana_res_type && "return value has wrong type") {
+ DDMN(n);
+ assert(res_type == ana_res_type && "return value has wrong type");
+ }
+ }
+ */
+ }
+ case iro_Block:
+ case iro_Start:
+ case iro_End:
+ case iro_Jmp:
+ case iro_Cond:
+ case iro_Raise:
+ case iro_Call:
+ case iro_Cmp:
+ case iro_Store:
+ case iro_Free:
+ case iro_Sync:
+ case iro_Tuple:
+ case iro_Bad:
+ case iro_NoMem:
+ case iro_Break:
+ case iro_CallBegin:
+ case iro_EndReg:
+ case iro_EndExcept:
+ break;
+
+ /* compute the type */
+ case iro_Const: tp = get_Const_type(n); break;
+ case iro_SymConst:
+ tp = get_SymConst_value_type(n); break;
+ case iro_Sel:
+ tp = find_pointer_type_to(get_entity_type(get_Sel_entity(n))); break;
+ /* asymmetric binops */
+ case iro_Shl:
+ case iro_Shr:
+ case iro_Shrs:
+ case iro_Rot:
+ tp = tp1; break;
+ case iro_Cast:
+ tp = get_Cast_type(n); break;
+ case iro_Phi: {
+ int i;
+ int n_preds = get_Phi_n_preds(n);
+
+ if (n_preds == 0)
+ break;
+
+ /* initialize this Phi */
+ set_irn_typeinfo_type(n, phi_cycle_type);
+
+ /* find a first real type */
+ for (i = 0; i < n_preds; ++i) {
+ tp1 = compute_irn_type(get_Phi_pred(n, i));
+ assert(tp1 != initial_type);
+ if ((tp1 != phi_cycle_type) && (tp1 != firm_none_type))
+ break;
+ }
+
+ /* find a second real type */
+ tp2 = tp1;
+ for (; (i < n_preds); ++i) {
+ tp2 = compute_irn_type(get_Phi_pred(n, i));
+ if ((tp2 == phi_cycle_type) || (tp2 == firm_none_type)) {
+ tp2 = tp1;
+ continue;
+ }
+ if (tp2 != tp1) break;
+ }
+
+ /* printf("Types in Phi %s and %s \n", get_type_name(tp1), get_type_name(tp2)); */
+
+ if (tp1 == tp2) { tp = tp1; break; }
+
+ DB((dbg, SET_LEVEL_2, "Phi %ld with two different types: %s, %s: unknown type.\n", get_irn_node_nr(n),
+ get_type_name(tp1), get_type_name(tp2)));
+ tp = firm_unknown_type; /* Test for supertypes? */
+ } break;
+
+ case iro_Load: {
+ ir_node *a = get_Load_ptr(n);
+ if (is_Sel(a))
+ tp = get_entity_type(get_Sel_entity(a));
+ else if (is_Pointer_type(compute_irn_type(a))) {
+ tp = get_pointer_points_to_type(get_irn_typeinfo_type(a));
+ if (is_Array_type(tp))
+ tp = get_array_element_type(tp);
+ } else {
+ DB((dbg, SET_LEVEL_1, "Load %ld with typeless address. result: unknown type\n", get_irn_node_nr(n)));
+ }
+ } break;
+ case iro_Alloc:
+ tp = find_pointer_type_to(get_Alloc_type(n)); break;
+ case iro_Proj:
+ tp = find_type_for_Proj(n); break;
+ case iro_Id:
+ tp = compute_irn_type(get_Id_pred(n)); break;
+ case iro_Unknown:
+ tp = firm_unknown_type; break;
+ case iro_Filter:
+ assert(0 && "Filter not implemented"); break;
+
+ /* catch special cases with fallthrough to binop/unop cases in default. */
+ case iro_Sub: {
+ if (mode_is_int(get_irn_mode(n)) &&
+ mode_is_reference(get_irn_mode(a)) &&
+ mode_is_reference(get_irn_mode(b)) ) {
+ DB((dbg, SET_LEVEL_1, "Sub %ld ptr - ptr = int: unknown type\n", get_irn_node_nr(n)));
+ tp = firm_unknown_type; break;
+ }
+ } /* fall through to Add. */
+ case iro_Add: {
+ if (mode_is_reference(get_irn_mode(n)) &&
+ mode_is_reference(get_irn_mode(a)) &&
+ mode_is_int(get_irn_mode(b)) ) {
+ tp = tp1; break;
+ }
+ if (mode_is_reference(get_irn_mode(n)) &&
+ mode_is_int(get_irn_mode(a)) &&
+ mode_is_reference(get_irn_mode(b)) ) {
+ tp = tp2; break;
+ }
+ goto default_code;
+ } break;
+
+ case iro_Mul: {
+ if (get_irn_mode(n) != get_irn_mode(a)) {
+ DB((dbg, SET_LEVEL_1, "Mul %ld int1 * int1 = int2: unknown type\n", get_irn_node_nr(n)));
+ tp = firm_unknown_type; break;
+ }
+ goto default_code;
+ } break;
+
+ case iro_Mux: {
+ a = get_Mux_true(n);
+ b = get_Mux_false(n);
+ tp1 = compute_irn_type(a);
+ tp2 = compute_irn_type(b);
+ if (tp1 == tp2)
+ tp = tp1;
+ } break;
+
+ case iro_Psi: {
+ int i, n_conds = get_Psi_n_conds(n);
+ tp1 = compute_irn_type(get_Psi_default(n));
+
+ for (i = 0; i < n_conds; ++i) {
+ tp2 = compute_irn_type(get_Psi_val(n, i));
+ if (tp2 != tp1)
+ break;
+ }
+ if (tp1 == tp2)
+ tp = tp1;
+ } break;
+
+ case iro_Bound:
+ tp = compute_irn_type(get_Bound_index(n));
+ break;
+ case iro_Confirm:
+ tp = compute_irn_type(get_Confirm_value(n));
+ break;
+ case iro_Conv:
+ /* Conv is a unop, but changing the mode implies
+ changing the type. */
+ break;
+
+ default:
default_code: {
- if (is_unop(n)) {
- /* It's not proper to walk past a Conv, so this case is handled above. */
- tp = tp1;
- break;
- }
-
- if (is_binop(n)) {
- if (tp1 == tp2) {
- tp = tp1;
- break;
- }
- if((tp1 == phi_cycle_type) || (tp2 == phi_cycle_type)) {
- tp = phi_cycle_type;
- break;
- }
- if (get_firm_verbosity() > 55) {
- VERBOSE_UNKNOWN_TYPE(("Binop %ld with two different types: %s, %s: unknown type \n", get_irn_node_nr(n),
- get_type_name(tp1), get_type_name(tp2)));
- }
- tp = firm_unknown_type;
- break;
- }
-
- panic(" not implemented: %+F", n);
- } break; /* default */
- } /* end switch */
-
- return tp;
+ if (is_unop(n)) {
+ /* It's not proper to walk past a Conv, so this case is handled above. */
+ tp = tp1;
+ break;
+ }
+
+ if (is_binop(n)) {
+ if (tp1 == tp2) {
+ tp = tp1;
+ break;
+ }
+ if((tp1 == phi_cycle_type) || (tp2 == phi_cycle_type)) {
+ tp = phi_cycle_type;
+ break;
+ }
+ DB((dbg, SET_LEVEL_2, "Binop %ld with two different types: %s, %s: unknown type \n", get_irn_node_nr(n),
+ get_type_name(tp1), get_type_name(tp2)));
+ tp = firm_unknown_type;
+ break;
+ }
+
+ panic(" not implemented: %+F", n);
+ } break; /* default */
+ } /* end switch */
+
+ return tp;
}
/** Compute the type of an IR node. */
static ir_type *compute_irn_type(ir_node *n) {
-  ir_type *tp = get_irn_typeinfo_type(n);
+	ir_type *tp = get_irn_typeinfo_type(n);
-  if (tp == initial_type) {
-    tp = find_type_for_node(n);
-    set_irn_typeinfo_type(n, tp);
-  }
-
-  return tp;
+	/* Memoized: only nodes still carrying the initial type are (re)computed. */
+	if (tp == initial_type) {
+		tp = find_type_for_node(n);
+		set_irn_typeinfo_type(n, tp);
+	}
+	return tp;
}
/**
* nodes are already computed.
*/
static void compute_type(ir_node *n, void *env) {
-
-  ir_type *tp = get_irn_typeinfo_type(n);
-  (void) env;
-  if (tp == phi_cycle_type) {
-    /* printf(" recomputing for phi_cycle_type "); DDMN(n); */
-    set_irn_typeinfo_type(n, initial_type);
-  }
-  compute_irn_type(n);
+	ir_type *tp = get_irn_typeinfo_type(n);
+	(void) env;
+	/* Nodes left on the tentative Phi-cycle type are recomputed from scratch. */
+	if (tp == phi_cycle_type) {
+		/* printf(" recomputing for phi_cycle_type "); DDMN(n); */
+		set_irn_typeinfo_type(n, initial_type);
+	}
+	compute_irn_type(n);
}
/**
* Compute the types for all nodes of a graph.
*/
static void analyse_irg (ir_graph *irg) {
-  set_irg_typeinfo_state(irg, ir_typeinfo_consistent);
-  irg_walk_graph(irg, NULL, compute_type, NULL);
+	/* Set the state up front: compute_irn_type() relies on typeinfo access. */
+	set_irg_typeinfo_state(irg, ir_typeinfo_consistent);
+	irg_walk_graph(irg, NULL, compute_type, NULL);
}
/**
* computing pointer types for all class and struct types.
*/
static void init_irsimpletype(void) {
-  init_irtypeinfo();
-  if (!phi_cycle_type)
-    phi_cycle_type = new_type_class(new_id_from_str("phi_cycle_type"));
-  precompute_pointer_types();
+	init_irtypeinfo();
+	/* Lazily created sentinel class type marking Phis under computation. */
+	if (!phi_cycle_type)
+		phi_cycle_type = new_type_class(new_id_from_str("phi_cycle_type"));
+	precompute_pointer_types();
}
/* Computes type information for each node in all ir graphs. */
void simple_analyse_types(void) {
-  int i;
-  init_irsimpletype();
-  for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
-    ir_graph *irg = get_irp_irg(i);
-    analyse_irg(irg);
-  }
-  set_irp_typeinfo_state(ir_typeinfo_consistent);
+	int i;
+	/* Debug module replaces the removed VERBOSE_UNKNOWN_TYPE printfs. */
+	FIRM_DBG_REGISTER(dbg, "firm.ana.simpletype");
+
+	init_irsimpletype();
+	for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
+		ir_graph *irg = get_irp_irg(i);
+		analyse_irg(irg);
+	}
+	set_irp_typeinfo_state(ir_typeinfo_consistent);
}
void free_simple_type_information(void) {
-  free_irtypeinfo();
+	free_irtypeinfo();
-  if (phi_cycle_type) {
-    free_type(phi_cycle_type);
-    phi_cycle_type = NULL;
-  }
-  set_irp_typeinfo_state(ir_typeinfo_none);
+	/* Release the sentinel type created lazily in init_irsimpletype(). */
+	if (phi_cycle_type) {
+		free_type(phi_cycle_type);
+		phi_cycle_type = NULL;
+	}
+	set_irp_typeinfo_state(ir_typeinfo_none);
}
#include "funccall_t.h"
#include "irhooks.h"
#include "iredges_t.h"
+#include "tropt.h"
#include "debugger.h"
/* returns the firm root */
firm_init_entity();
/* allocate a hash table. */
init_type_identify(def_params.ti_if);
+ /* class cast optimization */
+ firm_init_class_casts_opt();
/* Init architecture dependent optimizations. */
arch_dep_init(arch_dep_default_factory);
/* verbose is always off on default */
optimization_state_t libFIRM_verb = 0;
-/** The Firm verbosity level */
-int firm_verbosity_level;
-
/* an external flag can be set and get from outside */
#define E_FLAG(name, value, def) \
void set_opt_##name(int flag) { \
set_opt_control_flow_strong_simplification(value);
}
-void set_firm_verbosity (int value) {
- firm_verbosity_level = value;
-}
-
-int (get_firm_verbosity) (void) {
- return _get_firm_verbosity();
-}
-
/* Save the current optimization state. */
void save_optimization_state(optimization_state_t *state) {
*state = libFIRM_opt;
extern optimization_state_t libFIRM_opt, libFIRM_running, libFIRM_verb;
extern firm_verification_t opt_do_node_verification;
-extern int firm_verbosity_level;
-
/** initialises the flags */
void firm_init_flags(void);
#define E_FLAG(name, value, def) \
static INLINE int _get_opt_##name(void) { \
return libFIRM_opt & irf_##name; \
-} \
-static INLINE int get_opt_##name##_verbose(void) { \
- return libFIRM_verb & irf_##name; \
}
/* generate the getter functions for internal access */
#define I_FLAG(name, value, def) \
static INLINE int get_opt_##name(void) { \
return libFIRM_opt & irf_##name; \
-} \
-static INLINE int get_opt_##name##_verbose(void) { \
- return libFIRM_verb & irf_##name; \
}
/* generate getter and setter functions for running flags */
#undef E_FLAG
#undef R_FLAG
-static INLINE int _get_firm_verbosity(void) {
- return firm_verbosity_level;
-}
-
static INLINE int _get_optimize(void) {
return get_opt_optimize();
}
#define get_optimize() _get_optimize()
#define get_opt_cse() _get_opt_cse()
-#define get_firm_verbosity() _get_firm_verbosity()
#define get_opt_dyn_meth_dispatch() _get_opt_dyn_meth_dispatch()
#define get_opt_optimize_class_casts() _get_opt_optimize_class_casts()
#define get_opt_suppress_downcast_optimization() _get_opt_suppress_downcast_optimization()
#include "irgmod.h"
#include "irflag_t.h"
#include "xmalloc.h"
+#include "debug.h"
+#include "tropt.h"
+
+DEBUG_ONLY(static firm_dbg_module_t *dbg;)
/* - statistics ---------------------------------------------- */
* Uses and updates trouts if available.
*/
static ir_type *default_gen_pointer_type_to(ir_type *tp) {
-  ir_type *res = NULL;
-  if (get_trouts_state() == outs_consistent) {
-    if (get_type_n_pointertypes_to(tp) > 0) {
-      res = get_type_pointertype_to(tp, 0);
-    } else {
-      ir_mode *mode = is_Method_type(tp) ? mode_P_code : mode_P_data;
-
-      res = new_type_pointer(mangle_u(get_type_ident(tp), ptr_type_suffix), tp, mode);
-      /* Update trout for pointer types, so we can use it in next call. */
-      add_type_pointertype_to(tp, res);
-    }
-  }
-  else {
-    res = find_pointer_type_to_type(tp);
-    if (res == firm_unknown_type)
-      res = new_type_pointer(mangle_u(get_type_ident(tp), ptr_type_suffix), tp, mode_P_data);
-  }
-
-  return res;
+	ir_type *res = NULL;
+	/* With consistent trout info, reuse an existing pointer type to tp if any. */
+	if (get_trouts_state() == outs_consistent) {
+		if (get_type_n_pointertypes_to(tp) > 0) {
+			res = get_type_pointertype_to(tp, 0);
+		} else {
+			ir_mode *mode = is_Method_type(tp) ? mode_P_code : mode_P_data;
+
+			res = new_type_pointer(mangle_u(get_type_ident(tp), ptr_type_suffix), tp, mode);
+			/* Update trout for pointer types, so we can use it in next call. */
+			add_type_pointertype_to(tp, res);
+		}
+	}
+	else {
+		res = find_pointer_type_to_type(tp);
+		if (res == firm_unknown_type)
+			res = new_type_pointer(mangle_u(get_type_ident(tp), ptr_type_suffix), tp, mode_P_data);
+	}
+
+	return res;
}
/** Return a type that is a depth times pointer to type. */
static ir_type *pointerize_type(ir_type *tp, int depth) {
-  for (; depth > 0; --depth) {
-    tp = gen_pointer_type_to(tp);
-  }
-  return tp;
+	/* Wrap tp into `depth` levels of pointer types. */
+	for (; depth > 0; --depth) {
+		tp = gen_pointer_type_to(tp);
+	}
+	return tp;
}
static ir_node *normalize_values_type(ir_type *totype, ir_node *pred) {
- ir_type *fromtype = get_irn_typeinfo_type(pred);
- ir_node *new_cast = pred;
- int ref_depth = 0;
-
- if (totype == fromtype) return pred; /* Case for optimization! */
-
- while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
- totype = get_pointer_points_to_type(totype);
- fromtype = get_pointer_points_to_type(fromtype);
- ref_depth++;
- }
-
- if (!is_Class_type(totype)) return pred;
- if (!is_Class_type(fromtype)) return pred;
-
- if ((get_class_supertype_index(totype, fromtype) != -1) ||
- (get_class_supertype_index(fromtype, totype) != -1) ) {
- /* It's just what we want ... */
- return pred;
- }
-
- set_cur_block(get_nodes_block(pred));
-
- if (is_SubClass_of(totype, fromtype)) {
- /* downcast */
- while (get_class_subtype_index(fromtype, totype) == -1) {
- /* Insert a cast to a subtype of fromtype. */
- ir_type *new_type = NULL;
- ir_node *new_cast;
- int i, n_subtypes = get_class_n_subtypes(fromtype);
- for (i = 0; i < n_subtypes && !new_type; ++i) {
- ir_type *new_sub = get_class_subtype(fromtype, i);
- if (is_SuperClass_of(new_sub, totype))
- new_type = new_sub;
- }
- assert(new_type);
- fromtype = new_type;
- new_type = pointerize_type(new_type, ref_depth);
- new_cast = new_Cast(pred, new_type);
- pred = new_cast;
- n_casts_normalized ++;
- set_irn_typeinfo_type(new_cast, new_type); /* keep type information up to date. */
- if (get_trouts_state() != outs_none) add_type_cast(new_type, new_cast);
- }
- }
- else {
- assert(is_SuperClass_of(totype, fromtype));
- /* upcast */
- while (get_class_supertype_index(fromtype, totype) == -1) {
- /* Insert a cast to a supertype of fromtype. */
- ir_type *new_type = NULL;
- int i, n_supertypes = get_class_n_supertypes(fromtype);
- for (i = 0; i < n_supertypes && !new_type; ++i) {
- ir_type *new_super = get_class_supertype(fromtype, i);
- if (is_SubClass_of(new_super, totype))
- new_type = new_super;
- }
- assert(new_type);
- fromtype = new_type;
- new_type = pointerize_type(new_type, ref_depth);
- new_cast = new_Cast(pred, new_type);
- pred = new_cast;
- n_casts_normalized ++;
- set_irn_typeinfo_type(new_cast, new_type); /* keep type information up to date. */
- if (get_trouts_state() != outs_none) add_type_cast(new_type, new_cast);
- }
- }
- return new_cast;
+ ir_type *fromtype = get_irn_typeinfo_type(pred);
+ ir_node *new_cast = pred;
+ int ref_depth = 0;
+
+ if (totype == fromtype) return pred; /* Case for optimization! */
+
+ while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
+ totype = get_pointer_points_to_type(totype);
+ fromtype = get_pointer_points_to_type(fromtype);
+ ref_depth++;
+ }
+
+ if (!is_Class_type(totype)) return pred;
+ if (!is_Class_type(fromtype)) return pred;
+
+ if ((get_class_supertype_index(totype, fromtype) != -1) ||
+ (get_class_supertype_index(fromtype, totype) != -1) ) {
+ /* It's just what we want ... */
+ return pred;
+ }
+
+ set_cur_block(get_nodes_block(pred));
+
+ if (is_SubClass_of(totype, fromtype)) {
+ /* downcast */
+ while (get_class_subtype_index(fromtype, totype) == -1) {
+ /* Insert a cast to a subtype of fromtype. */
+ ir_type *new_type = NULL;
+ ir_node *new_cast;
+ int i, n_subtypes = get_class_n_subtypes(fromtype);
+ for (i = 0; i < n_subtypes && !new_type; ++i) {
+ ir_type *new_sub = get_class_subtype(fromtype, i);
+ if (is_SuperClass_of(new_sub, totype))
+ new_type = new_sub;
+ }
+ assert(new_type);
+ fromtype = new_type;
+ new_type = pointerize_type(new_type, ref_depth);
+ new_cast = new_Cast(pred, new_type);
+ pred = new_cast;
+ n_casts_normalized ++;
+ set_irn_typeinfo_type(new_cast, new_type); /* keep type information up to date. */
+ if (get_trouts_state() != outs_none) add_type_cast(new_type, new_cast);
+ }
+ } else {
+ assert(is_SuperClass_of(totype, fromtype));
+ /* upcast */
+ while (get_class_supertype_index(fromtype, totype) == -1) {
+ /* Insert a cast to a supertype of fromtype. */
+ ir_type *new_type = NULL;
+ int i, n_supertypes = get_class_n_supertypes(fromtype);
+ for (i = 0; i < n_supertypes && !new_type; ++i) {
+ ir_type *new_super = get_class_supertype(fromtype, i);
+ if (is_SubClass_of(new_super, totype))
+ new_type = new_super;
+ }
+ assert(new_type);
+ fromtype = new_type;
+ new_type = pointerize_type(new_type, ref_depth);
+ new_cast = new_Cast(pred, new_type);
+ pred = new_cast;
+ n_casts_normalized ++;
+ set_irn_typeinfo_type(new_cast, new_type); /* keep type information up to date. */
+ if (get_trouts_state() != outs_none) add_type_cast(new_type, new_cast);
+ }
+ }
+ return new_cast;
}
-
+/**
+ * Post-Walker.
+ */
static void normalize_irn_class_cast(ir_node *n, void *env) {
-  ir_node *res;
-  (void) env;
-  if (get_irn_op(n) == op_Cast) {
-    ir_node *pred = get_Cast_op(n);
-    ir_type *totype = get_Cast_type(n);
-    res = normalize_values_type(totype, pred);
-    set_Cast_op(n, res);
-  } else if (get_irn_op(n) == op_Call) {
-    int i, n_params = get_Call_n_params(n);
-    ir_type *tp = get_Call_type(n);
-    for (i = 0; i < n_params; ++i) {
-      res = normalize_values_type(get_method_param_type(tp, i), get_Call_param(n, i));
-      set_Call_param(n, i, res);
-    }
-  }
+	ir_node *res;
+	(void) env;
+	if (is_Cast(n)) {
+		ir_node *pred = get_Cast_op(n);
+		ir_type *totype = get_Cast_type(n);
+		res = normalize_values_type(totype, pred);
+		set_Cast_op(n, res);
+	} else if (is_Call(n)) {
+		/* Normalize every actual parameter towards its formal parameter type. */
+		int i, n_params = get_Call_n_params(n);
+		ir_type *tp = get_Call_type(n);
+		for (i = 0; i < n_params; ++i) {
+			res = normalize_values_type(get_method_param_type(tp, i), get_Call_param(n, i));
+			set_Call_param(n, i, res);
+		}
+	}
}
static void pure_normalize_irg_class_casts(ir_graph *irg) {
- assert(get_irg_class_cast_state(irg) != ir_class_casts_any &&
- "Cannot normalize irregular casts.");
- if (get_irg_class_cast_state(irg) == ir_class_casts_normalized) {
- verify_irg_class_cast_state(irg);
- return;
- }
-
- irg_walk_graph(irg, NULL, normalize_irn_class_cast, NULL);
- set_irg_class_cast_state(irg, ir_class_casts_normalized);
+ assert(get_irg_class_cast_state(irg) != ir_class_casts_any &&
+ "Cannot normalize irregular casts.");
+ if (get_irg_class_cast_state(irg) == ir_class_casts_normalized) {
+ verify_irg_class_cast_state(irg);
+ return;
+ }
+
+ irg_walk_graph(irg, NULL, normalize_irn_class_cast, NULL);
+ set_irg_class_cast_state(irg, ir_class_casts_normalized);
}
void normalize_irg_class_casts(ir_graph *irg, gen_pointer_type_to_func gppt_fct) {
- assert(get_irp_typeinfo_state() == ir_typeinfo_consistent);
+ assert(get_irp_typeinfo_state() == ir_typeinfo_consistent);
- if (gppt_fct) gen_pointer_type_to = gppt_fct;
+ if (gppt_fct) gen_pointer_type_to = gppt_fct;
- if (!ptr_type_suffix)
- ptr_type_suffix = new_id_from_str(PTR_TYPE_SUFFIX);
+ if (!ptr_type_suffix)
+ ptr_type_suffix = new_id_from_str(PTR_TYPE_SUFFIX);
- pure_normalize_irg_class_casts(irg);
+ pure_normalize_irg_class_casts(irg);
- gen_pointer_type_to = default_gen_pointer_type_to;
+ gen_pointer_type_to = default_gen_pointer_type_to;
}
void normalize_irp_class_casts(gen_pointer_type_to_func gppt_fct) {
- int i, n_irgs = get_irp_n_irgs();
- if (gppt_fct) gen_pointer_type_to = gppt_fct;
+ int i, n_irgs = get_irp_n_irgs();
+ if (gppt_fct) gen_pointer_type_to = gppt_fct;
- if (get_irp_typeinfo_state() != ir_typeinfo_consistent)
- simple_analyse_types();
+ if (get_irp_typeinfo_state() != ir_typeinfo_consistent)
+ simple_analyse_types();
- for (i = 0; i < n_irgs; ++i) {
- pure_normalize_irg_class_casts(get_irp_irg(i));
- }
+ for (i = 0; i < n_irgs; ++i) {
+ pure_normalize_irg_class_casts(get_irp_irg(i));
+ }
- set_irp_class_cast_state(ir_class_casts_normalized);
- gen_pointer_type_to = default_gen_pointer_type_to;
+ set_irp_class_cast_state(ir_class_casts_normalized);
+ gen_pointer_type_to = default_gen_pointer_type_to;
- if (get_opt_optimize_class_casts_verbose() && get_firm_verbosity()) {
- printf(" Cast normalization: %d Casts inserted.\n", n_casts_normalized);
- }
+ DB((dbg, SET_LEVEL_1, " Cast normalization: %d Casts inserted.\n", n_casts_normalized));
}
* if possible.
*/
static void cancel_out_casts(ir_node *cast) {
- ir_node *orig, *pred = get_Cast_op(cast);
- ir_type *tp_cast, *tp_pred, *tp_orig;
- int ref_depth = 0;
-
- if (get_irn_op(pred) != op_Cast) return;
- orig = get_Cast_op(pred);
-
- tp_cast = get_Cast_type(cast);
- tp_pred = get_Cast_type(pred);
- tp_orig = get_irn_typeinfo_type(orig);
-
- while (is_Pointer_type(tp_cast) &&
- is_Pointer_type(tp_pred) &&
- is_Pointer_type(tp_orig) ) {
- tp_cast = get_pointer_points_to_type(tp_cast);
- tp_pred = get_pointer_points_to_type(tp_pred);
- tp_orig = get_pointer_points_to_type(tp_orig);
- ref_depth++;
- }
-
- if (!is_Class_type(tp_cast)) return;
- if (!is_Class_type(tp_pred)) return;
- if (!is_Class_type(tp_orig)) return;
-
- if (is_SubClass_of(tp_pred, tp_cast) && get_opt_suppress_downcast_optimization())
- return;
-
- if (tp_cast == tp_orig) {
- exchange(cast, orig);
- n_casts_removed += 2;
- return;
- }
-
- if (!(is_SubClass_of (tp_cast, tp_orig) || is_SubClass_of (tp_orig, tp_cast))) {
- /* Avoid (B2)(A)(new B1()) --> (B2)(new B1())
- * if B1 =!> B2 and B2 =!> B1
- */
- return;
- }
-
- if ((is_SubClass_of (tp_cast, tp_pred) && is_SuperClass_of(tp_pred, tp_orig)) ||
- (is_SuperClass_of(tp_cast, tp_pred) && is_SubClass_of (tp_pred, tp_orig)) ) {
- /* Cast --> Pred --> Orig */
- set_Cast_op (cast, orig);
- n_casts_removed ++;
- }
+ ir_node *orig, *pred = get_Cast_op(cast);
+ ir_type *tp_cast, *tp_pred, *tp_orig;
+ int ref_depth = 0;
+
+ if (get_irn_op(pred) != op_Cast) return;
+ orig = get_Cast_op(pred);
+
+ tp_cast = get_Cast_type(cast);
+ tp_pred = get_Cast_type(pred);
+ tp_orig = get_irn_typeinfo_type(orig);
+
+ while (is_Pointer_type(tp_cast) &&
+ is_Pointer_type(tp_pred) &&
+ is_Pointer_type(tp_orig) ) {
+ tp_cast = get_pointer_points_to_type(tp_cast);
+ tp_pred = get_pointer_points_to_type(tp_pred);
+ tp_orig = get_pointer_points_to_type(tp_orig);
+ ref_depth++;
+ }
+
+ if (!is_Class_type(tp_cast)) return;
+ if (!is_Class_type(tp_pred)) return;
+ if (!is_Class_type(tp_orig)) return;
+
+ if (is_SubClass_of(tp_pred, tp_cast) && get_opt_suppress_downcast_optimization())
+ return;
+
+ if (tp_cast == tp_orig) {
+ exchange(cast, orig);
+ n_casts_removed += 2;
+ return;
+ }
+
+ if (!(is_SubClass_of (tp_cast, tp_orig) || is_SubClass_of (tp_orig, tp_cast))) {
+ /* Avoid (B2)(A)(new B1()) --> (B2)(new B1())
+ * if B1 =!> B2 and B2 =!> B1
+ */
+ return;
+ }
+
+ if ((is_SubClass_of (tp_cast, tp_pred) && is_SuperClass_of(tp_pred, tp_orig)) ||
+ (is_SuperClass_of(tp_cast, tp_pred) && is_SubClass_of (tp_pred, tp_orig))) {
+ /* Cast --> Pred --> Orig */
+ set_Cast_op(cast, orig);
+ n_casts_removed ++;
+ }
}
static void concretize_selected_entity(ir_node *sel) {
- ir_node *cast, *ptr = get_Sel_ptr(sel);
- ir_type *orig_tp, *cast_tp;
- ir_entity *new_ent, *sel_ent;
+ ir_node *cast, *ptr = get_Sel_ptr(sel);
+ ir_type *orig_tp, *cast_tp;
+ ir_entity *new_ent, *sel_ent;
- sel_ent = get_Sel_entity(sel);
- cast = get_Sel_ptr(sel);
+ sel_ent = get_Sel_entity(sel);
+ cast = get_Sel_ptr(sel);
- while (get_irn_op(cast) == op_Cast) {
- cast_tp = get_Cast_type(cast);
- ptr = get_Cast_op(cast);
- orig_tp = get_irn_typeinfo_type(ptr);
+ while (get_irn_op(cast) == op_Cast) {
+ cast_tp = get_Cast_type(cast);
+ ptr = get_Cast_op(cast);
+ orig_tp = get_irn_typeinfo_type(ptr);
- if (!is_Pointer_type(orig_tp)) return;
- if (!is_Pointer_type(cast_tp)) return;
- orig_tp = get_pointer_points_to_type(orig_tp);
- cast_tp = get_pointer_points_to_type(cast_tp);
- if (!is_Class_type(orig_tp)) return;
- if (!is_Class_type(cast_tp)) return;
+ if (!is_Pointer_type(orig_tp)) return;
+ if (!is_Pointer_type(cast_tp)) return;
+ orig_tp = get_pointer_points_to_type(orig_tp);
+ cast_tp = get_pointer_points_to_type(cast_tp);
+ if (!is_Class_type(orig_tp)) return;
+ if (!is_Class_type(cast_tp)) return;
- /* We only want to concretize, but not generalize. */
- if (!is_SuperClass_of(cast_tp, orig_tp)) return;
+ /* We only want to concretize, but not generalize. */
+ if (!is_SuperClass_of(cast_tp, orig_tp)) return;
- /* Hmm, we are not properly typed. */
- if (get_class_member_index(cast_tp, sel_ent) == -1) return;
+ /* Hmm, we are not properly typed. */
+ if (get_class_member_index(cast_tp, sel_ent) == -1) return;
- new_ent = resolve_ent_polymorphy(orig_tp, sel_ent);
+ new_ent = resolve_ent_polymorphy(orig_tp, sel_ent);
- /* New ent must be member of orig_tp. */
- if (get_class_member_index(orig_tp, new_ent) == -1) return;
+ /* New ent must be member of orig_tp. */
+ if (get_class_member_index(orig_tp, new_ent) == -1) return;
- set_Sel_entity(sel, new_ent);
- set_Sel_ptr(sel, ptr);
- n_sels_concretized++;
+ set_Sel_entity(sel, new_ent);
+ set_Sel_ptr(sel, ptr);
+ n_sels_concretized++;
- sel_ent = new_ent;
- cast = ptr;
- }
+ sel_ent = new_ent;
+ cast = ptr;
+ }
}
static void concretize_Phi_type(ir_node *phi) {
- int i, n_preds = get_Phi_n_preds(phi);
- ir_node **pred = alloca(n_preds * sizeof(ir_node *));
- ir_node *new;
- ir_type *totype, *fromtype;
-
- if (n_preds == 0) return;
- pred[0] = get_Phi_pred(phi, 0);
-
- if (get_irn_op(pred[0]) != op_Cast) return;
-
- if (!is_Cast_upcast(pred[0])) return;
-
- fromtype = get_irn_typeinfo_type(get_Cast_op(pred[0]));
- totype = get_Cast_type(pred[0]);
-
- pred[0] = get_Cast_op(pred[0]);
- for (i = 1; i < n_preds; ++i) {
- pred[i] = get_Phi_pred(phi, i);
- if (get_irn_op(pred[i]) != op_Cast) return;
- if (get_irn_typeinfo_type(get_Cast_op(pred[i])) != fromtype) return;
- pred[i] = get_Cast_op(pred[i]);
- }
-
- /* Transform Phi */
- set_cur_block(get_nodes_block(phi));
- new = new_Phi(n_preds, pred, get_irn_mode(phi));
- set_irn_typeinfo_type(new, fromtype);
- new = new_Cast(new, totype);
- set_irn_typeinfo_type(new, totype);
- exchange(phi, new);
+ int i, n_preds = get_Phi_n_preds(phi);
+ ir_node **pred = alloca(n_preds * sizeof(ir_node *));
+ ir_node *nn;
+ ir_type *totype, *fromtype;
+
+ if (n_preds == 0) return;
+ pred[0] = get_Phi_pred(phi, 0);
+
+ if (get_irn_op(pred[0]) != op_Cast) return;
+
+ if (!is_Cast_upcast(pred[0])) return;
+
+ fromtype = get_irn_typeinfo_type(get_Cast_op(pred[0]));
+ totype = get_Cast_type(pred[0]);
+
+ pred[0] = get_Cast_op(pred[0]);
+ for (i = 1; i < n_preds; ++i) {
+ pred[i] = get_Phi_pred(phi, i);
+ if (get_irn_op(pred[i]) != op_Cast) return;
+ if (get_irn_typeinfo_type(get_Cast_op(pred[i])) != fromtype) return;
+ pred[i] = get_Cast_op(pred[i]);
+ }
+
+ /* Transform Phi */
+ set_cur_block(get_nodes_block(phi));
+ nn = new_Phi(n_preds, pred, get_irn_mode(phi));
+ set_irn_typeinfo_type(nn, fromtype);
+ nn = new_Cast(nn, totype);
+ set_irn_typeinfo_type(nn, totype);
+ exchange(phi, nn);
}
void remove_Cmp_Null_cast(ir_node *cmp) {
- ir_node *cast, *null, *new_null;
- int cast_pos, null_pos;
- ir_type *fromtype;
-
- cast = get_Cmp_left(cmp);
- cast_pos = 0;
- if (get_irn_op(cast) != op_Cast) {
- null = cast;
- null_pos = cast_pos;
- cast = get_Cmp_right(cmp);
- cast_pos = 1;
- if (get_irn_op(cast) != op_Cast) return;
- } else {
- null = get_Cmp_right(cmp);
- null_pos = 1;
- }
-
- if (get_irn_op(null) != op_Const) return;
- if (!mode_is_reference(get_irn_mode(null))) return;
- if (get_Const_tarval(null) != get_mode_null(get_irn_mode(null))) return;
-
- /* Transform Cmp */
- set_irn_n(cmp, cast_pos, get_Cast_op(cast));
- fromtype = get_irn_typeinfo_type(get_Cast_op(cast));
- new_null = new_Const_type(get_Const_tarval(null), fromtype);
- set_irn_typeinfo_type(new_null, fromtype);
- set_irn_n(cmp, null_pos, new_null);
- n_casts_removed ++;
+ ir_node *cast, *null, *new_null;
+ int cast_pos, null_pos;
+ ir_type *fromtype;
+
+ cast = get_Cmp_left(cmp);
+ cast_pos = 0;
+ if (!is_Cast(cast)) {
+ null = cast;
+ null_pos = cast_pos;
+ cast = get_Cmp_right(cmp);
+ cast_pos = 1;
+ if (!is_Cast(cast)) return;
+ } else {
+ null = get_Cmp_right(cmp);
+ null_pos = 1;
+ }
+
+ if (! is_Const(null)) return;
+ if (!mode_is_reference(get_irn_mode(null))) return;
+ if (get_Const_tarval(null) != get_mode_null(get_irn_mode(null))) return;
+
+ /* Transform Cmp */
+ set_irn_n(cmp, cast_pos, get_Cast_op(cast));
+ fromtype = get_irn_typeinfo_type(get_Cast_op(cast));
+ new_null = new_Const_type(get_Const_tarval(null), fromtype);
+ set_irn_typeinfo_type(new_null, fromtype);
+ set_irn_n(cmp, null_pos, new_null);
+ n_casts_removed ++;
}
/**
* Post-Walker:
*/
static void irn_optimize_class_cast(ir_node *n, void *env) {
- (void) env;
- if (get_irn_op(n) == op_Cast)
- cancel_out_casts(n);
- else if (get_irn_op(n) == op_Sel)
- concretize_selected_entity(n);
- else if (get_irn_op(n) == op_Phi)
- concretize_Phi_type(n);
- else if (get_irn_op(n) == op_Cmp)
- remove_Cmp_Null_cast(n);
+ (void) env;
+ if (is_Cast(n))
+ cancel_out_casts(n);
+ else if (is_Sel(n))
+ concretize_selected_entity(n);
+ else if (is_Phi(n))
+ concretize_Phi_type(n);
+ else if (is_Cmp(n))
+ remove_Cmp_Null_cast(n);
}
void optimize_class_casts(void) {
- int i, n_irgs = get_irp_n_irgs();
+ int i, n_irgs = get_irp_n_irgs();
+
+ if (get_irp_typeinfo_state() != ir_typeinfo_consistent)
+ simple_analyse_types();
- if (get_irp_typeinfo_state() != ir_typeinfo_consistent)
- simple_analyse_types();
+ all_irg_walk(NULL, irn_optimize_class_cast, NULL);
- all_irg_walk(NULL, irn_optimize_class_cast, NULL);
+ set_trouts_inconsistent();
+ for (i = 0; i < n_irgs; ++i)
+ set_irg_outs_inconsistent(get_irp_irg(i));
- set_trouts_inconsistent();
- for (i = 0; i < n_irgs; ++i)
- set_irg_outs_inconsistent(get_irp_irg(i));
+ DB((dbg, SET_LEVEL_1, " Cast optimization: %d Casts removed, %d Sels concretized.\n",
+ n_casts_removed, n_sels_concretized));
+}
- if (get_opt_optimize_class_casts_verbose() && get_firm_verbosity()) {
- printf(" Cast optimization: %d Casts removed, %d Sels concretized.\n",
- n_casts_removed, n_sels_concretized);
- }
+void firm_init_class_casts_opt(void) {
+ FIRM_DBG_REGISTER(dbg, "firm.opt.tropt");
}
--- /dev/null
+/*
+ * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
+ *
+ * This file is part of libFirm.
+ *
+ * This file may be distributed and/or modified under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation and appearing in the file LICENSE.GPL included in the
+ * packaging of this file.
+ *
+ * Licensees holding valid libFirm Professional Edition licenses may use
+ * this file in accordance with the libFirm Commercial License.
+ * Agreement provided with the Software.
+ *
+ * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/**
+ * @file
+ * @brief Perform optimizations of the type representation.
+ * @version $Id: $
+ */
+#ifndef FIRM_OPT_TROPT_H
+#define FIRM_OPT_TROPT_H
+
+void firm_init_class_casts_opt(void);
+
+#endif