/* Weight base used to penalize nodes inside recursions: the recursion
   cost contribution in get_irn_final_cost() grows as
   default_recursion_weight ^ recursion_depth. */
static int default_recursion_weight = 5;
+/* The final evaluation of a node. In this function we can
+ adapt the heuristic. Combine execution frequency with
+ recursion depth.
+ @@@ the second version of the heuristic.
+
+ Return 0 if the node is neither in a loop nor in a recursion. */
+double get_irn_final_cost(ir_node *n) {
+ double cost_loop = get_irn_exec_freq(n);
+ double cost_method = get_irg_method_execution_frequency(get_irn_irg(n));
+ int rec_depth = get_irn_recursion_depth(n);
+ double cost_rec = 0;
+
+#if 0
+ if (get_irn_recursion_depth(n) == 0 &&
+ get_irn_loop_depth(n) == 0 &&
+ get_irg_method_loop_depth(get_irn_irg(n)) == 0)
+ return 0;
+#else
+ if (get_weighted_loop_depth(n) == 0) return 0;
+#endif
+
+ if (rec_depth) cost_rec = pow(default_recursion_weight, rec_depth);
+ return cost_loop*(cost_method + cost_rec);
+}
+
+double get_type_estimated_n_instances(ir_type *tp) {
+ int i, n_allocs = get_type_n_allocs(tp);
+ double n_instances = 0;
+ for (i = 0; i < n_allocs; ++i) {
+ ir_node *alloc = get_type_alloc(tp, i);
+ n_instances += get_irn_final_cost(alloc);
+ }
+ return n_instances;
+}
+
+double get_type_estimated_mem_consumption_bytes(ir_type *tp) {
+ (void) tp;
+ assert(0);
+ return 0.0;
+}
+
+int get_type_estimated_n_fields(ir_type *tp) {
+ int s = 0;
+ switch(get_type_tpop_code(tp)) {
+
+ case tpo_primitive:
+ case tpo_pointer:
+ case tpo_enumeration:
+ s = 1;
+ break;
+
+ case tpo_class:
+ s = 1; /* dispatch pointer */
+ /* fall through */
+ case tpo_struct: {
+ int i, n_mem = get_compound_n_members(tp);
+ for (i = 0; i < n_mem; ++i) {
+ ir_entity *mem = get_compound_member(tp, i);
+ if (get_entity_allocation(mem) == allocation_automatic) {
+ s += get_type_estimated_n_fields(get_entity_type(mem));
+ }
+ }
+ } break;
+
+ case tpo_array: {
+ long n_elt = DEFAULT_N_ARRAY_ELEMENTS;
+ assert(get_array_n_dimensions(tp) == 1 && "other not implemented");
+ if ((get_irn_op(get_array_lower_bound(tp, 0)) == op_Const) &&
+ (get_irn_op(get_array_upper_bound(tp, 0)) == op_Const) ) {
+ n_elt = get_array_upper_bound_int(tp, 0) - get_array_upper_bound_int(tp, 0);
+ }
+ s = n_elt;
+ } break;
+
+ default:
+ panic("Unsupported type in get_type_estimated_n_fields %+F", tp);
+ }
+
+ return s;
+}
+
/* Roughly estimate the size in bytes of an object of type @p tp.
   Scalars use their mode size; classes add a dispatch pointer; compound
   member sizes are summed recursively; arrays multiply the element size
   by the element count when both bounds are Const, otherwise by
   DEFAULT_N_ARRAY_ELEMENTS.  No alignment/padding is accounted for. */
int get_type_estimated_size_bytes(ir_type *tp) {
  int s = 0;

  switch(get_type_tpop_code(tp)) {

  case tpo_primitive:
  case tpo_pointer:
  case tpo_enumeration:
    s = get_mode_size_bytes(get_type_mode(tp));
    break;

  case tpo_class:
    s = get_mode_size_bytes(mode_P_data); /* dispatch pointer */
    /* fall through */
  case tpo_struct: {
    int i, n_mem = get_compound_n_members(tp);
    for (i = 0; i < n_mem; ++i) {
      ir_entity *mem = get_compound_member(tp, i);
      s += get_type_estimated_size_bytes(get_entity_type(mem));

      /* NOTE(review): empty body — every member is summed above
         regardless of its allocation kind.  Presumably a leftover or a
         placeholder for allocation-specific handling; confirm intent. */
      if (get_entity_allocation(mem) == allocation_automatic) {
      } /* allocation_automatic */
    }
  } break;

  case tpo_array: {
    int elt_s = get_type_estimated_size_bytes(get_array_element_type(tp));
    long n_elt = DEFAULT_N_ARRAY_ELEMENTS;
    assert(get_array_n_dimensions(tp) == 1 && "other not implemented");
    if ((get_irn_op(get_array_lower_bound(tp, 0)) == op_Const) &&
        (get_irn_op(get_array_upper_bound(tp, 0)) == op_Const) ) {
      /* Element count from constant bounds: upper - lower. */
      n_elt = get_array_upper_bound_int(tp, 0) - get_array_lower_bound_int(tp, 0);
    }
    s = n_elt * elt_s;
    break;
  }

  default: assert(0);
  }

  return s;