2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Compute an estimate of field temperature, i.e., field access heuristic.
23 * @author Goetz Lindenmaier
31 #ifdef INTERPROCEDURAL_VIEW
35 #include "field_temperature.h"
38 #include "execution_frequency.h"
41 #include "irgraph_t.h"
49 /* *************************************************************************** */
50 /* initialize, global variables. */
51 /* *************************************************************************** */
53 /* *************************************************************************** */
54 /* Access routines for irnodes */
55 /* *************************************************************************** */
57 /* The entities that can be accessed by this Sel node. */
/* NOTE(review): function body is missing from this excerpt; the companion
   accessor below ignores `pos`, which suggests this returns 1 -- confirm
   against the complete file. */
58 int get_Sel_n_accessed_entities(ir_node *sel) {
63 ir_entity *get_Sel_accessed_entity(ir_node *sel, int pos) {
/* A Sel node references exactly one entity, so `pos` is not consulted. */
65 return get_Sel_entity(sel);
68 /* *************************************************************************** */
70 /* *************************************************************************** */
/* Loop depth attributed to the node via its graph: delegates to the
   graph-level loop depth of the irg containing n. */
72 int get_irn_loop_call_depth(ir_node *n) {
73 ir_graph *irg = get_irn_irg(n);
74 return get_irg_loop_depth(irg);
/* Control-flow loop nesting depth of n's block.
   NOTE(review): original line 79 is missing from this excerpt -- presumably
   a NULL check on `l`; confirm before relying on this path. */
77 int get_irn_cfloop_depth(ir_node *n) {
78 ir_loop *l = get_irn_loop(get_nodes_block(n));
80 return get_loop_depth(l);
/* Recursion depth attributed to the node: a property of the containing
   graph, not of the individual node. */
85 int get_irn_recursion_depth(ir_node *n) {
86 ir_graph *irg = get_irn_irg(n);
87 return get_irg_recursion_depth(irg);
91 /** @@@ the second version of the heuristic. */
/* Simple additive weight: graph loop depth + control-flow loop depth +
   recursion depth.  Zero iff the node sits in no loop and no recursion. */
92 int get_weighted_loop_depth(ir_node *n) {
93 int loop_call_depth = get_irn_loop_call_depth(n);
94 int loop_depth = get_irn_cfloop_depth(n);
95 int recursion_depth = get_irn_recursion_depth(n);
97 return loop_call_depth + loop_depth + recursion_depth;
101 /* *************************************************************************** */
102 /* The 2. heuristic */
103 /* *************************************************************************** */
105 static int default_recursion_weight = 5;
107 /* The final evaluation of a node. In this function we can
108 adapt the heuristic. Combine execution frequency with
110 @@@ the second version of the heuristic.
112 Return 0 if the node is neither in a loop nor in a recursion. */
113 double get_irn_final_cost(ir_node *n) {
114 double cost_loop = get_irn_exec_freq(n);
115 double cost_method = get_irg_method_execution_frequency(get_irn_irg(n));
116 int rec_depth = get_irn_recursion_depth(n);
/* NOTE(review): `cost_rec` is declared on a line missing from this excerpt
   (presumably initialised to 0) -- confirm in the complete file. */
/* Fast path: no recursion, no loop around the node, no loop around the
   method -> cost 0. */
120 if (get_irn_recursion_depth(n) == 0 &&
121 get_irn_loop_depth(n) == 0 &&
122 get_irg_method_loop_depth(get_irn_irg(n)) == 0)
125 if (get_weighted_loop_depth(n) == 0) return 0;
/* Recursion is weighted exponentially: default_recursion_weight^depth. */
128 if (rec_depth) cost_rec = pow(default_recursion_weight, rec_depth);
129 return cost_loop*(cost_method + cost_rec);
/* Estimate of the number of run-time instances of tp: sum of the final
   costs of all Alloc nodes recorded for the type. */
132 double get_type_estimated_n_instances(ir_type *tp) {
133 int i, n_allocs = get_type_n_allocs(tp);
134 double n_instances = 0;
135 for (i = 0; i < n_allocs; ++i) {
136 ir_node *alloc = get_type_alloc(tp, i);
137 n_instances += get_irn_final_cost(alloc);
142 double get_type_estimated_mem_consumption_bytes(ir_type *tp) {
/* Estimate the number of fields of type tp: 1 for scalar-like types
   (plus a dispatch pointer for classes), the recursive sum over automatic
   members for compounds, and the element count for 1-D arrays. */
148 int get_type_estimated_n_fields(ir_type *tp) {
150 switch(get_type_tpop_code(tp)) {
154 case tpo_enumeration:
159 s = 1; /* dispatch pointer */
162 int i, n_mem = get_compound_n_members(tp);
163 for (i = 0; i < n_mem; ++i) {
164 ir_entity *mem = get_compound_member(tp, i);
165 if (get_entity_allocation(mem) == allocation_automatic) {
166 s += get_type_estimated_n_fields(get_entity_type(mem));
172 long n_elt = DEFAULT_N_ARRAY_ELEMENTS;
173 assert(get_array_n_dimensions(tp) == 1 && "other not implemented");
174 if ((get_irn_op(get_array_lower_bound(tp, 0)) == op_Const) &&
175 (get_irn_op(get_array_upper_bound(tp, 0)) == op_Const) ) {
/* BUGFIX: was `upper - upper`, which always yields 0; the element count
   is upper bound minus lower bound (cf. get_type_estimated_size_bytes). */
176 n_elt = get_array_upper_bound_int(tp, 0) - get_array_lower_bound_int(tp, 0);
182 panic("Unsupported type in get_type_estimated_n_fields %+F", tp);
/* Estimate the size of an object of type tp in bytes: mode size for
   scalar-like types, dispatch pointer plus recursive member sizes for
   classes/compounds, element size times element count for 1-D arrays. */
188 int get_type_estimated_size_bytes(ir_type *tp) {
191 switch(get_type_tpop_code(tp)) {
195 case tpo_enumeration:
196 s = get_mode_size_bytes(get_type_mode(tp));
200 s = get_mode_size_bytes(mode_P_data); /* dispatch pointer */
203 int i, n_mem = get_compound_n_members(tp);
204 for (i = 0; i < n_mem; ++i) {
205 ir_entity *mem = get_compound_member(tp, i);
206 s += get_type_estimated_size_bytes(get_entity_type(mem));
/* NOTE(review): lines between original 206 and 208 are missing from this
   excerpt; the allocation check below appears truncated -- consult the
   full file before editing this branch. */
208 if (get_entity_allocation(mem) == allocation_automatic) {
209 } /* allocation_automatic */
214 int elt_s = get_type_estimated_size_bytes(get_array_element_type(tp));
215 long n_elt = DEFAULT_N_ARRAY_ELEMENTS;
216 assert(get_array_n_dimensions(tp) == 1 && "other not implemented");
/* Use the real element count only when both bounds are compile-time Consts. */
217 if ((get_irn_op(get_array_lower_bound(tp, 0)) == op_Const) &&
218 (get_irn_op(get_array_upper_bound(tp, 0)) == op_Const) ) {
219 n_elt = get_array_upper_bound_int(tp, 0) - get_array_lower_bound_int(tp, 0);
/* Weighted number of Cast operations recorded for type tp: sum of the
   final costs of all its cast nodes. */
231 double get_type_estimated_n_casts(ir_type *tp) {
232 int i, n_casts = get_type_n_casts(tp);
233 double n_instances = 0;
234 for (i = 0; i < n_casts; ++i) {
235 ir_node *cast = get_type_cast(tp, i);
236 n_instances += get_irn_final_cost(cast);
/* Weighted number of upcasts involving clss, including casts recorded on
   all pointer types referring to it (recursive descent). */
241 double get_class_estimated_n_upcasts(ir_type *clss) {
242 double n_instances = 0;
243 int i, j, n_casts, n_pointertypes;
245 n_casts = get_type_n_casts(clss);
246 for (i = 0; i < n_casts; ++i) {
247 ir_node *cast = get_type_cast(clss, i);
248 if (get_irn_opcode(cast) != iro_Cast) continue; /* Could be optimized away. */
250 if (is_Cast_upcast(cast))
251 n_instances += get_irn_final_cost(cast);
/* Also accumulate casts recorded on pointer types to this class. */
254 n_pointertypes = get_type_n_pointertypes_to(clss);
255 for (j = 0; j < n_pointertypes; ++j) {
256 n_instances += get_class_estimated_n_upcasts(get_type_pointertype_to(clss, j));
/* Weighted number of downcasts involving clss; mirror image of
   get_class_estimated_n_upcasts() above. */
262 double get_class_estimated_n_downcasts(ir_type *clss) {
263 double n_instances = 0;
264 int i, j, n_casts, n_pointertypes;
266 n_casts = get_type_n_casts(clss);
267 for (i = 0; i < n_casts; ++i) {
268 ir_node *cast = get_type_cast(clss, i);
269 if (get_irn_opcode(cast) != iro_Cast) continue; /* Could be optimized away. */
271 if (is_Cast_downcast(cast))
272 n_instances += get_irn_final_cost(cast);
/* Also accumulate casts recorded on pointer types to this class. */
275 n_pointertypes = get_type_n_pointertypes_to(clss);
276 for (j = 0; j < n_pointertypes; ++j) {
277 n_instances += get_class_estimated_n_downcasts(get_type_pointertype_to(clss, j));
/* Dispatch-pointer writes: equated with the estimated number of instance
   creations (the pointer is presumably written once per allocation). */
284 double get_class_estimated_dispatch_writes(ir_type *clss) {
285 return get_type_estimated_n_instances(clss);
288 /** Returns the number of reads of the dispatch pointer. */
/* Sums the estimated dynamic-call counts over all class members.
   NOTE(review): the declaration of `n_calls` and the return statement fall
   on lines missing from this excerpt. */
289 double get_class_estimated_dispatch_reads (ir_type *clss) {
290 int i, n_mems = get_class_n_members(clss);
292 for (i = 0; i < n_mems; ++i) {
293 ir_entity *mem = get_class_member(clss, i);
294 n_calls += get_entity_estimated_n_dyncalls(mem);
/* Total dispatch-pointer traffic for the class: reads plus writes. */
299 double get_class_estimated_n_dyncalls(ir_type *clss) {
300 return get_class_estimated_dispatch_reads(clss) +
301 get_class_estimated_dispatch_writes(clss);
/* Weighted number of Load accesses to ent.
   NOTE(review): `n_loads` is declared on a line missing from this excerpt. */
304 double get_entity_estimated_n_loads(ir_entity *ent) {
305 int i, n_acc = get_entity_n_accesses(ent);
307 for (i = 0; i < n_acc; ++i) {
308 ir_node *acc = get_entity_access(ent, i);
309 if (get_irn_op(acc) == op_Load) {
310 n_loads += get_irn_final_cost(acc);
/* Weighted number of Store accesses to ent; mirror of the Load variant.
   NOTE(review): `n_stores` is declared on a line missing from this excerpt. */
316 double get_entity_estimated_n_stores(ir_entity *ent) {
317 int i, n_acc = get_entity_n_accesses(ent);
319 for (i = 0; i < n_acc; ++i) {
320 ir_node *acc = get_entity_access(ent, i);
321 if (get_irn_op(acc) == op_Store)
322 n_stores += get_irn_final_cost(acc);
327 /* @@@ Should we evaluate the callee array? */
/* Weighted number of Call accesses to ent.
   NOTE(review): original line 334 (between the condition and the
   accumulation) and the `n_calls` declaration are missing from this view. */
328 double get_entity_estimated_n_calls(ir_entity *ent) {
329 int i, n_acc = get_entity_n_accesses(ent);
331 for (i = 0; i < n_acc; ++i) {
332 ir_node *acc = get_entity_access(ent, i);
333 if (get_irn_op(acc) == op_Call)
335 n_calls += get_irn_final_cost(acc);
/* Weighted number of dynamically dispatched accesses through ent:
   Call-through-Sel, plus memory ops through a Sel of a class entity that
   is overwritten or overwrites (i.e. participates in dispatch). */
340 double get_entity_estimated_n_dyncalls(ir_entity *ent) {
341 int i, n_acc = get_entity_n_accesses(ent);
343 for (i = 0; i < n_acc; ++i) {
344 ir_node *acc = get_entity_access(ent, i);
346 /* Call->Sel(ent) combination */
347 if (is_Call(acc) && is_Sel(get_Call_ptr(acc))) {
348 n_calls += get_irn_final_cost(acc);
350 /* MemOp->Sel combination for static, overwritten entities */
/* NOTE(review): the inner `ent` below shadows the parameter -- legal but
   confusing; consider renaming in a follow-up change. */
351 } else if (is_memop(acc) && is_Sel(get_memop_ptr(acc))) {
352 ir_entity *ent = get_Sel_entity(get_memop_ptr(acc));
353 if (is_Class_type(get_entity_owner(ent))) {
354 /* We might call this for inner entities in compounds. */
355 if (get_entity_n_overwrites(ent) > 0 ||
356 get_entity_n_overwrittenby(ent) > 0) {
357 n_calls += get_irn_final_cost(acc);
367 /* Move this to the jack compiler */
369 /* ------------------------------------------------------------------------- */
371 /* ------------------------------------------------------------------------- */
/* Heuristic: does this name belong to the Jack runtime system?
   Matches well-known RTS class-name suffixes and package prefixes. */
373 int is_jack_rts_name(ident *name) {
374 if (id_is_suffix(new_id_from_str("Exception"), name)) return 1;
375 if (id_is_suffix(new_id_from_str("Throwable"), name)) return 1;
376 if (id_is_suffix(new_id_from_str("Error"), name)) return 1;
380 if (id_is_prefix(new_id_from_str("java/"), name)) return 1;
381 if (id_is_prefix(new_id_from_str("["), name)) return 1;
382 if (id_is_prefix(new_id_from_str("gnu/"), name)) return 1;
/* Removed a duplicate "java/" prefix test that repeated the check above. */
384 if (id_is_prefix(new_id_from_str("CStringToCoreString"), name)) return 1;
/* A class belongs to the Jack RTS iff its type name does. */
390 int is_jack_rts_class(ir_type *t) {
391 ident *name = get_type_ident(t);
392 return is_jack_rts_name(name);
395 #include "entity_t.h" // for the assertion.
/* An entity belongs to the Jack RTS iff its linker (ld) name does.
   NOTE(review): the declaration of `name` and an assertion fall on lines
   missing from this excerpt. */
397 int is_jack_rts_entity(ir_entity *e) {
401 name = get_entity_ld_ident(e);
403 return is_jack_rts_name(name);