/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Compute an estimate of basic block executions.
 * @author  Goetz Lindenmaier
 */
#include "execution_frequency.h"

#include "set.h"
#include "pdeq.h"
#include "hashptr.h"

#include "firm_common_t.h"

#include "irnode_t.h"
#include "irgraph_t.h"
#include "irprog_t.h"
#include "irloop.h"
#include "irouts.h"

#include "interval_analysis.h"
44 void set_irp_exec_freq_state(exec_freq_state s);
/*------------------------------------------------------------------*/
/* A hashmap mapping the frequency to block and loop nodes.  Block  */
/* and loop nodes are regions.                                      */
/*------------------------------------------------------------------*/
57 /* We use this set for all nodes in all irgraphs. */
58 static set *exec_freq_set = NULL;
60 static int exec_freq_cmp(const void *e1, const void *e2, size_t size) {
61 reg_exec_freq *ef1 = (reg_exec_freq *)e1;
62 reg_exec_freq *ef2 = (reg_exec_freq *)e2;
65 return (ef1->reg != ef2->reg);
68 static INLINE unsigned int exec_freq_hash(reg_exec_freq *e) {
69 return HASH_PTR(e->reg);
72 static INLINE void set_region_exec_freq(void *reg, double freq) {
76 set_insert(exec_freq_set, &ef, sizeof(ef), exec_freq_hash(&ef));
79 double get_region_exec_freq(void *reg) {
80 reg_exec_freq ef, *found;
82 assert(exec_freq_set);
84 found = set_find(exec_freq_set, &ef, sizeof(ef), exec_freq_hash(&ef));
86 /* Not found if information is invalid. */
93 /* Returns the number of times the block is executed. */
94 double get_Block_exec_freq(ir_node *b) {
95 return get_region_exec_freq((void *)b);
98 double get_irn_exec_freq(ir_node *n) {
99 if (!is_Block(n)) n = get_nodes_block(n);
100 return get_Block_exec_freq(n);
/*------------------------------------------------------------------*/
/* An algorithm that precomputes whether Conds lead to an exception.
 * Computes a field for all Projs from Conds that says one of the
 * following:
 *  - The Proj projs from a normal dual Cond with probability 50:50.
 *  - This Proj of the Cond leads to an exception, i.e., a Raise node.
 *    It is taken with exception probability.
 *  - The Proj of the Cond avoids an exception.  It is taken with
 *    1 - exception probability.                                    */
/*------------------------------------------------------------------*/
/** Classification of a ProjX of a Cond.
 *
 *  NOTE(review): the first enumerators were lost in extraction; Cond_prob_none
 *  and Cond_prob_normal are reconstructed from their uses below -- verify
 *  order against the original source. */
typedef enum {
  Cond_prob_none,                 /**< No information recorded. */
  Cond_prob_normal,               /**< Ordinary dual Cond, 50:50. */
  Cond_prob_avoid_exception,      /**< Taken with 1 - exception probability. */
  Cond_prob_exception_taken,      /**< Leads to a Raise; taken with exception probability. */
  Cond_prob_was_exception_taken   /**< Formerly exception_taken; both Projs lead to Raises. */
} Cond_prob;
124 static int just_passed_a_Raise = 0;
125 static ir_node *Cond_list = NULL;
127 /* We do not use an extra set, as Projs are not yet in the existing one. */
128 void set_ProjX_probability(ir_node *n, Cond_prob prob) {
132 set_insert(exec_freq_set, &ef, sizeof(ef), exec_freq_hash(&ef));
135 Cond_prob get_ProjX_probability(ir_node *n) {
136 reg_exec_freq ef, *found;
139 found = set_find(exec_freq_set, &ef, sizeof(ef), exec_freq_hash(&ef));
142 return (Cond_prob)found->prob;
144 return Cond_prob_none;
147 /* A walker that only visits the nodes we want to see. */
150 my_irg_walk_2_both(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void * env) {
152 set_irn_visited(node, current_ir_graph->visited);
156 if (node->op != op_Block) {
158 if (node->op == op_Proj)
159 pred = get_irn_n(node, 0);
161 pred = get_irn_n(node, -1);
162 if (pred->visited < current_ir_graph->visited)
163 my_irg_walk_2_both(pred, pre, post, env);
167 for (i = get_irn_arity(node) - 1; i >= 0; --i) {
168 ir_node *pred = get_irn_n(node, i);
169 if (pred->visited < current_ir_graph->visited)
170 my_irg_walk_2_both(pred, pre, post, env);
174 if (node->op == op_End) {
175 for (i = get_irn_arity(node) - 1; i >= 0; --i) {
176 ir_node *pred = get_irn_n(node, i);
177 if ((pred->op == op_Block) && (pred->visited < current_ir_graph->visited))
178 my_irg_walk_2_both(pred, pre, post, env);
184 static void my_irg_walk_current_graph(irg_walk_func *pre, irg_walk_func *post, void *env) {
185 inc_irg_visited(current_ir_graph);
186 my_irg_walk_2_both(get_irg_end(current_ir_graph), pre, post, env);
190 static void walk_pre(ir_node *n, void *env)
194 just_passed_a_Raise = 1;
196 if (get_irn_op(n) == op_Proj &&
197 is_Cond(get_Proj_pred(n)) &&
198 just_passed_a_Raise) {
200 ir_node *c = get_Proj_pred(n);
202 /* If we already visited the other Proj, and it also leads to a Raise,
203 we are in the middle of something. Continue searching. */
204 assert(get_irn_n_outs(c) == 2 && "encountered a switch cond");
205 other_proj = get_irn_out(c, 0);
206 if (other_proj == n) other_proj = get_irn_out(c, 1);
207 if (get_ProjX_probability(other_proj) == Cond_prob_exception_taken) {
208 set_ProjX_probability(other_proj, Cond_prob_was_exception_taken);
209 /* Keep searching for the Proj, so keep just_passed_a_Raise. */
211 set_ProjX_probability(n, Cond_prob_exception_taken);
212 just_passed_a_Raise = 0;
217 set_irn_link(n, Cond_list);
222 static void walk_post(ir_node *n, void *env)
226 just_passed_a_Raise = 0;
228 if (get_irn_op(n) == op_Proj &&
229 is_Cond(get_Proj_pred(n)) && (
230 get_ProjX_probability(n) == Cond_prob_exception_taken ||
231 get_ProjX_probability(n) == Cond_prob_was_exception_taken
233 just_passed_a_Raise = 1;
237 /** Precompute which Conds test for an exception.
239 * Operates on current_ir_graph. */
240 void precompute_cond_evaluation(void) {
243 compute_irg_outs(current_ir_graph);
245 just_passed_a_Raise = 0;
247 my_irg_walk_current_graph(walk_pre, walk_post, NULL);
249 for (c = Cond_list; c; c = get_irn_link(c)) {
252 assert(get_irn_n_outs(c) == 2 && "encountered a switch cond");
253 p0 = get_irn_out(c, 0);
254 p1 = get_irn_out(c, 1);
256 /* both are exceptions */
257 if ((get_ProjX_probability(p0) == Cond_prob_exception_taken) &&
258 (get_ProjX_probability(p1) == Cond_prob_exception_taken) ) {
259 assert(0 && "I tried to avoid these!");
261 set_ProjX_probability(p0, Cond_prob_normal);
262 set_ProjX_probability(p1, Cond_prob_normal);
265 /* p0 is exception */
266 else if (get_ProjX_probability(p0) == Cond_prob_exception_taken) {
267 set_ProjX_probability(p1, Cond_prob_avoid_exception);
270 /* p1 is exception */
271 else if (get_ProjX_probability(p1) == Cond_prob_exception_taken) {
272 set_ProjX_probability(p0, Cond_prob_avoid_exception);
275 /* none is exception */
277 set_ProjX_probability(p0, Cond_prob_normal);
278 set_ProjX_probability(p1, Cond_prob_normal);
283 int is_fragile_Proj(ir_node *n) {
284 return is_Proj(n) && (get_ProjX_probability(n) == Cond_prob_exception_taken);
/*------------------------------------------------------------------*/
/* The algorithm to compute the execution frequencies.
 *
 * Walk the control flow loop tree which we consider the interval
 * tree.  Compute the execution frequency for the lowest loop, add
 * inner loops to a worklist.  Consider the inner loops as simple
 * nodes.  Check that there is only one loop head in each loop.      */
/*------------------------------------------------------------------*/
/* Probability that a fragile operation raises an exception.
   Overwritten per call in compute_execution_frequency(). */
static double exception_prob = 0.001;
298 static INLINE int is_loop_head(ir_node *cond)
304 /** Weight a single region in edge.
306 * Given all outs of the predecessor region, we can compute the weight of
307 * this single edge. */
308 static INLINE double get_weighted_region_exec_freq(void *reg, int pos) {
309 void *pred_reg = get_region_in(reg, pos);
310 double res, full_freq = get_region_exec_freq (pred_reg);
311 int n_outs = get_region_n_outs (pred_reg);
312 int n_exc_outs = get_region_n_exc_outs(pred_reg);
315 if (is_ir_node(reg)) {
316 cfop = get_Block_cfgpred((ir_node *)reg, pos);
317 if (is_Proj(cfop) && !is_Cond(get_Proj_pred(cfop)))
318 cfop = skip_Proj(cfop);
320 assert(is_ir_loop(reg));
321 cfop = get_loop_cfop(reg, pos);
324 if (is_fragile_op(cfop) || is_fragile_Proj(cfop)) {
325 res = full_freq * exception_prob;
328 /* Equally distribute the weight after exceptions to the left over outs. */
329 res = (full_freq *(1 - exception_prob * n_exc_outs)) / (n_outs - n_exc_outs);
335 static INLINE void compute_region_freqency(void *reg, double head_weight) {
336 int i, n_ins = get_region_n_ins(reg);
339 //printf("head weight %lf: ", head_weight); DDMR(reg);
341 for (i = 0; i < n_ins; ++i) {
342 void *pred_reg = get_region_in(reg, i);
344 my_freq += get_weighted_region_exec_freq(reg, i);
348 if (my_freq == 0.0) {
349 /* All preds are from outer loop. We are a head or so. */
350 my_freq = head_weight;
352 set_region_exec_freq(reg, my_freq);
355 static void check_proper_head(ir_loop *l, void *reg)
357 int i, n_ins = get_region_n_ins(reg);
359 for (i = 0; i < n_ins; ++i) {
360 assert(!get_region_in(reg, i));
364 /* Compute the ex freq for current_ir_graph */
365 static void compute_frequency(int default_loop_weight) {
366 ir_loop *outermost_l = get_irg_loop(current_ir_graph);
367 pdeq *block_worklist = new_pdeq1(outermost_l);
369 /* Outermost start is considered a loop head. We will soon multiply
370 by default_loop_weight. */
371 set_region_exec_freq(outermost_l, 1.0/default_loop_weight);
373 while (!pdeq_empty(block_worklist)) {
374 ir_loop *l = (ir_loop *)pdeq_getl(block_worklist);
375 int i, n_elems = get_loop_n_elements(l);
377 /* The header is initialized with the frequency of the full loop times the iteration weight. */
378 check_proper_head(l, get_loop_element(l, 0).son);
380 for (i = 0; i < n_elems; ++i) {
381 loop_element e = get_loop_element(l, i);
382 if (is_ir_loop(e.son)) pdeq_putr(block_worklist, e.son);
383 compute_region_freqency(e.son, default_loop_weight * get_region_exec_freq(l));
386 del_pdeq(block_worklist);
389 /* Compute the execution frequency for all blocks in the given
392 * irg: The graph to be analyzed.
393 * default_loop_weight: The number of executions of a loop.
395 void compute_execution_frequency(ir_graph *irg, int default_loop_weight, double exception_probability) {
396 ir_graph *rem = current_ir_graph;
397 current_ir_graph = irg;
398 exception_prob = exception_probability;
399 if (!exec_freq_set) exec_freq_set = new_set(exec_freq_cmp, 256);
401 precompute_cond_evaluation();
402 construct_intervals(current_ir_graph);
403 compute_frequency(default_loop_weight);
405 set_irg_exec_freq_state(irg, exec_freq_consistent);
406 if (get_irp_exec_freq_state() == exec_freq_none)
407 set_irp_exec_freq_state(exec_freq_inconsistent);
410 dump_loop_tree (current_ir_graph, "-execfreq");
411 dump_ir_block_graph(current_ir_graph, "-execfreq");
412 dump_interval_graph(current_ir_graph, "-execfreq");
415 current_ir_graph = rem;
419 void compute_execution_frequencies(int default_loop_weight, double exception_probability) {
420 int i, n_irgs = get_irp_n_irgs();
422 for (i = 0; i < n_irgs; ++i) {
423 compute_execution_frequency(get_irp_irg(i), default_loop_weight, exception_probability);
425 set_irp_exec_freq_state(exec_freq_consistent);
428 /** free occupied memory, reset */
429 void free_execution_frequency(void) {
430 int i, n_irgs = get_irp_n_irgs();
432 del_set(exec_freq_set);
434 for (i = 0; i < n_irgs; ++i)
435 set_irg_exec_freq_state(get_irp_irg(i), exec_freq_none);
436 set_irp_exec_freq_state(exec_freq_none);
439 exec_freq_state get_irg_exec_freq_state(ir_graph *irg) {
440 return irg->execfreq_state;
442 void set_irg_exec_freq_state(ir_graph *irg, exec_freq_state s) {
443 if ((get_irp_exec_freq_state() == exec_freq_consistent && s != exec_freq_consistent) ||
444 (get_irp_exec_freq_state() == exec_freq_none && s != exec_freq_none))
445 irp->execfreq_state = exec_freq_inconsistent;
446 irg->execfreq_state = s;
449 /* Sets irg and irp exec freq state to inconsistent if it is set to consistent. */
450 void set_irg_exec_freq_state_inconsistent(ir_graph *irg) {
451 if (get_irg_exec_freq_state(irg) == exec_freq_consistent)
452 set_irg_exec_freq_state(irg, exec_freq_inconsistent);
455 void set_irp_exec_freq_state(exec_freq_state s) {
456 irp->execfreq_state = s;
459 exec_freq_state get_irp_exec_freq_state(void) {
460 return irp->execfreq_state;
463 /* Sets irp and all irg exec freq states to inconsistent if it is set to consistent. */
464 void set_irp_exec_freq_state_inconsistent(void) {
465 if (get_irp_exec_freq_state() != exec_freq_none) {
466 int i, n_irgs = get_irp_n_irgs();
467 set_irp_exec_freq_state(exec_freq_inconsistent);
468 for (i = 0; i < n_irgs; ++i) {
469 ir_graph *irg = get_irp_irg(i);
470 if (get_irg_exec_freq_state(irg) != exec_freq_none)
471 irg->execfreq_state = exec_freq_inconsistent;