/**
 * @file   becopyopt.c
 * @author Daniel Grund
 * @date   12.04.2005
 */

#include <stdlib.h>

#include "becopyopt.h"

#define DEBUG_LVL SET_LEVEL_1
static firm_dbg_module_t *dbg = NULL;

//TODO extend is_optimizable beyond Phi nodes; "|| 0" is a placeholder for further conditions
#define is_optimizable(irn) (is_Phi(irn) || 0)

/**
 * Builds an optimization unit for a given optimizable irn (root).
 * The opt-unit is inserted into the main structure co.
 * If an argument of the root is itself optimizable, that argument is
 * processed first by a recursive call. To handle this situation (and
 * loops), co->roots is used to remember all roots seen so far.
 */
static void co_append_unit(copy_opt_t *co, const ir_node *root) {
	int i, arity;
	unit_t *unit;
	DBG((dbg, LEVEL_1, "Root: %n\n", root));
	/* check if we encountered this root earlier */
	if (pset_find_ptr(co->roots, root))
		return;
	pset_insert_ptr(co->roots, root);

	/* init unit */
	arity = get_irn_arity(root);
	unit = malloc(sizeof(*unit));
	unit->interf = 0;
	unit->node_count = 1;
	unit->nodes = malloc((arity+1) * sizeof(*unit->nodes));
	unit->nodes[0] = root;
	INIT_LIST_HEAD(&unit->queue);

	/* check all args */
	for (i = 0; i < arity; ++i) {
		ir_node *arg = get_irn_n(root, i);
		if (!values_interfere(root, arg)) {
			if (arg != root) {
				DBG((dbg, LEVEL_1, "Member: %n\n", arg));
				if (is_optimizable(arg))
					co_append_unit(co, arg);
				unit->nodes[unit->node_count++] = arg;
			}
		} else
			unit->interf++;
	}

	if (unit->node_count > 1) {
		unit->nodes = realloc(unit->nodes, unit->node_count * sizeof(*unit->nodes));
		list_add_tail(&unit->units, &co->units);
	} else {
		free(unit->nodes);
		free(unit);
	}
}
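
/*
 * Example (hypothetical nodes, for illustration): for a Phi node p with
 * arguments a, b and c, where only c interferes with p, the unit built
 * above is { p, a, b } with node_count = 3 and interf = 1. If a is itself
 * a Phi, a unit rooted at a is built first by the recursive call, and a
 * still becomes a member of p's unit.
 */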

/**
 * Walks the borders of a block and appends an opt-unit for every
 * real def of an optimizable value found there.
 */
static void co_collect_in_block(ir_node *block, void *env) {
	copy_opt_t *co = env;
	struct list_head *head = &get_ra_block_info(block)->border_head;
	border_t *curr;

	list_for_each_entry_reverse(border_t, curr, head, list)
		if (curr->is_def && curr->is_real && is_optimizable(curr->irn))
			co_append_unit(co, curr->irn);
}

static void co_collect_units(copy_opt_t *co) {
	co->roots = pset_new_ptr(64);
	dom_tree_walk_irg(co->irg, co_collect_in_block, NULL, co);
	del_pset(co->roots);
}

copy_opt_t *new_copy_opt(ir_graph *irg) {
	copy_opt_t *co;

	dbg = firm_dbg_register("ir.be.copyopt");
	firm_dbg_set_mask(dbg, DEBUG_LVL);

	co = calloc(1, sizeof(*co));
	co->irg = irg;
	INIT_LIST_HEAD(&co->units);
	co_collect_units(co);
	return co;
}

void free_copy_opt(copy_opt_t *co) {
	unit_t *curr, *tmp;
	/* the entries are freed while iterating, so use the _safe variant */
	list_for_each_entry_safe(unit_t, curr, tmp, &co->units, units) {
		free(curr->nodes);
		free(curr);
	}
	free(co);
}

int co_get_lower_bound(copy_opt_t *co) {
	//TODO Check whether counting copies is really this simple
	int res = 0;
	unit_t *curr;
	list_for_each_entry(unit_t, curr, &co->units, units)
		res += curr->interf + curr->node_count - curr->mis_size;
	return res;
}
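
/*
 * Worked example (assuming mis_size is the size of a maximum independent
 * set of pairwise non-interfering nodes in the unit): a unit with
 * node_count = 4, interf = 1 and mis_size = 2 contributes
 * 1 + 4 - 2 = 3 to the lower bound.
 */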

int co_get_interferer_count(copy_opt_t *co) {
	int res = 0;
	unit_t *curr;
	list_for_each_entry(unit_t, curr, &co->units, units)
		res += curr->interf;
	return res;
}
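
/*
 * Usage sketch (illustration only; the function name is an assumption and
 * nothing below is part of this module yet): a helper added to this file
 * could report the collected statistics roughly like this.
 *
 *     static void co_report_stats(ir_graph *irg) {
 *         copy_opt_t *co = new_copy_opt(irg);
 *         DBG((dbg, LEVEL_1, "lower bound: %d  interferers: %d\n",
 *              co_get_lower_bound(co), co_get_interferer_count(co)));
 *         free_copy_opt(co);
 *     }
 */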