4 * Copyright: (c) Universitaet Karlsruhe
5 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
12 #include "becopyopt.h"
13 #include "becopystat.h"
15 #define DEBUG_LVL 0 //SET_LEVEL_1
16 static firm_dbg_module_t *dbg = NULL;
18 #define is_curr_reg_class(irn) (arch_get_irn_reg_class(co->env, irn, arch_pos_make_out(0)) == co->cls)
21 * Builds an optimization unit for a given optimizable irn (root).
22 * This opt-unit is inserted in the main structure co.
23 * If an argument of root is itself optimizable, process that argument first
24 * with a recursive call. To handle this situation (and loops), co->roots is
25 * used to remember all roots seen so far.
/*
 * Builds an optimization unit for the optimizable node `root` and appends it
 * to co->units: root plus every argument that does not interfere with it.
 * Arguments that are themselves optimizable are processed first, recursively.
 * NOTE(review): this view of the source is elided -- the declarations of
 * i, arity and unit, an early return after the roots-lookup, and several
 * closing braces are not visible here; confirm against the full file.
 */
27 static void co_append_unit(copy_opt_t *co, ir_node *root) {
30 DBG((dbg, LEVEL_1, "\t Root: %n\n", root));
31 /* check if we encountered this root earlier */
32 if (pset_find_ptr(co->roots, root))
/* (elided: presumably an early return when root was already seen) */
34 pset_insert_ptr(co->roots, root);
36 assert(is_curr_reg_class(root) && "node is in wrong register class!");
/* one slot for root plus one per argument; trimmed to actual size below */
39 arity = get_irn_arity(root);
40 unit = xcalloc(1, sizeof(*unit));
44 unit->nodes = xmalloc((arity+1) * sizeof(*unit->nodes));
45 unit->nodes[0] = root;
46 INIT_LIST_HEAD(&unit->queue);
/* collect all arguments of root that do not interfere with it */
49 for (i=0; i<arity; ++i) {
50 ir_node *arg = get_irn_n(root, i);
51 assert(is_curr_reg_class(arg) && "Argument not in same register class.");
53 if (!values_interfere(root, arg)) {
54 DBG((dbg, LEVEL_1, "\t Member: %n\n", arg));
/* recurse first so an optimizable argument gets its own unit */
55 if (is_optimizable(arg))
56 co_append_unit(co, arg);
57 unit->nodes[unit->node_count++] = arg;
/* shrink the node array to the number of members actually collected */
62 unit->nodes = xrealloc(unit->nodes, unit->node_count * sizeof(*unit->nodes));
63 list_add_tail(&unit->units, &co->units);
64 /* Init mis_size to node_count, so get_lower_bound returns correct results.
65 * - Now it can be called even before the heuristic has run.
66 * - And it will return correct results for units with node count 1, which are
67 * not optimized during the heuristic and had therefore delivered wrong results for get_lower_bound.
69 unit->mis_size = unit->node_count;
/*
 * Dominance-tree walker callback: scans the borders of `block` from back to
 * front and starts a new opt-unit for every real def that is optimizable.
 * NOTE(review): elided here -- `co` is presumably obtained by casting `env`
 * as in co_collect_for_checker; confirm against the full source.
 */
73 static void co_collect_in_block(ir_node *block, void *env) {
75 struct list_head *head = &get_ra_block_info(block)->border_head;
78 list_for_each_entry_reverse(border_t, curr, head, list)
79 if (curr->is_def && curr->is_real && is_optimizable(curr->irn))
80 co_append_unit(co, curr->irn);
/*
 * Walks the dominance tree of co->irg and collects all optimization units
 * into co->units.  co->roots tracks the roots already visited during the walk.
 */
83 static void co_collect_units(copy_opt_t *co) {
84 DBG((dbg, LEVEL_1, "\tCollecting optimization units\n"));
85 co->roots = pset_new_ptr(64);
86 dom_tree_walk_irg(co->irg, co_collect_in_block, NULL, co);
90 copy_opt_t *new_copy_opt(ir_graph *irg, const arch_env_t *env, const arch_register_class_t *cls) {
91 const char *s1, *s2, *s3;
95 dbg = firm_dbg_register("ir.be.copyopt");
96 firm_dbg_set_mask(dbg, DEBUG_LVL);
98 co = xcalloc(1, sizeof(*co));
101 // co->isa = env->isa;
104 s1 = get_irp_prog_name();
105 s2 = get_entity_name(get_irg_entity(co->irg));
107 len = strlen(s1) + strlen(s2) + strlen(s3) + 5;
108 co->name = xmalloc(len);
109 if (!strcmp(co->name, DEBUG_IRG))
110 firm_dbg_set_mask(dbg, -1);
111 snprintf(co->name, len, "%s__%s__%s", s1, s2, s3);
113 INIT_LIST_HEAD(&co->units);
114 co_collect_units(co);
/*
 * Frees a copy_opt_t created by new_copy_opt.
 * NOTE(review): the loop body and the remaining cleanup (unit members,
 * co->name, co itself) are elided in this view -- confirm in the full file.
 */
118 void free_copy_opt(copy_opt_t *co) {
121 list_for_each_entry_safe(unit_t, curr, tmp, &co->units, units) {
/*
 * Tests whether `irn` is an argument of an optimizable node: some user `n`
 * (reached via the out-edges of irn) must be optimizable and either be irn
 * itself or not interfere with irn.
 * NOTE(review): the declarations of i/max and the return statements are
 * elided in this view.
 */
127 int is_optimizable_arg(ir_node *irn) {
129 for(i=0, max=get_irn_n_outs(irn); i<max; ++i) {
130 ir_node *n = get_irn_out(irn, i);
131 if (is_optimizable(n) && (irn == n || !values_interfere(irn, n)))
/*
 * Counts, over all units, the members whose color differs from the color of
 * their unit's root -- i.e. the copies remaining after register allocation.
 * NOTE(review): the accumulator, its increment and the return are elided in
 * this view.
 */
138 int co_get_copy_count(copy_opt_t *co) {
141 list_for_each_entry(unit_t, curr, &co->units, units) {
142 int root_col = get_irn_col(co, curr->nodes[0]);
/* members start at index 1; index 0 is the root itself */
144 for (i=1; i<curr->node_count; ++i)
145 if (root_col != get_irn_col(co, curr->nodes[i]))
/*
 * Lower bound on the number of copies that must remain: per unit, the
 * interference count plus the members outside the maximum independent set.
 * Valid even before the heuristic runs, because co_append_unit initializes
 * mis_size to node_count.
 * NOTE(review): the declaration of res and the return are elided here.
 */
151 int co_get_lower_bound(copy_opt_t *co) {
154 list_for_each_entry(unit_t, curr, &co->units, units)
155 res += curr->interf + curr->node_count - curr->mis_size;
/*
 * Accumulates a per-unit quantity over all units of co.
 * NOTE(review): the loop body is elided in this view -- presumably it sums
 * curr->interf (cf. the name); confirm against the full source.
 */
159 int co_get_interferer_count(copy_opt_t *co) {
162 list_for_each_entry(unit_t, curr, &co->units, units)
168 * Needed for result checking
/*
 * Dominance-tree walker callback for the checker: grows every real def of
 * the current register class onto the obstack co->ob.
 * NOTE(review): the closing brace is elided in this view.
 */
170 static void co_collect_for_checker(ir_node *block, void *env) {
171 copy_opt_t *co = env;
172 struct list_head *head = &get_ra_block_info(block)->border_head;
175 list_for_each_entry_reverse(border_t, curr, head, list)
176 if (curr->is_def && curr->is_real && is_curr_reg_class(curr->irn))
177 obstack_ptr_grow(&co->ob, curr->irn);
181 * This O(n^2) checker tests whether two ifg-connected nodes have the same color.
183 void co_check_allocation(copy_opt_t *co) {
/* Collects all relevant nodes into a NULL-terminated array on co->ob,
 * then compares every pair: interfering nodes must not share a color.
 * NOTE(review): the declarations of i and o plus closing braces are elided
 * in this view. */
184 ir_node **nodes, *n1, *n2;
187 obstack_init(&co->ob);
188 dom_tree_walk_irg(co->irg, co_collect_for_checker, NULL, co);
/* NULL sentinel terminates the pointer array built below */
189 obstack_ptr_grow(&co->ob, NULL);
191 nodes = (ir_node **) obstack_finish(&co->ob);
192 for (i = 0, n1 = nodes[i]; n1; n1 = nodes[++i]) {
/* every allocatable node must have received a color by now */
193 assert(! (is_allocatable_irn(n1) && get_irn_col(co, n1) == NO_COLOR));
194 for (o = i+1, n2 = nodes[o]; n2; n2 = nodes[++o])
195 if (phi_ops_interfere(n1, n2) && get_irn_col(co, n1) == get_irn_col(co, n2)) {
196 DBG((dbg, 0, "Error: %n in %n and %n in %n have the same color.\n", n1, get_nodes_block(n1), n2, get_nodes_block(n2)));
197 assert(0 && "Interfering values have the same color!");
200 obstack_free(&co->ob, NULL);
201 DBG((dbg, 2, "The checker seems to be happy!\n"));