10 #include "becopyopt.h"
11 #include "becopystat.h"
/* Default debug mask for this module (0 = silent); see firm_dbg_set_mask(). */
#define DEBUG_LVL 0 //SET_LEVEL_1
/* Debug module handle; registered in new_copy_opt(). */
static firm_dbg_module_t *dbg = NULL;
/* True iff irn belongs to the register class this copy-opt works on.
 * NOTE(review): expands a local `co` unhygienically — only usable in scopes
 * where a `copy_opt_t *co` is in scope. */
#define is_curr_reg_class(irn) (co->isa->get_irn_reg_class(irn)==co->cls)
/* A node is a candidate root for an optimization unit iff it is a Phi or a Copy. */
#define is_optimizable(irn) (is_Phi(irn) || is_Copy(irn))
/**
 * Builds an optimization unit for a given optimizable irn (root).
 * This opt-unit is inserted in the main structure co.
 * If an arg of root itself is optimizable process this arg before with a
 * recursive call. For handling this situation and loops co->root is used
 * to remember all roots.
 */
static void co_append_unit(copy_opt_t *co, const ir_node *root) {
	/* NOTE(review): this view of the file is truncated — the declarations of
	 * the loop index, `arity` and `unit` are not visible here; confirm against
	 * the full file. */
	DBG((dbg, LEVEL_1, "\t Root: %n\n", root));
	/* check if we encountered this root earlier */
	/* NOTE(review): an early `return;` for the already-seen case appears to be
	 * missing in this truncated view — without it the guard has no effect. */
	if (pset_find_ptr(co->roots, root))
	pset_insert_ptr(co->roots, root);
	/* root must be of the class this copy-opt instance operates on */
	assert(is_curr_reg_class(root) && "node is in wrong register class!");
	/* allocate the unit: one slot for root plus one per argument (upper bound) */
	arity = get_irn_arity(root);
	unit = xcalloc(1, sizeof(*unit));
	unit->nodes = xmalloc((arity+1) * sizeof(*unit->nodes));
	unit->nodes[0] = root;
	INIT_LIST_HEAD(&unit->queue);
	/* check all args: non-interfering ones join this unit; optimizable args
	 * are processed first via recursion so their units are built before ours */
	for (i=0; i<arity; ++i) {
		ir_node *arg = get_irn_n(root, i);
		assert(is_curr_reg_class(arg) && "Argument not in same register class.");
		if (!values_interfere(root, arg)) {
			DBG((dbg, LEVEL_1, "\t Member: %n\n", arg));
			if (is_optimizable(arg))
				co_append_unit(co, arg);
			unit->nodes[unit->node_count++] = arg;
	/* NOTE(review): closing braces of the if/loop are not visible in this view. */
	/* shrink the over-allocated node array to its final size */
	unit->nodes = xrealloc(unit->nodes, unit->node_count * sizeof(*unit->nodes));
	list_add_tail(&unit->units, &co->units);
	/* Init mis_size to node_count. So get_lower_bound returns correct results.
	 * - Now it can be called even before the heuristic has run.
	 * - And it will return correct results for units with nodecount 1 which are
	 * not optimized during the heuristic and have therefor delivered wrong results for get_lower_bound
	 */
	unit->mis_size = unit->node_count;
/* Dominance-tree walker callback: scan the block's borders bottom-up and
 * start an optimization unit at every real def of an optimizable node
 * (Phi or Copy).
 * NOTE(review): the binding of `co` (presumably `copy_opt_t *co = env;`), the
 * declaration of `curr` and the closing brace are not visible in this
 * truncated view — confirm against the full file. */
static void co_collect_in_block(ir_node *block, void *env) {
	struct list_head *head = &get_ra_block_info(block)->border_head;
	list_for_each_entry_reverse(border_t, curr, head, list)
		if (curr->is_def && curr->is_real && is_optimizable(curr->irn))
			co_append_unit(co, curr->irn);
/* Collects all optimization units of the graph: allocates the roots set,
 * then walks the dominance tree building one unit per optimizable def.
 * NOTE(review): the tail of this function (presumably freeing co->roots and
 * the closing brace) is not visible in this truncated view. */
static void co_collect_units(copy_opt_t *co) {
	DBG((dbg, LEVEL_1, "\tCollecting optimization units\n"));
	co->roots = pset_new_ptr(64);
	dom_tree_walk_irg(co->irg, co_collect_in_block, NULL, co);
89 copy_opt_t *new_copy_opt(ir_graph *irg, const arch_isa_if_t *isa, const arch_register_class_t *cls) {
90 const char *s1, *s2, *s3;
94 dbg = firm_dbg_register("ir.be.copyopt");
95 firm_dbg_set_mask(dbg, DEBUG_LVL);
97 co = xcalloc(1, sizeof(*co));
102 s1 = get_irp_prog_name();
103 s2 = get_entity_name(get_irg_entity(co->irg));
105 len = strlen(s1) + strlen(s2) + strlen(s3) + 5;
106 co->name = xmalloc(len);
107 if (!strcmp(co->name, DEBUG_IRG))
108 firm_dbg_set_mask(dbg, -1);
109 snprintf(co->name, len, "%s__%s__%s", s1, s2, s3);
111 INIT_LIST_HEAD(&co->units);
112 co_collect_units(co);
/* Frees a copy-opt environment by releasing every optimization unit.
 * Uses the _safe list iterator because units are removed while iterating.
 * NOTE(review): the loop body (freeing each unit's members) and the rest of
 * this function are not visible in this truncated view. */
void free_copy_opt(copy_opt_t *co) {
	list_for_each_entry_safe(unit_t, curr, tmp, &co->units, units) {
/* Counts the copies still necessary under the current coloring: for every
 * unit, each member node whose color differs from the root's color (index 0)
 * costs one copy.
 * NOTE(review): the accumulator increment and the return statement are not
 * visible in this truncated view. */
int co_get_copy_count(copy_opt_t *co) {
	list_for_each_entry(unit_t, curr, &co->units, units) {
		int root_col = get_irn_color(curr->nodes[0]);
		for (i=1; i<curr->node_count; ++i)
			if (root_col != get_irn_color(curr->nodes[i]))
/* Lower bound on the number of copies that must remain: per unit, the
 * interfering members plus the members outside the maximal independent set
 * (mis_size, pre-initialized to node_count in co_append_unit so this is
 * valid even before the heuristic has run).
 * NOTE(review): declaration of `res` and the return statement are not
 * visible in this truncated view. */
int co_get_lower_bound(copy_opt_t *co) {
	list_for_each_entry(unit_t, curr, &co->units, units)
		res += curr->interf + curr->node_count - curr->mis_size;
/* Sums a per-unit quantity over all optimization units.
 * NOTE(review): the loop body and return statement are not visible in this
 * truncated view — presumably it accumulates `curr->interf`; confirm. */
int co_get_interferer_count(copy_opt_t *co) {
	list_for_each_entry(unit_t, curr, &co->units, units)
/**
 * Needed for result checking
 * Dominance-tree walker callback: collects, onto co->ob, every node of the
 * current register class that is really defined in this block.
 */
static void co_collect_for_checker(ir_node *block, void *env) {
	copy_opt_t *co = env;
	struct list_head *head = &get_ra_block_info(block)->border_head;
	/* walk the borders bottom-up; real defs of the current class are recorded */
	list_for_each_entry_reverse(border_t, curr, head, list)
		if (curr->is_def && curr->is_real && is_curr_reg_class(curr->irn))
			obstack_ptr_grow(&co->ob, curr->irn);
/**
 * This O(n^2) checker checks, if two ifg-connected nodes have the same color.
 */
void co_check_allocation(copy_opt_t *co) {
	ir_node **nodes, *n1, *n2;
	/* NOTE(review): declarations of the indices `i` and `o` and several
	 * closing braces are not visible in this truncated view. */
	/* gather all relevant nodes into a NULL-terminated array on the obstack */
	obstack_init(&co->ob);
	dom_tree_walk_irg(co->irg, co_collect_for_checker, NULL, co);
	/* NULL sentinel terminates the node array */
	obstack_ptr_grow(&co->ob, NULL);
	nodes = (ir_node **) obstack_finish(&co->ob);
	/* pairwise check: every allocatable node is colored, and no two
	 * interfering nodes share a color */
	for (i = 0, n1 = nodes[i]; n1; n1 = nodes[++i]) {
		assert(! (is_allocatable_irn(n1) && get_irn_color(n1) == NO_COLOR));
		for (o = i+1, n2 = nodes[o]; n2; n2 = nodes[++o])
			if (phi_ops_interfere(n1, n2) && get_irn_color(n1) == get_irn_color(n2)) {
				DBG((dbg, 0, "Error: %n in %n and %n in %n have the same color.\n", n1, get_nodes_block(n1), n2, get_nodes_block(n2)));
				assert(0 && "Interfering values have the same color!");
	obstack_free(&co->ob, NULL);
	DBG((dbg, 2, "The checker seems to be happy!\n"));