 * Author:    Matthias Braun
 * Copyright: (c) Universitaet Karlsruhe
 * License:   This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
27 #include "besched_t.h"
30 static int my_values_interfere(const ir_node *a, const ir_node *b);
/**
 * Environment carried through the block walker by
 * be_verify_register_pressure(); bundles the graph, liveness info and the
 * register class under test.
 */
typedef struct be_verify_register_pressure_env_t_ {
	ir_graph *irg; /**< the irg to verify */
	be_lv_t *lv; /**< Liveness information. */
	const arch_env_t *arch_env; /**< an architecture environment */
	const arch_register_class_t *cls; /**< the register class to check for */
	int registers_available; /**< number of available registers */
	int problem_found; /**< flag indicating if a problem was found */
} be_verify_register_pressure_env_t;
42 * Print all nodes of a pset into a file.
44 static void print_living_values(FILE *F, pset *live_nodes) {
48 foreach_pset(live_nodes, node) {
49 ir_fprintf(F, "%+F ", node);
55 * Check if number of live nodes never exceeds the number of available registers.
57 static void verify_liveness_walker(ir_node *block, void *data) {
58 be_verify_register_pressure_env_t *env = (be_verify_register_pressure_env_t *)data;
59 pset *live_nodes = pset_new_ptr_default();
63 /* collect register pressure info, start with end of a block */
64 be_liveness_end_of_block(env->lv, env->arch_env, env->cls, block, live_nodes);
66 pressure = pset_count(live_nodes);
67 if(pressure > env->registers_available) {
68 ir_fprintf(stderr, "Verify Warning: Register pressure too high at end of block %+F(%s) (%d/%d):\n",
69 block, get_irg_dump_name(env->irg), pressure, env->registers_available);
70 print_living_values(stderr, live_nodes);
71 env->problem_found = 1;
74 sched_foreach_reverse(block, irn) {
78 be_liveness_transfer(env->arch_env, env->cls, irn, live_nodes);
80 pressure = pset_count(live_nodes);
82 if(pressure > env->registers_available) {
83 ir_fprintf(stderr, "Verify Warning: Register pressure too high before node %+F in %+F(%s) (%d/%d):\n",
84 irn, block, get_irg_dump_name(env->irg), pressure, env->registers_available);
85 print_living_values(stderr, live_nodes);
86 env->problem_found = 1;
93 * Start a walk over the irg and check the register pressure.
95 int be_verify_register_pressure(const be_irg_t *birg, const arch_register_class_t *cls, ir_graph *irg) {
96 be_verify_register_pressure_env_t env;
98 env.lv = be_liveness(irg);
100 env.arch_env = birg->main_env->arch_env;
102 env.registers_available = env.cls->n_regs - be_put_ignore_regs(birg, env.cls, NULL);
103 env.problem_found = 0;
105 irg_block_walk_graph(irg, verify_liveness_walker, NULL, &env);
106 be_liveness_free(env.lv);
108 return ! env.problem_found;
113 //---------------------------------------------------------------------------
/**
 * Environment for the schedule verifier; filled in be_verify_schedule()
 * and threaded through the walkers.
 */
typedef struct be_verify_schedule_env_t_ {
	int problem_found; /**< flags indicating if there was a problem */
	bitset_t *scheduled; /**< bitset of scheduled nodes */
	ir_graph *irg; /**< the irg to check */
	const arch_env_t *arch_env; /**< the arch_env */
} be_verify_schedule_env_t;
125 * Simple schedule checker.
127 static void verify_schedule_walker(ir_node *block, void *data) {
128 be_verify_schedule_env_t *env = (be_verify_schedule_env_t*) data;
130 int non_phi_found = 0;
131 int cfchange_found = 0;
132 // TODO ask arch about delay branches
133 int delay_branches = 0;
134 int last_timestep = INT_MIN;
137 * Tests for the following things:
138 * 1. Make sure that all phi nodes are scheduled at the beginning of the block
139 * 2. There is 1 or no control flow changing node scheduled and exactly delay_branches operations after it.
140 * 3. No value is defined after it has been used
142 sched_foreach(block, node) {
146 // this node is scheduled
147 if(bitset_is_set(env->scheduled, get_irn_idx(node))) {
148 ir_fprintf(stderr, "Verify warning: %+F appears to be schedule twice\n");
149 env->problem_found = 1;
151 bitset_set(env->scheduled, get_irn_idx(node));
153 // Check that scheduled nodes are in the correct block
154 if(get_nodes_block(node) != block) {
155 ir_fprintf(stderr, "Verify warning: %+F is in block %+F but scheduled in %+F\n", node, get_nodes_block(node), block);
156 env->problem_found = 1;
159 // Check that timesteps are increasing
160 timestep = sched_get_time_step(node);
161 if(timestep <= last_timestep) {
162 ir_fprintf(stderr, "Verify warning: Schedule timestep did not increase at node %+F\n",
164 env->problem_found = 1;
166 last_timestep = timestep;
168 // Check that phis come before any other node
171 ir_fprintf(stderr, "Verify Warning: Phi node %+F scheduled after non-Phi nodes in block %+F (%s)\n",
172 node, block, get_irg_dump_name(env->irg));
173 env->problem_found = 1;
179 // Check for control flow changing nodes
180 if (is_cfop(node) && get_irn_opcode(node) != iro_Start) {
181 /* check, that only one CF operation is scheduled */
182 if (cfchange_found == 1) {
183 ir_fprintf(stderr, "Verify Warning: More than 1 control flow changing node (%+F) scheduled in block %+F (%s)\n",
184 node, block, get_irg_dump_name(env->irg));
185 env->problem_found = 1;
188 } else if (cfchange_found) {
189 // proj and keepany aren't real instructions...
190 if(!is_Proj(node) && !be_is_Keep(node)) {
191 /* check for delay branches */
192 if (delay_branches == 0) {
193 ir_fprintf(stderr, "Verify Warning: Node %+F scheduled after control flow changing node (+delay branches) in block %+F (%s)\n",
194 node, block, get_irg_dump_name(env->irg));
195 env->problem_found = 1;
202 // Check that all uses come before their definitions
204 int nodetime = sched_get_time_step(node);
205 for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
206 ir_node *arg = get_irn_n(node, i);
207 if(get_nodes_block(arg) != block
208 || !sched_is_scheduled(arg))
211 if(sched_get_time_step(arg) >= nodetime) {
212 ir_fprintf(stderr, "Verify Warning: Value %+F used by %+F before it was defined in block %+F (%s)\n",
213 arg, node, block, get_irg_dump_name(env->irg));
214 env->problem_found = 1;
219 // Check that no dead nodes are scheduled
220 if(get_irn_n_edges(node) == 0) {
221 ir_fprintf(stderr, "Verify warning: Node %+F is dead but scheduled in block %+F (%s)\n",
222 node, block, get_irg_dump_name(env->irg));
223 env->problem_found = 1;
227 /* check that all delay branches are filled (at least with NOPs) */
228 if (cfchange_found && delay_branches != 0) {
229 ir_fprintf(stderr, "Verify warning: Not all delay slots filled after jump (%d/%d) in block %+F (%s)\n",
230 block, get_irg_dump_name(env->irg));
231 env->problem_found = 1;
/**
 * Decide whether @p node is expected to appear in a schedule.
 *
 * NOTE(review): several lines of this function are elided in this view
 * (the early returns, switch cases and the final return); the comments
 * below describe only the visible tests — confirm against the full source.
 */
static int should_be_scheduled(be_verify_schedule_env_t *env, ir_node *node) {
	/* memory values: Phi/Sync/Pin on mode_M are virtual dependencies,
	 * presumably exempt from scheduling (result elided) */
	if(get_irn_mode(node) == mode_M) {
	if(is_Phi(node) || is_Sync(node) || is_Pin(node))
	/* mode_X Projs carry control flow, not real instructions (result elided) */
	if(is_Proj(node) && get_irn_mode(node) == mode_X)
	/* Keeps hanging off a Bad block (result elided) */
	if(be_is_Keep(node) && get_irn_opcode(get_nodes_block(node)) == iro_Bad)
	/* per-opcode exemptions (cases elided) */
	switch(get_irn_opcode(node)) {
	/* nodes flagged "ignore" by the architecture need no schedule entry
	 * (result elided) */
	if(arch_irn_get_flags(env->arch_env, node) & arch_irn_flags_ignore)
266 static void check_schedule(ir_node *node, void *data) {
267 be_verify_schedule_env_t *env = data;
271 should_be = should_be_scheduled(env, node);
275 scheduled = bitset_is_set(env->scheduled, get_irn_idx(node)) ? 1 : 0;
276 should_be = should_be ? 1 : 0;
277 if(should_be != scheduled) {
278 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) should%s be scheduled\n",
279 node, get_nodes_block(node), get_irg_dump_name(env->irg), should_be ? "" : " not");
280 env->problem_found = 1;
285 * Start a walk over the irg and check schedule.
287 int be_verify_schedule(const be_irg_t *birg)
289 be_verify_schedule_env_t env;
291 env.problem_found = 0;
292 env.irg = be_get_birg_irg(birg);
293 env.scheduled = bitset_alloca(get_irg_last_idx(env.irg));
294 env.arch_env = birg->main_env->arch_env;
296 irg_block_walk_graph(env.irg, verify_schedule_walker, NULL, &env);
297 // check if all nodes are scheduled
298 irg_walk_graph(env.irg, check_schedule, NULL, &env);
300 return ! env.problem_found;
305 //---------------------------------------------------------------------------
309 typedef struct _spill_t {
315 const arch_env_t *arch_env;
320 } be_verify_spillslots_env_t;
322 static int cmp_spill(const void* d1, const void* d2, size_t size) {
323 const spill_t* s1 = d1;
324 const spill_t* s2 = d2;
325 return s1->spill != s2->spill;
328 static spill_t *find_spill(be_verify_spillslots_env_t *env, ir_node *node) {
332 return set_find(env->spills, &spill, sizeof(spill), HASH_PTR(node));
335 static spill_t *get_spill(be_verify_spillslots_env_t *env, ir_node *node, ir_entity *ent) {
337 int hash = HASH_PTR(node);
340 res = set_find(env->spills, &spill, sizeof(spill), hash);
344 res = set_insert(env->spills, &spill, sizeof(spill), hash);
350 static ir_node *get_memory_edge(const ir_node *node) {
352 ir_node *result = NULL;
354 arity = get_irn_arity(node);
355 for(i = arity - 1; i >= 0; --i) {
356 ir_node *arg = get_irn_n(node, i);
357 if(get_irn_mode(arg) == mode_M) {
358 assert(result == NULL);
366 static void collect(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent);
368 static void check_entity(be_verify_spillslots_env_t *env, ir_node *node, ir_entity *ent) {
370 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) should have an entity assigned\n",
371 node, get_nodes_block(node), get_irg_dump_name(env->irg));
375 static void collect_spill(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent) {
376 ir_entity *spillent = arch_get_frame_entity(env->arch_env, node);
377 check_entity(env, node, spillent);
378 get_spill(env, node, ent);
380 if(spillent != ent) {
381 ir_fprintf(stderr, "Verify warning: Spill %+F has different entity than reload %+F in block %+F(%s)\n",
382 node, reload, get_nodes_block(node), get_irg_dump_name(env->irg));
383 env->problem_found = 1;
387 static void collect_memperm(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent) {
390 int hash = HASH_PTR(node);
395 assert(is_Proj(node));
397 memperm = get_Proj_pred(node);
398 out = get_Proj_proj(node);
400 spillent = be_get_MemPerm_out_entity(memperm, out);
401 check_entity(env, memperm, spillent);
402 if(spillent != ent) {
403 ir_fprintf(stderr, "Verify warning: MemPerm %+F has different entity than reload %+F in block %+F(%s)\n",
404 node, reload, get_nodes_block(node), get_irg_dump_name(env->irg));
405 env->problem_found = 1;
409 res = set_find(env->spills, &spill, sizeof(spill), hash);
414 spill.ent = spillent;
415 res = set_insert(env->spills, &spill, sizeof(spill), hash);
417 for(i = 0, arity = be_get_MemPerm_entity_arity(memperm); i < arity; ++i) {
418 ir_node* arg = get_irn_n(memperm, i + 1);
419 ir_entity* argent = be_get_MemPerm_in_entity(memperm, i);
421 collect(env, arg, memperm, argent);
425 static void collect_memphi(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity *ent) {
428 int hash = HASH_PTR(node);
430 assert(is_Phi(node));
433 res = set_find(env->spills, &spill, sizeof(spill), hash);
439 res = set_insert(env->spills, &spill, sizeof(spill), hash);
441 // is 1 of the arguments a spill?
442 for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
443 ir_node* arg = get_irn_n(node, i);
444 collect(env, arg, reload, ent);
448 static void collect(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent) {
449 if(be_is_Spill(node)) {
450 collect_spill(env, node, reload, ent);
451 } else if(is_Proj(node)) {
452 collect_memperm(env, node, reload, ent);
453 } else if(is_Phi(node) && get_irn_mode(node) == mode_M) {
454 collect_memphi(env, node, reload, ent);
456 // Disabled for now, spills might get transformed by the backend
458 ir_fprintf(stderr, "Verify warning: No spill, memperm or memphi attached to node %+F found from node %+F in block %+F(%s)\n",
459 node, reload, get_nodes_block(node), get_irg_dump_name(env->irg));
460 env->problem_found = 1;
466 * This walker function searches for reloads and collects all the spills
467 * and memphis attached to them.
469 static void collect_spills_walker(ir_node *node, void *data) {
470 be_verify_spillslots_env_t *env = data;
471 const arch_env_t *arch_env = env->arch_env;
473 // @@@ ia32_classify returns classification of Proj_pred :-/
477 if(arch_irn_class_is(arch_env, node, reload)) {
478 ir_node *spill = get_memory_edge(node);
482 ir_fprintf(stderr, "Verify warning: No spill attached to reload %+F in block %+F(%s)\n",
483 node, get_nodes_block(node), get_irg_dump_name(env->irg));
484 env->problem_found = 1;
487 ent = arch_get_frame_entity(env->arch_env, node);
488 check_entity(env, node, ent);
490 collect(env, spill, node, ent);
491 ARR_APP1(ir_node*, env->reloads, node);
495 static void check_spillslot_interference(be_verify_spillslots_env_t *env) {
496 int spillcount = set_count(env->spills);
497 spill_t **spills = alloca(spillcount * sizeof(spills[0]));
501 for(spill = set_first(env->spills), i = 0; spill != NULL; spill = set_next(env->spills), ++i) {
505 for(i = 0; i < spillcount; ++i) {
506 spill_t *sp1 = spills[i];
509 for(i2 = i+1; i2 < spillcount; ++i2) {
510 spill_t *sp2 = spills[i2];
512 if(sp1->ent != sp2->ent)
515 if(my_values_interfere(sp1->spill, sp2->spill)) {
516 ir_fprintf(stderr, "Verify warning: Spillslots for %+F in block %+F(%s) and %+F in block %+F(%s) interfere\n",
517 sp1->spill, get_nodes_block(sp1->spill), get_irg_dump_name(env->irg),
518 sp2->spill, get_nodes_block(sp2->spill), get_irg_dump_name(env->irg));
519 env->problem_found = 1;
520 my_values_interfere(sp1->spill, sp2->spill);
526 static void check_lonely_spills(ir_node *node, void *data) {
527 be_verify_spillslots_env_t *env = data;
529 if(be_is_Spill(node) || (is_Proj(node) && be_is_MemPerm(get_Proj_pred(node)))) {
530 spill_t *spill = find_spill(env, node);
531 if(be_is_Spill(node)) {
532 ir_entity *ent = arch_get_frame_entity(env->arch_env, node);
533 check_entity(env, node, ent);
537 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) not connected to a reaload\n",
538 node, get_nodes_block(node), get_irg_dump_name(env->irg));
543 int be_verify_spillslots(const arch_env_t *arch_env, ir_graph *irg)
545 be_verify_spillslots_env_t env;
547 env.arch_env = arch_env;
549 env.spills = new_set(cmp_spill, 10);
550 env.reloads = NEW_ARR_F(ir_node*, 0);
551 env.problem_found = 0;
553 irg_walk_graph(irg, collect_spills_walker, NULL, &env);
554 irg_walk_graph(irg, check_lonely_spills, NULL, &env);
556 check_spillslot_interference(&env);
558 DEL_ARR_F(env.reloads);
561 return ! env.problem_found;
566 //---------------------------------------------------------------------------
/**
 * Check, if two values interfere.
 * @param a The first value.
 * @param b The second value.
 * @return 1, if a and b interfere, 0 if not.
 *
 * NOTE(review): several lines of this function are elided in this view
 * (the no-dominance early return, the a/b swap, the declaration of bb and
 * the final returns); comments below describe only the visible logic.
 */
static int my_values_interfere(const ir_node *a, const ir_node *b) {
	const ir_edge_t *edge;
	int a2b = value_dominates(a, b);
	int b2a = value_dominates(b, a);
	/* If there is no dominance relation, they do not interfere. */
	/*
	 * Adjust a and b so, that a dominates b if
	 * a dominates b or vice versa.
	 * (swap elided)
	 */
		const ir_node *t = a;
	/* block of the dominated value */
	bb = get_nodes_block(b);
	/*
	 * Look at all usages of a.
	 * If there's one usage of a in the block of b, then
	 * we check, if this use is dominated by b, if that's true
	 * a and b interfere. Note that b must strictly dominate the user,
	 * since if b is the last user of in the block, b and a do not
	 * interfere.
	 * Uses of a not in b's block can be disobeyed, because the
	 * check for a being live at the end of b's block is already
	 * performed.
	 */
	foreach_out_edge(a, edge) {
		const ir_node *user = get_edge_src_irn(edge);
		/* keep-alive edges from End are not real uses */
		if(get_irn_opcode(user) == iro_End)
		// in case of phi arguments we compare with the block the value comes from
			ir_node *phiblock = get_nodes_block(user);
			user = get_irn_n(phiblock, get_edge_src_pos(edge));
		if(value_dominates(b, user))
634 //---------------------------------------------------------------------------
638 typedef struct _be_verify_register_allocation_env_t {
639 const arch_env_t *arch_env;
643 } be_verify_register_allocation_env_t;
/**
 * Check that the register assigned to @p node's result and to each of its
 * operands exists and satisfies the respective register constraint.
 *
 * NOTE(review): a few lines are elided in this view (the "reg == NULL"
 * conditions, loop-variable declarations and closing braces).
 */
static void check_register_constraints(ir_node *node, be_verify_register_allocation_env_t *env) {
	const arch_env_t *arch_env = env->arch_env;
	const arch_register_t *reg;
	/* verify output register */
	if (arch_get_irn_reg_class(arch_env, node, -1) != NULL) {
		reg = arch_get_irn_register(arch_env, node);
		/* (elided condition: no register assigned at all) */
			ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) should have a register assigned\n",
			           node, get_nodes_block(node), get_irg_dump_name(env->irg));
			env->problem_found = 1;
		/* register exists but violates the output constraint */
		else if (! arch_register_type_is(reg, joker) && !arch_reg_is_allocatable(arch_env, node, -1, reg)) {
			ir_fprintf(stderr, "Verify warning: Register %s assigned as output of %+F not allowed (register constraint) in block %+F(%s)\n",
				reg->name, node, get_nodes_block(node), get_irg_dump_name(env->irg));
			env->problem_found = 1;
		/* verify input register */
		arity = get_irn_arity(node);
		for (i = 0; i < arity; ++i) {
			ir_node *pred = get_irn_n(node, i);
			/* Unknown operands carry no meaningful register */
			if (is_Unknown(pred))
			/* (elided condition: Bad operand) */
				ir_fprintf(stderr, "Verify warning: %+F in block %+F(%s) has Bad as input %d\n",
					node, get_nodes_block(node), get_irg_dump_name(env->irg), i);
				env->problem_found = 1;
			/* operands outside any register class need no check */
			if (arch_get_irn_reg_class(arch_env, node, i) == NULL)
			reg = arch_get_irn_register(arch_env, pred);
			/* (elided condition: operand has no register assigned) */
				ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) should have a register assigned\n",
					pred, get_nodes_block(pred), get_irg_dump_name(env->irg));
				env->problem_found = 1;
			/* register exists but violates the input constraint */
			else if (! arch_register_type_is(reg, joker) && ! arch_reg_is_allocatable(arch_env, node, i, reg)) {
				ir_fprintf(stderr, "Verify warning: Register %s as input %d of %+F not allowed (register constraint) in block %+F(%s)\n",
					reg->name, i, node, get_nodes_block(node), get_irg_dump_name(env->irg));
				env->problem_found = 1;
/**
 * Check that within the live set @p nodes no register of @p regclass is
 * assigned to more than one value.
 *
 * NOTE(review): several lines are elided in this view (declarations,
 * `continue`s and the branch structure around the double-assignment
 * report) — the visible lines only sketch the algorithm.
 */
static void check_register_allocation(be_verify_register_allocation_env_t *env,
                                      const arch_register_class_t *regclass, pset *nodes) {
	const arch_env_t *arch_env = env->arch_env;
	const arch_register_t *reg = NULL;
	/* one bit per register of the class: marks registers already taken */
	bitset_t *registers = bitset_alloca(arch_register_class_n_regs(regclass));

	foreach_pset(nodes, node) {
		/* only values of the class under test are relevant */
		if (arch_get_irn_reg_class(arch_env, node, -1) != regclass)
		reg = arch_get_irn_register(arch_env, node);
		/* this problem is already reported in 'check_register_constraints' */
		/* register seen twice -> double assignment (report elided) */
		if (bitset_is_set(registers, reg->index)) {
		bitset_set(registers, reg->index);
	ir_fprintf(stderr, "Verify warning: Register %s assigned more than once in block %+F(%s)\n",
	           reg->name, get_nodes_block(node), get_irg_dump_name(env->irg));
	env->problem_found = 1;
	/* list every node carrying the conflicting register */
	foreach_pset(nodes, node) {
		if (arch_get_irn_register(arch_env, node) == reg) {
			ir_fprintf(stderr, " at node %+F\n", node);
737 static void verify_block_register_allocation(ir_node *block, void *data) {
738 be_verify_register_allocation_env_t *env = data;
739 const arch_env_t *arch_env = env->arch_env;
740 const arch_isa_t *isa = arch_env->isa;
743 nregclasses = arch_isa_get_n_reg_class(isa);
744 for (i = 0; i < nregclasses; ++i) {
745 const arch_register_class_t *regclass = arch_isa_get_reg_class(isa, i);
747 pset *live_nodes = pset_new_ptr_default();
749 be_liveness_end_of_block(env->lv, env->arch_env, regclass, block, live_nodes);
750 check_register_allocation(env, regclass, live_nodes);
752 sched_foreach_reverse(block, node) {
756 be_liveness_transfer(env->arch_env, regclass, node, live_nodes);
757 check_register_allocation(env, regclass, live_nodes);
758 check_register_constraints(node, env);
761 del_pset(live_nodes);
765 int be_verify_register_allocation(const arch_env_t *arch_env, ir_graph *irg) {
766 be_verify_register_allocation_env_t env;
768 env.arch_env = arch_env;
770 env.lv = be_liveness(irg);
771 env.problem_found = 0;
773 irg_block_walk_graph(irg, verify_block_register_allocation, NULL, &env);
775 be_liveness_free(env.lv);
777 return !env.problem_found;
782 //---------------------------------------------------------------------------
786 typedef struct _verify_out_dead_nodes_env {
790 } verify_out_dead_nodes_env;
792 static void check_out_edges(ir_node *node, verify_out_dead_nodes_env *env) {
793 ir_graph *irg = env->irg;
794 const ir_edge_t* edge;
796 if(irn_visited(node))
798 mark_irn_visited(node);
800 foreach_out_edge(node, edge) {
801 ir_node* src = get_edge_src_irn(edge);
803 if(!bitset_is_set(env->reachable, get_irn_idx(src)) && !is_Block(node)) {
804 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) only reachable through out edges from %+F\n",
805 src, get_nodes_block(src), get_irg_dump_name(irg), node);
806 env->problem_found = 1;
810 check_out_edges(src, env);
814 static void set_reachable(ir_node *node, void* data)
816 bitset_t* reachable = data;
817 bitset_set(reachable, get_irn_idx(node));
820 int be_verify_out_edges(ir_graph *irg) {
821 verify_out_dead_nodes_env env;
824 env.reachable = bitset_alloca(get_irg_last_idx(irg));
825 env.problem_found = edges_verify(irg);
827 irg_walk_in_or_dep_graph(irg, set_reachable, NULL, env.reachable);
828 irg_walk_anchors(irg, set_reachable, NULL, env.reachable);
829 inc_irg_visited(irg);
830 check_out_edges(get_irg_start(irg), &env);
832 return ! env.problem_found;