2 * Author: Matthias Braun
4 * Copyright: (c) Universitaet Karlsruhe
5 * License: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
27 #include "besched_t.h"
31 static int my_values_interfere(const ir_node *a, const ir_node *b);
/** Walker environment for the register pressure verifier. */
33 typedef struct be_verify_register_pressure_env_t_ {
34 ir_graph *irg; /**< the irg to verify */
35 be_lv_t *lv; /**< Liveness information. */
36 const arch_env_t *arch_env; /**< an architecture environment */
37 const arch_register_class_t *cls; /**< the register class to check for */
38 int registers_available; /**< number of available registers */
39 int problem_found; /**< flag indicating if a problem was found */
40 } be_verify_register_pressure_env_t;
43 * Print all nodes of a pset into a file.
/* Writes each node of 'live_nodes' to F, space-separated, via %+F.
 * NOTE(review): the loop variable declaration and the function's closing
 * lines are not visible in this excerpt. */
45 static void print_living_values(FILE *F, pset *live_nodes) {
49 foreach_pset(live_nodes, node) {
50 ir_fprintf(F, "%+F ", node);
56 * Check if number of live nodes never exceeds the number of available registers.
/**
 * Block walker: computes the register pressure at the end of 'block' and
 * before every scheduled instruction (walking the schedule in reverse),
 * and reports every point where it exceeds env->registers_available.
 */
58 static void verify_liveness_walker(ir_node *block, void *data) {
59 be_verify_register_pressure_env_t *env = (be_verify_register_pressure_env_t *)data;
60 pset *live_nodes = pset_new_ptr_default();
64 /* collect register pressure info, start with end of a block */
65 be_liveness_end_of_block(env->lv, env->arch_env, env->cls, block, live_nodes);
/* NOTE(review): the declaration of 'pressure' is not visible in this excerpt */
67 pressure = pset_count(live_nodes);
68 if(pressure > env->registers_available) {
69 ir_fprintf(stderr, "Verify Warning: Register pressure too high at end of block %+F(%s) (%d/%d):\n",
70 block, get_irg_dump_name(env->irg), pressure, env->registers_available);
71 print_living_values(stderr, live_nodes);
72 env->problem_found = 1;
/* walk the schedule backwards, updating liveness at each instruction */
75 sched_foreach_reverse(block, irn) {
79 be_liveness_transfer(env->arch_env, env->cls, irn, live_nodes);
81 pressure = pset_count(live_nodes);
83 if(pressure > env->registers_available) {
84 ir_fprintf(stderr, "Verify Warning: Register pressure too high before node %+F in %+F(%s) (%d/%d):\n",
85 irn, block, get_irg_dump_name(env->irg), pressure, env->registers_available);
86 print_living_values(stderr, live_nodes);
87 env->problem_found = 1;
94 * Start a walk over the irg and check the register pressure.
96 int be_verify_register_pressure(const be_irg_t *birg,
97 const arch_register_class_t *cls,
99 be_verify_register_pressure_env_t env;
101 env.lv = be_liveness(irg);
103 env.arch_env = birg->main_env->arch_env;
105 env.registers_available = env.cls->n_regs - be_put_ignore_regs(birg, env.cls, NULL);
106 env.problem_found = 0;
108 irg_block_walk_graph(irg, verify_liveness_walker, NULL, &env);
109 be_liveness_free(env.lv);
111 return ! env.problem_found;
116 //---------------------------------------------------------------------------
/** Walker environment for the schedule verifier. */
120 typedef struct be_verify_schedule_env_t_ {
121 int problem_found; /**< flags indicating if there was a problem */
122 bitset_t *scheduled; /**< bitset of scheduled nodes */
123 ir_graph *irg; /**< the irg to check */
124 const arch_env_t *arch_env; /**< the arch_env */
125 } be_verify_schedule_env_t;
128 * Simple schedule checker.
/**
 * Block walker verifying schedule invariants: no node scheduled twice or
 * in a foreign block, strictly increasing timesteps, Phis before all
 * other nodes, at most one control-flow changing node (plus delay
 * branches), all uses after their definitions, and no dead nodes in the
 * schedule.
 */
130 static void verify_schedule_walker(ir_node *block, void *data) {
131 be_verify_schedule_env_t *env = (be_verify_schedule_env_t*) data;
133 int non_phi_found = 0;
134 int cfchange_found = 0;
135 // TODO ask arch about delay branches
136 int delay_branches = 0;
137 int last_timestep = INT_MIN;
140 * Tests for the following things:
141 * 1. Make sure that all phi nodes are scheduled at the beginning of the block
142 * 2. There is 1 or no control flow changing node scheduled and exactly delay_branches operations after it.
143 * 3. No value is defined after it has been used
145 sched_foreach(block, node) {
149 // this node is scheduled
150 if(bitset_is_set(env->scheduled, get_irn_idx(node))) {
/* BUG(review): the format string contains %+F but no matching 'node'
 * argument is passed (undefined behavior for fprintf-style functions);
 * also "schedule twice" should read "scheduled twice". */
151 ir_fprintf(stderr, "Verify warning: %+F appears to be schedule twice\n");
152 env->problem_found = 1;
154 bitset_set(env->scheduled, get_irn_idx(node));
156 // Check that scheduled nodes are in the correct block
157 if(get_nodes_block(node) != block) {
158 ir_fprintf(stderr, "Verify warning: %+F is in block %+F but scheduled in %+F\n", node, get_nodes_block(node), block);
159 env->problem_found = 1;
162 // Check that timesteps are increasing
/* NOTE(review): the declaration of 'timestep' is not visible in this excerpt */
163 timestep = sched_get_time_step(node);
164 if(timestep <= last_timestep) {
165 ir_fprintf(stderr, "Verify warning: Schedule timestep did not increase at node %+F\n",
167 env->problem_found = 1;
169 last_timestep = timestep;
171 // Check that phis come before any other node
/* NOTE(review): the is_Phi/non_phi_found condition guarding this report
 * is on lines not visible in this excerpt */
174 ir_fprintf(stderr, "Verify Warning: Phi node %+F scheduled after non-Phi nodes in block %+F (%s)\n",
175 node, block, get_irg_dump_name(env->irg));
176 env->problem_found = 1;
182 // Check for control flow changing nodes
183 if (is_cfop(node) && get_irn_opcode(node) != iro_Start) {
184 /* check, that only one CF operation is scheduled */
185 if (cfchange_found == 1) {
186 ir_fprintf(stderr, "Verify Warning: More than 1 control flow changing node (%+F) scheduled in block %+F (%s)\n",
187 node, block, get_irg_dump_name(env->irg));
188 env->problem_found = 1;
191 } else if (cfchange_found) {
192 // proj and keepany aren't real instructions...
193 if(!is_Proj(node) && !be_is_Keep(node)) {
194 /* check for delay branches */
195 if (delay_branches == 0) {
196 ir_fprintf(stderr, "Verify Warning: Node %+F scheduled after control flow changing node (+delay branches) in block %+F (%s)\n",
197 node, block, get_irg_dump_name(env->irg));
198 env->problem_found = 1;
205 // Check that all uses come before their definitions
207 int nodetime = sched_get_time_step(node);
208 for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
209 ir_node *arg = get_irn_n(node, i);
/* only arguments scheduled in the same block can violate the ordering */
210 if(get_nodes_block(arg) != block
211 || !sched_is_scheduled(arg))
214 if(sched_get_time_step(arg) >= nodetime) {
215 ir_fprintf(stderr, "Verify Warning: Value %+F used by %+F before it was defined in block %+F (%s)\n",
216 arg, node, block, get_irg_dump_name(env->irg));
217 env->problem_found = 1;
222 // Check that no dead nodes are scheduled
223 if(get_irn_n_edges(node) == 0) {
224 ir_fprintf(stderr, "Verify warning: Node %+F is dead but scheduled in block %+F (%s)\n",
225 node, block, get_irg_dump_name(env->irg));
226 env->problem_found = 1;
230 /* check that all delay branches are filled (at least with NOPs) */
231 if (cfchange_found && delay_branches != 0) {
/* BUG(review): the format expects two %d values, a %+F and a %s, but
 * only 'block' and the dump name are passed — the delay-slot counts are
 * missing from the argument list (undefined behavior). */
232 ir_fprintf(stderr, "Verify warning: Not all delay slots filled after jump (%d/%d) in block %+F (%s)\n",
233 block, get_irg_dump_name(env->irg));
234 env->problem_found = 1;
/**
 * Predicate: should 'node' appear in the schedule at all?
 * NOTE(review): the 'return' lines of this predicate are not visible in
 * this excerpt; the visible conditions only show which node kinds are
 * special-cased (mode_M values, Phi/Sync/Pin, X-mode Projs, Keeps in a
 * Bad block, selected opcodes, and ignore-flagged nodes).
 */
238 static int should_be_scheduled(be_verify_schedule_env_t *env, ir_node *node) {
242 if(get_irn_mode(node) == mode_M) {
245 if(is_Phi(node) || is_Sync(node) || is_Pin(node))
248 if(is_Proj(node) && get_irn_mode(node) == mode_X)
250 if(be_is_Keep(node) && get_irn_opcode(get_nodes_block(node)) == iro_Bad)
253 switch(get_irn_opcode(node)) {
263 if(arch_irn_get_flags(env->arch_env, node) & arch_irn_flags_ignore)
/**
 * Graph walker: report nodes whose actual scheduled state (recorded in
 * env->scheduled during the block walk) differs from what
 * should_be_scheduled() predicts.
 */
269 static void check_schedule(ir_node *node, void *data) {
270 be_verify_schedule_env_t *env = data;
274 should_be = should_be_scheduled(env, node);
/* normalise both values to 0/1 before comparing */
278 scheduled = bitset_is_set(env->scheduled, get_irn_idx(node)) ? 1 : 0;
279 should_be = should_be ? 1 : 0;
280 if(should_be != scheduled) {
281 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) should%s be scheduled\n",
282 node, get_nodes_block(node), get_irg_dump_name(env->irg), should_be ? "" : " not");
283 env->problem_found = 1;
288 * Start a walk over the irg and check schedule.
290 int be_verify_schedule(const be_irg_t *birg)
292 be_verify_schedule_env_t env;
294 env.problem_found = 0;
295 env.irg = be_get_birg_irg(birg);
296 env.scheduled = bitset_alloca(get_irg_last_idx(env.irg));
297 env.arch_env = birg->main_env->arch_env;
299 irg_block_walk_graph(env.irg, verify_schedule_walker, NULL, &env);
300 // check if all nodes are scheduled
301 irg_walk_graph(env.irg, check_schedule, NULL, &env);
303 return ! env.problem_found;
308 //---------------------------------------------------------------------------
/* Entry associating one spill node with its stack entity.
 * NOTE(review): the struct's fields are on lines not visible in this
 * excerpt. */
312 typedef struct _spill_t {
/* Walker environment for the spill slot verifier.
 * NOTE(review): the remaining fields (irg, spills set, reloads array,
 * problem_found, ...) are on lines not visible in this excerpt. */
318 const arch_env_t *arch_env;
323 } be_verify_spillslots_env_t;
325 static int cmp_spill(const void* d1, const void* d2, size_t size) {
326 const spill_t* s1 = d1;
327 const spill_t* s2 = d2;
328 return s1->spill != s2->spill;
/**
 * Look up the spill_t entry registered for 'node' in env->spills, or
 * NULL if none exists.
 * NOTE(review): the local key 'spill' and its initialisation are on
 * lines not visible in this excerpt.
 */
331 static spill_t *find_spill(be_verify_spillslots_env_t *env, ir_node *node) {
335 return set_find(env->spills, &spill, sizeof(spill), HASH_PTR(node));
/**
 * Return the spill_t entry for 'node', inserting a fresh one (with
 * entity 'ent') if it is not yet in env->spills.
 * NOTE(review): the key initialisation and the early-return on a found
 * entry are on lines not visible in this excerpt.
 */
338 static spill_t *get_spill(be_verify_spillslots_env_t *env, ir_node *node, ir_entity *ent) {
340 int hash = HASH_PTR(node);
343 res = set_find(env->spills, &spill, sizeof(spill), hash);
347 res = set_insert(env->spills, &spill, sizeof(spill), hash);
/**
 * Return the unique memory (mode_M) input of 'node', or NULL if it has
 * none; asserts that no second memory input exists.
 */
353 static ir_node *get_memory_edge(const ir_node *node) {
355 ir_node *result = NULL;
357 arity = get_irn_arity(node);
/* scan inputs back-to-front looking for the mode_M operand */
358 for(i = arity - 1; i >= 0; --i) {
359 ir_node *arg = get_irn_n(node, i);
360 if(get_irn_mode(arg) == mode_M) {
361 assert(result == NULL);
369 static void collect(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent);
/**
 * Complain if 'node' has no frame entity assigned.
 * NOTE(review): the NULL-check guarding this report and the
 * problem_found update are on lines not visible in this excerpt.
 */
371 static void check_entity(be_verify_spillslots_env_t *env, ir_node *node, ir_entity *ent) {
373 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) should have an entity assigned\n",
374 node, get_nodes_block(node), get_irg_dump_name(env->irg));
/**
 * Handle a Spill node reached from 'reload': register it in the spill
 * set and check that its frame entity matches the entity 'ent' seen at
 * the reload.
 */
378 static void collect_spill(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent) {
379 ir_entity *spillent = arch_get_frame_entity(env->arch_env, node);
380 check_entity(env, node, spillent);
381 get_spill(env, node, ent);
383 if(spillent != ent) {
384 ir_fprintf(stderr, "Verify warning: Spill %+F has different entity than reload %+F in block %+F(%s)\n",
385 node, reload, get_nodes_block(node), get_irg_dump_name(env->irg));
386 env->problem_found = 1;
/**
 * Handle a MemPerm output (a Proj) reached from 'reload': check the
 * MemPerm's out-entity against 'ent', register the Proj in the spill
 * set, then recurse into all MemPerm inputs with their in-entities.
 * NOTE(review): several declarations and the early-return on an already
 * registered entry are on lines not visible in this excerpt.
 */
390 static void collect_memperm(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent) {
393 int hash = HASH_PTR(node);
398 assert(is_Proj(node));
400 memperm = get_Proj_pred(node);
401 out = get_Proj_proj(node);
403 spillent = be_get_MemPerm_out_entity(memperm, out);
404 check_entity(env, memperm, spillent);
405 if(spillent != ent) {
406 ir_fprintf(stderr, "Verify warning: MemPerm %+F has different entity than reload %+F in block %+F(%s)\n",
407 node, reload, get_nodes_block(node), get_irg_dump_name(env->irg));
408 env->problem_found = 1;
412 res = set_find(env->spills, &spill, sizeof(spill), hash);
417 spill.ent = spillent;
418 res = set_insert(env->spills, &spill, sizeof(spill), hash);
/* recurse into the MemPerm inputs (input 0 appears to be skipped — the
 * data operands start at index 1) */
420 for(i = 0, arity = be_get_MemPerm_entity_arity(memperm); i < arity; ++i) {
421 ir_node* arg = get_irn_n(memperm, i + 1);
422 ir_entity* argent = be_get_MemPerm_in_entity(memperm, i);
424 collect(env, arg, memperm, argent);
/**
 * Handle a memory Phi reached from 'reload': register it in the spill
 * set (guarding against cycles via set_find) and recurse into all of
 * its arguments with the same reload/entity.
 * NOTE(review): the key initialisation and the early-return on a found
 * entry are on lines not visible in this excerpt.
 */
428 static void collect_memphi(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity *ent) {
431 int hash = HASH_PTR(node);
433 assert(is_Phi(node));
436 res = set_find(env->spills, &spill, sizeof(spill), hash);
442 res = set_insert(env->spills, &spill, sizeof(spill), hash);
444 // is 1 of the arguments a spill?
445 for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
446 ir_node* arg = get_irn_n(node, i);
447 collect(env, arg, reload, ent);
/**
 * Dispatch on the kind of memory producer reached from 'reload':
 * Spill, MemPerm output (Proj), or memory Phi. The warning for any
 * other node kind is present but disabled (see comment below).
 */
451 static void collect(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent) {
452 if(be_is_Spill(node)) {
453 collect_spill(env, node, reload, ent);
454 } else if(is_Proj(node)) {
455 collect_memperm(env, node, reload, ent);
456 } else if(is_Phi(node) && get_irn_mode(node) == mode_M) {
457 collect_memphi(env, node, reload, ent);
459 // Disabled for now, spills might get transformed by the backend
461 ir_fprintf(stderr, "Verify warning: No spill, memperm or memphi attached to node %+F found from node %+F in block %+F(%s)\n",
462 node, reload, get_nodes_block(node), get_irg_dump_name(env->irg));
463 env->problem_found = 1;
469 * This walker function searches for reloads and collects all the spills
470 * and memphis attached to them.
472 static void collect_spills_walker(ir_node *node, void *data) {
473 be_verify_spillslots_env_t *env = data;
474 const arch_env_t *arch_env = env->arch_env;
476 // @@@ ia32_classify returns classification of Proj_pred :-/
480 if(arch_irn_class_is(arch_env, node, reload)) {
481 ir_node *spill = get_memory_edge(node);
/* a reload without a memory input cannot be matched to any spill */
485 ir_fprintf(stderr, "Verify warning: No spill attached to reload %+F in block %+F(%s)\n",
486 node, get_nodes_block(node), get_irg_dump_name(env->irg));
487 env->problem_found = 1;
490 ent = arch_get_frame_entity(env->arch_env, node);
491 check_entity(env, node, ent);
/* record the spill chain feeding this reload, and the reload itself */
493 collect(env, spill, node, ent);
494 ARR_APP1(ir_node*, env->reloads, node);
/**
 * Pairwise-check all collected spills: two spills assigned the same
 * entity (spill slot) must not interfere, otherwise they would clobber
 * each other's value.
 */
498 static void check_spillslot_interference(be_verify_spillslots_env_t *env) {
499 int spillcount = set_count(env->spills);
500 spill_t **spills = alloca(spillcount * sizeof(spills[0]));
/* flatten the set into an array for O(n^2) pairwise comparison */
504 for(spill = set_first(env->spills), i = 0; spill != NULL; spill = set_next(env->spills), ++i) {
508 for(i = 0; i < spillcount; ++i) {
509 spill_t *sp1 = spills[i];
512 for(i2 = i+1; i2 < spillcount; ++i2) {
513 spill_t *sp2 = spills[i2];
/* different entities never share a slot — nothing to check */
515 if(sp1->ent != sp2->ent)
518 if(my_values_interfere(sp1->spill, sp2->spill)) {
519 ir_fprintf(stderr, "Verify warning: Spillslots for %+F in block %+F(%s) and %+F in block %+F(%s) interfere\n",
520 sp1->spill, get_nodes_block(sp1->spill), get_irg_dump_name(env->irg),
521 sp2->spill, get_nodes_block(sp2->spill), get_irg_dump_name(env->irg));
522 env->problem_found = 1;
/* BUG(review): redundant second call with the result discarded — this
 * looks like a debugging leftover and should be removed */
523 my_values_interfere(sp1->spill, sp2->spill);
/**
 * Graph walker: find spills (and MemPerm outputs) that were never
 * reached from any reload ("lonely" spills) and report them.
 */
529 static void check_lonely_spills(ir_node *node, void *data) {
530 be_verify_spillslots_env_t *env = data;
532 if(be_is_Spill(node) || (is_Proj(node) && be_is_MemPerm(get_Proj_pred(node)))) {
533 spill_t *spill = find_spill(env, node);
534 if(be_is_Spill(node)) {
535 ir_entity *ent = arch_get_frame_entity(env->arch_env, node);
536 check_entity(env, node, ent);
/* NOTE(review): typo in the message — "reaload" should read "reload"
 * (string left unchanged here; fixing it changes program output) */
540 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) not connected to a reaload\n",
541 node, get_nodes_block(node), get_irg_dump_name(env->irg));
546 int be_verify_spillslots(const arch_env_t *arch_env, ir_graph *irg)
548 be_verify_spillslots_env_t env;
550 env.arch_env = arch_env;
552 env.spills = new_set(cmp_spill, 10);
553 env.reloads = NEW_ARR_F(ir_node*, 0);
554 env.problem_found = 0;
556 irg_walk_graph(irg, collect_spills_walker, NULL, &env);
557 irg_walk_graph(irg, check_lonely_spills, NULL, &env);
559 check_spillslot_interference(&env);
561 DEL_ARR_F(env.reloads);
564 return ! env.problem_found;
569 //---------------------------------------------------------------------------
574 * Check, if two values interfere.
575 * @param a The first value.
576 * @param b The second value.
577 * @return 1, if a and b interfere, 0 if not.
/* NOTE(review): several lines of this function (the no-dominance early
 * return, the completion of the a/b swap, the liveness check at the end
 * of b's block, and the final return) are not visible in this excerpt. */
579 static int my_values_interfere(const ir_node *a, const ir_node *b) {
580 const ir_edge_t *edge;
582 int a2b = value_dominates(a, b);
583 int b2a = value_dominates(b, a);
585 /* If there is no dominance relation, they do not interfere. */
590 * Adjust a and b so, that a dominates b if
591 * a dominates b or vice versa.
/* swap so that 'a' is always the dominating value */
594 const ir_node *t = a;
599 bb = get_nodes_block(b);
602 * Look at all usages of a.
603 * If there's one usage of a in the block of b, then
604 * we check, if this use is dominated by b, if that's true
605 * a and b interfere. Note that b must strictly dominate the user,
606 * since if b is the last user of in the block, b and a do not
608 * Uses of a not in b's block can be disobeyed, because the
609 * check for a being live at the end of b's block is already
612 foreach_out_edge(a, edge) {
613 const ir_node *user = get_edge_src_irn(edge);
/* End keep-alive edges are not real uses */
617 if(get_irn_opcode(user) == iro_End)
620 // in case of phi arguments we compare with the block the value comes from
622 ir_node *phiblock = get_nodes_block(user);
625 user = get_irn_n(phiblock, get_edge_src_pos(edge));
628 if(value_dominates(b, user))
637 //---------------------------------------------------------------------------
/* Walker environment for the register allocation verifier.
 * NOTE(review): the remaining fields (irg, liveness info, problem_found,
 * ...) are on lines not visible in this excerpt. */
641 typedef struct _be_verify_register_allocation_env_t {
642 const arch_env_t *arch_env;
646 } be_verify_register_allocation_env_t;
/**
 * Check that 'node' and all of its register-allocated inputs carry a
 * register, and that each assigned register satisfies the node's
 * register constraints.
 */
648 static void check_register_constraints(ir_node *node, be_verify_register_allocation_env_t *env) {
649 const arch_env_t *arch_env = env->arch_env;
650 const arch_register_t *reg;
653 /* verify output register */
654 if (arch_get_irn_reg_class(arch_env, node, -1) != NULL) {
655 reg = arch_get_irn_register(arch_env, node);
/* NOTE(review): the NULL-check on 'reg' guarding this report is on a
 * line not visible in this excerpt */
657 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) should have a register assigned\n",
658 node, get_nodes_block(node), get_irg_dump_name(env->irg));
659 env->problem_found = 1;
/* "joker" registers are exempt from constraint checking */
661 else if (! arch_register_type_is(reg, joker) && !arch_reg_is_allocatable(arch_env, node, -1, reg)) {
662 ir_fprintf(stderr, "Verify warning: Register %s assigned as output of %+F not allowed (register constraint) in block %+F(%s)\n",
663 reg->name, node, get_nodes_block(node), get_irg_dump_name(env->irg));
664 env->problem_found = 1;
668 /* verify input register */
669 arity = get_irn_arity(node);
670 for (i = 0; i < arity; ++i) {
671 ir_node *pred = get_irn_n(node, i);
673 if (is_Unknown(pred))
/* NOTE(review): the is_Bad check guarding this report is on a line not
 * visible in this excerpt */
677 ir_fprintf(stderr, "Verify warning: %+F in block %+F(%s) has Bad as input %d\n",
678 node, get_nodes_block(node), get_irg_dump_name(env->irg), i);
679 env->problem_found = 1;
/* inputs without a register class need no register */
683 if (arch_get_irn_reg_class(arch_env, node, i) == NULL)
686 reg = arch_get_irn_register(arch_env, pred);
688 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) should have a register assigned\n",
689 pred, get_nodes_block(pred), get_irg_dump_name(env->irg));
690 env->problem_found = 1;
693 else if (! arch_register_type_is(reg, joker) && ! arch_reg_is_allocatable(arch_env, node, i, reg)) {
694 ir_fprintf(stderr, "Verify warning: Register %s as input %d of %+F not allowed (register constraint) in block %+F(%s)\n",
695 reg->name, i, node, get_nodes_block(node), get_irg_dump_name(env->irg));
696 env->problem_found = 1;
/**
 * Check that within the set of simultaneously live 'nodes' of the given
 * register class, no register is assigned to more than one value.
 * NOTE(review): the control flow connecting the detection (bitset test)
 * to the error report below — presumably a goto/label or early return —
 * is on lines not visible in this excerpt.
 */
701 static void check_register_allocation(be_verify_register_allocation_env_t *env,
702 const arch_register_class_t *regclass, pset *nodes) {
703 const arch_env_t *arch_env = env->arch_env;
704 const arch_register_t *reg = NULL;
706 bitset_t *registers = bitset_alloca(arch_register_class_n_regs(regclass));
709 foreach_pset(nodes, node) {
710 if (arch_get_irn_reg_class(arch_env, node, -1) != regclass)
713 reg = arch_get_irn_register(arch_env, node);
715 /* this problem is already reported in 'check_register_constraints' */
/* a register seen twice among live values means a double assignment */
719 if (bitset_is_set(registers, reg->index)) {
724 bitset_set(registers, reg->index);
728 ir_fprintf(stderr, "Verify warning: Register %s assigned more than once in block %+F(%s)\n",
729 reg->name, get_nodes_block(node), get_irg_dump_name(env->irg));
730 env->problem_found = 1;
/* list all nodes carrying the clashing register */
732 foreach_pset(nodes, node) {
733 if (arch_get_irn_register(arch_env, node) == reg) {
734 ir_fprintf(stderr, " at node %+F\n", node);
/**
 * Block walker: for every register class, walk the block's schedule in
 * reverse maintaining the live set, and check register assignments and
 * constraints at each program point.
 */
740 static void verify_block_register_allocation(ir_node *block, void *data) {
741 be_verify_register_allocation_env_t *env = data;
742 const arch_env_t *arch_env = env->arch_env;
743 const arch_isa_t *isa = arch_env->isa;
746 nregclasses = arch_isa_get_n_reg_class(isa);
747 for (i = 0; i < nregclasses; ++i) {
748 const arch_register_class_t *regclass = arch_isa_get_reg_class(isa, i);
750 pset *live_nodes = pset_new_ptr_default();
/* start from the values live at the end of the block */
752 be_liveness_end_of_block(env->lv, env->arch_env, regclass, block, live_nodes);
753 check_register_allocation(env, regclass, live_nodes);
755 sched_foreach_reverse(block, node) {
759 be_liveness_transfer(env->arch_env, regclass, node, live_nodes);
760 check_register_allocation(env, regclass, live_nodes);
761 check_register_constraints(node, env);
764 del_pset(live_nodes);
768 int be_verify_register_allocation(const arch_env_t *arch_env, ir_graph *irg) {
769 be_verify_register_allocation_env_t env;
771 env.arch_env = arch_env;
773 env.lv = be_liveness(irg);
774 env.problem_found = 0;
776 irg_block_walk_graph(irg, verify_block_register_allocation, NULL, &env);
778 be_liveness_free(env.lv);
780 return !env.problem_found;
785 //---------------------------------------------------------------------------
/* Environment for the out-edge verifier.
 * NOTE(review): the fields (irg, reachable bitset, problem_found) are on
 * lines not visible in this excerpt. */
789 typedef struct _verify_out_dead_nodes_env {
793 } verify_out_dead_nodes_env;
/**
 * Recursively follow out edges from 'node' and report any target that
 * is not marked reachable through regular in/dependency/anchor edges —
 * i.e. a dead node kept alive only by out edges. Uses the irg visited
 * flag to terminate on cycles.
 */
795 static void check_out_edges(ir_node *node, verify_out_dead_nodes_env *env) {
796 ir_graph *irg = env->irg;
797 const ir_edge_t* edge;
799 if(irn_visited(node))
801 mark_irn_visited(node);
803 foreach_out_edge(node, edge) {
804 ir_node* src = get_edge_src_irn(edge);
806 if(!bitset_is_set(env->reachable, get_irn_idx(src)) && !is_Block(node)) {
807 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) only reachable through out edges from %+F\n",
808 src, get_nodes_block(src), get_irg_dump_name(irg), node);
809 env->problem_found = 1;
/* continue the depth-first traversal along out edges */
813 check_out_edges(src, env);
817 static void set_reachable(ir_node *node, void* data)
819 bitset_t* reachable = data;
820 bitset_set(reachable, get_irn_idx(node));
823 int be_verify_out_edges(ir_graph *irg) {
824 verify_out_dead_nodes_env env;
827 env.reachable = bitset_alloca(get_irg_last_idx(irg));
828 env.problem_found = edges_verify(irg);
830 irg_walk_in_or_dep_graph(irg, set_reachable, NULL, env.reachable);
831 irg_walk_anchors(irg, set_reachable, NULL, env.reachable);
832 inc_irg_visited(irg);
833 check_out_edges(get_irg_start(irg), &env);
835 return ! env.problem_found;