2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @author Christian Helmer
23 * @brief loop inversion and loop unrolling, loop peeling
39 #include "array_t.h" /* automatic array */
40 #include "beutil.h" /* get_block */
41 #include "irloop_t.h" /* set_irn_loop*/
44 DEBUG_ONLY(static firm_dbg_module_t *dbg);
47 * Convenience macro for iterating over every phi node of the given block.
48 * Requires phi list per block.
50 #define for_each_phi(block, phi) \
51 for ( (phi) = get_Block_phis( (block) ); (phi) ; (phi) = get_Phi_next( (phi) ) )
54 static ir_loop *cur_loop;
56 /* abortable walker function */
57 typedef unsigned irg_walk_func_abortable(ir_node *, void *);
59 /* condition for walking a node during a copy_walk */
60 typedef unsigned walker_condition(ir_node *);
62 /* node and position of a predecessor */
63 typedef struct out_edges {
68 /* access complex values through the nodes links */
69 typedef struct node_info {
72 ir_node *link; /* temporary links for ssa creation */
73 ir_node **ins; /* ins for phi nodes, during rewiring of blocks */
75 struct node_info *freelistnext; /* linked list to free all node_infos */
78 static node_info *link_node_state_list; /* head of the linked list to free all node_infos */
80 static out_edge *cur_loop_outs; /* A walker may start visiting the current loop with these nodes. */
81 static out_edge *cur_head_outs; /* A walker may start visiting the cur head with these nodes. */
83 static ir_node *loop_cf_head = NULL; /* Loop head node */
84 static unsigned loop_cf_head_valid = 1; /* A loop may have one head, otherwise we do not touch it. */
89 static ir_node *loop_inv_head = NULL;
91 static ir_node *loop_peeled_head = NULL;
93 /* Loop analysis informations */
94 typedef struct loop_info_t {
95 unsigned blocks; /* number of blocks in the loop */
96 unsigned calls; /* number of calls */
97 unsigned loads; /* number of load nodes */
98 unsigned outs; /* outs without keepalives */
100 unsigned invariant_loads;
101 unsigned stores; /* number of store nodes */
102 unsigned opnodes_n; /* nodes that probably result in an instruction */
103 unsigned do_invariant_opt;
107 /* Information about the current loop */
108 static loop_info_t loop_info;
110 /* A walker may start visiting a condition chain with these nodes. */
111 static out_edge *cond_chain_entries;
113 /* Number of unrolling */
116 static unsigned head_inversion_node_count;
117 static unsigned inversion_head_node_limit;
118 static unsigned head_inversion_block_count;
120 static unsigned enable_peeling;
121 static unsigned enable_inversion;
122 static unsigned enable_unrolling;
126 * ============= AUXILIARY FUNCTIONS =====================================
131 * Creates object on the heap, and adds it to a linked list to free it later.
/* NOTE(review): numbered extract — intervening original lines (braces,
 * declarations, returns) are elided here; verify against the full source. */

/* Allocates a zeroed node_info on the heap and pushes it onto the global
 * free list (link_node_state_list) so free_node_info() can release all
 * of them in one pass. Presumably returns l — TODO confirm (return elided). */
133 static node_info *new_node_info(void)
135 node_info *l = XMALLOCZ(node_info);
136 l->freelistnext = link_node_state_list;
137 link_node_state_list = l;

/* Returns the node_info attached to n via its irn link. */
143 static node_info *get_node_info(ir_node *n)
145 return ((node_info *)get_irn_link(n));

148 /* Allocates a node_info struct for the given node. For use with a walker. */
/* env is unused by the visible code. */
149 static void alloc_node_info(ir_node *node, void *env)
153 state = new_node_info();
154 set_irn_link(node, (void *)state);

/* Frees every node_info reachable from link_node_state_list and resets
 * the list head to NULL. (Loop body and XFREE call elided in this extract.) */
157 static void free_node_info(void)
161 n = link_node_state_list;
163 node_info *next = n->freelistnext;
168 link_node_state_list = NULL;

172 * Use the linked list to reset the reused values of all node_infos
173 * Reset in particular the copy attribute as copy_walk uses it to determine a present copy
175 static void reset_node_infos(void)
178 next = link_node_state_list;
179 while (next->freelistnext) {
180 node_info *cur = next;
181 next = cur->freelistnext;

188 /* Returns the nodes node_info link. */
189 static ir_node *get_link(ir_node *n)
191 return ((node_info *)get_irn_link(n))->link;

194 /* Sets the nodes node_info link. */
195 static void set_link(ir_node *n, ir_node *link)
197 ((node_info *)get_irn_link(n))->link = link;

200 /* Returns a nodes copy. */
201 static ir_node *get_copy(ir_node *n)
203 return ((node_info *)get_irn_link(n))->copy;

206 /* Sets a nodes copy. */
207 static void set_copy(ir_node *n, ir_node *copy)
209 ((node_info *)get_irn_link(n) )->copy = copy;
213 * Convenience macro for iterating over every copy in a linked list
216 #define for_each_copy(node) \
217 for ( ; (node) ; (node) = get_copy(node))
220 * Convenience macro for iterating over every copy in 2 linked lists
221 * of copies in parallel.
223 #define for_each_copy2(high1, low1, high2, low2) \
224 for ( ; (low1) && (low2); (high1) = (low1), (low1) = get_copy(low1), \
225 (high2) = (low2), (low2) = get_copy(low2))
228 * Returns 0 if the node or block is not in cur_loop.
/* NOTE(review): numbered extract — braces and some statements elided. */

/* Returns non-zero iff the node's block belongs to cur_loop. */
230 static unsigned is_in_loop(ir_node *node)
232 return (get_irn_loop(get_block(node)) == cur_loop);

235 /* Returns if the given be is an alien edge. This is the case when the pred is not in the loop. */
236 static unsigned is_alien_edge(ir_node *n, int i)
238 return(!is_in_loop(get_irn_n(n, i)));

241 /* used for block walker */
/* Clears the block mark; env is unused by the visible code. */
242 static void reset_block_mark(ir_node *node, void * env)
247 set_Block_mark(node, 0);

/* Returns the mark of the block containing the given node. */
250 static unsigned is_nodesblock_marked(ir_node* node)
252 return (get_Block_mark(get_block(node)));

255 /* Returns the number of blocks in a loop. */
/* Iterates the loop elements and counts those that are Block nodes.
 * (Counter increment and return are elided in this extract.) */
256 int get_loop_n_blocks(ir_loop *loop)
260 elements = get_loop_n_elements(loop);
262 for (e=0; e<elements; e++) {
263 loop_element elem = get_loop_element(loop, e);
264 if (is_ir_node(elem.kind) && is_Block(elem.node))
271 * Add newpred at position pos to node and also add the corresponding value to the phis.
272 * Requires block phi list.
/* Appends newpred as an extra control-flow predecessor of block and, for
 * every phi of the block, appends the COPY of the phi's operand at `pos`
 * (see get_copy) so phi arity stays in sync with the block arity.
 * Requires a phi list per block. NOTE(review): lines elided in this
 * extract; return value not visible despite the int return type. */
274 static int duplicate_preds(ir_node* block, unsigned pos, ir_node* newpred)
282 assert(is_Block(block) && "duplicate_preds may be called for blocks only");
284 DB((dbg, LEVEL_5, "duplicate_preds(node %N, pos %d, newpred %N)\n", block, pos, newpred));
286 block_arity = get_irn_arity(block);
288 NEW_ARR_A(ir_node*, ins, block_arity + 1);
290 for (i = 0; i < block_arity; ++i) {
291 ins[i] = get_irn_n(block, i);
293 ins[block_arity] = newpred;
295 set_irn_in(block, block_arity + 1, ins);
299 for_each_phi(block, phi) {
300 int phi_arity = get_irn_arity(phi);
301 DB((dbg, LEVEL_5, "duplicate_preds: fixing phi %N\n", phi));
/* NOTE(review): new in-array is sized with block_arity, but the copy loop
 * below runs to phi_arity — presumably equal before the block was widened;
 * confirm against the full source. */
303 NEW_ARR_A(ir_node *, ins, block_arity + 1);
304 for (i = 0; i < phi_arity; ++i) {
305 DB((dbg, LEVEL_5, "pos %N\n", get_irn_n(phi, i)));
306 ins[i] = get_irn_n(phi, i);
308 ins[block_arity] = get_copy(get_irn_n(phi, pos));
309 set_irn_in(phi, block_arity + 1, ins);
316 * Finds loop head and loop_info.
/* Walker: inspects every predecessor edge of `node`. If `node` is a block
 * inside cur_loop with a cfg predecessor outside the loop, it is a head
 * candidate; a second distinct candidate invalidates loop_cf_head_valid
 * (multi-entry loops are left untouched). Lines that record loop_info
 * statistics and set loop_cf_head are elided in this extract. */
318 static void get_loop_info(ir_node *node, void *env)
320 unsigned node_in_loop, pred_in_loop;
324 arity = get_irn_arity(node);
325 for (i = 0; i < arity; i++) {
326 ir_node *pred = get_irn_n(node, i);
328 pred_in_loop = is_in_loop(pred);
329 node_in_loop = is_in_loop(node);
331 /* collect some loop information */
337 /* Find the loops head/the blocks with cfpred outside of the loop */
338 if (is_Block(node) && node_in_loop && !pred_in_loop && loop_cf_head_valid) {
339 ir_node *cfgpred = get_Block_cfgpred(node, i);
341 if (!is_in_loop(cfgpred)) {
342 DB((dbg, LEVEL_5, "potential head %+F because inloop and pred %+F not inloop\n", node, pred));
343 /* another head? We do not touch this. */
344 if (loop_cf_head && loop_cf_head != node) {
345 loop_cf_head_valid = 0;

354 /* Adds all nodes pointing into the loop to loop_entries and also finds the loops head */
/* Walker: records (node, pred position) pairs where a node OUTSIDE the
 * loop uses a value defined INSIDE the loop, appending them to
 * cur_loop_outs. env is unused by the visible code. */
355 static void get_loop_outs(ir_node *node, void *env)
357 unsigned node_in_loop, pred_in_loop;
361 arity = get_irn_arity(node);
362 for (i = 0; i < arity; ++i) {
363 ir_node *pred = get_irn_n(node, i);
365 pred_in_loop = is_in_loop(pred);
366 node_in_loop = is_in_loop(node);
368 if (pred_in_loop && !node_in_loop) {
371 entry.pred_irn_n = i;
372 ARR_APP1(out_edge, cur_loop_outs, entry);
377 static ir_node *ssa_second_def;
378 static ir_node *ssa_second_def_block;
381 * Walks the graph bottom up, searching for definitions and creates phis.
/* Bottom-up SSA reconstruction for one block: returns the reaching
 * definition of the value (of `mode`) at `block`, creating Phi nodes on
 * the way. Uses the node_info link as the per-block "current definition"
 * cache and irn_visited as the memoization flag; honors the global
 * ssa_second_def/ssa_second_def_block pair as an alternate definition.
 * NOTE(review): several returns and declarations elided in this extract. */
383 static ir_node *search_def_and_create_phis(ir_node *block, ir_mode *mode)
391 DB((dbg, LEVEL_5, "ssa search_def_and_create_phis: block %N\n", block));
393 /* Prevents creation of phi that would be bad anyway.
394 * Dead and bad blocks. */
395 if (get_irn_arity(block) < 1 || is_Bad(block))
398 if (block == ssa_second_def_block) {
399 DB((dbg, LEVEL_5, "ssa found second definition: use second def %N\n", ssa_second_def));
400 return ssa_second_def;
403 /* already processed this block? */
404 if (irn_visited(block)) {
405 ir_node *value = get_link(block);
406 DB((dbg, LEVEL_5, "ssa already visited: use linked %N\n", value));
410 irg = get_irn_irg(block);
411 assert(block != get_irg_start_block(irg));
413 /* a Block with only 1 predecessor needs no Phi */
414 n_cfgpreds = get_Block_n_cfgpreds(block);
415 if (n_cfgpreds == 1) {
416 ir_node *pred_block = get_Block_cfgpred_block(block, 0);
419 DB((dbg, LEVEL_5, "ssa 1 pred: walk pred %N\n", pred_block));
421 value = search_def_and_create_phis(pred_block, mode);
422 set_link(block, value);
423 mark_irn_visited(block);
428 /* create a new Phi */
/* Phi is created with Unknown operands first and linked BEFORE the
 * recursive walk below, so cycles through back edges terminate. */
429 NEW_ARR_A(ir_node*, in, n_cfgpreds);
430 for (i = 0; i < n_cfgpreds; ++i)
431 in[i] = new_Unknown(mode);
433 phi = new_r_Phi(block, n_cfgpreds, in, mode);
435 /* Important: always keep block phi list up to date. */
436 add_Block_phi(block, phi);
437 /* EVERY node is assumed to have a node_info linked. */
438 alloc_node_info(phi, NULL);
440 DB((dbg, LEVEL_5, "ssa phi creation: link new phi %N to block %N\n", phi, block));
442 set_link(block, phi);
443 mark_irn_visited(block);
445 /* set Phi predecessors */
446 for (i = 0; i < n_cfgpreds; ++i) {
448 ir_node *pred_block = get_Block_cfgpred_block(block, i);
449 assert(pred_block != NULL);
450 pred_val = search_def_and_create_phis(pred_block, mode);
451 assert(pred_val != NULL);
453 DB((dbg, LEVEL_5, "ssa phi pred:phi %N, pred %N\n", phi, pred_val));
454 set_irn_n(phi, i, pred_val);
460 * Given a set of values this function constructs SSA-form for the users of the
461 * first value (the users are determined through the out-edges of the value).
462 * Works without using the dominance tree.
460 * Given a set of values this function constructs SSA-form for the users of the
461 * first value (the users are determined through the out-edges of the value).
462 * Works without using the dominance tree.
/* Seeds orig_block with orig_val and second_block/second_val as the two
 * definitions, then rewires each user of orig_val to the definition
 * reaching it (search_def_and_create_phis). Phi users are resolved via
 * the corresponding predecessor block (see the j-indexed branch).
 * NOTE(review): the is_Phi dispatch around lines 504-508 is elided. */
464 static void construct_ssa(ir_node *orig_block, ir_node *orig_val,
465 ir_node *second_block, ir_node *second_val)
469 const ir_edge_t *edge;
470 const ir_edge_t *next;
472 assert(orig_block && orig_val && second_block && second_val &&
473 "no parameter of construct_ssa may be NULL");
475 /* no need to do anything */
476 if (orig_val == second_val)
479 irg = get_irn_irg(orig_val);
481 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
482 inc_irg_visited(irg);
484 mode = get_irn_mode(orig_val);
485 set_link(orig_block, orig_val);
486 mark_irn_visited(orig_block);
488 ssa_second_def_block = second_block;
489 ssa_second_def = second_val;
491 /* Only fix the users of the first, i.e. the original node */
492 foreach_out_edge_safe(orig_val, edge, next) {
493 ir_node *user = get_edge_src_irn(edge);
494 int j = get_edge_src_pos(edge);
495 ir_node *user_block = get_nodes_block(user);
502 DB((dbg, LEVEL_5, "original user %N\n", user));
505 ir_node *pred_block = get_Block_cfgpred_block(user_block, j);
506 newval = search_def_and_create_phis(pred_block, mode);
508 newval = search_def_and_create_phis(user_block, mode);
511 /* If we get a bad node the user keeps the original in. No second definition needed. */
512 if (newval != user && !is_Bad(newval))
513 set_irn_n(user, j, newval);
516 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
520 * Construct SSA for def and all of its copies.
520 * Construct SSA for def and all of its copies.
/* Like construct_ssa, but seeds EVERY copy in def's copy list (see
 * for_each_copy) as a definition in its own block, then fixes only the
 * in-edges of the single given `user`. NOTE(review): iter initialization
 * and the Phi/non-Phi dispatch are elided in this extract. */
522 static void construct_ssa_n(ir_node *def, ir_node *user)
527 const ir_edge_t *edge;
528 const ir_edge_t *next;
529 irg = get_irn_irg(def);
531 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
532 inc_irg_visited(irg);
534 mode = get_irn_mode(def);
536 for_each_copy(iter) {
537 set_link(get_nodes_block(iter), iter);
538 mark_irn_visited(get_nodes_block(iter));
540 DB((dbg, LEVEL_5, "ssa_n: Link def %N to block %N\n",
541 iter, get_nodes_block(iter)));
544 /* Need to search the outs, because we need the in-pos on the user node. */
545 foreach_out_edge_safe(def, edge, next) {
546 ir_node *edge_user = get_edge_src_irn(edge);
547 int edge_src = get_edge_src_pos(edge);
548 ir_node *user_block = get_nodes_block(user);
551 if (edge_user != user)
555 ir_node *pred_block = get_Block_cfgpred_block(user_block, edge_src);
556 newval = search_def_and_create_phis(pred_block, mode);
558 newval = search_def_and_create_phis(user_block, mode);
561 if (newval != user && !is_Bad(newval))
562 set_irn_n(user, edge_src, newval);
565 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);

569 * Construct SSA for all definitions in arr.
/* For each node in arr, runs construct_ssa between the original and its
 * copy (and their blocks). NOTE(review): assignment of `pred` from
 * arr[i] is elided in this extract. */
571 void construct_ssa_foreach(ir_node **arr, int arr_n)
574 for (i = 0; i < arr_n ; ++i) {
575 ir_node *cppred, *block, *cpblock, *pred;
578 cppred = get_copy(pred);
579 block = get_nodes_block(pred);
580 cpblock = get_nodes_block(cppred);
581 construct_ssa(block, pred, cpblock, cppred);
585 /* get the number of backedges without alien bes */
585 /* get the number of backedges without alien bes */
/* Counts loophead's backedges; with_alien != 0 also counts backedges
 * whose predecessor lies outside the loop. (Counter and return elided.) */
586 static int get_backedge_n(ir_node *loophead, unsigned with_alien)
590 int arity = get_irn_arity(loophead);
591 for (i = 0; i < arity; ++i) {
592 ir_node *pred = get_irn_n(loophead, i);
593 if (is_backedge(loophead, i) && (with_alien || is_in_loop(pred)))
600 * Rewires the heads after peeling.
600 * Rewires the heads after peeling.
/* After the loop body has been copied (peeled), splits the predecessor
 * edges between the original loop head and the peeled head copy:
 * - in-loop backedges stay on the loop head, and additionally the COPIED
 *   backedge jumps are routed into the loop head (2 * backedges_n ins);
 * - all remaining (entry) edges go to the peeled head.
 * Phi ins of both heads are rebuilt in parallel via node_info->ins.
 * Requires a phi list per block. NOTE(review): extract elides the
 * counter increments (lheadin_c/pheadin_c) and some braces. */
602 static void peel_fix_heads(void)
604 ir_node **loopheadnins, **peelheadnins;
605 ir_node *loophead = loop_cf_head;
606 ir_node *peelhead = get_copy(loophead);
608 int headarity = get_irn_arity(loophead);
615 int backedges_n = get_backedge_n(loophead, 0);
617 int lhead_arity = 2 * backedges_n;
618 int phead_arity = headarity - backedges_n;
621 NEW_ARR_A(ir_node *, loopheadnins, lhead_arity );
622 NEW_ARR_A(ir_node *, peelheadnins, phead_arity );
624 for_each_phi(loophead, phi) {
625 NEW_ARR_A(ir_node *, get_node_info(phi)->ins, lhead_arity);
627 for_each_phi(peelhead, phi) {
628 NEW_ARR_A(ir_node *, get_node_info(phi)->ins, phead_arity);
631 for (i = 0; i < headarity; i++)
633 ir_node *orgjmp = get_irn_n(loophead, i);
634 ir_node *copyjmp = get_copy(orgjmp);
637 * Rewire the head blocks ins and their phi ins.
638 * Requires phi list per block.
640 if (is_backedge(loophead, i) && !is_alien_edge(loophead, i)) {
641 loopheadnins[lheadin_c] = orgjmp;
642 for_each_phi(loophead, phi) {
643 get_node_info( phi )->ins[lheadin_c] = get_irn_n( phi, i) ;
647 /* former bes of the peeled code origin now from the loophead */
648 loopheadnins[lheadin_c] = copyjmp;
650 /* get_irn_n( get_copy_of(phi), i ) <!=> get_copy_of( get_irn_n( phi, i) )
651 * Order is crucial! Predecessors outside of the loop are non existent.
652 * The copy (cloned with its ins!) has pred i,
653 * but phis pred i might not have a copy of itself.
655 for_each_phi(loophead, phi) {
656 get_node_info( phi )->ins[lheadin_c] = get_irn_n( get_copy(phi), i) ;
660 peelheadnins[pheadin_c] = orgjmp;
661 for_each_phi(peelhead, phi) {
662 get_node_info( phi )->ins[pheadin_c] = get_irn_n(phi, i);
668 assert(pheadin_c == ARR_LEN(peelheadnins) &&
669 lheadin_c == ARR_LEN(loopheadnins) &&
670 "the constructed head arities do not match the predefined arities");
672 /* assign the ins to the nodes */
673 set_irn_in(loophead, ARR_LEN(loopheadnins), loopheadnins);
674 set_irn_in(peelhead, ARR_LEN(peelheadnins), peelheadnins);
676 for_each_phi(loophead, phi) {
677 ir_node **ins = get_node_info( phi )->ins;
678 set_irn_in(phi, lhead_arity, ins);
681 for_each_phi(peelhead, phi) {
682 ir_node **ins = get_node_info( phi )->ins;
683 set_irn_in(phi, phead_arity, ins);
688 * Create a raw copy (ins are still the old ones) of the given node.
689 * We rely on copies to be NOT visited.
688 * Create a raw copy (ins are still the old ones) of the given node.
689 * We rely on copies to be NOT visited.
/* exact_copy + fresh node_info attached to the copy; backedge flags are
 * iterated (handling elided). For Blocks the MacroBlock is set to the
 * copy itself. NOTE(review): set_copy(node, cp) and the return are not
 * visible in this extract — confirm against the full source. */
691 static ir_node *rawcopy_node(ir_node *node)
697 cp = exact_copy(node);
699 arity = get_irn_arity(node);
701 for (i = 0; i < arity; ++i) {
702 if (is_backedge(node, i))
707 cpstate = new_node_info();
708 set_irn_link(cp, cpstate);
713 * exact_copy already sets Macroblock.
714 * Why should we do this anyway? */
715 set_Block_MacroBlock(cp, cp);
722 * This walker copies all walked nodes.
723 * If the walk_condition is true for a node, it is walked.
724 * All nodes node_info->copy attributes have to be NULL prior to every walk.
722 * This walker copies all walked nodes.
723 * If the walk_condition is true for a node, it is walked.
724 * All nodes node_info->copy attributes have to be NULL prior to every walk.
/* Recursive copy walker: creates a TEMP raw copy when a cycle revisits a
 * node, and finalizes copies with rewired ins (cpin) afterwards. Copies
 * get set_loop as their loop and are placed in the copied block; copied
 * Phis are appended to the copy block's phi list. NOTE(review): extract
 * elides returns, the Phi check before add_Block_phi, and set_copy
 * bookkeeping — verify ordering against the full source. */
726 static void copy_walk(ir_node *node, walker_condition *walk_condition, ir_loop *set_loop)
732 ir_graph *irg = current_ir_graph;
733 node_info *node_info = get_node_info(node);
736 * break condition and cycle resolver, creating temporary node copies
738 if (get_irn_visited(node) >= get_irg_visited(irg)) {
739 /* Here we rely on nodestate's copy being initialized with NULL */
740 DB((dbg, LEVEL_5, "copy_walk: We have already visited %N\n", node));
741 if (node_info->copy == NULL) {
742 cp = rawcopy_node(node);
743 DB((dbg, LEVEL_5, "The TEMP copy of %N is created %N\n", node, cp));
749 mark_irn_visited(node);
751 if (!is_Block(node)) {
752 ir_node *pred = get_nodes_block(node);
753 if (walk_condition(pred))
754 DB((dbg, LEVEL_5, "walk block %N\n", pred));
755 copy_walk(pred, walk_condition, set_loop);
758 arity = get_irn_arity(node);
760 NEW_ARR_A(ir_node *, cpin, arity);
762 for (i = 0; i < arity; ++i) {
763 ir_node *pred = get_irn_n(node, i);
765 if (walk_condition(pred)) {
766 DB((dbg, LEVEL_5, "walk node %N\n", pred));
767 copy_walk(pred, walk_condition, set_loop);
768 cpin[i] = get_copy(pred);
769 DB((dbg, LEVEL_5, "copy of %N gets new in %N which is copy of %N\n",
770 node, get_copy(pred), pred));
776 /* copy node / finalize temp node */
777 if (node_info->copy == NULL) {
778 /* No temporary copy existent */
779 cp = rawcopy_node(node);
780 DB((dbg, LEVEL_5, "The FINAL copy of %N is CREATED %N\n", node, cp));
782 /* temporary copy is existent but without correct ins */
784 DB((dbg, LEVEL_5, "The FINAL copy of %N is EXISTENT %N\n", node, cp));
787 if (!is_Block(node)) {
788 ir_node *cpblock = get_copy(get_nodes_block(node));
790 set_nodes_block(cp, cpblock );
792 add_Block_phi(cpblock, cp);
795 set_irn_loop(cp, set_loop);
796 set_irn_in(cp, ARR_LEN(cpin), cpin);
800 * Loop peeling, and fix the cf for the loop entry nodes, which have now more preds
800 * Loop peeling, and fix the cf for the loop entry nodes, which have now more preds
/* Copies the whole loop body once (copy_walk over every loop-out edge),
 * widens out-blocks with duplicate_preds, then rewires the two heads
 * (peel_fix_heads, call elided here) and builds phis for out-values via
 * construct_ssa_foreach. entry_buffer collects non-block users whose
 * loop-defined operand now has two definitions. NOTE(review): entry_c
 * increment and keepalive filtering are elided in this extract. */
802 static void peel(out_edge *loop_outs)
805 ir_node **entry_buffer;
808 ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
810 NEW_ARR_A(ir_node *, entry_buffer, ARR_LEN(loop_outs));
812 /* duplicate loop walk */
813 inc_irg_visited(current_ir_graph);
815 for (i = 0; i < ARR_LEN(loop_outs); i++) {
816 out_edge entry = loop_outs[i];
817 ir_node *node = entry.node;
818 ir_node *pred = get_irn_n(entry.node, entry.pred_irn_n);
820 if (is_Block(node)) {
821 copy_walk(pred, is_in_loop, NULL);
822 duplicate_preds(node, entry.pred_irn_n, get_copy(pred) );
824 copy_walk(pred, is_in_loop, NULL);
825 /* leave out keepalives */
827 /* Node is user of a value defined inside the loop.
828 * We'll need a phi since we duplicated the loop. */
829 /* Cannot construct_ssa here, because it needs another walker. */
830 entry_buffer[entry_c] = pred;
836 ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
838 /* Rewires the 2 heads */
841 /* Generate phis for values from peeled code and original loop */
842 construct_ssa_foreach(entry_buffer, entry_c);
843 /*for (i = 0; i < entry_c; i++)
845 ir_node *cppred, *block, *cpblock, *pred;
847 pred = entry_buffer[i];
848 cppred = get_copy(pred);
849 block = get_nodes_block(pred);
850 cpblock = get_nodes_block(cppred);
851 construct_ssa(block, pred, cpblock, cppred);
856 * Populates head_entries with (node, pred_pos) tuple
857 * whereas the node's pred at pred_pos is in the head but not the node itself.
858 * Head and condition chain blocks must be marked.
856 * Populates head_entries with (node, pred_pos) tuple
857 * whereas the node's pred at pred_pos is in the head but not the node itself.
858 * Head and condition chain blocks must be marked.
/* Walker: appends to cur_head_outs every edge whose source block is
 * unmarked but whose predecessor's block is marked (i.e. edges leaving
 * the condition chain). env is unused by the visible code. */
860 static void get_head_outs(ir_node *node, void *env)
863 int arity = get_irn_arity(node);
866 DB((dbg, LEVEL_5, "get head entries %N \n", node));
868 for (i = 0; i < arity; ++i) {
869 /* node is not in the head, but the predecessor is.
870 * (head or loop chain nodes are marked) */
872 DB((dbg, LEVEL_5, "... "));
873 DB((dbg, LEVEL_5, "node %N marked %d (0) pred %d marked %d (1) \n",
874 node->node_nr, is_nodesblock_marked(node),i, is_nodesblock_marked(get_irn_n(node, i))));
876 if (!is_nodesblock_marked(node) && is_nodesblock_marked(get_irn_n(node, i))) {
879 entry.pred_irn_n = i;
881 "Found head chain entry %N @%d because !inloop %N and inloop %N\n",
882 node, i, node, get_irn_n(node, i)));
883 ARR_APP1(out_edge, cur_head_outs, entry);
889 * Find condition chains, and add them to be inverted, until the node count exceeds the limit.
890 * A block belongs to the chain if a condition branches out of the loop.
891 * Returns 1 if the given block belongs to the condition chain.
889 * Find condition chains, and add them to be inverted, until the node count exceeds the limit.
890 * A block belongs to the chain if a condition branches out of the loop.
891 * Returns 1 if the given block belongs to the condition chain.
/* Marks chain blocks (set_Block_mark) and accumulates
 * head_inversion_node_count / head_inversion_block_count; successors
 * outside the loop are recorded in cond_chain_entries. Recurses over
 * in-loop successors. NOTE(review): nodes_n counting, several returns
 * and the "has out-of-loop successor" flag are elided in this extract. */
893 static unsigned find_condition_chains(ir_node *block)
895 const ir_edge_t *edge;
899 DB((dbg, LEVEL_5, "condition_chains for block %N\n", block));
901 /* Collect all outs, including keeps.
902 * (TODO firm function for number of out edges?) */
903 foreach_out_edge_kind(block, edge, EDGE_KIND_NORMAL) {
907 /* We do not want to collect more nodes from condition chains, than the limit allows us to.
908 * Also, leave at least one block as body. */
909 if (head_inversion_node_count + nodes_n > inversion_head_node_limit
910 || head_inversion_block_count + 1 == loop_info.blocks) {
911 set_Block_mark(block, 0);
916 /* First: check our successors, and add all succs that are outside of the loop to the list */
917 foreach_block_succ(block, edge) {
918 ir_node *src = get_edge_src_irn( edge );
919 int pos = get_edge_src_pos( edge );
921 if (!is_in_loop(src)) {
926 entry.pred_irn_n = pos;
927 ARR_APP1(out_edge, cond_chain_entries, entry);
928 mark_irn_visited(src);
933 /* this block is not part of the chain,
934 * because the chain would become too long or we have no successor outside of the loop */
936 set_Block_mark(block, 0);
939 set_Block_mark(block, 1);
940 ++head_inversion_block_count;
941 DB((dbg, LEVEL_5, "block %N is part of condition chain\n", block));
942 head_inversion_node_count += nodes_n;
945 /* Second: walk all successors, and add them to the list if they are not part of the chain */
946 foreach_block_succ(block, edge) {
948 ir_node *src = get_edge_src_irn( edge );
949 int pos = get_edge_src_pos( edge );
951 /* already done cases */
952 if (!is_in_loop( src ) || (get_irn_visited(src) >= get_irg_visited(current_ir_graph))) {
956 mark_irn_visited(src);
957 DB((dbg, LEVEL_5, "condition chain walk %N\n", src));
958 inchain = find_condition_chains(src);
960 /* if successor is not part of chain we need to collect its outs */
964 entry.pred_irn_n = pos;
965 ARR_APP1(out_edge, cond_chain_entries, entry);
972 * Rewire the loop head and inverted head for loop inversion.
972 * Rewire the loop head and inverted head for loop inversion.
/* Distributes the predecessor edges of the original loop head between
 * the head and its copy (the inverted head): in-loop backedges stay on
 * the loop head (lhead_arity = backedges_n), everything else moves to
 * the inverted head (ihead_arity = headarity - backedges_n). Phi ins of
 * both heads are rebuilt in lock-step via node_info->ins. Requires a phi
 * list per block. NOTE(review): lheadin_c/iheadin_c increments elided. */
974 static void inversion_fix_heads(void)
976 ir_node **loopheadnins, **invheadnins;
977 ir_node *loophead = loop_cf_head;
978 ir_node *invhead = get_copy(loophead);
980 int headarity = get_irn_arity(loophead);
987 int backedges_n = get_backedge_n(loophead, 0);
988 int lhead_arity = backedges_n;
989 int ihead_arity = headarity - backedges_n;
991 assert(lhead_arity != 0 && "Loophead has arity 0. Probably wrong backedge informations.");
992 assert(ihead_arity != 0 && "Inversionhead has arity 0. Probably wrong backedge informations.");
994 /* new in arrays for all phis in the head blocks */
995 NEW_ARR_A(ir_node *, loopheadnins, lhead_arity);
996 NEW_ARR_A(ir_node *, invheadnins, ihead_arity);
998 for_each_phi(loophead, phi) {
999 NEW_ARR_A(ir_node *, get_node_info(phi)->ins, lhead_arity);
1001 for_each_phi(invhead, phi) {
1002 NEW_ARR_A(ir_node *, get_node_info(phi)->ins, ihead_arity);
1005 for (i = 0; i < headarity; i++) {
1006 ir_node *pred = get_irn_n(loophead, i);
1009 * Rewire the head blocks ins and their phi ins.
1010 * Requires phi list per block.
1012 if (is_backedge(loophead, i) && !is_alien_edge(loophead, i)) {
1013 /* just copy these edges */
1014 loopheadnins[lheadin_c] = pred;
1015 for_each_phi(loophead, phi) {
1016 get_node_info(phi)->ins[lheadin_c] = get_irn_n(phi, i);
1020 invheadnins[iheadin_c] = pred;
1021 for_each_phi(invhead, phi) {
1022 get_node_info(phi)->ins[iheadin_c] = get_irn_n(phi, i) ;
1028 /* assign the ins to the head blocks */
1029 set_irn_in(loophead, ARR_LEN(loopheadnins), loopheadnins);
1030 set_irn_in(invhead, ARR_LEN(invheadnins), invheadnins);
1032 /* assign the ins for the phis */
1033 for_each_phi(loophead, phi) {
1034 ir_node **ins = get_node_info(phi)->ins;
1035 set_irn_in(phi, lhead_arity, ins);
1038 for_each_phi(invhead, phi) {
1039 ir_node **ins = get_node_info(phi)->ins;
1040 set_irn_in(phi, ihead_arity, ins);
/* Performs the loop inversion proper: duplicates the condition chain
 * (copy_walk over every head-out edge), widens target blocks with
 * duplicate_preds, rewires the heads (inversion_fix_heads), and finally
 * reconstructs SSA both for values read in the body (entry_buffer) and
 * for values merely assigned in the chain (head_phi_assign). Afterwards
 * the chain copy becomes the new loop_cf_head. NOTE(review): entry_c
 * increment and the DEL_ARR_F of head_phi_assign are elided here. */
1044 static void inversion_walk(out_edge *head_entries)
1049 ir_node **entry_buffer;
1050 ir_node **head_phi_assign;
1052 NEW_ARR_A(ir_node *, entry_buffer, ARR_LEN(head_entries));
1054 head_phi_assign = NEW_ARR_F(ir_node *, 0);
1056 /* Find assignments in the condition chain,
1057 * to construct_ssa for them after the loop inversion. */
1058 for_each_phi(loop_cf_head , phi) {
1059 int arity = get_irn_arity(phi);
1060 for (i = 0; i < arity; ++i) {
1061 ir_node *def = get_irn_n(phi, i);
1062 if (is_nodesblock_marked(def)) {
1063 ARR_APP1(ir_node *, head_phi_assign, def);
1068 ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1071 * duplicate condition chain
1073 inc_irg_visited(current_ir_graph);
1075 for (i = 0; i < ARR_LEN(head_entries); ++i) {
1076 out_edge entry = head_entries[i];
1077 ir_node *node = entry.node;
1078 ir_node *pred = get_irn_n(entry.node, entry.pred_irn_n);
1080 if (is_Block(node)) {
1081 DB((dbg, LEVEL_5, "\nInit walk block %N\n", pred));
1083 copy_walk(pred, is_nodesblock_marked, cur_loop);
1084 duplicate_preds(node, entry.pred_irn_n, get_copy(pred) );
1086 DB((dbg, LEVEL_5, "\nInit walk node %N\n", pred));
1088 copy_walk(pred, is_nodesblock_marked, cur_loop);
1090 /* ignore keepalives */
1091 if (!is_End(node)) {
1092 /* Node is user of a value assigned inside the loop.
1093 * We will need a phi since we duplicated the head. */
1094 entry_buffer[entry_c] = pred;
1100 ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1102 inversion_fix_heads();
1104 /* Generate phis for users of values assigned in the condition chain
1105 * and read in the loops body */
1106 construct_ssa_foreach(entry_buffer, entry_c);
1108 /* Generate phis for values that are assigned in the condition chain
1109 * but not read in the loops body. */
1110 construct_ssa_foreach(head_phi_assign, ARR_LEN(head_phi_assign));
1112 loop_cf_head = get_copy(loop_cf_head);
/* Driver for loop peeling on the current loop: collects loop-out edges,
 * peels once, then invalidates dominance/loopinfo/outs of the graph.
 * NOTE(review): extract elides braces and any enable_peeling guard. */
1116 void loop_peeling(void)
1118 cur_loop_outs = NEW_ARR_F(out_edge, 0);
1119 irg_walk_graph( current_ir_graph, get_loop_outs, NULL, NULL );
1121 peel(cur_loop_outs);
1126 set_irg_doms_inconsistent(current_ir_graph);
1127 set_irg_loopinfo_inconsistent(current_ir_graph);
1128 set_irg_outs_inconsistent(current_ir_graph);
1130 DEL_ARR_F(cur_loop_outs);
1133 /* Loop inversion */
1133 /* Loop inversion */
/* Driver for loop inversion: finds the condition chain starting at the
 * loop head (find_condition_chains, using block marks), bails out for
 * loops with fewer than 2 blocks or no invertible chain, then collects
 * head-out edges and runs inversion_walk. Graph state (doms, loopinfo,
 * outs) is invalidated on success. NOTE(review): the do_inversion flag
 * usage and early-exit gotos/returns are elided in this extract. */
1134 void loop_inversion(void)
1136 unsigned do_inversion = 1;
1138 inversion_head_node_limit = INT_MAX;
1140 /* Search for condition chains. */
1141 ir_reserve_resources(current_ir_graph, IR_RESOURCE_BLOCK_MARK);
1143 irg_walk_graph(current_ir_graph, reset_block_mark, NULL, NULL);
1145 loop_info.blocks = get_loop_n_blocks(cur_loop);
1146 cond_chain_entries = NEW_ARR_F(out_edge, 0);
1148 head_inversion_node_count = 0;
1149 head_inversion_block_count = 0;
1151 set_Block_mark(loop_cf_head, 1);
1152 mark_irn_visited(loop_cf_head);
1153 inc_irg_visited(current_ir_graph);
1155 find_condition_chains(loop_cf_head);
1157 DB((dbg, LEVEL_3, "Loop contains %d blocks.\n", loop_info.blocks));
1158 if (loop_info.blocks < 2) {
1160 DB((dbg, LEVEL_3, "Loop contains %d (less than 2) blocks => No Inversion done.\n", loop_info.blocks));
1163 /* We also catch endless loops here,
1164 * because they do not have a condition chain. */
1165 if (head_inversion_block_count < 1) {
1167 DB((dbg, LEVEL_3, "Loop contains %d (less than 1) invertible blocks => No Inversion done.\n", head_inversion_block_count));
1171 cur_head_outs = NEW_ARR_F(out_edge, 0);
1173 /* Get all edges pointing into the head or condition chain (outs). */
1174 irg_walk_graph(current_ir_graph, get_head_outs, NULL, NULL);
1175 inversion_walk(cur_head_outs);
1177 DEL_ARR_F(cur_head_outs);
1179 set_irg_doms_inconsistent(current_ir_graph);
1180 set_irg_loopinfo_inconsistent(current_ir_graph);
1181 set_irg_outs_inconsistent(current_ir_graph);
1185 DEL_ARR_F(cond_chain_entries);
1186 ir_free_resources(current_ir_graph, IR_RESOURCE_BLOCK_MARK);
1190 * Returns last element of linked list of copies by
1191 * walking the linked list.
/* Walks the copy chain (get_copy) starting at node and presumably
 * returns the final element — TODO confirm; loop body, cur
 * initialization and return are elided in this extract. */
1193 ir_node *get_last_copy(ir_node *node)
1195 ir_node *copy, *cur;
1197 while ((copy = get_copy(cur))) {
1204 * Rewire floating copies of the current loop.
1204 * Rewire floating copies of the current loop.
/* After unrolling duplicated the loop body N-1 times, chains the copies
 * top-down along each non-alien backedge: each copy's head receives the
 * previous iteration's backedge jump (and phi values), and the topmost
 * (original) head's backedge is redirected to the LAST copy. Finally the
 * collected ins are committed via set_irn_in on every copied head/phi.
 * NOTE(review): uhead_in_n bookkeeping and braces are elided here. */
1206 void unrolling_fix_cf(void)
1208 ir_node *loophead = loop_cf_head;
1209 int headarity = get_irn_arity(loophead);
1210 ir_node *phi, *headnode;
1211 /*ir_node *high, *low;*/
1215 int backedges_n = get_backedge_n(loophead, 0);
1216 int unroll_arity = backedges_n;
1218 /* Create ins for all heads and their phis */
1219 headnode = get_copy(loophead);
1220 for_each_copy(headnode) {
1221 NEW_ARR_A(ir_node *, get_node_info(headnode)->ins, unroll_arity);
1222 for_each_phi(headnode, phi) {
1223 NEW_ARR_A(ir_node *, get_node_info(phi)->ins, unroll_arity);
1227 /* Append the copies to the existing loop. */
1228 for (i = 0; i < headarity; i++) {
1229 ir_node *upper_head = loophead;
1230 ir_node *lower_head = get_copy(loophead);
1232 ir_node *upper_pred = get_irn_n(loophead, i);
1233 ir_node *lower_pred = get_copy(get_irn_n(loophead, i));
1238 * Build unrolled loop top down
1240 if (is_backedge(loophead, i) && !is_alien_edge(loophead, i)) {
1241 for_each_copy2(upper_head, lower_head, upper_pred, lower_pred) {
1242 get_node_info(lower_head)->ins[uhead_in_n] = upper_pred;
1244 for_each_phi(upper_head, phi) {
1245 ir_node *phi_copy = get_copy(phi);
1246 get_node_info(phi_copy)->ins[uhead_in_n] = get_irn_n(phi, i);
1250 last_pred = upper_pred;
1253 /* Fix the topmost loop heads backedges. */
1254 set_irn_n(loophead, i, last_pred);
1255 for_each_phi(loophead, phi) {
1256 ir_node *last_phi = get_last_copy(phi);
1257 ir_node *pred = get_irn_n(last_phi, i);
1258 set_irn_n(phi, i, pred);
1263 headnode = get_copy(loophead);
1264 for_each_copy(headnode) {
1265 set_irn_in(headnode, unroll_arity, get_node_info(headnode)->ins);
1266 for_each_phi(headnode, phi) {
1267 set_irn_in(phi, unroll_arity, get_node_info(phi)->ins);
/* Creates a Phi in node's block (mode taken from node's operand at
 * phi_pos), fills it with Unknowns, registers it in the block phi list
 * and node_info bookkeeping, and wires it in as node's phi_pos operand.
 * NOTE(review): `assert(is_Bad(phi) && "phi bad")` looks inverted — a
 * freshly built Phi should NOT be Bad; presumably !is_Bad intended,
 * confirm against the full source. Return statement elided here. */
1273 static ir_node *add_phi(ir_node *node, int phi_pos)
1278 mode = get_irn_mode(get_irn_n(node, phi_pos));
1279 ir_node *block = get_nodes_block(node);
1280 int n_cfgpreds = get_irn_arity(block);
1281 ir_node *pred = get_irn_n(node, phi_pos);
1284 /* create a new Phi */
1285 NEW_ARR_A(ir_node*, in, n_cfgpreds);
1286 for (i = 0; i < n_cfgpreds; ++i)
1287 in[i] = new_Unknown(mode); /*pred;*/
1289 phi = new_r_Phi(block, n_cfgpreds, in, mode);
1291 assert(phi && "phi null");
1292 assert(is_Bad(phi) && "phi bad");
1294 /* Important: always keep block phi list up to date. */
1295 add_Block_phi(block, phi);
1296 /* EVERY node is assumed to have a node_info linked. */
1297 alloc_node_info(phi, NULL);
1299 set_irn_n(node, phi_pos, phi);
1307 * Could be improved with variable range informations.
1307 * Could be improved with variable range informations.
/* Driver for loop unrolling: duplicates the loop content unroll_times-1
 * times via the loop-out edges (non-blocks first, then blocks widened
 * with duplicate_preds), lines the floating copies up (unrolling_fix_cf,
 * call elided here), and finally rebuilds SSA for every non-End out-user
 * with construct_ssa_n. Graph state is invalidated at the end.
 * NOTE(review): unroll_times definition is not visible in this extract. */
1309 void loop_unrolling(void)
1315 cur_loop_outs = NEW_ARR_F(out_edge, 0);
1316 irg_walk_graph( current_ir_graph, get_loop_outs, NULL, NULL );
1318 ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1320 /* duplicate whole loop content */
1321 inc_irg_visited(current_ir_graph);
1323 for (i = 0; i < ARR_LEN(cur_loop_outs); ++i) {
1324 out_edge entry = cur_loop_outs[i];
1325 ir_node *node = entry.node;
1326 ir_node *pred = get_irn_n(entry.node, entry.pred_irn_n);
1327 if (!is_Block(node)) {
1328 for (j = 0; j < unroll_times - 1; ++j) {
1329 copy_walk(pred, is_in_loop, cur_loop);
1330 pred = get_copy(pred);
1335 for (i = 0; i < ARR_LEN(cur_loop_outs); ++i) {
1336 out_edge entry = cur_loop_outs[i];
1337 ir_node *node = entry.node;
1338 ir_node *pred = get_irn_n(entry.node, entry.pred_irn_n);
1340 if (is_Block(node)) {
1341 for (j = 0; j < unroll_times - 1; ++j) {
1342 copy_walk(pred, is_in_loop, cur_loop);
1343 duplicate_preds(node, entry.pred_irn_n, get_copy(pred));
1345 pred = get_copy(pred);
1350 ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1352 /*dump_ir_graph(current_ir_graph, "-raw");*/
1356 /* Line up the floating copies. */
1359 /* Generate phis for all loop outs */
1360 for (i = 0; i < ARR_LEN(cur_loop_outs); ++i) {
1361 out_edge entry = cur_loop_outs[i];
1362 ir_node *node = entry.node;
1363 ir_node *pred = get_irn_n(entry.node, entry.pred_irn_n);
1365 if (!is_Block(node) && !is_End(node)) {
1366 DB((dbg, LEVEL_5, " construct_ssa_n def %N node %N pos %d\n",
1367 pred, node, entry.pred_irn_n));
1368 construct_ssa_n(pred, node);
1373 DEL_ARR_F(cur_loop_outs);
1375 set_irg_doms_inconsistent(current_ir_graph);
1376 set_irg_loopinfo_inconsistent(current_ir_graph);
1377 set_irg_outs_inconsistent(current_ir_graph);
1380 /* Initialization and */
/*
 * Per-loop entry point: resets the global per-loop state, collects loop
 * statistics (calls/loads/blocks and the loop head) via get_loop_info,
 * and runs the enabled transformations (inversion / unrolling) if a
 * valid loop head was found.
 */
1381 static void init_analyze(ir_loop *loop)
1383 /* Init new for every loop */
1386 loop_cf_head = NULL;
1387 loop_cf_head_valid = 1;
1388 loop_inv_head = NULL;
1389 loop_peeled_head = NULL;
1392 loop_info.calls = 0;
1393 loop_info.loads = 0;
1394 loop_info.blocks = 0;
1396 DB((dbg, LEVEL_2, " >>>> current loop includes node %N <<<\n", get_loop_node(loop, 0)));
/* get_loop_info fills loop_info and determines loop_cf_head. */
1398 irg_walk_graph(current_ir_graph, get_loop_info, NULL, NULL);
1400 /* RETURN if there is no valid head */
1401 if (!loop_cf_head || !loop_cf_head_valid) {
1402 DB((dbg, LEVEL_2, "No valid loop head. Nothing done.\n"));
1409 if (enable_inversion)
1411 if (enable_unrolling)
/* NOTE(review): this call check appears AFTER the transformations are
 * triggered above, so it cannot prevent them — TODO confirm intended
 * control flow; the lines between are missing from this excerpt. */
1415 /* RETURN if there is a call in the loop */
1416 if (loop_info.calls)
1420 DB((dbg, LEVEL_2, " <<<< end of loop with node %N >>>>\n", get_loop_node(loop, 0)));
1423 /* Find most inner loops and send them to analyze_loop */
/*
 * Recursively descends the loop tree.  For an innermost loop, one of
 * its Blocks is appended to the global 'loops' array so the loop can be
 * re-identified later (after loop info was rebuilt); otherwise the
 * search recurses into all son loops.
 * NOTE(review): the above comment mentions analyze_loop, but the
 * visible code only records blocks — the analysis happens later in
 * loop_optimization.
 */
1424 static void find_most_inner_loop(ir_loop *loop)
1426 /* descend into sons */
1427 int sons = get_loop_n_sons(loop);
/* Innermost-loop branch: record one block of this loop (the guard for
 * sons == 0 is among the lines missing from this excerpt — presumably
 * this runs only for leaf loops; TODO confirm). */
1433 el_n = get_loop_n_elements(loop);
1435 for (i=0; i < el_n; ++i) {
1436 elem = get_loop_element(loop, i);
1437 /* We can only rely on the blocks,
1438 * as the loop attribute of the nodes seems not to be set. */
1439 if (is_ir_node(elem.kind) && is_Block(elem.node)) {
1440 ARR_APP1(ir_node *, loops, elem.node);
1441 DB((dbg, LEVEL_5, "Found most inner loop (contains block %+F)\n", elem.node));
/* Recurse into every son loop. */
1447 for (s=0; s<sons; s++) {
1448 find_most_inner_loop(get_loop_son(loop, s));
 * Assure preconditions are met and go through all loops.
/*
 * Main driver: establishes the analyses the loop optimizations rely on
 * (outs, CF loop tree, phi lists, per-node node_info), collects all
 * innermost loops, then optimizes them one by one.  Because each
 * optimization invalidates the loop tree, a representative block per
 * loop is saved and the analyses are rebuilt before every loop.
 */
1456 void loop_optimization(ir_graph *irg)
1462 link_node_state_list = NULL;
1463 set_current_ir_graph(irg);
1467 assure_irg_outs(irg);
1469 /* NOTE: sets only the loop attribute of blocks, not nodes */
1470 /* NOTE: Kills links */
1471 assure_cf_loop(irg);
/* Phi lists are needed by the for_each_phi iteration; collect_phiprojs
 * uses the links, so reserve/free around it. */
1473 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1474 collect_phiprojs(irg);
1475 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
1477 /* allocate node_info for additional information on nodes */
1478 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
1479 irg_walk_graph(current_ir_graph, alloc_node_info, NULL, NULL);
1481 loop = get_irg_loop(irg);
1482 sons = get_loop_n_sons(loop);
/* 'loops' collects one block per innermost loop (see
 * find_most_inner_loop). */
1484 loops = NEW_ARR_F(ir_node *, 0);
1486 for (nr = 0; nr < sons; ++nr) {
1487 find_most_inner_loop(get_loop_son(loop, nr));
1490 /* TODO Keep backedges during optimization to avoid
1491 * this ugly allocation and deallocation.
1492 * (set_irn_in seems to destroy them)
/* First loop over the collected loops (body largely missing from this
 * excerpt). */
1495 for (i = 0; i < ARR_LEN(loops); ++i) {
1498 loop = get_irn_loop(loops[i]);
1502 /* This part is useful for testing
1503 * or has to be used if the backedge information is destroyed.
1504 * Which is the case at the moment, because the backedge information gets lost
1505 * before inversion_fix_heads/unrolling_fix_cf, which results in bads.
1506 * NOTE!: Testsuite runs successfully nevertheless...
1510 * assure_cf_loop() creates a completely new loop tree.
1511 * Thus we cannot optimize a loop, assure_cf_loop() and continue with the next loop,
1512 * as the next loop must be searched because it is not distinguishable from the
1513 * already done loops.
1514 * The links of the loops are also not available anymore (to store a "loop done" flag).
1515 * Therefore we save a block per loop.
1516 * NOTE: We rely on the loop optimizations not to remove any block from the loop.
1517 * Later, we fetch the blocks loop attribute, as it is updated by assure_cf_loop.
/* Re-establish all analyses from scratch before optimizing each loop. */
1519 for (i = 0; i < ARR_LEN(loops); ++i) {
1523 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
1525 edges_assure(current_ir_graph);
1526 assure_irg_outs(current_ir_graph);
1528 /* NOTE: sets only the loop attribute of blocks */
1529 /* NOTE: Kills links */
1530 assure_cf_loop(current_ir_graph);
/* Fresh node_info allocation after links were killed above.
 * NOTE(review): the walk runs before the links are re-reserved on the
 * next line — TODO confirm ordering; may rely on reservation being a
 * bookkeeping-only operation. */
1532 irg_walk_graph(current_ir_graph, alloc_node_info, NULL, NULL);
1533 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
1535 /* Get loop from block */
1536 loop = get_irn_loop(loops[i]);
/* Final cleanup of the reserved resources. */
1545 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
1546 ir_free_resources(irg, IR_RESOURCE_PHI_LIST);
/*
 * Public entry point: runs loop_optimization with only unrolling
 * enabled on @p irg.
 */
1549 void do_loop_unrolling(ir_graph *irg)
1551 enable_unrolling = 1;
1553 enable_inversion = 0;
1555 DB((dbg, LEVEL_2, " >>> unrolling (Startnode %N) <<<\n",
1556 get_irg_start(irg)));
1558 loop_optimization(irg);
1560 DB((dbg, LEVEL_2, " >>> unrolling done (Startnode %N) <<<\n",
1561 get_irg_start(irg)));
/*
 * Public entry point: runs loop_optimization with only inversion
 * enabled on @p irg.
 */
1564 void do_loop_inversion(ir_graph *irg)
1566 enable_unrolling = 0;
1568 enable_inversion = 1;
1570 DB((dbg, LEVEL_2, " >>> inversion (Startnode %N) <<<\n",
1571 get_irg_start(irg)));
1573 loop_optimization(irg);
1575 DB((dbg, LEVEL_2, " >>> inversion done (Startnode %N) <<<\n",
1576 get_irg_start(irg)));
/*
 * Public entry point for loop peeling.
 * NOTE(review): unlike the unrolling/inversion entry points, BOTH
 * flags are cleared here; no flag enabling peeling is visible in this
 * excerpt — TODO confirm how peeling is actually selected.
 */
1579 void do_loop_peeling(ir_graph *irg)
1581 enable_unrolling = 0;
1583 enable_inversion = 0;
1585 DB((dbg, LEVEL_2, " >>> peeling (Startnode %N) <<<\n",
1586 get_irg_start(irg)));
1588 loop_optimization(irg);
1590 DB((dbg, LEVEL_2, " >>> peeling done (Startnode %N) <<<\n",
1591 get_irg_start(irg)));
/* Wraps do_loop_inversion into a graph pass (default name
 * "loop_inversion"). */
1595 ir_graph_pass_t *loop_inversion_pass(const char *name)
1597 return def_graph_pass(name ? name : "loop_inversion", do_loop_inversion);
/* Wraps do_loop_unrolling into a graph pass (default name
 * "loop_unroll"). */
1600 ir_graph_pass_t *loop_unroll_pass(const char *name)
1602 return def_graph_pass(name ? name : "loop_unroll", do_loop_unrolling);
/* Wraps do_loop_peeling into a graph pass (default name
 * "loop_peeling"). */
1605 ir_graph_pass_t *loop_peeling_pass(const char *name)
1607 return def_graph_pass(name ? name : "loop_peeling", do_loop_peeling);
/* Registers the firm debug module used by the DB() output in this
 * file. */
1610 void firm_init_loop_opt(void)
1612 FIRM_DBG_REGISTER(dbg, "firm.opt.loop");