2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @author Christian Helmer
23 * @brief loop inversion and loop unrolling, loop peeling
39 #include "array_t.h" /* automatic array */
40 #include "beutil.h" /* get_block */
41 #include "irloop_t.h" /* set_irn_loop*/
48 DEBUG_ONLY(static firm_dbg_module_t *dbg);
51 * Convenience macro for iterating over every phi node of the given block.
52 * Requires phi list per block.
54 #define for_each_phi(block, phi) \
55 for ( (phi) = get_Block_phis( (block) ); (phi) ; (phi) = get_Phi_next( (phi) ) )
58 static ir_loop *cur_loop;
60 /* abortable walker function */
61 typedef unsigned irg_walk_func_abortable(ir_node *, void *);
63 /* condition for walking a node during a copy_walk */
64 typedef unsigned walker_condition(ir_node *);
66 /* node and position of a predecessor */
67 typedef struct out_edges {
72 /* access complex values through the nodes links */
73 typedef struct node_info {
76 ir_node *link; /* temporary links for ssa creation */
77 ir_node **ins; /* ins for phi nodes, during rewiring of blocks */
79 struct node_info *freelistnext; /* linked list to free all node_infos */
82 static node_info *link_node_state_list; /* head of the linked list to free all node_infos */
84 static out_edge *cur_loop_outs; /* A walker may start visiting the current loop with these nodes. */
85 static out_edge *cur_head_outs; /* A walker may start visiting the cur head with these nodes. */
87 static ir_node *loop_cf_head = NULL; /* Loop head node */
88 static unsigned loop_cf_head_valid = 1; /* A loop may have one head, otherwise we do not touch it. */
93 static ir_node *loop_inv_head = NULL;
95 static ir_node *loop_peeled_head = NULL;
97 /* Loop analysis informations */
98 typedef struct loop_info_t {
99 unsigned blocks; /* number of blocks in the loop */
100 unsigned calls; /* number of calls */
101 unsigned loads; /* number of load nodes */
102 unsigned outs; /* outs without keepalives */
104 unsigned invariant_loads;
105 unsigned stores; /* number of store nodes */
106 unsigned opnodes_n; /* nodes that probably result in an instruction */
107 unsigned do_invariant_opt;
111 /* Information about the current loop */
112 static loop_info_t loop_info;
114 /* A walker may start visiting a condition chain with these nodes. */
115 static out_edge *cond_chain_entries;
117 /* Number of unrolling */
120 static unsigned head_inversion_node_count;
121 static unsigned inversion_head_node_limit;
122 static unsigned head_inversion_block_count;
124 static unsigned enable_peeling;
125 static unsigned enable_inversion;
126 static unsigned enable_unrolling;
130 * ============= AUXILIARY FUNCTIONS =====================================
135 * Creates object on the heap, and adds it to a linked list to free it later.
137 static node_info *new_node_info(void) {
138 node_info *l = XMALLOCZ(node_info);
139 l->freelistnext = link_node_state_list;
140 link_node_state_list = l;
146 static node_info *get_node_info(ir_node *n)
148 return ((node_info *)get_irn_link(n));
151 /* Allocates a node_info struct for the given node. For use with a walker. */
152 static void alloc_node_info(ir_node *node, void *env)
156 state = new_node_info();
157 set_irn_link(node, (void *)state);
160 static void free_node_info(void)
164 n = link_node_state_list;
166 node_info *next = n->freelistnext;
171 link_node_state_list = NULL;
/*
 * Uses the linked list of all node_infos to reset their reused fields.
 * In particular the copy attribute has to be cleared, because copy_walk
 * relies on it to detect an already present copy.
 */
178 static void reset_node_infos(void)
181 next = link_node_state_list;
/* NOTE(review): the statements that actually clear each node_info's
 * fields are not visible in this excerpt; only the traversal remains. */
182 while(next->freelistnext) {
183 node_info *cur = next;
184 next = cur->freelistnext;
191 /* Returns the nodes node_info link. */
192 static ir_node *get_link(ir_node *n)
194 return ((node_info *)get_irn_link(n))->link;
197 /* Sets the nodes node_info link. */
198 static void set_link(ir_node *n, ir_node *link)
200 ((node_info *)get_irn_link(n))->link = link;
203 /* Returns a nodes copy. */
204 static ir_node *get_copy(ir_node *n)
206 return ((node_info *)get_irn_link(n))->copy;
209 /* Sets a nodes copy. */
210 static void set_copy(ir_node *n, ir_node *copy)
212 ((node_info *)get_irn_link(n) )->copy = copy;
216 * Convenience macro for iterating over every copy in a linked list
219 #define for_each_copy(node) \
220 for ( ; (node) ; (node) = get_copy(node))
223 * Convenience macro for iterating over every copy in 2 linked lists
224 * of copies in parallel.
226 #define for_each_copy2(high1, low1, high2, low2) \
227 for ( ; (low1) && (low2); (high1) = (low1), (low1) = get_copy(low1), \
228 (high2) = (low2), (low2) = get_copy(low2))
231 * Returns 0 if the node or block is not in cur_loop.
233 static unsigned is_in_loop(ir_node *node)
235 return (get_irn_loop(get_block(node)) == cur_loop);
238 /* Returns if the given be is an alien edge. This is the case when the pred is not in the loop. */
239 static unsigned is_alien_edge(ir_node *n, int i)
241 return(!is_in_loop(get_irn_n(n, i)));
244 /* used for block walker */
245 static void reset_block_mark(ir_node *node, void * env)
250 set_Block_mark(node, 0);
253 static unsigned is_nodesblock_marked(ir_node* node)
255 return (get_Block_mark(get_block(node)));
258 /* Returns the number of blocks in a loop. */
259 int get_loop_n_blocks(ir_loop *loop) {
262 elements = get_loop_n_elements(loop);
264 for(e=0; e<elements; e++) {
265 loop_element elem = get_loop_element(loop, e);
266 if (is_ir_node(elem.kind) && is_Block(elem.node))
/*
 * Appends newpred as an additional control-flow predecessor to the given
 * block and, for every phi of the block, duplicates the phi operand at
 * position pos as the value for the new edge.
 * Requires the per-block phi list to be up to date.
 */
276 static int duplicate_preds(ir_node* node, unsigned pos, ir_node* newpred)
284 assert(is_Block(node) && "duplicate_preds is only allowed for blocks");
286 DB((dbg, LEVEL_5, "duplicate_preds(node %ld, pos %d, newpred %ld)\n",
287 get_irn_node_nr(node), pos, get_irn_node_nr(newpred)));
289 block_arity = get_irn_arity(node);
/* grow the block's in-array by one and append newpred */
291 NEW_ARR_A(ir_node*, ins, block_arity + 1);
293 for (i = 0; i < block_arity; ++i) {
294 ins[i] = get_irn_n(node, i);
296 ins[block_arity] = newpred;
298 set_irn_in(node, block_arity + 1, ins);
/* each phi reuses its operand at pos as the value for the new edge */
300 for_each_phi(node, phi) {
301 int phi_arity = get_irn_arity(phi);
302 DB((dbg, LEVEL_5, "duplicate_preds: fixing phi %ld\n", get_irn_node_nr(phi)));
304 NEW_ARR_A(ir_node *, ins, block_arity + 1);
305 for (i=0; i < phi_arity; ++i) {
306 DB((dbg, LEVEL_5, "in %ld\n", get_irn_node_nr(get_irn_n(phi, i))));
307 ins[i] = get_irn_n(phi, i);
309 ins[block_arity] = get_irn_n(phi, pos);
310 set_irn_in(phi, block_arity + 1, ins);
/*
 * Walker: finds the loop head (an in-loop block with a control-flow
 * predecessor outside of the loop) and collects loop_info statistics.
 * If a second head candidate is found, loop_cf_head_valid is cleared
 * so the loop will not be touched.
 */
318 static void get_loop_info(ir_node *node, void *env)
320 unsigned node_in_loop, pred_in_loop;
324 arity = get_irn_arity(node);
325 for (i = 0; i < arity; i++) {
326 ir_node *pred = get_irn_n(node, i);
328 pred_in_loop = is_in_loop(pred);
329 node_in_loop = is_in_loop(node);
331 /* collect some loop information */
337 /* Find the loops head/the blocks with cfpred outside of the loop */
338 if (is_Block(node) && node_in_loop && !pred_in_loop && loop_cf_head_valid) {
339 ir_node *cfgpred = get_Block_cfgpred(node, i);
341 if (!is_in_loop(cfgpred)) {
342 DB((dbg, LEVEL_5, "potential head %+F because inloop and pred %+F not inloop\n", node, pred));
343 /* another head? We do not touch this. */
344 if (loop_cf_head && loop_cf_head != node) {
345 loop_cf_head_valid = 0;
/*
 * Walker: adds every edge pointing from outside the loop to a node
 * inside it (node outside, predecessor inside) to cur_loop_outs as a
 * (node, predecessor position) tuple.
 */
355 static void get_loop_outs(ir_node *node, void *env)
357 unsigned node_in_loop, pred_in_loop;
361 arity = get_irn_arity(node);
362 for (i = 0; i < arity; i++) {
363 ir_node *pred = get_irn_n(node, i);
365 pred_in_loop = is_in_loop(pred);
366 node_in_loop = is_in_loop(node);
368 if (pred_in_loop && !node_in_loop) {
371 entry.pred_irn_n = i;
372 ARR_APP1(out_edge, cur_loop_outs, entry);
377 static ir_node *ssa_second_def;
378 static ir_node *ssa_second_def_block;
/*
 * Walks the CFG bottom-up from block, searching for a definition of the
 * current value (cached in the blocks' node_info links) and creating
 * phis where several definitions meet. Works without dominance info.
 * Returns the reaching definition for the given block.
 */
383 static ir_node *search_def_and_create_phis(ir_node *block, ir_mode *mode)
391 DB((dbg, LEVEL_5, "ssa search_def_and_create_phis: block %ld\n", get_irn_node_nr(block)));
393 /* Prevents creation of phi that would be bad anyway.
394 * Dead and bad blocks. */
395 if (get_irn_arity(block) < 1 || is_Bad(block))
398 if (block == ssa_second_def_block) {
399 DB((dbg, LEVEL_5, "ssa found second definition: use second def %ld\n", get_irn_node_nr(ssa_second_def)));
400 return ssa_second_def;
403 /* already processed this block? */
404 if (irn_visited(block)) {
405 ir_node *value = get_link(block);
406 DB((dbg, LEVEL_5, "ssa already visited: use linked %ld\n", get_irn_node_nr(value)));
410 irg = get_irn_irg(block);
411 assert(block != get_irg_start_block(irg));
413 /* a Block with only 1 predecessor needs no Phi */
414 n_cfgpreds = get_Block_n_cfgpreds(block);
415 if (n_cfgpreds == 1) {
416 ir_node *pred_block = get_Block_cfgpred_block(block, 0);
419 DB((dbg, LEVEL_5, "ssa 1 pred: walk pred %ld\n", get_irn_node_nr(pred_block)));
421 value = search_def_and_create_phis(pred_block, mode);
422 set_link(block, value);
423 mark_irn_visited(block);
428 /* create a new Phi; operands start as Unknown and are filled in below */
429 NEW_ARR_A(ir_node*, in, n_cfgpreds);
430 for(i = 0; i < n_cfgpreds; ++i)
431 in[i] = new_Unknown(mode);
433 phi = new_r_Phi(block, n_cfgpreds, in, mode);
435 /* Important: always keep block phi list up to date. */
436 add_Block_phi(block, phi);
437 /* EVERY node is assumed to have a node_info linked. */
438 alloc_node_info(phi, NULL);
440 DB((dbg, LEVEL_5, "ssa phi creation: link new phi %ld to block %ld\n", get_irn_node_nr(phi), get_irn_node_nr(block)));
/* link the phi BEFORE recursing so cycles through this block resolve to it */
442 set_link(block, phi);
443 mark_irn_visited(block);
445 /* set Phi predecessors */
446 for(i = 0; i < n_cfgpreds; ++i) {
448 ir_node *pred_block = get_Block_cfgpred_block(block, i);
449 assert(pred_block != NULL);
450 pred_val = search_def_and_create_phis(pred_block, mode);
451 assert(pred_val != NULL);
453 DB((dbg, LEVEL_5, "ssa phi pred:phi %ld, pred %ld\n", get_irn_node_nr(phi), get_irn_node_nr(pred_val)));
454 set_irn_n(phi, i, pred_val);
/*
 * Given two definitions (orig_val in orig_block, second_val in
 * second_block) this constructs SSA-form for the users of the first
 * value (found through its out-edges), creating phis where needed.
 * Works without using the dominance tree.
 */
464 static void construct_ssa(ir_node *orig_block, ir_node *orig_val,
465 ir_node *second_block, ir_node *second_val)
469 const ir_edge_t *edge;
470 const ir_edge_t *next;
472 assert(orig_block && orig_val && second_block && second_val &&
473 "no parameter of construct_ssa may be NULL");
475 /* no need to do anything */
476 if (orig_val == second_val)
479 irg = get_irn_irg(orig_val);
481 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
482 inc_irg_visited(irg);
484 mode = get_irn_mode(orig_val);
485 set_link(orig_block, orig_val);
486 mark_irn_visited(orig_block);
488 ssa_second_def_block = second_block;
489 ssa_second_def = second_val;
491 /* Only fix the users of the first, i.e. the original node */
492 foreach_out_edge_safe(orig_val, edge, next) {
493 ir_node *user = get_edge_src_irn(edge);
494 int j = get_edge_src_pos(edge);
495 ir_node *user_block = get_nodes_block(user);
502 DB((dbg, LEVEL_5, "original user %ld\n", get_irn_node_nr(user)));
/* for Phi users the search starts at the corresponding cfg predecessor */
505 ir_node *pred_block = get_Block_cfgpred_block(user_block, j);
506 newval = search_def_and_create_phis(pred_block, mode);
508 newval = search_def_and_create_phis(user_block, mode);
511 /* If we get a bad node the user keeps the original in. No second definition needed. */
512 if (newval != user && !is_Bad(newval))
513 set_irn_n(user, j, newval);
516 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
/*
 * Constructs SSA for def and all of its copies (linked via the copy
 * attribute), fixing only the operands of the given user node.
 */
522 static void construct_ssa_n(ir_node *def, ir_node *user)
527 const ir_edge_t *edge;
528 const ir_edge_t *next;
529 irg = get_irn_irg(def);
531 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
532 inc_irg_visited(irg);
534 mode = get_irn_mode(def);
/* every copy of def is a definition in its own block */
536 for_each_copy(iter) {
537 set_link(get_nodes_block(iter), iter);
538 mark_irn_visited(get_nodes_block(iter));
540 DB((dbg, LEVEL_5, "ssa_n: Link def %ld to block %ld\n",
541 get_irn_node_nr(iter), get_irn_node_nr(get_nodes_block(iter))));
544 /* Need to search the outs, because we need the in-pos on the user node. */
545 foreach_out_edge_safe(def, edge, next) {
546 ir_node *edge_user = get_edge_src_irn(edge);
547 int edge_src = get_edge_src_pos(edge);
/* NOTE(review): user_block is taken from user (not edge_user); edges for
 * other users are skipped just below, so this appears intentional. */
548 ir_node *user_block = get_nodes_block(user);
551 if (edge_user != user)
555 ir_node *pred_block = get_Block_cfgpred_block(user_block, edge_src);
556 newval = search_def_and_create_phis(pred_block, mode);
558 newval = search_def_and_create_phis(user_block, mode);
561 if (newval != user && !is_Bad(newval))
562 set_irn_n(user, edge_src, newval);
565 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
/*
 * Constructs SSA for every definition in arr: each entry and its copy
 * form the two definitions handed to construct_ssa.
 */
571 void construct_ssa_foreach(ir_node **arr, int arr_n)
574 for(i = 0; i < arr_n; ++i) {
575 ir_node *cppred, *block, *cpblock, *pred;
/* NOTE(review): the assignment of pred (presumably pred = arr[i];)
 * is not visible in this excerpt. */
578 cppred = get_copy(pred);
579 block = get_nodes_block(pred);
580 cpblock = get_nodes_block(cppred);
581 construct_ssa(block, pred, cpblock, cppred);
585 /* get the number of backedges without alien bes */
586 static int get_backedge_n(ir_node *loophead, unsigned with_alien)
590 int arity = get_irn_arity(loophead);
591 for (i = 0; i < arity; ++i) {
592 ir_node *pred = get_irn_n(loophead, i);
593 if (is_backedge(loophead, i) && (with_alien || is_in_loop(pred)))
/*
 * Rewires the two heads (original loop head and the peeled copy) after
 * peeling: backedges of the loop head are redirected so that the peeled
 * code runs once before the loop, and the per-block phi lists are kept
 * consistent. Requires phi list per block.
 */
602 static void peel_fix_heads(void)
604 ir_node **loopheadnins, **peelheadnins;
605 ir_node *loophead = loop_cf_head;
606 ir_node *peelhead = get_copy(loophead);
608 int headarity = get_irn_arity(loophead);
/* alien backedges are left alone, hence with_alien == 0 */
615 int backedges_n = get_backedge_n(loophead, 0);
/* the loop head keeps each backedge twice (original + from peeled copy),
 * the peeled head keeps everything except the backedges */
617 int lhead_arity = 2 * backedges_n;
618 int phead_arity = headarity - backedges_n;
621 NEW_ARR_A(ir_node *, loopheadnins, lhead_arity );
622 NEW_ARR_A(ir_node *, peelheadnins, phead_arity );
624 for_each_phi(loophead, phi) {
625 NEW_ARR_A(ir_node *, get_node_info(phi)->ins, lhead_arity);
627 for_each_phi(peelhead, phi) {
628 NEW_ARR_A(ir_node *, get_node_info(phi)->ins, phead_arity);
631 for (i = 0; i < headarity; i++)
633 ir_node *orgjmp = get_irn_n(loophead, i);
634 ir_node *copyjmp = get_copy(orgjmp);
/*
 * Rewire the head blocks ins and their phi ins.
 * Requires phi list per block.
 */
640 if (is_backedge(loophead, i) && !is_alien_edge(loophead, i)) {
641 loopheadnins[lheadin_c] = orgjmp;
642 for_each_phi(loophead, phi) {
643 get_node_info( phi )->ins[lheadin_c] = get_irn_n( phi, i) ;
647 /* former bes of the peeled code origin now from the loophead */
648 loopheadnins[lheadin_c] = copyjmp;
/* get_irn_n( get_copy_of(phi), i ) <!=> get_copy_of( get_irn_n( phi, i) )
 * Order is crucial! Predecessors outside of the loop are non existent.
 * The copy (cloned with its ins!) has pred i,
 * but phis pred i might not have a copy of itself.
 */
655 for_each_phi(loophead, phi) {
656 get_node_info( phi )->ins[lheadin_c] = get_irn_n( get_copy(phi), i) ;
660 peelheadnins[pheadin_c] = orgjmp;
661 for_each_phi(peelhead, phi) {
662 get_node_info( phi )->ins[pheadin_c] = get_irn_n(phi, i);
668 assert(pheadin_c == ARR_LEN(peelheadnins) &&
669 lheadin_c == ARR_LEN(loopheadnins) &&
670 "the constructed head arities do not match the predefined arities");
672 /* assign the ins to the nodes */
673 set_irn_in(loophead, ARR_LEN(loopheadnins), loopheadnins);
674 set_irn_in(peelhead, ARR_LEN(peelheadnins), peelheadnins);
676 for_each_phi(loophead, phi) {
677 ir_node **ins = get_node_info( phi )->ins;
678 set_irn_in(phi, lhead_arity, ins);
681 for_each_phi(peelhead, phi) {
682 ir_node **ins = get_node_info( phi )->ins;
683 set_irn_in(phi, phead_arity, ins);
/*
 * Creates a raw copy (ins are still the old ones) of the given node,
 * attaches a fresh node_info to the copy and links original and copy.
 * We rely on copies to be NOT visited.
 */
691 static ir_node *rawcopy_node(ir_node *node)
697 cp = exact_copy(node);
699 arity = get_irn_arity(node);
/* backedge flags of the original are transferred to the copy */
701 for (i = 0; i < arity; ++i) {
702 if (is_backedge(node, i))
707 cpstate = new_node_info();
708 set_irn_link(cp, cpstate);
/* TODO(review): exact_copy already sets the Macroblock —
 * why is this needed anyway? */
714 set_Block_MacroBlock(cp, cp);
/*
 * Recursive walker that copies all walked nodes. A node is walked iff
 * walk_condition(node) is true. Copies get set_loop as their loop.
 * All node_info->copy attributes have to be NULL prior to every walk,
 * because a non-NULL copy marks an already (possibly temporarily)
 * copied node.
 */
724 static void copy_walk(ir_node *node, walker_condition *walk_condition, ir_loop *set_loop)
730 ir_graph *irg = current_ir_graph;
731 node_info *node_info = get_node_info(node);
/* break condition and cycle resolver: an already visited node gets a
 * temporary copy (with stale ins) so cycles terminate */
736 if (get_irn_visited(node) >= get_irg_visited(irg)) {
737 /* Here we rely on nodestate's copy being initialized with NULL */
738 DB((dbg, LEVEL_5, "copy_walk: We have already visited %ld\n", get_irn_node_nr(node)));
739 if (node_info->copy == NULL) {
740 cp = rawcopy_node(node);
741 DB((dbg, LEVEL_5, "The TEMP copy of %ld is created %ld\n", get_irn_node_nr(node), get_irn_node_nr(cp)));
747 mark_irn_visited(node);
/* walk the block of a non-block node first */
749 if (!is_Block(node)) {
750 ir_node *pred = get_nodes_block(node);
751 if (walk_condition(pred))
752 DB((dbg, LEVEL_5, "walk block %ld\n", get_irn_node_nr(pred)));
753 copy_walk(pred, walk_condition, set_loop);
756 arity = get_irn_arity(node);
758 NEW_ARR_A(ir_node *, cpin, arity);
760 for (i = get_irn_arity(node) - 1; i >= 0; --i) {
761 ir_node *pred = get_irn_n(node, i);
763 if (walk_condition(pred)) {
764 DB((dbg, LEVEL_5, "walk node %ld\n", get_irn_node_nr(pred)));
765 copy_walk(pred, walk_condition, set_loop);
766 cpin[i] = get_copy(pred);
767 DB((dbg, LEVEL_5, "copy of %ld gets new in %ld which is copy of %ld\n",
768 get_irn_node_nr(node), get_irn_node_nr(get_copy(pred)), get_irn_node_nr(pred)));
774 /* copy node / finalize temp node */
775 if (node_info->copy == NULL) {
776 /* No temporary copy existent */
777 cp = rawcopy_node(node);
778 DB((dbg, LEVEL_5, "The FINAL copy of %ld is CREATED %ld\n", get_irn_node_nr(node), get_irn_node_nr(cp)));
780 /* temporary copy is existent but without correct ins */
782 DB((dbg, LEVEL_5, "The FINAL copy of %ld is EXISTENT %ld\n", get_irn_node_nr(node), get_irn_node_nr(cp)));
785 if (!is_Block(node)) {
786 ir_node *cpblock = get_copy(get_nodes_block(node));
788 set_nodes_block(cp, cpblock );
789 /* fix the phi information in attr.phis */
791 add_Block_phi(cpblock, cp);
794 set_irn_loop(cp, set_loop);
795 set_irn_in(cp, ARR_LEN(cpin), cpin);
/*
 * Performs loop peeling: duplicates the loop body once (copy_walk over
 * all loop-out edges), fixes the control flow of the loop-entry nodes
 * (which now have an additional predecessor), and collects the values
 * that will need phis for a later SSA-construction pass.
 */
801 static void peel(out_edge *loop_outs)
804 ir_node **entry_buffer;
807 ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
809 NEW_ARR_A(ir_node *, entry_buffer, ARR_LEN(loop_outs));
811 /* duplicate loop walk */
812 inc_irg_visited(current_ir_graph);
814 for(i = 0; i < ARR_LEN(loop_outs); i++) {
815 out_edge entry = loop_outs[i];
816 ir_node *node = entry.node;
817 ir_node *pred = get_irn_n(entry.node, entry.pred_irn_n);
819 if (is_Block(node)) {
820 copy_walk(pred, is_in_loop, NULL);
821 duplicate_preds(node, entry.pred_irn_n, get_copy(pred) );
823 copy_walk(pred, is_in_loop, NULL);
824 /* leave out keepalives */
/* Node is user of a value defined inside the loop.
 * We'll need a phi since we duplicated the loop. */
/* Cannot construct_ssa here, because it needs another walker. */
829 entry_buffer[entry_c++] = pred;
833 ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
835 /* Rewires the 2 heads */
838 /* Generate phis for values from peeled code and original loop */
839 construct_ssa_foreach(entry_buffer, entry_c);
840 /*for(i = 0; i < entry_c; i++)
842 ir_node *cppred, *block, *cpblock, *pred;
844 pred = entry_buffer[i];
845 cppred = get_copy(pred);
846 block = get_nodes_block(pred);
847 cpblock = get_nodes_block(cppred);
848 construct_ssa(block, pred, cpblock, cppred);
/*
 * Walker: populates cur_head_outs with (node, pred_pos) tuples where
 * the node's predecessor at pred_pos is inside the head/condition chain
 * but the node itself is not.
 * Head and condition chain blocks must be marked.
 */
857 static void get_head_outs(ir_node *node, void *env)
860 int arity = get_irn_arity(node);
863 DB((dbg, LEVEL_5, "get head entries %ld \n", get_irn_node_nr(node)));
865 for(i = 0; i < arity; ++i) {
866 /* node is not in the head, but the predecessor is.
867 * (head or loop chain nodes are marked) */
869 DB((dbg, LEVEL_5, "... "));
870 DB((dbg, LEVEL_5, "node %ld marked %d (0) pred %d marked %d (1) \n",
871 node->node_nr, is_nodesblock_marked(node),i, is_nodesblock_marked(get_irn_n(node, i))));
873 if (!is_nodesblock_marked(node) && is_nodesblock_marked(get_irn_n(node, i))) {
876 entry.pred_irn_n = i;
878 "Found head chain entry %ld @%d because !inloop %ld and inloop %ld\n",
879 get_irn_node_nr(node), i, get_irn_node_nr(node), get_irn_node_nr(get_irn_n(node, i))));
880 ARR_APP1(out_edge, cur_head_outs, entry);
/*
 * Finds condition chains to be inverted, until the node count exceeds
 * the limit. A block belongs to the chain if a condition branches out
 * of the loop. Blocks in the chain get their block mark set.
 * Returns 1 if the given block belongs to the condition chain.
 */
890 static unsigned find_condition_chains(ir_node *block) {
891 const ir_edge_t *edge;
895 DB((dbg, LEVEL_5, "condition_chains for block %ld\n", get_irn_node_nr(block)));
/* Collect all outs, including keeps.
 * (TODO firm function for number of out edges?) */
899 foreach_out_edge_kind(block, edge, EDGE_KIND_NORMAL) {
/* We do not want to collect more nodes from condition chains than the
 * limit allows. Also, leave at least one block as loop body. */
905 if (head_inversion_node_count + nodes_n > inversion_head_node_limit
906 || head_inversion_block_count + 1 == loop_info.blocks) {
907 set_Block_mark(block, 0);
912 /* First: check our successors, and add all succs that are outside of the loop to the list */
913 foreach_block_succ(block, edge) {
914 ir_node *src = get_edge_src_irn( edge );
915 int pos = get_edge_src_pos( edge );
917 if (!is_in_loop(src)) {
922 entry.pred_irn_n = pos;
923 ARR_APP1(out_edge, cond_chain_entries, entry);
924 mark_irn_visited(src);
/* this block is not part of the chain, because the chain would become
 * too long or we have no successor outside of the loop */
932 set_Block_mark(block, 0);
/* mark this block as part of the chain and account for its nodes */
935 set_Block_mark(block, 1);
936 ++head_inversion_block_count;
937 DB((dbg, LEVEL_5, "block %ld is part of condition chain\n", get_irn_node_nr(block)));
938 head_inversion_node_count += nodes_n;
941 /* Second: walk all successors, and add them to the list if they are not part of the chain */
942 foreach_block_succ(block, edge) {
944 ir_node *src = get_edge_src_irn( edge );
945 int pos = get_edge_src_pos( edge );
947 /* already done cases */
948 if (!is_in_loop( src ) || (get_irn_visited(src) >= get_irg_visited(current_ir_graph))) {
952 mark_irn_visited(src);
953 DB((dbg, LEVEL_5, "condition chain walk %ld\n", get_irn_node_nr(src)));
954 inchain = find_condition_chains(src);
956 /* if successor is not part of chain we need to collect its outs */
960 entry.pred_irn_n = pos;
961 ARR_APP1(out_edge, cond_chain_entries, entry);
/*
 * Rewires the loop head and the inverted (copied) head for loop
 * inversion: the loop head keeps only its backedges, the inverted head
 * keeps all other predecessors. Requires phi list per block.
 */
970 static void inversion_fix_heads(void)
972 ir_node **loopheadnins, **invheadnins;
973 ir_node *loophead = loop_cf_head;
974 ir_node *invhead = get_copy(loophead);
976 int headarity = get_irn_arity(loophead);
/* alien backedges are not touched, hence with_alien == 0 */
983 int backedges_n = get_backedge_n(loophead, 0);
984 int lhead_arity = backedges_n;
985 int ihead_arity = headarity - backedges_n;
987 assert(lhead_arity != 0 && "Loophead has arity 0. Probably wrong backedge informations.");
988 assert(ihead_arity != 0 && "Inversionhead has arity 0. Probably wrong backedge informations.");
990 /* new in arrays for all phis in the head blocks */
991 NEW_ARR_A(ir_node *, loopheadnins, lhead_arity);
992 NEW_ARR_A(ir_node *, invheadnins, ihead_arity);
994 for_each_phi(loophead, phi) {
995 NEW_ARR_A(ir_node *, get_node_info(phi)->ins, lhead_arity);
997 for_each_phi(invhead, phi) {
998 NEW_ARR_A(ir_node *, get_node_info(phi)->ins, ihead_arity);
1001 for (i = 0; i < headarity; i++) {
1002 ir_node *pred = get_irn_n(loophead, i);
/*
 * Rewire the head blocks ins and their phi ins.
 * Requires phi list per block.
 */
1008 if (is_backedge(loophead, i) && !is_alien_edge(loophead, i)) {
1009 /* just copy these edges */
1010 loopheadnins[lheadin_c] = pred;
1011 for_each_phi(loophead, phi) {
1012 get_node_info(phi)->ins[lheadin_c] = get_irn_n(phi, i);
1016 invheadnins[iheadin_c] = pred;
1017 for_each_phi(invhead, phi) {
1018 get_node_info(phi)->ins[iheadin_c] = get_irn_n(phi, i) ;
1024 /* assign the ins to the head blocks */
1025 set_irn_in(loophead, ARR_LEN(loopheadnins), loopheadnins);
1026 set_irn_in(invhead, ARR_LEN(invheadnins), invheadnins);
1028 /* assign the ins for the phis */
1029 for_each_phi(loophead, phi) {
1030 ir_node **ins = get_node_info(phi)->ins;
1031 set_irn_in(phi, lhead_arity, ins);
1034 for_each_phi(invhead, phi) {
1035 ir_node **ins = get_node_info(phi)->ins;
1036 set_irn_in(phi, ihead_arity, ins);
/*
 * Performs the actual loop inversion: duplicates the condition chain
 * (given by head_entries), rewires the heads and constructs SSA for
 * all values that now have two definitions.
 */
1040 static void inversion_walk(out_edge *head_entries)
1045 ir_node **entry_buffer;
1046 ir_node **head_phi_assign;
1048 NEW_ARR_A(ir_node *, entry_buffer, ARR_LEN(head_entries));
1050 head_phi_assign = NEW_ARR_F(ir_node *, 0);
1052 /* Find assignments in the condition chain, to construct_ssa for them after the loop inversion. */
1053 for_each_phi(loop_cf_head , phi) {
1054 for(i=0; i<get_irn_arity(phi); ++i) {
1055 ir_node *def = get_irn_n(phi, i);
1056 if (is_nodesblock_marked(def)) {
1057 ARR_APP1(ir_node *, head_phi_assign, def);
1062 ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
/*
 * duplicate condition chain
 */
1067 inc_irg_visited(current_ir_graph);
1069 for(i = 0; i < ARR_LEN(head_entries); ++i) {
1070 out_edge entry = head_entries[i];
1071 ir_node *node = entry.node;
1072 ir_node *pred = get_irn_n(entry.node, entry.pred_irn_n);
1074 if (is_Block(node)) {
1075 DB((dbg, LEVEL_5, "\nInit walk block %ld\n", get_irn_node_nr(pred)));
1076 copy_walk(pred, is_nodesblock_marked, cur_loop);
1077 duplicate_preds(node, entry.pred_irn_n, get_copy(pred) );
1079 DB((dbg, LEVEL_5, "\nInit walk node %ld\n", get_irn_node_nr(pred)));
1080 copy_walk(pred, is_nodesblock_marked, cur_loop);
1082 /* ignore keepalives */
/* Node is user of a value assigned inside the loop.
 * We will need a phi since we duplicated the head. */
1086 entry_buffer[entry_c++] = pred;
1090 ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1092 inversion_fix_heads();
1094 /* Generate phis for users of values assigned in the condition chain
1095 * and read in the loops body */
1096 construct_ssa_foreach(entry_buffer, entry_c);
1098 /* Generate phis for values that are assigned in the condition chain
1099 * but not read in the loops body. */
1100 construct_ssa_foreach(head_phi_assign, ARR_LEN(head_phi_assign));
/* after inversion the copied head is the new loop head */
1102 loop_cf_head = get_copy(loop_cf_head);
/*
 * Performs loop peeling on cur_loop: collects all loop-out edges,
 * peels one iteration and invalidates the affected analysis info
 * (dominance, loop info, outs).
 */
1106 void loop_peeling(void)
1108 cur_loop_outs = NEW_ARR_F(out_edge, 0);
1109 irg_walk_graph( current_ir_graph, get_loop_outs, NULL, NULL );
1111 peel(cur_loop_outs);
1116 set_irg_doms_inconsistent(current_ir_graph);
1117 set_irg_loopinfo_inconsistent(current_ir_graph);
1118 set_irg_outs_inconsistent(current_ir_graph);
1120 DEL_ARR_F(cur_loop_outs);
/*
 * Performs loop inversion on cur_loop: searches for a condition chain
 * starting at the loop head, duplicates it and rewires the loop so the
 * condition is tested before entering the body. Bails out for loops
 * with fewer than 2 blocks or without an invertible condition chain
 * (which also catches endless loops).
 */
1124 void loop_inversion(void)
1126 unsigned do_inversion = 1;
1128 inversion_head_node_limit = INT_MAX;
1130 /* Search for condition chains. */
1131 ir_reserve_resources(current_ir_graph, IR_RESOURCE_BLOCK_MARK);
1133 irg_walk_graph(current_ir_graph, reset_block_mark, NULL, NULL);
1135 loop_info.blocks = get_loop_n_blocks(cur_loop);
1136 cond_chain_entries = NEW_ARR_F(out_edge, 0);
1138 head_inversion_node_count = 0;
1139 head_inversion_block_count = 0;
1141 set_Block_mark(loop_cf_head, 1);
1142 mark_irn_visited(loop_cf_head);
1143 inc_irg_visited(current_ir_graph);
1145 find_condition_chains(loop_cf_head);
1147 DB((dbg, LEVEL_3, "Loop contains %d blocks.\n", loop_info.blocks));
1148 if (loop_info.blocks < 2) {
1150 DB((dbg, LEVEL_3, "Loop contains %d (less than 2) blocks => No Inversion done.\n", loop_info.blocks));
1153 /* We also catch endless loops here,
1154 * because they do not have a condition chain. */
1155 if (head_inversion_block_count < 1) {
1157 DB((dbg, LEVEL_3, "Loop contains %d (less than 1) invertible blocks => No Inversion done.\n", head_inversion_block_count));
1161 cur_head_outs = NEW_ARR_F(out_edge, 0);
1163 /* Get all edges pointing into the head or condition chain (outs). */
1164 irg_walk_graph(current_ir_graph, get_head_outs, NULL, NULL);
1165 inversion_walk(cur_head_outs);
1167 DEL_ARR_F(cur_head_outs);
1169 set_irg_doms_inconsistent(current_ir_graph);
1170 set_irg_loopinfo_inconsistent(current_ir_graph);
1171 set_irg_outs_inconsistent(current_ir_graph);
1175 DEL_ARR_F(cond_chain_entries);
1176 ir_free_resources(current_ir_graph, IR_RESOURCE_BLOCK_MARK);
1180 * Returns last element of linked list of copies by
1181 * walking the linked list.
1183 ir_node *get_last_copy(ir_node *node)
1185 ir_node *copy, *cur;
1187 while ((copy = get_copy(cur))) {
/*
 * Rewires the floating copies produced by unrolling: the copied loop
 * bodies are chained top-down onto the original loop's backedges, and
 * the phis of every head copy are rewired accordingly.
 */
1196 void unrolling_fix_cf(void)
1198 ir_node *loophead = loop_cf_head;
1199 int headarity = get_irn_arity(loophead);
1200 ir_node *phi, *headnode;
1201 /*ir_node *high, *low;*/
/* alien backedges are not touched, hence with_alien == 0 */
1205 int backedges_n = get_backedge_n(loophead, 0);
1206 int unroll_arity = backedges_n;
1208 /* Create ins for all heads and their phis */
1209 headnode = get_copy(loophead);
1210 for_each_copy(headnode) {
1211 NEW_ARR_A(ir_node *, get_node_info(headnode)->ins, unroll_arity);
1212 for_each_phi(headnode, phi) {
1213 NEW_ARR_A(ir_node *, get_node_info(phi)->ins, unroll_arity);
1217 /* Append the copies to the existing loop. */
1218 for (i = 0; i < headarity; i++) {
1219 ir_node *upper_head = loophead;
1220 ir_node *lower_head = get_copy(loophead);
1222 ir_node *upper_pred = get_irn_n(loophead, i);
1223 ir_node *lower_pred = get_copy(get_irn_n(loophead, i));
/*
 * Build unrolled loop top down: each copied head takes the previous
 * iteration's backedge predecessor as its in.
 */
1230 if (is_backedge(loophead, i) && !is_alien_edge(loophead, i)) {
1231 for_each_copy2(upper_head, lower_head, upper_pred, lower_pred) {
1232 get_node_info(lower_head)->ins[uhead_in_n] = upper_pred;
1234 for_each_phi(upper_head, phi) {
1235 ir_node *phi_copy = get_copy(phi);
1236 get_node_info(phi_copy)->ins[uhead_in_n] = get_irn_n(phi, i);
1240 last_pred = upper_pred;
1243 /* Fix the topmost loop heads backedges. */
1244 set_irn_n(loophead, i, last_pred);
1245 for_each_phi(loophead, phi) {
1246 ir_node *last_phi = get_last_copy(phi);
1247 ir_node *pred = get_irn_n(last_phi, i);
1248 set_irn_n(phi, i, pred);
/* finally commit the collected ins to all head copies and their phis */
1253 headnode = get_copy(loophead);
1254 for_each_copy(headnode) {
1255 set_irn_in(headnode, unroll_arity, get_node_info(headnode)->ins);
1256 for_each_phi(headnode, phi) {
1257 set_irn_in(phi, unroll_arity, get_node_info(phi)->ins);
1263 static ir_node *add_phi(ir_node *node, int phi_pos)
1268 mode = get_irn_mode(get_irn_n(node, phi_pos));
1269 ir_node *block = get_nodes_block(node);
1270 int n_cfgpreds = get_irn_arity(block);
1271 ir_node *pred = get_irn_n(node, phi_pos);
1274 /* create a new Phi */
1275 NEW_ARR_A(ir_node*, in, n_cfgpreds);
1276 for(i = 0; i < n_cfgpreds; ++i)
1277 in[i] = new_Unknown(mode); /*pred;*/
1279 phi = new_r_Phi(block, n_cfgpreds, in, mode);
1281 assert(phi && "phi null");
1282 assert(is_Bad(phi) && "phi bad");
1284 /* Important: always keep block phi list up to date. */
1285 add_Block_phi(block, phi);
1286 /* EVERY node is assumed to have a node_info linked. */
1287 alloc_node_info(phi, NULL);
1289 set_irn_n(node, phi_pos, phi);
1297 * Could be improved with variable range information.
/*
 * Unrolls the current loop (cur_loop): the loop content is duplicated
 * unroll_times-1 times and the copies are chained behind the original;
 * afterwards SSA form is reconstructed for all values leaving the loop.
 * NOTE(review): declarations (i, j, unroll_times) and several closing
 * braces are elided in this excerpt of the file.
 */
1299 void loop_unrolling(void)
/* Collect all edges pointing out of the current loop. */
1305 cur_loop_outs = NEW_ARR_F(out_edge, 0);
1306 irg_walk_graph( current_ir_graph, get_loop_outs, NULL, NULL );
1308 ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1310 /* duplicate whole loop content */
1311 inc_irg_visited(current_ir_graph);
/* First pass: copy the loop body starting at the (non-Block) predecessors
 * of the out-edges; each iteration chains a copy onto the previous one. */
1313 for(i = 0; i < ARR_LEN(cur_loop_outs); i++) {
1314 out_edge entry = cur_loop_outs[i];
1315 ir_node *node = entry.node;
1316 ir_node *pred = get_irn_n(entry.node, entry.pred_irn_n);
1318 if (!is_Block(node)) {
1320 for (j = 0; j < unroll_times-1; ++j) {
1321 copy_walk(pred, is_in_loop, cur_loop);
1323 pred = get_copy(pred);
/* Second pass: same for Block out-edges; additionally duplicate the block
 * predecessors so control flow actually reaches the copies. */
1328 for(i = 0; i < ARR_LEN(cur_loop_outs); i++) {
1329 out_edge entry = cur_loop_outs[i];
1330 ir_node *node = entry.node;
1331 ir_node *pred = get_irn_n(entry.node, entry.pred_irn_n);
1333 /* build linked list of copied nodes/blocks */
1334 if (is_Block(node)) {
1335 for (j = 0; j < unroll_times-1; ++j) {
1336 copy_walk(pred, is_in_loop, cur_loop);
1337 duplicate_preds(node, entry.pred_irn_n, get_copy(pred));
1339 pred = get_copy(pred);
1344 ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1346 /* Line up the floating copies. */
1349 /* Generate phis for all loop outs */
1350 for(i = 0; i < ARR_LEN(cur_loop_outs); i++) {
1351 out_edge entry = cur_loop_outs[i];
1352 ir_node *node = entry.node;
1353 ir_node *pred = get_irn_n(entry.node, entry.pred_irn_n);
/* SSA reconstruction only for real data users outside the loop;
 * Blocks and End have no value to merge. */
1355 if (!is_Block(node) && !is_End(node)) {
1356 DB((dbg, LEVEL_1, " construct_ssa_n def %ld node %ld pos %d\n",
1357 get_irn_node_nr(pred), get_irn_node_nr(node), entry.pred_irn_n));
1358 construct_ssa_n(pred, node);
1362 DEL_ARR_F(cur_loop_outs);
/* The transformation invalidates dominance, loop and out information. */
1364 set_irg_doms_inconsistent(current_ir_graph);
1365 set_irg_loopinfo_inconsistent(current_ir_graph);
1366 set_irg_outs_inconsistent(current_ir_graph);
/* Initialization and analysis of a single loop: resets the per-loop state,
 * gathers loop statistics via get_loop_info and, depending on the enable_*
 * flags, dispatches the selected transformation.
 * (The original header comment was truncated.) */
1370 static void init_analyze(ir_loop *loop)
1372 /* Init new for every loop */
1375 loop_cf_head = NULL;
1376 loop_cf_head_valid = 1;
1377 loop_inv_head = NULL;
1378 loop_peeled_head = NULL;
/* Reset the statistics that get_loop_info accumulates. */
1381 loop_info.calls = 0;
1382 loop_info.loads = 0;
1383 loop_info.blocks = 0;
1385 DB((dbg, LEVEL_2, " >>>> current loop includes node %ld <<<\n", get_irn_node_nr(get_loop_node(loop, 0))));
1387 irg_walk_graph(current_ir_graph, get_loop_info, NULL, NULL);
1389 /* RETURN if there is no valid head */
1390 if (!loop_cf_head || !loop_cf_head_valid) {
1391 DB((dbg, LEVEL_2, "No valid loop head. Nothing done.\n"));
/* Dispatch to the enabled transformation. */
1398 if (enable_inversion)
1400 if (enable_unrolling)
/* NOTE(review): this "call in loop" bail-out appears AFTER the dispatch
 * above; several lines are elided here, so the actual ordering/guards in
 * the full source may differ — verify against the complete file. */
1404 /* RETURN if there is a call in the loop */
1405 if (loop_info.calls)
1409 DB((dbg, LEVEL_2, " <<<< end of loop with node %ld >>>>\n", get_irn_node_nr(get_loop_node(loop, 0))));
/* Find most inner loops and send them to analyze_loop */
/* NOTE(review): in this excerpt the function records blocks of innermost
 * loops in the global 'loops' array; no direct analyze call is visible —
 * the caller apparently processes 'loops' afterwards. */
1413 static void find_most_inner_loop(ir_loop *loop)
1415 /* descend into sons */
1416 int sons = get_loop_n_sons(loop);
/* Presumably only reached for loops without sons (i.e. innermost); the
 * guard is elided in this excerpt — TODO confirm against the full source. */
1422 el_n = get_loop_n_elements(loop);
1424 for (i=0; i < el_n; ++i) {
1425 elem = get_loop_element(loop, i);
1426 /* We can only rely on the blocks,
1427 * as the loop attribute of the nodes seems not to be set. */
1428 if (is_ir_node(elem.kind) && is_Block(elem.node)) {
1429 ARR_APP1(ir_node *, loops, elem.node);
1430 DB((dbg, LEVEL_5, "Found most inner loop (contains block %+F)\n", elem.node));
/* Recurse into all child loops. */
1436 for(s=0; s<sons; s++) {
1437 find_most_inner_loop(get_loop_son(loop, s));
1443 * Assure preconditions are met and go through all loops.
/*
 * Driver: prepares the graph (outs, CF loop info, phi lists, node_infos),
 * collects one representative block per innermost loop, then optimizes each
 * loop in turn, re-establishing the analyses the optimizations destroy.
 * NOTE(review): declarations (loop, sons, nr, i), the per-loop call sites
 * and several closing braces are elided in this excerpt.
 */
1445 void loop_optimization(ir_graph *irg)
1451 link_node_state_list = NULL;
1452 set_current_ir_graph(irg);
1456 assure_irg_outs(irg);
1458 /* NOTE: sets only the loop attribute of blocks, not nodes */
1459 /* NOTE: Kills links */
1460 assure_cf_loop(irg);
/* Phi lists stay reserved for the whole pass; the link resource is only
 * needed while collect_phiprojs runs. */
1462 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1463 collect_phiprojs(irg);
1464 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
1466 /* allocate node_info for additional information on nodes */
1467 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
1468 irg_walk_graph(current_ir_graph, alloc_node_info, NULL, NULL);
/* Collect one block per innermost loop into the global 'loops' array. */
1470 loop = get_irg_loop(irg);
1471 sons = get_loop_n_sons(loop);
1473 loops = NEW_ARR_F(ir_node *, 0);
1475 for (nr = 0; nr < sons; ++nr) {
1476 find_most_inner_loop(get_loop_son(loop, nr));
1479 /* TODO Keep backedges during optimization to avoid
1480 * this ugly allocation and deallocation.
1481 * (set_irn_in seems to destroy them)
1484 for (i = 0; i < ARR_LEN(loops); ++i) {
1487 loop = get_irn_loop(loops[i]);
1491 /* This part is useful for testing
1492 * or has to be used if the backedge information is destroyed.
1493 * Which is the case at the moment, because the backedge information gets lost
1494 * before inversion_fix_heads/unrolling_fix_cf, which results in bads.
1495 * NOTE!: Testsuite runs successfully nevertheless...
1499 * assure_cf_loop() creates a completely new loop tree.
1500 * Thus we cannot optimize a loop, assure_cf_loop() and continue with the next loop,
1501 * as the next loop must be searched because it is not distinguishable from the
1502 * already done loops.
1503 * The links of the loops are also not available anymore (to store a "loop done" flag).
1504 * Therefore we save a block per loop.
1505 * NOTE: We rely on the loop optimizations not to remove any block from the loop.
1506 * Later, we fetch the blocks loop attribute, as it is updated by assure_cf_loop.
1508 for (i = 0; i < ARR_LEN(loops); ++i) {
1512 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
/* Re-establish edges/outs/CF-loop info destroyed by the previous loop's
 * optimization before touching the next loop. */
1514 edges_assure(current_ir_graph);
1515 assure_irg_outs(current_ir_graph);
1517 /* NOTE: sets only the loop attribute of blocks */
1518 /* NOTE: Kills links */
1519 assure_cf_loop(current_ir_graph);
/* NOTE(review): here the alloc_node_info walk runs BEFORE the
 * IR_RESOURCE_IRN_LINK reservation — the opposite order of the setup
 * above (reserve, then walk). Looks inverted; confirm against the full
 * source. */
1521 irg_walk_graph(current_ir_graph, alloc_node_info, NULL, NULL);
1522 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
1524 /* Get loop from block */
1525 loop = get_irn_loop(loops[i]);
1534 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
1535 ir_free_resources(irg, IR_RESOURCE_PHI_LIST);
1538 void do_loop_unrolling(ir_graph *irg)
1540 enable_unrolling = 1;
1542 enable_inversion = 0;
1544 DB((dbg, LEVEL_2, " >>> unrolling (Startnode %ld) <<<\n",
1545 get_irn_node_nr(get_irg_start(irg))));
1547 loop_optimization(irg);
1549 DB((dbg, LEVEL_2, " >>> unrolling done (Startnode %ld) <<<\n",
1550 get_irn_node_nr(get_irg_start(irg))));
1553 void do_loop_inversion(ir_graph *irg)
1555 enable_unrolling = 0;
1557 enable_inversion = 1;
1559 DB((dbg, LEVEL_2, " >>> inversion (Startnode %ld) <<<\n",
1560 get_irn_node_nr(get_irg_start(irg))));
1562 loop_optimization(irg);
1564 DB((dbg, LEVEL_2, " >>> inversion done (Startnode %ld) <<<\n",
1565 get_irn_node_nr(get_irg_start(irg))));
1568 void do_loop_peeling(ir_graph *irg)
1570 enable_unrolling = 0;
1572 enable_inversion = 0;
1574 DB((dbg, LEVEL_2, " >>> peeling (Startnode %ld) <<<\n",
1575 get_irn_node_nr(get_irg_start(irg))));
1577 loop_optimization(irg);
1579 DB((dbg, LEVEL_2, " >>> peeling done (Startnode %ld) <<<\n",
1580 get_irn_node_nr(get_irg_start(irg))));
1584 void firm_init_loop_opt(void)
1586 FIRM_DBG_REGISTER(dbg, "firm.opt.loop");