2 * This file is part of libFirm.
3 * Copyright (C) 2012 University of Karlsruhe.
8 * @author Christian Helmer
9 * @brief loop inversion and loop unrolling
16 #include "iroptimize.h"
35 #include "irbackedge_t.h"
36 #include "irnodemap.h"
39 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
42 * Convenience macro for iterating over every phi node of the given block.
43 * Requires phi list per block.
/* NOTE: the body of the loop must not remove (phi) itself from the list;
 * use for_each_phi_safe below for that. */
45 #define for_each_phi(block, phi) \
46 for ((phi) = get_Block_phis( (block) ); (phi) ; (phi) = get_Phi_next((phi)))
/* Deletion-safe variant: (next) is fetched before the body runs, so the
 * current (phi) may be exchanged/removed during iteration. */
48 #define for_each_phi_safe(head, phi, next) \
49 for ((phi) = (head), (next) = (head) ? get_Phi_next((head)) : NULL; \
50 (phi) ; (phi) = (next), (next) = (next) ? get_Phi_next((next)) : NULL)
52 /* Currently processed loop. */
53 static ir_loop *cur_loop;
55 /* Flag for kind of unrolling. */
59 } unrolling_kind_flag;
61 /* Predicate deciding whether a node is visited during copy_walk. */
62 typedef bool walker_condition(const ir_node *);
64 /* Node and position of a predecessor. */
65 typedef struct entry_edge {
71 /* Node info for unrolling. */
72 typedef struct unrolling_node_info {
74 } unrolling_node_info;
76 /* Outs of the nodes head. */
77 static entry_edge *cur_head_outs;
79 /* Information about the loop head */
80 static ir_node *loop_head = NULL;
81 static bool loop_head_valid = true;
83 /* List of all inner loops, that are processed. */
84 static ir_loop **loops;
87 typedef struct loop_stats_t {
91 unsigned too_large_adapted;
92 unsigned cc_limit_reached;
95 unsigned u_simple_counting_loop;
96 unsigned constant_unroll;
97 unsigned invariant_unroll;
102 static loop_stats_t stats;
104 /* Set stats to zero */
/* Clears all collected loop statistics (the global 'stats' struct). */
105 static void reset_stats(void)
107 memset(&stats, 0, sizeof(loop_stats_t));
/* Dumps the collected loop statistics to the debug module at LEVEL_2. */
111 static void print_stats(void)
113 DB((dbg, LEVEL_2, "---------------------------------------\n"));
114 DB((dbg, LEVEL_2, "loops : %d\n",stats.loops));
115 DB((dbg, LEVEL_2, "inverted : %d\n",stats.inverted));
116 DB((dbg, LEVEL_2, "too_large : %d\n",stats.too_large));
117 DB((dbg, LEVEL_2, "too_large_adapted : %d\n",stats.too_large_adapted));
118 DB((dbg, LEVEL_2, "cc_limit_reached : %d\n",stats.cc_limit_reached));
119 DB((dbg, LEVEL_2, "calls_limit : %d\n",stats.calls_limit));
120 DB((dbg, LEVEL_2, "u_simple_counting : %d\n",stats.u_simple_counting_loop));
121 DB((dbg, LEVEL_2, "constant_unroll : %d\n",stats.constant_unroll));
122 DB((dbg, LEVEL_2, "invariant_unroll : %d\n",stats.invariant_unroll));
123 DB((dbg, LEVEL_2, "=======================================\n"));
126 /* Commandline parameters */
127 typedef struct loop_opt_params_t {
128 unsigned max_loop_size; /* Maximum number of nodes [nodes]*/
129 int depth_adaption; /* Loop nest depth adaption [percent] */
130 unsigned allowed_calls; /* Number of calls allowed [number] */
131 bool count_phi; /* Count phi nodes */
132 bool count_proj; /* Count projections */
134 unsigned max_cc_size; /* Maximum condition chain size [nodes] */
135 unsigned max_branches;
137 unsigned max_unrolled_loop_size; /* [nodes] */
138 bool allow_const_unrolling;
139 bool allow_invar_unrolling;
140 unsigned invar_unrolling_min_size; /* [nodes] */
144 static loop_opt_params_t opt_params;
146 /* Loop analysis information */
147 typedef struct loop_info_t {
148 unsigned nodes; /* node count */
149 unsigned ld_st; /* load and store nodes */
150 unsigned branches; /* number of conditions */
151 unsigned calls; /* number of calls */
152 unsigned cf_outs; /* number of cf edges which leave the loop */
153 entry_edge cf_out; /* single loop leaving cf edge */
154 int be_src_pos; /* position of the single own backedge in the head */
157 unsigned cc_size; /* nodes in the condition chain */
160 unsigned max_unroll; /* Number of unrolls satisfying max_loop_size */
161 unsigned exit_cond; /* 1 if condition==true exits the loop. */
162 unsigned latest_value:1; /* 1 if condition is checked against latest counter value */
163 unsigned needs_backedge:1; /* 0 if loop is completely unrolled */
164 unsigned decreasing:1; /* Step operation is_Sub, or step is<0 */
166 /* IV information of a simple loop */
170 ir_node *iteration_phi;
173 ir_tarval *count_tar; /* Number of loop iterations */
175 ir_node *duff_cond; /* Duff mod */
176 unrolling_kind_flag unroll_kind; /* constant or invariant unrolling */
179 /* Information about the current loop */
180 static loop_info_t loop_info;
182 /* Outs of the condition chain (loop inversion). */
183 static ir_node **cc_blocks;
184 /* df/cf edges with def in the condition chain */
185 static entry_edge *cond_chain_entries;
186 /* Array of df loops found in the condition chain. */
187 static entry_edge *head_df_loop;
188 /* Number of blocks in cc */
189 static unsigned inversion_blocks_in_cc;
192 /* Cf/df edges leaving the loop.
193 * Called entries here, as they are used to enter the loop with walkers. */
194 static entry_edge *loop_entries;
195 /* Number of unrolls to perform */
196 static int unroll_nr;
197 /* Phase is used to keep copies of nodes. */
198 static ir_nodemap map;
199 static struct obstack obst;
201 /* Loop operations. */
202 typedef enum loop_op_t {
208 /* Saves which loop operation to do until after basic tests. */
209 static loop_op_t loop_op;
211 /* Returns the maximum nodes for the given nest depth */
/* Returns max_loop_size scaled by (1 + depth_adaption/100)^depth, i.e. the
 * node budget grows (or shrinks, for negative adaption) with loop nest depth. */
212 static unsigned get_max_nodes_adapted(unsigned depth)
214 double perc = 100.0 + (double)opt_params.depth_adaption;
215 double factor = pow(perc / 100.0, depth);
/* NOTE(review): cast to int then implicit conversion to unsigned — relies on
 * the product being non-negative and in range. */
217 return (int)((double)opt_params.max_loop_size * factor);
220 /* Reset nodes link. For use with a walker. */
/* Walker callback: clears the link field of every visited node. */
221 static void reset_link(ir_node *node, void *env)
224 set_irn_link(node, NULL);
227 /* Returns 0 if the node or block is not in cur_loop. */
/* True iff the node's block (or the node itself, if it is a block)
 * belongs to the currently processed loop 'cur_loop'. */
228 static bool is_in_loop(const ir_node *node)
230 return get_irn_loop(get_block_const(node)) == cur_loop;
233 /* Returns 0 if the given edge is not a backedge
234 * with its pred in the cur_loop. */
/* True iff pred at 'pos' of n is a backedge whose source lies in cur_loop
 * (i.e. not an alien backedge from another loop). */
235 static bool is_own_backedge(const ir_node *n, int pos)
237 return is_backedge(n, pos) && is_in_loop(get_irn_n(n, pos));
240 /* Finds loop head and some loop_info as calls or else if necessary. */
/* Walker: accumulates per-loop statistics (node/call/branch counts,
 * loop-leaving cf edges) into loop_info and tries to identify the loop head.
 * Invalidates loop_head_valid if more than one head candidate is found. */
241 static void get_loop_info(ir_node *node, void *env)
243 bool node_in_loop = is_in_loop(node);
247 /* collect some loop information */
/* Phis/Projs/Confirms/Consts are only counted if the corresponding
 * opt_params flag requests it (they are usually "free" nodes). */
249 if (is_Phi(node) && opt_params.count_phi)
251 else if (is_Proj(node) && opt_params.count_proj)
253 else if (!is_Confirm(node) && !is_Const(node) && !is_SymConst(node))
256 if (is_Load(node) || is_Store(node))
263 arity = get_irn_arity(node);
264 for (i = 0; i < arity; i++) {
265 ir_node *pred = get_irn_n(node, i);
266 bool pred_in_loop = is_in_loop(pred);
/* A block outside the loop with a pred inside: a cf edge leaving the loop. */
268 if (is_Block(node) && !node_in_loop && pred_in_loop) {
275 loop_info.cf_out = entry;
278 /* Find the loops head/the blocks with cfpred outside of the loop */
279 if (is_Block(node)) {
282 /* Count innerloop branches */
283 foreach_out_edge_kind(node, edge, EDGE_KIND_BLOCK) {
284 ir_node *succ = get_edge_src_irn(edge);
285 if (is_Block(succ) && is_in_loop(succ))
289 ++loop_info.branches;
/* Block inside the loop with an outside pred: head candidate. */
291 if (node_in_loop && !pred_in_loop && loop_head_valid) {
292 ir_node *cfgpred = get_Block_cfgpred(node, i);
294 if (!is_in_loop(cfgpred)) {
295 DB((dbg, LEVEL_5, "potential head %+F because inloop and pred %+F not inloop\n",
297 /* another head? We do not touch this. */
298 if (loop_head && loop_head != node) {
299 loop_head_valid = false;
309 /* Finds all edges with users outside of the loop
310 * and definition inside the loop. */
/* Walker: collects every (node, pos) edge whose pred is defined inside
 * cur_loop while the node itself lies outside, into 'loop_entries'. */
311 static void get_loop_entries(ir_node *node, void *env)
313 unsigned node_in_loop, pred_in_loop;
317 arity = get_irn_arity(node);
318 for (i = 0; i < arity; ++i) {
319 ir_node *pred = get_irn_n(node, i);
321 pred_in_loop = is_in_loop(pred);
322 node_in_loop = is_in_loop(node);
324 if (pred_in_loop && !node_in_loop) {
329 ARR_APP1(entry_edge, loop_entries, entry);
335 static ir_node *ssa_second_def;
336 static ir_node *ssa_second_def_block;
339 * Walks the graph bottom up, searching for definitions and creates phis.
/* Walks the CFG bottom-up from 'block', searching for the reaching definition
 * of the value being SSA-reconstructed; creates Phi nodes where paths merge.
 * 'first' distinguishes the initial call (user block) from recursive calls.
 * Uses the visited flag plus the block link field as a memo of found values. */
341 static ir_node *search_def_and_create_phis(ir_node *block, ir_mode *mode, int first)
345 ir_graph *irg = get_irn_irg(block);
349 DB((dbg, LEVEL_5, "ssa search_def_and_create_phis: block %N\n", block));
351 /* Prevents creation of phi that would be bad anyway.
352 * Dead and bad blocks. */
353 if (get_irn_arity(block) < 1 || is_Bad(block)) {
354 DB((dbg, LEVEL_5, "ssa bad %N\n", block));
355 return new_r_Bad(irg, mode);
/* On re-reaching the block holding the second definition (and not on the
 * initial call) the second value is the reaching definition. */
358 if (block == ssa_second_def_block && !first) {
359 DB((dbg, LEVEL_5, "ssa found second definition: use second def %N\n", ssa_second_def));
360 return ssa_second_def;
363 /* already processed this block? */
364 if (irn_visited(block)) {
365 ir_node *value = (ir_node *) get_irn_link(block);
366 DB((dbg, LEVEL_5, "ssa already visited: use linked %N\n", value));
370 assert(block != get_irg_start_block(irg));
372 /* a Block with only 1 predecessor needs no Phi */
373 n_cfgpreds = get_Block_n_cfgpreds(block);
374 if (n_cfgpreds == 1) {
375 ir_node *pred_block = get_Block_cfgpred_block(block, 0);
378 DB((dbg, LEVEL_5, "ssa 1 pred: walk pred %N\n", pred_block));
380 value = search_def_and_create_phis(pred_block, mode, 0);
381 set_irn_link(block, value);
382 mark_irn_visited(block);
387 /* create a new Phi */
/* Dummy ins first: real preds are filled in below, after recursing, to
 * break cycles through the loop. */
388 NEW_ARR_A(ir_node*, in, n_cfgpreds);
389 for (i = 0; i < n_cfgpreds; ++i)
390 in[i] = new_r_Dummy(irg, mode);
392 phi = new_r_Phi(block, n_cfgpreds, in, mode);
393 /* Important: always keep block phi list up to date. */
394 add_Block_phi(block, phi);
395 DB((dbg, LEVEL_5, "ssa phi creation: link new phi %N to block %N\n", phi, block));
396 set_irn_link(block, phi);
397 mark_irn_visited(block);
399 /* set Phi predecessors */
400 for (i = 0; i < n_cfgpreds; ++i) {
402 ir_node *pred_block = get_Block_cfgpred_block(block, i);
403 assert(pred_block != NULL);
404 pred_val = search_def_and_create_phis(pred_block, mode, 0);
406 assert(pred_val != NULL);
408 DB((dbg, LEVEL_5, "ssa phi pred:phi %N, pred %N\n", phi, pred_val));
409 set_irn_n(phi, i, pred_val);
417 * Given a set of values this function constructs SSA-form for the users of the
418 * first value (the users are determined through the out-edges of the value).
419 * Works without using the dominance tree.
/* Rewires every user of orig_val so it sees the correct one of the two
 * definitions (orig_val in orig_block, second_val in second_block), inserting
 * Phis as needed via search_def_and_create_phis. Works without dominance info.
 * No-op when both values are identical. */
421 static void construct_ssa(ir_node *orig_block, ir_node *orig_val,
422 ir_node *second_block, ir_node *second_val)
427 assert(orig_block && orig_val && second_block && second_val &&
428 "no parameter of construct_ssa may be NULL");
430 if (orig_val == second_val)
433 irg = get_irn_irg(orig_val);
435 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
436 inc_irg_visited(irg);
438 mode = get_irn_mode(orig_val);
/* Seed the search: orig_block is known to hold orig_val. */
439 set_irn_link(orig_block, orig_val);
440 mark_irn_visited(orig_block);
442 ssa_second_def_block = second_block;
443 ssa_second_def = second_val;
445 /* Only fix the users of the first, i.e. the original node */
446 foreach_out_edge_safe(orig_val, edge) {
447 ir_node *user = get_edge_src_irn(edge);
448 int j = get_edge_src_pos(edge);
449 ir_node *user_block = get_nodes_block(user);
456 DB((dbg, LEVEL_5, "original user %N\n", user));
/* Phi users look up the definition through the matching cfg pred block. */
459 ir_node *pred_block = get_Block_cfgpred_block(user_block, j);
460 newval = search_def_and_create_phis(pred_block, mode, 1);
462 newval = search_def_and_create_phis(user_block, mode, 1);
464 if (newval != user && !is_Bad(newval))
465 set_irn_n(user, j, newval);
468 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
472 /***** Unrolling Helper Functions *****/
474 /* Assign the copy with index nr to node n */
/* Registers 'cp' as the copy number 'nr' of node n in the nodemap,
 * lazily allocating the per-node copies array on the obstack.
 * Index 0 is reserved (the original node itself). */
475 static void set_unroll_copy(ir_node *n, int nr, ir_node *cp)
477 unrolling_node_info *info;
478 assert(nr != 0 && "0 reserved");
480 info = ir_nodemap_get(unrolling_node_info, &map, n);
482 ir_node **const arr = NEW_ARR_DZ(ir_node*, &obst, unroll_nr);
484 info = OALLOCZ(&obst, unrolling_node_info);
486 ir_nodemap_insert(&map, n, info);
491 info->copies[nr] = cp;
494 /* Returns a nodes copy if it exists, else NULL. */
/* Returns copy number 'nr' of node n, or NULL if none was registered. */
495 static ir_node *get_unroll_copy(ir_node *n, int nr)
498 unrolling_node_info *info = ir_nodemap_get(unrolling_node_info, &map, n);
502 cp = info->copies[nr];
507 /***** Inversion Helper Functions *****/
509 /* Sets copy cp of node n. */
/* Inversion keeps exactly one copy per node: map n -> cp directly. */
510 static void set_inversion_copy(ir_node *n, ir_node *cp)
512 ir_nodemap_insert(&map, n, cp);
515 /* Getter of copy of n for inversion */
/* Returns the inversion copy of n (NULL if the node was not copied). */
516 static ir_node *get_inversion_copy(ir_node *n)
518 ir_node *cp = ir_nodemap_get(ir_node, &map, n);
522 /* Resets block mark for given node. For use with walker */
/* Walker callback: clears the block mark (condition-chain flag). */
523 static void reset_block_mark(ir_node *node, void * env)
528 set_Block_mark(node, 0);
531 /* Returns mark of node, or its block if node is not a block.
532 * Used in this context to determine if node is in the condition chain. */
/* True iff the node's block is marked, i.e. belongs to the condition chain. */
533 static bool is_nodes_block_marked(const ir_node* node)
535 return get_Block_mark(get_block_const(node));
538 /* Extends a nodes ins by node new.
539 * NOTE: This is slow if a node n needs to be extended more than once. */
/* Appends 'newnode' as an additional predecessor of n, preserving the
 * backedge flags of the existing ins (set_irn_in would lose them).
 * O(arity) per call, hence slow when used repeatedly on the same node. */
540 static void extend_irn(ir_node *n, ir_node *newnode, bool new_is_backedge)
543 int arity = get_irn_arity(n);
544 int new_arity = arity + 1;
545 ir_node **ins = XMALLOCN(ir_node*, new_arity);
546 bool *bes = XMALLOCN(bool, new_arity);
549 /* Bes are important!
550 * Another way would be recreating the looptree,
551 * but after that we cannot distinguish already processed loops
552 * from not yet processed ones. */
/* Snapshot backedge flags before set_irn_in resets them. */
554 for(i = 0; i < arity; ++i) {
555 bes[i] = is_backedge(n, i);
557 bes[i] = new_is_backedge;
560 for(i = 0; i < arity; ++i) {
561 ins[i] = get_irn_n(n, i);
565 set_irn_in(n, new_arity, ins);
/* Re-apply the saved backedge flags on the widened in-array. */
569 for(i = 0; i < new_arity; ++i) {
578 /* Extends a block by a copy of its pred at pos,
579 * fixing also the phis in the same way. */
/* Appends the inversion copy of block's pred at 'pos' as a new cfg pred,
 * and extends all phis of the block analogously with the copies of their
 * corresponding data preds. */
580 static void extend_ins_by_copy(ir_node *block, int pos)
585 /* Extend block by copy of definition at pos */
586 ir_node *const pred = get_Block_cfgpred(block, pos);
587 new_in = get_inversion_copy(pred);
588 DB((dbg, LEVEL_5, "Extend block %N by %N cp of %N\n", block, new_in, pred));
589 extend_irn(block, new_in, false);
591 /* Extend block phis by copy of definition at pos */
592 for_each_phi(block, phi) {
595 pred = get_irn_n(phi, pos);
596 cp = get_inversion_copy(pred);
597 /* If the phis in is not in the condition chain (eg. a constant),
598 * there is no copy. */
604 DB((dbg, LEVEL_5, "Extend phi %N by %N cp of %N\n", phi, new_in, pred));
605 extend_irn(phi, new_in, false);
609 /* Returns the number of blocks backedges. With or without alien bes. */
/* Counts the backedges of 'block'; with_alien=false restricts the count to
 * backedges whose source lies in cur_loop. */
610 static int get_backedge_n(ir_node *block, bool with_alien)
613 int const arity = get_Block_n_cfgpreds(block);
614 for (int i = 0; i < arity; ++i) {
615 ir_node *const pred = get_Block_cfgpred(block, i);
616 if (is_backedge(block, i) && (with_alien || is_in_loop(pred)))
622 /* Returns a raw copy of the given node.
623 * Attributes are kept/set according to the needs of loop inversion. */
/* Returns an exact copy of 'node', preserving backedge flags and clearing
 * the block mark on the copy (a copy is never part of the condition chain). */
624 static ir_node *copy_node(ir_node *node)
629 cp = exact_copy(node);
630 arity = get_irn_arity(node);
632 /* Keep backedge info */
633 for (i = 0; i < arity; ++i) {
634 if (is_backedge(node, i))
639 set_Block_mark(cp, 0);
647 * This walker copies all walked nodes.
648 * If the walk_condition is true for a node, it is copied.
649 * All nodes node_info->copy have to be NULL prior to every walk.
650 * Order of ins is important for later usage.
/* Recursive copy walker for loop inversion: copies every node for which
 * walk_condition holds, wiring the copies' ins to the copies of the preds.
 * Cycles are broken by creating temporary copies for already-visited nodes
 * and finalizing their ins when the walk returns to them. */
652 static void copy_walk(ir_node *node, walker_condition *walk_condition,
661 * break condition and cycle resolver, creating temporary node copies
663 if (irn_visited(node)) {
664 /* Here we rely on nodestate's copy being initialized with NULL */
665 DB((dbg, LEVEL_5, "copy_walk: We have already visited %N\n", node));
666 if (get_inversion_copy(node) == NULL) {
667 cp = copy_node(node);
668 set_inversion_copy(node, cp);
670 DB((dbg, LEVEL_5, "The TEMP copy of %N is created %N\n", node, cp));
676 mark_irn_visited(node);
/* Visit the owning block first so the copy can be placed into it below. */
678 if (!is_Block(node)) {
679 ir_node *pred = get_nodes_block(node);
680 if (walk_condition(pred))
681 DB((dbg, LEVEL_5, "walk block %N\n", pred));
682 copy_walk(pred, walk_condition, set_loop);
685 arity = get_irn_arity(node);
687 NEW_ARR_A(ir_node *, cpin, arity);
689 for (i = 0; i < arity; ++i) {
690 ir_node *pred = get_irn_n(node, i);
692 if (walk_condition(pred)) {
693 DB((dbg, LEVEL_5, "walk node %N\n", pred));
694 copy_walk(pred, walk_condition, set_loop);
695 cpin[i] = get_inversion_copy(pred);
696 DB((dbg, LEVEL_5, "copy of %N gets new in %N which is copy of %N\n",
697 node, get_inversion_copy(pred), pred));
703 /* copy node / finalize temp node */
704 if (get_inversion_copy(node) == NULL) {
705 /* No temporary copy existent */
706 cp = copy_node(node);
707 set_inversion_copy(node, cp);
708 DB((dbg, LEVEL_5, "The FINAL copy of %N is CREATED %N\n", node, cp));
710 /* temporary copy is existent but without correct ins */
711 cp = get_inversion_copy(node);
712 DB((dbg, LEVEL_5, "The FINAL copy of %N is EXISTENT %N\n", node, cp));
715 if (!is_Block(node)) {
716 ir_node *cpblock = get_inversion_copy(get_nodes_block(node));
718 set_nodes_block(cp, cpblock );
/* Keep the copied block's phi list consistent for phi copies. */
720 add_Block_phi(cpblock, cp);
723 /* Keeps phi list of temporary node. */
724 set_irn_in(cp, ARR_LEN(cpin), cpin);
728 * This walker copies all walked nodes.
729 * If the walk_condition is true for a node, it is copied.
730 * All nodes node_info->copy have to be NULL prior to every walk.
731 * Order of ins is important for later usage.
732 * Takes copy_index, to phase-link copy at specific index.
/* Unrolling variant of copy_walk: creates copy number 'copy_index' of every
 * node satisfying walk_condition. Temporary placeholders are Unknown nodes
 * (instead of node copies) and are replaced once the ins are known. */
734 static void copy_walk_n(ir_node *node, walker_condition *walk_condition,
743 * break condition and cycle resolver, creating temporary node copies
745 if (irn_visited(node)) {
746 /* Here we rely on nodestate's copy being initialized with NULL */
747 DB((dbg, LEVEL_5, "copy_walk: We have already visited %N\n", node));
748 if (get_unroll_copy(node, copy_index) == NULL) {
751 set_unroll_copy(node, copy_index, u);
752 DB((dbg, LEVEL_5, "The TEMP unknown of %N is created %N\n", node, u));
758 mark_irn_visited(node);
760 if (!is_Block(node)) {
761 ir_node *block = get_nodes_block(node);
762 if (walk_condition(block))
763 DB((dbg, LEVEL_5, "walk block %N\n", block));
764 copy_walk_n(block, walk_condition, copy_index);
767 arity = get_irn_arity(node);
768 NEW_ARR_A(ir_node *, cpin, arity);
770 for (i = 0; i < arity; ++i) {
771 ir_node *pred = get_irn_n(node, i);
773 if (walk_condition(pred)) {
774 DB((dbg, LEVEL_5, "walk node %N\n", pred));
775 copy_walk_n(pred, walk_condition, copy_index);
776 cpin[i] = get_unroll_copy(pred, copy_index);
782 /* copy node / finalize temp node */
783 cp = get_unroll_copy(node, copy_index);
/* An Unknown placeholder counts as "no real copy yet". */
784 if (cp == NULL || is_Unknown(cp)) {
785 cp = copy_node(node);
786 set_unroll_copy(node, copy_index, cp);
787 DB((dbg, LEVEL_5, "The FINAL copy of %N is CREATED %N\n", node, cp));
789 /* temporary copy is existent but without correct ins */
790 cp = get_unroll_copy(node, copy_index);
791 DB((dbg, LEVEL_5, "The FINAL copy of %N is EXISTENT %N\n", node, cp));
794 if (!is_Block(node)) {
795 ir_node *cpblock = get_unroll_copy(get_nodes_block(node), copy_index);
797 set_nodes_block(cp, cpblock );
799 add_Block_phi(cpblock, cp);
802 /* Keeps phi list of temporary node. */
803 set_irn_in(cp, ARR_LEN(cpin), cpin);
806 /* Removes all blocks with non-marked predecessors from the condition chain. */
/* Drops from the condition chain every block (except the head) that has at
 * least one unmarked predecessor, updating inversion_blocks_in_cc. */
807 static void unmark_not_allowed_cc_blocks(void)
809 size_t blocks = ARR_LEN(cc_blocks);
812 for(i = 0; i < blocks; ++i) {
813 ir_node *block = cc_blocks[i];
815 /* Head is an exception. */
816 if (block == loop_head)
819 int const arity = get_Block_n_cfgpreds(block);
820 for (int a = 0; a < arity; ++a) {
821 if (!is_nodes_block_marked(get_Block_cfgpred(block, a))) {
822 set_Block_mark(block, 0);
823 --inversion_blocks_in_cc;
824 DB((dbg, LEVEL_5, "Removed %N from cc (blocks in cc %d)\n",
825 block, inversion_blocks_in_cc));
833 /* Unmarks all cc blocks using cc_blocks except head.
834 * TODO: invert head for unrolling? */
/* Unmarks every condition-chain block and resets the cc bookkeeping
 * (inversion_blocks_in_cc, loop_info.cc_size). */
835 static void unmark_cc_blocks(void)
837 size_t blocks = ARR_LEN(cc_blocks);
840 for(i = 0; i < blocks; ++i) {
841 ir_node *block = cc_blocks[i];
843 /* TODO Head is an exception. */
844 /*if (block != loop_head)*/
845 set_Block_mark(block, 0);
847 /*inversion_blocks_in_cc = 1;*/
848 inversion_blocks_in_cc = 0;
851 loop_info.cc_size = 0;
855 * Populates head_entries with (node, pred_pos) tuple
856 * whereas the node's pred at pred_pos is in the cc but not the node itself.
857 * Also finds df loops inside the cc.
858 * Head and condition chain blocks have been marked previously.
/* Walker: fills cur_head_outs with edges whose pred lies inside the marked
 * condition chain while the node itself does not, and fills head_df_loop with
 * dataflow loops (phi backedges of loop_head that stay within the cc). */
860 static void get_head_outs(ir_node *node, void *env)
863 int arity = get_irn_arity(node);
866 for (i = 0; i < arity; ++i) {
867 if (!is_nodes_block_marked(node) && is_nodes_block_marked(get_irn_n(node, i))) {
871 /* Saving also predecessor seems redundant, but becomes
872 * necessary when changing position of it, before
873 * dereferencing it.*/
874 entry.pred = get_irn_n(node, i);
875 ARR_APP1(entry_edge, cur_head_outs, entry);
879 arity = get_irn_arity(loop_head);
881 /* Find df loops inside the cc */
882 if (is_Phi(node) && get_nodes_block(node) == loop_head) {
883 for (i = 0; i < arity; ++i) {
884 if (is_own_backedge(loop_head, i)) {
885 if (is_nodes_block_marked(get_irn_n(node, i))) {
889 entry.pred = get_irn_n(node, i);
890 ARR_APP1(entry_edge, head_df_loop, entry);
891 DB((dbg, LEVEL_5, "Found incc assignment node %N @%d is pred %N, graph %N %N\n",
892 node, i, entry.pred, current_ir_graph, get_irg_start_block(current_ir_graph)));
900 * Find condition chains, and add them to be inverted.
901 * A block belongs to the chain if a condition branches out of the loop.
902 * (Some blocks need to be removed once again.)
903 * Returns 1 if the given block belongs to the condition chain.
/* Depth-first search from the loop head marking the condition chain: blocks
 * that either branch out of the loop or contain only a Jmp. Respects the
 * max_cc_size budget and refuses blocks with backedge outs (those would
 * create a new inner loop after inversion). */
905 static void find_condition_chain(ir_node *block)
909 bool jmp_only = true;
910 unsigned nodes_n = 0;
912 mark_irn_visited(block);
914 DB((dbg, LEVEL_5, "condition_chains for block %N\n", block));
/* Estimate the block's node count via its out edges. */
917 foreach_out_edge_kind(block, edge, EDGE_KIND_NORMAL) {
921 /* Check if node count would exceed maximum cc size.
923 * This is not optimal, as we search depth-first and break here,
924 * continuing with another subtree. */
925 if (loop_info.cc_size + nodes_n > opt_params.max_cc_size) {
926 set_Block_mark(block, 0);
930 /* Check if block only has a jmp instruction. */
931 foreach_out_edge(block, edge) {
932 ir_node *src = get_edge_src_irn(edge);
934 if (!is_Block(src) && !is_Jmp(src)) {
939 /* Check cf outs if one is leaving the loop,
940 * or if this node has a backedge. */
941 foreach_block_succ(block, edge) {
942 ir_node *src = get_edge_src_irn(edge);
943 int pos = get_edge_src_pos(edge);
945 if (!is_in_loop(src))
948 /* Inverting blocks with backedge outs leads to a cf edge
949 * from the inverted head, into the inverted head (skipping the body).
950 * As the body becomes the new loop head,
951 * this would introduce another loop in the existing loop.
952 * This loop inversion cannot cope with this case. */
953 if (is_backedge(src, pos)) {
959 /* We need all predecessors to already belong to the condition chain.
960 * Example of wrong case: * == in cc
971 /* Collect blocks containing only a Jmp.
972 * Do not collect blocks with backedge outs. */
973 if ((jmp_only || mark) && !has_be) {
974 set_Block_mark(block, 1);
975 ++inversion_blocks_in_cc;
976 loop_info.cc_size += nodes_n;
977 DB((dbg, LEVEL_5, "block %N is part of condition chain\n", block));
978 ARR_APP1(ir_node *, cc_blocks, block);
980 set_Block_mark(block, 0);
/* Recurse into unvisited in-loop successors. */
983 foreach_block_succ(block, edge) {
984 ir_node *src = get_edge_src_irn( edge );
986 if (is_in_loop(src) && ! irn_visited(src))
987 find_condition_chain(src);
992 * Rewires the copied condition chain. Removes backedges
993 * as this condition chain is prior to the loop.
994 * Copy of loop_head must have phi list and old (unfixed) backedge info of the loop head.
995 * (loop_head is already fixed, we cannot rely on it.)
/* Rebuilds the copied loop head (and its phis) without the backedge ins:
 * the copied condition chain is placed before the loop, so it must not be a
 * loop head anymore. Exchanges the old copy for the backedge-free block. */
997 static void fix_copy_inversion(void)
1002 ir_node *phi, *next;
1003 ir_node *head_cp = get_inversion_copy(loop_head);
1004 ir_graph *irg = get_irn_irg(head_cp);
1005 int arity = get_irn_arity(head_cp);
1006 int backedges = get_backedge_n(head_cp, false);
1007 int new_arity = arity - backedges;
1011 NEW_ARR_A(ir_node *, ins, new_arity);
1014 /* Remove block backedges */
1015 for(i = 0; i < arity; ++i) {
1016 if (!is_backedge(head_cp, i))
1017 ins[pos++] = get_irn_n(head_cp, i);
1020 new_head = new_r_Block(irg, new_arity, ins);
1022 phis = NEW_ARR_F(ir_node *, 0);
/* Rebuild each phi with only the non-backedge operands, in the new block. */
1024 for_each_phi_safe(get_Block_phis(head_cp), phi, next) {
1026 NEW_ARR_A(ir_node *, ins, new_arity);
1028 for(i = 0; i < arity; ++i) {
1029 if (!is_backedge(head_cp, i))
1030 ins[pos++] = get_irn_n(phi, i);
1032 new_phi = new_rd_Phi(get_irn_dbg_info(phi),
1033 new_head, new_arity, ins,
1035 ARR_APP1(ir_node *, phis, new_phi);
1039 for_each_phi_safe(get_Block_phis(head_cp), phi, next) {
1040 exchange(phi, phis[pos++]);
1043 exchange(head_cp, new_head);
1049 /* Puts the original condition chain at the end of the loop,
1050 * subsequently to the body.
1051 * Relies on block phi list and correct backedges.
/* Rebuilds the ORIGINAL loop head keeping only its own backedges: after
 * inversion the original condition chain follows the body, so the non-backedge
 * (entry) ins are dropped. Phis are rebuilt accordingly and exchanged. */
1053 static void fix_head_inversion(void)
1057 ir_node *phi, *next;
1059 ir_graph *irg = get_irn_irg(loop_head);
1060 int arity = get_irn_arity(loop_head);
1061 int backedges = get_backedge_n(loop_head, false);
1062 int new_arity = backedges;
1066 NEW_ARR_A(ir_node *, ins, new_arity);
1069 /* Keep only backedges */
1070 for(i = 0; i < arity; ++i) {
1071 if (is_own_backedge(loop_head, i))
1072 ins[pos++] = get_irn_n(loop_head, i);
1075 new_head = new_r_Block(irg, new_arity, ins);
1077 phis = NEW_ARR_F(ir_node *, 0);
1079 for_each_phi(loop_head, phi) {
1081 DB((dbg, LEVEL_5, "Fixing phi %N of loop head\n", phi));
1083 NEW_ARR_A(ir_node *, ins, new_arity);
1086 for (i = 0; i < arity; ++i) {
1087 ir_node *pred = get_irn_n(phi, i);
1089 if (is_own_backedge(loop_head, i)) {
1090 /* If assignment is in the condition chain,
1091 * we need to create a phi in the new loop head.
1092 * This can only happen for df, not cf. See find_condition_chains. */
1093 /*if (is_nodes_block_marked(pred)) {
1101 new_phi = new_rd_Phi(get_irn_dbg_info(phi),
1102 new_head, new_arity, ins,
1105 ARR_APP1(ir_node *, phis, new_phi);
1107 DB((dbg, LEVEL_5, "fix inverted head should exch %N by %N (pos %d)\n", phi, new_phi, pos ));
1111 for_each_phi_safe(get_Block_phis(loop_head), phi, next) {
1112 DB((dbg, LEVEL_5, "fix inverted exch phi %N by %N\n", phi, phis[pos]));
1113 if (phis[pos] != phi)
1114 exchange(phi, phis[pos++]);
1119 DB((dbg, LEVEL_5, "fix inverted head exch head block %N by %N\n", loop_head, new_head));
1120 exchange(loop_head, new_head);
1123 /* Does the loop inversion. */
/* Performs the actual loop inversion, bottom-up in five steps:
 * clone the cc, rewire the cc's cf successors, rebuild SSA for values defined
 * in the cc, then fix the original and the copied head. The step order is
 * mandatory — each step consumes information the previous one leaves intact. */
1124 static void inversion_walk(ir_graph *irg, entry_edge *head_entries)
1129 * The order of rewiring bottom-up is crucial.
1130 * Any change of the order leads to lost information that would be needed later.
1133 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
1135 /* 1. clone condition chain */
1136 inc_irg_visited(irg);
1138 for (i = 0; i < ARR_LEN(head_entries); ++i) {
1139 entry_edge entry = head_entries[i];
1140 ir_node *pred = get_irn_n(entry.node, entry.pos);
1142 DB((dbg, LEVEL_5, "\nInit walk block %N\n", pred));
1144 copy_walk(pred, is_nodes_block_marked, cur_loop);
1147 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
1149 /* 2. Extends the head control flow successors ins
1150 * with the definitions of the copied head node. */
1151 for (i = 0; i < ARR_LEN(head_entries); ++i) {
1152 entry_edge head_out = head_entries[i];
1154 if (is_Block(head_out.node))
1155 extend_ins_by_copy(head_out.node, head_out.pos);
1158 /* 3. construct_ssa for users of definitions in the condition chain,
1159 * as there is now a second definition. */
1160 for (i = 0; i < ARR_LEN(head_entries); ++i) {
1161 entry_edge head_out = head_entries[i];
1163 /* Ignore keepalives */
1164 if (is_End(head_out.node))
1167 /* Construct ssa for assignments in the condition chain. */
1168 if (!is_Block(head_out.node)) {
1169 ir_node *pred, *cppred, *block, *cpblock;
1171 pred = head_out.pred;
1172 cppred = get_inversion_copy(pred);
1173 block = get_nodes_block(pred);
1174 cpblock = get_nodes_block(cppred);
1175 construct_ssa(block, pred, cpblock, cppred);
1180 * If there is an assignment in the condition chain
1181 * with a user also in the condition chain,
1182 * the dominance frontier is in the new loop head.
1183 * The dataflow loop is completely in the condition chain.
1188 * Phi_cp | | copied condition chain
1192 * Phi* | | new loop head with newly created phi.
1194 * Phi | | original, inverted condition chain
/* Same SSA reconstruction for the df loops collected by get_head_outs. */
1199 for (i = 0; i < ARR_LEN(head_df_loop); ++i) {
1200 entry_edge head_out = head_df_loop[i];
1202 /* Construct ssa for assignments in the condition chain. */
1203 ir_node *pred, *cppred, *block, *cpblock;
1205 pred = head_out.pred;
1206 cppred = get_inversion_copy(pred);
1207 assert(cppred && pred);
1208 block = get_nodes_block(pred);
1209 cpblock = get_nodes_block(cppred);
1210 construct_ssa(block, pred, cpblock, cppred);
1213 /* 4. Remove the ins which are no backedges from the original condition chain
1214 * as the cc is now subsequent to the body. */
1215 fix_head_inversion();
1217 /* 5. Remove the backedges of the copied condition chain,
1218 * because it is going to be the new 'head' in advance to the loop. */
1219 fix_copy_inversion();
1223 /* Performs loop inversion of cur_loop if possible and reasonable. */
/* Entry point for inverting cur_loop: checks the size/call limits, finds and
 * trims the condition chain, collects the edges into it, runs inversion_walk,
 * and invalidates dominance/loopinfo afterwards. Bails out (via the stats
 * counters) whenever a limit is exceeded or no invertible cc exists. */
1224 static void loop_inversion(ir_graph *irg)
1227 unsigned max_loop_nodes = opt_params.max_loop_size;
1228 unsigned max_loop_nodes_adapted;
1229 int depth_adaption = opt_params.depth_adaption;
1231 bool do_inversion = true;
1233 /* Depth of 0 is the procedure and 1 a topmost loop. */
1234 loop_depth = get_loop_depth(cur_loop) - 1;
1236 /* Calculating in per mil. */
1237 max_loop_nodes_adapted = get_max_nodes_adapted(loop_depth);
1239 DB((dbg, LEVEL_1, "max_nodes: %d\nmax_nodes_adapted %d at depth of %d (adaption %d)\n",
1240 max_loop_nodes, max_loop_nodes_adapted, loop_depth, depth_adaption));
1242 if (loop_info.nodes == 0)
1245 if (loop_info.nodes > max_loop_nodes) {
1246 /* Only for stats */
1247 DB((dbg, LEVEL_1, "Nodes %d > allowed nodes %d\n",
1248 loop_info.nodes, loop_depth, max_loop_nodes));
1251 /* Adaption might change it */
1254 /* Limit processing to loops smaller than given parameter. */
1255 if (loop_info.nodes > max_loop_nodes_adapted) {
1256 DB((dbg, LEVEL_1, "Nodes %d > allowed nodes (depth %d adapted) %d\n",
1257 loop_info.nodes, loop_depth, max_loop_nodes_adapted));
1258 ++stats.too_large_adapted;
1262 if (loop_info.calls > opt_params.allowed_calls) {
1263 DB((dbg, LEVEL_1, "Calls %d > allowed calls %d\n",
1264 loop_info.calls, opt_params.allowed_calls));
1265 ++stats.calls_limit;
1269 /*inversion_head_node_limit = INT_MAX;*/
1270 ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK);
1272 /* Reset block marks.
1273 * We use block marks to flag blocks of the original condition chain. */
1274 irg_walk_graph(irg, reset_block_mark, NULL, NULL);
1276 /*loop_info.blocks = get_loop_n_blocks(cur_loop);*/
1277 cond_chain_entries = NEW_ARR_F(entry_edge, 0);
1278 head_df_loop = NEW_ARR_F(entry_edge, 0);
1280 /*head_inversion_node_count = 0;*/
1281 inversion_blocks_in_cc = 0;
1283 /* Use phase to keep copy of nodes from the condition chain. */
1284 ir_nodemap_init(&map, irg);
1285 obstack_init(&obst);
1287 /* Search for condition chains and temporarily save the blocks in an array. */
1288 cc_blocks = NEW_ARR_F(ir_node *, 0);
1289 inc_irg_visited(irg);
1290 find_condition_chain(loop_head);
1292 unmark_not_allowed_cc_blocks();
1293 DEL_ARR_F(cc_blocks);
1295 /* Condition chain too large.
1296 * Loop should better be small enough to fit into the cache. */
1297 /* TODO Of course, we should take a small enough cc in the first place,
1298 * which is not that simple. (bin packing) */
1299 if (loop_info.cc_size > opt_params.max_cc_size) {
1300 ++stats.cc_limit_reached;
1302 do_inversion = false;
1304 /* Unmark cc blocks except the head.
1305 * Invert head only for possible unrolling. */
1309 /* We also catch endless loops here,
1310 * because they do not have a condition chain. */
1311 if (inversion_blocks_in_cc < 1) {
1312 do_inversion = false;
1314 "Loop contains %d (less than 1) invertible blocks => No Inversion done.\n",
1315 inversion_blocks_in_cc));
1319 cur_head_outs = NEW_ARR_F(entry_edge, 0);
1321 /* Get all edges pointing into the condition chain. */
1322 irg_walk_graph(irg, get_head_outs, NULL, NULL);
1324 /* Do the inversion */
1325 inversion_walk(irg, cur_head_outs);
1327 DEL_ARR_F(cur_head_outs);
1329 /* Duplicated blocks changed doms */
1330 clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
1331 | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
1337 obstack_free(&obst, NULL);
1338 ir_nodemap_destroy(&map);
1339 DEL_ARR_F(cond_chain_entries);
1340 DEL_ARR_F(head_df_loop);
1342 ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK);
1345 /* Fix the original loop_heads ins for invariant unrolling case. */
1346 static void unrolling_fix_loop_head_inv(void)
/* Rewires the original loop head so its 2 ins are (0) the backedge from the
 * last unrolled copy and (1) the entry coming from duffs device (proj 0 of
 * the duff condition).  The head phis are rewired analogously.
 * NOTE(review): the embedded line numbers in this listing are non-contiguous;
 * several statements (e.g. declarations, ins[1] assignment for the block)
 * are missing here — verify against the original source. */
1350 ir_node *proj = new_Proj(loop_info.duff_cond, mode_X, 0);
1351 ir_node *head_pred = get_irn_n(loop_head, loop_info.be_src_pos);
1352 ir_node *loop_condition = get_unroll_copy(head_pred, unroll_nr - 1);
1354 /* Original loop_heads ins are:
1355 * duff block and the own backedge */
1357 ins[0] = loop_condition;
1359 set_irn_in(loop_head, 2, ins);
1360 DB((dbg, LEVEL_4, "Rewire ins of block loophead %N to pred %N and duffs entry %N \n" , loop_head, ins[0], ins[1]));
/* Each head phi gets: pred from last copy (if defined in the loop) and the
 * clone previously linked onto the phi (see clone_phis_sans_bes linking). */
1362 for_each_phi(loop_head, phi) {
1363 ir_node *pred = get_irn_n(phi, loop_info.be_src_pos);
1364 /* TODO we think it is a phi, but for Mergesort it is not the case.*/
1366 ir_node *last_pred = get_unroll_copy(pred, unroll_nr - 1);
1369 ins[1] = (ir_node*)get_irn_link(phi);
1370 set_irn_in(phi, 2, ins);
1371 DB((dbg, LEVEL_4, "Rewire ins of loophead phi %N to pred %N and duffs entry %N \n" , phi, ins[0], ins[1]));
1375 /* Removes previously created phis with only 1 in. */
/* irg_walk_graph callback: a Phi with arity 1 is degenerate; rebuild it as a
 * fresh 1-in Phi and exchange the old node for it.  env is unused. */
1376 static void correct_phis(ir_node *node, void *env)
1380 if (is_Phi(node) && get_irn_arity(node) == 1) {
1384 in[0] = get_irn_n(node, 0);
/* Keep debug info, block and mode of the original phi. */
1386 exch = new_rd_Phi(get_irn_dbg_info(node),
1387 get_nodes_block(node), 1, in,
1388 get_irn_mode(node));
1390 exchange(node, exch);
1394 /* Unrolling: Rewire floating copies. */
/* Serializes the `copies` floating loop copies created by copy_loop():
 * chains copy c into copy c+1 (blocks and phis), reconnects the loop exits
 * to the last copy, and finally fixes the original head.
 * NOTE(review): listing has gaps in the embedded numbering (missing
 * declarations, else branches, closing braces) — confirm details against
 * the original source. */
1395 static void place_copies(int copies)
1397 ir_node *loophead = loop_head;
1400 int be_src_pos = loop_info.be_src_pos;
1402 /* Serialize loops by fixing their head ins.
1403 * Processed are the copies.
1404 * The original loop is done after that, to keep backedge infos. */
1405 for (c = 0; c < copies; ++c) {
1406 ir_node *upper = get_unroll_copy(loophead, c);
1407 ir_node *lower = get_unroll_copy(loophead, c + 1);
1409 ir_node *topmost_be_block = get_nodes_block(get_irn_n(loophead, be_src_pos));
1411 /* Important: get the preds first and then their copy. */
1412 ir_node *upper_be_block = get_unroll_copy(topmost_be_block, c);
1413 ir_node *new_jmp = new_r_Jmp(upper_be_block);
1414 DB((dbg, LEVEL_5, " place_copies upper %N lower %N\n", upper, lower));
1416 DB((dbg, LEVEL_5, "topmost be block %N \n", topmost_be_block));
/* Constant case: each copy head gets exactly one in (the jmp from the
 * previous copy's backedge block). */
1418 if (loop_info.unroll_kind == constant) {
1421 set_irn_in(lower, 1, ins);
1423 for_each_phi(loophead, phi) {
1424 ir_node *topmost_def = get_irn_n(phi, be_src_pos);
1425 ir_node *upper_def = get_unroll_copy(topmost_def, c);
1426 ir_node *lower_phi = get_unroll_copy(phi, c + 1);
1428 /* It is possible, that the value used
1429 * in the OWN backedge path is NOT defined in this loop. */
1430 if (is_in_loop(topmost_def))
1433 ins[0] = topmost_def;
1435 set_irn_in(lower_phi, 1, ins);
1436 /* Need to replace phis with 1 in later. */
1439 /* Invariant case */
1440 /* Every node has 2 ins. One from the duff blocks
1441 * and one from the previously unrolled loop. */
1443 /* Calculate corresponding projection of mod result for this copy c */
1444 ir_node *proj = new_Proj(loop_info.duff_cond, mode_X, unroll_nr - c - 1);
1445 DB((dbg, LEVEL_4, "New duff proj %N\n" , proj));
1449 set_irn_in(lower, 2, ins);
1450 DB((dbg, LEVEL_4, "Rewire ins of Block %N to pred %N and duffs entry %N \n" , lower, ins[0], ins[1]));
1452 for_each_phi(loophead, phi) {
1453 ir_node *topmost_phi_pred = get_irn_n(phi, be_src_pos);
1454 ir_node *upper_phi_pred;
1458 lower_phi = get_unroll_copy(phi, c + 1);
1459 duff_phi = (ir_node*)get_irn_link(phi);
1460 DB((dbg, LEVEL_4, "DD Link of %N is %N\n" , phi, duff_phi));
/* Value defined inside the loop: take its copy in the previous
 * unrolled iteration; otherwise use the invariant def directly. */
1463 if (is_in_loop(topmost_phi_pred)) {
1464 upper_phi_pred = get_unroll_copy(topmost_phi_pred, c);
1466 upper_phi_pred = topmost_phi_pred;
1469 ins[0] = upper_phi_pred;
1471 set_irn_in(lower_phi, 2, ins);
1472 DB((dbg, LEVEL_4, "Rewire ins of %N to pred %N and duffs entry %N \n" , lower_phi, ins[0], ins[1]));
1477 /* Reconnect last copy. */
1478 for (i = 0; i < ARR_LEN(loop_entries); ++i) {
1479 entry_edge edge = loop_entries[i];
1480 /* Last copy is at the bottom */
1481 ir_node *new_pred = get_unroll_copy(edge.pred, copies);
1482 set_irn_n(edge.node, edge.pos, new_pred);
1485 /* Fix original loops head.
1486 * Done in the end, as ins and be info were needed before. */
1487 if (loop_info.unroll_kind == constant) {
1489 ir_node *head_pred = get_irn_n(loop_head, be_src_pos);
1490 ir_node *loop_condition = get_unroll_copy(head_pred, unroll_nr - 1);
1492 set_irn_n(loop_head, loop_info.be_src_pos, loop_condition);
1494 for_each_phi(loop_head, phi) {
1495 ir_node *pred = get_irn_n(phi, be_src_pos);
1498 /* It is possible, that the value used
1499 * in the OWN backedge path is NOT assigned in this loop. */
1500 if (is_in_loop(pred))
1501 last_pred = get_unroll_copy(pred, copies);
1504 set_irn_n(phi, be_src_pos, last_pred);
/* Invariant unrolling: delegate head fixing to the duff-aware helper. */
1508 unrolling_fix_loop_head_inv();
1512 /* Copies the cur_loop several times. */
/* Walks backwards from every loop-entry edge and clones all loop nodes
 * `copies` times via copy_walk_n; copy c+1 is produced in pass c.
 * Needs IRN_VISITED, reset per pass. */
1513 static void copy_loop(entry_edge *cur_loop_outs, int copies)
1517 ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1519 for (c = 0; c < copies; ++c) {
1522 inc_irg_visited(current_ir_graph);
1524 DB((dbg, LEVEL_5, " ### Copy_loop copy nr: %d ###\n", c));
1525 for (i = 0; i < ARR_LEN(cur_loop_outs); ++i) {
1526 entry_edge entry = cur_loop_outs[i];
1527 ir_node *pred = get_irn_n(entry.node, entry.pos);
/* Only nodes satisfying is_in_loop are copied by the walk. */
1529 copy_walk_n(pred, is_in_loop, c + 1);
1533 ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1537 /* Creates a new phi from the given phi node omitting own bes,
1538 * using be_block as supplier of backedge informations. */
/* The clone lives in dest_block and keeps only the non-own-backedge ins of
 * phi.  The clone is stored on phi's link field for later duff rewiring.
 * NOTE(review): the return statement is outside this gappy listing;
 * presumably returns newphi. */
1539 static ir_node *clone_phis_sans_bes(ir_node *phi, ir_node *be_block, ir_node *dest_block)
1545 int const arity = get_Phi_n_preds(phi);
1546 assert(arity == get_Block_n_cfgpreds(be_block));
1548 ins = NEW_ARR_F(ir_node *, arity);
1549 for (i = 0; i < arity; ++i) {
1550 if (! is_own_backedge(be_block, i)) {
1551 ins[c] = get_irn_n(phi, i);
1556 newphi = new_r_Phi(dest_block, c, ins, get_irn_mode(phi));
1558 set_irn_link(phi, newphi);
1559 DB((dbg, LEVEL_4, "Linking for duffs device %N to %N\n", phi, newphi));
1564 /* Creates a new block from the given block node omitting own bes,
1565 * using be_block as supplier of backedge informations. */
/* Collects node's cfg preds whose index is not an own backedge of be_block
 * and builds a fresh block from them. */
1566 static ir_node *clone_block_sans_bes(ir_node *node, ir_node *be_block)
1571 int const arity = get_Block_n_cfgpreds(node);
1572 assert(arity == get_irn_arity(be_block));
/* ins is stack-allocated (obstack alloca variant); c counts kept preds. */
1574 NEW_ARR_A(ir_node *, ins, arity);
1575 for (i = 0; i < arity; ++i) {
1576 if (! is_own_backedge(be_block, i)) {
1577 ins[c] = get_irn_n(node, i);
1582 return new_Block(c, ins);
1585 /* Creates a structure to calculate absolute value of node op.
1586 * Returns mux node with absolute value. */
/* abs(op) built as: Mux(op < 0 ? -op : op), all in op's block.
 * NOTE(review): the return statement falls outside this gappy listing;
 * presumably returns mux. */
1587 static ir_node *new_Abs(ir_node *op, ir_mode *mode)
1589 ir_graph *irg = get_irn_irg(op);
1590 ir_node *block = get_nodes_block(op);
1591 ir_node *zero = new_r_Const(irg, get_mode_null(mode));
1592 ir_node *cmp = new_r_Cmp(block, op, zero, ir_relation_less);
1593 ir_node *minus_op = new_r_Minus(block, op, mode);
1594 ir_node *mux = new_r_Mux(block, cmp, op, minus_op, mode);
1600 /* Creates blocks for duffs device, using previously obtained
1601 * informations about the iv.
/* Builds three new blocks ahead of the loop:
 *  block1      — preheader computing (end - start) % step and / step
 *  count_block — corrects the division result depending on the remainder
 *  duff_block  — computes count % unroll_nr and ends in the Cond that
 *                selects which unrolled copy to enter (loop_info.duff_cond).
 * NOTE(review): this listing is missing many lines (declarations, some
 * operands of new_r_Mod/new_r_Div, else branches) — verify against the
 * original source before drawing conclusions from details below. */
1603 static void create_duffs_block(void)
1607 ir_node *block1, *count_block, *duff_block;
1608 ir_node *ems, *ems_mod, *ems_div, *ems_mod_proj, *cmp_null,
1609 *ems_mode_cond, *x_true, *x_false, *const_null;
1610 ir_node *true_val, *false_val;
1613 ir_node *duff_mod, *proj, *cond;
1615 ir_node *count, *correction, *unroll_c;
1616 ir_node *cmp_bad_count, *good_count, *bad_count, *count_phi, *bad_count_neg;
1619 mode = get_irn_mode(loop_info.end_val);
1620 const_null = new_Const(get_mode_null(mode));
1623 * 1. Calculate first approach to count.
1624 * Condition: (end - start) % step == 0 */
1625 block1 = clone_block_sans_bes(loop_head, loop_head);
1626 DB((dbg, LEVEL_4, "Duff block 1 %N\n", block1));
1628 /* Create loop entry phis in first duff block
1629 * as it becomes the loops preheader */
1630 for_each_phi(loop_head, phi) {
1631 /* Returns phis pred if phi would have arity 1*/
1632 ir_node *new_phi = clone_phis_sans_bes(phi, loop_head, block1);
1634 DB((dbg, LEVEL_4, "HEAD %N phi %N\n", loop_head, phi));
1635 DB((dbg, LEVEL_4, "BLOCK1 %N phi %N\n", block1, new_phi));
/* ems = end minus start */
1638 ems = new_r_Sub(block1, loop_info.end_val, loop_info.start_val,
1639 get_irn_mode(loop_info.end_val));
1640 DB((dbg, LEVEL_4, "BLOCK1 sub %N\n", ems));
1643 ems = new_Sub(loop_info.end_val, loop_info.start_val,
1644 get_irn_mode(loop_info.end_val));
1646 DB((dbg, LEVEL_4, "mod ins %N %N\n", ems, loop_info.step));
1647 ems_mod = new_r_Mod(block1,
1652 op_pin_state_pinned);
1653 ems_div = new_r_Div(block1,
1658 op_pin_state_pinned);
1660 DB((dbg, LEVEL_4, "New module node %N\n", ems_mod));
1662 ems_mod_proj = new_r_Proj(ems_mod, mode_Iu, pn_Mod_res);
1663 cmp_null = new_r_Cmp(block1, ems_mod_proj, const_null, ir_relation_less);
1664 ems_mode_cond = new_r_Cond(block1, cmp_null);
1666 /* ems % step == 0 */
1667 x_true = new_r_Proj(ems_mode_cond, mode_X, pn_Cond_true);
1668 /* ems % step != 0 */
1669 x_false = new_r_Proj(ems_mode_cond, mode_X, pn_Cond_false);
1672 * Assures, duffs device receives a valid count.
1674 * decreasing: count < 0
1675 * increasing: count > 0
1680 count_block = new_Block(2, ins);
1681 DB((dbg, LEVEL_4, "Duff block 2 %N\n", count_block));
1684 /* Increase loop-taken-count depending on the loop condition
1685 * uses the latest iv to compare to. */
1686 if (loop_info.latest_value == 1) {
1687 /* ems % step == 0 : +0 */
1688 true_val = new_Const(get_mode_null(mode));
1689 /* ems % step != 0 : +1 */
1690 false_val = new_Const(get_mode_one(mode));
1692 ir_tarval *tv_two = new_tarval_from_long(2, mode);
1693 /* ems % step == 0 : +1 */
1694 true_val = new_Const(get_mode_one(mode));
1695 /* ems % step != 0 : +2 */
1696 false_val = new_Const(tv_two);
/* Phi selecting the correction term according to which Cond proj we
 * arrived from. */
1702 correction = new_r_Phi(count_block, 2, ins, mode);
1704 count = new_r_Proj(ems_div, mode, pn_Div_res);
1706 /* (end - start) / step + correction */
1707 count = new_Add(count, correction, mode);
1709 /* We preconditioned the loop to be tail-controlled.
1710 * So, if count is something 'wrong' like 0,
1711 * negative/positive (depending on step direction),
1712 * we may take the loop once (tail-contr.) and leave it
1713 * to the existing condition, to break; */
1715 /* Depending on step direction, we have to check for > or < 0 */
1716 if (loop_info.decreasing == 1) {
1717 cmp_bad_count = new_r_Cmp(count_block, count, const_null,
1720 cmp_bad_count = new_r_Cmp(count_block, count, const_null,
1721 ir_relation_greater);
1724 bad_count_neg = new_r_Cond(count_block, cmp_bad_count);
1725 good_count = new_Proj(bad_count_neg, mode_X, pn_Cond_true);
1726 bad_count = new_Proj(ems_mode_cond, mode_X, pn_Cond_false);
1729 * Contains module to decide which loop to start from. */
1731 ins[0] = good_count;
1733 duff_block = new_Block(2, ins);
1734 DB((dbg, LEVEL_4, "Duff block 3 %N\n", duff_block));
1736 /* Get absolute value */
1737 ins[0] = new_Abs(count, mode);
1738 /* Manually feed the aforementioned count = 1 (bad case)*/
1739 ins[1] = new_Const(get_mode_one(mode));
1740 count_phi = new_r_Phi(duff_block, 2, ins, mode);
1742 unroll_c = new_Const(new_tarval_from_long((long)unroll_nr, mode));
1744 /* count % unroll_nr */
1745 duff_mod = new_r_Mod(duff_block,
1750 op_pin_state_pinned);
1753 proj = new_Proj(duff_mod, mode, pn_Mod_res);
1754 /* condition does NOT create itself in the block of the proj! */
1755 cond = new_r_Cond(duff_block, proj);
/* Published for place_copies()/unrolling_fix_loop_head_inv(). */
1757 loop_info.duff_cond = cond;
1760 /* Returns 1 if given node is not in loop,
1761 * or if it is a phi of the loop head with only loop invariant defs.
/* Used by the invariant-unrolling analysis to decide whether a value can be
 * treated as constant with respect to cur_loop.
 * NOTE(review): the actual return statements fall in lines missing from
 * this listing; the DB() messages indicate which path is which. */
1763 static unsigned is_loop_invariant_def(ir_node *node)
1767 if (! is_in_loop(node)) {
1768 DB((dbg, LEVEL_4, "Not in loop %N\n", node));
1769 /* || is_Const(node) || is_SymConst(node)) {*/
1773 /* If this is a phi of the loophead shared by more than 1 loop,
1774 * we need to check if all defs are not in the loop. */
1777 block = get_nodes_block(node);
1779 /* To prevent unexpected situations. */
1780 if (block != loop_head) {
1784 for (i = 0; i < get_irn_arity(node); ++i) {
1785 /* Check if all bes are just loopbacks. */
1786 if (is_own_backedge(block, i) && get_irn_n(node, i) != node)
1789 DB((dbg, LEVEL_4, "invar %N\n", node));
1792 DB((dbg, LEVEL_4, "Not invar %N\n", node));
1797 /* Returns 1 if one pred of node is invariant and the other is not.
1798 * invar_pred and other are set analogously. */
/* Out-parameters: *invar_pred receives the loop-invariant operand,
 * *other the remaining one.  Fails (per the comments below) when both or
 * neither pred is invariant.  node is expected to be binary (ins 0 and 1). */
1799 static unsigned get_invariant_pred(ir_node *node, ir_node **invar_pred, ir_node **other)
1801 ir_node *pred0 = get_irn_n(node, 0);
1802 ir_node *pred1 = get_irn_n(node, 1);
1807 if (is_loop_invariant_def(pred0)) {
1808 DB((dbg, LEVEL_4, "pred0 invar %N\n", pred0));
1809 *invar_pred = pred0;
1813 if (is_loop_invariant_def(pred1)) {
1814 DB((dbg, LEVEL_4, "pred1 invar %N\n", pred1));
1816 if (*invar_pred != NULL) {
1817 /* RETURN. We do not want both preds to be invariant. */
1822 *invar_pred = pred1;
1825 DB((dbg, LEVEL_4, "pred1 not invar %N\n", pred1));
1827 if (*invar_pred != NULL)
1834 /* Starts from a phi that may belong to an iv.
1835 * If an add forms a loop with iteration_phi,
1836 * and add uses a constant, 1 is returned
1837 * and 'start' as well as 'add' are sane. */
/* Fills loop_info.start_val (the non-backedge pred of the head phi) and
 * loop_info.add (the Add/Sub feeding all own backedges).  `role` selects
 * how strict the start value must be: constant requires Const/SymConst,
 * invariant only requires loop invariance. */
1838 static unsigned get_start_and_add(ir_node *iteration_phi, unrolling_kind_flag role)
1841 ir_node *found_add = loop_info.add;
1842 int arity = get_irn_arity(iteration_phi);
1844 DB((dbg, LEVEL_4, "Find start and add from %N\n", iteration_phi));
1846 for (i = 0; i < arity; ++i) {
1848 /* Find start_val which needs to be pred of the iteration_phi.
1849 * If start_val already known, sanity check. */
1850 if (!is_backedge(get_nodes_block(loop_info.iteration_phi), i)) {
1851 ir_node *found_start_val = get_irn_n(loop_info.iteration_phi, i);
1853 DB((dbg, LEVEL_4, "found_start_val %N\n", found_start_val));
1855 /* We already found a start_val it has to be always the same. */
1856 if (loop_info.start_val && found_start_val != loop_info.start_val)
/* NOTE(review): both branches below test (role == constant); given the
 * Const/SymConst vs. is_loop_invariant_def split, the second likely
 * should be (role == invariant) — verify against the original source. */
1859 if ((role == constant) && !(is_SymConst(found_start_val) || is_Const(found_start_val)))
1861 else if((role == constant) && !(is_loop_invariant_def(found_start_val)))
1864 loop_info.start_val = found_start_val;
1867 /* The phi has to be in the loop head.
1868 * Follow all own backedges. Every value supplied from these preds of the phi
1869 * needs to origin from the same add. */
1870 if (is_own_backedge(get_nodes_block(loop_info.iteration_phi), i)) {
1871 ir_node *new_found = get_irn_n(loop_info.iteration_phi,i);
1873 DB((dbg, LEVEL_4, "is add? %N\n", new_found));
1875 if (! (is_Add(new_found) || is_Sub(new_found)) || (found_add && found_add != new_found))
1878 found_add = new_found;
1882 loop_info.add = found_add;
1888 /* Returns 1 if one pred of node is a const value and the other is not.
1889 * const_pred and other are set analogously. */
/* Symmetric to get_invariant_pred, but tests Const/SymConst instead of loop
 * invariance.  Fails when both or neither pred is constant. */
1890 static unsigned get_const_pred(ir_node *node, ir_node **const_pred, ir_node **other)
1892 ir_node *pred0 = get_irn_n(node, 0);
1893 ir_node *pred1 = get_irn_n(node, 1);
1895 DB((dbg, LEVEL_4, "Checking for constant pred of %N\n", node));
1900 /*DB((dbg, LEVEL_4, "is %N const\n", pred0));*/
1901 if (is_Const(pred0) || is_SymConst(pred0)) {
1902 *const_pred = pred0;
1906 /*DB((dbg, LEVEL_4, "is %N const\n", pred1));*/
1907 if (is_Const(pred1) || is_SymConst(pred1)) {
1908 if (*const_pred != NULL) {
1909 /* RETURN. We do not want both preds to be constant. */
1914 *const_pred = pred1;
1917 if (*const_pred == NULL)
1923 /* Returns 1 if loop exits within 2 steps of the iv.
1924 * Norm_proj means we do not exit the loop.*/
/* Constant-folds one further iv step: `stepped` is the iv after count_tar
 * steps; `next` is one more step.  Increments *count_tar if the extra step
 * is taken.  norm_proj is the relation under which the loop is re-entered. */
1925 static unsigned simulate_next(ir_tarval **count_tar,
1926 ir_tarval *stepped, ir_tarval *step_tar, ir_tarval *end_tar,
1927 ir_relation norm_proj)
1931 DB((dbg, LEVEL_4, "Loop taken if (stepped)%ld %s (end)%ld ",
1932 get_tarval_long(stepped),
1933 get_relation_string((norm_proj)),
1934 get_tarval_long(end_tar)));
1935 DB((dbg, LEVEL_4, "comparing latest value %d\n", loop_info.latest_value));
1937 /* If current iv does not stay in the loop,
1938 * this run satisfied the exit condition. */
1939 if (! (tarval_cmp(stepped, end_tar) & norm_proj))
1942 DB((dbg, LEVEL_4, "Result: (stepped)%ld IS %s (end)%ld\n",
1943 get_tarval_long(stepped),
1944 get_relation_string(tarval_cmp(stepped, end_tar)),
1945 get_tarval_long(end_tar)));
/* Apply one more iv update, mirroring whether the iv uses Add or Sub. */
1948 if (is_Add(loop_info.add))
1949 next = tarval_add(stepped, step_tar);
1952 next = tarval_sub(stepped, step_tar, get_irn_mode(loop_info.end_val));
1954 DB((dbg, LEVEL_4, "Loop taken if %ld %s %ld ",
1955 get_tarval_long(next),
1956 get_relation_string(norm_proj),
1957 get_tarval_long(end_tar)));
1958 DB((dbg, LEVEL_4, "comparing latest value %d\n", loop_info.latest_value));
1960 /* Increase steps. */
1961 *count_tar = tarval_add(*count_tar, get_tarval_one(get_tarval_mode(*count_tar)));
1963 /* Next has to fail the loop condition, or we will never exit. */
1964 if (! (tarval_cmp(next, end_tar) & norm_proj))
1970 /* Check if loop meets requirements for a 'simple loop':
1971 * - Exactly one cf out
1973 * - Max nodes after unrolling
1977 * Returns Projection of cmp node or NULL; */
/* Also records loop_info.max_unroll, loop_info.be_src_pos and
 * loop_info.exit_cond as side effects.
 * NOTE(review): gaps in this listing hide several early `return NULL`
 * lines and the final return — verify against the original source. */
1978 static ir_node *is_simple_loop(void)
1981 ir_node *loop_block, *exit_block, *projx, *cond, *cmp;
1983 /* Maximum of one condition, and no endless loops. */
1984 if (loop_info.cf_outs != 1)
1987 DB((dbg, LEVEL_4, "1 loop exit\n"));
1989 /* Calculate maximum unroll_nr keeping node count below limit. */
1990 loop_info.max_unroll = (int)((double)opt_params.max_unrolled_loop_size / (double)loop_info.nodes);
1991 if (loop_info.max_unroll < 2) {
1996 DB((dbg, LEVEL_4, "maximum unroll factor %u, to not exceed node limit \n",
1997 opt_params.max_unrolled_loop_size));
1999 arity = get_irn_arity(loop_head);
2000 /* RETURN if we have more than 1 be. */
2001 /* Get my backedges without alien bes. */
2003 for (i = 0; i < arity; ++i) {
2004 ir_node *pred = get_irn_n(loop_head, i);
2005 if (is_own_backedge(loop_head, i)) {
2007 /* Our simple loops may have only one backedge. */
2010 loop_block = get_nodes_block(pred);
2011 loop_info.be_src_pos = i;
2016 DB((dbg, LEVEL_4, "loop has 1 own backedge.\n"));
2018 exit_block = get_nodes_block(loop_info.cf_out.pred);
2019 /* The loop has to be tail-controlled.
2020 * This can be changed/improved,
2021 * but we would need a duff iv. */
2022 if (exit_block != loop_block)
2025 DB((dbg, LEVEL_4, "tail-controlled loop.\n"));
2027 /* find value on which loop exit depends */
2028 projx = loop_info.cf_out.pred;
2029 cond = get_irn_n(projx, 0);
2030 cmp = get_irn_n(cond, 0);
2035 DB((dbg, LEVEL_5, "projection is %s\n", get_relation_string(get_Cmp_relation(cmp))));
/* exit_cond remembers whether the true or false proj stays in the loop. */
2037 switch(get_Proj_proj(projx)) {
2039 loop_info.exit_cond = 0;
2042 loop_info.exit_cond = 1;
2045 panic("Cond Proj_proj other than true/false");
2048 DB((dbg, LEVEL_4, "Valid Cmp.\n"));
2052 /* Returns 1 if all nodes are mode_Iu or mode_Is. */
/* All three nodes must share the same integer mode (all unsigned Iu or all
 * signed Is); mixed or non-integer modes fail.
 * NOTE(review): the return statements are in lines missing from this
 * listing. */
2053 static unsigned are_mode_I(ir_node *n1, ir_node* n2, ir_node *n3)
2055 ir_mode *m1 = get_irn_mode(n1);
2056 ir_mode *m2 = get_irn_mode(n2);
2057 ir_mode *m3 = get_irn_mode(n3);
2059 if ((m1 == mode_Iu && m2 == mode_Iu && m3 == mode_Iu) ||
2060 (m1 == mode_Is && m2 == mode_Is && m3 == mode_Is))
2066 /* Checks if cur_loop is a simple tail-controlled counting loop
2067 * with start and end value loop invariant, step constant. */
/* On success builds duffs device and returns loop_info.max_unroll; the
 * missing lines (per the gappy numbering) presumably return 0 on the
 * failure paths.  Populates loop_info.{end_val,step,start_val,
 * iteration_phi,add} along the way. */
2068 static unsigned get_unroll_decision_invariant(void)
2071 ir_node *projres, *loop_condition, *iteration_path;
2073 ir_tarval *step_tar;
2077 /* RETURN if loop is not 'simple' */
2078 projres = is_simple_loop();
2079 if (projres == NULL)
2082 /* Use a minimal size for the invariant unrolled loop,
2083 * as duffs device produces overhead */
2084 if (loop_info.nodes < opt_params.invar_unrolling_min_size)
2087 loop_condition = get_irn_n(projres, 0);
2089 success = get_invariant_pred(loop_condition, &loop_info.end_val, &iteration_path);
2090 DB((dbg, LEVEL_4, "pred invar %d\n", success));
2095 DB((dbg, LEVEL_4, "Invariant End_val %N, other %N\n", loop_info.end_val, iteration_path));
2097 /* We may find the add or the phi first.
2098 * Until now we only have end_val. */
2099 if (is_Add(iteration_path) || is_Sub(iteration_path)) {
2101 loop_info.add = iteration_path;
2102 DB((dbg, LEVEL_4, "Case 1: Got add %N (maybe not sane)\n", loop_info.add));
2104 /* Preds of the add should be step and the iteration_phi */
2105 success = get_const_pred(loop_info.add, &loop_info.step, &loop_info.iteration_phi);
2109 DB((dbg, LEVEL_4, "Got step %N\n", loop_info.step));
2111 if (! is_Phi(loop_info.iteration_phi))
2114 DB((dbg, LEVEL_4, "Got phi %N\n", loop_info.iteration_phi));
2117 * Does necessary sanity check of add, if it is already set. */
2118 success = get_start_and_add(loop_info.iteration_phi, invariant)
2122 DB((dbg, LEVEL_4, "Got start A %N\n", loop_info.start_val));
2124 } else if (is_Phi(iteration_path)) {
2125 ir_node *new_iteration_phi;
2127 loop_info.iteration_phi = iteration_path;
2128 DB((dbg, LEVEL_4, "Case 2: Got phi %N\n", loop_info.iteration_phi));
2130 /* Find start_val and add-node.
2131 * Does necessary sanity check of add, if it is already set. */
2132 success = get_start_and_add(loop_info.iteration_phi, invariant);
2136 DB((dbg, LEVEL_4, "Got start B %N\n", loop_info.start_val));
2137 DB((dbg, LEVEL_4, "Got add or sub %N\n", loop_info.add));
2139 success = get_const_pred(loop_info.add, &loop_info.step, &new_iteration_phi);
2143 DB((dbg, LEVEL_4, "Got step (B) %N\n", loop_info.step));
2145 if (loop_info.iteration_phi != new_iteration_phi)
2152 mode = get_irn_mode(loop_info.end_val);
2154 DB((dbg, LEVEL_4, "start %N, end %N, step %N\n",
2155 loop_info.start_val, loop_info.end_val, loop_info.step));
2157 if (mode != mode_Is && mode != mode_Iu)
2160 /* TODO necessary? */
2161 if (!are_mode_I(loop_info.start_val, loop_info.step, loop_info.end_val))
2164 DB((dbg, LEVEL_4, "mode integer\n"));
2166 step_tar = get_Const_tarval(loop_info.step);
2168 if (tarval_is_null(step_tar)) {
2169 /* TODO Might be worth a warning. */
2173 DB((dbg, LEVEL_4, "step is not 0\n"));
2175 create_duffs_block();
2177 return loop_info.max_unroll;
2180 /* Returns unroll factor,
2181 * given maximum unroll factor and number of loop passes. */
/* Strategy: prefer the largest of 6..2 that divides count_tar evenly
 * (no duffs device needed); otherwise fall back to gcd(max_unroll,
 * count_tar), then clamp the preference to max_unroll. */
2182 static unsigned get_preferred_factor_constant(ir_tarval *count_tar)
2184 ir_tarval *tar_6, *tar_5, *tar_4, *tar_3, *tar_2;
2186 ir_mode *mode = get_irn_mode(loop_info.end_val);
2188 tar_6 = new_tarval_from_long(6, mode);
2189 tar_5 = new_tarval_from_long(5, mode);
2190 tar_4 = new_tarval_from_long(4, mode);
2191 tar_3 = new_tarval_from_long(3, mode);
2192 tar_2 = new_tarval_from_long(2, mode);
2194 /* loop passes % {6, 5, 4, 3, 2} == 0 */
2195 if (tarval_is_null(tarval_mod(count_tar, tar_6)))
2197 else if (tarval_is_null(tarval_mod(count_tar, tar_5)))
2199 else if (tarval_is_null(tarval_mod(count_tar, tar_4)))
2201 else if (tarval_is_null(tarval_mod(count_tar, tar_3)))
2203 else if (tarval_is_null(tarval_mod(count_tar, tar_2)))
2206 /* gcd(max_unroll, count_tar) */
2207 int a = loop_info.max_unroll;
2208 int b = (int)get_tarval_long(count_tar);
2211 DB((dbg, LEVEL_4, "gcd of max_unroll %d and count_tar %d: ", a, b));
2218 DB((dbg, LEVEL_4, "%d\n", a));
2222 DB((dbg, LEVEL_4, "preferred unroll factor %d\n", prefer));
2225 * If our preference is greater than the allowed unroll factor
2226 * we either might reduce the preferred factor and prevent a duffs device block,
2227 * or create a duffs device block, from which in this case (constants only)
2228 * we know the startloop at compiletime.
2229 * The latter yields the following graphs.
2230 * but for code generation we would want to use graph A.
2231 * The graphs are equivalent. So, we can only reduce the preferred factor.
2241 if (prefer <= loop_info.max_unroll)
/* Preference exceeds the node-count limit: fall back to the largest
 * allowed small factor. */
2246 if (loop_info.max_unroll >= 3)
2248 else if (loop_info.max_unroll >= 2)
2254 if (loop_info.max_unroll >= 2)
2265 /* Check if cur_loop is a simple counting loop.
2266 * Start, step and end are constants.
2267 * TODO The whole constant case should use procedures similar to
2268 * the invariant case, as they are more versatile. */
/* Computes the exact trip count (count_tar) with tarval arithmetic and
 * returns the preferred unroll factor, or (per the gappy listing,
 * presumably) 0 on any of the many bail-out paths. */
2270 static unsigned get_unroll_decision_constant(void)
2272 ir_node *cmp, *iteration_path;
2273 unsigned success, is_latest_val;
2274 ir_tarval *start_tar, *end_tar, *step_tar, *diff_tar, *count_tar;
2276 ir_relation proj_proj, norm_proj;
2279 /* RETURN if loop is not 'simple' */
2280 cmp = is_simple_loop();
2284 /* One in of the loop condition needs to be loop invariant. => end_val
2285 * The other in is assigned by an add. => add
2286 * The add uses a loop invariant value => step
2287 * and a phi with a loop invariant start_val and the add node as ins.
2299 success = get_const_pred(cmp, &loop_info.end_val, &iteration_path);
2303 DB((dbg, LEVEL_4, "End_val %N, other %N\n", loop_info.end_val, iteration_path));
2305 /* We may find the add or the phi first.
2306 * Until now we only have end_val. */
2307 if (is_Add(iteration_path) || is_Sub(iteration_path)) {
2309 /* We test against the latest value of the iv. */
2312 loop_info.add = iteration_path;
2313 DB((dbg, LEVEL_4, "Case 2: Got add %N (maybe not sane)\n", loop_info.add));
2315 /* Preds of the add should be step and the iteration_phi */
2316 success = get_const_pred(loop_info.add, &loop_info.step, &loop_info.iteration_phi);
2320 DB((dbg, LEVEL_4, "Got step %N\n", loop_info.step));
2322 if (! is_Phi(loop_info.iteration_phi))
2325 DB((dbg, LEVEL_4, "Got phi %N\n", loop_info.iteration_phi));
2328 * Does necessary sanity check of add, if it is already set. */
2329 success = get_start_and_add(loop_info.iteration_phi, constant);
2333 DB((dbg, LEVEL_4, "Got start %N\n", loop_info.start_val));
2335 } else if (is_Phi(iteration_path)) {
2336 ir_node *new_iteration_phi;
2338 /* We compare with the value the iv had entering this run. */
2341 loop_info.iteration_phi = iteration_path;
2342 DB((dbg, LEVEL_4, "Case 1: Got phi %N \n", loop_info.iteration_phi));
2344 /* Find start_val and add-node.
2345 * Does necessary sanity check of add, if it is already set. */
2346 success = get_start_and_add(loop_info.iteration_phi, constant);
2350 DB((dbg, LEVEL_4, "Got start %N\n", loop_info.start_val));
2351 DB((dbg, LEVEL_4, "Got add or sub %N\n", loop_info.add));
2353 success = get_const_pred(loop_info.add, &loop_info.step, &new_iteration_phi);
2357 DB((dbg, LEVEL_4, "Got step %N\n", loop_info.step));
2359 if (loop_info.iteration_phi != new_iteration_phi)
2367 mode = get_irn_mode(loop_info.end_val);
2369 DB((dbg, LEVEL_4, "start %N, end %N, step %N\n",
2370 loop_info.start_val, loop_info.end_val, loop_info.step));
2372 if (mode != mode_Is && mode != mode_Iu)
2375 /* TODO necessary? */
2376 if (!are_mode_I(loop_info.start_val, loop_info.step, loop_info.end_val))
2379 DB((dbg, LEVEL_4, "mode integer\n"));
2381 end_tar = get_Const_tarval(loop_info.end_val);
2382 start_tar = get_Const_tarval(loop_info.start_val);
2383 step_tar = get_Const_tarval(loop_info.step);
2385 if (tarval_is_null(step_tar))
2386 /* TODO Might be worth a warning. */
2389 DB((dbg, LEVEL_4, "step is not 0\n"));
/* Decreasing iff exactly one of (negative step, Sub-iv) holds. */
2391 if ((!tarval_is_negative(step_tar)) ^ (!is_Sub(loop_info.add)))
2392 loop_info.decreasing = 1;
2394 diff_tar = tarval_sub(end_tar, start_tar, mode);
2396 /* We need at least count_tar steps to be close to end_val, maybe more.
2397 * No way, that we have gone too many steps.
2398 * This represents the 'latest value'.
2399 * (If condition checks against latest value, is checked later) */
2400 count_tar = tarval_div(diff_tar, step_tar);
2402 /* Iv will not pass end_val (except overflows).
2403 * Nothing done, as it would yield to no advantage. */
2404 if (tarval_is_negative(count_tar)) {
2405 DB((dbg, LEVEL_4, "Loop is endless or never taken."));
2406 /* TODO Might be worth a warning. */
2410 ++stats.u_simple_counting_loop;
2412 loop_info.latest_value = is_latest_val;
2415 if (! is_simple_counting_loop(&count_tar))
2419 /* stepped can be negative, if step < 0 */
2420 stepped = tarval_mul(count_tar, step_tar);
2422 /* step as close to end_val as possible, */
2423 /* |stepped| <= |end_tar|, and dist(stepped, end_tar) is smaller than a step. */
2424 if (is_Sub(loop_info.add))
2425 stepped = tarval_sub(start_tar, stepped, mode_Is);
2427 stepped = tarval_add(start_tar, stepped);
2429 DB((dbg, LEVEL_4, "stepped to %ld\n", get_tarval_long(stepped)));
2431 proj_proj = get_Cmp_relation(cmp);
2432 /* Assure that norm_proj is the stay-in-loop case. */
2433 if (loop_info.exit_cond == 1)
2434 norm_proj = get_negated_relation(proj_proj);
2436 norm_proj = proj_proj;
2438 DB((dbg, LEVEL_4, "normalized projection %s\n", get_relation_string(norm_proj)));
2439 /* Executed at most once (stay in counting loop if a Eq b) */
2440 if (norm_proj == ir_relation_equal)
2441 /* TODO Might be worth a warning. */
2444 /* calculates next values and increases count_tar according to it */
2445 success = simulate_next(&count_tar, stepped, step_tar, end_tar, norm_proj);
2449 /* We run loop once more, if we compare to the
2450 * not yet in-/decreased iv. */
2451 if (is_latest_val == 0) {
2452 DB((dbg, LEVEL_4, "condition uses not latest iv value\n"));
2453 count_tar = tarval_add(count_tar, get_tarval_one(mode));
2456 DB((dbg, LEVEL_4, "loop taken %ld times\n", get_tarval_long(count_tar)));
2458 /* Assure the loop is taken at least 1 time. */
2459 if (tarval_is_null(count_tar)) {
2460 /* TODO Might be worth a warning. */
2464 loop_info.count_tar = count_tar;
2465 return get_preferred_factor_constant(count_tar);
/* Driver for unrolling the currently analyzed loop: checks size/call
 * limits, asks the constant then the invariant decision procedure for an
 * unroll factor, and if factor > 1 copies, places and cleans up the
 * unrolled loop body. */
2471 static void unroll_loop(void)
2474 if (! (loop_info.nodes > 0))
2477 if (loop_info.nodes > opt_params.max_unrolled_loop_size) {
2478 DB((dbg, LEVEL_2, "Nodes %d > allowed nodes %d\n",
2479 loop_info.nodes, opt_params.max_unrolled_loop_size));
2484 if (loop_info.calls > 0) {
2485 DB((dbg, LEVEL_2, "Calls %d > allowed calls 0\n",
2487 ++stats.calls_limit;
2493 /* get_unroll_decision_constant and invariant are completely
2494 * independent for flexibility.
2495 * Some checks may be performed twice. */
2497 /* constant case? */
2498 if (opt_params.allow_const_unrolling)
2499 unroll_nr = get_unroll_decision_constant();
2500 if (unroll_nr > 1) {
2501 loop_info.unroll_kind = constant;
2504 /* invariant case? */
2505 if (opt_params.allow_invar_unrolling)
2506 unroll_nr = get_unroll_decision_invariant();
2508 loop_info.unroll_kind = invariant;
2511 DB((dbg, LEVEL_2, " *** Unrolling %d times ***\n", unroll_nr));
2513 if (unroll_nr > 1) {
2514 loop_entries = NEW_ARR_F(entry_edge, 0);
2517 irg_walk_graph(current_ir_graph, get_loop_entries, NULL, NULL);
/* If the trip count equals the factor, the loop body is fully
 * unrolled and the remaining backedge can be dropped. */
2519 if (loop_info.unroll_kind == constant) {
2520 if ((int)get_tarval_long(loop_info.count_tar) == unroll_nr)
2521 loop_info.needs_backedge = 0;
2523 loop_info.needs_backedge = 1;
2525 loop_info.needs_backedge = 1;
2528 /* Use phase to keep copy of nodes from the condition chain. */
2529 ir_nodemap_init(&map, current_ir_graph);
2530 obstack_init(&obst);
2532 /* Copies the loop */
2533 copy_loop(loop_entries, unroll_nr - 1);
2535 /* Line up the floating copies. */
2536 place_copies(unroll_nr - 1);
2538 /* Remove phis with 1 in
2539 * If there were no nested phis, this would not be necessary.
2540 * Avoiding the creation in the first place
2541 * leads to complex special cases. */
2542 irg_walk_graph(current_ir_graph, correct_phis, NULL, NULL);
2544 if (loop_info.unroll_kind == constant)
2545 ++stats.constant_unroll;
2547 ++stats.invariant_unroll;
2549 clear_irg_properties(current_ir_graph, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE);
2551 DEL_ARR_F(loop_entries);
2552 obstack_free(&obst, NULL);
2553 ir_nodemap_destroy(&map);
2558 /* Analyzes the loop, and checks if its size is within the allowed range.
2559 * Decides if the loop will be processed (and which optimization to run). */
2560 static void init_analyze(ir_graph *irg, ir_loop *loop)
/* Assume a valid head until get_loop_info proves otherwise. */
2565 loop_head_valid = true;
2567 /* Reset the per-loop analysis results. */
2568 memset(&loop_info, 0, sizeof(loop_info_t));
2570 DB((dbg, LEVEL_1, " >>>> current loop %ld <<<\n",
2571 get_loop_loop_nr(loop)));
2573 /* Collect loop information: head, node counts. */
2574 irg_walk_graph(irg, get_loop_info, NULL, NULL);
2576 /* RETURN if there is no valid head */
2577 if (!loop_head || !loop_head_valid) {
2578 DB((dbg, LEVEL_1, "No valid loop head. Nothing done.\n"));
2581 DB((dbg, LEVEL_1, "Loophead: %N\n", loop_head));
/* Too many branches: give up on this loop. */
2584 if (loop_info.branches > opt_params.max_branches) {
2585 DB((dbg, LEVEL_1, "Branches %d > allowed branches %d\n",
2586 loop_info.branches, opt_params.max_branches));
/* NOTE(review): this increments the *calls* counter on a branch-limit
 * bailout — looks like a copy-paste from the call check; verify. */
2587 ++stats.calls_limit;
/* Dispatch to the requested loop optimization (loop_op). */
2592 case loop_op_inversion:
2593 loop_inversion(irg);
2596 case loop_op_unrolling:
2601 panic("Loop optimization not implemented.");
2603 DB((dbg, LEVEL_1, " <<<< end of loop with node %ld >>>>\n",
2604 get_loop_loop_nr(loop)));
2607 /* Recursively finds innermost loops and appends them to the global
2608  * loops list. */
2608 static void find_innermost_loop(ir_loop *loop)
2610 bool had_sons = false;
2611 size_t n_elements = get_loop_n_elements(loop);
/* Recurse into every child loop; remember whether this one has children. */
2614 for (e = 0; e < n_elements; ++e) {
2615 loop_element element = get_loop_element(loop, e);
2616 if (*element.kind == k_ir_loop) {
2617 find_innermost_loop(element.son);
/* No child loops: this loop is innermost, queue it for processing. */
2623 ARR_APP1(ir_loop*, loops, loop);
/* Sets the (currently hard-coded) tuning parameters for the loop
 * optimizations. */
2627 static void set_loop_params(void)
2629 opt_params.max_loop_size = 100;
/* Negative adaption: nodes in deeper loop nests count for less. */
2630 opt_params.depth_adaption = -50;
/* Phis count towards the size; Projs do not. */
2631 opt_params.count_phi = true;
2632 opt_params.count_proj = false;
/* Loops containing any call are never optimized. */
2633 opt_params.allowed_calls = 0;
/* Size limit for the condition chain (loop inversion) — presumably;
 * matches the cc_limit_reached stat. TODO confirm. */
2635 opt_params.max_cc_size = 5;
/* Constant-bound unrolling on, invariant-bound unrolling off. */
2638 opt_params.allow_const_unrolling = true;
2639 opt_params.allow_invar_unrolling = false;
2641 opt_params.invar_unrolling_min_size = 20;
2642 opt_params.max_unrolled_loop_size = 400;
2643 opt_params.max_branches = 9999;
2646 /* Assures preconditions are met, collects all innermost loops of irg and
2647  * runs the selected optimization (loop_op) on each of them. */
2647 void loop_optimization(ir_graph *irg)
/* The optimization needs consistent out edges, outs and loop info. */
2653 assure_irg_properties(irg,
2654 IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES
2655 | IR_GRAPH_PROPERTY_CONSISTENT_OUTS
2656 | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
2660 /* Reset stats for this procedure */
2664 set_current_ir_graph(irg);
/* Node links and per-block phi lists are used by the copy machinery. */
2666 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
2667 collect_phiprojs(irg);
2669 loop = get_irg_loop(irg);
2671 loops = NEW_ARR_F(ir_loop *, 0);
2672 /* Collect all innermost loops below the root loop. */
2673 n_elements = get_loop_n_elements(loop);
2674 for (i = 0; i < n_elements; ++i) {
2675 loop_element element = get_loop_element(loop, i);
2676 if (*element.kind != k_ir_loop)
2678 find_innermost_loop(element.son);
2681 /* Set all links to NULL */
2682 irg_walk_graph(irg, reset_link, NULL, NULL);
/* Process each collected innermost loop in turn. */
2684 for (i = 0; i < ARR_LEN(loops); ++i) {
2685 ir_loop *loop = loops[i];
2689 /* Analyze and handle loop */
2690 init_analyze(irg, loop);
2692 /* Copied blocks do not have their phi list yet */
2693 collect_phiprojs(irg);
2695 /* Set links to NULL
2696 * TODO Still necessary? */
2697 irg_walk_graph(irg, reset_link, NULL, NULL);
2703 ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
/* The transformations may have invalidated all analysis info. */
2705 confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_NONE);
/* Runs loop unrolling on all innermost loops of irg. */
2708 void do_loop_unrolling(ir_graph *irg)
2710 loop_op = loop_op_unrolling;
2711 loop_optimization(irg);
/* Runs loop inversion on all innermost loops of irg. */
2714 void do_loop_inversion(ir_graph *irg)
2716 loop_op = loop_op_inversion;
2717 loop_optimization(irg);
/* Runs loop peeling on all innermost loops of irg. */
2720 void do_loop_peeling(ir_graph *irg)
2722 loop_op = loop_op_peeling;
2723 loop_optimization(irg);
/* Creates a graph pass wrapping do_loop_inversion.
 * name may be NULL; defaults to "loop_inversion". */
2726 ir_graph_pass_t *loop_inversion_pass(const char *name)
2728 return def_graph_pass(name ? name : "loop_inversion", do_loop_inversion);
/* Creates a graph pass wrapping do_loop_unrolling.
 * name may be NULL; defaults to "loop_unroll". */
2731 ir_graph_pass_t *loop_unroll_pass(const char *name)
2733 return def_graph_pass(name ? name : "loop_unroll", do_loop_unrolling);
/* Creates a graph pass wrapping do_loop_peeling.
 * name may be NULL; defaults to "loop_peeling". */
2736 ir_graph_pass_t *loop_peeling_pass(const char *name)
2738 return def_graph_pass(name ? name : "loop_peeling", do_loop_peeling);
2741 void firm_init_loop_opt(void)
2743 FIRM_DBG_REGISTER(dbg, "firm.opt.loop");