fix a bunch of warnings reported by clang analyzer
[libfirm] / ir / opt / loop.c
1 /*
2  * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License.
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @author   Christian Helmer
23  * @brief    loop inversion and loop unrolling
24  *
25  */
26 #include "config.h"
27
28 #include <stdbool.h>
29
30 #include "iroptimize.h"
31 #include "opt_init.h"
32 #include "irnode.h"
33 #include "debug.h"
34 #include "error.h"
35
36 #include "ircons.h"
37 #include "irgopt.h"
38 #include "irgmod.h"
39 #include "irgwalk.h"
40 #include "irouts.h"
41 #include "iredges.h"
42 #include "irtools.h"
43 #include "array_t.h"
44 #include "beutil.h"
45 #include "irpass.h"
46 #include "irdom.h"
47
48 #include <math.h>
49 #include "irbackedge_t.h"
50 #include "irnodemap.h"
51 #include "irloop_t.h"
52
53 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
54
55 /**
56  * Convenience macro for iterating over every phi node of the given block.
57  * Requires phi list per block.
58  */
59 #define for_each_phi(block, phi) \
60         for ((phi) = get_Block_phis( (block) ); (phi) ; (phi) = get_Phi_next((phi)))
61
62 #define for_each_phi_safe(head, phi, next) \
63         for ((phi) = (head), (next) = (head) ? get_Phi_next((head)) : NULL; \
64                         (phi) ; (phi) = (next), (next) = (next) ? get_Phi_next((next)) : NULL)
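/* Illustrative usage sketch (not part of the pass): iterating the phis of a
 * block with the macros above. for_each_phi_safe additionally tolerates the
 * current phi being exchanged or removed during iteration, e.g.:
 *
 *   ir_node *phi;
 *   for_each_phi(block, phi) {
 *       ... inspect phi ...
 *   }
 *
 *   ir_node *next;
 *   for_each_phi_safe(get_Block_phis(block), phi, next) {
 *       ... phi may be exchanged here ...
 *   }
 */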
65
66 /* Currently processed loop. */
67 static ir_loop *cur_loop;
68
69 /* Flag for kind of unrolling. */
70 typedef enum {
71         constant,
72         invariant
73 } unrolling_kind_flag;
74
76 /* Condition for visiting a node during copy_walk. */
76 typedef bool walker_condition(const ir_node *);
77
78 /* Node and position of a predecessor. */
79 typedef struct entry_edge {
80         ir_node *node;
81         int pos;
82         ir_node *pred;
83 } entry_edge;
84
85 /* Node info for unrolling. */
86 typedef struct unrolling_node_info {
87         ir_node **copies;
88 } unrolling_node_info;
89
90 /* Outs of the loop's head. */
91 static entry_edge *cur_head_outs;
92
93 /* Information about the loop head */
94 static ir_node *loop_head       = NULL;
95 static bool     loop_head_valid = true;
96
97 /* List of all inner loops that are processed. */
98 static ir_loop **loops;
99
100 /* Stats */
101 typedef struct loop_stats_t {
102         unsigned loops;
103         unsigned inverted;
104         unsigned too_large;
105         unsigned too_large_adapted;
106         unsigned cc_limit_reached;
107         unsigned calls_limit;
108
109         unsigned u_simple_counting_loop;
110         unsigned constant_unroll;
111         unsigned invariant_unroll;
112
113         unsigned unhandled;
114 } loop_stats_t;
115
116 static loop_stats_t stats;
117
118 /* Set stats to zero */
119 static void reset_stats(void)
120 {
121         memset(&stats, 0, sizeof(loop_stats_t));
122 }
123
124 /* Print stats */
125 static void print_stats(void)
126 {
127         DB((dbg, LEVEL_2, "---------------------------------------\n"));
128         DB((dbg, LEVEL_2, "loops             :   %d\n",stats.loops));
129         DB((dbg, LEVEL_2, "inverted          :   %d\n",stats.inverted));
130         DB((dbg, LEVEL_2, "too_large         :   %d\n",stats.too_large));
131         DB((dbg, LEVEL_2, "too_large_adapted :   %d\n",stats.too_large_adapted));
132         DB((dbg, LEVEL_2, "cc_limit_reached  :   %d\n",stats.cc_limit_reached));
133         DB((dbg, LEVEL_2, "calls_limit       :   %d\n",stats.calls_limit));
134         DB((dbg, LEVEL_2, "u_simple_counting :   %d\n",stats.u_simple_counting_loop));
135         DB((dbg, LEVEL_2, "constant_unroll   :   %d\n",stats.constant_unroll));
136         DB((dbg, LEVEL_2, "invariant_unroll  :   %d\n",stats.invariant_unroll));
137         DB((dbg, LEVEL_2, "=======================================\n"));
138 }
139
140 /* Commandline parameters */
141 typedef struct loop_opt_params_t {
142         unsigned max_loop_size;     /* Maximum number of nodes  [nodes] */
143         int      depth_adaption;    /* Loop nest depth adaption [percent] */
144         unsigned allowed_calls;     /* Number of calls allowed [number] */
145         bool     count_phi;         /* Count phi nodes */
146         bool     count_proj;        /* Count projections */
147
148         unsigned max_cc_size;       /* Maximum condition chain size [nodes] */
149         unsigned max_branches;
150
151         unsigned max_unrolled_loop_size;    /* [nodes] */
152         bool     allow_const_unrolling;
153         bool     allow_invar_unrolling;
154         unsigned invar_unrolling_min_size;  /* [nodes] */
155
156 } loop_opt_params_t;
157
158 static loop_opt_params_t opt_params;
159
160 /* Loop analysis information */
161 typedef struct loop_info_t {
162         unsigned nodes;        /* node count */
163         unsigned ld_st;        /* load and store nodes */
164         unsigned branches;     /* number of conditions */
165         unsigned calls;        /* number of calls */
166         unsigned cf_outs;      /* number of cf edges which leave the loop */
167         entry_edge cf_out;     /* the single cf edge leaving the loop */
168         int be_src_pos;        /* position of the single own backedge in the head */
169
170         /* for inversion */
171         unsigned cc_size;      /* nodes in the condition chain */
172
173         /* for unrolling */
174         unsigned max_unroll;   /* Number of unrolls satisfying max_loop_size */
175         unsigned exit_cond;    /* 1 if condition==true exits the loop.  */
176         unsigned latest_value:1;    /* 1 if condition is checked against latest counter value */
177         unsigned needs_backedge:1;  /* 0 if loop is completely unrolled */
178         unsigned decreasing:1;      /* Step operation is a Sub, or step is < 0 */
179
180         /* IV information of a simple loop */
181         ir_node *start_val;
182         ir_node *step;
183         ir_node *end_val;
184         ir_node *iteration_phi;
185         ir_node *add;
186
187         ir_tarval *count_tar;               /* Number of loop iterations */
188
189         ir_node *duff_cond;                 /* Duff mod */
190         unrolling_kind_flag unroll_kind;    /* constant or invariant unrolling */
191 } loop_info_t;
192
193 /* Information about the current loop */
194 static loop_info_t loop_info;
195
196 /* Outs of the condition chain (loop inversion). */
197 static ir_node **cc_blocks;
198 /* df/cf edges with def in the condition chain */
199 static entry_edge *cond_chain_entries;
200 /* Array of df loops found in the condition chain. */
201 static entry_edge *head_df_loop;
202 /* Number of blocks in cc */
203 static unsigned inversion_blocks_in_cc;
204
205
206 /* Cf/df edges leaving the loop.
207  * Called entries here, as they are used to enter the loop with walkers. */
208 static entry_edge *loop_entries;
209 /* Number of unrolls to perform */
210 static int unroll_nr;
211 /* A nodemap and obstack are used to keep copies of nodes. */
212 static ir_nodemap     map;
213 static struct obstack obst;
214
215 /* Loop operations.  */
216 typedef enum loop_op_t {
217         loop_op_inversion,
218         loop_op_unrolling,
219         loop_op_peeling
220 } loop_op_t;
221
222 /* Saves which loop operation to do until after basic tests. */
223 static loop_op_t loop_op;
224
225 /* Returns the maximum number of nodes allowed for the given nest depth */
226 static unsigned get_max_nodes_adapted(unsigned depth)
227 {
228         double perc = 100.0 + (double)opt_params.depth_adaption;
229         double factor = pow(perc / 100.0, depth);
230
231         return (unsigned)((double)opt_params.max_loop_size * factor);
232 }
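/* Worked example (with illustrative parameter values): for
 * max_loop_size = 100 and depth_adaption = 10 (percent), a loop at nest
 * depth 2 is allowed 100 * (110/100)^2 = 121 nodes; a negative
 * depth_adaption shrinks the budget with increasing depth instead. */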
233
234 /* Resets a node's link. For use with a walker. */
235 static void reset_link(ir_node *node, void *env)
236 {
237         (void)env;
238         set_irn_link(node, NULL);
239 }
240
241 /* Returns true if the node's block is in cur_loop. */
242 static bool is_in_loop(const ir_node *node)
243 {
244         return get_irn_loop(get_block_const(node)) == cur_loop;
245 }
246
247 /* Returns true if the given edge is a backedge
248  * with its pred in cur_loop. */
249 static bool is_own_backedge(const ir_node *n, int pos)
250 {
251         return is_backedge(n, pos) && is_in_loop(get_irn_n(n, pos));
252 }
253
254 /* Finds the loop head and collects some loop_info, such as node, call and branch counts. */
255 static void get_loop_info(ir_node *node, void *env)
256 {
257         bool node_in_loop = is_in_loop(node);
258         int i, arity;
259         (void)env;
260
261         /* collect some loop information */
262         if (node_in_loop) {
263                 if (is_Phi(node) && opt_params.count_phi)
264                         ++loop_info.nodes;
265                 else if (is_Proj(node) && opt_params.count_proj)
266                         ++loop_info.nodes;
267                 else if (!is_Confirm(node) && !is_Const(node) && !is_SymConst(node))
268                         ++loop_info.nodes;
269
270                 if (is_Load(node) || is_Store(node))
271                         ++loop_info.ld_st;
272
273                 if (is_Call(node))
274                         ++loop_info.calls;
275         }
276
277         arity = get_irn_arity(node);
278         for (i = 0; i < arity; i++) {
279                 ir_node *pred         = get_irn_n(node, i);
280                 bool     pred_in_loop = is_in_loop(pred);
281
282                 if (is_Block(node) && !node_in_loop && pred_in_loop) {
283                         entry_edge entry;
284                         entry.node = node;
285                         entry.pos = i;
286                         entry.pred = pred;
287                         /* Count cf outs */
288                         ++loop_info.cf_outs;
289                         loop_info.cf_out = entry;
290                 }
291
292                 /* Find the loop's head, i.e. the block with a cfgpred outside of the loop */
293                 if (is_Block(node)) {
294                         unsigned outs_n = 0;
295
296                         /* Count innerloop branches */
297                         foreach_out_edge_kind(node, edge, EDGE_KIND_BLOCK) {
298                                 ir_node *succ = get_edge_src_irn(edge);
299                                 if (is_Block(succ) && is_in_loop(succ))
300                                         ++outs_n;
301                         }
302                         if (outs_n > 1)
303                                 ++loop_info.branches;
304
305                         if (node_in_loop && !pred_in_loop && loop_head_valid) {
306                                 ir_node *cfgpred = get_Block_cfgpred(node, i);
307
308                                 if (!is_in_loop(cfgpred)) {
309                                         DB((dbg, LEVEL_5, "potential head %+F because inloop and pred %+F not inloop\n",
310                                                                 node, pred));
311                                         /* another head? We do not touch this. */
312                                         if (loop_head && loop_head != node) {
313                                                 loop_head_valid = false;
314                                         } else {
315                                                 loop_head = node;
316                                         }
317                                 }
318                         }
319                 }
320         }
321 }
322
323 /* Finds all edges with users outside of the loop
324  * and definition inside the loop. */
325 static void get_loop_entries(ir_node *node, void *env)
326 {
327         unsigned node_in_loop, pred_in_loop;
328         int i, arity;
329         (void) env;
330
331         arity = get_irn_arity(node);
332         for (i = 0; i < arity; ++i) {
333                 ir_node *pred = get_irn_n(node, i);
334
335                 pred_in_loop = is_in_loop(pred);
336                 node_in_loop = is_in_loop(node);
337
338                 if (pred_in_loop && !node_in_loop) {
339                         entry_edge entry;
340                         entry.node = node;
341                         entry.pos = i;
342                         entry.pred = pred;
343                         ARR_APP1(entry_edge, loop_entries, entry);
344                 }
345         }
346 }
347
348 /* ssa */
349 static ir_node *ssa_second_def;
350 static ir_node *ssa_second_def_block;
351
352 /**
353  * Walks the graph bottom up, searching for definitions and creating phis.
354  */
355 static ir_node *search_def_and_create_phis(ir_node *block, ir_mode *mode, int first)
356 {
357         int i;
358         int n_cfgpreds;
359         ir_graph *irg = get_irn_irg(block);
360         ir_node *phi;
361         ir_node **in;
362
363         DB((dbg, LEVEL_5, "ssa search_def_and_create_phis: block %N\n", block));
364
365         /* Prevents creation of phis that would be bad anyway,
366          * i.e. in dead and bad blocks. */
367         if (get_irn_arity(block) < 1 || is_Bad(block)) {
368                 DB((dbg, LEVEL_5, "ssa bad %N\n", block));
369                 return new_r_Bad(irg, mode);
370         }
371
372         if (block == ssa_second_def_block && !first) {
373                 DB((dbg, LEVEL_5, "ssa found second definition: use second def %N\n", ssa_second_def));
374                 return ssa_second_def;
375         }
376
377         /* already processed this block? */
378         if (irn_visited(block)) {
379                 ir_node *value = (ir_node *) get_irn_link(block);
380                 DB((dbg, LEVEL_5, "ssa already visited: use linked %N\n", value));
381                 return value;
382         }
383
384         assert(block != get_irg_start_block(irg));
385
386         /* a Block with only 1 predecessor needs no Phi */
387         n_cfgpreds = get_Block_n_cfgpreds(block);
388         if (n_cfgpreds == 1) {
389                 ir_node *pred_block = get_Block_cfgpred_block(block, 0);
390                 ir_node *value;
391
392                 DB((dbg, LEVEL_5, "ssa 1 pred: walk pred %N\n", pred_block));
393
394                 value = search_def_and_create_phis(pred_block, mode, 0);
395                 set_irn_link(block, value);
396                 mark_irn_visited(block);
397
398                 return value;
399         }
400
401         /* create a new Phi */
402         NEW_ARR_A(ir_node*, in, n_cfgpreds);
403         for (i = 0; i < n_cfgpreds; ++i)
404                 in[i] = new_r_Dummy(irg, mode);
405
406         phi = new_r_Phi(block, n_cfgpreds, in, mode);
407         /* Important: always keep block phi list up to date. */
408         add_Block_phi(block, phi);
409         DB((dbg, LEVEL_5, "ssa phi creation: link new phi %N to block %N\n", phi, block));
410         set_irn_link(block, phi);
411         mark_irn_visited(block);
412
413         /* set Phi predecessors */
414         for (i = 0; i < n_cfgpreds; ++i) {
415                 ir_node *pred_val;
416                 ir_node *pred_block = get_Block_cfgpred_block(block, i);
417                 assert(pred_block != NULL);
418                 pred_val = search_def_and_create_phis(pred_block, mode, 0);
419
420                 assert(pred_val != NULL);
421
422                 DB((dbg, LEVEL_5, "ssa phi pred:phi %N, pred %N\n", phi, pred_val));
423                 set_irn_n(phi, i, pred_val);
424         }
425
426         return phi;
427 }
428
429
430 /**
431  * Given a set of values this function constructs SSA-form for the users of the
432  * first value (the users are determined through the out-edges of the value).
433  * Works without using the dominance tree.
434  */
435 static void construct_ssa(ir_node *orig_block, ir_node *orig_val,
436                 ir_node *second_block, ir_node *second_val)
437 {
438         ir_graph *irg;
439         ir_mode *mode;
440
441         assert(orig_block && orig_val && second_block && second_val &&
442                         "no parameter of construct_ssa may be NULL");
443
444         if (orig_val == second_val)
445                 return;
446
447         irg = get_irn_irg(orig_val);
448
449         ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
450         inc_irg_visited(irg);
451
452         mode = get_irn_mode(orig_val);
453         set_irn_link(orig_block, orig_val);
454         mark_irn_visited(orig_block);
455
456         ssa_second_def_block = second_block;
457         ssa_second_def       = second_val;
458
459         /* Only fix the users of the first, i.e. the original node */
460         foreach_out_edge_safe(orig_val, edge) {
461                 ir_node *user = get_edge_src_irn(edge);
462                 int j = get_edge_src_pos(edge);
463                 ir_node *user_block = get_nodes_block(user);
464                 ir_node *newval;
465
466                 /* ignore keeps */
467                 if (is_End(user))
468                         continue;
469
470                 DB((dbg, LEVEL_5, "original user %N\n", user));
471
472                 if (is_Phi(user)) {
473                         ir_node *pred_block = get_Block_cfgpred_block(user_block, j);
474                         newval = search_def_and_create_phis(pred_block, mode, 1);
475                 } else {
476                         newval = search_def_and_create_phis(user_block, mode, 1);
477                 }
478                 if (newval != user && !is_Bad(newval))
479                         set_irn_n(user, j, newval);
480         }
481
482         ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
483 }
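/* Usage sketch (illustrative): given an original definition val in block blk
 * and its duplicate val_cp in blk_cp,
 *
 *   construct_ssa(blk, val, blk_cp, val_cp);
 *
 * rewires the users of val to whichever definition reaches them, creating
 * phis at control flow merges on demand. inversion_walk() below uses it this
 * way for every definition of the condition chain and its copy. */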
484
485
486 /***** Unrolling Helper Functions *****/
487
488 /* Assign the copy with index nr to node n */
489 static void set_unroll_copy(ir_node *n, int nr, ir_node *cp)
490 {
491         unrolling_node_info *info;
492         assert(nr != 0 && "0 reserved");
493
494         info = ir_nodemap_get(unrolling_node_info, &map, n);
495         if (! info) {
496                 ir_node **arr = NEW_ARR_D(ir_node*, &obst, unroll_nr);
497                 memset(arr, 0, unroll_nr * sizeof(ir_node*));
498
499                 info = OALLOCZ(&obst, unrolling_node_info);
500                 info->copies = arr;
501                 ir_nodemap_insert(&map, n, info);
502         }
503         /* Original node */
504         info->copies[0] = n;
505
506         info->copies[nr] = cp;
507 }
508
509 /* Returns a node's copy if it exists, else NULL. */
510 static ir_node *get_unroll_copy(ir_node *n, int nr)
511 {
512         ir_node             *cp;
513         unrolling_node_info *info = ir_nodemap_get(unrolling_node_info, &map, n);
514         if (! info)
515                 return NULL;
516
517         cp = info->copies[nr];
518         return cp;
519 }
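/* Layout note: copies[0] always holds the original node and copies[nr] the
 * nr-th copy, so once a copy has been registered for n,
 * get_unroll_copy(n, 0) yields n itself and get_unroll_copy(n, c) yields the
 * c-th copy or NULL if that copy does not exist (yet). */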
520
521
522 /***** Inversion Helper Functions *****/
523
524 /* Sets copy cp of node n. */
525 static void set_inversion_copy(ir_node *n, ir_node *cp)
526 {
527         ir_nodemap_insert(&map, n, cp);
528 }
529
530 /* Returns the inversion copy of n, or NULL if none exists. */
531 static ir_node *get_inversion_copy(ir_node *n)
532 {
533         ir_node *cp = ir_nodemap_get(ir_node, &map, n);
534         return cp;
535 }
536
537 /* Resets the block mark for the given node. For use with a walker. */
538 static void reset_block_mark(ir_node *node, void * env)
539 {
540         (void) env;
541
542         if (is_Block(node))
543                 set_Block_mark(node, 0);
544 }
545
546 /* Returns the mark of the node's block, or of the node itself if it is a block.
547  * Used in this context to determine whether the node is in the condition chain. */
548 static bool is_nodes_block_marked(const ir_node* node)
549 {
550         return get_Block_mark(get_block_const(node));
551 }
552
553 /* Extends a node's ins by the node newnode.
554  * NOTE: This is slow if a node n needs to be extended more than once. */
555 static void extend_irn(ir_node *n, ir_node *newnode, bool new_is_backedge)
556 {
557         int i;
558         int arity = get_irn_arity(n);
559         int new_arity = arity + 1;
560         ir_node **ins = XMALLOCN(ir_node*, new_arity);
561         bool     *bes = XMALLOCN(bool, new_arity);
562
563         /* save bes */
564         /* Bes are important!
565          * Another way would be recreating the looptree,
566          * but after that we cannot distinguish already processed loops
567          * from not yet processed ones. */
568         if (is_Block(n)) {
569                 for(i = 0; i < arity; ++i) {
570                         bes[i] = is_backedge(n, i);
571                 }
572                 bes[i] = new_is_backedge;
573         }
574
575         for(i = 0; i < arity; ++i) {
576                 ins[i] = get_irn_n(n, i);
577         }
578         ins[i] = newnode;
579
580         set_irn_in(n, new_arity, ins);
581
582         /* restore bes  */
583         if (is_Block(n)) {
584                 for(i = 0; i < new_arity; ++i) {
585                         if (bes[i])
586                                 set_backedge(n, i);
587                 }
588         }
589         free(ins);
590         free(bes);
591 }
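/* Worked example (illustrative): for a block B with ins {A, C}, where the
 * edge from C is a backedge, extend_irn(B, D, false) rebuilds the ins as
 * {A, C, D}, restores the backedge flag on position 1 and leaves the new
 * position 2 as a normal edge. */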
592
593 /* Extends a block by a copy of its pred at pos,
594  * also fixing the phis in the same way. */
595 static void extend_ins_by_copy(ir_node *block, int pos)
596 {
597         ir_node *new_in;
598         ir_node *phi;
599         ir_node *pred;
600         assert(is_Block(block));
601
602         /* Extend block by copy of definition at pos */
603         pred = get_irn_n(block, pos);
604         new_in = get_inversion_copy(pred);
605         DB((dbg, LEVEL_5, "Extend block %N by %N cp of %N\n", block, new_in, pred));
606         extend_irn(block, new_in, false);
607
608         /* Extend block phis by copy of definition at pos */
609         for_each_phi(block, phi) {
610                 ir_node *pred, *cp;
611
612                 pred = get_irn_n(phi, pos);
613                 cp = get_inversion_copy(pred);
614                 /* If the phi's in is not in the condition chain (e.g. a constant),
615                  * there is no copy. */
616                 if (cp == NULL)
617                         new_in = pred;
618                 else
619                         new_in = cp;
620
621                 DB((dbg, LEVEL_5, "Extend phi %N by %N cp of %N\n", phi, new_in, pred));
622                 extend_irn(phi, new_in, false);
623         }
624 }
625
626 /* Returns the number of the block's backedges, with or without alien backedges. */
627 static int get_backedge_n(ir_node *block, bool with_alien)
628 {
629         int i;
630         int be_n = 0;
631         int arity = get_irn_arity(block);
632
633         assert(is_Block(block));
634
635         for (i = 0; i < arity; ++i) {
636                 ir_node *pred = get_irn_n(block, i);
637                 if (is_backedge(block, i) && (with_alien || is_in_loop(pred)))
638                         ++be_n;
639         }
640         return be_n;
641 }
642
643 /* Returns a raw copy of the given node.
644  * Attributes are kept/set according to the needs of loop inversion. */
645 static ir_node *copy_node(ir_node *node)
646 {
647         int i, arity;
648         ir_node *cp;
649
650         cp = exact_copy(node);
651         arity = get_irn_arity(node);
652
653         /* Keep backedge info */
654         for (i = 0; i < arity; ++i) {
655                 if (is_backedge(node, i))
656                         set_backedge(cp, i);
657         }
658
659         if (is_Block(cp)) {
660                 set_Block_mark(cp, 0);
661         }
662
663         return cp;
664 }
665
666
667 /**
668  * This walker copies all walked nodes.
669  * If the walk_condition is true for a node, it is copied.
670  * All nodes node_info->copy have to be NULL prior to every walk.
671  * Order of ins is important for later usage.
672  */
673 static void copy_walk(ir_node *node, walker_condition *walk_condition,
674                       ir_loop *set_loop)
675 {
676         int i;
677         int arity;
678         ir_node *cp;
679         ir_node **cpin;
680         ir_graph *irg = current_ir_graph;
681
682         /**
683          * break condition and cycle resolver, creating temporary node copies
684          */
685         if (get_irn_visited(node) >= get_irg_visited(irg)) {
686                 /* Here we rely on the node's copy being initialized with NULL */
687                 DB((dbg, LEVEL_5, "copy_walk: We have already visited %N\n", node));
688                 if (get_inversion_copy(node) == NULL) {
689                         cp = copy_node(node);
690                         set_inversion_copy(node, cp);
691
692                         DB((dbg, LEVEL_5, "The TEMP copy of %N is created %N\n", node, cp));
693                 }
694                 return;
695         }
696
697         /* Walk */
698         mark_irn_visited(node);
699
700         if (!is_Block(node)) {
701                 ir_node *pred = get_nodes_block(node);
702                 if (walk_condition(pred))
703                         DB((dbg, LEVEL_5, "walk block %N\n", pred));
704                 copy_walk(pred, walk_condition, set_loop);
705         }
706
707         arity = get_irn_arity(node);
708
709         NEW_ARR_A(ir_node *, cpin, arity);
710
711         for (i = 0; i < arity; ++i) {
712                 ir_node *pred = get_irn_n(node, i);
713
714                 if (walk_condition(pred)) {
715                         DB((dbg, LEVEL_5, "walk node %N\n", pred));
716                         copy_walk(pred, walk_condition, set_loop);
717                         cpin[i] = get_inversion_copy(pred);
718                         DB((dbg, LEVEL_5, "copy of %N gets new in %N which is copy of %N\n",
719                                                 node, get_inversion_copy(pred), pred));
720                 } else {
721                         cpin[i] = pred;
722                 }
723         }
724
725         /* copy node / finalize temp node */
726         if (get_inversion_copy(node) == NULL) {
727                 /* No temporary copy existent */
728                 cp = copy_node(node);
729                 set_inversion_copy(node, cp);
730                 DB((dbg, LEVEL_5, "The FINAL copy of %N is CREATED %N\n", node, cp));
731         } else {
732                 /* temporary copy already exists, but without correct ins */
733                 cp = get_inversion_copy(node);
734                 DB((dbg, LEVEL_5, "The FINAL copy of %N is EXISTENT %N\n", node, cp));
735         }
736
737         if (!is_Block(node)) {
738                 ir_node *cpblock = get_inversion_copy(get_nodes_block(node));
739
740                 set_nodes_block(cp, cpblock );
741                 if (is_Phi(cp))
742                         add_Block_phi(cpblock, cp);
743         }
744
745         /* Keeps phi list of temporary node. */
746         set_irn_in(cp, ARR_LEN(cpin), cpin);
747 }
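/* Note: a cycle may lead the walk back to a node whose predecessors are not
 * copied yet; the early visit then creates a "TEMP" copy without correct
 * ins, which is finalized with the copied predecessors once the recursion
 * returns to this point. */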
748
749 /**
750  * This walker copies all walked nodes.
751  * If the walk_condition is true for a node, it is copied.
752  * All nodes node_info->copy have to be NULL prior to every walk.
753  * Order of ins is important for later usage.
754  * Takes copy_index to link the copy at a specific index in the node map.
755  */
756 static void copy_walk_n(ir_node *node, walker_condition *walk_condition,
757                         int copy_index)
758 {
759         int i;
760         int arity;
761         ir_node *cp;
762         ir_node **cpin;
763
764         /**
765          * break condition and cycle resolver, creating temporary node copies
766          */
767         if (irn_visited(node)) {
768                 /* Here we rely on the node's copy being initialized with NULL */
769                 DB((dbg, LEVEL_5, "copy_walk: We have already visited %N\n", node));
770                 if (get_unroll_copy(node, copy_index) == NULL) {
771                         ir_node *u;
772                         u = copy_node(node);
773                         set_unroll_copy(node, copy_index, u);
774                         DB((dbg, LEVEL_5, "The TEMP unknown of %N is created %N\n", node, u));
775                 }
776                 return;
777         }
778
779         /* Walk */
780         mark_irn_visited(node);
781
782         if (!is_Block(node)) {
783                 ir_node *block = get_nodes_block(node);
784                 if (walk_condition(block))
785                         DB((dbg, LEVEL_5, "walk block %N\n", block));
786                 copy_walk_n(block, walk_condition, copy_index);
787         }
788
789         arity = get_irn_arity(node);
790         NEW_ARR_A(ir_node *, cpin, arity);
791
792         for (i = 0; i < arity; ++i) {
793                 ir_node *pred = get_irn_n(node, i);
794
795                 if (walk_condition(pred)) {
796                         DB((dbg, LEVEL_5, "walk node %N\n", pred));
797                         copy_walk_n(pred, walk_condition, copy_index);
798                         cpin[i] = get_unroll_copy(pred, copy_index);
799                 } else {
800                         cpin[i] = pred;
801                 }
802         }
803
804         /* copy node / finalize temp node */
805         cp = get_unroll_copy(node, copy_index);
806         if (cp == NULL || is_Unknown(cp)) {
807                 cp = copy_node(node);
808                 set_unroll_copy(node, copy_index, cp);
809                 DB((dbg, LEVEL_5, "The FINAL copy of %N is CREATED %N\n", node, cp));
810         } else {
811                 /* temporary copy already exists, but without correct ins */
813                 DB((dbg, LEVEL_5, "The FINAL copy of %N is EXISTENT %N\n", node, cp));
814         }
815
816         if (!is_Block(node)) {
817                 ir_node *cpblock = get_unroll_copy(get_nodes_block(node), copy_index);
818
819                 set_nodes_block(cp, cpblock );
820                 if (is_Phi(cp))
821                         add_Block_phi(cpblock, cp);
822         }
823
824         /* Keeps phi list of temporary node. */
825         set_irn_in(cp, ARR_LEN(cpin), cpin);
826 }
827
828 /* Removes all blocks with non-marked predecessors from the condition chain. */
829 static void unmark_not_allowed_cc_blocks(void)
830 {
831         size_t blocks = ARR_LEN(cc_blocks);
832         size_t i;
833
834         for(i = 0; i < blocks; ++i) {
835                 ir_node *block = cc_blocks[i];
836                 int a;
837                 int arity = get_irn_arity(block);
838
839                 /* Head is an exception. */
840                 if (block == loop_head)
841                         continue;
842
843                 for(a = 0; a < arity; ++a) {
844                         if (! is_nodes_block_marked(get_irn_n(block, a))) {
845                                 set_Block_mark(block, 0);
846                                 --inversion_blocks_in_cc;
847                                 DB((dbg, LEVEL_5, "Removed %N from cc (blocks in cc %d)\n",
848                                                 block, inversion_blocks_in_cc));
849
850                                 break;
851                         }
852                 }
853         }
854 }
855
856 /* Unmarks all cc blocks using cc_blocks except head.
857  * TODO: invert head for unrolling? */
858 static void unmark_cc_blocks(void)
859 {
860         size_t blocks = ARR_LEN(cc_blocks);
861         size_t i;
862
863         for(i = 0; i < blocks; ++i) {
864                 ir_node *block = cc_blocks[i];
865
866                 /* TODO Head is an exception. */
867                 /*if (block != loop_head)*/
868                 set_Block_mark(block, 0);
869         }
870         /*inversion_blocks_in_cc = 1;*/
871         inversion_blocks_in_cc = 0;
872
873         /* invalidate */
874         loop_info.cc_size = 0;
875 }
876
877 /**
878  * Populates head_entries with (node, pred_pos) tuples
879  * where the node's pred at pred_pos is in the cc but the node itself is not.
880  * Also finds df loops inside the cc.
881  * Head and condition chain blocks have been marked previously.
882  */
883 static void get_head_outs(ir_node *node, void *env)
884 {
885         int i;
886         int arity = get_irn_arity(node);
887         (void) env;
888
889         for (i = 0; i < arity; ++i) {
890                 if (!is_nodes_block_marked(node) && is_nodes_block_marked(get_irn_n(node, i))) {
891                         entry_edge entry;
892                         entry.node = node;
893                         entry.pos = i;
894                         /* Also saving the predecessor seems redundant, but becomes
895                          * necessary if its position changes before it is
896                          * dereferenced. */
897                         entry.pred = get_irn_n(node, i);
898                         ARR_APP1(entry_edge, cur_head_outs, entry);
899                 }
900         }
901
902         arity = get_irn_arity(loop_head);
903
904         /* Find df loops inside the cc */
905         if (is_Phi(node) && get_nodes_block(node) == loop_head) {
906                 for (i = 0; i < arity; ++i) {
907                         if (is_own_backedge(loop_head, i)) {
908                                 if (is_nodes_block_marked(get_irn_n(node, i))) {
909                                         entry_edge entry;
910                                         entry.node = node;
911                                         entry.pos = i;
912                                         entry.pred = get_irn_n(node, i);
913                                         ARR_APP1(entry_edge, head_df_loop, entry);
914                                         DB((dbg, LEVEL_5, "Found incc assignment node %N @%d is pred %N, graph %N %N\n",
915                                                         node, i, entry.pred, current_ir_graph, get_irg_start_block(current_ir_graph)));
916                                 }
917                         }
918                 }
919         }
920 }
921
922 /**
923  * Find condition chains, and add them to be inverted.
924  * A block belongs to the chain if a condition branches out of the loop.
925  * (Some blocks need to be removed once again.)
926  * Marks the given block if it belongs to the condition chain.
927  */
928 static void find_condition_chain(ir_node *block)
929 {
930         bool     mark     = false;
931         bool     has_be   = false;
932         bool     jmp_only = true;
933         unsigned nodes_n  = 0;
934
935         mark_irn_visited(block);
936
937         DB((dbg, LEVEL_5, "condition_chains for block %N\n", block));
938
939         /* Get node count */
940         foreach_out_edge_kind(block, edge, EDGE_KIND_NORMAL) {
941                 ++nodes_n;
942         }
943
944         /* Check if node count would exceed maximum cc size.
945          * TODO
946          * This is not optimal, as we search depth-first and break here,
947          * continuing with another subtree. */
948         if (loop_info.cc_size + nodes_n > opt_params.max_cc_size) {
949                 set_Block_mark(block, 0);
950                 return;
951         }
952
953         /* Check if block only has a jmp instruction. */
954         foreach_out_edge(block, edge) {
955                 ir_node *src = get_edge_src_irn(edge);
956
957                 if (!is_Block(src) && !is_Jmp(src)) {
958                         jmp_only = false;
959                 }
960         }
961
962         /* Check cf outs if one is leaving the loop,
963          * or if this node has a backedge. */
964         foreach_block_succ(block, edge) {
965                 ir_node *src = get_edge_src_irn(edge);
966                 int pos = get_edge_src_pos(edge);
967
968                 if (!is_in_loop(src))
969                         mark = true;
970
971                 /* Inverting blocks with backedge outs leads to a cf edge
972                  * from the inverted head, into the inverted head (skipping the body).
973                  * As the body becomes the new loop head,
974                  * this would introduce another loop in the existing loop.
975                  * This loop inversion cannot cope with this case. */
976                 if (is_backedge(src, pos)) {
977                         has_be = true;
978                         break;
979                 }
980         }
981
982         /* We need all predecessors to already belong to the condition chain.
983          * Example of wrong case:  * == in cc
984          *
985          *     Head*             ,--.
986          *    /|   \            B   |
987          *   / A*  B           /    |
988          *  / /\   /          ?     |
989          *   /   C*      =>      D  |
990          *      /  D           Head |
991          *     /               A  \_|
992          *                      C
993          */
994         /* Collect blocks containing only a Jmp.
995          * Do not collect blocks with backedge outs. */
996         if ((jmp_only || mark) && !has_be) {
997                 set_Block_mark(block, 1);
998                 ++inversion_blocks_in_cc;
999                 loop_info.cc_size += nodes_n;
1000                 DB((dbg, LEVEL_5, "block %N is part of condition chain\n", block));
1001                 ARR_APP1(ir_node *, cc_blocks, block);
1002         } else {
1003                 set_Block_mark(block, 0);
1004         }
1005
1006         foreach_block_succ(block, edge) {
1007                 ir_node *src = get_edge_src_irn( edge );
1008
1009                 if (is_in_loop(src) && ! irn_visited(src))
1010                         find_condition_chain(src);
1011         }
1012 }
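/* Source-level intuition (illustrative): in a loop such as
 *
 *   while (i < n) {
 *       if (p == NULL)
 *           break;
 *       ... body ...
 *   }
 *
 * the blocks testing i < n and p == NULL branch out of the loop and, together
 * with blocks containing only a Jmp, form the condition chain that loop
 * inversion duplicates in front of the loop. */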
1013
1014 /**
1015  * Rewires the copied condition chain. Removes backedges
1016  * as this condition chain is prior to the loop.
1017  * Copy of loop_head must have phi list and old (unfixed) backedge info of the loop head.
1018  * (loop_head is already fixed, we cannot rely on it.)
1019  */
1020 static void fix_copy_inversion(void)
1021 {
1022         ir_node *new_head;
1023         ir_node **ins;
1024         ir_node **phis;
1025         ir_node *phi, *next;
1026         ir_node *head_cp = get_inversion_copy(loop_head);
1027         ir_graph *irg    = get_irn_irg(head_cp);
1028         int arity        = get_irn_arity(head_cp);
1029         int backedges    = get_backedge_n(head_cp, false);
1030         int new_arity    = arity - backedges;
1031         int pos;
1032         int i;
1033
1034         NEW_ARR_A(ir_node *, ins, new_arity);
1035
1036         pos = 0;
1037         /* Remove block backedges */
1038         for(i = 0; i < arity; ++i) {
1039                 if (!is_backedge(head_cp, i))
1040                         ins[pos++] = get_irn_n(head_cp, i);
1041         }
1042
1043         new_head = new_r_Block(irg, new_arity, ins);
1044
1045         phis = NEW_ARR_F(ir_node *, 0);
1046
1047         for_each_phi_safe(get_Block_phis(head_cp), phi, next) {
1048                 ir_node *new_phi;
1049                 NEW_ARR_A(ir_node *, ins, new_arity);
1050                 pos = 0;
1051                 for(i = 0; i < arity; ++i) {
1052                         if (!is_backedge(head_cp, i))
1053                                 ins[pos++] = get_irn_n(phi, i);
1054                 }
1055                 new_phi = new_rd_Phi(get_irn_dbg_info(phi),
1056                                 new_head, new_arity, ins,
1057                                 get_irn_mode(phi));
1058                 ARR_APP1(ir_node *, phis, new_phi);
1059         }
1060
1061         pos = 0;
1062         for_each_phi_safe(get_Block_phis(head_cp), phi, next) {
1063                 exchange(phi, phis[pos++]);
1064         }
1065
1066         exchange(head_cp, new_head);
1067
1068         DEL_ARR_F(phis);
1069 }
1070
1071
1072 /* Puts the original condition chain at the end of the loop,
1073  * subsequent to the body.
1074  * Relies on block phi list and correct backedges.
1075  */
1076 static void fix_head_inversion(void)
1077 {
1078         ir_node *new_head;
1079         ir_node **ins;
1080         ir_node *phi, *next;
1081         ir_node **phis;
1082         ir_graph *irg = get_irn_irg(loop_head);
1083         int arity     = get_irn_arity(loop_head);
1084         int backedges = get_backedge_n(loop_head, false);
1085         int new_arity = backedges;
1086         int pos;
1087         int i;
1088
1089         NEW_ARR_A(ir_node *, ins, new_arity);
1090
1091         pos = 0;
1092         /* Keep only backedges */
1093         for(i = 0; i < arity; ++i) {
1094                 if (is_own_backedge(loop_head, i))
1095                         ins[pos++] = get_irn_n(loop_head, i);
1096         }
1097
1098         new_head = new_r_Block(irg, new_arity, ins);
1099
1100         phis = NEW_ARR_F(ir_node *, 0);
1101
1102         for_each_phi(loop_head, phi) {
1103                 ir_node *new_phi;
1104                 DB((dbg, LEVEL_5, "Fixing phi %N of loop head\n", phi));
1105
1106                 NEW_ARR_A(ir_node *, ins, new_arity);
1107
1108                 pos = 0;
1109                 for (i = 0; i < arity; ++i) {
1110                         ir_node *pred = get_irn_n(phi, i);
1111
1112                         if (is_own_backedge(loop_head, i)) {
1113                                 /* If assignment is in the condition chain,
1114                                  * we need to create a phi in the new loop head.
1115                                  * This can only happen for df, not cf. See find_condition_chain. */
1116                                 /*if (is_nodes_block_marked(pred)) {
1117                                         ins[pos++] = pred;
1118                                 } else {*/
1119                                 ins[pos++] = pred;
1120
1121                         }
1122                 }
1123
1124                 new_phi = new_rd_Phi(get_irn_dbg_info(phi),
1125                         new_head, new_arity, ins,
1126                         get_irn_mode(phi));
1127
1128                 ARR_APP1(ir_node *, phis, new_phi);
1129
1130                 DB((dbg, LEVEL_5, "fix inverted head should exch %N by %N (pos %d)\n", phi, new_phi, pos ));
1131         }
1132
1133         pos = 0;
1134         for_each_phi_safe(get_Block_phis(loop_head), phi, next) {
1135                 DB((dbg, LEVEL_5, "fix inverted exch phi %N by %N\n", phi, phis[pos]));
1136                 if (phis[pos] != phi)
1137                         exchange(phi, phis[pos++]);
1138         }
1139
1140         DEL_ARR_F(phis);
1141
1142         DB((dbg, LEVEL_5, "fix inverted head exch head block %N by %N\n", loop_head, new_head));
1143         exchange(loop_head, new_head);
1144 }
1145
1146 /* Does the loop inversion.  */
1147 static void inversion_walk(ir_graph *irg, entry_edge *head_entries)
1148 {
1149         size_t i;
1150
1151         /*
1152          * The order of rewiring bottom-up is crucial.
1153          * Any change of the order leads to lost information that would be needed later.
1154          */
1155
1156         ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
1157
1158         /* 1. clone condition chain */
1159         inc_irg_visited(irg);
1160
1161         for (i = 0; i < ARR_LEN(head_entries); ++i) {
1162                 entry_edge entry = head_entries[i];
1163                 ir_node *pred = get_irn_n(entry.node, entry.pos);
1164
1165                 DB((dbg, LEVEL_5, "\nInit walk block %N\n", pred));
1166
1167                 copy_walk(pred, is_nodes_block_marked, cur_loop);
1168         }
1169
1170         ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
1171
1172         /* 2. Extend the ins of the head's control flow successors
1173          *    with the definitions of the copied head node. */
1174         for (i = 0; i < ARR_LEN(head_entries); ++i) {
1175                 entry_edge head_out = head_entries[i];
1176
1177                 if (is_Block(head_out.node))
1178                         extend_ins_by_copy(head_out.node, head_out.pos);
1179         }
1180
1181         /* 3. construct_ssa for users of definitions in the condition chain,
1182          *    as there is now a second definition. */
1183         for (i = 0; i < ARR_LEN(head_entries); ++i) {
1184                 entry_edge head_out = head_entries[i];
1185
1186                 /* Ignore keepalives */
1187                 if (is_End(head_out.node))
1188                         continue;
1189
1190                 /* Construct ssa for assignments in the condition chain. */
1191                 if (!is_Block(head_out.node)) {
1192                         ir_node *pred, *cppred, *block, *cpblock;
1193
1194                         pred = head_out.pred;
1195                         cppred = get_inversion_copy(pred);
1196                         block = get_nodes_block(pred);
1197                         cpblock = get_nodes_block(cppred);
1198                         construct_ssa(block, pred, cpblock, cppred);
1199                 }
1200         }
1201
1202         /*
1203          * If there is an assignment in the condition chain
1204          * with a user also in the condition chain,
1205          * the dominance frontier is in the new loop head.
1206          * The dataflow loop is completely in the condition chain.
1207          * Goal:
1208          *  To be wired: >|
1209          *
1210          *  | ,--.   |
1211          * Phi_cp |  | copied condition chain
1212          * >| |   |  |
1213          * >| ?__/   |
1214          * >| ,-.
1215          *  Phi* |   | new loop head with newly created phi.
1216          *   |   |
1217          *  Phi  |   | original, inverted condition chain
1218          *   |   |   |
1219          *   ?__/    |
1220          *
1221          */
1222         for (i = 0; i < ARR_LEN(head_df_loop); ++i) {
1223                 entry_edge head_out = head_df_loop[i];
1224
1225                 /* Construct ssa for assignments in the condition chain. */
1226                 ir_node *pred, *cppred, *block, *cpblock;
1227
1228                 pred = head_out.pred;
1229                 cppred = get_inversion_copy(pred);
1230                 assert(cppred && pred);
1231                 block = get_nodes_block(pred);
1232                 cpblock = get_nodes_block(cppred);
1233                 construct_ssa(block, pred, cpblock, cppred);
1234         }
1235
1236         /* 4. Remove the ins which are not backedges from the original condition chain
1237          *    as the cc is now subsequent to the body. */
1238         fix_head_inversion();
1239
1240         /* 5. Remove the backedges of the copied condition chain,
1241          *    because it is going to be the new 'head' in advance to the loop. */
1242         fix_copy_inversion();
1243
1244 }
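/* Overall effect (sketch): loop inversion turns a while-like loop into an
 * if-guarded do-while-like loop, roughly
 *
 *   while (c) { body; }
 *
 * becomes
 *
 *   if (c) {
 *       do { body; } while (c);
 *   }
 *
 * with the copied condition chain acting as the guarding if and the original
 * chain moved behind the body. */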
1245
1246 /* Performs loop inversion of cur_loop if possible and reasonable. */
1247 static void loop_inversion(ir_graph *irg)
1248 {
1249         int      loop_depth;
1250         unsigned max_loop_nodes = opt_params.max_loop_size;
1251         unsigned max_loop_nodes_adapted;
1252         int      depth_adaption = opt_params.depth_adaption;
1253
1254         bool do_inversion = true;
1255
1256         /* Depth of 0 is the procedure and 1 a topmost loop. */
1257         loop_depth = get_loop_depth(cur_loop) - 1;
1258
1259         /* The adaption is calculated in percent. */
1260         max_loop_nodes_adapted = get_max_nodes_adapted(loop_depth);
1261
1262         DB((dbg, LEVEL_1, "max_nodes: %d\nmax_nodes_adapted %d at depth of %d (adaption %d)\n",
1263                         max_loop_nodes, max_loop_nodes_adapted, loop_depth, depth_adaption));
1264
1265         if (loop_info.nodes == 0)
1266                 return;
1267
1268         if (loop_info.nodes > max_loop_nodes) {
1269                 /* Only for stats */
1270                 DB((dbg, LEVEL_1, "Nodes %d > allowed nodes %d\n",
1271                         loop_info.nodes, max_loop_nodes));
1272                 ++stats.too_large;
1273                 /* no RETURN */
1274                 /* Adaption might change it */
1275         }
1276
1277         /* Limit processing to loops smaller than given parameter. */
1278         if (loop_info.nodes > max_loop_nodes_adapted) {
1279                 DB((dbg, LEVEL_1, "Nodes %d > allowed nodes (depth %d adapted) %d\n",
1280                         loop_info.nodes, loop_depth, max_loop_nodes_adapted));
1281                 ++stats.too_large_adapted;
1282                 return;
1283         }
1284
1285         if (loop_info.calls > opt_params.allowed_calls) {
1286                 DB((dbg, LEVEL_1, "Calls %d > allowed calls %d\n",
1287                         loop_info.calls, opt_params.allowed_calls));
1288                 ++stats.calls_limit;
1289                 return;
1290         }
1291
1292         /*inversion_head_node_limit = INT_MAX;*/
1293         ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK);
1294
1295         /* Reset block marks.
1296          * We use block marks to flag blocks of the original condition chain. */
1297         irg_walk_graph(irg, reset_block_mark, NULL, NULL);
1298
1299         /*loop_info.blocks = get_loop_n_blocks(cur_loop);*/
1300         cond_chain_entries = NEW_ARR_F(entry_edge, 0);
1301         head_df_loop = NEW_ARR_F(entry_edge, 0);
1302
1303         /*head_inversion_node_count = 0;*/
1304         inversion_blocks_in_cc = 0;
1305
1306         /* Use a nodemap to keep copies of nodes from the condition chain. */
1307         ir_nodemap_init(&map, irg);
1308         obstack_init(&obst);
1309
1310         /* Search for condition chains and temporarily save the blocks in an array. */
1311         cc_blocks = NEW_ARR_F(ir_node *, 0);
1312         inc_irg_visited(irg);
1313         find_condition_chain(loop_head);
1314
1315         unmark_not_allowed_cc_blocks();
1316         DEL_ARR_F(cc_blocks);
1317
1318         /* Condition chain too large.
1319          * Loop should better be small enough to fit into the cache. */
1320         /* TODO Of course, we should take a small enough cc in the first place,
1321          * which is not that simple. (bin packing)  */
1322         if (loop_info.cc_size > opt_params.max_cc_size) {
1323                 ++stats.cc_limit_reached;
1324
1325                 do_inversion = false;
1326
1327                 /* Unmark cc blocks except the head.
1328                  * Invert head only for possible unrolling. */
1329                 unmark_cc_blocks();
1330         }
1331
1332         /* We also catch endless loops here,
1333          * because they do not have a condition chain. */
1334         if (inversion_blocks_in_cc < 1) {
1335                 do_inversion = false;
1336                 DB((dbg, LEVEL_3,
1337                         "Loop contains %d (less than 1) invertible blocks => No Inversion done.\n",
1338                         inversion_blocks_in_cc));
1339         }
1340
1341         if (do_inversion) {
1342                 cur_head_outs = NEW_ARR_F(entry_edge, 0);
1343
1344                 /* Get all edges pointing into the condition chain. */
1345                 irg_walk_graph(irg, get_head_outs, NULL, NULL);
1346
1347                 /* Do the inversion */
1348                 inversion_walk(irg, cur_head_outs);
1349
1350                 DEL_ARR_F(cur_head_outs);
1351
1352                 /* Duplicated blocks changed doms */
1353                 clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
1354                                    | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
1355
1356                 ++stats.inverted;
1357         }
1358
1359         /* free */
1360         obstack_free(&obst, NULL);
1361         ir_nodemap_destroy(&map);
1362         DEL_ARR_F(cond_chain_entries);
1363         DEL_ARR_F(head_df_loop);
1364
1365         ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK);
1366 }
1367
1368 /* Fix the original loop_head's ins for the invariant unrolling case. */
1369 static void unrolling_fix_loop_head_inv(void)
1370 {
1371         ir_node *ins[2];
1372         ir_node *phi;
1373         ir_node *proj = new_Proj(loop_info.duff_cond, mode_X, 0);
1374         ir_node *head_pred = get_irn_n(loop_head, loop_info.be_src_pos);
1375         ir_node *loop_condition = get_unroll_copy(head_pred, unroll_nr - 1);
1376
1377         /* The original loop_head's ins are:
1378          * duff block and the own backedge */
1379
1380         ins[0] = loop_condition;
1381         ins[1] = proj;
1382         set_irn_in(loop_head, 2, ins);
1383         DB((dbg, LEVEL_4, "Rewire ins of block loophead %N to pred %N and duffs entry %N \n" , loop_head, ins[0], ins[1]));
1384
1385         for_each_phi(loop_head, phi) {
1386                 ir_node *pred = get_irn_n(phi, loop_info.be_src_pos);
1387                 /* TODO we think it is a phi, but for Mergesort it is not the case.*/
1388
1389                 ir_node *last_pred = get_unroll_copy(pred, unroll_nr - 1);
1390
1391                 ins[0] = last_pred;
1392                 ins[1] = (ir_node*)get_irn_link(phi);
1393                 set_irn_in(phi, 2, ins);
1394                 DB((dbg, LEVEL_4, "Rewire ins of loophead phi %N to pred %N and duffs entry %N \n" , phi, ins[0], ins[1]));
1395         }
1396 }
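/* Note (interpretation): for invariant unrolling, loop_info.duff_cond is
 * assumed to deliver a mode_X selector whose Projs choose, Duff's-device
 * style, at which copy the unrolled body is entered; the Proj 0 used above
 * routes control back into the original loop head. */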
1397
1398 /* Removes previously created phis with only 1 in. */
1399 static void correct_phis(ir_node *node, void *env)
1400 {
1401         (void)env;
1402
1403         if (is_Phi(node) && get_irn_arity(node) == 1) {
1404                 ir_node *exch;
1405                 ir_node *in[1];
1406
1407                 in[0] = get_irn_n(node, 0);
1408
1409                 exch = new_rd_Phi(get_irn_dbg_info(node),
1410                     get_nodes_block(node), 1, in,
1411                         get_irn_mode(node));
1412
1413                 exchange(node, exch);
1414         }
1415 }
1416
1417 /* Unrolling: Rewire floating copies. */
1418 static void place_copies(int copies)
1419 {
1420         ir_node *loophead = loop_head;
1421         size_t i;
1422         int c;
1423         int be_src_pos = loop_info.be_src_pos;
1424
1425         /* Serialize loops by fixing their head ins.
1426          * Processed are the copies.
1427          * The original loop is done after that, to keep backedge infos. */
1428         for (c = 0; c < copies; ++c) {
1429                 ir_node *upper = get_unroll_copy(loophead, c);
1430                 ir_node *lower = get_unroll_copy(loophead, c + 1);
1431                 ir_node *phi;
1432                 ir_node *topmost_be_block = get_nodes_block(get_irn_n(loophead, be_src_pos));
1433
1434                 /* Important: get the preds first and then their copy. */
1435                 ir_node *upper_be_block = get_unroll_copy(topmost_be_block, c);
1436                 ir_node *new_jmp = new_r_Jmp(upper_be_block);
1437                 DB((dbg, LEVEL_5, " place_copies upper %N lower %N\n", upper, lower));
1438
1439                 DB((dbg, LEVEL_5, "topmost be block %N \n", topmost_be_block));
1440
1441                 if (loop_info.unroll_kind == constant) {
1442                         ir_node *ins[1];
1443                         ins[0] = new_jmp;
1444                         set_irn_in(lower, 1, ins);
1445
1446                         for_each_phi(loophead, phi) {
1447                                 ir_node *topmost_def = get_irn_n(phi, be_src_pos);
1448                                 ir_node *upper_def = get_unroll_copy(topmost_def, c);
1449                                 ir_node *lower_phi = get_unroll_copy(phi, c + 1);
1450
1451                                 /* It is possible, that the value used
1452                                  * in the OWN backedge path is NOT defined in this loop. */
1453                                 if (is_in_loop(topmost_def))
1454                                         ins[0] = upper_def;
1455                                 else
1456                                         ins[0] = topmost_def;
1457
1458                                 set_irn_in(lower_phi, 1, ins);
1459                                 /* Need to replace phis with 1 in later. */
1460                         }
1461                 } else {
1462                         /* Invariant case */
1463                         /* Every node has 2 ins. One from the duff blocks
1464                          * and one from the previously unrolled loop. */
1465                         ir_node *ins[2];
1466                         /* Calculate corresponding projection of mod result for this copy c */
1467                         ir_node *proj = new_Proj(loop_info.duff_cond, mode_X, unroll_nr - c - 1);
1468                         DB((dbg, LEVEL_4, "New duff proj %N\n" , proj));
1469
1470                         ins[0] = new_jmp;
1471                         ins[1] = proj;
1472                         set_irn_in(lower, 2, ins);
1473                         DB((dbg, LEVEL_4, "Rewire ins of Block %N to pred %N and duffs entry %N \n" , lower, ins[0], ins[1]));
1474
1475                         for_each_phi(loophead, phi) {
1476                                 ir_node *topmost_phi_pred = get_irn_n(phi, be_src_pos);
1477                                 ir_node *upper_phi_pred;
1478                                 ir_node *lower_phi;
1479                                 ir_node *duff_phi;
1480
1481                                 lower_phi = get_unroll_copy(phi, c + 1);
1482                                 duff_phi = (ir_node*)get_irn_link(phi);
1483                                 DB((dbg, LEVEL_4, "DD Link of %N is %N\n" , phi, duff_phi));
1484
1485                                 /* Use the copied pred if it is defined inside the loop. */
1486                                 if (is_in_loop(topmost_phi_pred)) {
1487                                         upper_phi_pred = get_unroll_copy(topmost_phi_pred, c);
1488                                 } else {
1489                                         upper_phi_pred = topmost_phi_pred;
1490                                 }
1491
1492                                 ins[0] = upper_phi_pred;
1493                                 ins[1] = duff_phi;
1494                                 set_irn_in(lower_phi, 2, ins);
1495                                 DB((dbg, LEVEL_4, "Rewire ins of %N to pred %N and duffs entry %N \n" , lower_phi, ins[0], ins[1]));
1496                         }
1497                 }
1498         }
1499
1500         /* Reconnect last copy. */
1501         for (i = 0; i < ARR_LEN(loop_entries); ++i) {
1502                 entry_edge edge = loop_entries[i];
1503                 /* Last copy is at the bottom */
1504                 ir_node *new_pred = get_unroll_copy(edge.pred, copies);
1505                 set_irn_n(edge.node, edge.pos, new_pred);
1506         }
1507
1508         /* Fix the original loop's head.
1509          * Done at the end, as the ins and backedge info were needed before. */
1510         if (loop_info.unroll_kind == constant) {
1511                 ir_node *phi;
1512                 ir_node *head_pred = get_irn_n(loop_head, be_src_pos);
1513                 ir_node *loop_condition = get_unroll_copy(head_pred, unroll_nr - 1);
1514
1515                 set_irn_n(loop_head, loop_info.be_src_pos, loop_condition);
1516
1517                 for_each_phi(loop_head, phi) {
1518                         ir_node *pred = get_irn_n(phi, be_src_pos);
1519                         ir_node *last_pred;
1520
1521                         /* It is possible that the value used
1522                          * in the OWN backedge path is NOT assigned in this loop. */
1523                         if (is_in_loop(pred))
1524                                 last_pred = get_unroll_copy(pred, copies);
1525                         else
1526                                 last_pred = pred;
1527                         set_irn_n(phi, be_src_pos, last_pred);
1528                 }
1529
1530         } else {
1531                 unrolling_fix_loop_head_inv();
1532         }
1533 }
1534
1535 /* Copies the cur_loop several times. */
1536 static void copy_loop(entry_edge *cur_loop_outs, int copies)
1537 {
1538         int c;
1539
1540         ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1541
1542         for (c = 0; c < copies; ++c) {
1543                 size_t i;
1544
1545                 inc_irg_visited(current_ir_graph);
1546
1547                 DB((dbg, LEVEL_5, "         ### Copy_loop  copy nr: %d ###\n", c));
1548                 for (i = 0; i < ARR_LEN(cur_loop_outs); ++i) {
1549                         entry_edge entry = cur_loop_outs[i];
1550                         ir_node *pred = get_irn_n(entry.node, entry.pos);
1551
1552                         copy_walk_n(pred, is_in_loop, c + 1);
1553                 }
1554         }
1555
1556         ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1557 }
1558
1559
1560 /* Creates a new phi from the given phi node omitting own bes,
1561  * using be_block as supplier of backedge information. */
1562 static ir_node *clone_phis_sans_bes(ir_node *phi, ir_node *be_block, ir_node *dest_block)
1563 {
1564         ir_node **ins;
1565         int arity = get_irn_arity(phi);
1566         int i, c = 0;
1567         ir_node *newphi;
1568
1569         assert(get_irn_arity(phi) == get_irn_arity(be_block));
1570         assert(is_Phi(phi));
1571
1572         ins = NEW_ARR_F(ir_node *, arity);
1573         for (i = 0; i < arity; ++i) {
1574                 if (! is_own_backedge(be_block, i)) {
1575                         ins[c] = get_irn_n(phi, i);
1576                         ++c;
1577                 }
1578         }
1579
1580         newphi = new_r_Phi(dest_block, c, ins, get_irn_mode(phi));
1581
1582         set_irn_link(phi, newphi);
1583         DB((dbg, LEVEL_4, "Linking for duffs device %N to %N\n", phi, newphi));
1584
1585         return newphi;
1586 }
1587
1588 /* Creates a new block from the given block node omitting own bes,
1589  * using be_block as supplier of backedge information. */
1590 static ir_node *clone_block_sans_bes(ir_node *node, ir_node *be_block)
1591 {
1592         int arity = get_irn_arity(node);
1593         int i, c = 0;
1594         ir_node **ins;
1595
1596         assert(get_irn_arity(node) == get_irn_arity(be_block));
1597         assert(is_Block(node));
1598
1599         NEW_ARR_A(ir_node *, ins, arity);
1600         for (i = 0; i < arity; ++i) {
1601                 if (! is_own_backedge(be_block, i)) {
1602                         ins[c] = get_irn_n(node, i);
1603                         ++c;
1604                 }
1605         }
1606
1607         return new_Block(c, ins);
1608 }
1609
1610 /* Creates a structure that calculates the absolute value of node op.
1611  * Returns the Mux node yielding the absolute value. */
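/* For an operand x this builds, roughly,
 *   abs(x) = (x < 0) ? -x : x
 * as Mux(Cmp(x, 0, less), x, Minus(x)). */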
1612 static ir_node *new_Abs(ir_node *op, ir_mode *mode)
1613 {
1614         ir_graph *irg      = get_irn_irg(op);
1615         ir_node  *block    = get_nodes_block(op);
1616         ir_node  *zero     = new_r_Const(irg, get_mode_null(mode));
1617         ir_node  *cmp      = new_r_Cmp(block, op, zero, ir_relation_less);
1618         ir_node  *minus_op = new_r_Minus(block, op, mode);
1619         ir_node  *mux      = new_r_Mux(block, cmp, op, minus_op, mode);
1620
1621         return mux;
1622 }
1623
1624
1625 /* Creates blocks for duffs device, using previously obtained
1626  * information about the iv.
1627  * TODO split */
1628 static void create_duffs_block(void)
1629 {
1630         ir_mode *mode;
1631
1632         ir_node *block1, *count_block, *duff_block;
1633         ir_node *ems, *ems_mod, *ems_div, *ems_mod_proj, *cmp_null,
1634                 *ems_mode_cond, *x_true, *x_false, *const_null;
1635         ir_node *true_val, *false_val;
1636         ir_node *ins[2];
1637
1638         ir_node *duff_mod, *proj, *cond;
1639
1640         ir_node *count, *correction, *unroll_c;
1641         ir_node *cmp_bad_count, *good_count, *bad_count, *count_phi, *bad_count_neg;
1642         ir_node *phi;
1643
1644         mode = get_irn_mode(loop_info.end_val);
1645         const_null = new_Const(get_mode_null(mode));
1646
1647         /* TODO naming
1648          * 1. Calculate a first approximation of the count.
1649          *    Condition: (end - start) % step == 0 */
1650         block1 = clone_block_sans_bes(loop_head, loop_head);
1651         DB((dbg, LEVEL_4, "Duff block 1 %N\n", block1));
1652
1653         /* Create loop entry phis in the first duff block,
1654          * as it becomes the loop's preheader. */
1655         for_each_phi(loop_head, phi) {
1656                 /* Returns the phi's pred if the phi would have arity 1 */
1657                 ir_node *new_phi = clone_phis_sans_bes(phi, loop_head, block1);
1658
1659                 DB((dbg, LEVEL_4, "HEAD %N phi %N\n", loop_head, phi));
1660                 DB((dbg, LEVEL_4, "BLOCK1 %N phi %N\n", block1, new_phi));
1661         }
1662
1663         ems = new_r_Sub(block1, loop_info.end_val, loop_info.start_val,
1664                 get_irn_mode(loop_info.end_val));
1665         DB((dbg, LEVEL_4, "BLOCK1 sub %N\n", ems));
1666
1667
1668         ems = new_Sub(loop_info.end_val, loop_info.start_val,
1669                 get_irn_mode(loop_info.end_val));
1670
1671         DB((dbg, LEVEL_4, "mod ins %N %N\n", ems, loop_info.step));
1672         ems_mod = new_r_Mod(block1,
1673                 new_NoMem(),
1674                 ems,
1675                 loop_info.step,
1676                 mode,
1677                 op_pin_state_pinned);
1678         ems_div = new_r_Div(block1,
1679                 new_NoMem(),
1680                 ems,
1681                 loop_info.step,
1682                 mode,
1683                 op_pin_state_pinned);
1684
1685         DB((dbg, LEVEL_4, "New Mod node %N\n", ems_mod));
1686
1687         ems_mod_proj = new_r_Proj(ems_mod, mode_Iu, pn_Mod_res);
1688         cmp_null = new_r_Cmp(block1, ems_mod_proj, const_null, ir_relation_less);
1689         ems_mode_cond = new_r_Cond(block1, cmp_null);
1690
1691         /* ems % step == 0 */
1692         x_true = new_r_Proj(ems_mode_cond, mode_X, pn_Cond_true);
1693         /* ems % step != 0 */
1694         x_false = new_r_Proj(ems_mode_cond, mode_X, pn_Cond_false);
1695
1696         /* 2. Second block.
1697          * Assures that duffs device receives a valid count.
1698          * Condition:
1699          *     decreasing: count < 0
1700          *     increasing: count > 0
1701          */
1702         ins[0] = x_true;
1703         ins[1] = x_false;
1704
1705         count_block = new_Block(2, ins);
1706         DB((dbg, LEVEL_4, "Duff block 2 %N\n", count_block));
1707
1708
1709         /* Increase the loop-taken count depending on whether the loop
1710          * condition compares against the latest iv value. */
1711         if (loop_info.latest_value == 1) {
1712                 /* ems % step == 0 :  +0 */
1713                 true_val = new_Const(get_mode_null(mode));
1714                 /* ems % step != 0 :  +1 */
1715                 false_val = new_Const(get_mode_one(mode));
1716         } else {
1717                 ir_tarval *tv_two = new_tarval_from_long(2, mode);
1718                 /* ems % step == 0 :  +1 */
1719                 true_val = new_Const(get_mode_one(mode));
1720                 /* ems % step != 0 :  +2 */
1721                 false_val = new_Const(tv_two);
1722         }
1723
1724         ins[0] = true_val;
1725         ins[1] = false_val;
1726
1727         correction = new_r_Phi(count_block, 2, ins, mode);
1728
1729         count = new_r_Proj(ems_div, mode, pn_Div_res);
1730
1731         /* (end - start) / step  +  correction */
1732         count = new_Add(count, correction, mode);
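        /* E.g. at run time with start = 0, end = 10, step = 3 and a condition
         * testing the already stepped iv: (10 - 0) % 3 != 0 gives correction = 1,
         * so count = 10 / 3 + 1 = 4 loop passes. */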
1733
1734         /* We preconditioned the loop to be tail-controlled.
1735          * So, if count is something 'wrong' like 0 or
1736          * negative/positive (depending on the step direction),
1737          * we take the loop once (tail-controlled) and leave it
1738          * to the existing condition to break out. */
1739
1740         /* Depending on step direction, we have to check for > or < 0 */
1741         if (loop_info.decreasing == 1) {
1742                 cmp_bad_count = new_r_Cmp(count_block, count, const_null,
1743                                           ir_relation_less);
1744         } else {
1745                 cmp_bad_count = new_r_Cmp(count_block, count, const_null,
1746                                           ir_relation_greater);
1747         }
1748
1749         bad_count_neg = new_r_Cond(count_block, cmp_bad_count);
1750         good_count = new_Proj(bad_count_neg, mode_X, pn_Cond_true);
1751         bad_count = new_Proj(ems_mode_cond, mode_X, pn_Cond_false);
1752
1753         /* 3. Duff Block
1754          *    Contains the Mod node deciding which loop copy to start from. */
1755
1756         ins[0] = good_count;
1757         ins[1] = bad_count;
1758         duff_block = new_Block(2, ins);
1759         DB((dbg, LEVEL_4, "Duff block 3 %N\n", duff_block));
1760
1761         /* Get absolute value */
1762         ins[0] = new_Abs(count, mode);
1763         /* Manually feed the aforementioned count = 1 (bad case) */
1764         ins[1] = new_Const(get_mode_one(mode));
1765         count_phi = new_r_Phi(duff_block, 2, ins, mode);
1766
1767         unroll_c = new_Const(new_tarval_from_long((long)unroll_nr, mode));
1768
1769         /* count % unroll_nr */
1770         duff_mod = new_r_Mod(duff_block,
1771                 new_NoMem(),
1772                 count_phi,
1773                 unroll_c,
1774                 mode,
1775                 op_pin_state_pinned);
1776
1777
1778         proj = new_Proj(duff_mod, mode, pn_Mod_res);
1779         /* The Cond is NOT automatically created in the block of the proj! */
1780         cond = new_r_Cond(duff_block, proj);
1781
1782         loop_info.duff_cond = cond;
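        /* Roughly, the blocks built above compute
         *   count     = (end - start) / step + correction;
         *   count     = bad_count ? 1 : abs(count);
         *   duff_cond = Cond(count % unroll_nr);
         * which place_copies() uses to select the copy to enter. */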
1783 }
1784
1785 /* Returns 1 if the given node is not in the loop,
1786  * or if it is a phi of the loop head with only loop-invariant defs.
1787  */
1788 static unsigned is_loop_invariant_def(ir_node *node)
1789 {
1790         int i;
1791
1792         if (! is_in_loop(node)) {
1793                 DB((dbg, LEVEL_4, "Not in loop %N\n", node));
1794                 /* || is_Const(node) || is_SymConst(node)) {*/
1795                 return 1;
1796         }
1797
1798         /* If this is a phi of the loophead shared by more than 1 loop,
1799          * we need to check if all defs are not in the loop.  */
1800         if (is_Phi(node)) {
1801                 ir_node *block;
1802                 block = get_nodes_block(node);
1803
1804                 /* To prevent unexpected situations. */
1805                 if (block != loop_head) {
1806                         return 0;
1807                 }
1808
1809                 for (i = 0; i < get_irn_arity(node); ++i) {
1810                         /* Check if all bes are just loopbacks. */
1811                         if (is_own_backedge(block, i) && get_irn_n(node, i) != node)
1812                                 return 0;
1813                 }
1814                 DB((dbg, LEVEL_4, "invar %N\n", node));
1815                 return 1;
1816         }
1817         DB((dbg, LEVEL_4, "Not invar %N\n", node));
1818
1819         return 0;
1820 }
1821
1822 /* Returns 1 if one pred of node is invariant and the other is not.
1823  * invar_pred and other are set analogously. */
1824 static unsigned get_invariant_pred(ir_node *node, ir_node **invar_pred, ir_node **other)
1825 {
1826         ir_node *pred0 = get_irn_n(node, 0);
1827         ir_node *pred1 = get_irn_n(node, 1);
1828
1829         *invar_pred = NULL;
1830         *other = NULL;
1831
1832         if (is_loop_invariant_def(pred0)) {
1833                 DB((dbg, LEVEL_4, "pred0 invar %N\n", pred0));
1834                 *invar_pred = pred0;
1835                 *other = pred1;
1836         }
1837
1838         if (is_loop_invariant_def(pred1)) {
1839                 DB((dbg, LEVEL_4, "pred1 invar %N\n", pred1));
1840
1841                 if (*invar_pred != NULL) {
1842                         /* RETURN. We do not want both preds to be invariant. */
1843                         return 0;
1844                 }
1845
1846                 *other = pred0;
1847                 *invar_pred = pred1;
1848                 return 1;
1849         } else {
1850                 DB((dbg, LEVEL_4, "pred1 not invar %N\n", pred1));
1851
1852                 if (*invar_pred != NULL)
1853                         return 1;
1854                 else
1855                         return 0;
1856         }
1857 }
1858
1859 /* Starts from a phi that may belong to an iv.
1860  * If an add forms a cycle with iteration_phi
1861  * and the add uses a constant, 1 is returned,
1862  * and 'start' as well as 'add' are sane. */
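/* The expected pattern is, roughly,
 *   iteration_phi = Phi(start_val, add)      (start_val over a non-backedge)
 *   add           = Add(iteration_phi, step) (fed back over the own backedge). */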
1863 static unsigned get_start_and_add(ir_node *iteration_phi, unrolling_kind_flag role)
1864 {
1865         int i;
1866         ir_node *found_add = loop_info.add;
1867         int arity = get_irn_arity(iteration_phi);
1868
1869         DB((dbg, LEVEL_4, "Find start and add from %N\n", iteration_phi));
1870
1871         for (i = 0; i < arity; ++i) {
1872
1873                 /* Find start_val which needs to be pred of the iteration_phi.
1874                  * If start_val already known, sanity check. */
1875                 if (!is_backedge(get_nodes_block(loop_info.iteration_phi), i)) {
1876                         ir_node *found_start_val = get_irn_n(loop_info.iteration_phi, i);
1877
1878                         DB((dbg, LEVEL_4, "found_start_val %N\n", found_start_val));
1879
1880                         /* If we already found a start_val, it always has to be the same. */
1881                         if (loop_info.start_val && found_start_val != loop_info.start_val)
1882                                 return 0;
1883
1884                         if ((role == constant) && !(is_SymConst(found_start_val) || is_Const(found_start_val)))
1885                                 return 0;
1886                         else if ((role == invariant) && !(is_loop_invariant_def(found_start_val)))
1887                                 return 0;
1888
1889                         loop_info.start_val = found_start_val;
1890                 }
1891
1892                 /* The phi has to be in the loop head.
1893                  * Follow all own backedges. Every value supplied from these preds of the phi
1894                  * needs to origin from the same add. */
1895                 if (is_own_backedge(get_nodes_block(loop_info.iteration_phi), i)) {
1896                         ir_node *new_found = get_irn_n(loop_info.iteration_phi,i);
1897
1898                         DB((dbg, LEVEL_4, "is add? %N\n", new_found));
1899
1900                         if (! (is_Add(new_found) || is_Sub(new_found)) || (found_add && found_add != new_found))
1901                                 return 0;
1902                         else
1903                                 found_add = new_found;
1904                 }
1905         }
1906
1907         loop_info.add = found_add;
1908
1909         return 1;
1910 }
1911
1912
1913 /* Returns 1 if one pred of node is a const value and the other is not.
1914  * const_pred and other are set analogously. */
1915 static unsigned get_const_pred(ir_node *node, ir_node **const_pred, ir_node **other)
1916 {
1917         ir_node *pred0 = get_irn_n(node, 0);
1918         ir_node *pred1 = get_irn_n(node, 1);
1919
1920         DB((dbg, LEVEL_4, "Checking for constant pred of %N\n", node));
1921
1922         *const_pred = NULL;
1923         *other = NULL;
1924
1925         /*DB((dbg, LEVEL_4, "is %N const\n", pred0));*/
1926         if (is_Const(pred0) || is_SymConst(pred0)) {
1927                 *const_pred = pred0;
1928                 *other = pred1;
1929         }
1930
1931         /*DB((dbg, LEVEL_4, "is %N const\n", pred1));*/
1932         if (is_Const(pred1) || is_SymConst(pred1)) {
1933                 if (*const_pred != NULL) {
1934                         /* RETURN. We do not want both preds to be constant. */
1935                         return 0;
1936                 }
1937
1938                 *other = pred0;
1939                 *const_pred = pred1;
1940         }
1941
1942         if (*const_pred == NULL)
1943                 return 0;
1944         else
1945                 return 1;
1946 }
1947
1948 /* Returns 1 if the loop exits within 2 further steps of the iv.
1949  * norm_proj is the relation for which we stay in the loop. */
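/* E.g. norm_proj = less, end = 10, step = 3, stepped = 9:
 * 9 < 10 still stays in the loop, next = 12 fails the condition,
 * so count_tar is increased once and 1 is returned. */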
1950 static unsigned simulate_next(ir_tarval **count_tar,
1951                 ir_tarval *stepped, ir_tarval *step_tar, ir_tarval *end_tar,
1952                 ir_relation norm_proj)
1953 {
1954         ir_tarval *next;
1955
1956         DB((dbg, LEVEL_4, "Loop taken if (stepped)%ld %s (end)%ld ",
1957                                 get_tarval_long(stepped),
1958                                 get_relation_string((norm_proj)),
1959                                 get_tarval_long(end_tar)));
1960         DB((dbg, LEVEL_4, "comparing latest value %d\n", loop_info.latest_value));
1961
1962         /* If current iv does not stay in the loop,
1963          * this run satisfied the exit condition. */
1964         if (! (tarval_cmp(stepped, end_tar) & norm_proj))
1965                 return 1;
1966
1967         DB((dbg, LEVEL_4, "Result: (stepped)%ld IS %s (end)%ld\n",
1968                                 get_tarval_long(stepped),
1969                                 get_relation_string(tarval_cmp(stepped, end_tar)),
1970                                 get_tarval_long(end_tar)));
1971
1972         /* next step */
1973         if (is_Add(loop_info.add))
1974                 next = tarval_add(stepped, step_tar);
1975         else
1976                 /* sub */
1977                 next = tarval_sub(stepped, step_tar, get_irn_mode(loop_info.end_val));
1978
1979         DB((dbg, LEVEL_4, "Loop taken if %ld %s %ld ",
1980                                 get_tarval_long(next),
1981                                 get_relation_string(norm_proj),
1982                                 get_tarval_long(end_tar)));
1983         DB((dbg, LEVEL_4, "comparing latest value %d\n", loop_info.latest_value));
1984
1985         /* Increase steps. */
1986         *count_tar = tarval_add(*count_tar, get_tarval_one(get_tarval_mode(*count_tar)));
1987
1988         /* Next has to fail the loop condition, or we will never exit. */
1989         if (! (tarval_cmp(next, end_tar) & norm_proj))
1990                 return 1;
1991         else
1992                 return 0;
1993 }
1994
1995 /* Check if loop meets requirements for a 'simple loop':
1996  * - Exactly one cf out
1997  * - Allowed calls
1998  * - Max nodes after unrolling
1999  * - tail-controlled
2000  * - exactly one be
2001  * - cmp
2002  * Returns Projection of cmp node or NULL; */
2003 static ir_node *is_simple_loop(void)
2004 {
2005         int arity, i;
2006         ir_node *loop_block, *exit_block, *projx, *cond, *cmp;
2007
2008         /* Maximum of one condition, and no endless loops. */
2009         if (loop_info.cf_outs != 1)
2010                 return NULL;
2011
2012         DB((dbg, LEVEL_4, "1 loop exit\n"));
2013
2014         /* Calculate maximum unroll_nr keeping node count below limit. */
2015         loop_info.max_unroll = (int)((double)opt_params.max_unrolled_loop_size / (double)loop_info.nodes);
2016         if (loop_info.max_unroll < 2) {
2017                 ++stats.too_large;
2018                 return NULL;
2019         }
2020
2021         DB((dbg, LEVEL_4, "maximum unroll factor %u, to not exceed node limit \n",
2022                 opt_params.max_unrolled_loop_size));
2023
2024         arity = get_irn_arity(loop_head);
2025         /* RETURN if we have more than 1 be. */
2026         /* Get my backedges without alien bes. */
2027         loop_block = NULL;
2028         for (i = 0; i < arity; ++i) {
2029                 ir_node *pred = get_irn_n(loop_head, i);
2030                 if (is_own_backedge(loop_head, i)) {
2031                         if (loop_block)
2032                                 /* Our simple loops may have only one backedge. */
2033                                 return NULL;
2034                         else {
2035                                 loop_block = get_nodes_block(pred);
2036                                 loop_info.be_src_pos = i;
2037                         }
2038                 }
2039         }
2040
2041         DB((dbg, LEVEL_4, "loop has 1 own backedge.\n"));
2042
2043         exit_block = get_nodes_block(loop_info.cf_out.pred);
2044         /* The loop has to be tail-controlled.
2045          * This can be changed/improved,
2046          * but we would need a duff iv. */
2047         if (exit_block != loop_block)
2048                 return NULL;
2049
2050         DB((dbg, LEVEL_4, "tail-controlled loop.\n"));
2051
2052         /* find value on which loop exit depends */
2053         projx = loop_info.cf_out.pred;
2054         cond = get_irn_n(projx, 0);
2055         cmp = get_irn_n(cond, 0);
2056
2057         if (!is_Cmp(cmp))
2058                 return NULL;
2059
2060         DB((dbg, LEVEL_5, "projection is %s\n", get_relation_string(get_Cmp_relation(cmp))));
2061
2062         switch (get_Proj_proj(projx)) {
2063                 case pn_Cond_false:
2064                         loop_info.exit_cond = 0;
2065                         break;
2066                 case pn_Cond_true:
2067                         loop_info.exit_cond = 1;
2068                         break;
2069                 default:
2070                         panic("Cond Proj_proj other than true/false");
2071         }
2072
2073         DB((dbg, LEVEL_4, "Valid Cmp.\n"));
2074         return cmp;
2075 }
2076
2077 /* Returns 1 if all nodes are mode_Iu or mode_Is. */
2078 static unsigned are_mode_I(ir_node *n1, ir_node* n2, ir_node *n3)
2079 {
2080         ir_mode *m1 = get_irn_mode(n1);
2081         ir_mode *m2 = get_irn_mode(n2);
2082         ir_mode *m3 = get_irn_mode(n3);
2083
2084         if ((m1 == mode_Iu && m2 == mode_Iu && m3 == mode_Iu) ||
2085             (m1 == mode_Is && m2 == mode_Is && m3 == mode_Is))
2086                 return 1;
2087         else
2088                 return 0;
2089 }
2090
2091 /* Checks if cur_loop is a simple tail-controlled counting loop
2092  * with start and end value loop invariant, step constant. */
2093 static unsigned get_unroll_decision_invariant(void)
2094 {
2095
2096         ir_node   *projres, *loop_condition, *iteration_path;
2097         unsigned   success;
2098         ir_tarval *step_tar;
2099         ir_mode   *mode;
2100
2101
2102         /* RETURN if loop is not 'simple' */
2103         projres = is_simple_loop();
2104         if (projres == NULL)
2105                 return 0;
2106
2107         /* Use a minimal size for the invariant unrolled loop,
2108          * as duffs device produces overhead */
2109         if (loop_info.nodes < opt_params.invar_unrolling_min_size)
2110                 return 0;
2111
2112         loop_condition = get_irn_n(projres, 0);
2113
2114         success = get_invariant_pred(loop_condition, &loop_info.end_val, &iteration_path);
2115         DB((dbg, LEVEL_4, "pred invar %d\n", success));
2116
2117         if (! success)
2118                 return 0;
2119
2120         DB((dbg, LEVEL_4, "Invariant End_val %N, other %N\n", loop_info.end_val, iteration_path));
2121
2122         /* We may find the add or the phi first.
2123          * Until now we only have end_val. */
2124         if (is_Add(iteration_path) || is_Sub(iteration_path)) {
2125
2126                 loop_info.add = iteration_path;
2127                 DB((dbg, LEVEL_4, "Case 1: Got add %N (maybe not sane)\n", loop_info.add));
2128
2129                 /* Preds of the add should be step and the iteration_phi */
2130                 success = get_const_pred(loop_info.add, &loop_info.step, &loop_info.iteration_phi);
2131                 if (! success)
2132                         return 0;
2133
2134                 DB((dbg, LEVEL_4, "Got step %N\n", loop_info.step));
2135
2136                 if (! is_Phi(loop_info.iteration_phi))
2137                         return 0;
2138
2139                 DB((dbg, LEVEL_4, "Got phi %N\n", loop_info.iteration_phi));
2140
2141                 /* Find start_val.
2142                  * Does necessary sanity check of add, if it is already set.  */
2143                 success = get_start_and_add(loop_info.iteration_phi, invariant);
2144                 if (! success)
2145                         return 0;
2146
2147                 DB((dbg, LEVEL_4, "Got start A  %N\n", loop_info.start_val));
2148
2149         } else if (is_Phi(iteration_path)) {
2150                 ir_node *new_iteration_phi;
2151
2152                 loop_info.iteration_phi = iteration_path;
2153                 DB((dbg, LEVEL_4, "Case 2: Got phi %N\n", loop_info.iteration_phi));
2154
2155                 /* Find start_val and add-node.
2156                  * Does necessary sanity check of add, if it is already set.  */
2157                 success = get_start_and_add(loop_info.iteration_phi, invariant);
2158                 if (! success)
2159                         return 0;
2160
2161                 DB((dbg, LEVEL_4, "Got start B %N\n", loop_info.start_val));
2162                 DB((dbg, LEVEL_4, "Got add or sub %N\n", loop_info.add));
2163
2164                 success = get_const_pred(loop_info.add, &loop_info.step, &new_iteration_phi);
2165                 if (! success)
2166                         return 0;
2167
2168                 DB((dbg, LEVEL_4, "Got step (B) %N\n", loop_info.step));
2169
2170                 if (loop_info.iteration_phi != new_iteration_phi)
2171                         return 0;
2172
2173         } else {
2174                 return 0;
2175         }
2176
2177         mode = get_irn_mode(loop_info.end_val);
2178
2179         DB((dbg, LEVEL_4, "start %N, end %N, step %N\n",
2180                                 loop_info.start_val, loop_info.end_val, loop_info.step));
2181
2182         if (mode != mode_Is && mode != mode_Iu)
2183                 return 0;
2184
2185         /* TODO necessary? */
2186         if (!are_mode_I(loop_info.start_val, loop_info.step, loop_info.end_val))
2187                 return 0;
2188
2189         DB((dbg, LEVEL_4, "mode integer\n"));
2190
2191         step_tar = get_Const_tarval(loop_info.step);
2192
2193         if (tarval_is_null(step_tar)) {
2194                 /* TODO Might be worth a warning. */
2195                 return 0;
2196         }
2197
2198         DB((dbg, LEVEL_4, "step is not 0\n"));
2199
2200         create_duffs_block();
2201
2202         return loop_info.max_unroll;
2203 }
2204
2205 /* Returns unroll factor,
2206  * given maximum unroll factor and number of loop passes. */
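/* E.g. a loop taken 12 times with max_unroll 4: the preferred factor 6
 * exceeds the limit and is reduced to 3. */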
2207 static unsigned get_preferred_factor_constant(ir_tarval *count_tar)
2208 {
2209         ir_tarval *tar_6, *tar_5, *tar_4, *tar_3, *tar_2;
2210         unsigned prefer;
2211         ir_mode *mode = get_irn_mode(loop_info.end_val);
2212
2213         tar_6 = new_tarval_from_long(6, mode);
2214         tar_5 = new_tarval_from_long(5, mode);
2215         tar_4 = new_tarval_from_long(4, mode);
2216         tar_3 = new_tarval_from_long(3, mode);
2217         tar_2 = new_tarval_from_long(2, mode);
2218
2219         /* loop passes % {6, 5, 4, 3, 2} == 0  */
2220         if (tarval_is_null(tarval_mod(count_tar, tar_6)))
2221                 prefer = 6;
2222         else if (tarval_is_null(tarval_mod(count_tar, tar_5)))
2223                 prefer = 5;
2224         else if (tarval_is_null(tarval_mod(count_tar, tar_4)))
2225                 prefer = 4;
2226         else if (tarval_is_null(tarval_mod(count_tar, tar_3)))
2227                 prefer = 3;
2228         else if (tarval_is_null(tarval_mod(count_tar, tar_2)))
2229                 prefer = 2;
2230         else {
2231                 /* gcd(max_unroll, count_tar) */
2232                 int a = loop_info.max_unroll;
2233                 int b = (int)get_tarval_long(count_tar);
2234                 int c;
2235
2236                 DB((dbg, LEVEL_4, "gcd of max_unroll %d and count_tar %d: ", a, b));
2237
2238                 do {
2239                         c = a % b;
2240                         a = b; b = c;
2241                 } while (c != 0);
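                /* E.g. count_tar = 49, max_unroll = 14:
                 * 14 % 49 = 14, 49 % 14 = 7, 14 % 7 = 0  =>  gcd = 7. */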
2242
2243                 DB((dbg, LEVEL_4, "%d\n", a));
2244                 return a;
2245         }
2246
2247         DB((dbg, LEVEL_4, "preferred unroll factor %d\n", prefer));
2248
2249         /*
2250          * If our preference is greater than the allowed unroll factor
2251          * we could either reduce the preferred factor and avoid a duffs device block,
2252          * or create a duffs device block, whose start loop we would know at
2253          * compile time in this case (constants only).
2254          * The latter yields graph B below,
2255          * but for code generation we would want to use graph A.
2256          * The graphs are equivalent, so we can only reduce the preferred factor.
2257          * A)                   B)
2258          *     PreHead             PreHead
2259          *        |      ,--.         |   ,--.
2260          *         \ Loop1   \        Loop2   \
2261          *          \  |     |       /  |     |
2262          *           Loop2   /      / Loop1   /
2263          *           |   `--'      |      `--'
2264          */
2265          */
2266         if (prefer <= loop_info.max_unroll)
2267                 return prefer;
2268         else {
2269                 switch (prefer) {
2270                         case 6:
2271                                 if (loop_info.max_unroll >= 3)
2272                                         return 3;
2273                                 else if (loop_info.max_unroll >= 2)
2274                                         return 2;
2275                                 else
2276                                         return 0;
2277
2278                         case 4:
2279                                 if (loop_info.max_unroll >= 2)
2280                                         return 2;
2281                                 else
2282                                         return 0;
2283
2284                         default:
2285                                 return 0;
2286                 }
2287         }
2288 }
2289
2290 /* Check if cur_loop is a simple counting loop.
2291  * Start, step and end are constants.
2292  * TODO The whole constant case should use procedures similar to
2293  * the invariant case, as they are more versatile. */
2294 /* TODO split. */
2295 static unsigned get_unroll_decision_constant(void)
2296 {
2297         ir_node     *cmp, *iteration_path;
2298         unsigned     success, is_latest_val;
2299         ir_tarval   *start_tar, *end_tar, *step_tar, *diff_tar, *count_tar;
2300         ir_tarval   *stepped;
2301         ir_relation  proj_proj, norm_proj;
2302         ir_mode     *mode;
2303
2304         /* RETURN if loop is not 'simple' */
2305         cmp = is_simple_loop();
2306         if (cmp == NULL)
2307                 return 0;
2308
2309         /* One in of the loop condition needs to be loop invariant. => end_val
2310          * The other in is assigned by an add. => add
2311          * The add uses a loop invariant value => step
2312          * and a phi with a loop invariant start_val and the add node as ins.
2313
2314            ^   ^
2315            |   | .-,
2316            |   Phi |
2317                 \  |   |
2318           ^  Add   |
2319            \  | \__|
2320             cond
2321              /\
2322         */
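        /* A matching source loop would be, roughly,
         *     i = start;                   start_val: Const/SymConst
         *     do { ...; i += step; }       add/sub with constant step
         *     while (i < end);             Cmp against constant end_val */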
2323
2324         success = get_const_pred(cmp, &loop_info.end_val, &iteration_path);
2325         if (! success)
2326                 return 0;
2327
2328         DB((dbg, LEVEL_4, "End_val %N, other %N\n", loop_info.end_val, iteration_path));
2329
2330         /* We may find the add or the phi first.
2331          * Until now we only have end_val. */
2332         if (is_Add(iteration_path) || is_Sub(iteration_path)) {
2333
2334                 /* We test against the latest value of the iv. */
2335                 is_latest_val = 1;
2336
2337                 loop_info.add = iteration_path;
2338                 DB((dbg, LEVEL_4, "Case 2: Got add %N (maybe not sane)\n", loop_info.add));
2339
2340                 /* Preds of the add should be step and the iteration_phi */
2341                 success = get_const_pred(loop_info.add, &loop_info.step, &loop_info.iteration_phi);
2342                 if (! success)
2343                         return 0;
2344
2345                 DB((dbg, LEVEL_4, "Got step %N\n", loop_info.step));
2346
2347                 if (! is_Phi(loop_info.iteration_phi))
2348                         return 0;
2349
2350                 DB((dbg, LEVEL_4, "Got phi %N\n", loop_info.iteration_phi));
2351
2352                 /* Find start_val.
2353                  * Does necessary sanity check of add, if it is already set.  */
2354                 success = get_start_and_add(loop_info.iteration_phi, constant);
2355                 if (! success)
2356                         return 0;
2357
2358                 DB((dbg, LEVEL_4, "Got start %N\n", loop_info.start_val));
2359
2360         } else if (is_Phi(iteration_path)) {
2361                 ir_node *new_iteration_phi;
2362
2363                 /* We compare with the value the iv had entering this run. */
2364                 is_latest_val = 0;
2365
2366                 loop_info.iteration_phi = iteration_path;
2367                 DB((dbg, LEVEL_4, "Case 1: Got phi %N \n", loop_info.iteration_phi));
2368
2369                 /* Find start_val and add-node.
2370                  * Does necessary sanity check of add, if it is already set.  */
2371                 success = get_start_and_add(loop_info.iteration_phi, constant);
2372                 if (! success)
2373                         return 0;
2374
2375                 DB((dbg, LEVEL_4, "Got start %N\n", loop_info.start_val));
2376                 DB((dbg, LEVEL_4, "Got add or sub %N\n", loop_info.add));
2377
2378                 success = get_const_pred(loop_info.add, &loop_info.step, &new_iteration_phi);
2379                 if (! success)
2380                         return 0;
2381
2382                 DB((dbg, LEVEL_4, "Got step %N\n", loop_info.step));
2383
2384                 if (loop_info.iteration_phi != new_iteration_phi)
2385                         return 0;
2386
2387         } else {
2388                 /* RETURN */
2389                 return 0;
2390         }
2391
2392         mode = get_irn_mode(loop_info.end_val);
2393
2394         DB((dbg, LEVEL_4, "start %N, end %N, step %N\n",
2395                                 loop_info.start_val, loop_info.end_val, loop_info.step));
2396
2397         if (mode != mode_Is && mode != mode_Iu)
2398                 return 0;
2399
2400         /* TODO necessary? */
2401         if (!are_mode_I(loop_info.start_val, loop_info.step, loop_info.end_val))
2402                 return 0;
2403
2404         DB((dbg, LEVEL_4, "mode integer\n"));
2405
2406         end_tar = get_Const_tarval(loop_info.end_val);
2407         start_tar = get_Const_tarval(loop_info.start_val);
2408         step_tar = get_Const_tarval(loop_info.step);
2409
2410         if (tarval_is_null(step_tar))
2411                 /* TODO Might be worth a warning. */
2412                 return 0;
2413
2414         DB((dbg, LEVEL_4, "step is not 0\n"));
2415
2416         if ((!tarval_is_negative(step_tar)) ^ (!is_Sub(loop_info.add)))
2417                 loop_info.decreasing = 1;
2418
2419         diff_tar = tarval_sub(end_tar, start_tar, mode);
2420
2421         /* We need at least count_tar steps to be close to end_val, maybe more.
2422          * There is no way we could have taken too many steps yet.
2423          * This represents the 'latest value'.
2424          * (Whether the condition checks against the latest value is checked later.) */
2425         count_tar = tarval_div(diff_tar, step_tar);
2426
2427         /* The iv will not pass end_val (except on overflow).
2428          * Nothing is done, as it would yield no advantage. */
2429         if (tarval_is_negative(count_tar)) {
2430                 DB((dbg, LEVEL_4, "Loop is endless or never taken."));
2431                 /* TODO Might be worth a warning. */
2432                 return 0;
2433         }
2434
2435         ++stats.u_simple_counting_loop;
2436
2437         loop_info.latest_value = is_latest_val;
2438
2439         /* TODO split here
2440         if (! is_simple_counting_loop(&count_tar))
2441                 return 0;
2442         */
2443
2444         /* stepped can be negative, if step < 0 */
2445         stepped = tarval_mul(count_tar, step_tar);
2446
2447         /* step as close to end_val as possible, */
2448         /* |stepped| <= |end_tar|, and dist(stepped, end_tar) is smaller than a step. */
2449         if (is_Sub(loop_info.add))
2450                 stepped = tarval_sub(start_tar, stepped, mode_Is);
2451         else
2452                 stepped = tarval_add(start_tar, stepped);
2453
2454         DB((dbg, LEVEL_4, "stepped to %ld\n", get_tarval_long(stepped)));
2455
2456         proj_proj = get_Cmp_relation(cmp);
2457         /* Assure that norm_proj is the stay-in-loop case. */
2458         if (loop_info.exit_cond == 1)
2459                 norm_proj = get_negated_relation(proj_proj);
2460         else
2461                 norm_proj = proj_proj;
2462
2463         DB((dbg, LEVEL_4, "normalized projection %s\n", get_relation_string(norm_proj)));
2464         /* Executed at most once (stay in counting loop if a Eq b) */
2465         if (norm_proj == ir_relation_equal)
2466                 /* TODO Might be worth a warning. */
2467                 return 0;
2468
2469         /* Calculate the next value and increase count_tar accordingly. */
2470         success = simulate_next(&count_tar, stepped, step_tar, end_tar, norm_proj);
2471         if (! success)
2472                 return 0;
2473
2474         /* We run loop once more, if we compare to the
2475          * not yet in-/decreased iv. */
2476         if (is_latest_val == 0) {
2477                 DB((dbg, LEVEL_4, "condition uses not latest iv value\n"));
2478                 count_tar = tarval_add(count_tar, get_tarval_one(mode));
2479         }
2480
2481         DB((dbg, LEVEL_4, "loop taken %ld times\n", get_tarval_long(count_tar)));
2482
2483         /* Assure the loop is taken at least 1 time. */
2484         if (tarval_is_null(count_tar)) {
2485                 /* TODO Might be worth a warning. */
2486                 return 0;
2487         }
2488
2489         loop_info.count_tar = count_tar;
2490         return get_preferred_factor_constant(count_tar);
2491 }
2492
2493 /**
2494  * Loop unrolling
2495  */
2496 static void unroll_loop(void)
2497 {
2498
2499         if (! (loop_info.nodes > 0))
2500                 return;
2501
2502         if (loop_info.nodes > opt_params.max_unrolled_loop_size) {
2503                 DB((dbg, LEVEL_2, "Nodes %d > allowed nodes %d\n",
2504                         loop_info.nodes, opt_params.max_unrolled_loop_size));
2505                 ++stats.too_large;
2506                 return;
2507         }
2508
2509         if (loop_info.calls > 0) {
2510                 DB((dbg, LEVEL_2, "Calls %d > allowed calls 0\n",
2511                         loop_info.calls));
2512                 ++stats.calls_limit;
2513                 return;
2514         }
2515
2516         unroll_nr = 0;
2517
2518         /* get_unroll_decision_constant and invariant are completely
2519          * independent for flexibility.
2520          * Some checks may be performed twice. */
2521
2522         /* constant case? */
2523         if (opt_params.allow_const_unrolling)
2524                 unroll_nr = get_unroll_decision_constant();
2525         if (unroll_nr > 1) {
2526                 loop_info.unroll_kind = constant;
2527
2528         } else {
2529                 /* invariant case? */
2530                 if (opt_params.allow_invar_unrolling)
2531                         unroll_nr = get_unroll_decision_invariant();
2532                 if (unroll_nr > 1)
2533                         loop_info.unroll_kind = invariant;
2534         }
2535
2536         DB((dbg, LEVEL_2, " *** Unrolling %d times ***\n", unroll_nr));
2537
2538         if (unroll_nr > 1) {
2539                 loop_entries = NEW_ARR_F(entry_edge, 0);
2540
2541                 /* Get loop outs */
2542                 irg_walk_graph(current_ir_graph, get_loop_entries, NULL, NULL);
2543
2544                 if (loop_info.unroll_kind == constant) {
2545                         if ((int)get_tarval_long(loop_info.count_tar) == unroll_nr)
2546                                 loop_info.needs_backedge = 0;
2547                         else
2548                                 loop_info.needs_backedge = 1;
2549                 } else {
2550                         loop_info.needs_backedge = 1;
2551                 }
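                /* E.g. a loop known to run exactly unroll_nr times needs no backedge;
                 * otherwise the unrolled body is re-entered via the backedge. */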
2552
2553                 /* Use a nodemap to store the copies of the nodes. */
2554                 ir_nodemap_init(&map, current_ir_graph);
2555                 obstack_init(&obst);
2556
2557                 /* Copies the loop */
2558                 copy_loop(loop_entries, unroll_nr - 1);
2559
2560                 /* Line up the floating copies. */
2561                 place_copies(unroll_nr - 1);
2562
2563                 /* Remove phis with 1 in
2564                  * If there were no nested phis, this would not be necessary.
2565                  * Avoiding the creation in the first place
2566                  * leads to complex special cases. */
2567                 irg_walk_graph(current_ir_graph, correct_phis, NULL, NULL);
2568
2569                 if (loop_info.unroll_kind == constant)
2570                         ++stats.constant_unroll;
2571                 else
2572                         ++stats.invariant_unroll;
2573
2574                 clear_irg_properties(current_ir_graph, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE);
2575
2576                 DEL_ARR_F(loop_entries);
2577                 obstack_free(&obst, NULL);
2578                 ir_nodemap_destroy(&map);
2579         }
2580
2581 }
2582
2583 /* Analyzes the loop, and checks if size is within allowed range.
2584  * Decides if loop will be processed. */
2585 static void init_analyze(ir_graph *irg, ir_loop *loop)
2586 {
2587         cur_loop = loop;
2588
2589         loop_head       = NULL;
2590         loop_head_valid = true;
2591
2592         /* Reset loop info */
2593         memset(&loop_info, 0, sizeof(loop_info_t));
2594
2595         DB((dbg, LEVEL_1, "    >>>> current loop %ld <<<<\n",
2596             get_loop_loop_nr(loop)));
2597
2598         /* Collect loop information: head, node counts. */
2599         irg_walk_graph(irg, get_loop_info, NULL, NULL);
2600
2601         /* RETURN if there is no valid head */
2602         if (!loop_head || !loop_head_valid) {
2603                 DB((dbg, LEVEL_1,   "No valid loop head. Nothing done.\n"));
2604                 return;
2605         } else {
2606                 DB((dbg, LEVEL_1,   "Loophead: %N\n", loop_head));
2607         }
2608
2609         if (loop_info.branches > opt_params.max_branches) {
2610                 DB((dbg, LEVEL_1, "Branches %d > allowed branches %d\n",
2611                         loop_info.branches, opt_params.max_branches));
2612                 ++stats.calls_limit;
2613                 return;
2614         }
2615
2616         switch (loop_op) {
2617                 case loop_op_inversion:
2618                         loop_inversion(irg);
2619                         break;
2620
2621                 case loop_op_unrolling:
2622                         unroll_loop();
2623                         break;
2624
2625                 default:
2626                         panic("Loop optimization not implemented.");
2627         }
2628         DB((dbg, LEVEL_1, "       <<<< end of loop %ld >>>>\n",
2629             get_loop_loop_nr(loop)));
2630 }
2631
2632 /* Find innermost loops and add them to loops. */
2633 static void find_innermost_loop(ir_loop *loop)
2634 {
2635         bool   had_sons   = false;
2636         size_t n_elements = get_loop_n_elements(loop);
2637         size_t e;
2638
2639         for (e = 0; e < n_elements; ++e) {
2640                 loop_element element = get_loop_element(loop, e);
2641                 if (*element.kind == k_ir_loop) {
2642                         find_innermost_loop(element.son);
2643                         had_sons = true;
2644                 }
2645         }
2646
2647         if (!had_sons) {
2648                 ARR_APP1(ir_loop*, loops, loop);
2649         }
2650 }
2651
2652 static void set_loop_params(void)
2653 {
2654         opt_params.max_loop_size = 100;
2655         opt_params.depth_adaption = -50;
2656         opt_params.count_phi = true;
2657         opt_params.count_proj = false;
2658         opt_params.allowed_calls = 0;
2659
2660         opt_params.max_cc_size = 5;
2661
2662
2663         opt_params.allow_const_unrolling = true;
2664         opt_params.allow_invar_unrolling = false;
2665
2666         opt_params.invar_unrolling_min_size = 20;
2667         opt_params.max_unrolled_loop_size = 400;
2668         opt_params.max_branches = 9999;
2669 }
2670
2671 /* Assure preconditions are met and go through all loops. */
2672 void loop_optimization(ir_graph *irg)
2673 {
2674         ir_loop *loop;
2675         size_t   i;
2676         size_t   n_elements;
2677
2678         assure_irg_properties(irg,
2679                 IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES
2680                 | IR_GRAPH_PROPERTY_CONSISTENT_OUTS
2681                 | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
2682
2683         set_loop_params();
2684
2685         /* Reset stats for this procedure */
2686         reset_stats();
2687
2688         /* Preconditions */
2689         set_current_ir_graph(irg);
2690
2691         ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
2692         collect_phiprojs(irg);
2693
2694         loop = get_irg_loop(irg);
2695
2696         loops = NEW_ARR_F(ir_loop *, 0);
2697         /* List all inner loops */
2698         n_elements = get_loop_n_elements(loop);
2699         for (i = 0; i < n_elements; ++i) {
2700                 loop_element element = get_loop_element(loop, i);
2701                 if (*element.kind != k_ir_loop)
2702                         continue;
2703                 find_innermost_loop(element.son);
2704         }
2705
2706         /* Set all links to NULL */
2707         irg_walk_graph(irg, reset_link, NULL, NULL);
2708
2709         for (i = 0; i < ARR_LEN(loops); ++i) {
2710                 ir_loop *loop = loops[i];
2711
2712                 ++stats.loops;
2713
2714                 /* Analyze and handle loop */
2715                 init_analyze(irg, loop);
2716
2717                 /* Copied blocks do not have their phi list yet */
2718                 collect_phiprojs(irg);
2719
2720                 /* Set links to NULL
2721                  * TODO Still necessary? */
2722                 irg_walk_graph(irg, reset_link, NULL, NULL);
2723         }
2724
2725         print_stats();
2726
2727         DEL_ARR_F(loops);
2728         ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
2729
2730         confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_NONE);
2731 }
2732
2733 void do_loop_unrolling(ir_graph *irg)
2734 {
2735         loop_op = loop_op_unrolling;
2736         loop_optimization(irg);
2737 }
2738
2739 void do_loop_inversion(ir_graph *irg)
2740 {
2741         loop_op = loop_op_inversion;
2742         loop_optimization(irg);
2743 }
2744
2745 void do_loop_peeling(ir_graph *irg)
2746 {
2747         loop_op = loop_op_peeling;
2748         loop_optimization(irg);
2749 }
2750
2751 ir_graph_pass_t *loop_inversion_pass(const char *name)
2752 {
2753         return def_graph_pass(name ? name : "loop_inversion", do_loop_inversion);
2754 }
2755
2756 ir_graph_pass_t *loop_unroll_pass(const char *name)
2757 {
2758         return def_graph_pass(name ? name : "loop_unroll", do_loop_unrolling);
2759 }
2760
2761 ir_graph_pass_t *loop_peeling_pass(const char *name)
2762 {
2763         return def_graph_pass(name ? name : "loop_peeling", do_loop_peeling);
2764 }
2765
2766 void firm_init_loop_opt(void)
2767 {
2768         FIRM_DBG_REGISTER(dbg, "firm.opt.loop");
2769 }