rename ir_phase to ir_nodemap and simplify interface
ir/opt/loop.c
1 /*
2  * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License.
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @author   Christian Helmer
23  * @brief    loop inversion and loop unrolling
24  *
25  * @version  $Id$
26  */
27 #include "config.h"
28
29 #include <stdbool.h>
30
31 #include "iroptimize.h"
32 #include "opt_init.h"
33 #include "irnode.h"
34 #include "debug.h"
35 #include "error.h"
36
37 #include "ircons.h"
38 #include "irgopt.h"
39 #include "irgmod.h"
40 #include "irgwalk.h"
41 #include "irouts.h"
42 #include "iredges.h"
43 #include "irtools.h"
44 #include "array_t.h"
45 #include "beutil.h"
46 #include "irpass.h"
47 #include "irdom.h"
48 #include "opt_manage.h"
49
50 #include <math.h>
51 #include "irbackedge_t.h"
52 #include "irnodemap.h"
53 #include "irloop_t.h"
54
55 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
56
57 /**
58  * Convenience macro for iterating over every phi node of the given block.
59  * Requires phi list per block.
60  */
61 #define for_each_phi(block, phi) \
62         for ((phi) = get_Block_phis( (block) ); (phi) ; (phi) = get_Phi_next((phi)))
63
64 #define for_each_phi_safe(head, phi, next) \
65         for ((phi) = (head), (next) = (head) ? get_Phi_next((head)) : NULL; \
66                         (phi) ; (phi) = (next), (next) = (next) ? get_Phi_next((next)) : NULL)
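/*
 * Usage sketch (illustrative only; assumes the block's phi list is up to
 * date, e.g. maintained via add_Block_phi as done throughout this file):
 *
 *     ir_node *phi;
 *     for_each_phi(block, phi) {
 *             handle_phi(phi);            // hypothetical helper
 *     }
 *
 * for_each_phi_safe additionally caches the next pointer, so the current
 * phi may be exchanged or removed while iterating.
 */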
67
68 /* Currently processed loop. */
69 static ir_loop *cur_loop;
70
71 /* Flag for kind of unrolling. */
72 typedef enum {
73         constant,
74         invariant
75 } unrolling_kind_flag;
76
77 /* Condition for visiting a node during copy_walk. */
78 typedef bool walker_condition(const ir_node *);
79
80 /* Node and position of a predecessor. */
81 typedef struct entry_edge {
82         ir_node *node;
83         int pos;
84         ir_node *pred;
85 } entry_edge;
86
87 /* Node info for unrolling. */
88 typedef struct unrolling_node_info {
89         ir_node **copies;
90 } unrolling_node_info;
91
93 /* Out edges of the condition chain / loop head. */
93 static entry_edge *cur_head_outs;
94
95 /* Information about the loop head */
96 static ir_node *loop_head       = NULL;
97 static bool     loop_head_valid = true;
98
99 /* List of all inner loops that are processed. */
100 static ir_loop **loops;
101
102 /* Stats */
103 typedef struct loop_stats_t {
104         unsigned loops;
105         unsigned inverted;
106         unsigned too_large;
107         unsigned too_large_adapted;
108         unsigned cc_limit_reached;
109         unsigned calls_limit;
110
111         unsigned u_simple_counting_loop;
112         unsigned constant_unroll;
113         unsigned invariant_unroll;
114
115         unsigned unhandled;
116 } loop_stats_t;
117
118 static loop_stats_t stats;
119
120 /* Reset stats to zero */
121 static void reset_stats(void)
122 {
123         memset(&stats, 0, sizeof(loop_stats_t));
124 }
125
126 /* Print stats */
127 static void print_stats(void)
128 {
129         DB((dbg, LEVEL_2, "---------------------------------------\n"));
130         DB((dbg, LEVEL_2, "loops             :   %d\n",stats.loops));
131         DB((dbg, LEVEL_2, "inverted          :   %d\n",stats.inverted));
132         DB((dbg, LEVEL_2, "too_large         :   %d\n",stats.too_large));
133         DB((dbg, LEVEL_2, "too_large_adapted :   %d\n",stats.too_large_adapted));
134         DB((dbg, LEVEL_2, "cc_limit_reached  :   %d\n",stats.cc_limit_reached));
135         DB((dbg, LEVEL_2, "calls_limit       :   %d\n",stats.calls_limit));
136         DB((dbg, LEVEL_2, "u_simple_counting :   %d\n",stats.u_simple_counting_loop));
137         DB((dbg, LEVEL_2, "constant_unroll   :   %d\n",stats.constant_unroll));
138         DB((dbg, LEVEL_2, "invariant_unroll  :   %d\n",stats.invariant_unroll));
139         DB((dbg, LEVEL_2, "=======================================\n"));
140 }
141
142 /* Commandline parameters */
143 typedef struct loop_opt_params_t {
144         unsigned max_loop_size;     /* Maximum number of nodes [nodes] */
145         int      depth_adaption;    /* Loop nest depth adaption [percent] */
146         unsigned allowed_calls;     /* Number of calls allowed [number] */
147         bool     count_phi;         /* Count phi nodes */
148         bool     count_proj;        /* Count projections */
149
150         unsigned max_cc_size;       /* Maximum condition chain size [nodes] */
151         unsigned max_branches;
152
153         unsigned max_unrolled_loop_size;    /* [nodes] */
154         bool     allow_const_unrolling;
155         bool     allow_invar_unrolling;
156         unsigned invar_unrolling_min_size;  /* [nodes] */
157
158 } loop_opt_params_t;
159
160 static loop_opt_params_t opt_params;
161
162 /* Loop analysis information */
163 typedef struct loop_info_t {
164         unsigned nodes;        /* node count */
165         unsigned ld_st;        /* load and store nodes */
166         unsigned branches;     /* number of conditions */
167         unsigned calls;        /* number of calls */
168         unsigned cf_outs;      /* number of cf edges which leave the loop */
169         entry_edge cf_out;     /* single loop leaving cf edge */
170         int be_src_pos;        /* position of the single own backedge in the head */
171
172         /* for inversion */
173         unsigned cc_size;      /* nodes in the condition chain */
174
175         /* for unrolling */
176         unsigned max_unroll;   /* Number of unrolls satisfying max_loop_size */
177         unsigned exit_cond;    /* 1 if condition==true exits the loop.  */
178         unsigned latest_value:1;    /* 1 if condition is checked against latest counter value */
179         unsigned needs_backedge:1;  /* 0 if loop is completely unrolled */
180         unsigned decreasing:1;      /* Step operation is_Sub, or step is<0 */
181
182         /* IV information of a simple loop */
183         ir_node *start_val;
184         ir_node *step;
185         ir_node *end_val;
186         ir_node *iteration_phi;
187         ir_node *add;
188
189         ir_tarval *count_tar;               /* Number of loop iterations */
190
191         ir_node *duff_cond;                 /* Duff mod */
192         unrolling_kind_flag unroll_kind;    /* constant or invariant unrolling */
193 } loop_info_t;
194
195 /* Information about the current loop */
196 static loop_info_t loop_info;
197
198 /* Blocks of the condition chain (loop inversion). */
199 static ir_node **cc_blocks;
200 /* df/cf edges with def in the condition chain */
201 static entry_edge *cond_chain_entries;
202 /* Array of df loops found in the condition chain. */
203 static entry_edge *head_df_loop;
204 /* Number of blocks in cc */
205 static unsigned inversion_blocks_in_cc;
206
207
208 /* Cf/df edges leaving the loop.
209  * Called entries here, as they are used to enter the loop with walkers. */
210 static entry_edge *loop_entries;
211 /* Number of unrolls to perform */
212 static int unroll_nr;
213 /* Node map and obstack used to keep copies of nodes. */
214 static ir_nodemap     map;
215 static struct obstack obst;
216
217 /* Loop operations.  */
218 typedef enum loop_op_t {
219         loop_op_inversion,
220         loop_op_unrolling,
221         loop_op_peeling
222 } loop_op_t;
223
224 /* Saves which loop operation to do until after basic tests. */
225 static loop_op_t loop_op;
226
227 /* Returns the maximum number of nodes allowed at the given nest depth */
228 static unsigned get_max_nodes_adapted(unsigned depth)
229 {
230         double perc = 100.0 + (double)opt_params.depth_adaption;
231         double factor = pow(perc / 100.0, depth);
232
233         return (unsigned)((double)opt_params.max_loop_size * factor);
234 }
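/*
 * Worked example (hypothetical parameter values): with max_loop_size = 100
 * and depth_adaption = -10, a loop at nest depth 2 gets
 * 100 * (90/100)^2 = 81 nodes as its adapted limit; a positive adaption
 * raises the limit for deeper nests instead.
 */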
235
236 /* Resets the node's link. For use with a walker. */
237 static void reset_link(ir_node *node, void *env)
238 {
239         (void)env;
240         set_irn_link(node, NULL);
241 }
242
243 /* Returns true iff the node's block is in cur_loop. */
244 static bool is_in_loop(const ir_node *node)
245 {
246         return get_irn_loop(get_block_const(node)) == cur_loop;
247 }
248
249 /* Returns true iff the given edge is a backedge
250  * whose predecessor is inside cur_loop. */
251 static bool is_own_backedge(const ir_node *n, int pos)
252 {
253         return is_backedge(n, pos) && is_in_loop(get_irn_n(n, pos));
254 }
255
256 /* Finds the loop head and collects loop_info such as node, call and branch counts. */
257 static void get_loop_info(ir_node *node, void *env)
258 {
259         bool node_in_loop = is_in_loop(node);
260         int i, arity;
261         (void)env;
262
263         /* collect some loop information */
264         if (node_in_loop) {
265                 if (is_Phi(node) && opt_params.count_phi)
266                         ++loop_info.nodes;
267                 else if (is_Proj(node) && opt_params.count_proj)
268                         ++loop_info.nodes;
269                 else if (!is_Confirm(node) && !is_Const(node) && !is_SymConst(node))
270                         ++loop_info.nodes;
271
272                 if (is_Load(node) || is_Store(node))
273                         ++loop_info.ld_st;
274
275                 if (is_Call(node))
276                         ++loop_info.calls;
277         }
278
279         arity = get_irn_arity(node);
280         for (i = 0; i < arity; i++) {
281                 ir_node *pred         = get_irn_n(node, i);
282                 bool     pred_in_loop = is_in_loop(pred);
283
284                 if (is_Block(node) && !node_in_loop && pred_in_loop) {
285                         entry_edge entry;
286                         entry.node = node;
287                         entry.pos = i;
288                         entry.pred = pred;
289                         /* Count cf outs */
290                         ++loop_info.cf_outs;
291                         loop_info.cf_out = entry;
292                 }
293
294                 /* Find the loop's head, i.e. the block with a cfgpred outside of the loop */
295                 if (is_Block(node)) {
296                         const ir_edge_t *edge;
297                         unsigned outs_n = 0;
298
299                         /* Count innerloop branches */
300                         foreach_out_edge_kind(node, edge, EDGE_KIND_BLOCK) {
301                                 ir_node *succ = get_edge_src_irn(edge);
302                                 if (is_Block(succ) && is_in_loop(succ))
303                                         ++outs_n;
304                         }
305                         if (outs_n > 1)
306                                 ++loop_info.branches;
307
308                         if (node_in_loop && !pred_in_loop && loop_head_valid) {
309                                 ir_node *cfgpred = get_Block_cfgpred(node, i);
310
311                                 if (!is_in_loop(cfgpred)) {
312                                         DB((dbg, LEVEL_5, "potential head %+F because inloop and pred %+F not inloop\n",
313                                                                 node, pred));
314                                         /* another head? We do not touch this. */
315                                         if (loop_head && loop_head != node) {
316                                                 loop_head_valid = false;
317                                         } else {
318                                                 loop_head = node;
319                                         }
320                                 }
321                         }
322                 }
323         }
324 }
325
326 /* Finds all edges with users outside of the loop
327  * and definition inside the loop. */
328 static void get_loop_entries(ir_node *node, void *env)
329 {
330         unsigned node_in_loop, pred_in_loop;
331         int i, arity;
332         (void) env;
333
334         arity = get_irn_arity(node);
335         for (i = 0; i < arity; ++i) {
336                 ir_node *pred = get_irn_n(node, i);
337
338                 pred_in_loop = is_in_loop(pred);
339                 node_in_loop = is_in_loop(node);
340
341                 if (pred_in_loop && !node_in_loop) {
342                         entry_edge entry;
343                         entry.node = node;
344                         entry.pos = i;
345                         entry.pred = pred;
346                         ARR_APP1(entry_edge, loop_entries, entry);
347                 }
348         }
349 }
350
351 /* ssa */
352 static ir_node *ssa_second_def;
353 static ir_node *ssa_second_def_block;
354
355 /**
356  * Walks the graph bottom-up, searching for definitions, and creates Phis where needed.
357  */
358 static ir_node *search_def_and_create_phis(ir_node *block, ir_mode *mode, int first)
359 {
360         int i;
361         int n_cfgpreds;
362         ir_graph *irg = get_irn_irg(block);
363         ir_node *phi;
364         ir_node **in;
365
366         DB((dbg, LEVEL_5, "ssa search_def_and_create_phis: block %N\n", block));
367
368         /* Prevents creation of phi that would be bad anyway.
369          * Dead and bad blocks. */
370         if (get_irn_arity(block) < 1 || is_Bad(block)) {
371                 DB((dbg, LEVEL_5, "ssa bad %N\n", block));
372                 return new_r_Bad(irg, mode);
373         }
374
375         if (block == ssa_second_def_block && !first) {
376                 DB((dbg, LEVEL_5, "ssa found second definition: use second def %N\n", ssa_second_def));
377                 return ssa_second_def;
378         }
379
380         /* already processed this block? */
381         if (irn_visited(block)) {
382                 ir_node *value = (ir_node *) get_irn_link(block);
383                 DB((dbg, LEVEL_5, "ssa already visited: use linked %N\n", value));
384                 return value;
385         }
386
387         assert(block != get_irg_start_block(irg));
388
389         /* a Block with only 1 predecessor needs no Phi */
390         n_cfgpreds = get_Block_n_cfgpreds(block);
391         if (n_cfgpreds == 1) {
392                 ir_node *pred_block = get_Block_cfgpred_block(block, 0);
393                 ir_node *value;
394
395                 DB((dbg, LEVEL_5, "ssa 1 pred: walk pred %N\n", pred_block));
396
397                 value = search_def_and_create_phis(pred_block, mode, 0);
398                 set_irn_link(block, value);
399                 mark_irn_visited(block);
400
401                 return value;
402         }
403
404         /* create a new Phi */
405         NEW_ARR_A(ir_node*, in, n_cfgpreds);
406         for (i = 0; i < n_cfgpreds; ++i)
407                 in[i] = new_r_Dummy(irg, mode);
408
409         phi = new_r_Phi(block, n_cfgpreds, in, mode);
410         /* Important: always keep block phi list up to date. */
411         add_Block_phi(block, phi);
412         DB((dbg, LEVEL_5, "ssa phi creation: link new phi %N to block %N\n", phi, block));
413         set_irn_link(block, phi);
414         mark_irn_visited(block);
415
416         /* set Phi predecessors */
417         for (i = 0; i < n_cfgpreds; ++i) {
418                 ir_node *pred_val;
419                 ir_node *pred_block = get_Block_cfgpred_block(block, i);
420                 assert(pred_block != NULL);
421                 pred_val = search_def_and_create_phis(pred_block, mode, 0);
422
423                 assert(pred_val != NULL);
424
425                 DB((dbg, LEVEL_5, "ssa phi pred:phi %N, pred %N\n", phi, pred_val));
426                 set_irn_n(phi, i, pred_val);
427         }
428
429         return phi;
430 }
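/*
 * Sketch of the lookup (illustrative): for a diamond where a definition d
 * reaches a user through two paths,
 *
 *         def_block (linked to d)
 *          /      \
 *      left        right        single-pred blocks just forward the value
 *          \      /
 *           join                a Phi(d, d) is created here
 *
 * visited blocks cache their value in the irn link, so every block is
 * resolved at most once per construct_ssa run.
 */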
431
432
433 /**
434  * Given a set of values this function constructs SSA-form for the users of the
435  * first value (the users are determined through the out-edges of the value).
436  * Works without using the dominance tree.
437  */
438 static void construct_ssa(ir_node *orig_block, ir_node *orig_val,
439                 ir_node *second_block, ir_node *second_val)
440 {
441         ir_graph *irg;
442         ir_mode *mode;
443         const ir_edge_t *edge;
444         const ir_edge_t *next;
445
446         assert(orig_block && orig_val && second_block && second_val &&
447                         "no parameter of construct_ssa may be NULL");
448
449         if (orig_val == second_val)
450                 return;
451
452         irg = get_irn_irg(orig_val);
453
454         ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
455         inc_irg_visited(irg);
456
457         mode = get_irn_mode(orig_val);
458         set_irn_link(orig_block, orig_val);
459         mark_irn_visited(orig_block);
460
461         ssa_second_def_block = second_block;
462         ssa_second_def       = second_val;
463
464         /* Only fix the users of the first, i.e. the original node */
465         foreach_out_edge_safe(orig_val, edge, next) {
466                 ir_node *user = get_edge_src_irn(edge);
467                 int j = get_edge_src_pos(edge);
468                 ir_node *user_block = get_nodes_block(user);
469                 ir_node *newval;
470
471                 /* ignore keeps */
472                 if (is_End(user))
473                         continue;
474
475                 DB((dbg, LEVEL_5, "original user %N\n", user));
476
477                 if (is_Phi(user)) {
478                         ir_node *pred_block = get_Block_cfgpred_block(user_block, j);
479                         newval = search_def_and_create_phis(pred_block, mode, 1);
480                 } else {
481                         newval = search_def_and_create_phis(user_block, mode, 1);
482                 }
483                 if (newval != user && !is_Bad(newval))
484                         set_irn_n(user, j, newval);
485         }
486
487         ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
488 }
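/*
 * Typical call pattern in this pass (a sketch, not an additional use): after
 * a definition `val` in `block` has been duplicated as `cp_val` in
 * `cp_block`, the former users of `val` may now be reached by two
 * definitions, so
 *
 *     construct_ssa(block, val, cp_block, cp_val);
 *
 * rewires those users to freshly created Phis where necessary (see the
 * calls in inversion_walk below).
 */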
489
490
491 /***** Unrolling Helper Functions *****/
492
493 /* Assign the copy with index nr to node n */
494 static void set_unroll_copy(ir_node *n, int nr, ir_node *cp)
495 {
496         unrolling_node_info *info;
497         assert(nr != 0 && "0 reserved");
498
499         info = (unrolling_node_info*)ir_nodemap_get(&map, n);
500         if (! info) {
501                 ir_node **arr = NEW_ARR_D(ir_node*, &obst, unroll_nr);
502                 memset(arr, 0, unroll_nr * sizeof(ir_node*));
503
504                 info = OALLOCZ(&obst, unrolling_node_info);
505                 info->copies = arr;
506                 ir_nodemap_insert(&map, n, info);
507         }
508         /* Original node */
509         info->copies[0] = n;
510
511         info->copies[nr] = cp;
512 }
513
514 /* Returns a node's copy if it exists, else NULL. */
515 static ir_node *get_unroll_copy(ir_node *n, int nr)
516 {
517         ir_node             *cp;
518         unrolling_node_info *info = (unrolling_node_info *)ir_nodemap_get(&map, n);
519         if (! info)
520                 return NULL;
521
522         cp = info->copies[nr];
523         return cp;
524 }
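/*
 * Layout sketch of the per-node copy array (e.g. for unroll_nr = 4):
 *
 *     copies[0] = n           the original node
 *     copies[1] = 1st copy    set via set_unroll_copy(n, 1, cp)
 *     copies[2] = 2nd copy
 *     copies[3] = 3rd copy
 *
 * Unset slots stay NULL, which the callers of get_unroll_copy rely on.
 */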
525
526
527 /***** Inversion Helper Functions *****/
528
529 /* Sets copy cp of node n. */
530 static void set_inversion_copy(ir_node *n, ir_node *cp)
531 {
532         ir_nodemap_insert(&map, n, cp);
533 }
534
535 /* Returns the inversion copy of n, or NULL if none exists. */
536 static ir_node *get_inversion_copy(ir_node *n)
537 {
538         ir_node *cp = (ir_node *)ir_nodemap_get(&map, n);
539         return cp;
540 }
541
542 /* Resets block mark for given node. For use with walker */
543 static void reset_block_mark(ir_node *node, void * env)
544 {
545         (void) env;
546
547         if (is_Block(node))
548                 set_Block_mark(node, 0);
549 }
550
551 /* Returns the mark of the node's block (the node itself, if it is a block).
552  * Used in this context to determine if the node is in the condition chain. */
553 static bool is_nodes_block_marked(const ir_node* node)
554 {
555         return get_Block_mark(get_block_const(node));
556 }
557
558 /* Extends a node's ins by the node newnode.
559  * NOTE: This is slow if a node n needs to be extended more than once. */
560 static void extend_irn(ir_node *n, ir_node *newnode, bool new_is_backedge)
561 {
562         int i;
563         int arity = get_irn_arity(n);
564         int new_arity = arity + 1;
565         ir_node **ins = XMALLOCN(ir_node*, new_arity);
566         bool     *bes = XMALLOCN(bool, new_arity);
567
568         /* save bes */
569         /* Bes are important!
570          * Another way would be recreating the looptree,
571          * but after that we cannot distinguish already processed loops
572          * from not yet processed ones. */
573         if (is_Block(n)) {
574                 for(i = 0; i < arity; ++i) {
575                         bes[i] = is_backedge(n, i);
576                 }
577                 bes[i] = new_is_backedge;
578         }
579
580         for(i = 0; i < arity; ++i) {
581                 ins[i] = get_irn_n(n, i);
582         }
583         ins[i] = newnode;
584
585         set_irn_in(n, new_arity, ins);
586
587         /* restore bes  */
588         if (is_Block(n)) {
589                 for(i = 0; i < new_arity; ++i) {
590                         if (bes[i])
591                                 set_backedge(n, i);
592                 }
593         }
        free(ins);
        free(bes);
594 }
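/*
 * Example (sketch): for a block B with ins {X, Y} where the edge to Y is a
 * backedge, extend_irn(B, Z, false) yields ins {X, Y, Z} and re-sets the
 * backedge flag on Y only; passing true would also mark the new edge to Z
 * as a backedge.
 */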
595
596 /* Extends a block by a copy of its pred at pos,
597  * fixing also the phis in the same way. */
598 static void extend_ins_by_copy(ir_node *block, int pos)
599 {
600         ir_node *new_in;
601         ir_node *phi;
602         ir_node *pred;
603         assert(is_Block(block));
604
605         /* Extend block by copy of definition at pos */
606         pred = get_irn_n(block, pos);
607         new_in = get_inversion_copy(pred);
608         DB((dbg, LEVEL_5, "Extend block %N by %N cp of %N\n", block, new_in, pred));
609         extend_irn(block, new_in, false);
610
611         /* Extend block phis by copy of definition at pos */
612         for_each_phi(block, phi) {
613                 ir_node *pred, *cp;
614
615                 pred = get_irn_n(phi, pos);
616                 cp = get_inversion_copy(pred);
617                 /* If the phi's in is not in the condition chain (e.g. a constant),
618                  * there is no copy. */
619                 if (cp == NULL)
620                         new_in = pred;
621                 else
622                         new_in = cp;
623
624                 DB((dbg, LEVEL_5, "Extend phi %N by %N cp of %N\n", phi, new_in, pred));
625                 extend_irn(phi, new_in, false);
626         }
627 }
628
629 /* Returns the number of the block's backedges, with or without alien backedges. */
630 static int get_backedge_n(ir_node *block, bool with_alien)
631 {
632         int i;
633         int be_n = 0;
634         int arity = get_irn_arity(block);
635
636         assert(is_Block(block));
637
638         for (i = 0; i < arity; ++i) {
639                 ir_node *pred = get_irn_n(block, i);
640                 if (is_backedge(block, i) && (with_alien || is_in_loop(pred)))
641                         ++be_n;
642         }
643         return be_n;
644 }
645
646 /* Returns a raw copy of the given node.
647  * Attributes are kept/set according to the needs of loop inversion. */
648 static ir_node *copy_node(ir_node *node)
649 {
650         int i, arity;
651         ir_node *cp;
652
653         cp = exact_copy(node);
654         arity = get_irn_arity(node);
655
656         /* Keep backedge info */
657         for (i = 0; i < arity; ++i) {
658                 if (is_backedge(node, i))
659                         set_backedge(cp, i);
660         }
661
662         if (is_Block(cp)) {
663                 set_Block_mark(cp, 0);
664         }
665
666         return cp;
667 }
668
669
670 /**
671  * This walker copies all walked nodes.
672  * If the walk_condition is true for a node, it is copied.
673  * All nodes node_info->copy have to be NULL prior to every walk.
674  * Order of ins is important for later usage.
675  */
676 static void copy_walk(ir_node *node, walker_condition *walk_condition,
677                       ir_loop *set_loop)
678 {
679         int i;
680         int arity;
681         ir_node *cp;
682         ir_node **cpin;
683         ir_graph *irg = current_ir_graph;
684
685         /**
686          * break condition and cycle resolver, creating temporary node copies
687          */
688         if (get_irn_visited(node) >= get_irg_visited(irg)) {
689                 /* Here we rely on the node map entry being NULL for nodes without a copy */
690                 DB((dbg, LEVEL_5, "copy_walk: We have already visited %N\n", node));
691                 if (get_inversion_copy(node) == NULL) {
692                         cp = copy_node(node);
693                         set_inversion_copy(node, cp);
694
695                         DB((dbg, LEVEL_5, "The TEMP copy of %N is created %N\n", node, cp));
696                 }
697                 return;
698         }
699
700         /* Walk */
701         mark_irn_visited(node);
702
703         if (!is_Block(node)) {
704                 ir_node *pred = get_nodes_block(node);
705                 if (walk_condition(pred))
706                         DB((dbg, LEVEL_5, "walk block %N\n", pred));
707                 copy_walk(pred, walk_condition, set_loop);
708         }
709
710         arity = get_irn_arity(node);
711
712         NEW_ARR_A(ir_node *, cpin, arity);
713
714         for (i = 0; i < arity; ++i) {
715                 ir_node *pred = get_irn_n(node, i);
716
717                 if (walk_condition(pred)) {
718                         DB((dbg, LEVEL_5, "walk node %N\n", pred));
719                         copy_walk(pred, walk_condition, set_loop);
720                         cpin[i] = get_inversion_copy(pred);
721                         DB((dbg, LEVEL_5, "copy of %N gets new in %N which is copy of %N\n",
722                                                 node, get_inversion_copy(pred), pred));
723                 } else {
724                         cpin[i] = pred;
725                 }
726         }
727
728         /* copy node / finalize temp node */
729         if (get_inversion_copy(node) == NULL) {
730                 /* No temporary copy existent */
731                 cp = copy_node(node);
732                 set_inversion_copy(node, cp);
733                 DB((dbg, LEVEL_5, "The FINAL copy of %N is CREATED %N\n", node, cp));
734         } else {
735                 /* temporary copy is existent but without correct ins */
736                 cp = get_inversion_copy(node);
737                 DB((dbg, LEVEL_5, "The FINAL copy of %N is EXISTENT %N\n", node, cp));
738         }
739
740         if (!is_Block(node)) {
741                 ir_node *cpblock = get_inversion_copy(get_nodes_block(node));
742
743                 set_nodes_block(cp, cpblock );
744                 if (is_Phi(cp))
745                         add_Block_phi(cpblock, cp);
746         }
747
748         /* Keeps phi list of temporary node. */
749         set_irn_in(cp, ARR_LEN(cpin), cpin);
750 }
751
752 /**
753  * This walker copies all walked nodes.
754  * If the walk_condition is true for a node, it is copied.
755  * All nodes node_info->copy have to be NULL prior to every walk.
756  * Order of ins is important for later usage.
757  * Takes copy_index to store the copy at that index in the node map.
758  */
759 static void copy_walk_n(ir_node *node, walker_condition *walk_condition,
760                         int copy_index)
761 {
762         int i;
763         int arity;
764         ir_node *cp;
765         ir_node **cpin;
766
767         /**
768          * break condition and cycle resolver, creating temporary node copies
769          */
770         if (irn_visited(node)) {
771                 /* Here we rely on the node map entry being NULL for nodes without a copy */
772                 DB((dbg, LEVEL_5, "copy_walk: We have already visited %N\n", node));
773                 if (get_unroll_copy(node, copy_index) == NULL) {
774                         ir_node *u;
775                         u = copy_node(node);
776                         set_unroll_copy(node, copy_index, u);
777                         DB((dbg, LEVEL_5, "The TEMP unknown of %N is created %N\n", node, u));
778                 }
779                 return;
780         }
781
782         /* Walk */
783         mark_irn_visited(node);
784
785         if (!is_Block(node)) {
786                 ir_node *block = get_nodes_block(node);
787                 if (walk_condition(block))
788                         DB((dbg, LEVEL_5, "walk block %N\n", block));
789                 copy_walk_n(block, walk_condition, copy_index);
790         }
791
792         arity = get_irn_arity(node);
793         NEW_ARR_A(ir_node *, cpin, arity);
794
795         for (i = 0; i < arity; ++i) {
796                 ir_node *pred = get_irn_n(node, i);
797
798                 if (walk_condition(pred)) {
799                         DB((dbg, LEVEL_5, "walk node %N\n", pred));
800                         copy_walk_n(pred, walk_condition, copy_index);
801                         cpin[i] = get_unroll_copy(pred, copy_index);
802                 } else {
803                         cpin[i] = pred;
804                 }
805         }
806
807         /* copy node / finalize temp node */
808         cp = get_unroll_copy(node, copy_index);
809         if (cp == NULL || is_Unknown(cp)) {
810                 cp = copy_node(node);
811                 set_unroll_copy(node, copy_index, cp);
812                 DB((dbg, LEVEL_5, "The FINAL copy of %N is CREATED %N\n", node, cp));
813         } else {
814                 /* temporary copy is existent but without correct ins */
815                 cp = get_unroll_copy(node, copy_index);
816                 DB((dbg, LEVEL_5, "The FINAL copy of %N is EXISTENT %N\n", node, cp));
817         }
818
819         if (!is_Block(node)) {
820                 ir_node *cpblock = get_unroll_copy(get_nodes_block(node), copy_index);
821
822                 set_nodes_block(cp, cpblock );
823                 if (is_Phi(cp))
824                         add_Block_phi(cpblock, cp);
825         }
826
827         /* Keeps phi list of temporary node. */
828         set_irn_in(cp, ARR_LEN(cpin), cpin);
829 }
830
831 /* Removes all blocks with unmarked predecessors from the condition chain. */
832 static void unmark_not_allowed_cc_blocks(void)
833 {
834         size_t blocks = ARR_LEN(cc_blocks);
835         size_t i;
836
837         for(i = 0; i < blocks; ++i) {
838                 ir_node *block = cc_blocks[i];
839                 int a;
840                 int arity = get_irn_arity(block);
841
842                 /* Head is an exception. */
843                 if (block == loop_head)
844                         continue;
845
846                 for(a = 0; a < arity; ++a) {
847                         if (! is_nodes_block_marked(get_irn_n(block, a))) {
848                                 set_Block_mark(block, 0);
849                                 --inversion_blocks_in_cc;
850                                 DB((dbg, LEVEL_5, "Removed %N from cc (blocks in cc %d)\n",
851                                                 block, inversion_blocks_in_cc));
852
853                                 break;
854                         }
855                 }
856         }
857 }
858
859 /* Unmarks all cc blocks using cc_blocks, including the head.
860  * TODO: invert head for unrolling? */
861 static void unmark_cc_blocks(void)
862 {
863         size_t blocks = ARR_LEN(cc_blocks);
864         size_t i;
865
866         for(i = 0; i < blocks; ++i) {
867                 ir_node *block = cc_blocks[i];
868
869                 /* TODO Head is an exception. */
870                 /*if (block != loop_head)*/
871                 set_Block_mark(block, 0);
872         }
873         /*inversion_blocks_in_cc = 1;*/
874         inversion_blocks_in_cc = 0;
875
876         /* invalidate */
877         loop_info.cc_size = 0;
878 }
879
880 /**
881  * Populates cur_head_outs with (node, pred_pos) tuples
882  * where the node's pred at pred_pos is in the cc but the node itself is not.
883  * Also finds df loops inside the cc.
884  * Head and condition chain blocks have been marked previously.
885  */
886 static void get_head_outs(ir_node *node, void *env)
887 {
888         int i;
889         int arity = get_irn_arity(node);
890         (void) env;
891
892         for (i = 0; i < arity; ++i) {
893                 if (!is_nodes_block_marked(node) && is_nodes_block_marked(get_irn_n(node, i))) {
894                         entry_edge entry;
895                         entry.node = node;
896                         entry.pos = i;
897                         /* Saving the predecessor as well seems redundant, but becomes
898                          * necessary once its position is changed before it is
899                          * dereferenced. */
900                         entry.pred = get_irn_n(node, i);
901                         ARR_APP1(entry_edge, cur_head_outs, entry);
902                 }
903         }
904
905         arity = get_irn_arity(loop_head);
906
907         /* Find df loops inside the cc */
908         if (is_Phi(node) && get_nodes_block(node) == loop_head) {
909                 for (i = 0; i < arity; ++i) {
910                         if (is_own_backedge(loop_head, i)) {
911                                 if (is_nodes_block_marked(get_irn_n(node, i))) {
912                                         entry_edge entry;
913                                         entry.node = node;
914                                         entry.pos = i;
915                                         entry.pred = get_irn_n(node, i);
916                                         ARR_APP1(entry_edge, head_df_loop, entry);
917                                         DB((dbg, LEVEL_5, "Found incc assignment node %N @%d is pred %N, graph %N %N\n",
918                                                         node, i, entry.pred, current_ir_graph, get_irg_start_block(current_ir_graph)));
919                                 }
920                         }
921                 }
922         }
923 }
924
925 /**
926  * Find condition chains, and add them to be inverted.
927  * A block belongs to the chain if a condition branches out of the loop.
928  * (Some blocks need to be removed once again.)
929  * Marks the given block if it belongs to the condition chain.
930  */
931 static void find_condition_chain(ir_node *block)
932 {
933         const    ir_edge_t *edge;
934         bool     mark     = false;
935         bool     has_be   = false;
936         bool     jmp_only = true;
937         unsigned nodes_n  = 0;
938
939         mark_irn_visited(block);
940
941         DB((dbg, LEVEL_5, "condition_chains for block %N\n", block));
942
943         /* Get node count */
944         foreach_out_edge_kind(block, edge, EDGE_KIND_NORMAL) {
945                 ++nodes_n;
946         }
947
948         /* Check if node count would exceed maximum cc size.
949          * TODO
950          * This is not optimal, as we search depth-first and break here,
951          * continuing with another subtree. */
952         if (loop_info.cc_size + nodes_n > opt_params.max_cc_size) {
953                 set_Block_mark(block, 0);
954                 return;
955         }
956
957         /* Check if block only has a jmp instruction. */
958         foreach_out_edge(block, edge) {
959                 ir_node *src = get_edge_src_irn(edge);
960
961                 if (!is_Block(src) && !is_Jmp(src)) {
962                         jmp_only = false;
963                 }
964         }
965
966         /* Check cf outs if one is leaving the loop,
967          * or if this node has a backedge. */
968         foreach_block_succ(block, edge) {
969                 ir_node *src = get_edge_src_irn(edge);
970                 int pos = get_edge_src_pos(edge);
971
972                 if (!is_in_loop(src))
973                         mark = true;
974
975                 /* Inverting blocks with backedge outs leads to a cf edge
976                  * from the inverted head, into the inverted head (skipping the body).
977                  * As the body becomes the new loop head,
978                  * this would introduce another loop in the existing loop.
979                  * This loop inversion cannot cope with this case. */
980                 if (is_backedge(src, pos)) {
981                         has_be = true;
982                         break;
983                 }
984         }
985
986         /* We need all predecessors to already belong to the condition chain.
987          * Example of wrong case:  * == in cc
988          *
989          *     Head*             ,--.
990          *    /|   \            B   |
991          *   / A*  B           /    |
992          *  / /\   /          ?     |
993          *   /   C*      =>      D  |
994          *      /  D           Head |
995          *     /               A  \_|
996          *                      C
997          */
998         /* Collect blocks containing only a Jmp.
999          * Do not collect blocks with backedge outs. */
1000         if ((jmp_only || mark) && !has_be) {
1001                 set_Block_mark(block, 1);
1002                 ++inversion_blocks_in_cc;
1003                 loop_info.cc_size += nodes_n;
1004                 DB((dbg, LEVEL_5, "block %N is part of condition chain\n", block));
1005                 ARR_APP1(ir_node *, cc_blocks, block);
1006         } else {
1007                 set_Block_mark(block, 0);
1008         }
1009
1010         foreach_block_succ(block, edge) {
1011                 ir_node *src = get_edge_src_irn( edge );
1012
1013                 if (is_in_loop(src) && ! irn_visited(src))
1014                         find_condition_chain(src);
1015         }
1016 }
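/*
 * Illustrative shape of a condition chain (sketch): for a loop like
 *
 *     while (1) {
 *             if (cond1) break;
 *             if (cond2) break;
 *             ... body ...
 *     }
 *
 * the head block (testing cond1) and the cond2 block branch out of the loop
 * and are marked, so the inversion can later duplicate these checks in
 * front of the loop.
 */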
1017
1018 /**
1019  * Rewires the copied condition chain. Removes backedges
1020  * as this condition chain is prior to the loop.
1021  * Copy of loop_head must have phi list and old (unfixed) backedge info of the loop head.
1022  * (loop_head is already fixed, we cannot rely on it.)
1023  */
1024 static void fix_copy_inversion(void)
1025 {
1026         ir_node *new_head;
1027         ir_node **ins;
1028         ir_node **phis;
1029         ir_node *phi, *next;
1030         ir_node *head_cp = get_inversion_copy(loop_head);
1031         ir_graph *irg    = get_irn_irg(head_cp);
1032         int arity        = get_irn_arity(head_cp);
1033         int backedges    = get_backedge_n(head_cp, false);
1034         int new_arity    = arity - backedges;
1035         int pos;
1036         int i;
1037
1038         NEW_ARR_A(ir_node *, ins, new_arity);
1039
1040         pos = 0;
1041         /* Remove block backedges */
1042         for(i = 0; i < arity; ++i) {
1043                 if (!is_backedge(head_cp, i))
1044                         ins[pos++] = get_irn_n(head_cp, i);
1045         }
1046
1047         new_head = new_r_Block(irg, new_arity, ins);
1048
1049         phis = NEW_ARR_F(ir_node *, 0);
1050
1051         for_each_phi_safe(get_Block_phis(head_cp), phi, next) {
1052                 ir_node *new_phi;
1053                 NEW_ARR_A(ir_node *, ins, new_arity);
1054                 pos = 0;
1055                 for(i = 0; i < arity; ++i) {
1056                         if (!is_backedge(head_cp, i))
1057                                 ins[pos++] = get_irn_n(phi, i);
1058                 }
1059                 new_phi = new_rd_Phi(get_irn_dbg_info(phi),
1060                                 new_head, new_arity, ins,
1061                                 get_irn_mode(phi));
1062                 ARR_APP1(ir_node *, phis, new_phi);
1063         }
1064
1065         pos = 0;
1066         for_each_phi_safe(get_Block_phis(head_cp), phi, next) {
1067                 exchange(phi, phis[pos++]);
1068         }
1069
1070         exchange(head_cp, new_head);
1071
1072         DEL_ARR_F(phis);
1073 }
1074
1075
1076 /* Puts the original condition chain at the end of the loop,
1077  * i.e. after the body.
1078  * Relies on block phi list and correct backedges.
1079  */
1080 static void fix_head_inversion(void)
1081 {
1082         ir_node *new_head;
1083         ir_node **ins;
1084         ir_node *phi, *next;
1085         ir_node **phis;
1086         ir_graph *irg = get_irn_irg(loop_head);
1087         int arity     = get_irn_arity(loop_head);
1088         int backedges = get_backedge_n(loop_head, false);
1089         int new_arity = backedges;
1090         int pos;
1091         int i;
1092
1093         NEW_ARR_A(ir_node *, ins, new_arity);
1094
1095         pos = 0;
1096         /* Keep only backedges */
1097         for(i = 0; i < arity; ++i) {
1098                 if (is_own_backedge(loop_head, i))
1099                         ins[pos++] = get_irn_n(loop_head, i);
1100         }
1101
1102         new_head = new_r_Block(irg, new_arity, ins);
1103
1104         phis = NEW_ARR_F(ir_node *, 0);
1105
1106         for_each_phi(loop_head, phi) {
1107                 ir_node *new_phi;
1108                 DB((dbg, LEVEL_5, "Fixing phi %N of loop head\n", phi));
1109
1110                 NEW_ARR_A(ir_node *, ins, new_arity);
1111
1112                 pos = 0;
1113                 for (i = 0; i < arity; ++i) {
1114                         ir_node *pred = get_irn_n(phi, i);
1115
1116                         if (is_own_backedge(loop_head, i)) {
1117                                 /* If assignment is in the condition chain,
1118                                  * we need to create a phi in the new loop head.
1119                                  * This can only happen for df, not cf. See find_condition_chains. */
1120                                 /*if (is_nodes_block_marked(pred)) {
1121                                         ins[pos++] = pred;
1122                                 } else {*/
1123                                 ins[pos++] = pred;
1124
1125                         }
1126                 }
1127
1128                 new_phi = new_rd_Phi(get_irn_dbg_info(phi),
1129                         new_head, new_arity, ins,
1130                         get_irn_mode(phi));
1131
1132                 ARR_APP1(ir_node *, phis, new_phi);
1133
1134                 DB((dbg, LEVEL_5, "fix inverted head should exch %N by %N (pos %d)\n", phi, new_phi, pos ));
1135         }
1136
1137         pos = 0;
1138         for_each_phi_safe(get_Block_phis(loop_head), phi, next) {
1139                 DB((dbg, LEVEL_5, "fix inverted exch phi %N by %N\n", phi, phis[pos]));
1140                 if (phis[pos] != phi)
1141                         exchange(phi, phis[pos++]);
1142         }
1143
1144         DEL_ARR_F(phis);
1145
1146         DB((dbg, LEVEL_5, "fix inverted head exch head block %N by %N\n", loop_head, new_head));
1147         exchange(loop_head, new_head);
1148 }
1149
1150 /* Does the loop inversion.  */
1151 static void inversion_walk(ir_graph *irg, entry_edge *head_entries)
1152 {
1153         size_t i;
1154
1155         /*
1156          * The order of rewiring bottom-up is crucial.
1157          * Any change of the order leads to lost information that would be needed later.
1158          */
1159
1160         ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
1161
1162         /* 1. clone condition chain */
1163         inc_irg_visited(irg);
1164
1165         for (i = 0; i < ARR_LEN(head_entries); ++i) {
1166                 entry_edge entry = head_entries[i];
1167                 ir_node *pred = get_irn_n(entry.node, entry.pos);
1168
1169                 DB((dbg, LEVEL_5, "\nInit walk block %N\n", pred));
1170
1171                 copy_walk(pred, is_nodes_block_marked, cur_loop);
1172         }
1173
1174         ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
1175
1176         /* 2. Extends the head control flow successors ins
1177          *    with the definitions of the copied head node. */
1178         for (i = 0; i < ARR_LEN(head_entries); ++i) {
1179                 entry_edge head_out = head_entries[i];
1180
1181                 if (is_Block(head_out.node))
1182                         extend_ins_by_copy(head_out.node, head_out.pos);
1183         }
1184
1185         /* 3. construct_ssa for users of definitions in the condition chain,
1186          *    as there is now a second definition. */
1187         for (i = 0; i < ARR_LEN(head_entries); ++i) {
1188                 entry_edge head_out = head_entries[i];
1189
1190                 /* Ignore keepalives */
1191                 if (is_End(head_out.node))
1192                         continue;
1193
1194                 /* Construct ssa for assignments in the condition chain. */
1195                 if (!is_Block(head_out.node)) {
1196                         ir_node *pred, *cppred, *block, *cpblock;
1197
1198                         pred = head_out.pred;
1199                         cppred = get_inversion_copy(pred);
1200                         block = get_nodes_block(pred);
1201                         cpblock = get_nodes_block(cppred);
1202                         construct_ssa(block, pred, cpblock, cppred);
1203                 }
1204         }
1205
1206         /*
1207          * If there is an assignment in the condition chain
1208          * with a user also in the condition chain,
1209          * the dominance frontier is in the new loop head.
1210          * The dataflow loop is completely in the condition chain.
1211          * Goal:
1212          *  To be wired: >|
1213          *
1214          *  | ,--.   |
1215          * Phi_cp |  | copied condition chain
1216          * >| |   |  |
1217          * >| ?__/   |
1218          * >| ,-.
1219          *  Phi* |   | new loop head with newly created phi.
1220          *   |   |
1221          *  Phi  |   | original, inverted condition chain
1222          *   |   |   |
1223          *   ?__/    |
1224          *
1225          */
1226         for (i = 0; i < ARR_LEN(head_df_loop); ++i) {
1227                 entry_edge head_out = head_df_loop[i];
1228
1229                 /* Construct ssa for assignments in the condition chain. */
1230                 ir_node *pred, *cppred, *block, *cpblock;
1231
1232                 pred = head_out.pred;
1233                 cppred = get_inversion_copy(pred);
1234                 assert(cppred && pred);
1235                 block = get_nodes_block(pred);
1236                 cpblock = get_nodes_block(cppred);
1237                 construct_ssa(block, pred, cpblock, cppred);
1238         }
1239
1240         /* 4. Remove the ins which are no backedges from the original condition chain
1241          *    as the cc is now subsequent to the body. */
1242         fix_head_inversion();
1243
1244         /* 5. Remove the backedges of the copied condition chain,
1245          *    because it is going to be the new 'head' in advance to the loop. */
1246         fix_copy_inversion();
1247
1248 }
1249
1250 /* Performs loop inversion of cur_loop if possible and reasonable. */
1251 static void loop_inversion(ir_graph *irg)
1252 {
1253         int      loop_depth;
1254         unsigned max_loop_nodes = opt_params.max_loop_size;
1255         unsigned max_loop_nodes_adapted;
1256         int      depth_adaption = opt_params.depth_adaption;
1257
1258         bool do_inversion = true;
1259
1260         /* Depth of 0 is the procedure and 1 a topmost loop. */
1261         loop_depth = get_loop_depth(cur_loop) - 1;
1262
1263         /* Adaption is given in percent (see get_max_nodes_adapted). */
1264         max_loop_nodes_adapted = get_max_nodes_adapted(loop_depth);
1265
1266         DB((dbg, LEVEL_1, "max_nodes: %d\nmax_nodes_adapted %d at depth of %d (adaption %d)\n",
1267                         max_loop_nodes, max_loop_nodes_adapted, loop_depth, depth_adaption));
1268
1269         if (loop_info.nodes == 0)
1270                 return;
1271
1272         if (loop_info.nodes > max_loop_nodes) {
1273                 /* Only for stats */
1274                 DB((dbg, LEVEL_1, "Nodes %d > allowed nodes %d\n",
1275                         loop_info.nodes, max_loop_nodes));
1276                 ++stats.too_large;
1277                 /* no RETURN */
1278                 /* Adaption might change it */
1279         }
1280
1281         /* Limit processing to loops smaller than given parameter. */
1282         if (loop_info.nodes > max_loop_nodes_adapted) {
1283                 DB((dbg, LEVEL_1, "Nodes %d > allowed nodes (depth %d adapted) %d\n",
1284                         loop_info.nodes, loop_depth, max_loop_nodes_adapted));
1285                 ++stats.too_large_adapted;
1286                 return;
1287         }
1288
1289         if (loop_info.calls > opt_params.allowed_calls) {
1290                 DB((dbg, LEVEL_1, "Calls %d > allowed calls %d\n",
1291                         loop_info.calls, opt_params.allowed_calls));
1292                 ++stats.calls_limit;
1293                 return;
1294         }
1295
1296         /*inversion_head_node_limit = INT_MAX;*/
1297         ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK);
1298
1299         /* Reset block marks.
1300          * We use block marks to flag blocks of the original condition chain. */
1301         irg_walk_graph(irg, reset_block_mark, NULL, NULL);
1302
1303         /*loop_info.blocks = get_loop_n_blocks(cur_loop);*/
1304         cond_chain_entries = NEW_ARR_F(entry_edge, 0);
1305         head_df_loop = NEW_ARR_F(entry_edge, 0);
1306
1307         /*head_inversion_node_count = 0;*/
1308         inversion_blocks_in_cc = 0;
1309
1310         /* Use a node map to keep copies of nodes from the condition chain. */
1311         ir_nodemap_init(&map, irg);
1312         obstack_init(&obst);
1313
1314         /* Search for condition chains and temporarily save the blocks in an array. */
1315         cc_blocks = NEW_ARR_F(ir_node *, 0);
1316         inc_irg_visited(irg);
1317         find_condition_chain(loop_head);
1318
1319         unmark_not_allowed_cc_blocks();
1320         DEL_ARR_F(cc_blocks);
1321
1322         /* Condition chain too large.
1323          * Loop should better be small enough to fit into the cache. */
1324         /* TODO Of course, we should take a small enough cc in the first place,
1325          * which is not that simple. (bin packing)  */
1326         if (loop_info.cc_size > opt_params.max_cc_size) {
1327                 ++stats.cc_limit_reached;
1328
1329                 do_inversion = false;
1330
1331                 /* Unmark the cc blocks (currently including the head;
1332                  * TODO: keep the head inverted for possible unrolling). */
1333                 unmark_cc_blocks();
1334         }
1335
1336         /* We also catch endless loops here,
1337          * because they do not have a condition chain. */
1338         if (inversion_blocks_in_cc < 1) {
1339                 do_inversion = false;
1340                 DB((dbg, LEVEL_3,
1341                         "Loop contains %d (less than 1) invertible blocks => No Inversion done.\n",
1342                         inversion_blocks_in_cc));
1343         }
1344
1345         if (do_inversion) {
1346                 cur_head_outs = NEW_ARR_F(entry_edge, 0);
1347
1348                 /* Get all edges pointing into the condition chain. */
1349                 irg_walk_graph(irg, get_head_outs, NULL, NULL);
1350
1351                 /* Do the inversion */
1352                 inversion_walk(irg, cur_head_outs);
1353
1354                 DEL_ARR_F(cur_head_outs);
1355
1356                 /* Duplicated blocks changed doms */
1357                 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_DOMINANCE
1358                                    | IR_GRAPH_STATE_CONSISTENT_LOOPINFO);
1359
1360                 ++stats.inverted;
1361         }
1362
1363         /* free */
1364         obstack_free(&obst, NULL);
1365         ir_nodemap_destroy(&map);
1366         DEL_ARR_F(cond_chain_entries);
1367         DEL_ARR_F(head_df_loop);
1368
1369         ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK);
1370 }
1371
1372 /* Fix the original loop_head's ins for the invariant unrolling case. */
1373 static void unrolling_fix_loop_head_inv(void)
1374 {
1375         ir_node *ins[2];
1376         ir_node *phi;
1377         ir_node *proj = new_Proj(loop_info.duff_cond, mode_X, 0);
1378         ir_node *head_pred = get_irn_n(loop_head, loop_info.be_src_pos);
1379         ir_node *loop_condition = get_unroll_copy(head_pred, unroll_nr - 1);
1380
1381         /* Original loop_heads ins are:
1382          * duff block and the own backedge */
1383
1384         ins[0] = loop_condition;
1385         ins[1] = proj;
1386         set_irn_in(loop_head, 2, ins);
1387         DB((dbg, LEVEL_4, "Rewire ins of block loophead %N to pred %N and duffs entry %N \n" , loop_head, ins[0], ins[1]));
1388
1389         for_each_phi(loop_head, phi) {
1390                 ir_node *pred = get_irn_n(phi, loop_info.be_src_pos);
1391                 /* TODO we think it is a phi, but for Mergesort it is not the case.*/
1392
1393                 ir_node *last_pred = get_unroll_copy(pred, unroll_nr - 1);
1394
1395                 ins[0] = last_pred;
1396                 ins[1] = (ir_node*)get_irn_link(phi);
1397                 set_irn_in(phi, 2, ins);
1398                 DB((dbg, LEVEL_4, "Rewire ins of loophead phi %N to pred %N and duffs entry %N \n" , phi, ins[0], ins[1]));
1399         }
1400 }
1401
1402 /* Removes previously created phis with only 1 in. */
1403 static void correct_phis(ir_node *node, void *env)
1404 {
1405         (void)env;
1406
1407         if (is_Phi(node) && get_irn_arity(node) == 1) {
1408                 ir_node *exch;
1409                 ir_node *in[1];
1410
1411                 in[0] = get_irn_n(node, 0);
1412
1413                 exch = new_rd_Phi(get_irn_dbg_info(node),
1414                     get_nodes_block(node), 1, in,
1415                         get_irn_mode(node));
1416
1417                 exchange(node, exch);
1418         }
1419 }
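/*
 * Example (sketch): a phi created with a single in during place_copies is
 * rebuilt here as a fresh Phi with that one in; the Phi optimization
 * performed on construction can then fold it to its operand, so the
 * exchange effectively removes the degenerate phi.
 */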
1420
1421 /* Unrolling: Rewire floating copies. */
1422 static void place_copies(int copies)
1423 {
1424         ir_node *loophead = loop_head;
1425         size_t i;
1426         int c;
1427         int be_src_pos = loop_info.be_src_pos;
1428
1429         /* Serialize the loop copies by fixing their head ins.
1430          * The copies are processed first.
1431          * The original loop is done after that, to keep backedge info. */
1432         for (c = 0; c < copies; ++c) {
1433                 ir_node *upper = get_unroll_copy(loophead, c);
1434                 ir_node *lower = get_unroll_copy(loophead, c + 1);
1435                 ir_node *phi;
1436                 ir_node *topmost_be_block = get_nodes_block(get_irn_n(loophead, be_src_pos));
1437
1438                 /* Important: get the preds first and then their copy. */
1439                 ir_node *upper_be_block = get_unroll_copy(topmost_be_block, c);
1440                 ir_node *new_jmp = new_r_Jmp(upper_be_block);
1441                 DB((dbg, LEVEL_5, " place_copies upper %N lower %N\n", upper, lower));
1442
1443                 DB((dbg, LEVEL_5, "topmost be block %N \n", topmost_be_block));
1444
1445                 if (loop_info.unroll_kind == constant) {
1446                         ir_node *ins[1];
1447                         ins[0] = new_jmp;
1448                         set_irn_in(lower, 1, ins);
1449
1450                         for_each_phi(loophead, phi) {
1451                                 ir_node *topmost_def = get_irn_n(phi, be_src_pos);
1452                                 ir_node *upper_def = get_unroll_copy(topmost_def, c);
1453                                 ir_node *lower_phi = get_unroll_copy(phi, c + 1);
1454
1455                                 /* It is possible that the value used
1456                                  * in the OWN backedge path is NOT defined in this loop. */
1457                                 if (is_in_loop(topmost_def))
1458                                         ins[0] = upper_def;
1459                                 else
1460                                         ins[0] = topmost_def;
1461
1462                                 set_irn_in(lower_phi, 1, ins);
1463                                 /* Need to replace phis with 1 in later. */
1464                         }
1465                 } else {
1466                         /* Invariant case */
1467                         /* Every node has 2 ins. One from the duff blocks
1468                          * and one from the previously unrolled loop. */
1469                         ir_node *ins[2];
1470                         /* Calculate corresponding projection of mod result for this copy c */
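                        /* E.g. with unroll_nr == 4, iteration c == 0 creates projection
                         * number 3 of the Duff Cond, c == 1 creates number 2, ... */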
1471                         ir_node *proj = new_Proj(loop_info.duff_cond, mode_X, unroll_nr - c - 1);
1472                         DB((dbg, LEVEL_4, "New duff proj %N\n" , proj));
1473
1474                         ins[0] = new_jmp;
1475                         ins[1] = proj;
1476                         set_irn_in(lower, 2, ins);
                        DB((dbg, LEVEL_4, "Rewire ins of Block %N to pred %N and duff's entry %N\n", lower, ins[0], ins[1]));
1478
1479                         for_each_phi(loophead, phi) {
1480                                 ir_node *topmost_phi_pred = get_irn_n(phi, be_src_pos);
1481                                 ir_node *upper_phi_pred;
1482                                 ir_node *lower_phi;
1483                                 ir_node *duff_phi;
1484
1485                                 lower_phi = get_unroll_copy(phi, c + 1);
1486                                 duff_phi = (ir_node*)get_irn_link(phi);
1487                                 DB((dbg, LEVEL_4, "DD Link of %N is %N\n" , phi, duff_phi));
1488
                                /* Use the copy of the pred if it is defined inside the loop. */
1490                                 if (is_in_loop(topmost_phi_pred)) {
1491                                         upper_phi_pred = get_unroll_copy(topmost_phi_pred, c);
1492                                 } else {
1493                                         upper_phi_pred = topmost_phi_pred;
1494                                 }
1495
1496                                 ins[0] = upper_phi_pred;
1497                                 ins[1] = duff_phi;
1498                                 set_irn_in(lower_phi, 2, ins);
                                DB((dbg, LEVEL_4, "Rewire ins of %N to pred %N and duff's entry %N\n", lower_phi, ins[0], ins[1]));
1500                         }
1501                 }
1502         }
1503
1504         /* Reconnect last copy. */
1505         for (i = 0; i < ARR_LEN(loop_entries); ++i) {
1506                 entry_edge edge = loop_entries[i];
1507                 /* Last copy is at the bottom */
1508                 ir_node *new_pred = get_unroll_copy(edge.pred, copies);
1509                 set_irn_n(edge.node, edge.pos, new_pred);
1510         }
1511
        /* Fix the original loop's head.
         * Done last, as its ins and backedge info were needed above. */
1514         if (loop_info.unroll_kind == constant) {
1515                 ir_node *phi;
1516                 ir_node *head_pred = get_irn_n(loop_head, be_src_pos);
1517                 ir_node *loop_condition = get_unroll_copy(head_pred, unroll_nr - 1);
1518
1519                 set_irn_n(loop_head, loop_info.be_src_pos, loop_condition);
1520
1521                 for_each_phi(loop_head, phi) {
1522                         ir_node *pred = get_irn_n(phi, be_src_pos);
1523                         ir_node *last_pred;
1524
                        /* It is possible that the value used
                         * on the OWN backedge path is NOT assigned in this loop. */
1527                         if (is_in_loop(pred))
1528                                 last_pred = get_unroll_copy(pred, copies);
1529                         else
1530                                 last_pred = pred;
1531                         set_irn_n(phi, be_src_pos, last_pred);
1532                 }
1533
1534         } else {
1535                 unrolling_fix_loop_head_inv();
1536         }
1537 }
1538
1539 /* Copies the cur_loop several times. */
1540 static void copy_loop(entry_edge *cur_loop_outs, int copies)
1541 {
1542         int c;
1543
1544         ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1545
1546         for (c = 0; c < copies; ++c) {
1547                 size_t i;
1548
1549                 inc_irg_visited(current_ir_graph);
1550
1551                 DB((dbg, LEVEL_5, "         ### Copy_loop  copy nr: %d ###\n", c));
1552                 for (i = 0; i < ARR_LEN(cur_loop_outs); ++i) {
1553                         entry_edge entry = cur_loop_outs[i];
1554                         ir_node *pred = get_irn_n(entry.node, entry.pos);
1555
1556                         copy_walk_n(pred, is_in_loop, c + 1);
1557                 }
1558         }
1559
1560         ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1561 }
1562
1563
/* Creates a new phi from the given phi node, omitting own backedges,
 * using be_block as the supplier of backedge information. */
1566 static ir_node *clone_phis_sans_bes(ir_node *phi, ir_node *be_block, ir_node *dest_block)
1567 {
1568         ir_node **ins;
1569         int arity = get_irn_arity(phi);
1570         int i, c = 0;
1571         ir_node *newphi;
1572
1573         assert(get_irn_arity(phi) == get_irn_arity(be_block));
1574         assert(is_Phi(phi));
1575
1576         ins = NEW_ARR_F(ir_node *, arity);
1577         for (i = 0; i < arity; ++i) {
1578                 if (! is_own_backedge(be_block, i)) {
1579                         ins[c] = get_irn_n(phi, i);
1580                         ++c;
1581                 }
1582         }
1583
        newphi = new_r_Phi(dest_block, c, ins, get_irn_mode(phi));
        /* The constructor copies the ins array; free the flexible array to avoid a leak. */
        DEL_ARR_F(ins);

1586         set_irn_link(phi, newphi);
        DB((dbg, LEVEL_4, "Linking for Duff's device %N to %N\n", phi, newphi));
1588
1589         return newphi;
1590 }
1591
/* Creates a new block from the given block node, omitting own backedges,
 * using be_block as the supplier of backedge information. */
1594 static ir_node *clone_block_sans_bes(ir_node *node, ir_node *be_block)
1595 {
1596         int arity = get_irn_arity(node);
1597         int i, c = 0;
1598         ir_node **ins;
1599
1600         assert(get_irn_arity(node) == get_irn_arity(be_block));
1601         assert(is_Block(node));
1602
1603         NEW_ARR_A(ir_node *, ins, arity);
1604         for (i = 0; i < arity; ++i) {
1605                 if (! is_own_backedge(be_block, i)) {
1606                         ins[c] = get_irn_n(node, i);
1607                         ++c;
1608                 }
1609         }
1610
1611         return new_Block(c, ins);
1612 }
1613
1614 /* Creates a structure to calculate absolute value of node op.
1615  * Returns mux node with absolute value. */
1616 static ir_node *new_Abs(ir_node *op, ir_mode *mode)
1617 {
        ir_graph *irg      = get_irn_irg(op);
        ir_node  *block    = get_nodes_block(op);
        ir_node  *zero     = new_r_Const(irg, get_mode_null(mode));
        ir_node  *cmp      = new_r_Cmp(block, op, zero, ir_relation_less);
        ir_node  *minus_op = new_r_Minus(block, op, mode);
        ir_node  *mux      = new_r_Mux(block, cmp, op, minus_op, mode);

        return mux;
1626 }
1627
1628
/* Creates the blocks for Duff's device, using previously obtained
 * information about the iv.
 * TODO split */
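/* Illustrative arithmetic with hypothetical values (taking latest_value == 1):
 * start = 0, end = 10, step = 3 gives ems = 10; ems % 3 != 0, hence
 * correction = 1 and count = 10 / 3 + 1 = 4. With unroll_nr == 4 the Mod
 * below yields 4 % 4 == 0, and the resulting Cond projection selects the
 * copy to enter (see place_copies()). */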
1632 static void create_duffs_block(void)
1633 {
1634         ir_mode *mode;
1635
1636         ir_node *block1, *count_block, *duff_block;
1637         ir_node *ems, *ems_mod, *ems_div, *ems_mod_proj, *cmp_null,
1638                 *ems_mode_cond, *x_true, *x_false, *const_null;
1639         ir_node *true_val, *false_val;
1640         ir_node *ins[2];
1641
1642         ir_node *duff_mod, *proj, *cond;
1643
1644         ir_node *count, *correction, *unroll_c;
1645         ir_node *cmp_bad_count, *good_count, *bad_count, *count_phi, *bad_count_neg;
1646         ir_node *phi;
1647
1648         mode = get_irn_mode(loop_info.end_val);
1649         const_null = new_Const(get_mode_null(mode));
1650
        /* TODO naming
         * 1. Calculate a first approximation of the count.
         *    Condition: (end - start) % step == 0 */
1654         block1 = clone_block_sans_bes(loop_head, loop_head);
1655         DB((dbg, LEVEL_4, "Duff block 1 %N\n", block1));
1656
        /* Create loop entry phis in the first Duff block,
         * as it becomes the loop's preheader. */
1659         for_each_phi(loop_head, phi) {
                /* Returns the phi's pred if the phi would have arity 1. */
1661                 ir_node *new_phi = clone_phis_sans_bes(phi, loop_head, block1);
1662
1663                 DB((dbg, LEVEL_4, "HEAD %N phi %N\n", loop_head, phi));
1664                 DB((dbg, LEVEL_4, "BLOCK1 %N phi %N\n", block1, new_phi));
1665         }
1666
        ems = new_r_Sub(block1, loop_info.end_val, loop_info.start_val,
                get_irn_mode(loop_info.end_val));
        DB((dbg, LEVEL_4, "BLOCK1 sub %N\n", ems));
1674
1675         DB((dbg, LEVEL_4, "mod ins %N %N\n", ems, loop_info.step));
1676         ems_mod = new_r_Mod(block1,
1677                 new_NoMem(),
1678                 ems,
1679                 loop_info.step,
1680                 mode,
1681                 op_pin_state_pinned);
1682         ems_div = new_r_Div(block1,
1683                 new_NoMem(),
1684                 ems,
1685                 loop_info.step,
1686                 mode,
1687                 op_pin_state_pinned);
1688
        DB((dbg, LEVEL_4, "New mod node %N\n", ems_mod));
1690
        ems_mod_proj = new_r_Proj(ems_mod, mode, pn_Mod_res);
        cmp_null = new_r_Cmp(block1, ems_mod_proj, const_null, ir_relation_equal);
1693         ems_mode_cond = new_r_Cond(block1, cmp_null);
1694
1695         /* ems % step == 0 */
1696         x_true = new_r_Proj(ems_mode_cond, mode_X, pn_Cond_true);
1697         /* ems % step != 0 */
1698         x_false = new_r_Proj(ems_mode_cond, mode_X, pn_Cond_false);
1699
1700         /* 2. Second block.
         * Ensures Duff's device receives a valid count.
1702          * Condition:
1703          *     decreasing: count < 0
1704          *     increasing: count > 0
1705          */
1706         ins[0] = x_true;
1707         ins[1] = x_false;
1708
1709         count_block = new_Block(2, ins);
1710         DB((dbg, LEVEL_4, "Duff block 2 %N\n", count_block));
1711
1712
        /* Increase the loop-taken count depending on whether the loop
         * condition compares against the latest iv value. */
1715         if (loop_info.latest_value == 1) {
1716                 /* ems % step == 0 :  +0 */
1717                 true_val = new_Const(get_mode_null(mode));
1718                 /* ems % step != 0 :  +1 */
1719                 false_val = new_Const(get_mode_one(mode));
1720         } else {
1721                 ir_tarval *tv_two = new_tarval_from_long(2, mode);
1722                 /* ems % step == 0 :  +1 */
1723                 true_val = new_Const(get_mode_one(mode));
1724                 /* ems % step != 0 :  +2 */
1725                 false_val = new_Const(tv_two);
1726         }
1727
1728         ins[0] = true_val;
1729         ins[1] = false_val;
1730
1731         correction = new_r_Phi(count_block, 2, ins, mode);
1732
1733         count = new_r_Proj(ems_div, mode, pn_Div_res);
1734
1735         /* (end - start) / step  +  correction */
1736         count = new_Add(count, correction, mode);
1737
        /* We preconditioned the loop to be tail-controlled.
         * So, if count is 'wrong', i.e. 0 or negative/positive
         * (depending on step direction), we take the loop once
         * (tail-controlled) and leave it to the existing condition to break. */
1743
1744         /* Depending on step direction, we have to check for > or < 0 */
1745         if (loop_info.decreasing == 1) {
1746                 cmp_bad_count = new_r_Cmp(count_block, count, const_null,
1747                                           ir_relation_less);
1748         } else {
1749                 cmp_bad_count = new_r_Cmp(count_block, count, const_null,
1750                                           ir_relation_greater);
1751         }
1752
1753         bad_count_neg = new_r_Cond(count_block, cmp_bad_count);
1754         good_count = new_Proj(bad_count_neg, mode_X, pn_Cond_true);
        bad_count = new_Proj(bad_count_neg, mode_X, pn_Cond_false);
1756
1757         /* 3. Duff Block
         *    Contains the Mod node deciding which copy to start from. */
1759
1760         ins[0] = good_count;
1761         ins[1] = bad_count;
1762         duff_block = new_Block(2, ins);
1763         DB((dbg, LEVEL_4, "Duff block 3 %N\n", duff_block));
1764
1765         /* Get absolute value */
1766         ins[0] = new_Abs(count, mode);
1767         /* Manually feed the aforementioned count = 1 (bad case)*/
1768         ins[1] = new_Const(get_mode_one(mode));
1769         count_phi = new_r_Phi(duff_block, 2, ins, mode);
1770
1771         unroll_c = new_Const(new_tarval_from_long((long)unroll_nr, mode));
1772
1773         /* count % unroll_nr */
1774         duff_mod = new_r_Mod(duff_block,
1775                 new_NoMem(),
1776                 count_phi,
1777                 unroll_c,
1778                 mode,
1779                 op_pin_state_pinned);
1780
1781
1782         proj = new_Proj(duff_mod, mode, pn_Mod_res);
        /* The Cond is NOT automatically placed in the block of the proj, so pin it to duff_block explicitly. */
1784         cond = new_r_Cond(duff_block, proj);
1785
1786         loop_info.duff_cond = cond;
1787 }
1788
1789 /* Returns 1 if given node is not in loop,
1790  * or if it is a phi of the loop head with only loop invariant defs.
1791  */
1792 static unsigned is_loop_invariant_def(ir_node *node)
1793 {
1794         int i;
1795
1796         if (! is_in_loop(node)) {
1797                 DB((dbg, LEVEL_4, "Not in loop %N\n", node));
1798                 /* || is_Const(node) || is_SymConst(node)) {*/
1799                 return 1;
1800         }
1801
1802         /* If this is a phi of the loophead shared by more than 1 loop,
1803          * we need to check if all defs are not in the loop.  */
1804         if (is_Phi(node)) {
1805                 ir_node *block;
1806                 block = get_nodes_block(node);
1807
1808                 /* To prevent unexpected situations. */
1809                 if (block != loop_head) {
1810                         return 0;
1811                 }
1812
1813                 for (i = 0; i < get_irn_arity(node); ++i) {
1814                         /* Check if all bes are just loopbacks. */
1815                         if (is_own_backedge(block, i) && get_irn_n(node, i) != node)
1816                                 return 0;
1817                 }
1818                 DB((dbg, LEVEL_4, "invar %N\n", node));
1819                 return 1;
1820         }
1821         DB((dbg, LEVEL_4, "Not invar %N\n", node));
1822
1823         return 0;
1824 }
1825
1826 /* Returns 1 if one pred of node is invariant and the other is not.
1827  * invar_pred and other are set analogously. */
1828 static unsigned get_invariant_pred(ir_node *node, ir_node **invar_pred, ir_node **other)
1829 {
1830         ir_node *pred0 = get_irn_n(node, 0);
1831         ir_node *pred1 = get_irn_n(node, 1);
1832
1833         *invar_pred = NULL;
1834         *other = NULL;
1835
1836         if (is_loop_invariant_def(pred0)) {
1837                 DB((dbg, LEVEL_4, "pred0 invar %N\n", pred0));
1838                 *invar_pred = pred0;
1839                 *other = pred1;
1840         }
1841
1842         if (is_loop_invariant_def(pred1)) {
1843                 DB((dbg, LEVEL_4, "pred1 invar %N\n", pred1));
1844
1845                 if (*invar_pred != NULL) {
1846                         /* RETURN. We do not want both preds to be invariant. */
1847                         return 0;
1848                 }
1849
1850                 *other = pred0;
1851                 *invar_pred = pred1;
1852                 return 1;
1853         } else {
1854                 DB((dbg, LEVEL_4, "pred1 not invar %N\n", pred1));
1855
1856                 if (*invar_pred != NULL)
1857                         return 1;
1858                 else
1859                         return 0;
1860         }
1861 }
1862
/* Starts from a phi that may belong to an iv.
 * If an add (or sub) forms a loop with iteration_phi,
 * and the add uses a suitable start value, 1 is returned
 * and loop_info.start_val as well as loop_info.add are set. */
1867 static unsigned get_start_and_add(ir_node *iteration_phi, unrolling_kind_flag role)
1868 {
1869         int i;
1870         ir_node *found_add = loop_info.add;
1871         int arity = get_irn_arity(iteration_phi);
1872
1873         DB((dbg, LEVEL_4, "Find start and add from %N\n", iteration_phi));
1874
1875         for (i = 0; i < arity; ++i) {
1876
1877                 /* Find start_val which needs to be pred of the iteration_phi.
1878                  * If start_val already known, sanity check. */
1879                 if (!is_backedge(get_nodes_block(loop_info.iteration_phi), i)) {
1880                         ir_node *found_start_val = get_irn_n(loop_info.iteration_phi, i);
1881
1882                         DB((dbg, LEVEL_4, "found_start_val %N\n", found_start_val));
1883
                        /* If we already found a start_val, it always has to be the same. */
1885                         if (loop_info.start_val && found_start_val != loop_info.start_val)
1886                                 return 0;
1887
                        if ((role == constant) && !(is_SymConst(found_start_val) || is_Const(found_start_val)))
                                return 0;
                        else if ((role == invariant) && !is_loop_invariant_def(found_start_val))
                                return 0;
1892
1893                         loop_info.start_val = found_start_val;
1894                 }
1895
1896                 /* The phi has to be in the loop head.
1897                  * Follow all own backedges. Every value supplied from these preds of the phi
 * needs to originate from the same add. */
1899                 if (is_own_backedge(get_nodes_block(loop_info.iteration_phi), i)) {
1900                         ir_node *new_found = get_irn_n(loop_info.iteration_phi,i);
1901
1902                         DB((dbg, LEVEL_4, "is add? %N\n", new_found));
1903
1904                         if (! (is_Add(new_found) || is_Sub(new_found)) || (found_add && found_add != new_found))
1905                                 return 0;
1906                         else
1907                                 found_add = new_found;
1908                 }
1909         }
1910
1911         loop_info.add = found_add;
1912
1913         return 1;
1914 }
1915
1916
1917 /* Returns 1 if one pred of node is a const value and the other is not.
1918  * const_pred and other are set analogously. */
1919 static unsigned get_const_pred(ir_node *node, ir_node **const_pred, ir_node **other)
1920 {
1921         ir_node *pred0 = get_irn_n(node, 0);
1922         ir_node *pred1 = get_irn_n(node, 1);
1923
1924         DB((dbg, LEVEL_4, "Checking for constant pred of %N\n", node));
1925
1926         *const_pred = NULL;
1927         *other = NULL;
1928
1929         /*DB((dbg, LEVEL_4, "is %N const\n", pred0));*/
1930         if (is_Const(pred0) || is_SymConst(pred0)) {
1931                 *const_pred = pred0;
1932                 *other = pred1;
1933         }
1934
1935         /*DB((dbg, LEVEL_4, "is %N const\n", pred1));*/
1936         if (is_Const(pred1) || is_SymConst(pred1)) {
1937                 if (*const_pred != NULL) {
1938                         /* RETURN. We do not want both preds to be constant. */
1939                         return 0;
1940                 }
1941
1942                 *other = pred0;
1943                 *const_pred = pred1;
1944         }
1945
1946         if (*const_pred == NULL)
1947                 return 0;
1948         else
1949                 return 1;
1950 }
1951
/* Returns 1 if the loop exits within 2 further steps of the iv.
 * norm_proj is the relation under which we stay in the loop. */
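/* E.g. for an iv stepping 0, 3, 6, 9 with end_tar == 10 and norm_proj 'less':
 * stepped == 9 still satisfies 9 < 10, but next == 12 does not, so count_tar
 * is increased once and 1 is returned. */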
1954 static unsigned simulate_next(ir_tarval **count_tar,
1955                 ir_tarval *stepped, ir_tarval *step_tar, ir_tarval *end_tar,
1956                 ir_relation norm_proj)
1957 {
1958         ir_tarval *next;
1959
1960         DB((dbg, LEVEL_4, "Loop taken if (stepped)%ld %s (end)%ld ",
1961                                 get_tarval_long(stepped),
1962                                 get_relation_string((norm_proj)),
1963                                 get_tarval_long(end_tar)));
1964         DB((dbg, LEVEL_4, "comparing latest value %d\n", loop_info.latest_value));
1965
1966         /* If current iv does not stay in the loop,
1967          * this run satisfied the exit condition. */
1968         if (! (tarval_cmp(stepped, end_tar) & norm_proj))
1969                 return 1;
1970
1971         DB((dbg, LEVEL_4, "Result: (stepped)%ld IS %s (end)%ld\n",
1972                                 get_tarval_long(stepped),
1973                                 get_relation_string(tarval_cmp(stepped, end_tar)),
1974                                 get_tarval_long(end_tar)));
1975
1976         /* next step */
1977         if (is_Add(loop_info.add))
1978                 next = tarval_add(stepped, step_tar);
1979         else
1980                 /* sub */
1981                 next = tarval_sub(stepped, step_tar, get_irn_mode(loop_info.end_val));
1982
1983         DB((dbg, LEVEL_4, "Loop taken if %ld %s %ld ",
1984                                 get_tarval_long(next),
1985                                 get_relation_string(norm_proj),
1986                                 get_tarval_long(end_tar)));
1987         DB((dbg, LEVEL_4, "comparing latest value %d\n", loop_info.latest_value));
1988
1989         /* Increase steps. */
1990         *count_tar = tarval_add(*count_tar, get_tarval_one(get_tarval_mode(*count_tar)));
1991
1992         /* Next has to fail the loop condition, or we will never exit. */
1993         if (! (tarval_cmp(next, end_tar) & norm_proj))
1994                 return 1;
1995         else
1996                 return 0;
1997 }
1998
1999 /* Check if loop meets requirements for a 'simple loop':
2000  * - Exactly one cf out
2001  * - Allowed calls
2002  * - Max nodes after unrolling
2003  * - tail-controlled
2004  * - exactly one be
2005  * - cmp
 * Returns the Cmp node or NULL. */
2007 static ir_node *is_simple_loop(void)
2008 {
2009         int arity, i;
2010         ir_node *loop_block, *exit_block, *projx, *cond, *cmp;
2011
2012         /* Maximum of one condition, and no endless loops. */
2013         if (loop_info.cf_outs != 1)
2014                 return NULL;
2015
2016         DB((dbg, LEVEL_4, "1 loop exit\n"));
2017
2018         /* Calculate maximum unroll_nr keeping node count below limit. */
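        /* E.g. a loop of 50 nodes with max_unrolled_loop_size == 400
         * allows an unroll factor of at most 8. */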
2019         loop_info.max_unroll = (int)((double)opt_params.max_unrolled_loop_size / (double)loop_info.nodes);
2020         if (loop_info.max_unroll < 2) {
2021                 ++stats.too_large;
2022                 return NULL;
2023         }
2024
        DB((dbg, LEVEL_4, "maximum unroll factor %u, to not exceed node limit\n",
                loop_info.max_unroll));
2027
2028         arity = get_irn_arity(loop_head);
2029         /* RETURN if we have more than 1 be. */
2030         /* Get my backedges without alien bes. */
2031         loop_block = NULL;
2032         for (i = 0; i < arity; ++i) {
2033                 ir_node *pred = get_irn_n(loop_head, i);
2034                 if (is_own_backedge(loop_head, i)) {
2035                         if (loop_block)
2036                                 /* Our simple loops may have only one backedge. */
2037                                 return NULL;
2038                         else {
2039                                 loop_block = get_nodes_block(pred);
2040                                 loop_info.be_src_pos = i;
2041                         }
2042                 }
2043         }
2044
2045         DB((dbg, LEVEL_4, "loop has 1 own backedge.\n"));
2046
2047         exit_block = get_nodes_block(loop_info.cf_out.pred);
2048         /* The loop has to be tail-controlled.
2049          * This can be changed/improved,
2050          * but we would need a duff iv. */
2051         if (exit_block != loop_block)
2052                 return NULL;
2053
2054         DB((dbg, LEVEL_4, "tail-controlled loop.\n"));
2055
2056         /* find value on which loop exit depends */
2057         projx = loop_info.cf_out.pred;
2058         cond = get_irn_n(projx, 0);
2059         cmp = get_irn_n(cond, 0);
2060
2061         if (!is_Cmp(cmp))
2062                 return NULL;
2063
        DB((dbg, LEVEL_5, "projection is %ld\n", get_Proj_proj(projx)));
2065
2066         switch(get_Proj_proj(projx)) {
2067                 case pn_Cond_false:
2068                         loop_info.exit_cond = 0;
2069                         break;
2070                 case pn_Cond_true:
2071                         loop_info.exit_cond = 1;
2072                         break;
2073                 default:
2074                         panic("Cond Proj_proj other than true/false");
2075         }
2076
2077         DB((dbg, LEVEL_4, "Valid Cmp.\n"));
2078         return cmp;
2079 }
2080
/* Returns 1 if all three nodes have mode_Iu or all have mode_Is. */
2082 static unsigned are_mode_I(ir_node *n1, ir_node* n2, ir_node *n3)
2083 {
2084         ir_mode *m1 = get_irn_mode(n1);
2085         ir_mode *m2 = get_irn_mode(n2);
2086         ir_mode *m3 = get_irn_mode(n3);
2087
2088         if ((m1 == mode_Iu && m2 == mode_Iu && m3 == mode_Iu) ||
2089             (m1 == mode_Is && m2 == mode_Is && m3 == mode_Is))
2090                 return 1;
2091         else
2092                 return 0;
2093 }
2094
2095 /* Checks if cur_loop is a simple tail-controlled counting loop
2096  * with start and end value loop invariant, step constant. */
2097 static unsigned get_unroll_decision_invariant(void)
2098 {
2099
2100         ir_node   *projres, *loop_condition, *iteration_path;
2101         unsigned   success;
2102         ir_tarval *step_tar;
2103         ir_mode   *mode;
2104
2105
2106         /* RETURN if loop is not 'simple' */
2107         projres = is_simple_loop();
2108         if (projres == NULL)
2109                 return 0;
2110
        /* Use a minimal size for the invariant unrolled loop,
         * as Duff's device produces overhead. */
2113         if (loop_info.nodes < opt_params.invar_unrolling_min_size)
2114                 return 0;
2115
        loop_condition = projres;
2117
2118         success = get_invariant_pred(loop_condition, &loop_info.end_val, &iteration_path);
2119         DB((dbg, LEVEL_4, "pred invar %d\n", success));
2120
2121         if (! success)
2122                 return 0;
2123
2124         DB((dbg, LEVEL_4, "Invariant End_val %N, other %N\n", loop_info.end_val, iteration_path));
2125
2126         /* We may find the add or the phi first.
2127          * Until now we only have end_val. */
2128         if (is_Add(iteration_path) || is_Sub(iteration_path)) {
2129
2130                 loop_info.add = iteration_path;
2131                 DB((dbg, LEVEL_4, "Case 1: Got add %N (maybe not sane)\n", loop_info.add));
2132
2133                 /* Preds of the add should be step and the iteration_phi */
2134                 success = get_const_pred(loop_info.add, &loop_info.step, &loop_info.iteration_phi);
2135                 if (! success)
2136                         return 0;
2137
2138                 DB((dbg, LEVEL_4, "Got step %N\n", loop_info.step));
2139
2140                 if (! is_Phi(loop_info.iteration_phi))
2141                         return 0;
2142
2143                 DB((dbg, LEVEL_4, "Got phi %N\n", loop_info.iteration_phi));
2144
2145                 /* Find start_val.
2146                  * Does necessary sanity check of add, if it is already set.  */
2147                 success = get_start_and_add(loop_info.iteration_phi, invariant);
2148                 if (! success)
2149                         return 0;
2150
2151                 DB((dbg, LEVEL_4, "Got start A  %N\n", loop_info.start_val));
2152
2153         } else if (is_Phi(iteration_path)) {
2154                 ir_node *new_iteration_phi;
2155
2156                 loop_info.iteration_phi = iteration_path;
2157                 DB((dbg, LEVEL_4, "Case 2: Got phi %N\n", loop_info.iteration_phi));
2158
2159                 /* Find start_val and add-node.
2160                  * Does necessary sanity check of add, if it is already set.  */
2161                 success = get_start_and_add(loop_info.iteration_phi, invariant);
2162                 if (! success)
2163                         return 0;
2164
2165                 DB((dbg, LEVEL_4, "Got start B %N\n", loop_info.start_val));
2166                 DB((dbg, LEVEL_4, "Got add or sub %N\n", loop_info.add));
2167
2168                 success = get_const_pred(loop_info.add, &loop_info.step, &new_iteration_phi);
2169                 if (! success)
2170                         return 0;
2171
2172                 DB((dbg, LEVEL_4, "Got step (B) %N\n", loop_info.step));
2173
2174                 if (loop_info.iteration_phi != new_iteration_phi)
2175                         return 0;
2176
2177         } else {
2178                 return 0;
2179         }
2180
2181         mode = get_irn_mode(loop_info.end_val);
2182
2183         DB((dbg, LEVEL_4, "start %N, end %N, step %N\n",
2184                                 loop_info.start_val, loop_info.end_val, loop_info.step));
2185
2186         if (mode != mode_Is && mode != mode_Iu)
2187                 return 0;
2188
2189         /* TODO necessary? */
2190         if (!are_mode_I(loop_info.start_val, loop_info.step, loop_info.end_val))
2191                 return 0;
2192
2193         DB((dbg, LEVEL_4, "mode integer\n"));
2194
2195         step_tar = get_Const_tarval(loop_info.step);
2196
2197         if (tarval_is_null(step_tar)) {
2198                 /* TODO Might be worth a warning. */
2199                 return 0;
2200         }
2201
2202         DB((dbg, LEVEL_4, "step is not 0\n"));
2203
2204         create_duffs_block();
2205
2206         return loop_info.max_unroll;
2207 }
2208
2209 /* Returns unroll factor,
2210  * given maximum unroll factor and number of loop passes. */
2211 static unsigned get_preferred_factor_constant(ir_tarval *count_tar)
2212 {
2213         ir_tarval *tar_6, *tar_5, *tar_4, *tar_3, *tar_2;
2214         unsigned prefer;
2215         ir_mode *mode = get_irn_mode(loop_info.end_val);
2216
2217         tar_6 = new_tarval_from_long(6, mode);
2218         tar_5 = new_tarval_from_long(5, mode);
2219         tar_4 = new_tarval_from_long(4, mode);
2220         tar_3 = new_tarval_from_long(3, mode);
2221         tar_2 = new_tarval_from_long(2, mode);
2222
2223         /* loop passes % {6, 5, 4, 3, 2} == 0  */
2224         if (tarval_is_null(tarval_mod(count_tar, tar_6)))
2225                 prefer = 6;
2226         else if (tarval_is_null(tarval_mod(count_tar, tar_5)))
2227                 prefer = 5;
2228         else if (tarval_is_null(tarval_mod(count_tar, tar_4)))
2229                 prefer = 4;
2230         else if (tarval_is_null(tarval_mod(count_tar, tar_3)))
2231                 prefer = 3;
2232         else if (tarval_is_null(tarval_mod(count_tar, tar_2)))
2233                 prefer = 2;
2234         else {
2235                 /* gcd(max_unroll, count_tar) */
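                /* Euclid's algorithm below; e.g. max_unroll == 6 and
                 * 9 loop passes give an unroll factor of 3. */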
2236                 int a = loop_info.max_unroll;
2237                 int b = (int)get_tarval_long(count_tar);
2238                 int c;
2239
2240                 DB((dbg, LEVEL_4, "gcd of max_unroll %d and count_tar %d: ", a, b));
2241
                do {
                        c = a % b;
                        a = b;
                        b = c;
                } while (c != 0);
2246
2247                 DB((dbg, LEVEL_4, "%d\n", a));
2248                 return a;
2249         }
2250
2251         DB((dbg, LEVEL_4, "preferred unroll factor %d\n", prefer));
2252
2253         /*
         * If our preference is greater than the allowed unroll factor,
         * we can either reduce the preferred factor and avoid a Duff's device block,
         * or create a Duff's device block, for which in this case (constants only)
         * we would know the start loop at compile time.
         * The latter yields the following graphs,
         * but for code generation we would want to use graph A.
         * The graphs are equivalent, so we can only reduce the preferred factor.
2261          * A)                   B)
2262          *     PreHead             PreHead
2263          *        |      ,--.         |   ,--.
2264          *         \ Loop1   \        Loop2   \
2265          *          \  |     |       /  |     |
2266          *           Loop2   /      / Loop1   /
2267          *           |   `--'      |      `--'
2268          */
2269
2270         if (prefer <= loop_info.max_unroll)
2271                 return prefer;
2272         else {
2273                 switch(prefer) {
2274                         case 6:
2275                                 if (loop_info.max_unroll >= 3)
2276                                         return 3;
2277                                 else if (loop_info.max_unroll >= 2)
2278                                         return 2;
2279                                 else
2280                                         return 0;
2281
2282                         case 4:
2283                                 if (loop_info.max_unroll >= 2)
2284                                         return 2;
2285                                 else
2286                                         return 0;
2287
2288                         default:
2289                                 return 0;
2290                 }
2291         }
2292 }
2293
2294 /* Check if cur_loop is a simple counting loop.
2295  * Start, step and end are constants.
2296  * TODO The whole constant case should use procedures similar to
2297  * the invariant case, as they are more versatile. */
2298 /* TODO split. */
2299 static unsigned get_unroll_decision_constant(void)
2300 {
2301         ir_node     *cmp, *iteration_path;
2302         unsigned     success, is_latest_val;
2303         ir_tarval   *start_tar, *end_tar, *step_tar, *diff_tar, *count_tar;
2304         ir_tarval   *stepped;
2305         ir_relation  proj_proj, norm_proj;
2306         ir_mode     *mode;
2307
2308         /* RETURN if loop is not 'simple' */
2309         cmp = is_simple_loop();
2310         if (cmp == NULL)
2311                 return 0;
2312
2313         /* One in of the loop condition needs to be loop invariant. => end_val
2314          * The other in is assigned by an add. => add
2315          * The add uses a loop invariant value => step
2316          * and a phi with a loop invariant start_val and the add node as ins.
2317
2318            ^   ^
2319            |   | .-,
2320            |   Phi |
2321                 \  |   |
2322           ^  Add   |
2323            \  | \__|
2324             cond
2325              /\
2326         */
2327
2328         success = get_const_pred(cmp, &loop_info.end_val, &iteration_path);
2329         if (! success)
2330                 return 0;
2331
2332         DB((dbg, LEVEL_4, "End_val %N, other %N\n", loop_info.end_val, iteration_path));
2333
2334         /* We may find the add or the phi first.
2335          * Until now we only have end_val. */
2336         if (is_Add(iteration_path) || is_Sub(iteration_path)) {
2337
2338                 /* We test against the latest value of the iv. */
2339                 is_latest_val = 1;
2340
2341                 loop_info.add = iteration_path;
2342                 DB((dbg, LEVEL_4, "Case 2: Got add %N (maybe not sane)\n", loop_info.add));
2343
2344                 /* Preds of the add should be step and the iteration_phi */
2345                 success = get_const_pred(loop_info.add, &loop_info.step, &loop_info.iteration_phi);
2346                 if (! success)
2347                         return 0;
2348
2349                 DB((dbg, LEVEL_4, "Got step %N\n", loop_info.step));
2350
2351                 if (! is_Phi(loop_info.iteration_phi))
2352                         return 0;
2353
2354                 DB((dbg, LEVEL_4, "Got phi %N\n", loop_info.iteration_phi));
2355
2356                 /* Find start_val.
2357                  * Does necessary sanity check of add, if it is already set.  */
2358                 success = get_start_and_add(loop_info.iteration_phi, constant);
2359                 if (! success)
2360                         return 0;
2361
2362                 DB((dbg, LEVEL_4, "Got start %N\n", loop_info.start_val));
2363
2364         } else if (is_Phi(iteration_path)) {
2365                 ir_node *new_iteration_phi;
2366
2367                 /* We compare with the value the iv had entering this run. */
2368                 is_latest_val = 0;
2369
2370                 loop_info.iteration_phi = iteration_path;
2371                 DB((dbg, LEVEL_4, "Case 1: Got phi %N \n", loop_info.iteration_phi));
2372
2373                 /* Find start_val and add-node.
2374                  * Does necessary sanity check of add, if it is already set.  */
2375                 success = get_start_and_add(loop_info.iteration_phi, constant);
2376                 if (! success)
2377                         return 0;
2378
2379                 DB((dbg, LEVEL_4, "Got start %N\n", loop_info.start_val));
2380                 DB((dbg, LEVEL_4, "Got add or sub %N\n", loop_info.add));
2381
2382                 success = get_const_pred(loop_info.add, &loop_info.step, &new_iteration_phi);
2383                 if (! success)
2384                         return 0;
2385
2386                 DB((dbg, LEVEL_4, "Got step %N\n", loop_info.step));
2387
2388                 if (loop_info.iteration_phi != new_iteration_phi)
2389                         return 0;
2390
2391         } else {
2392                 /* RETURN */
2393                 return 0;
2394         }
2395
2396         mode = get_irn_mode(loop_info.end_val);
2397
2398         DB((dbg, LEVEL_4, "start %N, end %N, step %N\n",
2399                                 loop_info.start_val, loop_info.end_val, loop_info.step));
2400
2401         if (mode != mode_Is && mode != mode_Iu)
2402                 return 0;
2403
2404         /* TODO necessary? */
2405         if (!are_mode_I(loop_info.start_val, loop_info.step, loop_info.end_val))
2406                 return 0;
2407
2408         DB((dbg, LEVEL_4, "mode integer\n"));
2409
2410         end_tar = get_Const_tarval(loop_info.end_val);
2411         start_tar = get_Const_tarval(loop_info.start_val);
2412         step_tar = get_Const_tarval(loop_info.step);
2413
2414         if (tarval_is_null(step_tar))
2415                 /* TODO Might be worth a warning. */
2416                 return 0;
2417
2418         DB((dbg, LEVEL_4, "step is not 0\n"));
2419
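        /* The iv decreases iff exactly one of these holds:
         * the step is negative, or it is applied by a Sub. */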
2420         if ((!tarval_is_negative(step_tar)) ^ (!is_Sub(loop_info.add)))
2421                 loop_info.decreasing = 1;
2422
2423         diff_tar = tarval_sub(end_tar, start_tar, mode);
2424
        /* We need at least count_tar steps to get close to end_val, maybe more.
         * There is no way we have gone too many steps.
         * This represents the 'latest value'.
         * (Whether the condition checks against the latest value is handled later.) */
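        /* E.g. start = 0, end = 10, step = 3: diff = 10 and the first
         * estimate is count = 3; simulate_next() below may add further steps. */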
2429         count_tar = tarval_div(diff_tar, step_tar);
2430
        /* The iv will not pass end_val (except for overflows).
         * Nothing is done, as it would yield no advantage. */
2433         if (tarval_is_negative(count_tar)) {
2434                 DB((dbg, LEVEL_4, "Loop is endless or never taken."));
2435                 /* TODO Might be worth a warning. */
2436                 return 0;
2437         }
2438
2439         ++stats.u_simple_counting_loop;
2440
2441         loop_info.latest_value = is_latest_val;
2442
2443         /* TODO split here
2444         if (! is_simple_counting_loop(&count_tar))
2445                 return 0;
2446         */
2447
2448         /* stepped can be negative, if step < 0 */
2449         stepped = tarval_mul(count_tar, step_tar);
2450
        /* Step as close to end_val as possible:
         * |stepped| <= |end_tar|, and dist(stepped, end_tar) is smaller than one step. */
        if (is_Sub(loop_info.add))
                stepped = tarval_sub(start_tar, stepped, mode);
2455         else
2456                 stepped = tarval_add(start_tar, stepped);
2457
2458         DB((dbg, LEVEL_4, "stepped to %ld\n", get_tarval_long(stepped)));
2459
2460         proj_proj = get_Cmp_relation(cmp);
2461         /* Assure that norm_proj is the stay-in-loop case. */
2462         if (loop_info.exit_cond == 1)
2463                 norm_proj = get_negated_relation(proj_proj);
2464         else
2465                 norm_proj = proj_proj;
2466
2467         DB((dbg, LEVEL_4, "normalized projection %s\n", get_relation_string(norm_proj)));
        /* Executed at most once (we would stay in the counting loop only if a == b). */
2469         if (norm_proj == ir_relation_equal)
2470                 /* TODO Might be worth a warning. */
2471                 return 0;
2472
        /* Calculates the next values and increases count_tar accordingly. */
2474         success = simulate_next(&count_tar, stepped, step_tar, end_tar, norm_proj);
2475         if (! success)
2476                 return 0;
2477
        /* We run the loop once more if we compare against the
         * not yet in-/decremented iv. */
2480         if (is_latest_val == 0) {
2481                 DB((dbg, LEVEL_4, "condition uses not latest iv value\n"));
2482                 count_tar = tarval_add(count_tar, get_tarval_one(mode));
2483         }
2484
2485         DB((dbg, LEVEL_4, "loop taken %ld times\n", get_tarval_long(count_tar)));
2486
2487         /* Assure the loop is taken at least 1 time. */
2488         if (tarval_is_null(count_tar)) {
2489                 /* TODO Might be worth a warning. */
2490                 return 0;
2491         }
2492
2493         loop_info.count_tar = count_tar;
2494         return get_preferred_factor_constant(count_tar);
2495 }
2496
2497 /**
2498  * Loop unrolling
2499  */
2500 static void unroll_loop(void)
2501 {
2502
2503         if (! (loop_info.nodes > 0))
2504                 return;
2505
2506         if (loop_info.nodes > opt_params.max_unrolled_loop_size) {
2507                 DB((dbg, LEVEL_2, "Nodes %d > allowed nodes %d\n",
2508                         loop_info.nodes, opt_params.max_unrolled_loop_size));
2509                 ++stats.too_large;
2510                 return;
2511         }
2512
2513         if (loop_info.calls > 0) {
2514                 DB((dbg, LEVEL_2, "Calls %d > allowed calls 0\n",
2515                         loop_info.calls));
2516                 ++stats.calls_limit;
2517                 return;
2518         }
2519
2520         unroll_nr = 0;
2521
2522         /* get_unroll_decision_constant and invariant are completely
2523          * independent for flexibility.
2524          * Some checks may be performed twice. */
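        /* E.g. a tail-controlled loop like 'i = 0; do { ... i += 3; } while (i != 12);'
         * is a candidate for the constant case; the invariant case targets loops
         * whose bounds are loop invariant but not compile-time constants and
         * pays for that with Duff's device blocks. */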
2525
2526         /* constant case? */
2527         if (opt_params.allow_const_unrolling)
2528                 unroll_nr = get_unroll_decision_constant();
2529         if (unroll_nr > 1) {
2530                 loop_info.unroll_kind = constant;
2531
2532         } else {
2533                 /* invariant case? */
2534                 if (opt_params.allow_invar_unrolling)
2535                         unroll_nr = get_unroll_decision_invariant();
2536                 if (unroll_nr > 1)
2537                         loop_info.unroll_kind = invariant;
2538         }
2539
2540         DB((dbg, LEVEL_2, " *** Unrolling %d times ***\n", unroll_nr));
2541
2542         if (unroll_nr > 1) {
2543                 loop_entries = NEW_ARR_F(entry_edge, 0);
2544
2545                 /* Get loop outs */
2546                 irg_walk_graph(current_ir_graph, get_loop_entries, NULL, NULL);
2547
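                /* If the constant trip count equals the unroll factor, the
                 * unrolled body covers every iteration and no backedge is
                 * needed; otherwise the loop keeps its backedge. */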
2548                 if (loop_info.unroll_kind == constant) {
2549                         if ((int)get_tarval_long(loop_info.count_tar) == unroll_nr)
2550                                 loop_info.needs_backedge = 0;
2551                         else
2552                                 loop_info.needs_backedge = 1;
2553                 } else {
2554                         loop_info.needs_backedge = 1;
2555                 }
2556
                /* Use a nodemap (plus obstack) to keep the copies of the loop nodes. */
2558                 ir_nodemap_init(&map, current_ir_graph);
2559                 obstack_init(&obst);
2560
2561                 /* Copies the loop */
2562                 copy_loop(loop_entries, unroll_nr - 1);
2563
2564                 /* Line up the floating copies. */
2565                 place_copies(unroll_nr - 1);
2566
2567                 /* Remove phis with 1 in
2568                  * If there were no nested phis, this would not be necessary.
2569                  * Avoiding the creation in the first place
2570                  * leads to complex special cases. */
2571                 irg_walk_graph(current_ir_graph, correct_phis, NULL, NULL);
2572
2573                 if (loop_info.unroll_kind == constant)
2574                         ++stats.constant_unroll;
2575                 else
2576                         ++stats.invariant_unroll;
2577
2578                 clear_irg_state(current_ir_graph, IR_GRAPH_STATE_CONSISTENT_DOMINANCE);
2579
2580                 DEL_ARR_F(loop_entries);
2581                 obstack_free(&obst, NULL);
2582                 ir_nodemap_destroy(&map);
2583         }
2584
2585 }
2586
2587 /* Analyzes the loop, and checks if size is within allowed range.
2588  * Decides if loop will be processed. */
2589 static void init_analyze(ir_graph *irg, ir_loop *loop)
2590 {
2591         cur_loop = loop;
2592
2593         loop_head       = NULL;
2594         loop_head_valid = true;
2595
2596         /* Reset loop info */
2597         memset(&loop_info, 0, sizeof(loop_info_t));
2598
        DB((dbg, LEVEL_1, "    >>>> current loop includes node %N <<<<\n",
2600                 get_loop_node(loop, 0)));
2601
        /* Collect loop information: head, node counts. */
2603         irg_walk_graph(irg, get_loop_info, NULL, NULL);
2604
2605         /* RETURN if there is no valid head */
2606         if (!loop_head || !loop_head_valid) {
2607                 DB((dbg, LEVEL_1,   "No valid loop head. Nothing done.\n"));
2608                 return;
2609         } else {
2610                 DB((dbg, LEVEL_1,   "Loophead: %N\n", loop_head));
2611         }
2612
2613         if (loop_info.branches > opt_params.max_branches) {
2614                 DB((dbg, LEVEL_1, "Branches %d > allowed branches %d\n",
2615                         loop_info.branches, opt_params.max_branches));
2616                 ++stats.calls_limit;
2617                 return;
2618         }
2619
2620         switch (loop_op) {
2621                 case loop_op_inversion:
2622                         loop_inversion(irg);
2623                         break;
2624
2625                 case loop_op_unrolling:
2626                         unroll_loop();
2627                         break;
2628
2629                 default:
2630                         panic("Loop optimization not implemented.");
2631         }
2632         DB((dbg, LEVEL_1, "       <<<< end of loop with node %N >>>>\n",
2633                 get_loop_node(loop, 0)));
2634 }
2635
2636 /* Find innermost loops and add them to loops. */
2637 static void find_innermost_loop(ir_loop *loop)
2638 {
2639         /* descend into sons */
2640         size_t sons = get_loop_n_sons(loop);
2641
2642         if (sons == 0) {
2643                 ARR_APP1(ir_loop *, loops, loop);
2644         } else {
2645                 size_t s;
2646                 for (s = 0; s < sons; ++s) {
2647                         find_innermost_loop(get_loop_son(loop, s));
2648                 }
2649         }
2650 }
2651
2652 static void set_loop_params(void)
2653 {
        opt_params.max_loop_size = 100;
        opt_params.depth_adaption = -50;
        opt_params.count_phi = true;
        opt_params.count_proj = false;
        opt_params.allowed_calls = 0;

        opt_params.max_cc_size = 5;

        opt_params.allow_const_unrolling = true;
        opt_params.allow_invar_unrolling = false;

        opt_params.invar_unrolling_min_size = 20;
        opt_params.max_unrolled_loop_size = 400;
        opt_params.max_branches = 9999;
2669 }
2670
2671 /* Assure preconditions are met and go through all loops. */
2672 void loop_optimization(ir_graph *irg)
2673 {
2674         ir_loop *loop;
2675         size_t  sons, nr;
2676         size_t  i;
2677
2678         set_loop_params();
2679
2680         /* Reset stats for this procedure */
2681         reset_stats();
2682
2683         /* Preconditions */
2684         set_current_ir_graph(irg);
2685
2686         ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
2687         collect_phiprojs(irg);
2688
2689         loop = get_irg_loop(irg);
2690         sons = get_loop_n_sons(loop);
2691
2692         loops = NEW_ARR_F(ir_loop *, 0);
2693         /* List all inner loops */
2694         for (nr = 0; nr < sons; ++nr) {
2695                 find_innermost_loop(get_loop_son(loop, nr));
2696         }
2697
2698         /* Set all links to NULL */
2699         irg_walk_graph(irg, reset_link, NULL, NULL);
2700
2701         for (i = 0; i < ARR_LEN(loops); ++i) {
2702                 ir_loop *loop = loops[i];
2703
2704                 ++stats.loops;
2705
2706                 /* Analyze and handle loop */
2707                 init_analyze(irg, loop);
2708
2709                 /* Copied blocks do not have their phi list yet */
2710                 collect_phiprojs(irg);
2711
2712                 /* Set links to NULL
2713                  * TODO Still necessary? */
2714                 irg_walk_graph(irg, reset_link, NULL, NULL);
2715         }
2716
2717         print_stats();
2718
2719         DEL_ARR_F(loops);
2720         ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
2721 }
2722
2723 static ir_graph_state_t perform_loop_unrolling(ir_graph *irg)
2724 {
2725         loop_op = loop_op_unrolling;
2726         loop_optimization(irg);
2727         return 0;
2728 }
2729
2730 static ir_graph_state_t perform_loop_inversion(ir_graph *irg)
2731 {
2732         loop_op = loop_op_inversion;
2733         loop_optimization(irg);
2734         return 0;
2735 }
2736
2737 static ir_graph_state_t perform_loop_peeling(ir_graph *irg)
2738 {
2739         loop_op = loop_op_peeling;
2740         loop_optimization(irg);
2741         return 0;
2742 }
2743
2744 static optdesc_t opt_unroll_loops = {
2745         "unroll-loops",
2746         IR_GRAPH_STATE_CONSISTENT_OUT_EDGES | IR_GRAPH_STATE_CONSISTENT_OUTS | IR_GRAPH_STATE_CONSISTENT_LOOPINFO,
2747         perform_loop_unrolling,
2748 };
2749
2750 static optdesc_t opt_invert_loops = {
2751         "invert-loops",
2752         IR_GRAPH_STATE_CONSISTENT_OUT_EDGES | IR_GRAPH_STATE_CONSISTENT_OUTS | IR_GRAPH_STATE_CONSISTENT_LOOPINFO,
2753         perform_loop_inversion,
2754 };
2755
2756 static optdesc_t opt_peel_loops = {
2757         "peel-loops",
2758         IR_GRAPH_STATE_CONSISTENT_OUT_EDGES | IR_GRAPH_STATE_CONSISTENT_OUTS | IR_GRAPH_STATE_CONSISTENT_LOOPINFO,
2759         perform_loop_peeling,
2760 };
2761
2762 void do_loop_unrolling(ir_graph *irg)
2763 { perform_irg_optimization(irg, &opt_unroll_loops); }
2764
2765 void do_loop_inversion(ir_graph *irg)
2766 { perform_irg_optimization(irg, &opt_invert_loops); }
2767
2768 void do_loop_peeling(ir_graph *irg)
2769 { perform_irg_optimization(irg, &opt_peel_loops); }
2770
2771 ir_graph_pass_t *loop_inversion_pass(const char *name)
2772 {
2773         return def_graph_pass(name ? name : "loop_inversion", do_loop_inversion);
2774 }
2775
2776 ir_graph_pass_t *loop_unroll_pass(const char *name)
2777 {
2778         return def_graph_pass(name ? name : "loop_unroll", do_loop_unrolling);
2779 }
2780
2781 ir_graph_pass_t *loop_peeling_pass(const char *name)
2782 {
2783         return def_graph_pass(name ? name : "loop_peeling", do_loop_peeling);
2784 }
2785
2786 void firm_init_loop_opt(void)
2787 {
2788         FIRM_DBG_REGISTER(dbg, "firm.opt.loop");
2789 }