correctly implement memop handling
[libfirm] / ir / opt / loop.c
1 /*
2  * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License.
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @author   Christian Helmer
23  * @brief    loop inversion and loop unrolling
24  *
25  */
26 #include "config.h"
27
28 #include <stdbool.h>
29
30 #include "iroptimize.h"
31 #include "opt_init.h"
32 #include "irnode.h"
33 #include "debug.h"
34 #include "error.h"
35
36 #include "ircons.h"
37 #include "irgopt.h"
38 #include "irgmod.h"
39 #include "irgwalk.h"
40 #include "irouts.h"
41 #include "iredges.h"
42 #include "irtools.h"
43 #include "array_t.h"
44 #include "beutil.h"
45 #include "irpass.h"
46 #include "irdom.h"
47 #include "opt_manage.h"
48
49 #include <math.h>
50 #include "irbackedge_t.h"
51 #include "irnodemap.h"
52 #include "irloop_t.h"
53
54 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
55
56 /**
57  * Convenience macro for iterating over every phi node of the given block.
58  * Requires phi list per block.
59  */
60 #define for_each_phi(block, phi) \
61         for ((phi) = get_Block_phis( (block) ); (phi) ; (phi) = get_Phi_next((phi)))
62
63 #define for_each_phi_safe(head, phi, next) \
64         for ((phi) = (head), (next) = (head) ? get_Phi_next((head)) : NULL; \
65                         (phi) ; (phi) = (next), (next) = (next) ? get_Phi_next((next)) : NULL)
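/* Note: for_each_phi_safe fetches the next phi before the loop body runs,
 * so the current phi may be exchanged or removed from the block's phi list
 * while iterating. */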
66
67 /* Currently processed loop. */
68 static ir_loop *cur_loop;
69
70 /* Flag for kind of unrolling. */
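/* Assumed meaning, judging from the uses below: "constant" unrolling is driven
 * by a compile-time constant trip count, "invariant" unrolling only by a
 * loop-invariant end value. */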
71 typedef enum {
72         constant,
73         invariant
74 } unrolling_kind_flag;
75
76 /* Condition for visiting a node during copy_walk. */
77 typedef bool walker_condition(const ir_node *);
78
79 /* Node and position of a predecessor. */
80 typedef struct entry_edge {
81         ir_node *node;
82         int pos;
83         ir_node *pred;
84 } entry_edge;
85
86 /* Node info for unrolling. */
87 typedef struct unrolling_node_info {
88         ir_node **copies;
89 } unrolling_node_info;
90
91 /* Outs of the nodes head. */
92 static entry_edge *cur_head_outs;
93
94 /* Information about the loop head */
95 static ir_node *loop_head       = NULL;
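/* Cleared by get_loop_info() as soon as a second head candidate is found. */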
96 static bool     loop_head_valid = true;
97
98 /* List of all inner loops that are processed. */
99 static ir_loop **loops;
100
101 /* Stats */
102 typedef struct loop_stats_t {
103         unsigned loops;
104         unsigned inverted;
105         unsigned too_large;
106         unsigned too_large_adapted;
107         unsigned cc_limit_reached;
108         unsigned calls_limit;
109
110         unsigned u_simple_counting_loop;
111         unsigned constant_unroll;
112         unsigned invariant_unroll;
113
114         unsigned unhandled;
115 } loop_stats_t;
116
117 static loop_stats_t stats;
118
119 /* Set stats to zero */
120 static void reset_stats(void)
121 {
122         memset(&stats, 0, sizeof(loop_stats_t));
123 }
124
125 /* Print stats */
126 static void print_stats(void)
127 {
128         DB((dbg, LEVEL_2, "---------------------------------------\n"));
129         DB((dbg, LEVEL_2, "loops             :   %d\n",stats.loops));
130         DB((dbg, LEVEL_2, "inverted          :   %d\n",stats.inverted));
131         DB((dbg, LEVEL_2, "too_large         :   %d\n",stats.too_large));
132         DB((dbg, LEVEL_2, "too_large_adapted :   %d\n",stats.too_large_adapted));
133         DB((dbg, LEVEL_2, "cc_limit_reached  :   %d\n",stats.cc_limit_reached));
134         DB((dbg, LEVEL_2, "calls_limit       :   %d\n",stats.calls_limit));
135         DB((dbg, LEVEL_2, "u_simple_counting :   %d\n",stats.u_simple_counting_loop));
136         DB((dbg, LEVEL_2, "constant_unroll   :   %d\n",stats.constant_unroll));
137         DB((dbg, LEVEL_2, "invariant_unroll  :   %d\n",stats.invariant_unroll));
138         DB((dbg, LEVEL_2, "=======================================\n"));
139 }
140
141 /* Command line parameters */
142 typedef struct loop_opt_params_t {
143         unsigned max_loop_size;     /* Maximum number of nodes [nodes] */
144         int      depth_adaption;    /* Loop nest depth adaption [percent] */
145         unsigned allowed_calls;     /* Number of calls allowed [number] */
146         bool     count_phi;         /* Count phi nodes */
147         bool     count_proj;        /* Count projections */
148
149         unsigned max_cc_size;       /* Maximum condition chain size [nodes] */
150         unsigned max_branches;
151
152         unsigned max_unrolled_loop_size;    /* [nodes] */
153         bool     allow_const_unrolling;
154         bool     allow_invar_unrolling;
155         unsigned invar_unrolling_min_size;  /* [nodes] */
156
157 } loop_opt_params_t;
158
159 static loop_opt_params_t opt_params;
160
161 /* Loop analysis information */
162 typedef struct loop_info_t {
163         unsigned nodes;        /* node count */
164         unsigned ld_st;        /* load and store nodes */
165         unsigned branches;     /* number of conditions */
166         unsigned calls;        /* number of calls */
167         unsigned cf_outs;      /* number of cf edges which leave the loop */
168         entry_edge cf_out;     /* single loop leaving cf edge */
169         int be_src_pos;        /* position of the single own backedge in the head */
170
171         /* for inversion */
172         unsigned cc_size;      /* nodes in the condition chain */
173
174         /* for unrolling */
175         unsigned max_unroll;   /* Number of unrolls satisfying max_loop_size */
176         unsigned exit_cond;    /* 1 if condition==true exits the loop.  */
177         unsigned latest_value:1;    /* 1 if condition is checked against latest counter value */
178         unsigned needs_backedge:1;  /* 0 if loop is completely unrolled */
179         unsigned decreasing:1;      /* Step operation is_Sub, or step is<0 */
180
181         /* IV information of a simple loop */
182         ir_node *start_val;
183         ir_node *step;
184         ir_node *end_val;
185         ir_node *iteration_phi;
186         ir_node *add;
187
188         ir_tarval *count_tar;               /* Number of loop iterations */
189
190         ir_node *duff_cond;                 /* Duff's device style entry condition */
191         unrolling_kind_flag unroll_kind;    /* constant or invariant unrolling */
192 } loop_info_t;
193
194 /* Information about the current loop */
195 static loop_info_t loop_info;
196
197 /* Outs of the condition chain (loop inversion). */
198 static ir_node **cc_blocks;
199 /* df/cf edges with def in the condition chain */
200 static entry_edge *cond_chain_entries;
201 /* Array of df loops found in the condition chain. */
202 static entry_edge *head_df_loop;
203 /* Number of blocks in cc */
204 static unsigned inversion_blocks_in_cc;
205
206
207 /* Cf/df edges leaving the loop.
208  * Called entries here, as they are used to enter the loop with walkers. */
209 static entry_edge *loop_entries;
210 /* Number of unrolls to perform */
211 static int unroll_nr;
212 /* Node map used to keep copies of nodes. */
213 static ir_nodemap     map;
214 static struct obstack obst;
215
216 /* Loop operations.  */
217 typedef enum loop_op_t {
218         loop_op_inversion,
219         loop_op_unrolling,
220         loop_op_peeling
221 } loop_op_t;
222
223 /* Saves which loop operation to do until after basic tests. */
224 static loop_op_t loop_op;
225
226 /* Returns the maximum number of nodes allowed for the given nest depth. */
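/* Worked example with assumed parameters: max_loop_size = 100 and
 * depth_adaption = -10 (percent) allow 100 * 0.9^2 = 81 nodes for a loop
 * at nest depth 2. */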
227 static unsigned get_max_nodes_adapted(unsigned depth)
228 {
229         double perc = 100.0 + (double)opt_params.depth_adaption;
230         double factor = pow(perc / 100.0, depth);
231
232         return (int)((double)opt_params.max_loop_size * factor);
233 }
234
235 /* Resets the node's link. For use with a walker. */
236 static void reset_link(ir_node *node, void *env)
237 {
238         (void)env;
239         set_irn_link(node, NULL);
240 }
241
242 /* Returns true if the node (or its block) is in cur_loop. */
243 static bool is_in_loop(const ir_node *node)
244 {
245         return get_irn_loop(get_block_const(node)) == cur_loop;
246 }
247
248 /* Returns true if the given edge is a backedge
249  * with its predecessor inside cur_loop. */
250 static bool is_own_backedge(const ir_node *n, int pos)
251 {
252         return is_backedge(n, pos) && is_in_loop(get_irn_n(n, pos));
253 }
254
255 /* Finds the loop head and collects loop information such as node, call and load/store counts. */
256 static void get_loop_info(ir_node *node, void *env)
257 {
258         bool node_in_loop = is_in_loop(node);
259         int i, arity;
260         (void)env;
261
262         /* collect some loop information */
263         if (node_in_loop) {
264                 if (is_Phi(node) && opt_params.count_phi)
265                         ++loop_info.nodes;
266                 else if (is_Proj(node) && opt_params.count_proj)
267                         ++loop_info.nodes;
268                 else if (!is_Confirm(node) && !is_Const(node) && !is_SymConst(node))
269                         ++loop_info.nodes;
270
271                 if (is_Load(node) || is_Store(node))
272                         ++loop_info.ld_st;
273
274                 if (is_Call(node))
275                         ++loop_info.calls;
276         }
277
278         arity = get_irn_arity(node);
279         for (i = 0; i < arity; i++) {
280                 ir_node *pred         = get_irn_n(node, i);
281                 bool     pred_in_loop = is_in_loop(pred);
282
283                 if (is_Block(node) && !node_in_loop && pred_in_loop) {
284                         entry_edge entry;
285                         entry.node = node;
286                         entry.pos = i;
287                         entry.pred = pred;
288                         /* Count cf outs */
289                         ++loop_info.cf_outs;
290                         loop_info.cf_out = entry;
291                 }
292
293                 /* Find the loop's head, i.e. blocks with a cf pred outside of the loop */
294                 if (is_Block(node)) {
295                         const ir_edge_t *edge;
296                         unsigned outs_n = 0;
297
298                         /* Count innerloop branches */
299                         foreach_out_edge_kind(node, edge, EDGE_KIND_BLOCK) {
300                                 ir_node *succ = get_edge_src_irn(edge);
301                                 if (is_Block(succ) && is_in_loop(succ))
302                                         ++outs_n;
303                         }
304                         if (outs_n > 1)
305                                 ++loop_info.branches;
306
307                         if (node_in_loop && !pred_in_loop && loop_head_valid) {
308                                 ir_node *cfgpred = get_Block_cfgpred(node, i);
309
310                                 if (!is_in_loop(cfgpred)) {
311                                         DB((dbg, LEVEL_5, "potential head %+F because inloop and pred %+F not inloop\n",
312                                                                 node, pred));
313                                         /* another head? We do not touch this. */
314                                         if (loop_head && loop_head != node) {
315                                                 loop_head_valid = false;
316                                         } else {
317                                                 loop_head = node;
318                                         }
319                                 }
320                         }
321                 }
322         }
323 }
324
325 /* Finds all edges with users outside of the loop
326  * and definition inside the loop. */
327 static void get_loop_entries(ir_node *node, void *env)
328 {
329         unsigned node_in_loop, pred_in_loop;
330         int i, arity;
331         (void) env;
332
333         arity = get_irn_arity(node);
334         for (i = 0; i < arity; ++i) {
335                 ir_node *pred = get_irn_n(node, i);
336
337                 pred_in_loop = is_in_loop(pred);
338                 node_in_loop = is_in_loop(node);
339
340                 if (pred_in_loop && !node_in_loop) {
341                         entry_edge entry;
342                         entry.node = node;
343                         entry.pos = i;
344                         entry.pred = pred;
345                         ARR_APP1(entry_edge, loop_entries, entry);
346                 }
347         }
348 }
349
350 /* ssa */
351 static ir_node *ssa_second_def;
352 static ir_node *ssa_second_def_block;
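/* These globals carry the second definition from construct_ssa() to
 * search_def_and_create_phis(). */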
353
354 /**
355  * Walks the graph bottom up, searching for definitions and creating Phis.
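 * The parameter first is true only for the outermost call; it prevents the
 * search from immediately returning the second definition when it starts in
 * ssa_second_def_block itself.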
356  */
357 static ir_node *search_def_and_create_phis(ir_node *block, ir_mode *mode, int first)
358 {
359         int i;
360         int n_cfgpreds;
361         ir_graph *irg = get_irn_irg(block);
362         ir_node *phi;
363         ir_node **in;
364
365         DB((dbg, LEVEL_5, "ssa search_def_and_create_phis: block %N\n", block));
366
367         /* Prevents creation of Phis that would be bad anyway,
368          * i.e. in dead or bad blocks. */
369         if (get_irn_arity(block) < 1 || is_Bad(block)) {
370                 DB((dbg, LEVEL_5, "ssa bad %N\n", block));
371                 return new_r_Bad(irg, mode);
372         }
373
374         if (block == ssa_second_def_block && !first) {
375                 DB((dbg, LEVEL_5, "ssa found second definition: use second def %N\n", ssa_second_def));
376                 return ssa_second_def;
377         }
378
379         /* already processed this block? */
380         if (irn_visited(block)) {
381                 ir_node *value = (ir_node *) get_irn_link(block);
382                 DB((dbg, LEVEL_5, "ssa already visited: use linked %N\n", value));
383                 return value;
384         }
385
386         assert(block != get_irg_start_block(irg));
387
388         /* a Block with only 1 predecessor needs no Phi */
389         n_cfgpreds = get_Block_n_cfgpreds(block);
390         if (n_cfgpreds == 1) {
391                 ir_node *pred_block = get_Block_cfgpred_block(block, 0);
392                 ir_node *value;
393
394                 DB((dbg, LEVEL_5, "ssa 1 pred: walk pred %N\n", pred_block));
395
396                 value = search_def_and_create_phis(pred_block, mode, 0);
397                 set_irn_link(block, value);
398                 mark_irn_visited(block);
399
400                 return value;
401         }
402
403         /* create a new Phi */
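        /* Dummy operands serve as placeholders: the Phi is linked and marked
         * visited first, so a cyclic walk returns this Phi, and the Dummies
         * are replaced by the real predecessor values below. */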
404         NEW_ARR_A(ir_node*, in, n_cfgpreds);
405         for (i = 0; i < n_cfgpreds; ++i)
406                 in[i] = new_r_Dummy(irg, mode);
407
408         phi = new_r_Phi(block, n_cfgpreds, in, mode);
409         /* Important: always keep block phi list up to date. */
410         add_Block_phi(block, phi);
411         DB((dbg, LEVEL_5, "ssa phi creation: link new phi %N to block %N\n", phi, block));
412         set_irn_link(block, phi);
413         mark_irn_visited(block);
414
415         /* set Phi predecessors */
416         for (i = 0; i < n_cfgpreds; ++i) {
417                 ir_node *pred_val;
418                 ir_node *pred_block = get_Block_cfgpred_block(block, i);
419                 assert(pred_block != NULL);
420                 pred_val = search_def_and_create_phis(pred_block, mode, 0);
421
422                 assert(pred_val != NULL);
423
424                 DB((dbg, LEVEL_5, "ssa phi pred:phi %N, pred %N\n", phi, pred_val));
425                 set_irn_n(phi, i, pred_val);
426         }
427
428         return phi;
429 }
430
431
432 /**
433  * Given a set of values this function constructs SSA-form for the users of the
434  * first value (the users are determined through the out-edges of the value).
435  * Works without using the dominance tree.
436  */
437 static void construct_ssa(ir_node *orig_block, ir_node *orig_val,
438                 ir_node *second_block, ir_node *second_val)
439 {
440         ir_graph *irg;
441         ir_mode *mode;
442         const ir_edge_t *edge;
443         const ir_edge_t *next;
444
445         assert(orig_block && orig_val && second_block && second_val &&
446                         "no parameter of construct_ssa may be NULL");
447
448         if (orig_val == second_val)
449                 return;
450
451         irg = get_irn_irg(orig_val);
452
453         ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
454         inc_irg_visited(irg);
455
456         mode = get_irn_mode(orig_val);
457         set_irn_link(orig_block, orig_val);
458         mark_irn_visited(orig_block);
459
460         ssa_second_def_block = second_block;
461         ssa_second_def       = second_val;
462
463         /* Only fix the users of the first, i.e. the original node */
464         foreach_out_edge_safe(orig_val, edge, next) {
465                 ir_node *user = get_edge_src_irn(edge);
466                 int j = get_edge_src_pos(edge);
467                 ir_node *user_block = get_nodes_block(user);
468                 ir_node *newval;
469
470                 /* ignore keeps */
471                 if (is_End(user))
472                         continue;
473
474                 DB((dbg, LEVEL_5, "original user %N\n", user));
475
476                 if (is_Phi(user)) {
477                         ir_node *pred_block = get_Block_cfgpred_block(user_block, j);
478                         newval = search_def_and_create_phis(pred_block, mode, 1);
479                 } else {
480                         newval = search_def_and_create_phis(user_block, mode, 1);
481                 }
482                 if (newval != user && !is_Bad(newval))
483                         set_irn_n(user, j, newval);
484         }
485
486         ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
487 }
488
489
490 /***** Unrolling Helper Functions *****/
491
492 /* Assign the copy with index nr to node n */
493 static void set_unroll_copy(ir_node *n, int nr, ir_node *cp)
494 {
495         unrolling_node_info *info;
496         assert(nr != 0 && "0 reserved");
497
498         info = (unrolling_node_info*)ir_nodemap_get(&map, n);
499         if (! info) {
500                 ir_node **arr = NEW_ARR_D(ir_node*, &obst, unroll_nr);
501                 memset(arr, 0, unroll_nr * sizeof(ir_node*));
502
503                 info = OALLOCZ(&obst, unrolling_node_info);
504                 info->copies = arr;
505                 ir_nodemap_insert(&map, n, info);
506         }
507         /* Original node */
508         info->copies[0] = n;
509
510         info->copies[nr] = cp;
511 }
512
513 /* Returns a node's copy if it exists, else NULL. */
514 static ir_node *get_unroll_copy(ir_node *n, int nr)
515 {
516         ir_node             *cp;
517         unrolling_node_info *info = (unrolling_node_info *)ir_nodemap_get(&map, n);
518         if (! info)
519                 return NULL;
520
521         cp = info->copies[nr];
522         return cp;
523 }
524
525
526 /***** Inversion Helper Functions *****/
527
528 /* Sets copy cp of node n. */
529 static void set_inversion_copy(ir_node *n, ir_node *cp)
530 {
531         ir_nodemap_insert(&map, n, cp);
532 }
533
534 /* Getter of copy of n for inversion */
535 static ir_node *get_inversion_copy(ir_node *n)
536 {
537         ir_node *cp = (ir_node *)ir_nodemap_get(&map, n);
538         return cp;
539 }
540
541 /* Resets block mark for given node. For use with walker */
542 static void reset_block_mark(ir_node *node, void * env)
543 {
544         (void) env;
545
546         if (is_Block(node))
547                 set_Block_mark(node, 0);
548 }
549
550 /* Returns the mark of the node's block (or of the node itself if it is a block).
551  * Used here to determine whether the node is in the condition chain. */
552 static bool is_nodes_block_marked(const ir_node* node)
553 {
554         return get_Block_mark(get_block_const(node));
555 }
556
557 /* Extends a node's ins by the node newnode.
558  * NOTE: This is slow if a node n needs to be extended more than once. */
559 static void extend_irn(ir_node *n, ir_node *newnode, bool new_is_backedge)
560 {
561         int i;
562         int arity = get_irn_arity(n);
563         int new_arity = arity + 1;
564         ir_node **ins = XMALLOCN(ir_node*, new_arity);
565         bool     *bes = XMALLOCN(bool, new_arity);
566
567         /* save bes */
568         /* Bes are important!
569          * Another way would be recreating the looptree,
570          * but after that we cannot distinguish already processed loops
571          * from not yet processed ones. */
572         if (is_Block(n)) {
573                 for (i = 0; i < arity; ++i) {
574                         bes[i] = is_backedge(n, i);
575                 }
576                 bes[i] = new_is_backedge;
577         }
578
579         for (i = 0; i < arity; ++i) {
580                 ins[i] = get_irn_n(n, i);
581         }
582         ins[i] = newnode;
583
584         set_irn_in(n, new_arity, ins);
585
586         /* restore bes  */
587         if (is_Block(n)) {
588                 for (i = 0; i < new_arity; ++i) {
589                         if (bes[i])
590                                 set_backedge(n, i);
591                 }
592         }
        free(ins);
        free(bes);
593 }
594
595 /* Extends a block by a copy of its pred at pos,
596  * fixing also the phis in the same way. */
597 static void extend_ins_by_copy(ir_node *block, int pos)
598 {
599         ir_node *new_in;
600         ir_node *phi;
601         ir_node *pred;
602         assert(is_Block(block));
603
604         /* Extend block by copy of definition at pos */
605         pred = get_irn_n(block, pos);
606         new_in = get_inversion_copy(pred);
607         DB((dbg, LEVEL_5, "Extend block %N by %N cp of %N\n", block, new_in, pred));
608         extend_irn(block, new_in, false);
609
610         /* Extend block phis by copy of definition at pos */
611         for_each_phi(block, phi) {
612                 ir_node *pred, *cp;
613
614                 pred = get_irn_n(phi, pos);
615                 cp = get_inversion_copy(pred);
616                 /* If the phi's in is not in the condition chain (e.g. a constant),
617                  * there is no copy. */
618                 if (cp == NULL)
619                         new_in = pred;
620                 else
621                         new_in = cp;
622
623                 DB((dbg, LEVEL_5, "Extend phi %N by %N cp of %N\n", phi, new_in, pred));
624                 extend_irn(phi, new_in, false);
625         }
626 }
627
628 /* Returns the number of the block's backedges, with or without alien backedges. */
629 static int get_backedge_n(ir_node *block, bool with_alien)
630 {
631         int i;
632         int be_n = 0;
633         int arity = get_irn_arity(block);
634
635         assert(is_Block(block));
636
637         for (i = 0; i < arity; ++i) {
638                 ir_node *pred = get_irn_n(block, i);
639                 if (is_backedge(block, i) && (with_alien || is_in_loop(pred)))
640                         ++be_n;
641         }
642         return be_n;
643 }
644
645 /* Returns a raw copy of the given node.
646  * Attributes are kept/set according to the needs of loop inversion. */
647 static ir_node *copy_node(ir_node *node)
648 {
649         int i, arity;
650         ir_node *cp;
651
652         cp = exact_copy(node);
653         arity = get_irn_arity(node);
654
655         /* Keep backedge info */
656         for (i = 0; i < arity; ++i) {
657                 if (is_backedge(node, i))
658                         set_backedge(cp, i);
659         }
660
661         if (is_Block(cp)) {
662                 set_Block_mark(cp, 0);
663         }
664
665         return cp;
666 }
667
668
669 /**
670  * This walker copies all walked nodes.
671  * If the walk_condition is true for a node, it is copied.
672  * All nodes node_info->copy have to be NULL prior to every walk.
673  * Order of ins is important for later usage.
674  */
675 static void copy_walk(ir_node *node, walker_condition *walk_condition,
676                       ir_loop *set_loop)
677 {
678         int i;
679         int arity;
680         ir_node *cp;
681         ir_node **cpin;
682         ir_graph *irg = current_ir_graph;
683
684         /**
685          * break condition and cycle resolver, creating temporary node copies
686          */
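        /* If the node is reached again through a cycle, a TEMP copy without
         * correct ins is created here so callers can already reference it;
         * its ins are set when the node is finalized further below. */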
687         if (get_irn_visited(node) >= get_irg_visited(irg)) {
688                 /* Here we rely on nodestate's copy being initialized with NULL */
689                 DB((dbg, LEVEL_5, "copy_walk: We have already visited %N\n", node));
690                 if (get_inversion_copy(node) == NULL) {
691                         cp = copy_node(node);
692                         set_inversion_copy(node, cp);
693
694                         DB((dbg, LEVEL_5, "The TEMP copy of %N is created %N\n", node, cp));
695                 }
696                 return;
697         }
698
699         /* Walk */
700         mark_irn_visited(node);
701
702         if (!is_Block(node)) {
703                 ir_node *pred = get_nodes_block(node);
704                 if (walk_condition(pred))
705                         DB((dbg, LEVEL_5, "walk block %N\n", pred));
706                 copy_walk(pred, walk_condition, set_loop);
707         }
708
709         arity = get_irn_arity(node);
710
711         NEW_ARR_A(ir_node *, cpin, arity);
712
713         for (i = 0; i < arity; ++i) {
714                 ir_node *pred = get_irn_n(node, i);
715
716                 if (walk_condition(pred)) {
717                         DB((dbg, LEVEL_5, "walk node %N\n", pred));
718                         copy_walk(pred, walk_condition, set_loop);
719                         cpin[i] = get_inversion_copy(pred);
720                         DB((dbg, LEVEL_5, "copy of %N gets new in %N which is copy of %N\n",
721                                                 node, get_inversion_copy(pred), pred));
722                 } else {
723                         cpin[i] = pred;
724                 }
725         }
726
727         /* copy node / finalize temp node */
728         if (get_inversion_copy(node) == NULL) {
729                 /* No temporary copy existent */
730                 cp = copy_node(node);
731                 set_inversion_copy(node, cp);
732                 DB((dbg, LEVEL_5, "The FINAL copy of %N is CREATED %N\n", node, cp));
733         } else {
734                 /* temporary copy is existent but without correct ins */
735                 cp = get_inversion_copy(node);
736                 DB((dbg, LEVEL_5, "The FINAL copy of %N is EXISTENT %N\n", node, cp));
737         }
738
739         if (!is_Block(node)) {
740                 ir_node *cpblock = get_inversion_copy(get_nodes_block(node));
741
742                 set_nodes_block(cp, cpblock );
743                 if (is_Phi(cp))
744                         add_Block_phi(cpblock, cp);
745         }
746
747         /* Keeps phi list of temporary node. */
748         set_irn_in(cp, ARR_LEN(cpin), cpin);
749 }
750
751 /**
752  * This walker copies all walked nodes.
753  * If the walk_condition is true for a node, it is copied.
754  * All nodes node_info->copy have to be NULL prior to every walk.
755  * Order of ins is important for later usage.
756  * Takes copy_index to store the copy at a specific index in the node map.
757  */
758 static void copy_walk_n(ir_node *node, walker_condition *walk_condition,
759                         int copy_index)
760 {
761         int i;
762         int arity;
763         ir_node *cp;
764         ir_node **cpin;
765
766         /**
767          * break condition and cycle resolver, creating temporary node copies
768          */
769         if (irn_visited(node)) {
770                 /* Here we rely on nodestate's copy being initialized with NULL */
771                 DB((dbg, LEVEL_5, "copy_walk: We have already visited %N\n", node));
772                 if (get_unroll_copy(node, copy_index) == NULL) {
773                         ir_node *u;
774                         u = copy_node(node);
775                         set_unroll_copy(node, copy_index, u);
776                         DB((dbg, LEVEL_5, "The TEMP unknown of %N is created %N\n", node, u));
777                 }
778                 return;
779         }
780
781         /* Walk */
782         mark_irn_visited(node);
783
784         if (!is_Block(node)) {
785                 ir_node *block = get_nodes_block(node);
786                 if (walk_condition(block))
787                         DB((dbg, LEVEL_5, "walk block %N\n", block));
788                 copy_walk_n(block, walk_condition, copy_index);
789         }
790
791         arity = get_irn_arity(node);
792         NEW_ARR_A(ir_node *, cpin, arity);
793
794         for (i = 0; i < arity; ++i) {
795                 ir_node *pred = get_irn_n(node, i);
796
797                 if (walk_condition(pred)) {
798                         DB((dbg, LEVEL_5, "walk node %N\n", pred));
799                         copy_walk_n(pred, walk_condition, copy_index);
800                         cpin[i] = get_unroll_copy(pred, copy_index);
801                 } else {
802                         cpin[i] = pred;
803                 }
804         }
805
806         /* copy node / finalize temp node */
807         cp = get_unroll_copy(node, copy_index);
808         if (cp == NULL || is_Unknown(cp)) {
809                 cp = copy_node(node);
810                 set_unroll_copy(node, copy_index, cp);
811                 DB((dbg, LEVEL_5, "The FINAL copy of %N is CREATED %N\n", node, cp));
812         } else {
813                 /* temporary copy is existent but without correct ins */
814                 cp = get_unroll_copy(node, copy_index);
815                 DB((dbg, LEVEL_5, "The FINAL copy of %N is EXISTENT %N\n", node, cp));
816         }
817
818         if (!is_Block(node)) {
819                 ir_node *cpblock = get_unroll_copy(get_nodes_block(node), copy_index);
820
821                 set_nodes_block(cp, cpblock );
822                 if (is_Phi(cp))
823                         add_Block_phi(cpblock, cp);
824         }
825
826         /* Keeps phi list of temporary node. */
827         set_irn_in(cp, ARR_LEN(cpin), cpin);
828 }
829
830 /* Removes all blocks with unmarked predecessors from the condition chain. */
831 static void unmark_not_allowed_cc_blocks(void)
832 {
833         size_t blocks = ARR_LEN(cc_blocks);
834         size_t i;
835
836         for (i = 0; i < blocks; ++i) {
837                 ir_node *block = cc_blocks[i];
838                 int a;
839                 int arity = get_irn_arity(block);
840
841                 /* Head is an exception. */
842                 if (block == loop_head)
843                         continue;
844
845                 for (a = 0; a < arity; ++a) {
846                         if (! is_nodes_block_marked(get_irn_n(block, a))) {
847                                 set_Block_mark(block, 0);
848                                 --inversion_blocks_in_cc;
849                                 DB((dbg, LEVEL_5, "Removed %N from cc (blocks in cc %d)\n",
850                                                 block, inversion_blocks_in_cc));
851
852                                 break;
853                         }
854                 }
855         }
856 }
857
858 /* Unmarks all cc blocks using cc_blocks except head.
859  * TODO: invert head for unrolling? */
860 static void unmark_cc_blocks(void)
861 {
862         size_t blocks = ARR_LEN(cc_blocks);
863         size_t i;
864
865         for (i = 0; i < blocks; ++i) {
866                 ir_node *block = cc_blocks[i];
867
868                 /* TODO Head is an exception. */
869                 /*if (block != loop_head)*/
870                 set_Block_mark(block, 0);
871         }
872         /*inversion_blocks_in_cc = 1;*/
873         inversion_blocks_in_cc = 0;
874
875         /* invalidate */
876         loop_info.cc_size = 0;
877 }
878
879 /**
880  * Populates cur_head_outs with (node, pred_pos) tuples
881  * where the node's pred at pred_pos is in the cc but the node itself is not.
882  * Also finds df loops inside the cc.
883  * Head and condition chain blocks have been marked previously.
884  */
885 static void get_head_outs(ir_node *node, void *env)
886 {
887         int i;
888         int arity = get_irn_arity(node);
889         (void) env;
890
891         for (i = 0; i < arity; ++i) {
892                 if (!is_nodes_block_marked(node) && is_nodes_block_marked(get_irn_n(node, i))) {
893                         entry_edge entry;
894                         entry.node = node;
895                         entry.pos = i;
896                         /* Saving the predecessor as well seems redundant, but becomes
897                          * necessary once its position changes before it is
898                          * dereferenced. */
899                         entry.pred = get_irn_n(node, i);
900                         ARR_APP1(entry_edge, cur_head_outs, entry);
901                 }
902         }
903
904         arity = get_irn_arity(loop_head);
905
906         /* Find df loops inside the cc */
907         if (is_Phi(node) && get_nodes_block(node) == loop_head) {
908                 for (i = 0; i < arity; ++i) {
909                         if (is_own_backedge(loop_head, i)) {
910                                 if (is_nodes_block_marked(get_irn_n(node, i))) {
911                                         entry_edge entry;
912                                         entry.node = node;
913                                         entry.pos = i;
914                                         entry.pred = get_irn_n(node, i);
915                                         ARR_APP1(entry_edge, head_df_loop, entry);
916                                         DB((dbg, LEVEL_5, "Found incc assignment node %N @%d is pred %N, graph %N %N\n",
917                                                         node, i, entry.pred, current_ir_graph, get_irg_start_block(current_ir_graph)));
918                                 }
919                         }
920                 }
921         }
922 }
923
924 /**
925  * Find condition chains, and add them to be inverted.
926  * A block belongs to the chain if a condition branches out of the loop.
927  * (Some blocks need to be removed once again.)
928  * Marks the given block if it belongs to the condition chain.
929  */
930 static void find_condition_chain(ir_node *block)
931 {
932         const    ir_edge_t *edge;
933         bool     mark     = false;
934         bool     has_be   = false;
935         bool     jmp_only = true;
936         unsigned nodes_n  = 0;
937
938         mark_irn_visited(block);
939
940         DB((dbg, LEVEL_5, "condition_chains for block %N\n", block));
941
942         /* Get node count */
943         foreach_out_edge_kind(block, edge, EDGE_KIND_NORMAL) {
944                 ++nodes_n;
945         }
946
947         /* Check if node count would exceed maximum cc size.
948          * TODO
949          * This is not optimal, as we search depth-first and break here,
950          * continuing with another subtree. */
951         if (loop_info.cc_size + nodes_n > opt_params.max_cc_size) {
952                 set_Block_mark(block, 0);
953                 return;
954         }
955
956         /* Check if block only has a jmp instruction. */
957         foreach_out_edge(block, edge) {
958                 ir_node *src = get_edge_src_irn(edge);
959
960                 if (!is_Block(src) && !is_Jmp(src)) {
961                         jmp_only = false;
962                 }
963         }
964
965         /* Check whether one of the cf outs leaves the loop,
966          * or whether this node has a backedge. */
967         foreach_block_succ(block, edge) {
968                 ir_node *src = get_edge_src_irn(edge);
969                 int pos = get_edge_src_pos(edge);
970
971                 if (!is_in_loop(src))
972                         mark = true;
973
974                 /* Inverting blocks with backedge outs leads to a cf edge
975                  * from the inverted head, into the inverted head (skipping the body).
976                  * As the body becomes the new loop head,
977                  * this would introduce another loop in the existing loop.
978                  * This loop inversion cannot cope with this case. */
979                 if (is_backedge(src, pos)) {
980                         has_be = true;
981                         break;
982                 }
983         }
984
985         /* We need all predecessors to already belong to the condition chain.
986          * Example of wrong case:  * == in cc
987          *
988          *     Head*             ,--.
989          *    /|   \            B   |
990          *   / A*  B           /    |
991          *  / /\   /          ?     |
992          *   /   C*      =>      D  |
993          *      /  D           Head |
994          *     /               A  \_|
995          *                      C
996          */
997         /* Collect blocks containing only a Jmp.
998          * Do not collect blocks with backedge outs. */
999         if ((jmp_only || mark) && !has_be) {
1000                 set_Block_mark(block, 1);
1001                 ++inversion_blocks_in_cc;
1002                 loop_info.cc_size += nodes_n;
1003                 DB((dbg, LEVEL_5, "block %N is part of condition chain\n", block));
1004                 ARR_APP1(ir_node *, cc_blocks, block);
1005         } else {
1006                 set_Block_mark(block, 0);
1007         }
1008
1009         foreach_block_succ(block, edge) {
1010                 ir_node *src = get_edge_src_irn( edge );
1011
1012                 if (is_in_loop(src) && ! irn_visited(src))
1013                         find_condition_chain(src);
1014         }
1015 }
1016
1017 /**
1018  * Rewires the copied condition chain. Removes backedges
1019  * as this condition chain is prior to the loop.
1020  * The copy of loop_head must have a phi list and the old (unfixed) backedge info of the loop head.
1021  * (loop_head is already fixed, we cannot rely on it.)
1022  */
1023 static void fix_copy_inversion(void)
1024 {
1025         ir_node *new_head;
1026         ir_node **ins;
1027         ir_node **phis;
1028         ir_node *phi, *next;
1029         ir_node *head_cp = get_inversion_copy(loop_head);
1030         ir_graph *irg    = get_irn_irg(head_cp);
1031         int arity        = get_irn_arity(head_cp);
1032         int backedges    = get_backedge_n(head_cp, false);
1033         int new_arity    = arity - backedges;
1034         int pos;
1035         int i;
1036
1037         NEW_ARR_A(ir_node *, ins, new_arity);
1038
1039         pos = 0;
1040         /* Remove block backedges */
1041         for (i = 0; i < arity; ++i) {
1042                 if (!is_backedge(head_cp, i))
1043                         ins[pos++] = get_irn_n(head_cp, i);
1044         }
1045
1046         new_head = new_r_Block(irg, new_arity, ins);
1047
1048         phis = NEW_ARR_F(ir_node *, 0);
1049
1050         for_each_phi_safe(get_Block_phis(head_cp), phi, next) {
1051                 ir_node *new_phi;
1052                 NEW_ARR_A(ir_node *, ins, new_arity);
1053                 pos = 0;
1054                 for (i = 0; i < arity; ++i) {
1055                         if (!is_backedge(head_cp, i))
1056                                 ins[pos++] = get_irn_n(phi, i);
1057                 }
1058                 new_phi = new_rd_Phi(get_irn_dbg_info(phi),
1059                                 new_head, new_arity, ins,
1060                                 get_irn_mode(phi));
1061                 ARR_APP1(ir_node *, phis, new_phi);
1062         }
1063
1064         pos = 0;
1065         for_each_phi_safe(get_Block_phis(head_cp), phi, next) {
1066                 exchange(phi, phis[pos++]);
1067         }
1068
1069         exchange(head_cp, new_head);
1070
1071         DEL_ARR_F(phis);
1072 }
1073
1074
1075 /* Puts the original condition chain at the end of the loop,
1076  * after the body.
1077  * Relies on block phi list and correct backedges.
1078  */
1079 static void fix_head_inversion(void)
1080 {
1081         ir_node *new_head;
1082         ir_node **ins;
1083         ir_node *phi, *next;
1084         ir_node **phis;
1085         ir_graph *irg = get_irn_irg(loop_head);
1086         int arity     = get_irn_arity(loop_head);
1087         int backedges = get_backedge_n(loop_head, false);
1088         int new_arity = backedges;
1089         int pos;
1090         int i;
1091
1092         NEW_ARR_A(ir_node *, ins, new_arity);
1093
1094         pos = 0;
1095         /* Keep only backedges */
1096         for (i = 0; i < arity; ++i) {
1097                 if (is_own_backedge(loop_head, i))
1098                         ins[pos++] = get_irn_n(loop_head, i);
1099         }
1100
1101         new_head = new_r_Block(irg, new_arity, ins);
1102
1103         phis = NEW_ARR_F(ir_node *, 0);
1104
1105         for_each_phi(loop_head, phi) {
1106                 ir_node *new_phi;
1107                 DB((dbg, LEVEL_5, "Fixing phi %N of loop head\n", phi));
1108
1109                 NEW_ARR_A(ir_node *, ins, new_arity);
1110
1111                 pos = 0;
1112                 for (i = 0; i < arity; ++i) {
1113                         ir_node *pred = get_irn_n(phi, i);
1114
1115                         if (is_own_backedge(loop_head, i)) {
1116                                 /* If assignment is in the condition chain,
1117                                  * we need to create a phi in the new loop head.
1118                                  * This can only happen for df, not cf. See find_condition_chains. */
1119                                 /*if (is_nodes_block_marked(pred)) {
1120                                         ins[pos++] = pred;
1121                                 } else {*/
1122                                 ins[pos++] = pred;
1123
1124                         }
1125                 }
1126
1127                 new_phi = new_rd_Phi(get_irn_dbg_info(phi),
1128                         new_head, new_arity, ins,
1129                         get_irn_mode(phi));
1130
1131                 ARR_APP1(ir_node *, phis, new_phi);
1132
1133                 DB((dbg, LEVEL_5, "fix inverted head should exch %N by %N (pos %d)\n", phi, new_phi, pos ));
1134         }
1135
1136         pos = 0;
1137         for_each_phi_safe(get_Block_phis(loop_head), phi, next) {
1138                 DB((dbg, LEVEL_5, "fix inverted exch phi %N by %N\n", phi, phis[pos]));
1139                 if (phis[pos] != phi)
1140                         exchange(phi, phis[pos++]);
1141         }
1142
1143         DEL_ARR_F(phis);
1144
1145         DB((dbg, LEVEL_5, "fix inverted head exch head block %N by %N\n", loop_head, new_head));
1146         exchange(loop_head, new_head);
1147 }
1148
1149 /* Does the loop inversion.  */
1150 static void inversion_walk(ir_graph *irg, entry_edge *head_entries)
1151 {
1152         size_t i;
1153
1154         /*
1155          * The order of rewiring bottom-up is crucial.
1156          * Any change of the order leads to lost information that would be needed later.
1157          */
1158
1159         ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
1160
1161         /* 1. clone condition chain */
1162         inc_irg_visited(irg);
1163
1164         for (i = 0; i < ARR_LEN(head_entries); ++i) {
1165                 entry_edge entry = head_entries[i];
1166                 ir_node *pred = get_irn_n(entry.node, entry.pos);
1167
1168                 DB((dbg, LEVEL_5, "\nInit walk block %N\n", pred));
1169
1170                 copy_walk(pred, is_nodes_block_marked, cur_loop);
1171         }
1172
1173         ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
1174
1175         /* 2. Extends the head control flow successors ins
1176          *    with the definitions of the copied head node. */
1177         for (i = 0; i < ARR_LEN(head_entries); ++i) {
1178                 entry_edge head_out = head_entries[i];
1179
1180                 if (is_Block(head_out.node))
1181                         extend_ins_by_copy(head_out.node, head_out.pos);
1182         }
1183
1184         /* 3. construct_ssa for users of definitions in the condition chain,
1185          *    as there is now a second definition. */
1186         for (i = 0; i < ARR_LEN(head_entries); ++i) {
1187                 entry_edge head_out = head_entries[i];
1188
1189                 /* Ignore keepalives */
1190                 if (is_End(head_out.node))
1191                         continue;
1192
1193                 /* Construct ssa for assignments in the condition chain. */
1194                 if (!is_Block(head_out.node)) {
1195                         ir_node *pred, *cppred, *block, *cpblock;
1196
1197                         pred = head_out.pred;
1198                         cppred = get_inversion_copy(pred);
1199                         block = get_nodes_block(pred);
1200                         cpblock = get_nodes_block(cppred);
1201                         construct_ssa(block, pred, cpblock, cppred);
1202                 }
1203         }
1204
1205         /*
1206          * If there is an assignment in the condition chain
1207          * with a user also in the condition chain,
1208          * the dominance frontier is in the new loop head.
1209          * The dataflow loop is completely in the condition chain.
1210          * Goal:
1211          *  To be wired: >|
1212          *
1213          *  | ,--.   |
1214          * Phi_cp |  | copied condition chain
1215          * >| |   |  |
1216          * >| ?__/   |
1217          * >| ,-.
1218          *  Phi* |   | new loop head with newly created phi.
1219          *   |   |
1220          *  Phi  |   | original, inverted condition chain
1221          *   |   |   |
1222          *   ?__/    |
1223          *
1224          */
1225         for (i = 0; i < ARR_LEN(head_df_loop); ++i) {
1226                 entry_edge head_out = head_df_loop[i];
1227
1228                 /* Construct ssa for assignments in the condition chain. */
1229                 ir_node *pred, *cppred, *block, *cpblock;
1230
1231                 pred = head_out.pred;
1232                 cppred = get_inversion_copy(pred);
1233                 assert(cppred && pred);
1234                 block = get_nodes_block(pred);
1235                 cpblock = get_nodes_block(cppred);
1236                 construct_ssa(block, pred, cpblock, cppred);
1237         }
1238
1239         /* 4. Remove the ins which are no backedges from the original condition chain
1240          *    as the cc is now subsequent to the body. */
1241         fix_head_inversion();
1242
1243         /* 5. Remove the backedges of the copied condition chain,
1244          *    because it is going to be the new 'head' in advance to the loop. */
1245         fix_copy_inversion();
1246
1247 }
1248
1249 /* Performs loop inversion of cur_loop if possible and reasonable. */
1250 static void loop_inversion(ir_graph *irg)
1251 {
1252         int      loop_depth;
1253         unsigned max_loop_nodes = opt_params.max_loop_size;
1254         unsigned max_loop_nodes_adapted;
1255         int      depth_adaption = opt_params.depth_adaption;
1256
1257         bool do_inversion = true;
1258
1259         /* Depth of 0 is the procedure and 1 a topmost loop. */
1260         loop_depth = get_loop_depth(cur_loop) - 1;
1261
1262         /* Adapt the allowed size to the loop nest depth (percentage based). */
1263         max_loop_nodes_adapted = get_max_nodes_adapted(loop_depth);
1264
1265         DB((dbg, LEVEL_1, "max_nodes: %d\nmax_nodes_adapted %d at depth of %d (adaption %d)\n",
1266                         max_loop_nodes, max_loop_nodes_adapted, loop_depth, depth_adaption));
1267
1268         if (loop_info.nodes == 0)
1269                 return;
1270
1271         if (loop_info.nodes > max_loop_nodes) {
1272                 /* Only for stats */
1273                 DB((dbg, LEVEL_1, "Nodes %d > allowed nodes %d\n",
1274                         loop_info.nodes, max_loop_nodes));
1275                 ++stats.too_large;
1276                 /* no RETURN */
1277                 /* Adaption might change it */
1278         }
1279
1280         /* Limit processing to loops smaller than given parameter. */
1281         if (loop_info.nodes > max_loop_nodes_adapted) {
1282                 DB((dbg, LEVEL_1, "Nodes %d > allowed nodes (depth %d adapted) %d\n",
1283                         loop_info.nodes, loop_depth, max_loop_nodes_adapted));
1284                 ++stats.too_large_adapted;
1285                 return;
1286         }
1287
1288         if (loop_info.calls > opt_params.allowed_calls) {
1289                 DB((dbg, LEVEL_1, "Calls %d > allowed calls %d\n",
1290                         loop_info.calls, opt_params.allowed_calls));
1291                 ++stats.calls_limit;
1292                 return;
1293         }
1294
1295         /*inversion_head_node_limit = INT_MAX;*/
1296         ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK);
1297
1298         /* Reset block marks.
1299          * We use block marks to flag blocks of the original condition chain. */
1300         irg_walk_graph(irg, reset_block_mark, NULL, NULL);
1301
1302         /*loop_info.blocks = get_loop_n_blocks(cur_loop);*/
1303         cond_chain_entries = NEW_ARR_F(entry_edge, 0);
1304         head_df_loop = NEW_ARR_F(entry_edge, 0);
1305
1306         /*head_inversion_node_count = 0;*/
1307         inversion_blocks_in_cc = 0;
1308
1309         /* Use a node map to keep copies of nodes from the condition chain. */
1310         ir_nodemap_init(&map, irg);
1311         obstack_init(&obst);
1312
1313         /* Search for condition chains and temporarily save the blocks in an array. */
1314         cc_blocks = NEW_ARR_F(ir_node *, 0);
1315         inc_irg_visited(irg);
1316         find_condition_chain(loop_head);
1317
1318         unmark_not_allowed_cc_blocks();
1319         DEL_ARR_F(cc_blocks);
1320
1321         /* Condition chain too large.
1322          * Loop should better be small enough to fit into the cache. */
1323         /* TODO Of course, we should take a small enough cc in the first place,
1324          * which is not that simple. (bin packing)  */
1325         if (loop_info.cc_size > opt_params.max_cc_size) {
1326                 ++stats.cc_limit_reached;
1327
1328                 do_inversion = false;
1329
1330                 /* Unmark cc blocks except the head.
1331                  * Invert head only for possible unrolling. */
1332                 unmark_cc_blocks();
1333         }
1334
1335         /* We also catch endless loops here,
1336          * because they do not have a condition chain. */
1337         if (inversion_blocks_in_cc < 1) {
1338                 do_inversion = false;
1339                 DB((dbg, LEVEL_3,
1340                         "Loop contains %d (less than 1) invertible blocks => No Inversion done.\n",
1341                         inversion_blocks_in_cc));
1342         }
1343
1344         if (do_inversion) {
1345                 cur_head_outs = NEW_ARR_F(entry_edge, 0);
1346
1347                 /* Get all edges pointing into the condition chain. */
1348                 irg_walk_graph(irg, get_head_outs, NULL, NULL);
1349
1350                 /* Do the inversion */
1351                 inversion_walk(irg, cur_head_outs);
1352
1353                 DEL_ARR_F(cur_head_outs);
1354
1355                 /* Duplicated blocks changed doms */
1356                 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_DOMINANCE
1357                                    | IR_GRAPH_STATE_CONSISTENT_LOOPINFO);
1358
1359                 ++stats.inverted;
1360         }
1361
1362         /* free */
1363         obstack_free(&obst, NULL);
1364         ir_nodemap_destroy(&map);
1365         DEL_ARR_F(cond_chain_entries);
1366         DEL_ARR_F(head_df_loop);
1367
1368         ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK);
1369 }
1370
1371 /* Fix the original loop head's ins for the invariant unrolling case. */
1372 static void unrolling_fix_loop_head_inv(void)
1373 {
1374         ir_node *ins[2];
1375         ir_node *phi;
1376         ir_node *proj = new_Proj(loop_info.duff_cond, mode_X, 0);
1377         ir_node *head_pred = get_irn_n(loop_head, loop_info.be_src_pos);
1378         ir_node *loop_condition = get_unroll_copy(head_pred, unroll_nr - 1);
1379
1380         /* The original loop head's ins are:
1381          * the duff block and its own backedge */
1382
1383         ins[0] = loop_condition;
1384         ins[1] = proj;
1385         set_irn_in(loop_head, 2, ins);
1386         DB((dbg, LEVEL_4, "Rewire ins of block loophead %N to pred %N and duffs entry %N \n" , loop_head, ins[0], ins[1]));
1387
1388         for_each_phi(loop_head, phi) {
1389                 ir_node *pred = get_irn_n(phi, loop_info.be_src_pos);
1390                 /* TODO we think it is a phi, but for Mergesort it is not the case.*/
1391
1392                 ir_node *last_pred = get_unroll_copy(pred, unroll_nr - 1);
1393
1394                 ins[0] = last_pred;
1395                 ins[1] = (ir_node*)get_irn_link(phi);
1396                 set_irn_in(phi, 2, ins);
1397                 DB((dbg, LEVEL_4, "Rewire ins of loophead phi %N to pred %N and duffs entry %N \n" , phi, ins[0], ins[1]));
1398         }
1399 }
1400
1401 /* Removes previously created phis with only 1 in. */
1402 static void correct_phis(ir_node *node, void *env)
1403 {
1404         (void)env;
1405
1406         if (is_Phi(node) && get_irn_arity(node) == 1) {
1407                 ir_node *exch;
1408                 ir_node *in[1];
1409
1410                 in[0] = get_irn_n(node, 0);
1411
1412                 exch = new_rd_Phi(get_irn_dbg_info(node),
1413                     get_nodes_block(node), 1, in,
1414                         get_irn_mode(node));
1415
1416                 exchange(node, exch);
1417         }
1418 }
1419
1420 /* Unrolling: Rewire floating copies. */
1421 static void place_copies(int copies)
1422 {
1423         ir_node *loophead = loop_head;
1424         size_t i;
1425         int c;
1426         int be_src_pos = loop_info.be_src_pos;
1427
1428         /* Serialize the loops by fixing their head ins.
1429          * The copies are processed first;
1430          * the original loop is handled afterwards to keep its backedge info. */
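        /* Each copy's backedge source block gets a Jmp into the head of the
         * next copy, so the copies form a straight chain; edges leaving the
         * loop are reconnected to the last copy further below. */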
1431         for (c = 0; c < copies; ++c) {
1432                 ir_node *upper = get_unroll_copy(loophead, c);
1433                 ir_node *lower = get_unroll_copy(loophead, c + 1);
1434                 ir_node *phi;
1435                 ir_node *topmost_be_block = get_nodes_block(get_irn_n(loophead, be_src_pos));
1436
1437                 /* Important: get the preds first and then their copy. */
1438                 ir_node *upper_be_block = get_unroll_copy(topmost_be_block, c);
1439                 ir_node *new_jmp = new_r_Jmp(upper_be_block);
1440                 DB((dbg, LEVEL_5, " place_copies upper %N lower %N\n", upper, lower));
1441
1442                 DB((dbg, LEVEL_5, "topmost be block %N \n", topmost_be_block));
1443
1444                 if (loop_info.unroll_kind == constant) {
1445                         ir_node *ins[1];
1446                         ins[0] = new_jmp;
1447                         set_irn_in(lower, 1, ins);
1448
1449                         for_each_phi(loophead, phi) {
1450                                 ir_node *topmost_def = get_irn_n(phi, be_src_pos);
1451                                 ir_node *upper_def = get_unroll_copy(topmost_def, c);
1452                                 ir_node *lower_phi = get_unroll_copy(phi, c + 1);
1453
1454                                 /* It is possible that the value used
1455                                  * in the OWN backedge path is NOT defined in this loop. */
1456                                 if (is_in_loop(topmost_def))
1457                                         ins[0] = upper_def;
1458                                 else
1459                                         ins[0] = topmost_def;
1460
1461                                 set_irn_in(lower_phi, 1, ins);
1462                                 /* Need to replace phis with 1 in later. */
1463                         }
1464                 } else {
1465                         /* Invariant case */
1466                         /* Every node has 2 ins. One from the duff blocks
1467                          * and one from the previously unrolled loop. */
1468                         ir_node *ins[2];
1469                         /* Calculate corresponding projection of mod result for this copy c */
1470                         ir_node *proj = new_Proj(loop_info.duff_cond, mode_X, unroll_nr - c - 1);
1471                         DB((dbg, LEVEL_4, "New duff proj %N\n" , proj));
1472
1473                         ins[0] = new_jmp;
1474                         ins[1] = proj;
1475                         set_irn_in(lower, 2, ins);
1476                         DB((dbg, LEVEL_4, "Rewire ins of Block %N to pred %N and duffs entry %N \n" , lower, ins[0], ins[1]));
1477
1478                         for_each_phi(loophead, phi) {
1479                                 ir_node *topmost_phi_pred = get_irn_n(phi, be_src_pos);
1480                                 ir_node *upper_phi_pred;
1481                                 ir_node *lower_phi;
1482                                 ir_node *duff_phi;
1483
1484                                 lower_phi = get_unroll_copy(phi, c + 1);
1485                                 duff_phi = (ir_node*)get_irn_link(phi);
1486                                 DB((dbg, LEVEL_4, "DD Link of %N is %N\n" , phi, duff_phi));
1487
                                /* Use the copy of the backedge pred,
                                 * unless it is defined outside the loop. */
1489                                 if (is_in_loop(topmost_phi_pred)) {
1490                                         upper_phi_pred = get_unroll_copy(topmost_phi_pred, c);
1491                                 } else {
1492                                         upper_phi_pred = topmost_phi_pred;
1493                                 }
1494
1495                                 ins[0] = upper_phi_pred;
1496                                 ins[1] = duff_phi;
1497                                 set_irn_in(lower_phi, 2, ins);
1498                                 DB((dbg, LEVEL_4, "Rewire ins of %N to pred %N and duffs entry %N \n" , lower_phi, ins[0], ins[1]));
1499                         }
1500                 }
1501         }
1502
1503         /* Reconnect last copy. */
1504         for (i = 0; i < ARR_LEN(loop_entries); ++i) {
1505                 entry_edge edge = loop_entries[i];
1506                 /* Last copy is at the bottom */
1507                 ir_node *new_pred = get_unroll_copy(edge.pred, copies);
1508                 set_irn_n(edge.node, edge.pos, new_pred);
1509         }
1510
        /* Fix the original loop's head.
         * Done at the end, as its ins and backedge info were needed before. */
1513         if (loop_info.unroll_kind == constant) {
1514                 ir_node *phi;
1515                 ir_node *head_pred = get_irn_n(loop_head, be_src_pos);
1516                 ir_node *loop_condition = get_unroll_copy(head_pred, unroll_nr - 1);
1517
1518                 set_irn_n(loop_head, loop_info.be_src_pos, loop_condition);
1519
1520                 for_each_phi(loop_head, phi) {
1521                         ir_node *pred = get_irn_n(phi, be_src_pos);
1522                         ir_node *last_pred;
1523
                        /* It is possible that the value used
                         * on the OWN backedge path is NOT assigned in this loop. */
1526                         if (is_in_loop(pred))
1527                                 last_pred = get_unroll_copy(pred, copies);
1528                         else
1529                                 last_pred = pred;
1530                         set_irn_n(phi, be_src_pos, last_pred);
1531                 }
1532
1533         } else {
1534                 unrolling_fix_loop_head_inv();
1535         }
1536 }
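
/* Rough shape of the result for the constant case with unroll_nr == 3
 * (i.e. copies == 2); every lower head copy is entered through a Jmp in the
 * backedge-source block of the copy above it, and the original head keeps
 * the only remaining backedge:
 *
 *     entry
 *       |
 *     head  <-----------.
 *       |               |
 *     body (be block)   |
 *       |               |
 *     head copy 1       |
 *       |               |
 *     body copy 1       |
 *       |               |
 *     head copy 2       |
 *       |               |
 *     body copy 2 ------'
 */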
1537
1538 /* Copies the cur_loop several times. */
1539 static void copy_loop(entry_edge *cur_loop_outs, int copies)
1540 {
1541         int c;
1542
1543         ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1544
1545         for (c = 0; c < copies; ++c) {
1546                 size_t i;
1547
1548                 inc_irg_visited(current_ir_graph);
1549
1550                 DB((dbg, LEVEL_5, "         ### Copy_loop  copy nr: %d ###\n", c));
1551                 for (i = 0; i < ARR_LEN(cur_loop_outs); ++i) {
1552                         entry_edge entry = cur_loop_outs[i];
1553                         ir_node *pred = get_irn_n(entry.node, entry.pos);
1554
1555                         copy_walk_n(pred, is_in_loop, c + 1);
1556                 }
1557         }
1558
1559         ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1560 }
1561
1562
/* Creates a new phi from the given phi node, omitting its own backedges,
 * using be_block as the supplier of backedge information. */
1565 static ir_node *clone_phis_sans_bes(ir_node *phi, ir_node *be_block, ir_node *dest_block)
1566 {
1567         ir_node **ins;
1568         int arity = get_irn_arity(phi);
1569         int i, c = 0;
1570         ir_node *newphi;
1571
1572         assert(get_irn_arity(phi) == get_irn_arity(be_block));
1573         assert(is_Phi(phi));
1574
1575         ins = NEW_ARR_F(ir_node *, arity);
1576         for (i = 0; i < arity; ++i) {
1577                 if (! is_own_backedge(be_block, i)) {
1578                         ins[c] = get_irn_n(phi, i);
1579                         ++c;
1580                 }
1581         }
1582
1583         newphi = new_r_Phi(dest_block, c, ins, get_irn_mode(phi));
1584
1585         set_irn_link(phi, newphi);
1586         DB((dbg, LEVEL_4, "Linking for duffs device %N to %N\n", phi, newphi));
1587
1588         return newphi;
1589 }
1590
/* Creates a new block from the given block node, omitting its own backedges,
 * using be_block as the supplier of backedge information. */
1593 static ir_node *clone_block_sans_bes(ir_node *node, ir_node *be_block)
1594 {
1595         int arity = get_irn_arity(node);
1596         int i, c = 0;
1597         ir_node **ins;
1598
1599         assert(get_irn_arity(node) == get_irn_arity(be_block));
1600         assert(is_Block(node));
1601
1602         NEW_ARR_A(ir_node *, ins, arity);
1603         for (i = 0; i < arity; ++i) {
1604                 if (! is_own_backedge(be_block, i)) {
1605                         ins[c] = get_irn_n(node, i);
1606                         ++c;
1607                 }
1608         }
1609
1610         return new_Block(c, ins);
1611 }
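
/* Example (illustrative): a loop head with ins [entry_jmp, backedge_jmp] and
 * be_block == the head itself is cloned into a block with the single in
 * [entry_jmp]; the phis cloned by clone_phis_sans_bes() lose the
 * corresponding in in the same way. */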
1612
1613 /* Creates a structure to calculate absolute value of node op.
1614  * Returns mux node with absolute value. */
1615 static ir_node *new_Abs(ir_node *op, ir_mode *mode)
1616 {
        ir_graph *irg      = get_irn_irg(op);
        ir_node  *block    = get_nodes_block(op);
        ir_node  *zero     = new_r_Const(irg, get_mode_null(mode));
        ir_node  *cmp      = new_r_Cmp(block, op, zero, ir_relation_less);
        ir_node  *minus_op = new_r_Minus(block, op, mode);
        ir_node  *mux      = new_r_Mux(block, cmp, op, minus_op, mode);

        return mux;
1625 }
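
/* The Mux built above evaluates to roughly (op < 0) ? -op : op, i.e. |op|. */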
1626
1627
/* Creates the blocks for duffs device, using previously obtained
 * information about the iv.
 * TODO split */
1631 static void create_duffs_block(void)
1632 {
1633         ir_mode *mode;
1634
1635         ir_node *block1, *count_block, *duff_block;
1636         ir_node *ems, *ems_mod, *ems_div, *ems_mod_proj, *cmp_null,
1637                 *ems_mode_cond, *x_true, *x_false, *const_null;
1638         ir_node *true_val, *false_val;
1639         ir_node *ins[2];
1640
1641         ir_node *duff_mod, *proj, *cond;
1642
1643         ir_node *count, *correction, *unroll_c;
1644         ir_node *cmp_bad_count, *good_count, *bad_count, *count_phi, *bad_count_neg;
1645         ir_node *phi;
1646
1647         mode = get_irn_mode(loop_info.end_val);
1648         const_null = new_Const(get_mode_null(mode));
1649
        /* TODO naming
         * 1. Calculate a first approximation of the count.
         *    Condition: (end - start) % step == 0 */
1653         block1 = clone_block_sans_bes(loop_head, loop_head);
1654         DB((dbg, LEVEL_4, "Duff block 1 %N\n", block1));
1655
        /* Create loop entry phis in the first duff block,
         * as it becomes the loop's preheader. */
1658         for_each_phi(loop_head, phi) {
                /* Returns the phi's pred if the phi would have arity 1. */
1660                 ir_node *new_phi = clone_phis_sans_bes(phi, loop_head, block1);
1661
1662                 DB((dbg, LEVEL_4, "HEAD %N phi %N\n", loop_head, phi));
1663                 DB((dbg, LEVEL_4, "BLOCK1 %N phi %N\n", block1, new_phi));
1664         }
1665
        ems = new_r_Sub(block1, loop_info.end_val, loop_info.start_val,
                get_irn_mode(loop_info.end_val));
        DB((dbg, LEVEL_4, "BLOCK1 sub %N\n", ems));

1674         DB((dbg, LEVEL_4, "mod ins %N %N\n", ems, loop_info.step));
1675         ems_mod = new_r_Mod(block1,
1676                 new_NoMem(),
1677                 ems,
1678                 loop_info.step,
1679                 mode,
1680                 op_pin_state_pinned);
1681         ems_div = new_r_Div(block1,
1682                 new_NoMem(),
1683                 ems,
1684                 loop_info.step,
1685                 mode,
1686                 op_pin_state_pinned);
1687
        DB((dbg, LEVEL_4, "New Mod node %N\n", ems_mod));
1689
        ems_mod_proj = new_r_Proj(ems_mod, mode, pn_Mod_res);
        cmp_null = new_r_Cmp(block1, ems_mod_proj, const_null, ir_relation_equal);
1692         ems_mode_cond = new_r_Cond(block1, cmp_null);
1693
1694         /* ems % step == 0 */
1695         x_true = new_r_Proj(ems_mode_cond, mode_X, pn_Cond_true);
1696         /* ems % step != 0 */
1697         x_false = new_r_Proj(ems_mode_cond, mode_X, pn_Cond_false);
1698
1699         /* 2. Second block.
         * Assures that duffs device receives a valid count.
1701          * Condition:
1702          *     decreasing: count < 0
1703          *     increasing: count > 0
1704          */
1705         ins[0] = x_true;
1706         ins[1] = x_false;
1707
1708         count_block = new_Block(2, ins);
1709         DB((dbg, LEVEL_4, "Duff block 2 %N\n", count_block));
1710
1711
        /* Increase the loop-taken count depending on whether the loop condition
         * compares against the latest value of the iv. */
1714         if (loop_info.latest_value == 1) {
1715                 /* ems % step == 0 :  +0 */
1716                 true_val = new_Const(get_mode_null(mode));
1717                 /* ems % step != 0 :  +1 */
1718                 false_val = new_Const(get_mode_one(mode));
1719         } else {
1720                 ir_tarval *tv_two = new_tarval_from_long(2, mode);
1721                 /* ems % step == 0 :  +1 */
1722                 true_val = new_Const(get_mode_one(mode));
1723                 /* ems % step != 0 :  +2 */
1724                 false_val = new_Const(tv_two);
1725         }
1726
1727         ins[0] = true_val;
1728         ins[1] = false_val;
1729
1730         correction = new_r_Phi(count_block, 2, ins, mode);
1731
1732         count = new_r_Proj(ems_div, mode, pn_Div_res);
1733
1734         /* (end - start) / step  +  correction */
1735         count = new_Add(count, correction, mode);
1736
1737         /* We preconditioned the loop to be tail-controlled.
1738          * So, if count is something 'wrong' like 0,
1739          * negative/positive (depending on step direction),
1740          * we may take the loop once (tail-contr.) and leave it
1741          * to the existing condition, to break; */
1742
1743         /* Depending on step direction, we have to check for > or < 0 */
1744         if (loop_info.decreasing == 1) {
1745                 cmp_bad_count = new_r_Cmp(count_block, count, const_null,
1746                                           ir_relation_less);
1747         } else {
1748                 cmp_bad_count = new_r_Cmp(count_block, count, const_null,
1749                                           ir_relation_greater);
1750         }
1751
1752         bad_count_neg = new_r_Cond(count_block, cmp_bad_count);
1753         good_count = new_Proj(bad_count_neg, mode_X, pn_Cond_true);
        bad_count = new_Proj(bad_count_neg, mode_X, pn_Cond_false);
1755
        /* 3. Duff block
         *    Contains the Mod to decide which loop copy to start from. */
1758
1759         ins[0] = good_count;
1760         ins[1] = bad_count;
1761         duff_block = new_Block(2, ins);
1762         DB((dbg, LEVEL_4, "Duff block 3 %N\n", duff_block));
1763
1764         /* Get absolute value */
1765         ins[0] = new_Abs(count, mode);
1766         /* Manually feed the aforementioned count = 1 (bad case)*/
1767         ins[1] = new_Const(get_mode_one(mode));
1768         count_phi = new_r_Phi(duff_block, 2, ins, mode);
1769
1770         unroll_c = new_Const(new_tarval_from_long((long)unroll_nr, mode));
1771
1772         /* count % unroll_nr */
1773         duff_mod = new_r_Mod(duff_block,
1774                 new_NoMem(),
1775                 count_phi,
1776                 unroll_c,
1777                 mode,
1778                 op_pin_state_pinned);
1779
1780
1781         proj = new_Proj(duff_mod, mode, pn_Mod_res);
        /* The Cond is NOT automatically created in the block of the proj! */
1783         cond = new_r_Cond(duff_block, proj);
1784
1785         loop_info.duff_cond = cond;
1786 }
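
/* Worked example of the computation built above (illustrative numbers,
 * assuming latest_value == 1): start = 0, end = 12, step = 3, unroll_nr = 4.
 * Then ems = 12 and ems % 3 == 0, so the correction phi selects +0 and
 * count = 12 / 3 + 0 = 4.  The count passes the sanity check, so
 * count_phi = |4| = 4, and 4 % unroll_nr == 0 selects the corresponding
 * proj of the final Cond, i.e. which loop copy execution starts in. */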
1787
1788 /* Returns 1 if given node is not in loop,
1789  * or if it is a phi of the loop head with only loop invariant defs.
1790  */
1791 static unsigned is_loop_invariant_def(ir_node *node)
1792 {
1793         int i;
1794
1795         if (! is_in_loop(node)) {
1796                 DB((dbg, LEVEL_4, "Not in loop %N\n", node));
1797                 /* || is_Const(node) || is_SymConst(node)) {*/
1798                 return 1;
1799         }
1800
1801         /* If this is a phi of the loophead shared by more than 1 loop,
1802          * we need to check if all defs are not in the loop.  */
1803         if (is_Phi(node)) {
1804                 ir_node *block;
1805                 block = get_nodes_block(node);
1806
1807                 /* To prevent unexpected situations. */
1808                 if (block != loop_head) {
1809                         return 0;
1810                 }
1811
1812                 for (i = 0; i < get_irn_arity(node); ++i) {
                        /* All values coming over own backedges must be the phi itself. */
1814                         if (is_own_backedge(block, i) && get_irn_n(node, i) != node)
1815                                 return 0;
1816                 }
1817                 DB((dbg, LEVEL_4, "invar %N\n", node));
1818                 return 1;
1819         }
1820         DB((dbg, LEVEL_4, "Not invar %N\n", node));
1821
1822         return 0;
1823 }
1824
1825 /* Returns 1 if one pred of node is invariant and the other is not.
1826  * invar_pred and other are set analogously. */
1827 static unsigned get_invariant_pred(ir_node *node, ir_node **invar_pred, ir_node **other)
1828 {
1829         ir_node *pred0 = get_irn_n(node, 0);
1830         ir_node *pred1 = get_irn_n(node, 1);
1831
1832         *invar_pred = NULL;
1833         *other = NULL;
1834
1835         if (is_loop_invariant_def(pred0)) {
1836                 DB((dbg, LEVEL_4, "pred0 invar %N\n", pred0));
1837                 *invar_pred = pred0;
1838                 *other = pred1;
1839         }
1840
1841         if (is_loop_invariant_def(pred1)) {
1842                 DB((dbg, LEVEL_4, "pred1 invar %N\n", pred1));
1843
1844                 if (*invar_pred != NULL) {
1845                         /* RETURN. We do not want both preds to be invariant. */
1846                         return 0;
1847                 }
1848
1849                 *other = pred0;
1850                 *invar_pred = pred1;
1851                 return 1;
1852         } else {
1853                 DB((dbg, LEVEL_4, "pred1 not invar %N\n", pred1));
1854
1855                 if (*invar_pred != NULL)
1856                         return 1;
1857                 else
1858                         return 0;
1859         }
1860 }
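
/* E.g. for a loop condition Cmp(i, n) where n is defined before the loop:
 * invar_pred becomes n and other becomes i.  If both or neither pred is
 * invariant, 0 is returned. */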
1861
/* Starts from a phi that may belong to an iv.
 * If an Add (or Sub) forms a cycle with iteration_phi and a suitable
 * start value is found, 1 is returned and loop_info.start_val as well as
 * loop_info.add are set. */
1866 static unsigned get_start_and_add(ir_node *iteration_phi, unrolling_kind_flag role)
1867 {
1868         int i;
1869         ir_node *found_add = loop_info.add;
1870         int arity = get_irn_arity(iteration_phi);
1871
1872         DB((dbg, LEVEL_4, "Find start and add from %N\n", iteration_phi));
1873
1874         for (i = 0; i < arity; ++i) {
1875
1876                 /* Find start_val which needs to be pred of the iteration_phi.
1877                  * If start_val already known, sanity check. */
1878                 if (!is_backedge(get_nodes_block(loop_info.iteration_phi), i)) {
1879                         ir_node *found_start_val = get_irn_n(loop_info.iteration_phi, i);
1880
1881                         DB((dbg, LEVEL_4, "found_start_val %N\n", found_start_val));
1882
                        /* We already found a start_val; it must always be the same. */
1884                         if (loop_info.start_val && found_start_val != loop_info.start_val)
1885                                 return 0;
1886
                        if ((role == constant) && !(is_SymConst(found_start_val) || is_Const(found_start_val)))
                                return 0;
                        else if ((role == invariant) && !is_loop_invariant_def(found_start_val))
                                return 0;
1891
1892                         loop_info.start_val = found_start_val;
1893                 }
1894
                /* The phi has to be in the loop head.
                 * Follow all own backedges. Every value supplied from these preds of the phi
                 * needs to originate from the same add. */
1898                 if (is_own_backedge(get_nodes_block(loop_info.iteration_phi), i)) {
1899                         ir_node *new_found = get_irn_n(loop_info.iteration_phi,i);
1900
1901                         DB((dbg, LEVEL_4, "is add? %N\n", new_found));
1902
1903                         if (! (is_Add(new_found) || is_Sub(new_found)) || (found_add && found_add != new_found))
1904                                 return 0;
1905                         else
1906                                 found_add = new_found;
1907                 }
1908         }
1909
1910         loop_info.add = found_add;
1911
1912         return 1;
1913 }
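
/* Typical shape this matches, e.g. for "for (i = start; ...; i += step)":
 *
 *      start   .----------.
 *         \    |          |
 *          Phi (loop head) |
 *           |              |
 *          Add(Phi, step) -'
 *
 * The entry in of the Phi yields start_val; the own-backedge ins must all
 * come from the same Add (or Sub), which is recorded as loop_info.add. */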
1914
1915
1916 /* Returns 1 if one pred of node is a const value and the other is not.
1917  * const_pred and other are set analogously. */
1918 static unsigned get_const_pred(ir_node *node, ir_node **const_pred, ir_node **other)
1919 {
1920         ir_node *pred0 = get_irn_n(node, 0);
1921         ir_node *pred1 = get_irn_n(node, 1);
1922
1923         DB((dbg, LEVEL_4, "Checking for constant pred of %N\n", node));
1924
1925         *const_pred = NULL;
1926         *other = NULL;
1927
1928         /*DB((dbg, LEVEL_4, "is %N const\n", pred0));*/
1929         if (is_Const(pred0) || is_SymConst(pred0)) {
1930                 *const_pred = pred0;
1931                 *other = pred1;
1932         }
1933
1934         /*DB((dbg, LEVEL_4, "is %N const\n", pred1));*/
1935         if (is_Const(pred1) || is_SymConst(pred1)) {
1936                 if (*const_pred != NULL) {
1937                         /* RETURN. We do not want both preds to be constant. */
1938                         return 0;
1939                 }
1940
1941                 *other = pred0;
1942                 *const_pred = pred1;
1943         }
1944
1945         if (*const_pred == NULL)
1946                 return 0;
1947         else
1948                 return 1;
1949 }
1950
/* Returns 1 if the loop exits within 2 further steps of the iv.
 * norm_proj is the relation under which we stay in the loop. */
1953 static unsigned simulate_next(ir_tarval **count_tar,
1954                 ir_tarval *stepped, ir_tarval *step_tar, ir_tarval *end_tar,
1955                 ir_relation norm_proj)
1956 {
1957         ir_tarval *next;
1958
1959         DB((dbg, LEVEL_4, "Loop taken if (stepped)%ld %s (end)%ld ",
1960                                 get_tarval_long(stepped),
1961                                 get_relation_string((norm_proj)),
1962                                 get_tarval_long(end_tar)));
1963         DB((dbg, LEVEL_4, "comparing latest value %d\n", loop_info.latest_value));
1964
1965         /* If current iv does not stay in the loop,
1966          * this run satisfied the exit condition. */
1967         if (! (tarval_cmp(stepped, end_tar) & norm_proj))
1968                 return 1;
1969
1970         DB((dbg, LEVEL_4, "Result: (stepped)%ld IS %s (end)%ld\n",
1971                                 get_tarval_long(stepped),
1972                                 get_relation_string(tarval_cmp(stepped, end_tar)),
1973                                 get_tarval_long(end_tar)));
1974
1975         /* next step */
1976         if (is_Add(loop_info.add))
1977                 next = tarval_add(stepped, step_tar);
1978         else
1979                 /* sub */
1980                 next = tarval_sub(stepped, step_tar, get_irn_mode(loop_info.end_val));
1981
1982         DB((dbg, LEVEL_4, "Loop taken if %ld %s %ld ",
1983                                 get_tarval_long(next),
1984                                 get_relation_string(norm_proj),
1985                                 get_tarval_long(end_tar)));
1986         DB((dbg, LEVEL_4, "comparing latest value %d\n", loop_info.latest_value));
1987
1988         /* Increase steps. */
1989         *count_tar = tarval_add(*count_tar, get_tarval_one(get_tarval_mode(*count_tar)));
1990
1991         /* Next has to fail the loop condition, or we will never exit. */
1992         if (! (tarval_cmp(next, end_tar) & norm_proj))
1993                 return 1;
1994         else
1995                 return 0;
1996 }
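
/* Worked example (illustrative): start = 0, end = 10, step = 3 and
 * norm_proj = ir_relation_less.  Called with stepped = 9: 9 < 10 still
 * holds, so one more step is simulated; next = 12, count_tar is increased,
 * and since 12 < 10 fails, 1 is returned (the loop does exit).  If next
 * still satisfied the relation, 0 would be returned and the caller would
 * reject unrolling. */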
1997
1998 /* Check if loop meets requirements for a 'simple loop':
1999  * - Exactly one cf out
2000  * - Allowed calls
2001  * - Max nodes after unrolling
2002  * - tail-controlled
2003  * - exactly one be
2004  * - cmp
 * Returns the Cmp node or NULL. */
2006 static ir_node *is_simple_loop(void)
2007 {
2008         int arity, i;
2009         ir_node *loop_block, *exit_block, *projx, *cond, *cmp;
2010
2011         /* Maximum of one condition, and no endless loops. */
2012         if (loop_info.cf_outs != 1)
2013                 return NULL;
2014
2015         DB((dbg, LEVEL_4, "1 loop exit\n"));
2016
2017         /* Calculate maximum unroll_nr keeping node count below limit. */
2018         loop_info.max_unroll = (int)((double)opt_params.max_unrolled_loop_size / (double)loop_info.nodes);
2019         if (loop_info.max_unroll < 2) {
2020                 ++stats.too_large;
2021                 return NULL;
2022         }
2023
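        /* E.g. with the default max_unrolled_loop_size of 400 and a loop of
         * 90 nodes this allows a maximum unroll factor of 4. */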
        DB((dbg, LEVEL_4, "maximum unroll factor %u, to not exceed node limit\n",
                loop_info.max_unroll));
2026
2027         arity = get_irn_arity(loop_head);
2028         /* RETURN if we have more than 1 be. */
2029         /* Get my backedges without alien bes. */
2030         loop_block = NULL;
2031         for (i = 0; i < arity; ++i) {
2032                 ir_node *pred = get_irn_n(loop_head, i);
2033                 if (is_own_backedge(loop_head, i)) {
2034                         if (loop_block)
2035                                 /* Our simple loops may have only one backedge. */
2036                                 return NULL;
2037                         else {
2038                                 loop_block = get_nodes_block(pred);
2039                                 loop_info.be_src_pos = i;
2040                         }
2041                 }
2042         }
2043
2044         DB((dbg, LEVEL_4, "loop has 1 own backedge.\n"));
2045
2046         exit_block = get_nodes_block(loop_info.cf_out.pred);
2047         /* The loop has to be tail-controlled.
2048          * This can be changed/improved,
2049          * but we would need a duff iv. */
2050         if (exit_block != loop_block)
2051                 return NULL;
2052
2053         DB((dbg, LEVEL_4, "tail-controlled loop.\n"));
2054
2055         /* find value on which loop exit depends */
2056         projx = loop_info.cf_out.pred;
2057         cond = get_irn_n(projx, 0);
2058         cmp = get_irn_n(cond, 0);
2059
2060         if (!is_Cmp(cmp))
2061                 return NULL;
2062
        DB((dbg, LEVEL_5, "projection is %ld\n", get_Proj_proj(projx)));

        switch (get_Proj_proj(projx)) {
2066                 case pn_Cond_false:
2067                         loop_info.exit_cond = 0;
2068                         break;
2069                 case pn_Cond_true:
2070                         loop_info.exit_cond = 1;
2071                         break;
2072                 default:
2073                         panic("Cond Proj_proj other than true/false");
2074         }
2075
2076         DB((dbg, LEVEL_4, "Valid Cmp.\n"));
2077         return cmp;
2078 }
2079
2080 /* Returns 1 if all nodes are mode_Iu or mode_Is. */
2081 static unsigned are_mode_I(ir_node *n1, ir_node* n2, ir_node *n3)
2082 {
2083         ir_mode *m1 = get_irn_mode(n1);
2084         ir_mode *m2 = get_irn_mode(n2);
2085         ir_mode *m3 = get_irn_mode(n3);
2086
2087         if ((m1 == mode_Iu && m2 == mode_Iu && m3 == mode_Iu) ||
2088             (m1 == mode_Is && m2 == mode_Is && m3 == mode_Is))
2089                 return 1;
2090         else
2091                 return 0;
2092 }
2093
2094 /* Checks if cur_loop is a simple tail-controlled counting loop
2095  * with start and end value loop invariant, step constant. */
2096 static unsigned get_unroll_decision_invariant(void)
2097 {
2098
2099         ir_node   *projres, *loop_condition, *iteration_path;
2100         unsigned   success;
2101         ir_tarval *step_tar;
2102         ir_mode   *mode;
2103
2104
2105         /* RETURN if loop is not 'simple' */
2106         projres = is_simple_loop();
2107         if (projres == NULL)
2108                 return 0;
2109
        /* Use a minimal size for the invariant unrolled loop,
         * as duffs device produces overhead. */
2112         if (loop_info.nodes < opt_params.invar_unrolling_min_size)
2113                 return 0;
2114
2115         loop_condition = get_irn_n(projres, 0);
2116
2117         success = get_invariant_pred(loop_condition, &loop_info.end_val, &iteration_path);
2118         DB((dbg, LEVEL_4, "pred invar %d\n", success));
2119
2120         if (! success)
2121                 return 0;
2122
2123         DB((dbg, LEVEL_4, "Invariant End_val %N, other %N\n", loop_info.end_val, iteration_path));
2124
2125         /* We may find the add or the phi first.
2126          * Until now we only have end_val. */
2127         if (is_Add(iteration_path) || is_Sub(iteration_path)) {
2128
2129                 loop_info.add = iteration_path;
2130                 DB((dbg, LEVEL_4, "Case 1: Got add %N (maybe not sane)\n", loop_info.add));
2131
2132                 /* Preds of the add should be step and the iteration_phi */
2133                 success = get_const_pred(loop_info.add, &loop_info.step, &loop_info.iteration_phi);
2134                 if (! success)
2135                         return 0;
2136
2137                 DB((dbg, LEVEL_4, "Got step %N\n", loop_info.step));
2138
2139                 if (! is_Phi(loop_info.iteration_phi))
2140                         return 0;
2141
2142                 DB((dbg, LEVEL_4, "Got phi %N\n", loop_info.iteration_phi));
2143
2144                 /* Find start_val.
2145                  * Does necessary sanity check of add, if it is already set.  */
2146                 success = get_start_and_add(loop_info.iteration_phi, invariant);
2147                 if (! success)
2148                         return 0;
2149
2150                 DB((dbg, LEVEL_4, "Got start A  %N\n", loop_info.start_val));
2151
2152         } else if (is_Phi(iteration_path)) {
2153                 ir_node *new_iteration_phi;
2154
2155                 loop_info.iteration_phi = iteration_path;
2156                 DB((dbg, LEVEL_4, "Case 2: Got phi %N\n", loop_info.iteration_phi));
2157
2158                 /* Find start_val and add-node.
2159                  * Does necessary sanity check of add, if it is already set.  */
2160                 success = get_start_and_add(loop_info.iteration_phi, invariant);
2161                 if (! success)
2162                         return 0;
2163
2164                 DB((dbg, LEVEL_4, "Got start B %N\n", loop_info.start_val));
2165                 DB((dbg, LEVEL_4, "Got add or sub %N\n", loop_info.add));
2166
2167                 success = get_const_pred(loop_info.add, &loop_info.step, &new_iteration_phi);
2168                 if (! success)
2169                         return 0;
2170
2171                 DB((dbg, LEVEL_4, "Got step (B) %N\n", loop_info.step));
2172
2173                 if (loop_info.iteration_phi != new_iteration_phi)
2174                         return 0;
2175
2176         } else {
2177                 return 0;
2178         }
2179
2180         mode = get_irn_mode(loop_info.end_val);
2181
2182         DB((dbg, LEVEL_4, "start %N, end %N, step %N\n",
2183                                 loop_info.start_val, loop_info.end_val, loop_info.step));
2184
2185         if (mode != mode_Is && mode != mode_Iu)
2186                 return 0;
2187
2188         /* TODO necessary? */
2189         if (!are_mode_I(loop_info.start_val, loop_info.step, loop_info.end_val))
2190                 return 0;
2191
2192         DB((dbg, LEVEL_4, "mode integer\n"));
2193
2194         step_tar = get_Const_tarval(loop_info.step);
2195
2196         if (tarval_is_null(step_tar)) {
2197                 /* TODO Might be worth a warning. */
2198                 return 0;
2199         }
2200
2201         DB((dbg, LEVEL_4, "step is not 0\n"));
2202
2203         create_duffs_block();
2204
2205         return loop_info.max_unroll;
2206 }
2207
2208 /* Returns unroll factor,
2209  * given maximum unroll factor and number of loop passes. */
2210 static unsigned get_preferred_factor_constant(ir_tarval *count_tar)
2211 {
2212         ir_tarval *tar_6, *tar_5, *tar_4, *tar_3, *tar_2;
2213         unsigned prefer;
2214         ir_mode *mode = get_irn_mode(loop_info.end_val);
2215
2216         tar_6 = new_tarval_from_long(6, mode);
2217         tar_5 = new_tarval_from_long(5, mode);
2218         tar_4 = new_tarval_from_long(4, mode);
2219         tar_3 = new_tarval_from_long(3, mode);
2220         tar_2 = new_tarval_from_long(2, mode);
2221
2222         /* loop passes % {6, 5, 4, 3, 2} == 0  */
2223         if (tarval_is_null(tarval_mod(count_tar, tar_6)))
2224                 prefer = 6;
2225         else if (tarval_is_null(tarval_mod(count_tar, tar_5)))
2226                 prefer = 5;
2227         else if (tarval_is_null(tarval_mod(count_tar, tar_4)))
2228                 prefer = 4;
2229         else if (tarval_is_null(tarval_mod(count_tar, tar_3)))
2230                 prefer = 3;
2231         else if (tarval_is_null(tarval_mod(count_tar, tar_2)))
2232                 prefer = 2;
2233         else {
2234                 /* gcd(max_unroll, count_tar) */
2235                 int a = loop_info.max_unroll;
2236                 int b = (int)get_tarval_long(count_tar);
2237                 int c;
2238
2239                 DB((dbg, LEVEL_4, "gcd of max_unroll %d and count_tar %d: ", a, b));
2240
                do {
                        c = a % b;
                        a = b;
                        b = c;
                } while (c != 0);
2245
2246                 DB((dbg, LEVEL_4, "%d\n", a));
2247                 return a;
2248         }
2249
2250         DB((dbg, LEVEL_4, "preferred unroll factor %d\n", prefer));
2251
        /*
         * If our preference is greater than the allowed unroll factor,
         * we can either reduce the preferred factor (and avoid a duffs device block),
         * or create a duffs device block, whose start loop we would even know
         * at compile time in this case (constants only).
         * The latter yields graph B below, but for code generation we would
         * rather use graph A. The graphs are equivalent, so we only reduce the
         * preferred factor.
2260          * A)                   B)
2261          *     PreHead             PreHead
2262          *        |      ,--.         |   ,--.
2263          *         \ Loop1   \        Loop2   \
2264          *          \  |     |       /  |     |
2265          *           Loop2   /      / Loop1   /
2266          *           |   `--'      |      `--'
2267          */
2268
2269         if (prefer <= loop_info.max_unroll)
2270                 return prefer;
2271         else {
                switch (prefer) {
2273                         case 6:
2274                                 if (loop_info.max_unroll >= 3)
2275                                         return 3;
2276                                 else if (loop_info.max_unroll >= 2)
2277                                         return 2;
2278                                 else
2279                                         return 0;
2280
2281                         case 4:
2282                                 if (loop_info.max_unroll >= 2)
2283                                         return 2;
2284                                 else
2285                                         return 0;
2286
2287                         default:
2288                                 return 0;
2289                 }
2290         }
2291 }
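
/* Worked example (illustrative): count_tar = 12 and max_unroll = 4.
 * 12 % 6 == 0, so the preferred factor is 6; since 6 > 4 the switch above
 * falls back to 3.  With count_tar = 7 and max_unroll = 4 none of the
 * divisors match, and gcd(4, 7) = 1 is returned, i.e. unrolling is not
 * worthwhile. */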
2292
2293 /* Check if cur_loop is a simple counting loop.
2294  * Start, step and end are constants.
2295  * TODO The whole constant case should use procedures similar to
2296  * the invariant case, as they are more versatile. */
2297 /* TODO split. */
2298 static unsigned get_unroll_decision_constant(void)
2299 {
2300         ir_node     *cmp, *iteration_path;
2301         unsigned     success, is_latest_val;
2302         ir_tarval   *start_tar, *end_tar, *step_tar, *diff_tar, *count_tar;
2303         ir_tarval   *stepped;
2304         ir_relation  proj_proj, norm_proj;
2305         ir_mode     *mode;
2306
2307         /* RETURN if loop is not 'simple' */
2308         cmp = is_simple_loop();
2309         if (cmp == NULL)
2310                 return 0;
2311
2312         /* One in of the loop condition needs to be loop invariant. => end_val
2313          * The other in is assigned by an add. => add
2314          * The add uses a loop invariant value => step
2315          * and a phi with a loop invariant start_val and the add node as ins.
         *
         *    ^   ^
         *    |   | .-,
         *    |   Phi |
         *     \  |   |
         *   ^  Add   |
         *    \  | \__|
         *     cond
         *      /\
         */
2326
2327         success = get_const_pred(cmp, &loop_info.end_val, &iteration_path);
2328         if (! success)
2329                 return 0;
2330
2331         DB((dbg, LEVEL_4, "End_val %N, other %N\n", loop_info.end_val, iteration_path));
2332
2333         /* We may find the add or the phi first.
2334          * Until now we only have end_val. */
2335         if (is_Add(iteration_path) || is_Sub(iteration_path)) {
2336
2337                 /* We test against the latest value of the iv. */
2338                 is_latest_val = 1;
2339
2340                 loop_info.add = iteration_path;
2341                 DB((dbg, LEVEL_4, "Case 2: Got add %N (maybe not sane)\n", loop_info.add));
2342
2343                 /* Preds of the add should be step and the iteration_phi */
2344                 success = get_const_pred(loop_info.add, &loop_info.step, &loop_info.iteration_phi);
2345                 if (! success)
2346                         return 0;
2347
2348                 DB((dbg, LEVEL_4, "Got step %N\n", loop_info.step));
2349
2350                 if (! is_Phi(loop_info.iteration_phi))
2351                         return 0;
2352
2353                 DB((dbg, LEVEL_4, "Got phi %N\n", loop_info.iteration_phi));
2354
2355                 /* Find start_val.
2356                  * Does necessary sanity check of add, if it is already set.  */
2357                 success = get_start_and_add(loop_info.iteration_phi, constant);
2358                 if (! success)
2359                         return 0;
2360
2361                 DB((dbg, LEVEL_4, "Got start %N\n", loop_info.start_val));
2362
2363         } else if (is_Phi(iteration_path)) {
2364                 ir_node *new_iteration_phi;
2365
2366                 /* We compare with the value the iv had entering this run. */
2367                 is_latest_val = 0;
2368
2369                 loop_info.iteration_phi = iteration_path;
2370                 DB((dbg, LEVEL_4, "Case 1: Got phi %N \n", loop_info.iteration_phi));
2371
2372                 /* Find start_val and add-node.
2373                  * Does necessary sanity check of add, if it is already set.  */
2374                 success = get_start_and_add(loop_info.iteration_phi, constant);
2375                 if (! success)
2376                         return 0;
2377
2378                 DB((dbg, LEVEL_4, "Got start %N\n", loop_info.start_val));
2379                 DB((dbg, LEVEL_4, "Got add or sub %N\n", loop_info.add));
2380
2381                 success = get_const_pred(loop_info.add, &loop_info.step, &new_iteration_phi);
2382                 if (! success)
2383                         return 0;
2384
2385                 DB((dbg, LEVEL_4, "Got step %N\n", loop_info.step));
2386
2387                 if (loop_info.iteration_phi != new_iteration_phi)
2388                         return 0;
2389
2390         } else {
2391                 /* RETURN */
2392                 return 0;
2393         }
2394
2395         mode = get_irn_mode(loop_info.end_val);
2396
2397         DB((dbg, LEVEL_4, "start %N, end %N, step %N\n",
2398                                 loop_info.start_val, loop_info.end_val, loop_info.step));
2399
2400         if (mode != mode_Is && mode != mode_Iu)
2401                 return 0;
2402
2403         /* TODO necessary? */
2404         if (!are_mode_I(loop_info.start_val, loop_info.step, loop_info.end_val))
2405                 return 0;
2406
2407         DB((dbg, LEVEL_4, "mode integer\n"));
2408
2409         end_tar = get_Const_tarval(loop_info.end_val);
2410         start_tar = get_Const_tarval(loop_info.start_val);
2411         step_tar = get_Const_tarval(loop_info.step);
2412
2413         if (tarval_is_null(step_tar))
2414                 /* TODO Might be worth a warning. */
2415                 return 0;
2416
2417         DB((dbg, LEVEL_4, "step is not 0\n"));
2418
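        /* The iv decreases when step < 0 is used with an Add,
         * or step > 0 with a Sub. */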
2419         if ((!tarval_is_negative(step_tar)) ^ (!is_Sub(loop_info.add)))
2420                 loop_info.decreasing = 1;
2421
2422         diff_tar = tarval_sub(end_tar, start_tar, mode);
2423
        /* We need at least count_tar steps to get close to end_val, maybe more.
         * There is no way we could already have taken too many steps.
         * This represents the 'latest value'.
         * (Whether the condition checks against the latest value is checked later.) */
2428         count_tar = tarval_div(diff_tar, step_tar);
2429
        /* The iv will not pass end_val (except on overflow).
         * Nothing is done, as it would yield no advantage. */
2432         if (tarval_is_negative(count_tar)) {
2433                 DB((dbg, LEVEL_4, "Loop is endless or never taken."));
2434                 /* TODO Might be worth a warning. */
2435                 return 0;
2436         }
2437
2438         ++stats.u_simple_counting_loop;
2439
2440         loop_info.latest_value = is_latest_val;
2441
2442         /* TODO split here
2443         if (! is_simple_counting_loop(&count_tar))
2444                 return 0;
2445         */
2446
2447         /* stepped can be negative, if step < 0 */
2448         stepped = tarval_mul(count_tar, step_tar);
2449
        /* Step as close to end_val as possible:
         * |stepped| <= |end_tar|, and dist(stepped, end_tar) is smaller than a step. */
2452         if (is_Sub(loop_info.add))
                stepped = tarval_sub(start_tar, stepped, mode);
2454         else
2455                 stepped = tarval_add(start_tar, stepped);
2456
2457         DB((dbg, LEVEL_4, "stepped to %ld\n", get_tarval_long(stepped)));
2458
2459         proj_proj = get_Cmp_relation(cmp);
2460         /* Assure that norm_proj is the stay-in-loop case. */
2461         if (loop_info.exit_cond == 1)
2462                 norm_proj = get_negated_relation(proj_proj);
2463         else
2464                 norm_proj = proj_proj;
2465
2466         DB((dbg, LEVEL_4, "normalized projection %s\n", get_relation_string(norm_proj)));
        /* Executed at most once (we would stay in the loop only while iv == end). */
2468         if (norm_proj == ir_relation_equal)
2469                 /* TODO Might be worth a warning. */
2470                 return 0;
2471
2472         /* calculates next values and increases count_tar according to it */
2473         success = simulate_next(&count_tar, stepped, step_tar, end_tar, norm_proj);
2474         if (! success)
2475                 return 0;
2476
        /* We run the loop once more if the condition compares against the
         * not yet in-/decremented iv. */
2479         if (is_latest_val == 0) {
2480                 DB((dbg, LEVEL_4, "condition uses not latest iv value\n"));
2481                 count_tar = tarval_add(count_tar, get_tarval_one(mode));
2482         }
2483
2484         DB((dbg, LEVEL_4, "loop taken %ld times\n", get_tarval_long(count_tar)));
2485
2486         /* Assure the loop is taken at least 1 time. */
2487         if (tarval_is_null(count_tar)) {
2488                 /* TODO Might be worth a warning. */
2489                 return 0;
2490         }
2491
2492         loop_info.count_tar = count_tar;
2493         return get_preferred_factor_constant(count_tar);
2494 }
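
/* Worked example (illustrative): a tail-controlled loop with start = 0,
 * end = 10, step = 3 whose Cmp tests the already incremented iv
 * (is_latest_val == 1): diff = 10, count = 10 / 3 = 3 and stepped = 9.
 * simulate_next() sees that 9 < 10 still holds but 12 does not, so count
 * becomes 4, i.e. the loop body runs 4 times, and
 * get_preferred_factor_constant(4) decides the actual unroll factor. */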
2495
2496 /**
2497  * Loop unrolling
2498  */
2499 static void unroll_loop(void)
2500 {
2501
2502         if (! (loop_info.nodes > 0))
2503                 return;
2504
2505         if (loop_info.nodes > opt_params.max_unrolled_loop_size) {
2506                 DB((dbg, LEVEL_2, "Nodes %d > allowed nodes %d\n",
2507                         loop_info.nodes, opt_params.max_unrolled_loop_size));
2508                 ++stats.too_large;
2509                 return;
2510         }
2511
2512         if (loop_info.calls > 0) {
2513                 DB((dbg, LEVEL_2, "Calls %d > allowed calls 0\n",
2514                         loop_info.calls));
2515                 ++stats.calls_limit;
2516                 return;
2517         }
2518
2519         unroll_nr = 0;
2520
2521         /* get_unroll_decision_constant and invariant are completely
2522          * independent for flexibility.
2523          * Some checks may be performed twice. */
2524
2525         /* constant case? */
2526         if (opt_params.allow_const_unrolling)
2527                 unroll_nr = get_unroll_decision_constant();
2528         if (unroll_nr > 1) {
2529                 loop_info.unroll_kind = constant;
2530
2531         } else {
2532                 /* invariant case? */
2533                 if (opt_params.allow_invar_unrolling)
2534                         unroll_nr = get_unroll_decision_invariant();
2535                 if (unroll_nr > 1)
2536                         loop_info.unroll_kind = invariant;
2537         }
2538
2539         DB((dbg, LEVEL_2, " *** Unrolling %d times ***\n", unroll_nr));
2540
2541         if (unroll_nr > 1) {
2542                 loop_entries = NEW_ARR_F(entry_edge, 0);
2543
2544                 /* Get loop outs */
2545                 irg_walk_graph(current_ir_graph, get_loop_entries, NULL, NULL);
2546
2547                 if (loop_info.unroll_kind == constant) {
2548                         if ((int)get_tarval_long(loop_info.count_tar) == unroll_nr)
2549                                 loop_info.needs_backedge = 0;
2550                         else
2551                                 loop_info.needs_backedge = 1;
2552                 } else {
2553                         loop_info.needs_backedge = 1;
2554                 }
2555
                /* Use a nodemap to keep track of the copies of every node. */
2557                 ir_nodemap_init(&map, current_ir_graph);
2558                 obstack_init(&obst);
2559
2560                 /* Copies the loop */
2561                 copy_loop(loop_entries, unroll_nr - 1);
2562
2563                 /* Line up the floating copies. */
2564                 place_copies(unroll_nr - 1);
2565
2566                 /* Remove phis with 1 in
2567                  * If there were no nested phis, this would not be necessary.
2568                  * Avoiding the creation in the first place
2569                  * leads to complex special cases. */
2570                 irg_walk_graph(current_ir_graph, correct_phis, NULL, NULL);
2571
2572                 if (loop_info.unroll_kind == constant)
2573                         ++stats.constant_unroll;
2574                 else
2575                         ++stats.invariant_unroll;
2576
2577                 clear_irg_state(current_ir_graph, IR_GRAPH_STATE_CONSISTENT_DOMINANCE);
2578
2579                 DEL_ARR_F(loop_entries);
2580                 obstack_free(&obst, NULL);
2581                 ir_nodemap_destroy(&map);
2582         }
2583
2584 }
2585
2586 /* Analyzes the loop, and checks if size is within allowed range.
2587  * Decides if loop will be processed. */
2588 static void init_analyze(ir_graph *irg, ir_loop *loop)
2589 {
2590         cur_loop = loop;
2591
2592         loop_head       = NULL;
2593         loop_head_valid = true;
2594
2595         /* Reset loop info */
2596         memset(&loop_info, 0, sizeof(loop_info_t));
2597
        DB((dbg, LEVEL_1, "    >>>> current loop %ld <<<<\n",
2599             get_loop_loop_nr(loop)));
2600
        /* Collect loop information: head, node counts. */
2602         irg_walk_graph(irg, get_loop_info, NULL, NULL);
2603
2604         /* RETURN if there is no valid head */
2605         if (!loop_head || !loop_head_valid) {
2606                 DB((dbg, LEVEL_1,   "No valid loop head. Nothing done.\n"));
2607                 return;
2608         } else {
2609                 DB((dbg, LEVEL_1,   "Loophead: %N\n", loop_head));
2610         }
2611
2612         if (loop_info.branches > opt_params.max_branches) {
2613                 DB((dbg, LEVEL_1, "Branches %d > allowed branches %d\n",
2614                         loop_info.branches, opt_params.max_branches));
2615                 ++stats.calls_limit;
2616                 return;
2617         }
2618
2619         switch (loop_op) {
2620                 case loop_op_inversion:
2621                         loop_inversion(irg);
2622                         break;
2623
2624                 case loop_op_unrolling:
2625                         unroll_loop();
2626                         break;
2627
2628                 default:
2629                         panic("Loop optimization not implemented.");
2630         }
        DB((dbg, LEVEL_1, "       <<<< end of loop %ld >>>>\n",
2632             get_loop_loop_nr(loop)));
2633 }
2634
2635 /* Find innermost loops and add them to loops. */
2636 static void find_innermost_loop(ir_loop *loop)
2637 {
2638         bool   had_sons   = false;
2639         size_t n_elements = get_loop_n_elements(loop);
2640         size_t e;
2641
2642         for (e = 0; e < n_elements; ++e) {
2643                 loop_element element = get_loop_element(loop, e);
2644                 if (*element.kind == k_ir_loop) {
2645                         find_innermost_loop(element.son);
2646                         had_sons = true;
2647                 }
2648         }
2649
2650         if (!had_sons) {
2651                 ARR_APP1(ir_loop*, loops, loop);
2652         }
2653 }
2654
2655 static void set_loop_params(void)
2656 {
        opt_params.max_loop_size = 100;
        opt_params.depth_adaption = -50;
        opt_params.count_phi = true;
        opt_params.count_proj = false;
        opt_params.allowed_calls = 0;

        opt_params.max_cc_size = 5;

        opt_params.allow_const_unrolling = true;
        opt_params.allow_invar_unrolling = false;

        opt_params.invar_unrolling_min_size = 20;
        opt_params.max_unrolled_loop_size = 400;
        opt_params.max_branches = 9999;
2672 }
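
/* Note: with these defaults only constant unrolling is active;
 * allow_invar_unrolling is false, so get_unroll_decision_invariant() and
 * the duffs device construction above are unused unless this is changed. */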
2673
2674 /* Assure preconditions are met and go through all loops. */
2675 void loop_optimization(ir_graph *irg)
2676 {
2677         ir_loop *loop;
2678         size_t   i;
2679         size_t   n_elements;
2680
2681         set_loop_params();
2682
2683         /* Reset stats for this procedure */
2684         reset_stats();
2685
2686         /* Preconditions */
2687         set_current_ir_graph(irg);
2688
2689         ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
2690         collect_phiprojs(irg);
2691
2692         loop = get_irg_loop(irg);
2693
2694         loops = NEW_ARR_F(ir_loop *, 0);
2695         /* List all inner loops */
2696         n_elements = get_loop_n_elements(loop);
2697         for (i = 0; i < n_elements; ++i) {
2698                 loop_element element = get_loop_element(loop, i);
2699                 if (*element.kind != k_ir_loop)
2700                         continue;
2701                 find_innermost_loop(element.son);
2702         }
2703
2704         /* Set all links to NULL */
2705         irg_walk_graph(irg, reset_link, NULL, NULL);
2706
2707         for (i = 0; i < ARR_LEN(loops); ++i) {
2708                 ir_loop *loop = loops[i];
2709
2710                 ++stats.loops;
2711
2712                 /* Analyze and handle loop */
2713                 init_analyze(irg, loop);
2714
2715                 /* Copied blocks do not have their phi list yet */
2716                 collect_phiprojs(irg);
2717
2718                 /* Set links to NULL
2719                  * TODO Still necessary? */
2720                 irg_walk_graph(irg, reset_link, NULL, NULL);
2721         }
2722
2723         print_stats();
2724
2725         DEL_ARR_F(loops);
2726         ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
2727 }
2728
2729 static ir_graph_state_t perform_loop_unrolling(ir_graph *irg)
2730 {
2731         loop_op = loop_op_unrolling;
2732         loop_optimization(irg);
2733         return 0;
2734 }
2735
2736 static ir_graph_state_t perform_loop_inversion(ir_graph *irg)
2737 {
2738         loop_op = loop_op_inversion;
2739         loop_optimization(irg);
2740         return 0;
2741 }
2742
2743 static ir_graph_state_t perform_loop_peeling(ir_graph *irg)
2744 {
2745         loop_op = loop_op_peeling;
2746         loop_optimization(irg);
2747         return 0;
2748 }
2749
2750 static optdesc_t opt_unroll_loops = {
2751         "unroll-loops",
2752         IR_GRAPH_STATE_CONSISTENT_OUT_EDGES | IR_GRAPH_STATE_CONSISTENT_OUTS | IR_GRAPH_STATE_CONSISTENT_LOOPINFO,
2753         perform_loop_unrolling,
2754 };
2755
2756 static optdesc_t opt_invert_loops = {
2757         "invert-loops",
2758         IR_GRAPH_STATE_CONSISTENT_OUT_EDGES | IR_GRAPH_STATE_CONSISTENT_OUTS | IR_GRAPH_STATE_CONSISTENT_LOOPINFO,
2759         perform_loop_inversion,
2760 };
2761
2762 static optdesc_t opt_peel_loops = {
2763         "peel-loops",
2764         IR_GRAPH_STATE_CONSISTENT_OUT_EDGES | IR_GRAPH_STATE_CONSISTENT_OUTS | IR_GRAPH_STATE_CONSISTENT_LOOPINFO,
2765         perform_loop_peeling,
2766 };
2767
2768 void do_loop_unrolling(ir_graph *irg)
2769 { perform_irg_optimization(irg, &opt_unroll_loops); }
2770
2771 void do_loop_inversion(ir_graph *irg)
2772 { perform_irg_optimization(irg, &opt_invert_loops); }
2773
2774 void do_loop_peeling(ir_graph *irg)
2775 { perform_irg_optimization(irg, &opt_peel_loops); }
2776
2777 ir_graph_pass_t *loop_inversion_pass(const char *name)
2778 {
2779         return def_graph_pass(name ? name : "loop_inversion", do_loop_inversion);
2780 }
2781
2782 ir_graph_pass_t *loop_unroll_pass(const char *name)
2783 {
2784         return def_graph_pass(name ? name : "loop_unroll", do_loop_unrolling);
2785 }
2786
2787 ir_graph_pass_t *loop_peeling_pass(const char *name)
2788 {
2789         return def_graph_pass(name ? name : "loop_peeling", do_loop_peeling);
2790 }
2791
2792 void firm_init_loop_opt(void)
2793 {
2794         FIRM_DBG_REGISTER(dbg, "firm.opt.loop");
2795 }