ir/opt/loop.c
1 /*
2  * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @author   Christian Helmer
23  * @brief    loop inversion and loop unrolling
24  *
25  * @version  $Id$
26  */
27 #include "config.h"
28
29 #include "iroptimize.h"
30 #include "opt_init.h"
31 #include "irnode.h"
32 #include "debug.h"
33 #include "error.h"
34
35 #include "ircons.h"
36 #include "irgopt.h"
37 #include "irgmod.h"
38 #include "irgwalk.h"
39 #include "irouts.h"
40 #include "iredges.h"
41 #include "irtools.h"
42 #include "array_t.h"
43 #include "beutil.h"
44 #include "irpass.h"
45 #include "irdom.h"
46
47 #include <math.h>
48 #include "irbackedge_t.h"
49 #include "irphase_t.h"
50 #include "irloop_t.h"
51
52
53 DEBUG_ONLY(static firm_dbg_module_t *dbg);
54
55 /* DBG: Print stats for every procedure. */
56 #define LOOP_OPT_STATS 1
57
58 /* DBG: Ignore node limits and process every possible loop. */
59 #define LOOP_IGNORE_NODE_LIMITS 0
60
61 /**
62  * Convenience macro for iterating over every phi node of the given block.
63  * Requires phi list per block.
64  */
65 #define for_each_phi(block, phi) \
66         for ((phi) = get_Block_phis( (block) ); (phi) ; (phi) = get_Phi_next((phi)))
67
68 #define for_each_phi_safe(head, phi, next) \
69         for ((phi) = (head), (next) = (head) ? get_Phi_next((head)) : NULL; \
70                         (phi) ; (phi) = (next), (next) = (next) ? get_Phi_next((next)) : NULL)
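/*
 * Usage sketch for the two macros above (variable names are hypothetical;
 * it assumes the block's phi list is kept up to date, e.g. via
 * add_Block_phi as done throughout this file):
 *
 *     ir_node *phi, *next;
 *     for_each_phi(block, phi) {
 *             use(phi);                        // plain iteration
 *     }
 *     for_each_phi_safe(get_Block_phis(block), phi, next) {
 *             exchange(phi, replacement(phi)); // current phi may be replaced
 *     }
 */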
71
72 /* Currently processed loop. */
73 static ir_loop *cur_loop;
74
75 /* Flag for kind of unrolling. */
76 typedef enum {
77         constant,
78         invariant
79 } unrolling_kind_flag;
80
81 /* Condition for visiting a node during copy_walk. */
82 typedef unsigned walker_condition(ir_node *);
83
84 /* Node and position of a predecessor. */
85 typedef struct entry_edge {
86         ir_node *node;
87         int pos;
88         ir_node *pred;
89 } entry_edge;
90
91 /* Node info for unrolling. */
92 typedef struct unrolling_node_info {
93         ir_node **copies;
94 } unrolling_node_info;
95
96 /* Out edges of the loop head / condition chain. */
97 static entry_edge *cur_head_outs;
98
99 /* Information about the loop head */
100 static ir_node *loop_head = NULL;
101 static unsigned loop_head_valid = 1;
102
103 /* List of all inner loops that are processed. */
104 static ir_loop **loops;
105
106 #if LOOP_OPT_STATS
107
108 #define count_stats(val) (++val)
109 #define print_stats() (do_print_stats())
110 #define reset_stats() (do_reset_stats())
111
112 /* Stats */
113 typedef struct loop_stats_t {
114         unsigned loops;
115         unsigned inverted;
116         unsigned too_large;
117         unsigned too_large_adapted;
118         unsigned cc_limit_reached;
119         unsigned calls_limit;
120
121         unsigned u_simple_counting_loop;
122         unsigned constant_unroll;
123         unsigned invariant_unroll;
124
125         unsigned unhandled;
126 } loop_stats_t;
127
128 static loop_stats_t stats;
129
130 /* Set stats to zero */
131 static void do_reset_stats(void)
132 {
133         memset(&stats, 0, sizeof(loop_stats_t));
134 }
135
136 /* Print stats */
137 static void do_print_stats(void)
138 {
139         DB((dbg, LEVEL_2, "---------------------------------------\n"));
140         DB((dbg, LEVEL_2, "loops             :   %d\n",stats.loops));
141         DB((dbg, LEVEL_2, "inverted          :   %d\n",stats.inverted));
142         DB((dbg, LEVEL_2, "too_large         :   %d\n",stats.too_large));
143         DB((dbg, LEVEL_2, "too_large_adapted :   %d\n",stats.too_large_adapted));
144         DB((dbg, LEVEL_2, "cc_limit_reached  :   %d\n",stats.cc_limit_reached));
145         DB((dbg, LEVEL_2, "calls_limit       :   %d\n",stats.calls_limit));
146         DB((dbg, LEVEL_2, "u_simple_counting :   %d\n",stats.u_simple_counting_loop));
147         DB((dbg, LEVEL_2, "constant_unroll   :   %d\n",stats.constant_unroll));
148         DB((dbg, LEVEL_2, "invariant_unroll  :   %d\n",stats.invariant_unroll));
149         DB((dbg, LEVEL_2, "=======================================\n"));
150 }
151 #else
152 /* No stats */
153 #define count_stats(val) ((void)0)
154 #define print_stats() ((void)0)
155 #define reset_stats() ((void)0)
156
157 #endif
158
159 /* Commandline parameters */
160 typedef struct loop_opt_params_t {
161         unsigned max_loop_size;     /* Maximum number of nodes [nodes] */
162         int      depth_adaption;    /* Loop nest depth adaption [percent] */
163         unsigned allowed_calls;     /* Number of calls allowed [number] */
164         unsigned count_phi:1;       /* Count phi nodes */
165         unsigned count_proj:1;      /* Count projections */
166
167         unsigned max_cc_size;       /* Maximum condition chain size [nodes] */
168         unsigned max_branches;
169
170         unsigned max_unrolled_loop_size;    /* [nodes] */
171         unsigned allow_const_unrolling:1;
172         unsigned allow_invar_unrolling:1;
173         unsigned invar_unrolling_min_size;  /* [nodes] */
174
175 } loop_opt_params_t;
176
177 static loop_opt_params_t opt_params;
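/*
 * Sketch of how a driver could fill opt_params before running the
 * optimization. The numbers below are made-up examples for illustration,
 * not the defaults chosen by libFirm:
 *
 *     opt_params.max_loop_size            = 100;   // nodes
 *     opt_params.depth_adaption           = -50;   // percent per nest level
 *     opt_params.allowed_calls            = 0;
 *     opt_params.count_phi                = 1;
 *     opt_params.count_proj               = 0;
 *     opt_params.max_cc_size              = 5;     // nodes
 *     opt_params.max_unrolled_loop_size   = 400;   // nodes
 *     opt_params.allow_const_unrolling    = 1;
 *     opt_params.allow_invar_unrolling    = 0;
 *     opt_params.invar_unrolling_min_size = 20;    // nodes
 */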
178
179 /* Loop analysis information */
180 typedef struct loop_info_t {
181         unsigned nodes;        /* node count */
182         unsigned ld_st;        /* load and store nodes */
183         unsigned branches;     /* number of conditions */
184         unsigned calls;        /* number of calls */
185         unsigned cf_outs;      /* number of cf edges which leave the loop */
186         entry_edge cf_out;     /* single loop leaving cf edge */
187         int be_src_pos;        /* position of the single own backedge in the head */
188
189         /* for inversion */
190         unsigned cc_size;      /* nodes in the condition chain */
191
192         /* for unrolling */
193         unsigned max_unroll;   /* Number of unrolls satisfying max_loop_size */
194         unsigned exit_cond;    /* 1 if condition==true exits the loop.  */
195         unsigned latest_value:1;    /* 1 if condition is checked against latest counter value */
196         unsigned needs_backedge:1;  /* 0 if loop is completely unrolled */
197         unsigned decreasing:1;      /* Step operation is_Sub, or step is<0 */
198
199         /* IV information of a simple loop */
200         ir_node *start_val;
201         ir_node *step;
202         ir_node *end_val;
203         ir_node *iteration_phi;
204         ir_node *add;
205
206         ir_tarval *count_tar;               /* Number of loop iterations */
207
208         ir_node *duff_cond;                 /* Duff mod */
209         unrolling_kind_flag unroll_kind;    /* constant or invariant unrolling */
210 } loop_info_t;
211
212 /* Information about the current loop */
213 static loop_info_t loop_info;
214
215 /* Outs of the condition chain (loop inversion). */
216 static ir_node **cc_blocks;
217 /* df/cf edges with def in the condition chain */
218 static entry_edge *cond_chain_entries;
219 /* Array of df loops found in the condition chain. */
220 static entry_edge *head_df_loop;
221 /* Number of blocks in cc */
222 static unsigned inversion_blocks_in_cc;
223
224
225 /* Cf/df edges leaving the loop.
226  * Called entries here, as they are used to enter the loop with walkers. */
227 static entry_edge *loop_entries;
228 /* Number of unrolls to perform */
229 static int unroll_nr;
230 /* Phase is used to keep copies of nodes. */
231 static ir_phase *phase;
232
233 /* Loop operations.  */
234 typedef enum loop_op_t {
235         loop_op_inversion,
236         loop_op_unrolling,
237         loop_op_peeling
238 } loop_op_t;
239
240 /* Saves which loop operation to do until after basic tests. */
241 static loop_op_t loop_op;
242
243 /* Returns the maximum number of nodes allowed for the given nest depth. */
244 static unsigned get_max_nodes_adapted(unsigned depth)
245 {
246         double perc = 100.0 + (double)opt_params.depth_adaption;
247         double factor = pow(perc / 100.0, depth);
248
249         return (int)((double)opt_params.max_loop_size * factor);
250 }
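/*
 * Worked example for get_max_nodes_adapted (hypothetical parameters):
 * with max_loop_size = 100 and depth_adaption = -25 percent, a loop at
 * nest depth 2 is allowed
 *
 *     100 * ((100 - 25) / 100)^2 = 100 * 0.5625 = 56 nodes,
 *
 * i.e. a negative adaption shrinks the node budget for deeper loops,
 * a positive one enlarges it.
 */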
251
252 /* Reset nodes link. For use with a walker. */
253 static void reset_link(ir_node *node, void *env)
254 {
255         (void)env;
256         set_irn_link(node, NULL);
257 }
258
259 /* Returns non-zero iff the node (via its block) is in cur_loop. */
260 static unsigned is_in_loop(ir_node *node)
261 {
262         return (get_irn_loop(get_block(node)) == cur_loop);
263 }
264
265 /* Returns non-zero iff the edge at pos is a backedge
266  * whose predecessor lies in cur_loop. */
267 static unsigned is_own_backedge(ir_node *n, int pos)
268 {
269         return (is_backedge(n, pos) && is_in_loop(get_irn_n(n, pos)));
270 }
271
272 /* Finds the loop head and collects loop_info (node, branch and call counts, cf outs). */
273 static void get_loop_info(ir_node *node, void *env)
274 {
275         unsigned node_in_loop, pred_in_loop;
276         int i, arity;
277         (void)env;
278
279         arity = get_irn_arity(node);
280         for (i = 0; i < arity; i++) {
281                 ir_node *pred = get_irn_n(node, i);
282
283                 pred_in_loop = is_in_loop(pred);
284                 node_in_loop = is_in_loop(node);
285
286                 if (!node_in_loop && pred_in_loop && is_Block(node))
287                 {
288                         entry_edge entry;
289                         entry.node = node;
290                         entry.pos = i;
291                         entry.pred = pred;
292                         /* Count cf outs */
293                         ++loop_info.cf_outs;
294                         loop_info.cf_out = entry;
295                 }
296
297                 /* collect some loop information */
298                 if (node_in_loop) {
299                         if (is_Phi(node) && opt_params.count_phi)
300                                 ++loop_info.nodes;
301                         else if (is_Proj(node) && opt_params.count_proj)
302                                 ++loop_info.nodes;
303                         else if (!is_Confirm(node) && !is_Const(node) && !is_SymConst(node))
304                                 ++loop_info.nodes;
305
306                         if (is_Load(node) || is_Store(node))
307                                 ++loop_info.ld_st;
308
309                         if (is_Call(node))
310                                 ++loop_info.calls;
311
312                 }
313
314                 /* Find the loop's head, i.e. the block with a cfg pred outside of the loop */
315                 if (is_Block(node)) {
316                         const ir_edge_t *edge;
317                         unsigned outs_n = 0;
318
319                         /* Count inner-loop branches */
320                         foreach_out_edge_kind(node, edge, EDGE_KIND_NORMAL) {
321                                 if (is_Block(get_edge_src_irn(edge)) && is_in_loop(get_edge_src_irn(edge)))
322                                         ++outs_n;
323                         }
324                         if (outs_n > 1)
325                                 ++loop_info.branches;
326
327                         if (node_in_loop && !pred_in_loop && loop_head_valid) {
328                                 ir_node *cfgpred = get_Block_cfgpred(node, i);
329
330                                 if (!is_in_loop(cfgpred)) {
331                                         DB((dbg, LEVEL_5, "potential head %+F because inloop and pred %+F not inloop\n",
332                                                                 node, pred));
333                                         /* another head? We do not touch this. */
334                                         if (loop_head && loop_head != node) {
335                                                 loop_head_valid = 0;
336                                         } else {
337                                                 loop_head = node;
338                                         }
339                                 }
340                         }
341                 }
342         }
343 }
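/*
 * get_loop_info is a walker callback; its call site is further down in
 * this file, presumably a full graph walk after loop_info has been reset,
 * e.g.:
 *
 *     irg_walk_graph(current_ir_graph, get_loop_info, NULL, NULL);
 *
 * Afterwards loop_info holds the node/branch/call counts and the cf out
 * of cur_loop, and loop_head/loop_head_valid describe the head candidate.
 */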
344
345 /* Finds all edges with users outside of the loop
346  * and definition inside the loop. */
347 static void get_loop_entries(ir_node *node, void *env)
348 {
349         unsigned node_in_loop, pred_in_loop;
350         int i, arity;
351         (void) env;
352
353         arity = get_irn_arity(node);
354         for (i = 0; i < arity; ++i) {
355                 ir_node *pred = get_irn_n(node, i);
356
357                 pred_in_loop = is_in_loop(pred);
358                 node_in_loop = is_in_loop(node);
359
360                 if (pred_in_loop && !node_in_loop) {
361                         entry_edge entry;
362                         entry.node = node;
363                         entry.pos = i;
364                         entry.pred = pred;
365                         ARR_APP1(entry_edge, loop_entries, entry);
366                 }
367         }
368 }
369
370 /* ssa */
371 static ir_node *ssa_second_def;
372 static ir_node *ssa_second_def_block;
373
374 /**
375  * Walks the graph bottom up, searching for definitions and creates phis.
376  */
377 static ir_node *search_def_and_create_phis(ir_node *block, ir_mode *mode, int first)
378 {
379         int i;
380         int n_cfgpreds;
381         ir_graph *irg = get_irn_irg(block);
382         ir_node *phi;
383         ir_node **in;
384
385         DB((dbg, LEVEL_5, "ssa search_def_and_create_phis: block %N\n", block));
386
387         /* Prevents creation of phi that would be bad anyway.
388          * Dead and bad blocks. */
389         if (get_irn_arity(block) < 1 || is_Bad(block)) {
390                 DB((dbg, LEVEL_5, "ssa bad %N\n", block));
391                 return new_r_Bad(irg, mode);
392         }
393
394         if (block == ssa_second_def_block && !first) {
395                 DB((dbg, LEVEL_5, "ssa found second definition: use second def %N\n", ssa_second_def));
396                 return ssa_second_def;
397         }
398
399         /* already processed this block? */
400         if (irn_visited(block)) {
401                 ir_node *value = (ir_node *) get_irn_link(block);
402                 DB((dbg, LEVEL_5, "ssa already visited: use linked %N\n", value));
403                 return value;
404         }
405
406         assert(block != get_irg_start_block(irg));
407
408         /* a Block with only 1 predecessor needs no Phi */
409         n_cfgpreds = get_Block_n_cfgpreds(block);
410         if (n_cfgpreds == 1) {
411                 ir_node *pred_block = get_Block_cfgpred_block(block, 0);
412                 ir_node *value;
413
414                 DB((dbg, LEVEL_5, "ssa 1 pred: walk pred %N\n", pred_block));
415
416                 value = search_def_and_create_phis(pred_block, mode, 0);
417                 set_irn_link(block, value);
418                 mark_irn_visited(block);
419
420                 return value;
421         }
422
423         /* create a new Phi */
424         NEW_ARR_A(ir_node*, in, n_cfgpreds);
425         for (i = 0; i < n_cfgpreds; ++i)
426                 in[i] = new_r_Dummy(irg, mode);
427
428         phi = new_r_Phi(block, n_cfgpreds, in, mode);
429         /* Important: always keep block phi list up to date. */
430         add_Block_phi(block, phi);
431         DB((dbg, LEVEL_5, "ssa phi creation: link new phi %N to block %N\n", phi, block));
432         set_irn_link(block, phi);
433         mark_irn_visited(block);
434
435         /* set Phi predecessors */
436         for (i = 0; i < n_cfgpreds; ++i) {
437                 ir_node *pred_val;
438                 ir_node *pred_block = get_Block_cfgpred_block(block, i);
439                 assert(pred_block != NULL);
440                 pred_val = search_def_and_create_phis(pred_block, mode, 0);
441
442                 assert(pred_val != NULL);
443
444                 DB((dbg, LEVEL_5, "ssa phi pred:phi %N, pred %N\n", phi, pred_val));
445                 set_irn_n(phi, i, pred_val);
446         }
447
448         return phi;
449 }
450
451
452 /**
453  * Given a set of values this function constructs SSA-form for the users of the
454  * first value (the users are determined through the out-edges of the value).
455  * Works without using the dominance tree.
456  */
457 static void construct_ssa(ir_node *orig_block, ir_node *orig_val,
458                 ir_node *second_block, ir_node *second_val)
459 {
460         ir_graph *irg;
461         ir_mode *mode;
462         const ir_edge_t *edge;
463         const ir_edge_t *next;
464
465         assert(orig_block && orig_val && second_block && second_val &&
466                         "no parameter of construct_ssa may be NULL");
467
468         if (orig_val == second_val)
469                 return;
470
471         irg = get_irn_irg(orig_val);
472
473         ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
474         inc_irg_visited(irg);
475
476         mode = get_irn_mode(orig_val);
477         set_irn_link(orig_block, orig_val);
478         mark_irn_visited(orig_block);
479
480         ssa_second_def_block = second_block;
481         ssa_second_def       = second_val;
482
483         /* Only fix the users of the first, i.e. the original node */
484         foreach_out_edge_safe(orig_val, edge, next) {
485                 ir_node *user = get_edge_src_irn(edge);
486                 int j = get_edge_src_pos(edge);
487                 ir_node *user_block = get_nodes_block(user);
488                 ir_node *newval;
489
490                 /* ignore keeps */
491                 if (is_End(user))
492                         continue;
493
494                 DB((dbg, LEVEL_5, "original user %N\n", user));
495
496                 if (is_Phi(user)) {
497                         ir_node *pred_block = get_Block_cfgpred_block(user_block, j);
498                         newval = search_def_and_create_phis(pred_block, mode, 1);
499                 } else {
500                         newval = search_def_and_create_phis(user_block, mode, 1);
501                 }
502                 if (newval != user && !is_Bad(newval))
503                         set_irn_n(user, j, newval);
504         }
505
506         ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
507 }
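/*
 * Usage sketch: after a definition `pred` has been duplicated into
 * `cppred` (as the inversion code below does), the two definitions are
 * merged for all users of the original value:
 *
 *     ir_node *block   = get_nodes_block(pred);
 *     ir_node *cpblock = get_nodes_block(cppred);
 *     construct_ssa(block, pred, cpblock, cppred);
 *
 * Only the users of the first value (pred) are rewired; phis are created
 * on demand while walking the control flow bottom up.
 */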
508
509
510 /***** Unrolling Helper Functions *****/
511
512 /* Assign the copy with index nr to node n */
513 static void set_unroll_copy(ir_node *n, int nr, ir_node *cp)
514 {
515         unrolling_node_info *info;
516         assert(nr != 0 && "0 reserved");
517
518         info = (unrolling_node_info *)phase_get_irn_data(phase, n);
519         if (! info) {
520                 ir_node **arr;
521
522                 info = XMALLOCZ(unrolling_node_info);
523                 arr = NEW_ARR_F(ir_node *, unroll_nr);
524                 info->copies = arr;
525                 memset(info->copies, 0, (unroll_nr) * sizeof(ir_node *));
526
527                 phase_set_irn_data(phase, n, info);
528         }
529         /* Original node */
530         info->copies[0] = n;
531
532         info->copies[nr] = cp;
533 }
534
535 /* Returns a node's copy if it exists, else NULL. */
536 static ir_node *get_unroll_copy(ir_node *n, int nr)
537 {
538         ir_node             *cp;
539         unrolling_node_info *info = (unrolling_node_info *)phase_get_irn_data(phase, n);
540         if (! info)
541                 return NULL;
542
543         cp = info->copies[nr];
544         return cp;
545 }
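/*
 * The phase data of a node n is an array of length unroll_nr:
 * copies[0] is the original node, copies[1 .. unroll_nr - 1] are its
 * copies. Sketch (n and cp are hypothetical, 0 < c < unroll_nr):
 *
 *     set_unroll_copy(n, c, cp);
 *     assert(get_unroll_copy(n, c) == cp);
 *     assert(get_unroll_copy(n, 0) == n);  // original, stored as a side effect
 *
 * get_unroll_copy returns NULL for nodes that were never copied.
 */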
546
547
548 /***** Inversion Helper Functions *****/
549
550 /* Sets copy cp of node n. */
551 static void set_inversion_copy(ir_node *n, ir_node *cp)
552 {
553         phase_set_irn_data(phase, n, cp);
554 }
555
556 /* Returns the inversion copy of n, or NULL if none exists. */
557 static ir_node *get_inversion_copy(ir_node *n)
558 {
559         ir_node *cp = (ir_node *)phase_get_irn_data(phase, n);
560         return cp;
561 }
562
563 /* Resets block mark for given node. For use with walker */
564 static void reset_block_mark(ir_node *node, void * env)
565 {
566         (void) env;
567
568         if (is_Block(node))
569                 set_Block_mark(node, 0);
570 }
571
572 /* Returns the mark of the node's block (or of the node itself if it is a block).
573  * Used here to determine whether a node is in the condition chain. */
574 static unsigned is_nodes_block_marked(ir_node* node)
575 {
576         if (is_Block(node))
577                 return get_Block_mark(node);
578         else
579                 return get_Block_mark(get_block(node));
580 }
581
582 /* Extends a node's ins by the given node newnode.
583  * NOTE: This is slow if a node n needs to be extended more than once. */
584 static void extend_irn(ir_node *n, ir_node *newnode, int new_is_backedge)
585 {
586         ir_node **ins;
587         int i;
588         int arity = get_irn_arity(n);
589         int new_arity = arity + 1;
590         int *bes;
591
592         NEW_ARR_A(int, bes, new_arity);
593         NEW_ARR_A(ir_node *, ins, new_arity);
594
595         /* Save backedge info. */
596         /* Backedges are important!
597          * Another way would be recreating the looptree,
598          * but after that we cannot distinguish already processed loops
599          * from not yet processed ones. */
600         if (is_Block(n)) {
601                 for(i = 0; i < arity; ++i) {
602                         bes[i] = is_backedge(n, i);
603                 }
604                 bes[i] = new_is_backedge;
605         }
606
607         for(i = 0; i < arity; ++i) {
608                 ins[i] = get_irn_n(n, i);
609         }
610         ins[i] = newnode;
611
612         set_irn_in(n, new_arity, ins);
613
614         /* restore bes  */
615         if (is_Block(n)) {
616                 for(i = 0; i < new_arity; ++i) {
617                         if (bes[i])
618                                 set_backedge(n, i);
619                 }
620         }
621 }
622
623 /* Extends a block by a copy of its pred at pos,
624  * fixing also the phis in the same way. */
625 static void extend_ins_by_copy(ir_node *block, int pos)
626 {
627         ir_node *new_in;
628         ir_node *phi;
629         ir_node *pred;
630         assert(is_Block(block));
631
632         /* Extend block by copy of definition at pos */
633         pred = get_irn_n(block, pos);
634         new_in = get_inversion_copy(pred);
635         DB((dbg, LEVEL_5, "Extend block %N by %N cp of %N\n", block, new_in, pred));
636         extend_irn(block, new_in, 0);
637
638         /* Extend block phis by copy of definition at pos */
639         for_each_phi(block, phi) {
640                 ir_node *pred, *cp;
641
642                 pred = get_irn_n(phi, pos);
643                 cp = get_inversion_copy(pred);
644                 /* If the phi's in is not in the condition chain (e.g. a constant),
645                  * there is no copy. */
646                 if (cp == NULL)
647                         new_in = pred;
648                 else
649                         new_in = cp;
650
651                 DB((dbg, LEVEL_5, "Extend phi %N by %N cp of %N\n", phi, new_in, pred));
652                 extend_irn(phi, new_in, 0);
653         }
654 }
655
656 /* Returns the number of the block's backedges, with or without alien backedges. */
657 static int get_backedge_n(ir_node *block, unsigned with_alien)
658 {
659         int i;
660         int be_n = 0;
661         int arity = get_irn_arity(block);
662
663         assert(is_Block(block));
664
665         for (i = 0; i < arity; ++i) {
666                 ir_node *pred = get_irn_n(block, i);
667                 if (is_backedge(block, i) && (with_alien || is_in_loop(pred)))
668                         ++be_n;
669         }
670         return be_n;
671 }
672
673 /* Returns a raw copy of the given node.
674  * Attributes are kept/set according to the needs of loop inversion. */
675 static ir_node *copy_node(ir_node *node)
676 {
677         int i, arity;
678         ir_node *cp;
679
680         cp = exact_copy(node);
681         arity = get_irn_arity(node);
682
683         /* Keep backedge info */
684         for (i = 0; i < arity; ++i) {
685                 if (is_backedge(node, i))
686                         set_backedge(cp, i);
687         }
688
689         if (is_Block(cp)) {
690                 set_Block_mark(cp, 0);
691         }
692
693         return cp;
694 }
695
696
697 /**
698  * This walker copies all walked nodes.
699  * If the walk_condition is true for a node, it is copied.
700  * All nodes node_info->copy have to be NULL prior to every walk.
701  * Order of ins is important for later usage.
702  */
703 static void copy_walk(ir_node *node, walker_condition *walk_condition,
704                       ir_loop *set_loop)
705 {
706         int i;
707         int arity;
708         ir_node *cp;
709         ir_node **cpin;
710         ir_graph *irg = current_ir_graph;
711
712         /**
713          * break condition and cycle resolver, creating temporary node copies
714          */
715         if (get_irn_visited(node) >= get_irg_visited(irg)) {
716                 /* Here we rely on nodestate's copy being initialized with NULL */
717                 DB((dbg, LEVEL_5, "copy_walk: We have already visited %N\n", node));
718                 if (get_inversion_copy(node) == NULL) {
719                         cp = copy_node(node);
720                         set_inversion_copy(node, cp);
721
722                         DB((dbg, LEVEL_5, "The TEMP copy of %N is created %N\n", node, cp));
723                 }
724                 return;
725         }
726
727         /* Walk */
728         mark_irn_visited(node);
729
730         if (!is_Block(node)) {
731                 ir_node *pred = get_nodes_block(node);
732                 if (walk_condition(pred))
733                         DB((dbg, LEVEL_5, "walk block %N\n", pred));
734                 copy_walk(pred, walk_condition, set_loop);
735         }
736
737         arity = get_irn_arity(node);
738
739         NEW_ARR_A(ir_node *, cpin, arity);
740
741         for (i = 0; i < arity; ++i) {
742                 ir_node *pred = get_irn_n(node, i);
743
744                 if (walk_condition(pred)) {
745                         DB((dbg, LEVEL_5, "walk node %N\n", pred));
746                         copy_walk(pred, walk_condition, set_loop);
747                         cpin[i] = get_inversion_copy(pred);
748                         DB((dbg, LEVEL_5, "copy of %N gets new in %N which is copy of %N\n",
749                                                 node, get_inversion_copy(pred), pred));
750                 } else {
751                         cpin[i] = pred;
752                 }
753         }
754
755         /* copy node / finalize temp node */
756         if (get_inversion_copy(node) == NULL) {
757                 /* No temporary copy existent */
758                 cp = copy_node(node);
759                 set_inversion_copy(node, cp);
760                 DB((dbg, LEVEL_5, "The FINAL copy of %N is CREATED %N\n", node, cp));
761         } else {
762                 /* temporary copy is existent but without correct ins */
763                 cp = get_inversion_copy(node);
764                 DB((dbg, LEVEL_5, "The FINAL copy of %N is EXISTENT %N\n", node, cp));
765         }
766
767         if (!is_Block(node)) {
768                 ir_node *cpblock = get_inversion_copy(get_nodes_block(node));
769
770                 set_nodes_block(cp, cpblock );
771                 if (is_Phi(cp))
772                         add_Block_phi(cpblock, cp);
773         }
774
775         /* Keeps phi list of temporary node. */
776         set_irn_in(cp, ARR_LEN(cpin), cpin);
777 }
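/*
 * Sketch of how copy_walk is driven (see inversion_walk below): the walk
 * starts at the predecessors of the entry edges into the condition chain
 * and only follows nodes whose block is marked:
 *
 *     size_t i;
 *     inc_irg_visited(current_ir_graph);
 *     for (i = 0; i < ARR_LEN(head_entries); ++i) {
 *             ir_node *pred = get_irn_n(head_entries[i].node, head_entries[i].pos);
 *             copy_walk(pred, is_nodes_block_marked, cur_loop);
 *     }
 */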
778
779 /**
780  * This walker copies all walked nodes.
781  * If the walk_condition is true for a node, it is copied.
782  * All nodes node_info->copy have to be NULL prior to every walk.
783  * Order of ins is important for later usage.
784  * Takes copy_index, to phase-link copy at specific index.
785  */
786 static void copy_walk_n(ir_node *node, walker_condition *walk_condition,
787                         int copy_index)
788 {
789         int i;
790         int arity;
791         ir_node *cp;
792         ir_node **cpin;
793
794         /**
795          * break condition and cycle resolver, creating temporary node copies
796          */
797         if (irn_visited(node)) {
798                 /* Here we rely on nodestate's copy being initialized with NULL */
799                 DB((dbg, LEVEL_5, "copy_walk: We have already visited %N\n", node));
800                 if (get_unroll_copy(node, copy_index) == NULL) {
801                         ir_node *u;
802                         u = copy_node(node);
803                         set_unroll_copy(node, copy_index, u);
804                         DB((dbg, LEVEL_5, "The TEMP copy of %N is created %N\n", node, u));
805                 }
806                 return;
807         }
808
809         /* Walk */
810         mark_irn_visited(node);
811
812         if (!is_Block(node)) {
813                 ir_node *block = get_nodes_block(node);
814                 if (walk_condition(block))
815                         DB((dbg, LEVEL_5, "walk block %N\n", block));
816                 copy_walk_n(block, walk_condition, copy_index);
817         }
818
819         arity = get_irn_arity(node);
820         NEW_ARR_A(ir_node *, cpin, arity);
821
822         for (i = 0; i < arity; ++i) {
823                 ir_node *pred = get_irn_n(node, i);
824
825                 if (walk_condition(pred)) {
826                         DB((dbg, LEVEL_5, "walk node %N\n", pred));
827                         copy_walk_n(pred, walk_condition, copy_index);
828                         cpin[i] = get_unroll_copy(pred, copy_index);
829                 } else {
830                         cpin[i] = pred;
831                 }
832         }
833
834         /* copy node / finalize temp node */
835         cp = get_unroll_copy(node, copy_index);
836         if (cp == NULL || is_Unknown(cp)) {
837                 cp = copy_node(node);
838                 set_unroll_copy(node, copy_index, cp);
839                 DB((dbg, LEVEL_5, "The FINAL copy of %N is CREATED %N\n", node, cp));
840         } else {
841                 /* temporary copy is existent but without correct ins */
842                 cp = get_unroll_copy(node, copy_index);
843                 DB((dbg, LEVEL_5, "The FINAL copy of %N is EXISTENT %N\n", node, cp));
844         }
845
846         if (!is_Block(node)) {
847                 ir_node *cpblock = get_unroll_copy(get_nodes_block(node), copy_index);
848
849                 set_nodes_block(cp, cpblock );
850                 if (is_Phi(cp))
851                         add_Block_phi(cpblock, cp);
852         }
853
854         /* Keeps phi list of temporary node. */
855         set_irn_in(cp, ARR_LEN(cpin), cpin);
856 }
857
858 /* Removes all blocks with non-marked predecessors from the condition chain. */
859 static void unmark_not_allowed_cc_blocks(void)
860 {
861         size_t blocks = ARR_LEN(cc_blocks);
862         size_t i;
863
864         for(i = 0; i < blocks; ++i) {
865                 ir_node *block = cc_blocks[i];
866                 int a;
867                 int arity = get_irn_arity(block);
868
869                 /* Head is an exception. */
870                 if (block == loop_head)
871                         continue;
872
873                 for(a = 0; a < arity; ++a) {
874                         if (! is_nodes_block_marked(get_irn_n(block, a))) {
875                                 set_Block_mark(block, 0);
876                                 --inversion_blocks_in_cc;
877                                 DB((dbg, LEVEL_5, "Removed %N from cc (blocks in cc %d)\n",
878                                                 block, inversion_blocks_in_cc));
879
880                                 break;
881                         }
882                 }
883         }
884 }
885
886 /* Unmarks all cc blocks using cc_blocks, including the head.
887  * TODO: invert head for unrolling? */
888 static void unmark_cc_blocks(void)
889 {
890         size_t blocks = ARR_LEN(cc_blocks);
891         size_t i;
892
893         for(i = 0; i < blocks; ++i) {
894                 ir_node *block = cc_blocks[i];
895
896                 /* TODO Head is an exception. */
897                 /*if (block != loop_head)*/
898                 set_Block_mark(block, 0);
899         }
900         /*inversion_blocks_in_cc = 1;*/
901         inversion_blocks_in_cc = 0;
902
903         /* invalidate */
904         loop_info.cc_size = 0;
905 }
906
907 /**
908  * Populates head_entries with (node, pred_pos) tuples
909  * where the node's pred at pred_pos is in the cc but the node itself is not.
910  * Also finds df loops inside the cc.
911  * Head and condition chain blocks have been marked previously.
912  */
913 static void get_head_outs(ir_node *node, void *env)
914 {
915         int i;
916         int arity = get_irn_arity(node);
917         (void) env;
918
919         for (i = 0; i < arity; ++i) {
920                 if (!is_nodes_block_marked(node) && is_nodes_block_marked(get_irn_n(node, i))) {
921                         entry_edge entry;
922                         entry.node = node;
923                         entry.pos = i;
924                         /* Saving the predecessor as well seems redundant, but becomes
925                          * necessary when its position is changed before
926                          * it is dereferenced. */
927                         entry.pred = get_irn_n(node, i);
928                         ARR_APP1(entry_edge, cur_head_outs, entry);
929                 }
930         }
931
932         arity = get_irn_arity(loop_head);
933
934         /* Find df loops inside the cc */
935         if (is_Phi(node) && get_nodes_block(node) == loop_head) {
936                 for (i = 0; i < arity; ++i) {
937                         if (is_own_backedge(loop_head, i)) {
938                                 if (is_nodes_block_marked(get_irn_n(node, i))) {
939                                         entry_edge entry;
940                                         entry.node = node;
941                                         entry.pos = i;
942                                         entry.pred = get_irn_n(node, i);
943                                         ARR_APP1(entry_edge, head_df_loop, entry);
944                                         DB((dbg, LEVEL_5, "Found incc assignment node %N @%d is pred %N, graph %N %N\n",
945                                                         node, i, entry.pred, current_ir_graph, get_irg_start_block(current_ir_graph)));
946                                 }
947                         }
948                 }
949         }
950 }
951
952 /**
953  * Find condition chains, and add them to be inverted.
954  * A block belongs to the chain if a condition branches out of the loop.
955  * (Some blocks need to be removed once again.)
956  * Returns 1 if the given block belongs to the condition chain.
957  */
958 static unsigned find_condition_chain(ir_node *block)
959 {
960         const    ir_edge_t *edge;
961         unsigned mark = 0;
962         unsigned has_be = 0;
963         unsigned jmp_only;
964         unsigned nodes_n = 0;
965
966         mark_irn_visited(block);
967
968         DB((dbg, LEVEL_5, "condition_chains for block %N\n", block));
969
970         /* Get node count */
971         foreach_out_edge_kind(block, edge, EDGE_KIND_NORMAL) {
972                 ++nodes_n;
973         }
974
975         /* Check if node count would exceed maximum cc size.
976          * TODO
977          * This is not optimal, as we search depth-first and break here,
978          * continuing with another subtree. */
979         if (loop_info.cc_size + nodes_n > opt_params.max_cc_size) {
980                 set_Block_mark(block, 0);
981                 return 0;
982         }
983
984         /* Check if block only has a jmp instruction. */
985         jmp_only = 1;
986         foreach_out_edge(block, edge) {
987                 ir_node *src = get_edge_src_irn(edge);
988
989                 if (! is_Block(src) && ! is_Jmp(src)) {
990                         jmp_only = 0;
991                 }
992         }
993
994         /* Check whether one of the cf outs leaves the loop,
995          * or whether this block has backedge outs. */
996         foreach_block_succ(block, edge) {
997                 ir_node *src = get_edge_src_irn(edge);
998                 int pos = get_edge_src_pos(edge);
999
1000                 if (! is_in_loop(src))
1001                         mark = 1;
1002
1003                 /* Inverting blocks with backedge outs leads to a cf edge
1004                  * from the inverted head, into the inverted head (skipping the body).
1005                  * As the body becomes the new loop head,
1006                  * this would introduce another loop in the existing loop.
1007                  * This loop inversion cannot cope with this case. */
1008                 if (is_backedge(src, pos)) {
1009                         has_be = 1;
1010                         break;
1011                 }
1012         }
1013
1014         /* We need all predecessors to already belong to the condition chain.
1015          * Example of wrong case:  * == in cc
1016          *
1017          *     Head*             ,--.
1018          *    /|   \            B   |
1019          *   / A*  B           /    |
1020          *  / /\   /          ?     |
1021          *   /   C*      =>      D  |
1022          *      /  D           Head |
1023          *     /               A  \_|
1024          *                      C
1025          */
1026         /* Collect blocks containing only a Jmp.
1027          * Do not collect blocks with backedge outs. */
1028         if ((jmp_only == 1 || mark == 1) && has_be == 0) {
1029                 set_Block_mark(block, 1);
1030                 ++inversion_blocks_in_cc;
1031                 loop_info.cc_size += nodes_n;
1032                 DB((dbg, LEVEL_5, "block %N is part of condition chain\n", block));
1033                 ARR_APP1(ir_node *, cc_blocks, block);
1034         } else {
1035                 set_Block_mark(block, 0);
1036         }
1037
1038         foreach_block_succ(block, edge) {
1039                 ir_node *src = get_edge_src_irn( edge );
1040
1041                 if (is_in_loop(src) && ! irn_visited(src))
1042                         find_condition_chain(src);
1043         }
1044
1045         return mark;
1046 }
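/*
 * The chain search is started from the loop head with fresh block marks
 * and a fresh visited flag, as in loop_inversion below:
 *
 *     irg_walk_graph(current_ir_graph, reset_block_mark, NULL, NULL);
 *     cc_blocks = NEW_ARR_F(ir_node *, 0);
 *     inc_irg_visited(current_ir_graph);
 *     has_cc = find_condition_chain(loop_head);
 *
 * Blocks that were marked too optimistically are removed again by
 * unmark_not_allowed_cc_blocks().
 */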
1047
1048 /**
1049  * Rewires the copied condition chain. Removes backedges
1050  * as this condition chain is prior to the loop.
1051  * Copy of loop_head must have phi list and old (unfixed) backedge info of the loop head.
1052  * (loop_head is already fixed, we cannot rely on it.)
1053  */
1054 static void fix_copy_inversion(void)
1055 {
1056         ir_node *new_head;
1057         ir_node **ins;
1058         ir_node **phis;
1059         ir_node *phi, *next;
1060         ir_node *head_cp = get_inversion_copy(loop_head);
1061         ir_graph *irg    = get_irn_irg(head_cp);
1062         int arity        = get_irn_arity(head_cp);
1063         int backedges    = get_backedge_n(head_cp, 0);
1064         int new_arity    = arity - backedges;
1065         int pos;
1066         int i;
1067
1068         NEW_ARR_A(ir_node *, ins, new_arity);
1069
1070         pos = 0;
1071         /* Remove block backedges */
1072         for(i = 0; i < arity; ++i) {
1073                 if (!is_backedge(head_cp, i))
1074                         ins[pos++] = get_irn_n(head_cp, i);
1075         }
1076
1077         new_head = new_r_Block(irg, new_arity, ins);
1078
1079         phis = NEW_ARR_F(ir_node *, 0);
1080
1081         for_each_phi_safe(get_Block_phis(head_cp), phi, next) {
1082                 ir_node *new_phi;
1083                 NEW_ARR_A(ir_node *, ins, new_arity);
1084                 pos = 0;
1085                 for(i = 0; i < arity; ++i) {
1086                         if (!is_backedge(head_cp, i))
1087                                 ins[pos++] = get_irn_n(phi, i);
1088                 }
1089                 new_phi = new_rd_Phi(get_irn_dbg_info(phi),
1090                                 new_head, new_arity, ins,
1091                                 get_irn_mode(phi));
1092                 ARR_APP1(ir_node *, phis, new_phi);
1093         }
1094
1095         pos = 0;
1096         for_each_phi_safe(get_Block_phis(head_cp), phi, next) {
1097                 exchange(phi, phis[pos++]);
1098         }
1099
1100         exchange(head_cp, new_head);
1101
1102         DEL_ARR_F(phis);
1103 }
1104
1105
1106 /* Puts the original condition chain at the end of the loop,
1107  * i.e. after the body.
1108  * Relies on block phi list and correct backedges.
1109  */
1110 static void fix_head_inversion(void)
1111 {
1112         ir_node *new_head;
1113         ir_node **ins;
1114         ir_node *phi, *next;
1115         ir_node **phis;
1116         ir_graph *irg = get_irn_irg(loop_head);
1117         int arity     = get_irn_arity(loop_head);
1118         int backedges = get_backedge_n(loop_head, 0);
1119         int new_arity = backedges;
1120         int pos;
1121         int i;
1122
1123         NEW_ARR_A(ir_node *, ins, new_arity);
1124
1125         pos = 0;
1126         /* Keep only backedges */
1127         for(i = 0; i < arity; ++i) {
1128                 if (is_own_backedge(loop_head, i))
1129                         ins[pos++] = get_irn_n(loop_head, i);
1130         }
1131
1132         new_head = new_r_Block(irg, new_arity, ins);
1133
1134         phis = NEW_ARR_F(ir_node *, 0);
1135
1136         for_each_phi(loop_head, phi) {
1137                 ir_node *new_phi;
1138                 DB((dbg, LEVEL_5, "Fixing phi %N of loop head\n", phi));
1139
1140                 NEW_ARR_A(ir_node *, ins, new_arity);
1141
1142                 pos = 0;
1143                 for (i = 0; i < arity; ++i) {
1144                         ir_node *pred = get_irn_n(phi, i);
1145
1146                         if (is_own_backedge(loop_head, i)) {
1147                                 /* If assignment is in the condition chain,
1148                                  * we need to create a phi in the new loop head.
1149                                  * This can only happen for df, not cf. See find_condition_chain. */
1150                                 /*if (is_nodes_block_marked(pred)) {
1151                                         ins[pos++] = pred;
1152                                 } else {*/
1153                                 ins[pos++] = pred;
1154
1155                         }
1156                 }
1157
1158                 new_phi = new_rd_Phi(get_irn_dbg_info(phi),
1159                         new_head, new_arity, ins,
1160                         get_irn_mode(phi));
1161
1162                 ARR_APP1(ir_node *, phis, new_phi);
1163
1164                 DB((dbg, LEVEL_5, "fix inverted head should exch %N by %N (pos %d)\n", phi, new_phi, pos ));
1165         }
1166
1167         pos = 0;
1168         for_each_phi_safe(get_Block_phis(loop_head), phi, next) {
1169                 DB((dbg, LEVEL_5, "fix inverted exch phi %N by %N\n", phi, phis[pos]));
1170                 if (phis[pos] != phi)
1171                         exchange(phi, phis[pos]);
                     ++pos; /* pos must advance even if the exchange was skipped */
1172         }
1173
1174         DEL_ARR_F(phis);
1175
1176         DB((dbg, LEVEL_5, "fix inverted head exch head block %N by %N\n", loop_head, new_head));
1177         exchange(loop_head, new_head);
1178 }
1179
1180 /* Does the loop inversion.  */
1181 static void inversion_walk(entry_edge *head_entries)
1182 {
1183         size_t i;
1184
1185         /*
1186          * The order of rewiring bottom-up is crucial.
1187          * Any change of the order leads to lost information that would be needed later.
1188          */
1189
1190         ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1191
1192         /* 1. clone condition chain */
1193         inc_irg_visited(current_ir_graph);
1194
1195         for (i = 0; i < ARR_LEN(head_entries); ++i) {
1196                 entry_edge entry = head_entries[i];
1197                 ir_node *pred = get_irn_n(entry.node, entry.pos);
1198
1199                 DB((dbg, LEVEL_5, "\nInit walk block %N\n", pred));
1200
1201                 copy_walk(pred, is_nodes_block_marked, cur_loop);
1202         }
1203
1204         ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1205
1206         /* 2. Extends the head control flow successors ins
1207          *    with the definitions of the copied head node. */
1208         for (i = 0; i < ARR_LEN(head_entries); ++i) {
1209                 entry_edge head_out = head_entries[i];
1210
1211                 if (is_Block(head_out.node))
1212                         extend_ins_by_copy(head_out.node, head_out.pos);
1213         }
1214
1215         /* 3. construct_ssa for users of definitions in the condition chain,
1216          *    as there is now a second definition. */
1217         for (i = 0; i < ARR_LEN(head_entries); ++i) {
1218                 entry_edge head_out = head_entries[i];
1219
1220                 /* Ignore keepalives */
1221                 if (is_End(head_out.node))
1222                         continue;
1223
1224                 /* Construct ssa for assignments in the condition chain. */
1225                 if (!is_Block(head_out.node)) {
1226                         ir_node *pred, *cppred, *block, *cpblock;
1227
1228                         pred = head_out.pred;
1229                         cppred = get_inversion_copy(pred);
1230                         block = get_nodes_block(pred);
1231                         cpblock = get_nodes_block(cppred);
1232                         construct_ssa(block, pred, cpblock, cppred);
1233                 }
1234         }
1235
1236         /*
1237          * If there is an assignment in the condition chain
1238          * with a user also in the condition chain,
1239          * the dominance frontier is in the new loop head.
1240          * The dataflow loop is completely in the condition chain.
1241          * Goal:
1242          *  To be wired: >|
1243          *
1244          *  | ,--.   |
1245          * Phi_cp |  | copied condition chain
1246          * >| |   |  |
1247          * >| ?__/   |
1248          * >| ,-.
1249          *  Phi* |   | new loop head with newly created phi.
1250          *   |   |
1251          *  Phi  |   | original, inverted condition chain
1252          *   |   |   |
1253          *   ?__/    |
1254          *
1255          */
1256         for (i = 0; i < ARR_LEN(head_df_loop); ++i) {
1257                 entry_edge head_out = head_df_loop[i];
1258
1259                 /* Construct ssa for assignments in the condition chain. */
1260                 ir_node *pred, *cppred, *block, *cpblock;
1261
1262                 pred = head_out.pred;
1263                 cppred = get_inversion_copy(pred);
1264                 assert(cppred && pred);
1265                 block = get_nodes_block(pred);
1266                 cpblock = get_nodes_block(cppred);
1267                 construct_ssa(block, pred, cpblock, cppred);
1268         }
1269
1270         /* 4. Remove the ins which are no backedges from the original condition chain
1271          *    as the cc is now subsequent to the body. */
1272         fix_head_inversion();
1273
1274         /* 5. Remove the backedges of the copied condition chain,
1275          *    because it is going to be the new 'head' in advance to the loop. */
1276         fix_copy_inversion();
1277
1278 }
1279
1280 /* Performs loop inversion of cur_loop if possible and reasonable. */
1281 static void loop_inversion(void)
1282 {
1283         int      loop_depth;
1284         unsigned max_loop_nodes = opt_params.max_loop_size;
1285         unsigned max_loop_nodes_adapted;
1286         int      depth_adaption = opt_params.depth_adaption;
1287
1288         unsigned do_inversion = 1;
1289         unsigned has_cc = 0;
1290
1291         /* Depth of 0 is the procedure and 1 a topmost loop. */
1292         loop_depth = get_loop_depth(cur_loop) - 1;
1293
1294         /* Adaption is calculated in percent (see get_max_nodes_adapted). */
1295         max_loop_nodes_adapted = get_max_nodes_adapted(loop_depth);
1296
1297         DB((dbg, LEVEL_1, "max_nodes: %d\nmax_nodes_adapted %d at depth of %d (adaption %d)\n",
1298                         max_loop_nodes, max_loop_nodes_adapted, loop_depth, depth_adaption));
1299
1300         if (! (loop_info.nodes > 0))
1301                 return;
1302
1303 #if LOOP_IGNORE_NODE_LIMITS
1304         DB((dbg, LEVEL_1, "WARNING: Loop node limitations ignored."));
1305 #else
1306         if (loop_info.nodes > max_loop_nodes) {
1307                 /* Only for stats */
1308                 DB((dbg, LEVEL_1, "Nodes %d > allowed nodes %d\n",
1309                         loop_info.nodes, max_loop_nodes));
1310                 count_stats(stats.too_large);
1311                 /* no RETURN */
1312                 /* Adaption might change it */
1313         }
1314
1315         /* Limit processing to loops smaller than given parameter. */
1316         if (loop_info.nodes > max_loop_nodes_adapted) {
1317                 DB((dbg, LEVEL_1, "Nodes %d > allowed nodes (depth %d adapted) %d\n",
1318                         loop_info.nodes, loop_depth, max_loop_nodes_adapted));
1319                 count_stats(stats.too_large_adapted);
1320                 return;
1321         }
1322
1323         if (loop_info.calls > opt_params.allowed_calls) {
1324                 DB((dbg, LEVEL_1, "Calls %d > allowed calls %d\n",
1325                         loop_info.calls, opt_params.allowed_calls));
1326                 count_stats(stats.calls_limit);
1327                 return;
1328         }
1329 #endif
1330
1331         /*inversion_head_node_limit = INT_MAX;*/
1332         ir_reserve_resources(current_ir_graph, IR_RESOURCE_BLOCK_MARK);
1333
1334         /* Reset block marks.
1335          * We use block marks to flag blocks of the original condition chain. */
1336         irg_walk_graph(current_ir_graph, reset_block_mark, NULL, NULL);
1337
1338         /*loop_info.blocks = get_loop_n_blocks(cur_loop);*/
1339         cond_chain_entries = NEW_ARR_F(entry_edge, 0);
1340         head_df_loop = NEW_ARR_F(entry_edge, 0);
1341
1342         /*head_inversion_node_count = 0;*/
1343         inversion_blocks_in_cc = 0;
1344
1345         /* Use phase to keep copy of nodes from the condition chain. */
1346         phase = new_phase(current_ir_graph, phase_irn_init_default);
1347
1348         /* Search for condition chains and temporarily save the blocks in an array. */
1349         cc_blocks = NEW_ARR_F(ir_node *, 0);
1350         inc_irg_visited(current_ir_graph);
1351         has_cc = find_condition_chain(loop_head);
1352
1353         unmark_not_allowed_cc_blocks();
1354         DEL_ARR_F(cc_blocks);
1355
1356 #if LOOP_IGNORE_NODE_LIMITS
1357         (void) unmark_cc_blocks;
1358 #else
1359         /* Condition chain too large.
1360          * The loop should be small enough to fit into the cache. */
1361         /* TODO Of course, we should take a small enough cc in the first place,
1362          * which is not that simple. (bin packing)  */
1363         if (loop_info.cc_size > opt_params.max_cc_size) {
1364                 count_stats(stats.cc_limit_reached);
1365
1366                 do_inversion = 0;
1367
1368                 /* Unmark the cc blocks, including the head
1369                  * (see unmark_cc_blocks and its TODO about keeping the head for unrolling). */
1370                 unmark_cc_blocks();
1371
1372         }
1373 #endif
1374
1375         /* We also catch endless loops here,
1376          * because they do not have a condition chain. */
1377         if (inversion_blocks_in_cc < 1) {
1378                 do_inversion = 0;
1379                 DB((dbg, LEVEL_3,
1380                         "Loop contains %d (less than 1) invertible blocks => No Inversion done.\n",
1381                         inversion_blocks_in_cc));
1382         }
1383
1384         if (do_inversion) {
1385                 cur_head_outs = NEW_ARR_F(entry_edge, 0);
1386
1387                 /* Get all edges pointing into the condition chain. */
1388                 irg_walk_graph(current_ir_graph, get_head_outs, NULL, NULL);
1389
1390                 /* Do the inversion */
1391                 inversion_walk(cur_head_outs);
1392
1393                 DEL_ARR_F(cur_head_outs);
1394
1395                 /* Duplicated blocks changed doms */
1396                 set_irg_doms_inconsistent(current_ir_graph);
1397
1398                 count_stats(stats.inverted);
1399         }
1400
1401         /* free */
1402         phase_free(phase);
1403         DEL_ARR_F(cond_chain_entries);
1404         DEL_ARR_F(head_df_loop);
1405
1406         ir_free_resources(current_ir_graph, IR_RESOURCE_BLOCK_MARK);
1407 }
1408
1409 /* Fix the original loop_head's ins for the invariant unrolling case. */
1410 static void unrolling_fix_loop_head_inv(void)
1411 {
1412         ir_node *ins[2];
1413         ir_node *phi;
1414         ir_node *proj = new_Proj(loop_info.duff_cond, mode_X, 0);
1415         ir_node *head_pred = get_irn_n(loop_head, loop_info.be_src_pos);
1416         ir_node *loop_condition = get_unroll_copy(head_pred, unroll_nr - 1);
1417
1418         /* The original loop_head's ins are:
1419          * the duff block and the own backedge. */
1420
1421         ins[0] = loop_condition;
1422         ins[1] = proj;
1423         set_irn_in(loop_head, 2, ins);
1424         DB((dbg, LEVEL_4, "Rewire ins of block loophead %N to pred %N and duffs entry %N \n" , loop_head, ins[0], ins[1]));
1425
1426         for_each_phi(loop_head, phi) {
1427                 ir_node *pred = get_irn_n(phi, loop_info.be_src_pos);
1428                 /* TODO we think it is a phi, but for Mergesort it is not the case.*/
1429
1430                 ir_node *last_pred = get_unroll_copy(pred, unroll_nr - 1);
1431
1432                 ins[0] = last_pred;
1433                 ins[1] = (ir_node*)get_irn_link(phi);
1434                 set_irn_in(phi, 2, ins);
1435                 DB((dbg, LEVEL_4, "Rewire ins of loophead phi %N to pred %N and duffs entry %N \n" , phi, ins[0], ins[1]));
1436         }
1437 }
1438
1439 /* Removes previously created phis with only 1 in. */
1440 static void correct_phis(ir_node *node, void *env)
1441 {
1442         (void)env;
1443
1444         if (is_Phi(node) && get_irn_arity(node) == 1) {
1445                 ir_node *exch;
1446                 ir_node *in[1];
1447
1448                 in[0] = get_irn_n(node, 0);
1449
1450                 exch = new_rd_Phi(get_irn_dbg_info(node),
1451                     get_nodes_block(node), 1, in,
1452                         get_irn_mode(node));
1453
1454                 exchange(node, exch);
1455         }
1456 }
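/*
 * Such 1-in phis are produced by place_copies below when it rewires the
 * copied loop heads via set_irn_in(lower_phi, 1, ins). correct_phis is a
 * walker callback; its call site is further down in this file, presumably
 * a plain graph walk such as:
 *
 *     irg_walk_graph(current_ir_graph, correct_phis, NULL, NULL);
 */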
1457
1458 /* Unrolling: Rewire floating copies. */
1459 static void place_copies(int copies)
1460 {
1461         ir_node *loophead = loop_head;
1462         size_t i;
1463         int c;
1464         int be_src_pos = loop_info.be_src_pos;
1465
1466         /* Serialize loops by fixing their head ins.
1467          * The copies are processed first;
1468          * the original loop is done after that, to keep the backedge info. */
1469         for (c = 0; c < copies; ++c) {
1470                 ir_node *upper = get_unroll_copy(loophead, c);
1471                 ir_node *lower = get_unroll_copy(loophead, c + 1);
1472                 ir_node *phi;
1473                 ir_node *topmost_be_block = get_nodes_block(get_irn_n(loophead, be_src_pos));
1474
1475                 /* Important: get the preds first and then their copy. */
1476                 ir_node *upper_be_block = get_unroll_copy(topmost_be_block, c);
1477                 ir_node *new_jmp = new_r_Jmp(upper_be_block);
1478                 DB((dbg, LEVEL_5, " place_copies upper %N lower %N\n", upper, lower));
1479
1480                 DB((dbg, LEVEL_5, "topmost be block %N \n", topmost_be_block));
1481
1482                 if (loop_info.unroll_kind == constant) {
1483                         ir_node *ins[1];
1484                         ins[0] = new_jmp;
1485                         set_irn_in(lower, 1, ins);
1486
1487                         for_each_phi(loophead, phi) {
1488                                 ir_node *topmost_def = get_irn_n(phi, be_src_pos);
1489                                 ir_node *upper_def = get_unroll_copy(topmost_def, c);
1490                                 ir_node *lower_phi = get_unroll_copy(phi, c + 1);
1491
1492                                 /* It is possible that the value used
1493                                  * in the OWN backedge path is NOT defined in this loop. */
1494                                 if (is_in_loop(topmost_def))
1495                                         ins[0] = upper_def;
1496                                 else
1497                                         ins[0] = topmost_def;
1498
1499                                 set_irn_in(lower_phi, 1, ins);
1500                                 /* Need to replace phis with 1 in later. */
1501                         }
1502                 } else {
1503                         /* Invariant case */
1504                         /* Every node has 2 ins. One from the duff blocks
1505                          * and one from the previously unrolled loop. */
1506                         ir_node *ins[2];
1507                         /* Calculate corresponding projection of mod result for this copy c */
1508                         ir_node *proj = new_Proj(loop_info.duff_cond, mode_X, unroll_nr - c - 1);
1509                         DB((dbg, LEVEL_4, "New duff proj %N\n" , proj));
1510
1511                         ins[0] = new_jmp;
1512                         ins[1] = proj;
1513                         set_irn_in(lower, 2, ins);
1514                         DB((dbg, LEVEL_4, "Rewire ins of Block %N to pred %N and duffs entry %N \n" , lower, ins[0], ins[1]));
1515
1516                         for_each_phi(loophead, phi) {
1517                                 ir_node *topmost_phi_pred = get_irn_n(phi, be_src_pos);
1518                                 ir_node *upper_phi_pred;
1519                                 ir_node *lower_phi;
1520                                 ir_node *duff_phi;
1521
1522                                 lower_phi = get_unroll_copy(phi, c + 1);
1523                                 duff_phi = (ir_node*)get_irn_link(phi);
1524                                 DB((dbg, LEVEL_4, "DD Link of %N is %N\n" , phi, duff_phi));
1525
1526                                 /* Use the copy of the pred if it is defined inside the loop. */
1527                                 if (is_in_loop(topmost_phi_pred)) {
1528                                         upper_phi_pred = get_unroll_copy(topmost_phi_pred, c);
1529                                 } else {
1530                                         upper_phi_pred = topmost_phi_pred;
1531                                 }
1532
1533                                 ins[0] = upper_phi_pred;
1534                                 ins[1] = duff_phi;
1535                                 set_irn_in(lower_phi, 2, ins);
1536                                 DB((dbg, LEVEL_4, "Rewire ins of %N to pred %N and duffs entry %N \n" , lower_phi, ins[0], ins[1]));
1537                         }
1538                 }
1539         }
1540
1541         /* Reconnect last copy. */
1542         for (i = 0; i < ARR_LEN(loop_entries); ++i) {
1543                 entry_edge edge = loop_entries[i];
1544                 /* Last copy is at the bottom */
1545                 ir_node *new_pred = get_unroll_copy(edge.pred, copies);
1546                 set_irn_n(edge.node, edge.pos, new_pred);
1547         }
1548
1549         /* Fix the original loop's head.
1550          * Done at the end, as the ins and backedge info were needed before. */
1551         if (loop_info.unroll_kind == constant) {
1552                 ir_node *phi;
1553                 ir_node *head_pred = get_irn_n(loop_head, be_src_pos);
1554                 ir_node *loop_condition = get_unroll_copy(head_pred, unroll_nr - 1);
1555
1556                 set_irn_n(loop_head, loop_info.be_src_pos, loop_condition);
1557
1558                 for_each_phi(loop_head, phi) {
1559                         ir_node *pred = get_irn_n(phi, be_src_pos);
1560                         ir_node *last_pred;
1561
1562                         /* It is possible that the value used
1563                          * in the OWN backedge path is NOT assigned in this loop. */
1564                         if (is_in_loop(pred))
1565                                 last_pred = get_unroll_copy(pred, copies);
1566                         else
1567                                 last_pred = pred;
1568                         set_irn_n(phi, be_src_pos, last_pred);
1569                 }
1570
1571         } else {
1572                 unrolling_fix_loop_head_inv();
1573         }
1574 }
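
/* Illustrative sketch (assuming 2 copies and the constant case; not taken from
 * the original source):
 *
 *   loop_head -> body --Jmp--> copy1(head) -> copy1(body) --Jmp--> copy2(head) -> ...
 *
 * Each copy's head gets exactly one in, the Jmp created in the previous copy of
 * the backedge source block; the original head is rewired last so that its ins
 * and backedge information stay available until then. */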
1575
1576 /* Copies the cur_loop several times. */
1577 static void copy_loop(entry_edge *cur_loop_outs, int copies)
1578 {
1579         int c;
1580
1581         ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1582
1583         for (c = 0; c < copies; ++c) {
1584                 size_t i;
1585
1586                 inc_irg_visited(current_ir_graph);
1587
1588                 DB((dbg, LEVEL_5, "         ### Copy_loop  copy nr: %d ###\n", c));
1589                 for (i = 0; i < ARR_LEN(cur_loop_outs); ++i) {
1590                         entry_edge entry = cur_loop_outs[i];
1591                         ir_node *pred = get_irn_n(entry.node, entry.pos);
1592
1593                         copy_walk_n(pred, is_in_loop, c + 1);
1594                 }
1595         }
1596
1597         ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1598 }
1599
1600
1601 /* Creates a new phi from the given phi node omitting own bes,
1602  * using be_block as the supplier of backedge information. */
1603 static ir_node *clone_phis_sans_bes(ir_node *phi, ir_node *be_block, ir_node *dest_block)
1604 {
1605         ir_node **ins;
1606         int arity = get_irn_arity(phi);
1607         int i, c = 0;
1608         ir_node *newphi;
1609
1610         assert(get_irn_arity(phi) == get_irn_arity(be_block));
1611         assert(is_Phi(phi));
1612
1613         ins = NEW_ARR_F(ir_node *, arity);
1614         for (i = 0; i < arity; ++i) {
1615                 if (! is_own_backedge(be_block, i)) {
1616                         ins[c] = get_irn_n(phi, i);
1617                         ++c;
1618                 }
1619         }
1620
1621         newphi = new_r_Phi(dest_block, c, ins, get_irn_mode(phi));
1622
1623         set_irn_link(phi, newphi);
1624         DB((dbg, LEVEL_4, "Linking for duffs device %N to %N\n", phi, newphi));
1625
1626         return newphi;
1627 }
1628
1629 /* Creates a new block from the given block node omitting own bes,
1630  * using be_block as the supplier of backedge information. */
1631 static ir_node *clone_block_sans_bes(ir_node *node, ir_node *be_block)
1632 {
1633         ir_node **ins;
1634         int arity = get_irn_arity(node);
1635         int i, c = 0;
1636
1637         assert(get_irn_arity(node) == get_irn_arity(be_block));
1638         assert(is_Block(node));
1639
1640         NEW_ARR_A(ir_node *, ins, arity);
1641         for (i = 0; i < arity; ++i) {
1642                 if (! is_own_backedge(be_block, i)) {
1643                         ins[c] = get_irn_n(node, i);
1644                         ++c;
1645                 }
1646         }
1647
1648         return new_Block(c, ins);
1649 }
1650
1651 /* Creates a structure to calculate the absolute value of node op.
1652  * Returns the Mux node yielding the absolute value. */
1653 static ir_node *new_Abs(ir_node *op, ir_mode *mode)
1654 {
1655         ir_graph *irg      = get_irn_irg(op);
1656         ir_node  *block    = get_nodes_block(op);
1657         ir_node  *zero     = new_r_Const(irg, get_mode_null(mode));
1658         ir_node  *cmp      = new_r_Cmp(block, op, zero, ir_relation_less);
1659         ir_node  *minus_op = new_r_Minus(block, op, mode);
1660         ir_node  *mux      = new_r_Mux(block, cmp, op, minus_op, mode);
1661
1662         return mux;
1663 }
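
/* Note: the construction above corresponds to the C expression
 *   abs = (op < 0) ? -op : op;
 * realized as Cmp(op, 0, <), Minus(op) and a Mux selecting op or -op. */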
1664
1665
1666 /* Creates blocks for duffs device, using previously obtained
1667  * information about the iv.
1668  * TODO split */
1669 static void create_duffs_block(void)
1670 {
1671         ir_mode *mode;
1672
1673         ir_node *block1, *count_block, *duff_block;
1674         ir_node *ems, *ems_mod, *ems_div, *ems_mod_proj, *cmp_null,
1675                 *ems_mode_cond, *x_true, *x_false, *const_null;
1676         ir_node *true_val, *false_val;
1677         ir_node *ins[2];
1678
1679         ir_node *duff_mod, *proj, *cond;
1680
1681         ir_node *count, *correction, *unroll_c;
1682         ir_node *cmp_bad_count, *good_count, *bad_count, *count_phi, *bad_count_neg;
1683         ir_node *phi;
1684
1685         mode = get_irn_mode(loop_info.end_val);
1686         const_null = new_Const(get_mode_null(mode));
1687
1688         /* TODO naming
1689          * 1. Calculate a first approximation of the count.
1690          *    Condition: (end - start) % step == 0 */
1691         block1 = clone_block_sans_bes(loop_head, loop_head);
1692         DB((dbg, LEVEL_4, "Duff block 1 %N\n", block1));
1693
1694         /* Create loop entry phis in the first duff block
1695          * as it becomes the loop's preheader */
1696         for_each_phi(loop_head, phi) {
1697                 /* Returns the phi's pred if the phi would have arity 1 */
1698                 ir_node *new_phi = clone_phis_sans_bes(phi, loop_head, block1);
1699
1700                 DB((dbg, LEVEL_4, "HEAD %N phi %N\n", loop_head, phi));
1701                 DB((dbg, LEVEL_4, "BLOCK1 %N phi %N\n", block1, new_phi));
1702         }
1703
1704         ems = new_r_Sub(block1, loop_info.end_val, loop_info.start_val,
1705                 get_irn_mode(loop_info.end_val));
1706         DB((dbg, LEVEL_4, "BLOCK1 sub %N\n", ems));
1707
1712         DB((dbg, LEVEL_4, "mod ins %N %N\n", ems, loop_info.step));
1713         ems_mod = new_r_Mod(block1,
1714                 new_NoMem(),
1715                 ems,
1716                 loop_info.step,
1717                 mode,
1718                 op_pin_state_pinned);
1719         ems_div = new_r_Div(block1,
1720                 new_NoMem(),
1721                 ems,
1722                 loop_info.step,
1723                 mode,
1724                 op_pin_state_pinned);
1725
1726         DB((dbg, LEVEL_4, "New Mod node %N\n", ems_mod));
1727
1728         ems_mod_proj = new_r_Proj(ems_mod, mode_Iu, pn_Mod_res);
1729         cmp_null = new_r_Cmp(block1, ems_mod_proj, const_null, ir_relation_less);
1730         ems_mode_cond = new_r_Cond(block1, cmp_null);
1731
1732         /* ems % step == 0 */
1733         x_true = new_r_Proj(ems_mode_cond, mode_X, pn_Cond_true);
1734         /* ems % step != 0 */
1735         x_false = new_r_Proj(ems_mode_cond, mode_X, pn_Cond_false);
1736
1737         /* 2. Second block.
1738          * Assures that duffs device receives a valid count.
1739          * Condition:
1740          *     decreasing: count < 0
1741          *     increasing: count > 0
1742          */
1743         ins[0] = x_true;
1744         ins[1] = x_false;
1745
1746         count_block = new_Block(2, ins);
1747         DB((dbg, LEVEL_4, "Duff block 2 %N\n", count_block));
1748
1749
1750         /* Increase the loop-taken count depending on whether the loop condition
1751          * compares against the latest iv value. */
1752         if (loop_info.latest_value == 1) {
1753                 /* ems % step == 0 :  +0 */
1754                 true_val = new_Const(get_mode_null(mode));
1755                 /* ems % step != 0 :  +1 */
1756                 false_val = new_Const(get_mode_one(mode));
1757         } else {
1758                 ir_tarval *tv_two = new_tarval_from_long(2, mode);
1759                 /* ems % step == 0 :  +1 */
1760                 true_val = new_Const(get_mode_one(mode));
1761                 /* ems % step != 0 :  +2 */
1762                 false_val = new_Const(tv_two);
1763         }
1764
1765         ins[0] = true_val;
1766         ins[1] = false_val;
1767
1768         correction = new_r_Phi(count_block, 2, ins, mode);
1769
1770         count = new_r_Proj(ems_div, mode, pn_Div_res);
1771
1772         /* (end - start) / step  +  correction */
1773         count = new_Add(count, correction, mode);
1774
1775         /* We preconditioned the loop to be tail-controlled.
1776          * So if count is 'wrong', i.e. 0 or negative/positive
1777          * (depending on the step direction), we still take the loop once
1778          * (tail-controlled) and leave the exit to the existing condition. */
1780
1781         /* Depending on step direction, we have to check for > or < 0 */
1782         if (loop_info.decreasing == 1) {
1783                 cmp_bad_count = new_r_Cmp(count_block, count, const_null,
1784                                           ir_relation_less);
1785         } else {
1786                 cmp_bad_count = new_r_Cmp(count_block, count, const_null,
1787                                           ir_relation_greater);
1788         }
1789
1790         bad_count_neg = new_r_Cond(count_block, cmp_bad_count);
1791         good_count = new_Proj(bad_count_neg, mode_X, pn_Cond_true);
1792         bad_count = new_Proj(bad_count_neg, mode_X, pn_Cond_false);
1793
1794         /* 3. Duff Block
1795          *    Contains the Mod to decide which loop copy to start from. */
1796
1797         ins[0] = good_count;
1798         ins[1] = bad_count;
1799         duff_block = new_Block(2, ins);
1800         DB((dbg, LEVEL_4, "Duff block 3 %N\n", duff_block));
1801
1802         /* Get absolute value */
1803         ins[0] = new_Abs(count, mode);
1804         /* Manually feed the aforementioned count = 1 (bad case) */
1805         ins[1] = new_Const(get_mode_one(mode));
1806         count_phi = new_r_Phi(duff_block, 2, ins, mode);
1807
1808         unroll_c = new_Const(new_tarval_from_long((long)unroll_nr, mode));
1809
1810         /* count % unroll_nr */
1811         duff_mod = new_r_Mod(duff_block,
1812                 new_NoMem(),
1813                 count_phi,
1814                 unroll_c,
1815                 mode,
1816                 op_pin_state_pinned);
1817
1818
1819         proj = new_Proj(duff_mod, mode, pn_Mod_res);
1820         /* The Cond is NOT automatically created in the block of the proj! */
1821         cond = new_r_Cond(duff_block, proj);
1822
1823         loop_info.duff_cond = cond;
1824 }
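
/* Worked example for the count computed above (hypothetical values, for
 * illustration only): start = 0, end = 10, step = 3, latest_value == 1:
 *   ems        = end - start              = 10
 *   ems % step = 1 (!= 0)                 -> correction = 1
 *   count      = ems / step + correction  = 3 + 1 = 4 loop passes
 * count % unroll_nr (the Mod in the duff block) then selects which of the
 * unrolled copies is entered first. */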
1825
1826 /* Returns 1 if the given node is not in the loop,
1827  * or if it is a phi of the loop head with only loop-invariant defs.
1828  */
1829 static unsigned is_loop_invariant_def(ir_node *node)
1830 {
1831         int i;
1832
1833         if (! is_in_loop(node)) {
1834                 DB((dbg, LEVEL_4, "Not in loop %N\n", node));
1835                 /* || is_Const(node) || is_SymConst(node)) {*/
1836                 return 1;
1837         }
1838
1839         /* If this is a phi of the loophead shared by more than 1 loop,
1840          * we need to check if all defs are not in the loop.  */
1841         if (is_Phi(node)) {
1842                 ir_node *block;
1843                 block = get_nodes_block(node);
1844
1845                 /* To prevent unexpected situations. */
1846                 if (block != loop_head) {
1847                         return 0;
1848                 }
1849
1850                 for (i = 0; i < get_irn_arity(node); ++i) {
1851                         /* On own backedges the phi may only depend on itself. */
1852                         if (is_own_backedge(block, i) && get_irn_n(node, i) != node)
1853                                 return 0;
1854                 }
1855                 DB((dbg, LEVEL_4, "invar %N\n", node));
1856                 return 1;
1857         }
1858         DB((dbg, LEVEL_4, "Not invar %N\n", node));
1859
1860         return 0;
1861 }
1862
1863 /* Returns 1 if one pred of node is invariant and the other is not.
1864  * invar_pred and other are set analogously. */
1865 static unsigned get_invariant_pred(ir_node *node, ir_node **invar_pred, ir_node **other)
1866 {
1867         ir_node *pred0 = get_irn_n(node, 0);
1868         ir_node *pred1 = get_irn_n(node, 1);
1869
1870         *invar_pred = NULL;
1871         *other = NULL;
1872
1873         if (is_loop_invariant_def(pred0)) {
1874                 DB((dbg, LEVEL_4, "pred0 invar %N\n", pred0));
1875                 *invar_pred = pred0;
1876                 *other = pred1;
1877         }
1878
1879         if (is_loop_invariant_def(pred1)) {
1880                 DB((dbg, LEVEL_4, "pred1 invar %N\n", pred1));
1881
1882                 if (*invar_pred != NULL) {
1883                         /* RETURN. We do not want both preds to be invariant. */
1884                         return 0;
1885                 }
1886
1887                 *other = pred0;
1888                 *invar_pred = pred1;
1889                 return 1;
1890         } else {
1891                 DB((dbg, LEVEL_4, "pred1 not invar %N\n", pred1));
1892
1893                 if (*invar_pred != NULL)
1894                         return 1;
1895                 else
1896                         return 0;
1897         }
1898 }
1899
1900 /* Starts from a phi that may belong to an iv.
1901  * Returns 1 if all own-backedge preds of iteration_phi originate from the
1902  * same Add/Sub and a start_val suitable for the given role was found;
1903  * loop_info.start_val and loop_info.add are set accordingly. */
1904 static unsigned get_start_and_add(ir_node *iteration_phi, unrolling_kind_flag role)
1905 {
1906         int i;
1907         ir_node *found_add = loop_info.add;
1908         int arity = get_irn_arity(iteration_phi);
1909
1910         DB((dbg, LEVEL_4, "Find start and add from %N\n", iteration_phi));
1911
1912         for (i = 0; i < arity; ++i) {
1913
1914                 /* Find start_val which needs to be pred of the iteration_phi.
1915                  * If start_val already known, sanity check. */
1916                 if (!is_backedge(get_nodes_block(loop_info.iteration_phi), i)) {
1917                         ir_node *found_start_val = get_irn_n(loop_info.iteration_phi, i);
1918
1919                         DB((dbg, LEVEL_4, "found_start_val %N\n", found_start_val));
1920
1921                         /* We already found a start_val; it always has to be the same. */
1922                         if (loop_info.start_val && found_start_val != loop_info.start_val)
1923                                 return 0;
1924
1925                         if ((role == constant) && !(is_SymConst(found_start_val) || is_Const(found_start_val)))
1926                                 return 0;
1927                         else if ((role == invariant) && !is_loop_invariant_def(found_start_val))
1928                                 return 0;
1929
1930                         loop_info.start_val = found_start_val;
1931                 }
1932
1933                 /* The phi has to be in the loop head.
1934                  * Follow all own backedges. Every value supplied from these preds of the phi
1935                  * needs to originate from the same add. */
1936                 if (is_own_backedge(get_nodes_block(loop_info.iteration_phi), i)) {
1937                         ir_node *new_found = get_irn_n(loop_info.iteration_phi,i);
1938
1939                         DB((dbg, LEVEL_4, "is add? %N\n", new_found));
1940
1941                         if (! (is_Add(new_found) || is_Sub(new_found)) || (found_add && found_add != new_found))
1942                                 return 0;
1943                         else
1944                                 found_add = new_found;
1945                 }
1946         }
1947
1948         loop_info.add = found_add;
1949
1950         return 1;
1951 }
1952
1953
1954 /* Returns 1 if one pred of node is a const value and the other is not.
1955  * const_pred and other are set analogously. */
1956 static unsigned get_const_pred(ir_node *node, ir_node **const_pred, ir_node **other)
1957 {
1958         ir_node *pred0 = get_irn_n(node, 0);
1959         ir_node *pred1 = get_irn_n(node, 1);
1960
1961         DB((dbg, LEVEL_4, "Checking for constant pred of %N\n", node));
1962
1963         *const_pred = NULL;
1964         *other = NULL;
1965
1966         /*DB((dbg, LEVEL_4, "is %N const\n", pred0));*/
1967         if (is_Const(pred0) || is_SymConst(pred0)) {
1968                 *const_pred = pred0;
1969                 *other = pred1;
1970         }
1971
1972         /*DB((dbg, LEVEL_4, "is %N const\n", pred1));*/
1973         if (is_Const(pred1) || is_SymConst(pred1)) {
1974                 if (*const_pred != NULL) {
1975                         /* RETURN. We do not want both preds to be constant. */
1976                         return 0;
1977                 }
1978
1979                 *other = pred0;
1980                 *const_pred = pred1;
1981         }
1982
1983         if (*const_pred == NULL)
1984                 return 0;
1985         else
1986                 return 1;
1987 }
1988
1989 /* Returns 1 if the loop exits within 2 further steps of the iv.
1990  * norm_proj is the relation for which we stay in the loop. */
1991 static unsigned simulate_next(ir_tarval **count_tar,
1992                 ir_tarval *stepped, ir_tarval *step_tar, ir_tarval *end_tar,
1993                 ir_relation norm_proj)
1994 {
1995         ir_tarval *next;
1996
1997         DB((dbg, LEVEL_4, "Loop taken if (stepped)%ld %s (end)%ld ",
1998                                 get_tarval_long(stepped),
1999                                 get_relation_string((norm_proj)),
2000                                 get_tarval_long(end_tar)));
2001         DB((dbg, LEVEL_4, "comparing latest value %d\n", loop_info.latest_value));
2002
2003         /* If current iv does not stay in the loop,
2004          * this run satisfied the exit condition. */
2005         if (! (tarval_cmp(stepped, end_tar) & norm_proj))
2006                 return 1;
2007
2008         DB((dbg, LEVEL_4, "Result: (stepped)%ld IS %s (end)%ld\n",
2009                                 get_tarval_long(stepped),
2010                                 get_relation_string(tarval_cmp(stepped, end_tar)),
2011                                 get_tarval_long(end_tar)));
2012
2013         /* next step */
2014         if (is_Add(loop_info.add))
2015                 next = tarval_add(stepped, step_tar);
2016         else
2017                 /* sub */
2018                 next = tarval_sub(stepped, step_tar, get_irn_mode(loop_info.end_val));
2019
2020         DB((dbg, LEVEL_4, "Loop taken if %ld %s %ld ",
2021                                 get_tarval_long(next),
2022                                 get_relation_string(norm_proj),
2023                                 get_tarval_long(end_tar)));
2024         DB((dbg, LEVEL_4, "comparing latest value %d\n", loop_info.latest_value));
2025
2026         /* Increase steps. */
2027         *count_tar = tarval_add(*count_tar, get_tarval_one(get_tarval_mode(*count_tar)));
2028
2029         /* Next has to fail the loop condition, or we will never exit. */
2030         if (! (tarval_cmp(next, end_tar) & norm_proj))
2031                 return 1;
2032         else
2033                 return 0;
2034 }
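
/* Worked example (hypothetical values): stepped = 9, step = 3, end = 10,
 * norm_proj = ir_relation_less:
 *   9 < 10 holds, so this pass stays in the loop;
 *   next = 9 + 3 = 12 and 12 < 10 fails, so the following pass exits.
 * count_tar is therefore increased once and 1 is returned. */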
2035
2036 /* Check if loop meets requirements for a 'simple loop':
2037  * - Exactly one cf out
2038  * - Allowed calls
2039  * - Max nodes after unrolling
2040  * - tail-controlled
2041  * - exactly one be
2042  * - cmp
2043  * Returns the Cmp node or NULL. */
2044 static ir_node *is_simple_loop(void)
2045 {
2046         int arity, i;
2047         ir_node *loop_block, *exit_block, *projx, *cond, *cmp;
2048
2049         /* Maximum of one condition, and no endless loops. */
2050         if (loop_info.cf_outs != 1)
2051                 return NULL;
2052
2053         DB((dbg, LEVEL_4, "1 loop exit\n"));
2054
2055 #if LOOP_IGNORE_NODE_LIMITS
2056         /* Ignore the loop size. Probably not wise outside of test cases. */
2057         loop_info.max_unroll = 40;
2058 #else
2059         /* Calculate maximum unroll_nr keeping node count below limit. */
2060         loop_info.max_unroll = (int)((double)opt_params.max_unrolled_loop_size / (double)loop_info.nodes);
2061         if (loop_info.max_unroll < 2) {
2062                 count_stats(stats.too_large);
2063                 return NULL;
2064         }
2065 #endif
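        /* Example (with the defaults from set_loop_params below): a loop of
         * 100 nodes and max_unrolled_loop_size = 400 gives max_unroll = 4. */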
2066
2067
2068         DB((dbg, LEVEL_4, "maximum unroll factor %u, to not exceed node limit\n",
2069                 loop_info.max_unroll));
2070
2071         arity = get_irn_arity(loop_head);
2072         /* RETURN if we have more than 1 be. */
2073         /* Get my backedges without alien bes. */
2074         loop_block = NULL;
2075         for (i = 0; i < arity; ++i) {
2076                 ir_node *pred = get_irn_n(loop_head, i);
2077                 if (is_own_backedge(loop_head, i)) {
2078                         if (loop_block)
2079                                 /* Our simple loops may have only one backedge. */
2080                                 return NULL;
2081                         else {
2082                                 loop_block = get_nodes_block(pred);
2083                                 loop_info.be_src_pos = i;
2084                         }
2085                 }
2086         }
2087
2088         DB((dbg, LEVEL_4, "loop has 1 own backedge.\n"));
2089
2090         exit_block = get_nodes_block(loop_info.cf_out.pred);
2091         /* The loop has to be tail-controlled.
2092          * This can be changed/improved,
2093          * but we would need a duff iv. */
2094         if (exit_block != loop_block)
2095                 return NULL;
2096
2097         DB((dbg, LEVEL_4, "tail-controlled loop.\n"));
2098
2099         /* find value on which loop exit depends */
2100         projx = loop_info.cf_out.pred;
2101         cond = get_irn_n(projx, 0);
2102         cmp = get_irn_n(cond, 0);
2103
2104         if (!is_Cmp(cmp))
2105                 return NULL;
2106
2107         DB((dbg, LEVEL_5, "Proj of the exit Cond is %ld\n", get_Proj_proj(projx)));
2108
2109         switch(get_Proj_proj(projx)) {
2110                 case pn_Cond_false:
2111                         loop_info.exit_cond = 0;
2112                         break;
2113                 case pn_Cond_true:
2114                         loop_info.exit_cond = 1;
2115                         break;
2116                 default:
2117                         panic("Cond Proj_proj other than true/false");
2118         }
2119
2120         DB((dbg, LEVEL_4, "Valid Cmp.\n"));
2121         return cmp;
2122 }
2123
2124 /* Returns 1 if all nodes are mode_Iu or mode_Is. */
2125 static unsigned are_mode_I(ir_node *n1, ir_node* n2, ir_node *n3)
2126 {
2127         ir_mode *m1 = get_irn_mode(n1);
2128         ir_mode *m2 = get_irn_mode(n2);
2129         ir_mode *m3 = get_irn_mode(n3);
2130
2131         if ((m1 == mode_Iu && m2 == mode_Iu && m3 == mode_Iu) ||
2132             (m1 == mode_Is && m2 == mode_Is && m3 == mode_Is))
2133                 return 1;
2134         else
2135                 return 0;
2136 }
2137
2138 /* Checks if cur_loop is a simple tail-controlled counting loop
2139  * with start and end value loop invariant, step constant. */
2140 static unsigned get_unroll_decision_invariant(void)
2141 {
2142
2143         ir_node   *cmp, *iteration_path;
2144         unsigned   success, is_latest_val;
2145         ir_tarval *step_tar;
2146         ir_mode   *mode;
2147
2148
2149         /* RETURN if loop is not 'simple' */
2150         cmp = is_simple_loop();
2151         if (cmp == NULL)
2152                 return 0;
2153
2154         /* Use a minimal size for the invariant unrolled loop,
2155          * as duffs device produces overhead */
2156         if (loop_info.nodes < opt_params.invar_unrolling_min_size)
2157                 return 0;
2158
2161         success = get_invariant_pred(cmp, &loop_info.end_val, &iteration_path);
2162         DB((dbg, LEVEL_4, "pred invar %d\n", success));
2163
2164         if (! success)
2165                 return 0;
2166
2167         DB((dbg, LEVEL_4, "Invariant End_val %N, other %N\n", loop_info.end_val, iteration_path));
2168
2169         /* We may find the add or the phi first.
2170          * Until now we only have end_val. */
2171         if (is_Add(iteration_path) || is_Sub(iteration_path)) {
2172
2173                 /* We test against the latest value of the iv. */
2174                 is_latest_val = 1;
2175
2176                 loop_info.add = iteration_path;
2177                 DB((dbg, LEVEL_4, "Case 1: Got add %N (maybe not sane)\n", loop_info.add));
2178
2179                 /* Preds of the add should be step and the iteration_phi */
2180                 success = get_const_pred(loop_info.add, &loop_info.step, &loop_info.iteration_phi);
2181                 if (! success)
2182                         return 0;
2183
2184                 DB((dbg, LEVEL_4, "Got step %N\n", loop_info.step));
2185
2186                 if (! is_Phi(loop_info.iteration_phi))
2187                         return 0;
2188
2189                 DB((dbg, LEVEL_4, "Got phi %N\n", loop_info.iteration_phi));
2190
2191                 /* Find start_val.
2192                  * Does necessary sanity check of add, if it is already set.  */
2193                 success = get_start_and_add(loop_info.iteration_phi, invariant);
2194                 if (! success)
2195                         return 0;
2196
2197                 DB((dbg, LEVEL_4, "Got start A  %N\n", loop_info.start_val));
2198
2199         } else if (is_Phi(iteration_path)) {
2200                 ir_node *new_iteration_phi;
2201
2202                 /* We compare with the value the iv had entering this run. */
2203                 is_latest_val = 0;
2204
2205                 loop_info.iteration_phi = iteration_path;
2206                 DB((dbg, LEVEL_4, "Case 2: Got phi %N\n", loop_info.iteration_phi));
2207
2208                 /* Find start_val and add-node.
2209                  * Does necessary sanity check of add, if it is already set.  */
2210                 success = get_start_and_add(loop_info.iteration_phi, invariant);
2211                 if (! success)
2212                         return 0;
2213
2214                 DB((dbg, LEVEL_4, "Got start B %N\n", loop_info.start_val));
2215                 DB((dbg, LEVEL_4, "Got add or sub %N\n", loop_info.add));
2216
2217                 success = get_const_pred(loop_info.add, &loop_info.step, &new_iteration_phi);
2218                 if (! success)
2219                         return 0;
2220
2221                 DB((dbg, LEVEL_4, "Got step (B) %N\n", loop_info.step));
2222
2223                 if (loop_info.iteration_phi != new_iteration_phi)
2224                         return 0;
2225
2226         } else {
2227                 return 0;
2228         }
2229
2230         mode = get_irn_mode(loop_info.end_val);
2231
2232         DB((dbg, LEVEL_4, "start %N, end %N, step %N\n",
2233                                 loop_info.start_val, loop_info.end_val, loop_info.step));
2234
2235         if (mode != mode_Is && mode != mode_Iu)
2236                 return 0;
2237
2238         /* TODO necessary? */
2239         if (!are_mode_I(loop_info.start_val, loop_info.step, loop_info.end_val))
2240                 return 0;
2241
2242         DB((dbg, LEVEL_4, "mode integer\n"));
2243
2244         step_tar = get_Const_tarval(loop_info.step);
2245
2246         if (tarval_is_null(step_tar)) {
2247                 /* TODO Might be worth a warning. */
2248                 return 0;
2249         }
2250
2251         DB((dbg, LEVEL_4, "step is not 0\n"));
2252
        loop_info.latest_value = is_latest_val;
2253         create_duffs_block();
2254
2255         return loop_info.max_unroll;
2256 }
2257
2258 /* Returns unroll factor,
2259  * given maximum unroll factor and number of loop passes. */
2260 static unsigned get_preferred_factor_constant(ir_tarval *count_tar)
2261 {
2262         ir_tarval *tar_6, *tar_5, *tar_4, *tar_3, *tar_2;
2263         unsigned prefer;
2264         ir_mode *mode = get_irn_mode(loop_info.end_val);
2265
2266         tar_6 = new_tarval_from_long(6, mode);
2267         tar_5 = new_tarval_from_long(5, mode);
2268         tar_4 = new_tarval_from_long(4, mode);
2269         tar_3 = new_tarval_from_long(3, mode);
2270         tar_2 = new_tarval_from_long(2, mode);
2271
2272         /* loop passes % {6, 5, 4, 3, 2} == 0  */
2273         if (tarval_is_null(tarval_mod(count_tar, tar_6)))
2274                 prefer = 6;
2275         else if (tarval_is_null(tarval_mod(count_tar, tar_5)))
2276                 prefer = 5;
2277         else if (tarval_is_null(tarval_mod(count_tar, tar_4)))
2278                 prefer = 4;
2279         else if (tarval_is_null(tarval_mod(count_tar, tar_3)))
2280                 prefer = 3;
2281         else if (tarval_is_null(tarval_mod(count_tar, tar_2)))
2282                 prefer = 2;
2283         else {
2284                 /* gcd(max_unroll, count_tar) */
2285                 int a = loop_info.max_unroll;
2286                 int b = (int)get_tarval_long(count_tar);
2287                 int c;
2288
2289                 DB((dbg, LEVEL_4, "gcd of max_unroll %d and count_tar %d: ", a, b));
2290
2291                 do {
2292                         c = a % b;
2293                         a = b; b = c;
2294                 } while (c != 0);
2295
2296                 DB((dbg, LEVEL_4, "%d\n", a));
2297                 return a;
2298         }
2299
2300         DB((dbg, LEVEL_4, "preferred unroll factor %d\n", prefer));
2301
2302         /*
2303          * If our preference is greater than the allowed unroll factor,
2304          * we could either reduce the preferred factor and avoid a duffs device block,
2305          * or create a duffs device block, whose start loop we would know at
2306          * compile time in this case (constants only).
2307          * The latter yields graph B below, but for code generation we would
2308          * want graph A. As the graphs are equivalent, we can only reduce the
2309          * preferred factor.
2310          * A)                   B)
2311          *     PreHead             PreHead
2312          *        |      ,--.         |   ,--.
2313          *         \ Loop1   \        Loop2   \
2314          *          \  |     |       /  |     |
2315          *           Loop2   /      / Loop1   /
2316          *           |   `--'      |      `--'
2317          */
2318
2319         if (prefer <= loop_info.max_unroll)
2320                 return prefer;
2321         else {
2322                 switch(prefer) {
2323                         case 6:
2324                                 if (loop_info.max_unroll >= 3)
2325                                         return 3;
2326                                 else if (loop_info.max_unroll >= 2)
2327                                         return 2;
2328                                 else
2329                                         return 0;
2330
2331                         case 4:
2332                                 if (loop_info.max_unroll >= 2)
2333                                         return 2;
2334                                 else
2335                                         return 0;
2336
2337                         default:
2338                                 return 0;
2339                 }
2340         }
2341 }
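
/* Worked examples (hypothetical values):
 *   count_tar = 12, max_unroll = 4: 12 % 6 == 0 gives prefer = 6; since 6 > 4,
 *     the switch falls back to 3 (12 is still a multiple of 3).
 *   count_tar = 7, max_unroll = 4: no divisor in {6, 5, 4, 3, 2} matches, so
 *     gcd(4, 7) = 1 is returned and the caller will not unroll. */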
2342
2343 /* Check if cur_loop is a simple counting loop.
2344  * Start, step and end are constants.
2345  * TODO The whole constant case should use procedures similar to
2346  * the invariant case, as they are more versatile. */
2347 /* TODO split. */
2348 static unsigned get_unroll_decision_constant(void)
2349 {
2350         ir_node     *cmp, *iteration_path;
2351         unsigned     success, is_latest_val;
2352         ir_tarval   *start_tar, *end_tar, *step_tar, *diff_tar, *count_tar;
2353         ir_tarval   *stepped;
2354         ir_relation  proj_proj, norm_proj;
2355         ir_mode     *mode;
2356
2357         /* RETURN if loop is not 'simple' */
2358         cmp = is_simple_loop();
2359         if (cmp == NULL)
2360                 return 0;
2361
2362         /* One in of the loop condition needs to be loop invariant. => end_val
2363          * The other in is assigned by an add. => add
2364          * The add uses a loop invariant value => step
2365          * and a phi with a loop invariant start_val and the add node as ins.
2366
2367            ^   ^
2368            |   | .-,
2369            |   Phi |
2370                 \  |   |
2371           ^  Add   |
2372            \  | \__|
2373             cond
2374              /\
2375         */
2376
2377         success = get_const_pred(cmp, &loop_info.end_val, &iteration_path);
2378         if (! success)
2379                 return 0;
2380
2381         DB((dbg, LEVEL_4, "End_val %N, other %N\n", loop_info.end_val, iteration_path));
2382
2383         /* We may find the add or the phi first.
2384          * Until now we only have end_val. */
2385         if (is_Add(iteration_path) || is_Sub(iteration_path)) {
2386
2387                 /* We test against the latest value of the iv. */
2388                 is_latest_val = 1;
2389
2390                 loop_info.add = iteration_path;
2391                 DB((dbg, LEVEL_4, "Case 2: Got add %N (maybe not sane)\n", loop_info.add));
2392
2393                 /* Preds of the add should be step and the iteration_phi */
2394                 success = get_const_pred(loop_info.add, &loop_info.step, &loop_info.iteration_phi);
2395                 if (! success)
2396                         return 0;
2397
2398                 DB((dbg, LEVEL_4, "Got step %N\n", loop_info.step));
2399
2400                 if (! is_Phi(loop_info.iteration_phi))
2401                         return 0;
2402
2403                 DB((dbg, LEVEL_4, "Got phi %N\n", loop_info.iteration_phi));
2404
2405                 /* Find start_val.
2406                  * Does necessary sanity check of add, if it is already set.  */
2407                 success = get_start_and_add(loop_info.iteration_phi, constant);
2408                 if (! success)
2409                         return 0;
2410
2411                 DB((dbg, LEVEL_4, "Got start %N\n", loop_info.start_val));
2412
2413         } else if (is_Phi(iteration_path)) {
2414                 ir_node *new_iteration_phi;
2415
2416                 /* We compare with the value the iv had entering this run. */
2417                 is_latest_val = 0;
2418
2419                 loop_info.iteration_phi = iteration_path;
2420                 DB((dbg, LEVEL_4, "Case 1: Got phi %N \n", loop_info.iteration_phi));
2421
2422                 /* Find start_val and add-node.
2423                  * Does necessary sanity check of add, if it is already set.  */
2424                 success = get_start_and_add(loop_info.iteration_phi, constant);
2425                 if (! success)
2426                         return 0;
2427
2428                 DB((dbg, LEVEL_4, "Got start %N\n", loop_info.start_val));
2429                 DB((dbg, LEVEL_4, "Got add or sub %N\n", loop_info.add));
2430
2431                 success = get_const_pred(loop_info.add, &loop_info.step, &new_iteration_phi);
2432                 if (! success)
2433                         return 0;
2434
2435                 DB((dbg, LEVEL_4, "Got step %N\n", loop_info.step));
2436
2437                 if (loop_info.iteration_phi != new_iteration_phi)
2438                         return 0;
2439
2440         } else {
2441                 /* RETURN */
2442                 return 0;
2443         }
2444
2445         mode = get_irn_mode(loop_info.end_val);
2446
2447         DB((dbg, LEVEL_4, "start %N, end %N, step %N\n",
2448                                 loop_info.start_val, loop_info.end_val, loop_info.step));
2449
2450         if (mode != mode_Is && mode != mode_Iu)
2451                 return 0;
2452
2453         /* TODO necessary? */
2454         if (!are_mode_I(loop_info.start_val, loop_info.step, loop_info.end_val))
2455                 return 0;
2456
2457         DB((dbg, LEVEL_4, "mode integer\n"));
2458
2459         end_tar = get_Const_tarval(loop_info.end_val);
2460         start_tar = get_Const_tarval(loop_info.start_val);
2461         step_tar = get_Const_tarval(loop_info.step);
2462
2463         if (tarval_is_null(step_tar))
2464                 /* TODO Might be worth a warning. */
2465                 return 0;
2466
2467         DB((dbg, LEVEL_4, "step is not 0\n"));
2468
2469         if ((!tarval_is_negative(step_tar)) ^ (!is_Sub(loop_info.add)))
2470                 loop_info.decreasing = 1;
2471
2472         diff_tar = tarval_sub(end_tar, start_tar, mode);
2473
2474         /* We need at least count_tar steps to get close to end_val, maybe more.
2475          * There is no way we have already gone too many steps.
2476          * This represents the 'latest value'.
2477          * (Whether the condition checks against the latest value is checked later.) */
2478         count_tar = tarval_div(diff_tar, step_tar);
2479
2480         /* Iv will not pass end_val (except overflows).
2481          * Nothing done, as it would yield to no advantage. */
2482         if (tarval_is_negative(count_tar)) {
2483                 DB((dbg, LEVEL_4, "Loop is endless or never taken."));
2484                 /* TODO Might be worth a warning. */
2485                 return 0;
2486         }
2487
2488         count_stats(stats.u_simple_counting_loop);
2489
2490         loop_info.latest_value = is_latest_val;
2491
2492         /* TODO split here
2493         if (! is_simple_counting_loop(&count_tar))
2494                 return 0;
2495         */
2496
2497         /* stepped can be negative, if step < 0 */
2498         stepped = tarval_mul(count_tar, step_tar);
2499
2500         /* Step as close to end_val as possible:
2501          * |stepped| <= |end_tar|, and dist(stepped, end_tar) is smaller than one step. */
2502         if (is_Sub(loop_info.add))
2503                 stepped = tarval_sub(start_tar, stepped, mode);
2504         else
2505                 stepped = tarval_add(start_tar, stepped);
2506
2507         DB((dbg, LEVEL_4, "stepped to %ld\n", get_tarval_long(stepped)));
2508
2509         proj_proj = get_Cmp_relation(cmp);
2510         /* Assure that norm_proj is the stay-in-loop case. */
2511         if (loop_info.exit_cond == 1)
2512                 norm_proj = get_negated_relation(proj_proj);
2513         else
2514                 norm_proj = proj_proj;
2515
2516         DB((dbg, LEVEL_4, "normalized projection %s\n", get_relation_string(norm_proj)));
2517         /* Executed at most once (we stay in the counting loop only if a == b) */
2518         if (norm_proj == ir_relation_equal)
2519                 /* TODO Might be worth a warning. */
2520                 return 0;
2521
2522         /* calculates next values and increases count_tar according to it */
2523         success = simulate_next(&count_tar, stepped, step_tar, end_tar, norm_proj);
2524         if (! success)
2525                 return 0;
2526
2527         /* We run the loop once more if we compare against the
2528          * not yet in-/decremented iv. */
2529         if (is_latest_val == 0) {
2530                 DB((dbg, LEVEL_4, "condition uses not latest iv value\n"));
2531                 count_tar = tarval_add(count_tar, get_tarval_one(mode));
2532         }
2533
2534         DB((dbg, LEVEL_4, "loop taken %ld times\n", get_tarval_long(count_tar)));
2535
2536         /* Assure the loop is taken at least 1 time. */
2537         if (tarval_is_null(count_tar)) {
2538                 /* TODO Might be worth a warning. */
2539                 return 0;
2540         }
2541
2542         loop_info.count_tar = count_tar;
2543         return get_preferred_factor_constant(count_tar);
2544 }
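
/* End-to-end example (hypothetical): a tail-controlled loop with start 0,
 * end 32, step 4, relation < and a condition that tests the already
 * incremented iv (is_latest_val == 1):
 *   diff = 32, count = 32 / 4 = 8, stepped = 0 + 8 * 4 = 32;
 *   32 < 32 fails, so simulate_next() returns 1 without a further step;
 *   count_tar = 8 and 8 % 4 == 0, so a factor of 4 is preferred
 *   (provided loop_info.max_unroll allows it). */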
2545
2546 /**
2547  * Loop unrolling
2548  */
2549 static void unroll_loop(void)
2550 {
2551
2552         if (! (loop_info.nodes > 0))
2553                 return;
2554
2555 #if LOOP_IGNORE_NODE_LIMITS
2556         DB((dbg, LEVEL_1, "WARNING: Loop node limitations ignored."));
2557 #else
2558         if (loop_info.nodes > opt_params.max_unrolled_loop_size) {
2559                 DB((dbg, LEVEL_2, "Nodes %d > allowed nodes %d\n",
2560                         loop_info.nodes, opt_params.max_unrolled_loop_size));
2561                 count_stats(stats.too_large);
2562                 return;
2563         }
2564
2565         if (loop_info.calls > 0) {
2566                 DB((dbg, LEVEL_2, "Calls %d > allowed calls 0\n",
2567                         loop_info.calls));
2568                 count_stats(stats.calls_limit);
2569                 return;
2570         }
2571 #endif
2572
2573         unroll_nr = 0;
2574
2575         /* get_unroll_decision_constant and invariant are completely
2576          * independent for flexibility.
2577          * Some checks may be performed twice. */
2578
2579         /* constant case? */
2580         if (opt_params.allow_const_unrolling)
2581                 unroll_nr = get_unroll_decision_constant();
2582         if (unroll_nr > 1) {
2583                 loop_info.unroll_kind = constant;
2584
2585         } else {
2586                 /* invariant case? */
2587                 if (opt_params.allow_invar_unrolling)
2588                         unroll_nr = get_unroll_decision_invariant();
2589                 if (unroll_nr > 1)
2590                         loop_info.unroll_kind = invariant;
2591         }
2592
2593         DB((dbg, LEVEL_2, " *** Unrolling %d times ***\n", unroll_nr));
2594
2595         if (unroll_nr > 1) {
2596                 loop_entries = NEW_ARR_F(entry_edge, 0);
2597
2598                 /* Get loop outs */
2599                 irg_walk_graph(current_ir_graph, get_loop_entries, NULL, NULL);
2600
2601                 if (loop_info.unroll_kind == constant) {
2602                         if ((int)get_tarval_long(loop_info.count_tar) == unroll_nr)
2603                                 loop_info.needs_backedge = 0;
2604                         else
2605                                 loop_info.needs_backedge = 1;
2606                 } else {
2607                         loop_info.needs_backedge = 1;
2608                 }
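                /* Example: if the loop runs exactly count_tar == unroll_nr times,
                 * it is fully unrolled and the copied chain needs no backedge. */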
2609
2610                 /* Use a phase to keep track of the copied nodes. */
2611                 phase = new_phase(current_ir_graph, phase_irn_init_default);
2612
2613                 /* Copies the loop */
2614                 copy_loop(loop_entries, unroll_nr - 1);
2615
2616                 /* Line up the floating copies. */
2617                 place_copies(unroll_nr - 1);
2618
2619                 /* Remove phis with 1 in
2620                  * If there were no nested phis, this would not be necessary.
2621                  * Avoiding the creation in the first place
2622                  * leads to complex special cases. */
2623                 irg_walk_graph(current_ir_graph, correct_phis, NULL, NULL);
2624
2625                 if (loop_info.unroll_kind == constant)
2626                         count_stats(stats.constant_unroll);
2627                 else
2628                         count_stats(stats.invariant_unroll);
2629
2630                 set_irg_doms_inconsistent(current_ir_graph);
2631
2632                 DEL_ARR_F(loop_entries);
2633         }
2634
2635 }
2636
2637 /* Analyzes the loop, and checks if size is within allowed range.
2638  * Decides if loop will be processed. */
2639 static void init_analyze(ir_loop *loop)
2640 {
2641         cur_loop = loop;
2642
2643         loop_head = NULL;
2644         loop_head_valid = 1;
2645
2646         /* Reset loop info */
2647         memset(&loop_info, 0, sizeof(loop_info_t));
2648
2649         DB((dbg, LEVEL_1, "    >>>> current loop includes node %N <<<\n",
2650                 get_loop_node(loop, 0)));
2651
2652         /* Collect loop informations: head, node counts. */
2653         irg_walk_graph(current_ir_graph, get_loop_info, NULL, NULL);
2654
2655         /* RETURN if there is no valid head */
2656         if (!loop_head || !loop_head_valid) {
2657                 DB((dbg, LEVEL_1,   "No valid loop head. Nothing done.\n"));
2658                 return;
2659         } else {
2660                 DB((dbg, LEVEL_1,   "Loophead: %N\n", loop_head));
2661         }
2662
2663         if (loop_info.branches > opt_params.max_branches) {
2664                 DB((dbg, LEVEL_1, "Branches %d > allowed branches %d\n",
2665                         loop_info.branches, opt_params.max_branches));
2666                 count_stats(stats.calls_limit);
2667                 return;
2668         }
2669
2670         switch (loop_op) {
2671                 case loop_op_inversion:
2672                         loop_inversion();
2673                         break;
2674
2675                 case loop_op_unrolling:
2676                         unroll_loop();
2677                         break;
2678
2679                 default:
2680                         panic("Loop optimization not implemented.");
2681         }
2682         DB((dbg, LEVEL_1, "       <<<< end of loop with node %N >>>>\n",
2683                 get_loop_node(loop, 0)));
2684 }
2685
2686 /* Find innermost loops and add them to loops. */
2687 static void find_innermost_loop(ir_loop *loop)
2688 {
2689         /* descend into sons */
2690         size_t sons = get_loop_n_sons(loop);
2691
2692         if (sons == 0) {
2693                 ARR_APP1(ir_loop *, loops, loop);
2694         } else {
2695                 size_t s;
2696                 for (s = 0; s < sons; ++s) {
2697                         find_innermost_loop(get_loop_son(loop, s));
2698                 }
2699         }
2700 }
2701
2702 static void set_loop_params(void)
2703 {
2704         opt_params.max_loop_size = 100;
2705         opt_params.depth_adaption = -50;
2706         opt_params.count_phi = 1;
2707         opt_params.count_proj = 0;
2708         opt_params.allowed_calls = 0;
2709
2710         opt_params.max_cc_size = 5;
2711
2712         opt_params.allow_const_unrolling = 1;
2713         opt_params.allow_invar_unrolling = 0;
2714
2715         opt_params.invar_unrolling_min_size = 20;
2716         opt_params.max_unrolled_loop_size = 400;
2717         opt_params.max_branches = 9999;
2719 }
2720
2721 /* Assure preconditions are met and go through all loops. */
2722 void loop_optimization(ir_graph *irg)
2723 {
2724         ir_loop *loop;
2725         size_t  sons, nr;
2726         size_t  i;
2727
2728         set_loop_params();
2729
2730         /* Reset stats for this procedure */
2731         reset_stats();
2732
2733         /* Preconditions */
2734         set_current_ir_graph(irg);
2735
2736         edges_assure(irg);
2737         assure_irg_outs(irg);
2738
2739         /* NOTE: sets only the loop attribute of blocks, not nodes */
2740         /* NOTE: Kills links */
2741         assure_cf_loop(irg);
2742
2743         ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
2744         collect_phiprojs(irg);
2745         ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
2746
2747         loop = get_irg_loop(irg);
2748         sons = get_loop_n_sons(loop);
2749
2750         loops = NEW_ARR_F(ir_loop *, 0);
2751         /* List all inner loops */
2752         for (nr = 0; nr < sons; ++nr) {
2753                 find_innermost_loop(get_loop_son(loop, nr));
2754         }
2755
2756         ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
2757         /* Set all links to NULL */
2758         irg_walk_graph(current_ir_graph, reset_link, NULL, NULL);
2759
2760         for (i = 0; i < ARR_LEN(loops); ++i) {
2761                 ir_loop *loop = loops[i];
2762
2763                 count_stats(stats.loops);
2764
2765                 /* Analyze and handle loop */
2766                 init_analyze(loop);
2767
2768                 /* Copied blocks do not have their phi list yet */
2769                 collect_phiprojs(irg);
2770
2771                 /* Set links to NULL
2772                  * TODO Still necessary? */
2773                 irg_walk_graph(current_ir_graph, reset_link, NULL, NULL);
2774         }
2775
2776         print_stats();
2777
2778         DEL_ARR_F(loops);
2779         ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
2780         ir_free_resources(irg, IR_RESOURCE_PHI_LIST);
2781 }
2782
2783 void do_loop_unrolling(ir_graph *irg)
2784 {
2785         loop_op = loop_op_unrolling;
2786
2787         DB((dbg, LEVEL_1, " >>> unrolling (Startnode %N) <<<\n",
2788                                 get_irg_start(irg)));
2789
2790         loop_optimization(irg);
2791
2792         DB((dbg, LEVEL_1, " >>> unrolling done (Startnode %N) <<<\n",
2793                                 get_irg_start(irg)));
2794 }
2795
2796 void do_loop_inversion(ir_graph *irg)
2797 {
2798         loop_op = loop_op_inversion;
2799
2800         DB((dbg, LEVEL_1, " >>> inversion (Startnode %N) <<<\n",
2801                                 get_irg_start(irg)));
2802
2803         loop_optimization(irg);
2804
2805         assure_cf_loop(irg);
2806
2807         DB((dbg, LEVEL_1, " >>> inversion done (Startnode %N) <<<\n",
2808                                 get_irg_start(irg)));
2809 }
2810
2811 void do_loop_peeling(ir_graph *irg)
2812 {
2813         loop_op = loop_op_peeling;
2814
2815         DB((dbg, LEVEL_1, " >>> peeling (Startnode %N) <<<\n",
2816                                 get_irg_start(irg)));
2817
2818         loop_optimization(irg);
2819
2820         DB((dbg, LEVEL_1, " >>> peeling done (Startnode %N) <<<\n",
2821                                 get_irg_start(irg)));
2822
2823 }
2824
2825 ir_graph_pass_t *loop_inversion_pass(const char *name)
2826 {
2827         return def_graph_pass(name ? name : "loop_inversion", do_loop_inversion);
2828 }
2829
2830 ir_graph_pass_t *loop_unroll_pass(const char *name)
2831 {
2832         return def_graph_pass(name ? name : "loop_unroll", do_loop_unrolling);
2833 }
2834
2835 ir_graph_pass_t *loop_peeling_pass(const char *name)
2836 {
2837         return def_graph_pass(name ? name : "loop_peeling", do_loop_peeling);
2838 }
2839
2840 void firm_init_loop_opt(void)
2841 {
2842         FIRM_DBG_REGISTER(dbg, "firm.opt.loop");
2843 }