plug memory leaks
[libfirm] / ir / opt / loop.c
1 /*
2  * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @author   Christian Helmer
23  * @brief    loop inversion and loop unrolling
24  *
25  */
26 #include "config.h"
27
28 #include <stdbool.h>
29
30 #include "iroptimize.h"
31 #include "opt_init.h"
32 #include "irnode.h"
33 #include "debug.h"
34 #include "error.h"
35
36 #include "ircons.h"
37 #include "irgopt.h"
38 #include "irgmod.h"
39 #include "irgwalk.h"
40 #include "irouts.h"
41 #include "iredges.h"
42 #include "irtools.h"
43 #include "array_t.h"
44 #include "beutil.h"
45 #include "irpass.h"
46 #include "irdom.h"
47 #include "opt_manage.h"
48
49 #include <math.h>
50 #include "irbackedge_t.h"
51 #include "irnodemap.h"
52 #include "irloop_t.h"
53
54 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
55
56 /**
57  * Convenience macro for iterating over every phi node of the given block.
58  * Requires phi list per block.
59  */
60 #define for_each_phi(block, phi) \
61         for ((phi) = get_Block_phis( (block) ); (phi) ; (phi) = get_Phi_next((phi)))
62
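/**
 * Like for_each_phi, but the next phi is fetched in advance, so the current
 * phi may safely be exchanged or removed while iterating.
 */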
63 #define for_each_phi_safe(head, phi, next) \
64         for ((phi) = (head), (next) = (head) ? get_Phi_next((head)) : NULL; \
65                         (phi) ; (phi) = (next), (next) = (next) ? get_Phi_next((next)) : NULL)
66
67 /* Currently processed loop. */
68 static ir_loop *cur_loop;
69
70 /* Flag for kind of unrolling. */
71 typedef enum {
72         constant,
73         invariant
74 } unrolling_kind_flag;
75
76 /* Condition for visiting a node during copy_walk. */
77 typedef bool walker_condition(const ir_node *);
78
79 /* Node and position of a predecessor. */
80 typedef struct entry_edge {
81         ir_node *node;
82         int pos;
83         ir_node *pred;
84 } entry_edge;
85
86 /* Node info for unrolling. */
87 typedef struct unrolling_node_info {
88         ir_node **copies;
89 } unrolling_node_info;
90
91 /* Outs of the head node. */
92 static entry_edge *cur_head_outs;
93
94 /* Information about the loop head */
95 static ir_node *loop_head       = NULL;
96 static bool     loop_head_valid = true;
97
98 /* List of all inner loops that are processed. */
99 static ir_loop **loops;
100
101 /* Stats */
102 typedef struct loop_stats_t {
103         unsigned loops;
104         unsigned inverted;
105         unsigned too_large;
106         unsigned too_large_adapted;
107         unsigned cc_limit_reached;
108         unsigned calls_limit;
109
110         unsigned u_simple_counting_loop;
111         unsigned constant_unroll;
112         unsigned invariant_unroll;
113
114         unsigned unhandled;
115 } loop_stats_t;
116
117 static loop_stats_t stats;
118
119 /* Set stats to zero */
120 static void reset_stats(void)
121 {
122         memset(&stats, 0, sizeof(loop_stats_t));
123 }
124
125 /* Print stats */
126 static void print_stats(void)
127 {
128         DB((dbg, LEVEL_2, "---------------------------------------\n"));
129         DB((dbg, LEVEL_2, "loops             :   %d\n",stats.loops));
130         DB((dbg, LEVEL_2, "inverted          :   %d\n",stats.inverted));
131         DB((dbg, LEVEL_2, "too_large         :   %d\n",stats.too_large));
132         DB((dbg, LEVEL_2, "too_large_adapted :   %d\n",stats.too_large_adapted));
133         DB((dbg, LEVEL_2, "cc_limit_reached  :   %d\n",stats.cc_limit_reached));
134         DB((dbg, LEVEL_2, "calls_limit       :   %d\n",stats.calls_limit));
135         DB((dbg, LEVEL_2, "u_simple_counting :   %d\n",stats.u_simple_counting_loop));
136         DB((dbg, LEVEL_2, "constant_unroll   :   %d\n",stats.constant_unroll));
137         DB((dbg, LEVEL_2, "invariant_unroll  :   %d\n",stats.invariant_unroll));
138         DB((dbg, LEVEL_2, "=======================================\n"));
139 }
140
141 /* Commandline parameters */
142 typedef struct loop_opt_params_t {
143         unsigned max_loop_size;             /* Maximum number of nodes [nodes] */
144         int      depth_adaption;            /* Loop nest depth adaption [percent] */
145         unsigned allowed_calls;             /* Number of calls allowed [number] */
146         bool     count_phi;                 /* Count phi nodes */
147         bool     count_proj;                /* Count projections */
148
149         unsigned max_cc_size;               /* Maximum condition chain size [nodes] */
150         unsigned max_branches;
151
152         unsigned max_unrolled_loop_size;    /* [nodes] */
153         bool     allow_const_unrolling;
154         bool     allow_invar_unrolling;
155         unsigned invar_unrolling_min_size;  /* [nodes] */
156
157 } loop_opt_params_t;
158
159 static loop_opt_params_t opt_params;
160
161 /* Loop analysis information */
162 typedef struct loop_info_t {
163         unsigned nodes;        /* node count */
164         unsigned ld_st;        /* load and store nodes */
165         unsigned branches;     /* number of conditions */
166         unsigned calls;        /* number of calls */
167         unsigned cf_outs;      /* number of cf edges which leave the loop */
168         entry_edge cf_out;     /* single loop leaving cf edge */
169         int be_src_pos;        /* position of the single own backedge in the head */
170
171         /* for inversion */
172         unsigned cc_size;      /* nodes in the condition chain */
173
174         /* for unrolling */
175         unsigned max_unroll;   /* Number of unrolls satisfying max_loop_size */
176         unsigned exit_cond;    /* 1 if condition==true exits the loop.  */
177         unsigned latest_value:1;    /* 1 if condition is checked against latest counter value */
178         unsigned needs_backedge:1;  /* 0 if loop is completely unrolled */
179         unsigned decreasing:1;      /* Step operation is_Sub, or step is<0 */
180
181         /* IV information of a simple loop */
182         ir_node *start_val;
183         ir_node *step;
184         ir_node *end_val;
185         ir_node *iteration_phi;
186         ir_node *add;
187
188         ir_tarval *count_tar;               /* Number of loop iterations */
189
190         ir_node *duff_cond;                 /* Duff mod */
191         unrolling_kind_flag unroll_kind;    /* constant or invariant unrolling */
192 } loop_info_t;
193
194 /* Information about the current loop */
195 static loop_info_t loop_info;
196
197 /* Blocks of the condition chain (loop inversion). */
198 static ir_node **cc_blocks;
199 /* df/cf edges with def in the condition chain */
200 static entry_edge *cond_chain_entries;
201 /* Array of df loops found in the condition chain. */
202 static entry_edge *head_df_loop;
203 /* Number of blocks in cc */
204 static unsigned inversion_blocks_in_cc;
205
206
207 /* Cf/df edges leaving the loop.
208  * Called entries here, as they are used to enter the loop with walkers. */
209 static entry_edge *loop_entries;
210 /* Number of unrolls to perform */
211 static int unroll_nr;
212 /* A node map (plus obstack) is used to keep copies of nodes. */
213 static ir_nodemap     map;
214 static struct obstack obst;
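/* Map and obstack are set up per transformation and torn down again
 * afterwards (see e.g. loop_inversion: obstack_free/ir_nodemap_destroy),
 * so the per-loop copy bookkeeping does not leak. */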
215
216 /* Loop operations.  */
217 typedef enum loop_op_t {
218         loop_op_inversion,
219         loop_op_unrolling,
220         loop_op_peeling
221 } loop_op_t;
222
223 /* Saves which loop operation to do until after basic tests. */
224 static loop_op_t loop_op;
225
226 /* Returns the maximum number of nodes allowed for the given nest depth. */
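/* Example (hypothetical parameters): with max_loop_size = 100 and
 * depth_adaption = -20 percent, a loop at nest depth 2 is allowed
 * 100 * 0.8 * 0.8 = 64 nodes. */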
227 static unsigned get_max_nodes_adapted(unsigned depth)
228 {
229         double perc = 100.0 + (double)opt_params.depth_adaption;
230         double factor = pow(perc / 100.0, depth);
231
232         return (int)((double)opt_params.max_loop_size * factor);
233 }
234
235 /* Reset nodes link. For use with a walker. */
236 static void reset_link(ir_node *node, void *env)
237 {
238         (void)env;
239         set_irn_link(node, NULL);
240 }
241
242 /* Returns true if the node's block is in cur_loop. */
243 static bool is_in_loop(const ir_node *node)
244 {
245         return get_irn_loop(get_block_const(node)) == cur_loop;
246 }
247
248 /* Returns true if the given edge is a backedge
249  * with its pred in cur_loop. */
250 static bool is_own_backedge(const ir_node *n, int pos)
251 {
252         return is_backedge(n, pos) && is_in_loop(get_irn_n(n, pos));
253 }
254
255 /* Finds the loop head and collects loop_info such as node, call and branch counts. */
256 static void get_loop_info(ir_node *node, void *env)
257 {
258         bool node_in_loop = is_in_loop(node);
259         int i, arity;
260         (void)env;
261
262         /* collect some loop information */
263         if (node_in_loop) {
264                 if (is_Phi(node) && opt_params.count_phi)
265                         ++loop_info.nodes;
266                 else if (is_Proj(node) && opt_params.count_proj)
267                         ++loop_info.nodes;
268                 else if (!is_Confirm(node) && !is_Const(node) && !is_SymConst(node))
269                         ++loop_info.nodes;
270
271                 if (is_Load(node) || is_Store(node))
272                         ++loop_info.ld_st;
273
274                 if (is_Call(node))
275                         ++loop_info.calls;
276         }
277
278         arity = get_irn_arity(node);
279         for (i = 0; i < arity; i++) {
280                 ir_node *pred         = get_irn_n(node, i);
281                 bool     pred_in_loop = is_in_loop(pred);
282
283                 if (is_Block(node) && !node_in_loop && pred_in_loop) {
284                         entry_edge entry;
285                         entry.node = node;
286                         entry.pos = i;
287                         entry.pred = pred;
288                         /* Count cf outs */
289                         ++loop_info.cf_outs;
290                         loop_info.cf_out = entry;
291                 }
292
293                 /* Find the loop's head, i.e. the block with a cf pred outside of the loop */
294                 if (is_Block(node)) {
295                         const ir_edge_t *edge;
296                         unsigned outs_n = 0;
297
298                         /* Count innerloop branches */
299                         foreach_out_edge_kind(node, edge, EDGE_KIND_BLOCK) {
300                                 ir_node *succ = get_edge_src_irn(edge);
301                                 if (is_Block(succ) && is_in_loop(succ))
302                                         ++outs_n;
303                         }
304                         if (outs_n > 1)
305                                 ++loop_info.branches;
306
307                         if (node_in_loop && !pred_in_loop && loop_head_valid) {
308                                 ir_node *cfgpred = get_Block_cfgpred(node, i);
309
310                                 if (!is_in_loop(cfgpred)) {
311                                         DB((dbg, LEVEL_5, "potential head %+F because inloop and pred %+F not inloop\n",
312                                                                 node, pred));
313                                         /* another head? We do not touch this. */
314                                         if (loop_head && loop_head != node) {
315                                                 loop_head_valid = false;
316                                         } else {
317                                                 loop_head = node;
318                                         }
319                                 }
320                         }
321                 }
322         }
323 }
324
325 /* Finds all edges with users outside of the loop
326  * and definition inside the loop. */
327 static void get_loop_entries(ir_node *node, void *env)
328 {
329         unsigned node_in_loop, pred_in_loop;
330         int i, arity;
331         (void) env;
332
333         arity = get_irn_arity(node);
334         for (i = 0; i < arity; ++i) {
335                 ir_node *pred = get_irn_n(node, i);
336
337                 pred_in_loop = is_in_loop(pred);
338                 node_in_loop = is_in_loop(node);
339
340                 if (pred_in_loop && !node_in_loop) {
341                         entry_edge entry;
342                         entry.node = node;
343                         entry.pos = i;
344                         entry.pred = pred;
345                         ARR_APP1(entry_edge, loop_entries, entry);
346                 }
347         }
348 }
349
350 /* ssa */
351 static ir_node *ssa_second_def;
352 static ir_node *ssa_second_def_block;
353
354 /**
355  * Walks the graph bottom up, searching for definitions and creating Phis.
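 *
 * Roughly: an already visited block returns its cached value (irn link); a
 * block with a single cfg pred just forwards the search to that pred;
 * otherwise a Phi with Dummy ins is created first (to break cycles) and its
 * predecessors are filled in afterwards by recursing into the pred blocks.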
356  */
357 static ir_node *search_def_and_create_phis(ir_node *block, ir_mode *mode, int first)
358 {
359         int i;
360         int n_cfgpreds;
361         ir_graph *irg = get_irn_irg(block);
362         ir_node *phi;
363         ir_node **in;
364
365         DB((dbg, LEVEL_5, "ssa search_def_and_create_phis: block %N\n", block));
366
367         /* Prevents creation of phi that would be bad anyway.
368          * Dead and bad blocks. */
369         if (get_irn_arity(block) < 1 || is_Bad(block)) {
370                 DB((dbg, LEVEL_5, "ssa bad %N\n", block));
371                 return new_r_Bad(irg, mode);
372         }
373
374         if (block == ssa_second_def_block && !first) {
375                 DB((dbg, LEVEL_5, "ssa found second definition: use second def %N\n", ssa_second_def));
376                 return ssa_second_def;
377         }
378
379         /* already processed this block? */
380         if (irn_visited(block)) {
381                 ir_node *value = (ir_node *) get_irn_link(block);
382                 DB((dbg, LEVEL_5, "ssa already visited: use linked %N\n", value));
383                 return value;
384         }
385
386         assert(block != get_irg_start_block(irg));
387
388         /* a Block with only 1 predecessor needs no Phi */
389         n_cfgpreds = get_Block_n_cfgpreds(block);
390         if (n_cfgpreds == 1) {
391                 ir_node *pred_block = get_Block_cfgpred_block(block, 0);
392                 ir_node *value;
393
394                 DB((dbg, LEVEL_5, "ssa 1 pred: walk pred %N\n", pred_block));
395
396                 value = search_def_and_create_phis(pred_block, mode, 0);
397                 set_irn_link(block, value);
398                 mark_irn_visited(block);
399
400                 return value;
401         }
402
403         /* create a new Phi */
404         NEW_ARR_A(ir_node*, in, n_cfgpreds);
405         for (i = 0; i < n_cfgpreds; ++i)
406                 in[i] = new_r_Dummy(irg, mode);
407
408         phi = new_r_Phi(block, n_cfgpreds, in, mode);
409         /* Important: always keep block phi list up to date. */
410         add_Block_phi(block, phi);
411         DB((dbg, LEVEL_5, "ssa phi creation: link new phi %N to block %N\n", phi, block));
412         set_irn_link(block, phi);
413         mark_irn_visited(block);
414
415         /* set Phi predecessors */
416         for (i = 0; i < n_cfgpreds; ++i) {
417                 ir_node *pred_val;
418                 ir_node *pred_block = get_Block_cfgpred_block(block, i);
419                 assert(pred_block != NULL);
420                 pred_val = search_def_and_create_phis(pred_block, mode, 0);
421
422                 assert(pred_val != NULL);
423
424                 DB((dbg, LEVEL_5, "ssa phi pred:phi %N, pred %N\n", phi, pred_val));
425                 set_irn_n(phi, i, pred_val);
426         }
427
428         return phi;
429 }
430
431
432 /**
433  * Given a set of values this function constructs SSA-form for the users of the
434  * first value (the users are determined through the out-edges of the value).
435  * Works without using the dominance tree.
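 * (orig_block, orig_val) and (second_block, second_val) are the two
 * definitions; for every user of orig_val the reaching definition is found by
 * walking the cfg upwards from the user's block (or from the matching pred
 * block for Phi users), inserting new Phis where paths merge.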
436  */
437 static void construct_ssa(ir_node *orig_block, ir_node *orig_val,
438                 ir_node *second_block, ir_node *second_val)
439 {
440         ir_graph *irg;
441         ir_mode *mode;
442         const ir_edge_t *edge;
443         const ir_edge_t *next;
444
445         assert(orig_block && orig_val && second_block && second_val &&
446                         "no parameter of construct_ssa may be NULL");
447
448         if (orig_val == second_val)
449                 return;
450
451         irg = get_irn_irg(orig_val);
452
453         ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
454         inc_irg_visited(irg);
455
456         mode = get_irn_mode(orig_val);
457         set_irn_link(orig_block, orig_val);
458         mark_irn_visited(orig_block);
459
460         ssa_second_def_block = second_block;
461         ssa_second_def       = second_val;
462
463         /* Only fix the users of the first, i.e. the original node */
464         foreach_out_edge_safe(orig_val, edge, next) {
465                 ir_node *user = get_edge_src_irn(edge);
466                 int j = get_edge_src_pos(edge);
467                 ir_node *user_block = get_nodes_block(user);
468                 ir_node *newval;
469
470                 /* ignore keeps */
471                 if (is_End(user))
472                         continue;
473
474                 DB((dbg, LEVEL_5, "original user %N\n", user));
475
476                 if (is_Phi(user)) {
477                         ir_node *pred_block = get_Block_cfgpred_block(user_block, j);
478                         newval = search_def_and_create_phis(pred_block, mode, 1);
479                 } else {
480                         newval = search_def_and_create_phis(user_block, mode, 1);
481                 }
482                 if (newval != user && !is_Bad(newval))
483                         set_irn_n(user, j, newval);
484         }
485
486         ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
487 }
488
489
490 /***** Unrolling Helper Functions *****/
491
492 /* Assign the copy with index nr to node n */
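/* Index 0 always holds the original node; the copies array and the info
 * struct live on the local obstack and are released together with it. */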
493 static void set_unroll_copy(ir_node *n, int nr, ir_node *cp)
494 {
495         unrolling_node_info *info;
496         assert(nr != 0 && "0 reserved");
497
498         info = (unrolling_node_info*)ir_nodemap_get(&map, n);
499         if (! info) {
500                 ir_node **arr = NEW_ARR_D(ir_node*, &obst, unroll_nr);
501                 memset(arr, 0, unroll_nr * sizeof(ir_node*));
502
503                 info = OALLOCZ(&obst, unrolling_node_info);
504                 info->copies = arr;
505                 ir_nodemap_insert(&map, n, info);
506         }
507         /* Original node */
508         info->copies[0] = n;
509
510         info->copies[nr] = cp;
511 }
512
513 /* Returns a node's copy if it exists, else NULL. */
514 static ir_node *get_unroll_copy(ir_node *n, int nr)
515 {
516         ir_node             *cp;
517         unrolling_node_info *info = (unrolling_node_info *)ir_nodemap_get(&map, n);
518         if (! info)
519                 return NULL;
520
521         cp = info->copies[nr];
522         return cp;
523 }
524
525
526 /***** Inversion Helper Functions *****/
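/* For inversion the node map stores the copy directly, whereas unrolling
 * stores an unrolling_node_info holding an array of copies (see above). */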
527
528 /* Sets copy cp of node n. */
529 static void set_inversion_copy(ir_node *n, ir_node *cp)
530 {
531         ir_nodemap_insert(&map, n, cp);
532 }
533
534 /* Getter of copy of n for inversion */
535 static ir_node *get_inversion_copy(ir_node *n)
536 {
537         ir_node *cp = (ir_node *)ir_nodemap_get(&map, n);
538         return cp;
539 }
540
541 /* Resets the block mark of the given node. For use with a walker. */
542 static void reset_block_mark(ir_node *node, void * env)
543 {
544         (void) env;
545
546         if (is_Block(node))
547                 set_Block_mark(node, 0);
548 }
549
550 /* Returns the mark of the node's block (of the node itself if it is a block).
551  * Used in this context to determine if the node is in the condition chain. */
552 static bool is_nodes_block_marked(const ir_node* node)
553 {
554         return get_Block_mark(get_block_const(node));
555 }
556
557 /* Extends a node's ins by the node newnode.
558  * NOTE: This is slow if a node n needs to be extended more than once. */
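/* The temporary ins/bes arrays are heap-allocated and freed again at the end
 * of this function. */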
559 static void extend_irn(ir_node *n, ir_node *newnode, bool new_is_backedge)
560 {
561         int i;
562         int arity = get_irn_arity(n);
563         int new_arity = arity + 1;
564         ir_node **ins = XMALLOCN(ir_node*, new_arity);
565         bool     *bes = XMALLOCN(bool, new_arity);
566
567         /* save bes */
568         /* Bes are important!
569          * Another way would be recreating the looptree,
570          * but after that we cannot distinguish already processed loops
571          * from not yet processed ones. */
572         if (is_Block(n)) {
573                 for(i = 0; i < arity; ++i) {
574                         bes[i] = is_backedge(n, i);
575                 }
576                 bes[i] = new_is_backedge;
577         }
578
579         for(i = 0; i < arity; ++i) {
580                 ins[i] = get_irn_n(n, i);
581         }
582         ins[i] = newnode;
583
584         set_irn_in(n, new_arity, ins);
585
586         /* restore bes  */
587         if (is_Block(n)) {
588                 for(i = 0; i < new_arity; ++i) {
589                         if (bes[i])
590                                 set_backedge(n, i);
591                 }
592         }
593         free(ins);
594         free(bes);
595 }
596
597 /* Extends a block by a copy of its pred at pos,
598  * fixing also the phis in the same way. */
599 static void extend_ins_by_copy(ir_node *block, int pos)
600 {
601         ir_node *new_in;
602         ir_node *phi;
603         ir_node *pred;
604         assert(is_Block(block));
605
606         /* Extend block by copy of definition at pos */
607         pred = get_irn_n(block, pos);
608         new_in = get_inversion_copy(pred);
609         DB((dbg, LEVEL_5, "Extend block %N by %N cp of %N\n", block, new_in, pred));
610         extend_irn(block, new_in, false);
611
612         /* Extend block phis by copy of definition at pos */
613         for_each_phi(block, phi) {
614                 ir_node *pred, *cp;
615
616                 pred = get_irn_n(phi, pos);
617                 cp = get_inversion_copy(pred);
618                  * If the phi's in is not in the condition chain (e.g. a constant),
619                  * there is no copy. */
620                 if (cp == NULL)
621                         new_in = pred;
622                 else
623                         new_in = cp;
624
625                 DB((dbg, LEVEL_5, "Extend phi %N by %N cp of %N\n", phi, new_in, pred));
626                 extend_irn(phi, new_in, false);
627         }
628 }
629
630 /* Returns the number of the block's backedges, with or without alien backedges. */
631 static int get_backedge_n(ir_node *block, bool with_alien)
632 {
633         int i;
634         int be_n = 0;
635         int arity = get_irn_arity(block);
636
637         assert(is_Block(block));
638
639         for (i = 0; i < arity; ++i) {
640                 ir_node *pred = get_irn_n(block, i);
641                 if (is_backedge(block, i) && (with_alien || is_in_loop(pred)))
642                         ++be_n;
643         }
644         return be_n;
645 }
646
647 /* Returns a raw copy of the given node.
648  * Attributes are kept/set according to the needs of loop inversion. */
649 static ir_node *copy_node(ir_node *node)
650 {
651         int i, arity;
652         ir_node *cp;
653
654         cp = exact_copy(node);
655         arity = get_irn_arity(node);
656
657         /* Keep backedge info */
658         for (i = 0; i < arity; ++i) {
659                 if (is_backedge(node, i))
660                         set_backedge(cp, i);
661         }
662
663         if (is_Block(cp)) {
664                 set_Block_mark(cp, 0);
665         }
666
667         return cp;
668 }
669
670
671 /**
672  * This walker copies all walked nodes.
673  * If the walk_condition is true for a node, it is copied.
674  * All nodes node_info->copy have to be NULL prior to every walk.
675  * Order of ins is important for later usage.
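 * Copying happens in two phases: when a cycle is hit, a temporary copy
 * without correct ins is created first; it is finalized (ins set) once the
 * walk returns to the node.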
676  */
677 static void copy_walk(ir_node *node, walker_condition *walk_condition,
678                       ir_loop *set_loop)
679 {
680         int i;
681         int arity;
682         ir_node *cp;
683         ir_node **cpin;
684         ir_graph *irg = current_ir_graph;
685
686         /**
687          * break condition and cycle resolver, creating temporary node copies
688          */
689         if (get_irn_visited(node) >= get_irg_visited(irg)) {
690                 /* Here we rely on nodestate's copy being initialized with NULL */
691                 DB((dbg, LEVEL_5, "copy_walk: We have already visited %N\n", node));
692                 if (get_inversion_copy(node) == NULL) {
693                         cp = copy_node(node);
694                         set_inversion_copy(node, cp);
695
696                         DB((dbg, LEVEL_5, "The TEMP copy of %N is created %N\n", node, cp));
697                 }
698                 return;
699         }
700
701         /* Walk */
702         mark_irn_visited(node);
703
704         if (!is_Block(node)) {
705                 ir_node *pred = get_nodes_block(node);
706                 if (walk_condition(pred))
707                         DB((dbg, LEVEL_5, "walk block %N\n", pred));
708                 copy_walk(pred, walk_condition, set_loop);
709         }
710
711         arity = get_irn_arity(node);
712
713         NEW_ARR_A(ir_node *, cpin, arity);
714
715         for (i = 0; i < arity; ++i) {
716                 ir_node *pred = get_irn_n(node, i);
717
718                 if (walk_condition(pred)) {
719                         DB((dbg, LEVEL_5, "walk node %N\n", pred));
720                         copy_walk(pred, walk_condition, set_loop);
721                         cpin[i] = get_inversion_copy(pred);
722                         DB((dbg, LEVEL_5, "copy of %N gets new in %N which is copy of %N\n",
723                                                 node, get_inversion_copy(pred), pred));
724                 } else {
725                         cpin[i] = pred;
726                 }
727         }
728
729         /* copy node / finalize temp node */
730         if (get_inversion_copy(node) == NULL) {
731                 /* No temporary copy existent */
732                 cp = copy_node(node);
733                 set_inversion_copy(node, cp);
734                 DB((dbg, LEVEL_5, "The FINAL copy of %N is CREATED %N\n", node, cp));
735         } else {
736                 /* temporary copy is existent but without correct ins */
737                 cp = get_inversion_copy(node);
738                 DB((dbg, LEVEL_5, "The FINAL copy of %N is EXISTENT %N\n", node, cp));
739         }
740
741         if (!is_Block(node)) {
742                 ir_node *cpblock = get_inversion_copy(get_nodes_block(node));
743
744                 set_nodes_block(cp, cpblock );
745                 if (is_Phi(cp))
746                         add_Block_phi(cpblock, cp);
747         }
748
749         /* Keeps phi list of temporary node. */
750         set_irn_in(cp, ARR_LEN(cpin), cpin);
751 }
752
753 /**
754  * This walker copies all walked nodes.
755  * If the walk_condition is true for a node, it is copied.
756  * All nodes node_info->copy have to be NULL prior to every walk.
757  * Order of ins is important for later usage.
758  * Takes copy_index, to phase-link copy at specific index.
759  */
760 static void copy_walk_n(ir_node *node, walker_condition *walk_condition,
761                         int copy_index)
762 {
763         int i;
764         int arity;
765         ir_node *cp;
766         ir_node **cpin;
767
768         /**
769          * break condition and cycle resolver, creating temporary node copies
770          */
771         if (irn_visited(node)) {
772                 /* Here we rely on nodestate's copy being initialized with NULL */
773                 DB((dbg, LEVEL_5, "copy_walk: We have already visited %N\n", node));
774                 if (get_unroll_copy(node, copy_index) == NULL) {
775                         ir_node *u;
776                         u = copy_node(node);
777                         set_unroll_copy(node, copy_index, u);
778                         DB((dbg, LEVEL_5, "The TEMP unknown of %N is created %N\n", node, u));
779                 }
780                 return;
781         }
782
783         /* Walk */
784         mark_irn_visited(node);
785
786         if (!is_Block(node)) {
787                 ir_node *block = get_nodes_block(node);
788                 if (walk_condition(block))
789                         DB((dbg, LEVEL_5, "walk block %N\n", block));
790                 copy_walk_n(block, walk_condition, copy_index);
791         }
792
793         arity = get_irn_arity(node);
794         NEW_ARR_A(ir_node *, cpin, arity);
795
796         for (i = 0; i < arity; ++i) {
797                 ir_node *pred = get_irn_n(node, i);
798
799                 if (walk_condition(pred)) {
800                         DB((dbg, LEVEL_5, "walk node %N\n", pred));
801                         copy_walk_n(pred, walk_condition, copy_index);
802                         cpin[i] = get_unroll_copy(pred, copy_index);
803                 } else {
804                         cpin[i] = pred;
805                 }
806         }
807
808         /* copy node / finalize temp node */
809         cp = get_unroll_copy(node, copy_index);
810         if (cp == NULL || is_Unknown(cp)) {
811                 cp = copy_node(node);
812                 set_unroll_copy(node, copy_index, cp);
813                 DB((dbg, LEVEL_5, "The FINAL copy of %N is CREATED %N\n", node, cp));
814         } else {
815                 /* temporary copy is existent but without correct ins */
816                 cp = get_unroll_copy(node, copy_index);
817                 DB((dbg, LEVEL_5, "The FINAL copy of %N is EXISTENT %N\n", node, cp));
818         }
819
820         if (!is_Block(node)) {
821                 ir_node *cpblock = get_unroll_copy(get_nodes_block(node), copy_index);
822
823                 set_nodes_block(cp, cpblock );
824                 if (is_Phi(cp))
825                         add_Block_phi(cpblock, cp);
826         }
827
828         /* Keeps phi list of temporary node. */
829         set_irn_in(cp, ARR_LEN(cpin), cpin);
830 }
831
832 /* Removes all blocks with non-marked predecessors from the condition chain. */
833 static void unmark_not_allowed_cc_blocks(void)
834 {
835         size_t blocks = ARR_LEN(cc_blocks);
836         size_t i;
837
838         for(i = 0; i < blocks; ++i) {
839                 ir_node *block = cc_blocks[i];
840                 int a;
841                 int arity = get_irn_arity(block);
842
843                 /* Head is an exception. */
844                 if (block == loop_head)
845                         continue;
846
847                 for(a = 0; a < arity; ++a) {
848                         if (! is_nodes_block_marked(get_irn_n(block, a))) {
849                                 set_Block_mark(block, 0);
850                                 --inversion_blocks_in_cc;
851                                 DB((dbg, LEVEL_5, "Removed %N from cc (blocks in cc %d)\n",
852                                                 block, inversion_blocks_in_cc));
853
854                                 break;
855                         }
856                 }
857         }
858 }
859
860 /* Unmarks all cc blocks using cc_blocks except head.
861  * TODO: invert head for unrolling? */
862 static void unmark_cc_blocks(void)
863 {
864         size_t blocks = ARR_LEN(cc_blocks);
865         size_t i;
866
867         for(i = 0; i < blocks; ++i) {
868                 ir_node *block = cc_blocks[i];
869
870                 /* TODO Head is an exception. */
871                 /*if (block != loop_head)*/
872                 set_Block_mark(block, 0);
873         }
874         /*inversion_blocks_in_cc = 1;*/
875         inversion_blocks_in_cc = 0;
876
877         /* invalidate */
878         loop_info.cc_size = 0;
879 }
880
881 /**
882  * Populates head_entries with (node, pred_pos) tuples
883  * where the node's pred at pred_pos is in the cc but the node itself is not.
884  * Also finds df loops inside the cc.
885  * Head and condition chain blocks have been marked previously.
886  */
887 static void get_head_outs(ir_node *node, void *env)
888 {
889         int i;
890         int arity = get_irn_arity(node);
891         (void) env;
892
893         for (i = 0; i < arity; ++i) {
894                 if (!is_nodes_block_marked(node) && is_nodes_block_marked(get_irn_n(node, i))) {
895                         entry_edge entry;
896                         entry.node = node;
897                         entry.pos = i;
898                         /* Saving also predecessor seems redundant, but becomes
899                          * necessary when changing position of it, before
900                          * dereferencing it.*/
901                         entry.pred = get_irn_n(node, i);
902                         ARR_APP1(entry_edge, cur_head_outs, entry);
903                 }
904         }
905
906         arity = get_irn_arity(loop_head);
907
908         /* Find df loops inside the cc */
909         if (is_Phi(node) && get_nodes_block(node) == loop_head) {
910                 for (i = 0; i < arity; ++i) {
911                         if (is_own_backedge(loop_head, i)) {
912                                 if (is_nodes_block_marked(get_irn_n(node, i))) {
913                                         entry_edge entry;
914                                         entry.node = node;
915                                         entry.pos = i;
916                                         entry.pred = get_irn_n(node, i);
917                                         ARR_APP1(entry_edge, head_df_loop, entry);
918                                         DB((dbg, LEVEL_5, "Found incc assignment node %N @%d is pred %N, graph %N %N\n",
919                                                         node, i, entry.pred, current_ir_graph, get_irg_start_block(current_ir_graph)));
920                                 }
921                         }
922                 }
923         }
924 }
925
926 /**
927  * Find condition chains, and add them to be inverted.
928  * A block belongs to the chain if a condition branches out of the loop.
929  * (Some blocks need to be removed once again.)
930  * Marks the block (and records it in cc_blocks) if it belongs to the condition chain.
931  */
932 static void find_condition_chain(ir_node *block)
933 {
934         const    ir_edge_t *edge;
935         bool     mark     = false;
936         bool     has_be   = false;
937         bool     jmp_only = true;
938         unsigned nodes_n  = 0;
939
940         mark_irn_visited(block);
941
942         DB((dbg, LEVEL_5, "condition_chains for block %N\n", block));
943
944         /* Get node count */
945         foreach_out_edge_kind(block, edge, EDGE_KIND_NORMAL) {
946                 ++nodes_n;
947         }
948
949         /* Check if node count would exceed maximum cc size.
950          * TODO
951          * This is not optimal, as we search depth-first and break here,
952          * continuing with another subtree. */
953         if (loop_info.cc_size + nodes_n > opt_params.max_cc_size) {
954                 set_Block_mark(block, 0);
955                 return;
956         }
957
958         /* Check if block only has a jmp instruction. */
959         foreach_out_edge(block, edge) {
960                 ir_node *src = get_edge_src_irn(edge);
961
962                 if (!is_Block(src) && !is_Jmp(src)) {
963                         jmp_only = false;
964                 }
965         }
966
967         /* Check cf outs if one is leaving the loop,
968          * or if this node has a backedge. */
969         foreach_block_succ(block, edge) {
970                 ir_node *src = get_edge_src_irn(edge);
971                 int pos = get_edge_src_pos(edge);
972
973                 if (!is_in_loop(src))
974                         mark = true;
975
976                 /* Inverting blocks with backedge outs leads to a cf edge
977                  * from the inverted head, into the inverted head (skipping the body).
978                  * As the body becomes the new loop head,
979                  * this would introduce another loop in the existing loop.
980                  * This loop inversion cannot cope with this case. */
981                 if (is_backedge(src, pos)) {
982                         has_be = true;
983                         break;
984                 }
985         }
986
987         /* We need all predecessors to already belong to the condition chain.
988          * Example of wrong case:  * == in cc
989          *
990          *     Head*             ,--.
991          *    /|   \            B   |
992          *   / A*  B           /    |
993          *  / /\   /          ?     |
994          *   /   C*      =>      D  |
995          *      /  D           Head |
996          *     /               A  \_|
997          *                      C
998          */
999         /* Collect blocks containing only a Jmp.
1000          * Do not collect blocks with backedge outs. */
1001         if ((jmp_only || mark) && !has_be) {
1002                 set_Block_mark(block, 1);
1003                 ++inversion_blocks_in_cc;
1004                 loop_info.cc_size += nodes_n;
1005                 DB((dbg, LEVEL_5, "block %N is part of condition chain\n", block));
1006                 ARR_APP1(ir_node *, cc_blocks, block);
1007         } else {
1008                 set_Block_mark(block, 0);
1009         }
1010
1011         foreach_block_succ(block, edge) {
1012                 ir_node *src = get_edge_src_irn( edge );
1013
1014                 if (is_in_loop(src) && ! irn_visited(src))
1015                         find_condition_chain(src);
1016         }
1017 }
1018
1019 /**
1020  * Rewires the copied condition chain. Removes backedges
1021  * as this condition chain is prior to the loop.
1022  * Copy of loop_head must have phi list and old (unfixed) backedge info of the loop head.
1023  * (loop_head is already fixed, we cannot rely on it.)
1024  */
1025 static void fix_copy_inversion(void)
1026 {
1027         ir_node *new_head;
1028         ir_node **ins;
1029         ir_node **phis;
1030         ir_node *phi, *next;
1031         ir_node *head_cp = get_inversion_copy(loop_head);
1032         ir_graph *irg    = get_irn_irg(head_cp);
1033         int arity        = get_irn_arity(head_cp);
1034         int backedges    = get_backedge_n(head_cp, false);
1035         int new_arity    = arity - backedges;
1036         int pos;
1037         int i;
1038
1039         NEW_ARR_A(ir_node *, ins, new_arity);
1040
1041         pos = 0;
1042         /* Remove block backedges */
1043         for(i = 0; i < arity; ++i) {
1044                 if (!is_backedge(head_cp, i))
1045                         ins[pos++] = get_irn_n(head_cp, i);
1046         }
1047
1048         new_head = new_r_Block(irg, new_arity, ins);
1049
1050         phis = NEW_ARR_F(ir_node *, 0);
1051
1052         for_each_phi_safe(get_Block_phis(head_cp), phi, next) {
1053                 ir_node *new_phi;
1054                 NEW_ARR_A(ir_node *, ins, new_arity);
1055                 pos = 0;
1056                 for(i = 0; i < arity; ++i) {
1057                         if (!is_backedge(head_cp, i))
1058                                 ins[pos++] = get_irn_n(phi, i);
1059                 }
1060                 new_phi = new_rd_Phi(get_irn_dbg_info(phi),
1061                                 new_head, new_arity, ins,
1062                                 get_irn_mode(phi));
1063                 ARR_APP1(ir_node *, phis, new_phi);
1064         }
1065
1066         pos = 0;
1067         for_each_phi_safe(get_Block_phis(head_cp), phi, next) {
1068                 exchange(phi, phis[pos++]);
1069         }
1070
1071         exchange(head_cp, new_head);
1072
1073         DEL_ARR_F(phis);
1074 }
1075
1076
1077 /* Puts the original condition chain at the end of the loop,
1078  * subsequent to the body.
1079  * Relies on block phi list and correct backedges.
1080  */
1081 static void fix_head_inversion(void)
1082 {
1083         ir_node *new_head;
1084         ir_node **ins;
1085         ir_node *phi, *next;
1086         ir_node **phis;
1087         ir_graph *irg = get_irn_irg(loop_head);
1088         int arity     = get_irn_arity(loop_head);
1089         int backedges = get_backedge_n(loop_head, false);
1090         int new_arity = backedges;
1091         int pos;
1092         int i;
1093
1094         NEW_ARR_A(ir_node *, ins, new_arity);
1095
1096         pos = 0;
1097         /* Keep only backedges */
1098         for(i = 0; i < arity; ++i) {
1099                 if (is_own_backedge(loop_head, i))
1100                         ins[pos++] = get_irn_n(loop_head, i);
1101         }
1102
1103         new_head = new_r_Block(irg, new_arity, ins);
1104
1105         phis = NEW_ARR_F(ir_node *, 0);
1106
1107         for_each_phi(loop_head, phi) {
1108                 ir_node *new_phi;
1109                 DB((dbg, LEVEL_5, "Fixing phi %N of loop head\n", phi));
1110
1111                 NEW_ARR_A(ir_node *, ins, new_arity);
1112
1113                 pos = 0;
1114                 for (i = 0; i < arity; ++i) {
1115                         ir_node *pred = get_irn_n(phi, i);
1116
1117                         if (is_own_backedge(loop_head, i)) {
1118                                 /* If assignment is in the condition chain,
1119                                  * we need to create a phi in the new loop head.
1120                                  * This can only happen for df, not cf. See find_condition_chains. */
1121                                 /*if (is_nodes_block_marked(pred)) {
1122                                         ins[pos++] = pred;
1123                                 } else {*/
1124                                 ins[pos++] = pred;
1125
1126                         }
1127                 }
1128
1129                 new_phi = new_rd_Phi(get_irn_dbg_info(phi),
1130                         new_head, new_arity, ins,
1131                         get_irn_mode(phi));
1132
1133                 ARR_APP1(ir_node *, phis, new_phi);
1134
1135                 DB((dbg, LEVEL_5, "fix inverted head should exch %N by %N (pos %d)\n", phi, new_phi, pos ));
1136         }
1137
1138         pos = 0;
1139         for_each_phi_safe(get_Block_phis(loop_head), phi, next) {
1140                 DB((dbg, LEVEL_5, "fix inverted exch phi %N by %N\n", phi, phis[pos]));
1141                 if (phis[pos] != phi)
1142                         exchange(phi, phis[pos++]);
1143         }
1144
1145         DEL_ARR_F(phis);
1146
1147         DB((dbg, LEVEL_5, "fix inverted head exch head block %N by %N\n", loop_head, new_head));
1148         exchange(loop_head, new_head);
1149 }
1150
1151 /* Does the loop inversion.  */
1152 static void inversion_walk(ir_graph *irg, entry_edge *head_entries)
1153 {
1154         size_t i;
1155
1156         /*
1157          * The order of rewiring bottom-up is crucial.
1158          * Any change of the order leads to lost information that would be needed later.
1159          */
1160
1161         ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
1162
1163         /* 1. clone condition chain */
1164         inc_irg_visited(irg);
1165
1166         for (i = 0; i < ARR_LEN(head_entries); ++i) {
1167                 entry_edge entry = head_entries[i];
1168                 ir_node *pred = get_irn_n(entry.node, entry.pos);
1169
1170                 DB((dbg, LEVEL_5, "\nInit walk block %N\n", pred));
1171
1172                 copy_walk(pred, is_nodes_block_marked, cur_loop);
1173         }
1174
1175         ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
1176
1177         /* 2. Extends the head control flow successors ins
1178          *    with the definitions of the copied head node. */
1179         for (i = 0; i < ARR_LEN(head_entries); ++i) {
1180                 entry_edge head_out = head_entries[i];
1181
1182                 if (is_Block(head_out.node))
1183                         extend_ins_by_copy(head_out.node, head_out.pos);
1184         }
1185
1186         /* 3. construct_ssa for users of definitions in the condition chain,
1187          *    as there is now a second definition. */
1188         for (i = 0; i < ARR_LEN(head_entries); ++i) {
1189                 entry_edge head_out = head_entries[i];
1190
1191                 /* Ignore keepalives */
1192                 if (is_End(head_out.node))
1193                         continue;
1194
1195                 /* Construct ssa for assignments in the condition chain. */
1196                 if (!is_Block(head_out.node)) {
1197                         ir_node *pred, *cppred, *block, *cpblock;
1198
1199                         pred = head_out.pred;
1200                         cppred = get_inversion_copy(pred);
1201                         block = get_nodes_block(pred);
1202                         cpblock = get_nodes_block(cppred);
1203                         construct_ssa(block, pred, cpblock, cppred);
1204                 }
1205         }
1206
1207         /*
1208          * If there is an assignment in the condition chain
1209          * with a user also in the condition chain,
1210          * the dominance frontier is in the new loop head.
1211          * The dataflow loop is completely in the condition chain.
1212          * Goal:
1213          *  To be wired: >|
1214          *
1215          *  | ,--.   |
1216          * Phi_cp |  | copied condition chain
1217          * >| |   |  |
1218          * >| ?__/   |
1219          * >| ,-.
1220          *  Phi* |   | new loop head with newly created phi.
1221          *   |   |
1222          *  Phi  |   | original, inverted condition chain
1223          *   |   |   |
1224          *   ?__/    |
1225          *
1226          */
1227         for (i = 0; i < ARR_LEN(head_df_loop); ++i) {
1228                 entry_edge head_out = head_df_loop[i];
1229
1230                 /* Construct ssa for assignments in the condition chain. */
1231                 ir_node *pred, *cppred, *block, *cpblock;
1232
1233                 pred = head_out.pred;
1234                 cppred = get_inversion_copy(pred);
1235                 assert(cppred && pred);
1236                 block = get_nodes_block(pred);
1237                 cpblock = get_nodes_block(cppred);
1238                 construct_ssa(block, pred, cpblock, cppred);
1239         }
1240
1241         /* 4. Remove the ins which are no backedges from the original condition chain
1242          *    as the cc is now subsequent to the body. */
1243         fix_head_inversion();
1244
1245         /* 5. Remove the backedges of the copied condition chain,
1246          *    because it is going to be the new 'head' in advance to the loop. */
1247         fix_copy_inversion();
1248
1249 }
1250
1251 /* Performs loop inversion of cur_loop if possible and reasonable. */
1252 static void loop_inversion(ir_graph *irg)
1253 {
1254         int      loop_depth;
1255         unsigned max_loop_nodes = opt_params.max_loop_size;
1256         unsigned max_loop_nodes_adapted;
1257         int      depth_adaption = opt_params.depth_adaption;
1258
1259         bool do_inversion = true;
1260
1261         /* Depth of 0 is the procedure and 1 a topmost loop. */
1262         loop_depth = get_loop_depth(cur_loop) - 1;
1263
1264         /* Adapt the allowed size to the loop nest depth (percent-based). */
1265         max_loop_nodes_adapted = get_max_nodes_adapted(loop_depth);
1266
1267         DB((dbg, LEVEL_1, "max_nodes: %d\nmax_nodes_adapted %d at depth of %d (adaption %d)\n",
1268                         max_loop_nodes, max_loop_nodes_adapted, loop_depth, depth_adaption));
1269
1270         if (loop_info.nodes == 0)
1271                 return;
1272
1273         if (loop_info.nodes > max_loop_nodes) {
1274                 /* Only for stats */
1275                 DB((dbg, LEVEL_1, "Nodes %d > allowed nodes %d\n",
1276                         loop_info.nodes, max_loop_nodes));
1277                 ++stats.too_large;
1278                 /* no RETURN */
1279                 /* Adaption might change it */
1280         }
1281
1282         /* Limit processing to loops smaller than given parameter. */
1283         if (loop_info.nodes > max_loop_nodes_adapted) {
1284                 DB((dbg, LEVEL_1, "Nodes %d > allowed nodes (depth %d adapted) %d\n",
1285                         loop_info.nodes, loop_depth, max_loop_nodes_adapted));
1286                 ++stats.too_large_adapted;
1287                 return;
1288         }
1289
1290         if (loop_info.calls > opt_params.allowed_calls) {
1291                 DB((dbg, LEVEL_1, "Calls %d > allowed calls %d\n",
1292                         loop_info.calls, opt_params.allowed_calls));
1293                 ++stats.calls_limit;
1294                 return;
1295         }
1296
1297         /*inversion_head_node_limit = INT_MAX;*/
1298         ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK);
1299
1300         /* Reset block marks.
1301          * We use block marks to flag blocks of the original condition chain. */
1302         irg_walk_graph(irg, reset_block_mark, NULL, NULL);
1303
1304         /*loop_info.blocks = get_loop_n_blocks(cur_loop);*/
1305         cond_chain_entries = NEW_ARR_F(entry_edge, 0);
1306         head_df_loop = NEW_ARR_F(entry_edge, 0);
1307
1308         /*head_inversion_node_count = 0;*/
1309         inversion_blocks_in_cc = 0;
1310
1311         /* Use a node map to keep copies of nodes from the condition chain. */
1312         ir_nodemap_init(&map, irg);
1313         obstack_init(&obst);
1314
1315         /* Search for condition chains and temporarily save the blocks in an array. */
1316         cc_blocks = NEW_ARR_F(ir_node *, 0);
1317         inc_irg_visited(irg);
1318         find_condition_chain(loop_head);
1319
1320         unmark_not_allowed_cc_blocks();
1321         DEL_ARR_F(cc_blocks);
1322
1323         /* Condition chain too large.
1324          * Loop should better be small enough to fit into the cache. */
1325         /* TODO Of course, we should take a small enough cc in the first place,
1326          * which is not that simple. (bin packing)  */
1327         if (loop_info.cc_size > opt_params.max_cc_size) {
1328                 ++stats.cc_limit_reached;
1329
1330                 do_inversion = false;
1331
1332                 /* Unmark cc blocks except the head.
1333                  * Invert head only for possible unrolling. */
1334                 unmark_cc_blocks();
1335         }
1336
1337         /* We also catch endless loops here,
1338          * because they do not have a condition chain. */
1339         if (inversion_blocks_in_cc < 1) {
1340                 do_inversion = false;
1341                 DB((dbg, LEVEL_3,
1342                         "Loop contains %d (less than 1) invertible blocks => No Inversion done.\n",
1343                         inversion_blocks_in_cc));
1344         }
1345
1346         if (do_inversion) {
1347                 cur_head_outs = NEW_ARR_F(entry_edge, 0);
1348
1349                 /* Get all edges pointing into the condition chain. */
1350                 irg_walk_graph(irg, get_head_outs, NULL, NULL);
1351
1352                 /* Do the inversion */
1353                 inversion_walk(irg, cur_head_outs);
1354
1355                 DEL_ARR_F(cur_head_outs);
1356
1357                 /* Duplicated blocks changed doms */
1358                 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_DOMINANCE
1359                                    | IR_GRAPH_STATE_CONSISTENT_LOOPINFO);
1360
1361                 ++stats.inverted;
1362         }
1363
1364         /* free */
1365         obstack_free(&obst, NULL);
1366         ir_nodemap_destroy(&map);
1367         DEL_ARR_F(cond_chain_entries);
1368         DEL_ARR_F(head_df_loop);
1369
1370         ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK);
1371 }
1372
1373 /* Fix the original loop_head's ins for the invariant unrolling case. */
1374 static void unrolling_fix_loop_head_inv(void)
1375 {
1376         ir_node *ins[2];
1377         ir_node *phi;
1378         ir_node *proj = new_Proj(loop_info.duff_cond, mode_X, 0);
1379         ir_node *head_pred = get_irn_n(loop_head, loop_info.be_src_pos);
1380         ir_node *loop_condition = get_unroll_copy(head_pred, unroll_nr - 1);
1381
1382         /* The original loop_head's ins are now:
1383          * the Duff dispatch block and its own backedge. */
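        /* ins[0] comes from the last unrolled copy of the backedge predecessor,
         * ins[1] is Proj 0 of the Duff dispatch (duff_cond) entering the
         * original head. */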
1384
1385         ins[0] = loop_condition;
1386         ins[1] = proj;
1387         set_irn_in(loop_head, 2, ins);
1388         DB((dbg, LEVEL_4, "Rewire ins of block loophead %N to pred %N and duffs entry %N \n" , loop_head, ins[0], ins[1]));
1389
1390         for_each_phi(loop_head, phi) {
1391                 ir_node *pred = get_irn_n(phi, loop_info.be_src_pos);
1392                 /* TODO we think it is a phi, but for Mergesort it is not the case.*/
1393
1394                 ir_node *last_pred = get_unroll_copy(pred, unroll_nr - 1);
1395
1396                 ins[0] = last_pred;
1397                 ins[1] = (ir_node*)get_irn_link(phi);
1398                 set_irn_in(phi, 2, ins);
1399                 DB((dbg, LEVEL_4, "Rewire ins of loophead phi %N to pred %N and duffs entry %N \n" , phi, ins[0], ins[1]));
1400         }
1401 }
1402
1403 /* Removes previously created phis with only 1 in. */
1404 static void correct_phis(ir_node *node, void *env)
1405 {
1406         (void)env;
1407
1408         if (is_Phi(node) && get_irn_arity(node) == 1) {
1409                 ir_node *exch;
1410                 ir_node *in[1];
1411
1412                 in[0] = get_irn_n(node, 0);
1413
1414                 exch = new_rd_Phi(get_irn_dbg_info(node),
1415                     get_nodes_block(node), 1, in,
1416                         get_irn_mode(node));
1417
1418                 exchange(node, exch);
1419         }
1420 }
1421
1422 /* Unrolling: Rewire floating copies. */
1423 static void place_copies(int copies)
1424 {
1425         ir_node *loophead = loop_head;
1426         size_t i;
1427         int c;
1428         int be_src_pos = loop_info.be_src_pos;
1429
1430         /* Serialize loops by fixing their head ins.
1431          * The copies are processed first.
1432          * The original loop is done after that, to keep backedge info. */
1433         for (c = 0; c < copies; ++c) {
1434                 ir_node *upper = get_unroll_copy(loophead, c);
1435                 ir_node *lower = get_unroll_copy(loophead, c + 1);
1436                 ir_node *phi;
1437                 ir_node *topmost_be_block = get_nodes_block(get_irn_n(loophead, be_src_pos));
1438
1439                 /* Important: get the preds first and then their copy. */
1440                 ir_node *upper_be_block = get_unroll_copy(topmost_be_block, c);
1441                 ir_node *new_jmp = new_r_Jmp(upper_be_block);
1442                 DB((dbg, LEVEL_5, " place_copies upper %N lower %N\n", upper, lower));
1443
1444                 DB((dbg, LEVEL_5, "topmost be block %N \n", topmost_be_block));
1445
1446                 if (loop_info.unroll_kind == constant) {
1447                         ir_node *ins[1];
1448                         ins[0] = new_jmp;
1449                         set_irn_in(lower, 1, ins);
1450
1451                         for_each_phi(loophead, phi) {
1452                                 ir_node *topmost_def = get_irn_n(phi, be_src_pos);
1453                                 ir_node *upper_def = get_unroll_copy(topmost_def, c);
1454                                 ir_node *lower_phi = get_unroll_copy(phi, c + 1);
1455
1456                                 /* It is possible that the value used
1457                                  * in the OWN backedge path is NOT defined in this loop. */
1458                                 if (is_in_loop(topmost_def))
1459                                         ins[0] = upper_def;
1460                                 else
1461                                         ins[0] = topmost_def;
1462
1463                                 set_irn_in(lower_phi, 1, ins);
1464                                 /* Need to replace phis with 1 in later. */
1465                         }
1466                 } else {
1467                         /* Invariant case */
1468                         /* Every node has 2 ins. One from the duff blocks
1469                          * and one from the previously unrolled loop. */
1470                         ir_node *ins[2];
1471                         /* Calculate corresponding projection of mod result for this copy c */
1472                         ir_node *proj = new_Proj(loop_info.duff_cond, mode_X, unroll_nr - c - 1);
1473                         DB((dbg, LEVEL_4, "New duff proj %N\n" , proj));
1474
1475                         ins[0] = new_jmp;
1476                         ins[1] = proj;
1477                         set_irn_in(lower, 2, ins);
1478                         DB((dbg, LEVEL_4, "Rewire ins of Block %N to pred %N and duffs entry %N \n" , lower, ins[0], ins[1]));
1479
1480                         for_each_phi(loophead, phi) {
1481                                 ir_node *topmost_phi_pred = get_irn_n(phi, be_src_pos);
1482                                 ir_node *upper_phi_pred;
1483                                 ir_node *lower_phi;
1484                                 ir_node *duff_phi;
1485
1486                                 lower_phi = get_unroll_copy(phi, c + 1);
1487                                 duff_phi = (ir_node*)get_irn_link(phi);
1488                                 DB((dbg, LEVEL_4, "DD Link of %N is %N\n" , phi, duff_phi));
1489
1490                                 /*  */
1491                                 if (is_in_loop(topmost_phi_pred)) {
1492                                         upper_phi_pred = get_unroll_copy(topmost_phi_pred, c);
1493                                 } else {
1494                                         upper_phi_pred = topmost_phi_pred;
1495                                 }
1496
1497                                 ins[0] = upper_phi_pred;
1498                                 ins[1] = duff_phi;
1499                                 set_irn_in(lower_phi, 2, ins);
1500                                 DB((dbg, LEVEL_4, "Rewire ins of %N to pred %N and duffs entry %N \n" , lower_phi, ins[0], ins[1]));
1501                         }
1502                 }
1503         }
1504
1505         /* Reconnect last copy. */
1506         for (i = 0; i < ARR_LEN(loop_entries); ++i) {
1507                 entry_edge edge = loop_entries[i];
1508                 /* Last copy is at the bottom */
1509                 ir_node *new_pred = get_unroll_copy(edge.pred, copies);
1510                 set_irn_n(edge.node, edge.pos, new_pred);
1511         }
1512
	/* Fix the original loop's head.
	 * Done last, as the ins and backedge info were needed until now. */
1515         if (loop_info.unroll_kind == constant) {
1516                 ir_node *phi;
1517                 ir_node *head_pred = get_irn_n(loop_head, be_src_pos);
1518                 ir_node *loop_condition = get_unroll_copy(head_pred, unroll_nr - 1);
1519
1520                 set_irn_n(loop_head, loop_info.be_src_pos, loop_condition);
1521
1522                 for_each_phi(loop_head, phi) {
1523                         ir_node *pred = get_irn_n(phi, be_src_pos);
1524                         ir_node *last_pred;
1525
			/* It is possible that the value used
			 * on the OWN backedge path is NOT assigned in this loop. */
1528                         if (is_in_loop(pred))
1529                                 last_pred = get_unroll_copy(pred, copies);
1530                         else
1531                                 last_pred = pred;
1532                         set_irn_n(phi, be_src_pos, last_pred);
1533                 }
1534
1535         } else {
1536                 unrolling_fix_loop_head_inv();
1537         }
1538 }
1539
1540 /* Copies the cur_loop several times. */
1541 static void copy_loop(entry_edge *cur_loop_outs, int copies)
1542 {
1543         int c;
1544
1545         ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1546
1547         for (c = 0; c < copies; ++c) {
1548                 size_t i;
1549
1550                 inc_irg_visited(current_ir_graph);
1551
1552                 DB((dbg, LEVEL_5, "         ### Copy_loop  copy nr: %d ###\n", c));
1553                 for (i = 0; i < ARR_LEN(cur_loop_outs); ++i) {
1554                         entry_edge entry = cur_loop_outs[i];
1555                         ir_node *pred = get_irn_n(entry.node, entry.pos);
1556
1557                         copy_walk_n(pred, is_in_loop, c + 1);
1558                 }
1559         }
1560
1561         ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1562 }
1563
1564
/* Creates a new phi from the given phi node, omitting own backedges and
 * using be_block as the supplier of backedge information. */
1567 static ir_node *clone_phis_sans_bes(ir_node *phi, ir_node *be_block, ir_node *dest_block)
1568 {
1569         ir_node **ins;
1570         int arity = get_irn_arity(phi);
1571         int i, c = 0;
1572         ir_node *newphi;
1573
1574         assert(get_irn_arity(phi) == get_irn_arity(be_block));
1575         assert(is_Phi(phi));
1576
1577         ins = NEW_ARR_F(ir_node *, arity);
1578         for (i = 0; i < arity; ++i) {
1579                 if (! is_own_backedge(be_block, i)) {
1580                         ins[c] = get_irn_n(phi, i);
1581                         ++c;
1582                 }
1583         }
1584
1585         newphi = new_r_Phi(dest_block, c, ins, get_irn_mode(phi));
1586
1587         set_irn_link(phi, newphi);
1588         DB((dbg, LEVEL_4, "Linking for duffs device %N to %N\n", phi, newphi));
1589
1590         return newphi;
1591 }
1592
/* Creates a new block from the given block node, omitting own backedges and
 * using be_block as the supplier of backedge information. */
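/* Example (illustrative, assuming a typical loop head): if be_block has the
 * ins [preheader_jmp, backedge_jmp] and only the second in is an own
 * backedge, the clone gets the single in [preheader_jmp]. */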
1595 static ir_node *clone_block_sans_bes(ir_node *node, ir_node *be_block)
1596 {
1597         int arity = get_irn_arity(node);
1598         int i, c = 0;
1599         ir_node **ins;
1600
1601         assert(get_irn_arity(node) == get_irn_arity(be_block));
1602         assert(is_Block(node));
1603
1604         NEW_ARR_A(ir_node *, ins, arity);
1605         for (i = 0; i < arity; ++i) {
1606                 if (! is_own_backedge(be_block, i)) {
1607                         ins[c] = get_irn_n(node, i);
1608                         ++c;
1609                 }
1610         }
1611
1612         return new_Block(c, ins);
1613 }
1614
/* Creates a structure that calculates the absolute value of node op.
 * Returns the Mux node carrying the absolute value. */
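/* At the C level the constructed graph corresponds to (a sketch only):
 *
 *   result = (op < 0) ? -op : op;
 *
 * The Cmp provides the condition, the Minus the negated value and the Mux
 * performs the select. */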
1617 static ir_node *new_Abs(ir_node *op, ir_mode *mode)
1618 {
	ir_graph *irg      = get_irn_irg(op);
	ir_node  *block    = get_nodes_block(op);
	ir_node  *zero     = new_r_Const(irg, get_mode_null(mode));
	ir_node  *cmp      = new_r_Cmp(block, op, zero, ir_relation_less);
	ir_node  *minus_op = new_r_Minus(block, op, mode);
	ir_node  *mux      = new_r_Mux(block, cmp, op, minus_op, mode);

	return mux;
1627 }
1628
1629
/* Creates the blocks for duffs device, using previously obtained
 * information about the iv.
 * TODO split */
1633 static void create_duffs_block(void)
1634 {
1635         ir_mode *mode;
1636
1637         ir_node *block1, *count_block, *duff_block;
1638         ir_node *ems, *ems_mod, *ems_div, *ems_mod_proj, *cmp_null,
1639                 *ems_mode_cond, *x_true, *x_false, *const_null;
1640         ir_node *true_val, *false_val;
1641         ir_node *ins[2];
1642
1643         ir_node *duff_mod, *proj, *cond;
1644
1645         ir_node *count, *correction, *unroll_c;
1646         ir_node *cmp_bad_count, *good_count, *bad_count, *count_phi, *bad_count_neg;
1647         ir_node *phi;
1648
1649         mode = get_irn_mode(loop_info.end_val);
1650         const_null = new_Const(get_mode_null(mode));
1651
	/* TODO naming
	 * 1. Calculate a first approximation of the iteration count.
	 *    Condition: (end - start) % step == 0 */
1655         block1 = clone_block_sans_bes(loop_head, loop_head);
1656         DB((dbg, LEVEL_4, "Duff block 1 %N\n", block1));
1657
	/* Create loop entry phis in the first duff block,
	 * as it becomes the loop's preheader. */
1660         for_each_phi(loop_head, phi) {
		/* Returns the phi's pred if the phi would have arity 1. */
1662                 ir_node *new_phi = clone_phis_sans_bes(phi, loop_head, block1);
1663
1664                 DB((dbg, LEVEL_4, "HEAD %N phi %N\n", loop_head, phi));
1665                 DB((dbg, LEVEL_4, "BLOCK1 %N phi %N\n", block1, new_phi));
1666         }
1667
	ems = new_r_Sub(block1, loop_info.end_val, loop_info.start_val,
		get_irn_mode(loop_info.end_val));
	DB((dbg, LEVEL_4, "BLOCK1 sub %N\n", ems));
1675
1676         DB((dbg, LEVEL_4, "mod ins %N %N\n", ems, loop_info.step));
1677         ems_mod = new_r_Mod(block1,
1678                 new_NoMem(),
1679                 ems,
1680                 loop_info.step,
1681                 mode,
1682                 op_pin_state_pinned);
1683         ems_div = new_r_Div(block1,
1684                 new_NoMem(),
1685                 ems,
1686                 loop_info.step,
1687                 mode,
1688                 op_pin_state_pinned);
1689
1690         DB((dbg, LEVEL_4, "New module node %N\n", ems_mod));
1691
	ems_mod_proj = new_r_Proj(ems_mod, mode, pn_Mod_res);
	cmp_null = new_r_Cmp(block1, ems_mod_proj, const_null, ir_relation_equal);
1694         ems_mode_cond = new_r_Cond(block1, cmp_null);
1695
1696         /* ems % step == 0 */
1697         x_true = new_r_Proj(ems_mode_cond, mode_X, pn_Cond_true);
1698         /* ems % step != 0 */
1699         x_false = new_r_Proj(ems_mode_cond, mode_X, pn_Cond_false);
1700
	/* 2. Second block.
	 * Ensures that duffs device receives a valid count.
	 * Condition:
	 *     decreasing: count < 0
	 *     increasing: count > 0
	 */
1707         ins[0] = x_true;
1708         ins[1] = x_false;
1709
1710         count_block = new_Block(2, ins);
1711         DB((dbg, LEVEL_4, "Duff block 2 %N\n", count_block));
1712
1713
	/* Increase the loop-taken count depending on whether the loop condition
	 * compares against the latest iv value. */
1716         if (loop_info.latest_value == 1) {
1717                 /* ems % step == 0 :  +0 */
1718                 true_val = new_Const(get_mode_null(mode));
1719                 /* ems % step != 0 :  +1 */
1720                 false_val = new_Const(get_mode_one(mode));
1721         } else {
1722                 ir_tarval *tv_two = new_tarval_from_long(2, mode);
1723                 /* ems % step == 0 :  +1 */
1724                 true_val = new_Const(get_mode_one(mode));
1725                 /* ems % step != 0 :  +2 */
1726                 false_val = new_Const(tv_two);
1727         }
1728
1729         ins[0] = true_val;
1730         ins[1] = false_val;
1731
1732         correction = new_r_Phi(count_block, 2, ins, mode);
1733
1734         count = new_r_Proj(ems_div, mode, pn_Div_res);
1735
1736         /* (end - start) / step  +  correction */
1737         count = new_Add(count, correction, mode);
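	/* Worked example (illustrative only, assuming latest_value == 1):
	 * start = 0, end = 10, step = 3
	 *   ems        = 10 - 0 = 10
	 *   ems % step = 1  (!= 0)  ->  correction = 1
	 *   ems / step = 3
	 *   count      = 3 + 1 = 4, the approximated trip count. */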
1738
	/* We preconditioned the loop to be tail-controlled.
	 * So if count is 'wrong' (e.g. 0, or negative/positive depending on
	 * the step direction), we still take the loop once (tail-controlled)
	 * and leave it to the existing loop condition to break out. */
1744
1745         /* Depending on step direction, we have to check for > or < 0 */
1746         if (loop_info.decreasing == 1) {
1747                 cmp_bad_count = new_r_Cmp(count_block, count, const_null,
1748                                           ir_relation_less);
1749         } else {
1750                 cmp_bad_count = new_r_Cmp(count_block, count, const_null,
1751                                           ir_relation_greater);
1752         }
1753
1754         bad_count_neg = new_r_Cond(count_block, cmp_bad_count);
1755         good_count = new_Proj(bad_count_neg, mode_X, pn_Cond_true);
	bad_count = new_Proj(bad_count_neg, mode_X, pn_Cond_false);
1757
	/* 3. Duff Block
	 *    Contains the Mod node that decides which copy to start from. */
1760
1761         ins[0] = good_count;
1762         ins[1] = bad_count;
1763         duff_block = new_Block(2, ins);
1764         DB((dbg, LEVEL_4, "Duff block 3 %N\n", duff_block));
1765
1766         /* Get absolute value */
1767         ins[0] = new_Abs(count, mode);
	/* Manually feed the aforementioned count = 1 (bad case). */
1769         ins[1] = new_Const(get_mode_one(mode));
1770         count_phi = new_r_Phi(duff_block, 2, ins, mode);
1771
1772         unroll_c = new_Const(new_tarval_from_long((long)unroll_nr, mode));
1773
1774         /* count % unroll_nr */
1775         duff_mod = new_r_Mod(duff_block,
1776                 new_NoMem(),
1777                 count_phi,
1778                 unroll_c,
1779                 mode,
1780                 op_pin_state_pinned);
1781
1782
1783         proj = new_Proj(duff_mod, mode, pn_Mod_res);
	/* The Cond is NOT automatically created in the block of the proj! */
1785         cond = new_r_Cond(duff_block, proj);
1786
1787         loop_info.duff_cond = cond;
1788 }
1789
1790 /* Returns 1 if given node is not in loop,
1791  * or if it is a phi of the loop head with only loop invariant defs.
1792  */
1793 static unsigned is_loop_invariant_def(ir_node *node)
1794 {
1795         int i;
1796
1797         if (! is_in_loop(node)) {
1798                 DB((dbg, LEVEL_4, "Not in loop %N\n", node));
1799                 /* || is_Const(node) || is_SymConst(node)) {*/
1800                 return 1;
1801         }
1802
1803         /* If this is a phi of the loophead shared by more than 1 loop,
1804          * we need to check if all defs are not in the loop.  */
1805         if (is_Phi(node)) {
1806                 ir_node *block;
1807                 block = get_nodes_block(node);
1808
1809                 /* To prevent unexpected situations. */
1810                 if (block != loop_head) {
1811                         return 0;
1812                 }
1813
1814                 for (i = 0; i < get_irn_arity(node); ++i) {
			/* Check that every own backedge just feeds the phi back to itself. */
1816                         if (is_own_backedge(block, i) && get_irn_n(node, i) != node)
1817                                 return 0;
1818                 }
1819                 DB((dbg, LEVEL_4, "invar %N\n", node));
1820                 return 1;
1821         }
1822         DB((dbg, LEVEL_4, "Not invar %N\n", node));
1823
1824         return 0;
1825 }
1826
1827 /* Returns 1 if one pred of node is invariant and the other is not.
1828  * invar_pred and other are set analogously. */
1829 static unsigned get_invariant_pred(ir_node *node, ir_node **invar_pred, ir_node **other)
1830 {
1831         ir_node *pred0 = get_irn_n(node, 0);
1832         ir_node *pred1 = get_irn_n(node, 1);
1833
1834         *invar_pred = NULL;
1835         *other = NULL;
1836
1837         if (is_loop_invariant_def(pred0)) {
1838                 DB((dbg, LEVEL_4, "pred0 invar %N\n", pred0));
1839                 *invar_pred = pred0;
1840                 *other = pred1;
1841         }
1842
1843         if (is_loop_invariant_def(pred1)) {
1844                 DB((dbg, LEVEL_4, "pred1 invar %N\n", pred1));
1845
1846                 if (*invar_pred != NULL) {
1847                         /* RETURN. We do not want both preds to be invariant. */
1848                         return 0;
1849                 }
1850
1851                 *other = pred0;
1852                 *invar_pred = pred1;
1853                 return 1;
1854         } else {
1855                 DB((dbg, LEVEL_4, "pred1 not invar %N\n", pred1));
1856
1857                 if (*invar_pred != NULL)
1858                         return 1;
1859                 else
1860                         return 0;
1861         }
1862 }
1863
/* Starts from a phi that may belong to an iv.
 * If an add (or sub) forms a cycle with iteration_phi and the start value
 * fits the given role, 1 is returned and loop_info.start_val as well as
 * loop_info.add are set. */
1868 static unsigned get_start_and_add(ir_node *iteration_phi, unrolling_kind_flag role)
1869 {
1870         int i;
1871         ir_node *found_add = loop_info.add;
1872         int arity = get_irn_arity(iteration_phi);
1873
1874         DB((dbg, LEVEL_4, "Find start and add from %N\n", iteration_phi));
1875
1876         for (i = 0; i < arity; ++i) {
1877
1878                 /* Find start_val which needs to be pred of the iteration_phi.
1879                  * If start_val already known, sanity check. */
1880                 if (!is_backedge(get_nodes_block(loop_info.iteration_phi), i)) {
1881                         ir_node *found_start_val = get_irn_n(loop_info.iteration_phi, i);
1882
1883                         DB((dbg, LEVEL_4, "found_start_val %N\n", found_start_val));
1884
			/* If we already found a start_val, it always has to be the same. */
1886                         if (loop_info.start_val && found_start_val != loop_info.start_val)
1887                                 return 0;
1888
			if ((role == constant) && !(is_SymConst(found_start_val) || is_Const(found_start_val)))
				return 0;
			else if ((role == invariant) && !is_loop_invariant_def(found_start_val))
				return 0;
1893
1894                         loop_info.start_val = found_start_val;
1895                 }
1896
		/* The phi has to be in the loop head.
		 * Follow all own backedges. Every value supplied from these preds of
		 * the phi needs to originate from the same add. */
1900                 if (is_own_backedge(get_nodes_block(loop_info.iteration_phi), i)) {
1901                         ir_node *new_found = get_irn_n(loop_info.iteration_phi,i);
1902
1903                         DB((dbg, LEVEL_4, "is add? %N\n", new_found));
1904
1905                         if (! (is_Add(new_found) || is_Sub(new_found)) || (found_add && found_add != new_found))
1906                                 return 0;
1907                         else
1908                                 found_add = new_found;
1909                 }
1910         }
1911
1912         loop_info.add = found_add;
1913
1914         return 1;
1915 }
1916
1917
1918 /* Returns 1 if one pred of node is a const value and the other is not.
1919  * const_pred and other are set analogously. */
1920 static unsigned get_const_pred(ir_node *node, ir_node **const_pred, ir_node **other)
1921 {
1922         ir_node *pred0 = get_irn_n(node, 0);
1923         ir_node *pred1 = get_irn_n(node, 1);
1924
1925         DB((dbg, LEVEL_4, "Checking for constant pred of %N\n", node));
1926
1927         *const_pred = NULL;
1928         *other = NULL;
1929
1930         /*DB((dbg, LEVEL_4, "is %N const\n", pred0));*/
1931         if (is_Const(pred0) || is_SymConst(pred0)) {
1932                 *const_pred = pred0;
1933                 *other = pred1;
1934         }
1935
1936         /*DB((dbg, LEVEL_4, "is %N const\n", pred1));*/
1937         if (is_Const(pred1) || is_SymConst(pred1)) {
1938                 if (*const_pred != NULL) {
1939                         /* RETURN. We do not want both preds to be constant. */
1940                         return 0;
1941                 }
1942
1943                 *other = pred0;
1944                 *const_pred = pred1;
1945         }
1946
1947         if (*const_pred == NULL)
1948                 return 0;
1949         else
1950                 return 1;
1951 }
1952
/* Returns 1 if the loop exits within 2 steps of the iv.
 * norm_proj is the relation under which we stay in the loop. */
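/* Worked example (illustrative only): stepped = 9, step = 3, end = 10,
 * norm_proj = less ("stay while iv < end"):
 *   9 < 10 holds, so this run does not yet satisfy the exit condition;
 *   next = 9 + 3 = 12 and 12 < 10 fails, so the loop exits within one more
 *   step: count_tar is increased by one and 1 is returned. */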
1955 static unsigned simulate_next(ir_tarval **count_tar,
1956                 ir_tarval *stepped, ir_tarval *step_tar, ir_tarval *end_tar,
1957                 ir_relation norm_proj)
1958 {
1959         ir_tarval *next;
1960
1961         DB((dbg, LEVEL_4, "Loop taken if (stepped)%ld %s (end)%ld ",
1962                                 get_tarval_long(stepped),
1963                                 get_relation_string((norm_proj)),
1964                                 get_tarval_long(end_tar)));
1965         DB((dbg, LEVEL_4, "comparing latest value %d\n", loop_info.latest_value));
1966
1967         /* If current iv does not stay in the loop,
1968          * this run satisfied the exit condition. */
1969         if (! (tarval_cmp(stepped, end_tar) & norm_proj))
1970                 return 1;
1971
1972         DB((dbg, LEVEL_4, "Result: (stepped)%ld IS %s (end)%ld\n",
1973                                 get_tarval_long(stepped),
1974                                 get_relation_string(tarval_cmp(stepped, end_tar)),
1975                                 get_tarval_long(end_tar)));
1976
1977         /* next step */
1978         if (is_Add(loop_info.add))
1979                 next = tarval_add(stepped, step_tar);
1980         else
1981                 /* sub */
1982                 next = tarval_sub(stepped, step_tar, get_irn_mode(loop_info.end_val));
1983
1984         DB((dbg, LEVEL_4, "Loop taken if %ld %s %ld ",
1985                                 get_tarval_long(next),
1986                                 get_relation_string(norm_proj),
1987                                 get_tarval_long(end_tar)));
1988         DB((dbg, LEVEL_4, "comparing latest value %d\n", loop_info.latest_value));
1989
1990         /* Increase steps. */
1991         *count_tar = tarval_add(*count_tar, get_tarval_one(get_tarval_mode(*count_tar)));
1992
1993         /* Next has to fail the loop condition, or we will never exit. */
1994         if (! (tarval_cmp(next, end_tar) & norm_proj))
1995                 return 1;
1996         else
1997                 return 0;
1998 }
1999
/* Check if the loop meets the requirements for a 'simple loop':
 * - exactly one cf out
 * - allowed calls
 * - max nodes after unrolling
 * - tail-controlled
 * - exactly one backedge
 * - cmp
 * Returns the Cmp node or NULL. */
2008 static ir_node *is_simple_loop(void)
2009 {
2010         int arity, i;
2011         ir_node *loop_block, *exit_block, *projx, *cond, *cmp;
2012
2013         /* Maximum of one condition, and no endless loops. */
2014         if (loop_info.cf_outs != 1)
2015                 return NULL;
2016
2017         DB((dbg, LEVEL_4, "1 loop exit\n"));
2018
2019         /* Calculate maximum unroll_nr keeping node count below limit. */
2020         loop_info.max_unroll = (int)((double)opt_params.max_unrolled_loop_size / (double)loop_info.nodes);
2021         if (loop_info.max_unroll < 2) {
2022                 ++stats.too_large;
2023                 return NULL;
2024         }
2025
2026         DB((dbg, LEVEL_4, "maximum unroll factor %u, to not exceed node limit \n",
2027                 opt_params.max_unrolled_loop_size));
2028
2029         arity = get_irn_arity(loop_head);
2030         /* RETURN if we have more than 1 be. */
2031         /* Get my backedges without alien bes. */
2032         loop_block = NULL;
2033         for (i = 0; i < arity; ++i) {
2034                 ir_node *pred = get_irn_n(loop_head, i);
2035                 if (is_own_backedge(loop_head, i)) {
2036                         if (loop_block)
2037                                 /* Our simple loops may have only one backedge. */
2038                                 return NULL;
2039                         else {
2040                                 loop_block = get_nodes_block(pred);
2041                                 loop_info.be_src_pos = i;
2042                         }
2043                 }
2044         }
2045
2046         DB((dbg, LEVEL_4, "loop has 1 own backedge.\n"));
2047
2048         exit_block = get_nodes_block(loop_info.cf_out.pred);
2049         /* The loop has to be tail-controlled.
2050          * This can be changed/improved,
2051          * but we would need a duff iv. */
2052         if (exit_block != loop_block)
2053                 return NULL;
2054
2055         DB((dbg, LEVEL_4, "tail-controlled loop.\n"));
2056
2057         /* find value on which loop exit depends */
2058         projx = loop_info.cf_out.pred;
2059         cond = get_irn_n(projx, 0);
2060         cmp = get_irn_n(cond, 0);
2061
2062         if (!is_Cmp(cmp))
2063                 return NULL;
2064
2065         DB((dbg, LEVEL_5, "projection is %s\n", get_relation_string(get_Proj_proj(projx))));
2066
2067         switch(get_Proj_proj(projx)) {
2068                 case pn_Cond_false:
2069                         loop_info.exit_cond = 0;
2070                         break;
2071                 case pn_Cond_true:
2072                         loop_info.exit_cond = 1;
2073                         break;
2074                 default:
2075                         panic("Cond Proj_proj other than true/false");
2076         }
2077
2078         DB((dbg, LEVEL_4, "Valid Cmp.\n"));
2079         return cmp;
2080 }
2081
/* Returns 1 if all three nodes have mode mode_Iu, or all have mode mode_Is. */
2083 static unsigned are_mode_I(ir_node *n1, ir_node* n2, ir_node *n3)
2084 {
2085         ir_mode *m1 = get_irn_mode(n1);
2086         ir_mode *m2 = get_irn_mode(n2);
2087         ir_mode *m3 = get_irn_mode(n3);
2088
2089         if ((m1 == mode_Iu && m2 == mode_Iu && m3 == mode_Iu) ||
2090             (m1 == mode_Is && m2 == mode_Is && m3 == mode_Is))
2091                 return 1;
2092         else
2093                 return 0;
2094 }
2095
2096 /* Checks if cur_loop is a simple tail-controlled counting loop
2097  * with start and end value loop invariant, step constant. */
2098 static unsigned get_unroll_decision_invariant(void)
2099 {
2100
2101         ir_node   *projres, *loop_condition, *iteration_path;
2102         unsigned   success;
2103         ir_tarval *step_tar;
2104         ir_mode   *mode;
2105
2106
2107         /* RETURN if loop is not 'simple' */
2108         projres = is_simple_loop();
2109         if (projres == NULL)
2110                 return 0;
2111
	/* Use a minimal size for the invariant unrolled loop,
	 * as duffs device produces overhead. */
2114         if (loop_info.nodes < opt_params.invar_unrolling_min_size)
2115                 return 0;
2116
2117         loop_condition = get_irn_n(projres, 0);
2118
2119         success = get_invariant_pred(loop_condition, &loop_info.end_val, &iteration_path);
2120         DB((dbg, LEVEL_4, "pred invar %d\n", success));
2121
2122         if (! success)
2123                 return 0;
2124
2125         DB((dbg, LEVEL_4, "Invariant End_val %N, other %N\n", loop_info.end_val, iteration_path));
2126
2127         /* We may find the add or the phi first.
2128          * Until now we only have end_val. */
2129         if (is_Add(iteration_path) || is_Sub(iteration_path)) {
2130
2131                 loop_info.add = iteration_path;
2132                 DB((dbg, LEVEL_4, "Case 1: Got add %N (maybe not sane)\n", loop_info.add));
2133
2134                 /* Preds of the add should be step and the iteration_phi */
2135                 success = get_const_pred(loop_info.add, &loop_info.step, &loop_info.iteration_phi);
2136                 if (! success)
2137                         return 0;
2138
2139                 DB((dbg, LEVEL_4, "Got step %N\n", loop_info.step));
2140
2141                 if (! is_Phi(loop_info.iteration_phi))
2142                         return 0;
2143
2144                 DB((dbg, LEVEL_4, "Got phi %N\n", loop_info.iteration_phi));
2145
2146                 /* Find start_val.
2147                  * Does necessary sanity check of add, if it is already set.  */
2148                 success = get_start_and_add(loop_info.iteration_phi, invariant);
2149                 if (! success)
2150                         return 0;
2151
2152                 DB((dbg, LEVEL_4, "Got start A  %N\n", loop_info.start_val));
2153
2154         } else if (is_Phi(iteration_path)) {
2155                 ir_node *new_iteration_phi;
2156
2157                 loop_info.iteration_phi = iteration_path;
2158                 DB((dbg, LEVEL_4, "Case 2: Got phi %N\n", loop_info.iteration_phi));
2159
2160                 /* Find start_val and add-node.
2161                  * Does necessary sanity check of add, if it is already set.  */
2162                 success = get_start_and_add(loop_info.iteration_phi, invariant);
2163                 if (! success)
2164                         return 0;
2165
2166                 DB((dbg, LEVEL_4, "Got start B %N\n", loop_info.start_val));
2167                 DB((dbg, LEVEL_4, "Got add or sub %N\n", loop_info.add));
2168
2169                 success = get_const_pred(loop_info.add, &loop_info.step, &new_iteration_phi);
2170                 if (! success)
2171                         return 0;
2172
2173                 DB((dbg, LEVEL_4, "Got step (B) %N\n", loop_info.step));
2174
2175                 if (loop_info.iteration_phi != new_iteration_phi)
2176                         return 0;
2177
2178         } else {
2179                 return 0;
2180         }
2181
2182         mode = get_irn_mode(loop_info.end_val);
2183
2184         DB((dbg, LEVEL_4, "start %N, end %N, step %N\n",
2185                                 loop_info.start_val, loop_info.end_val, loop_info.step));
2186
2187         if (mode != mode_Is && mode != mode_Iu)
2188                 return 0;
2189
2190         /* TODO necessary? */
2191         if (!are_mode_I(loop_info.start_val, loop_info.step, loop_info.end_val))
2192                 return 0;
2193
2194         DB((dbg, LEVEL_4, "mode integer\n"));
2195
2196         step_tar = get_Const_tarval(loop_info.step);
2197
2198         if (tarval_is_null(step_tar)) {
2199                 /* TODO Might be worth a warning. */
2200                 return 0;
2201         }
2202
2203         DB((dbg, LEVEL_4, "step is not 0\n"));
2204
2205         create_duffs_block();
2206
2207         return loop_info.max_unroll;
2208 }
2209
2210 /* Returns unroll factor,
2211  * given maximum unroll factor and number of loop passes. */
2212 static unsigned get_preferred_factor_constant(ir_tarval *count_tar)
2213 {
2214         ir_tarval *tar_6, *tar_5, *tar_4, *tar_3, *tar_2;
2215         unsigned prefer;
2216         ir_mode *mode = get_irn_mode(loop_info.end_val);
2217
2218         tar_6 = new_tarval_from_long(6, mode);
2219         tar_5 = new_tarval_from_long(5, mode);
2220         tar_4 = new_tarval_from_long(4, mode);
2221         tar_3 = new_tarval_from_long(3, mode);
2222         tar_2 = new_tarval_from_long(2, mode);
2223
2224         /* loop passes % {6, 5, 4, 3, 2} == 0  */
2225         if (tarval_is_null(tarval_mod(count_tar, tar_6)))
2226                 prefer = 6;
2227         else if (tarval_is_null(tarval_mod(count_tar, tar_5)))
2228                 prefer = 5;
2229         else if (tarval_is_null(tarval_mod(count_tar, tar_4)))
2230                 prefer = 4;
2231         else if (tarval_is_null(tarval_mod(count_tar, tar_3)))
2232                 prefer = 3;
2233         else if (tarval_is_null(tarval_mod(count_tar, tar_2)))
2234                 prefer = 2;
2235         else {
2236                 /* gcd(max_unroll, count_tar) */
2237                 int a = loop_info.max_unroll;
2238                 int b = (int)get_tarval_long(count_tar);
2239                 int c;
2240
2241                 DB((dbg, LEVEL_4, "gcd of max_unroll %d and count_tar %d: ", a, b));
2242
		do {
			c = a % b;
			a = b;
			b = c;
		} while (c != 0);
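		/* The gcd ends up in a. Example (illustrative): max_unroll = 4,
		 * count_tar = 6: 4 % 6 = 4, 6 % 4 = 2, 4 % 2 = 0  ->  gcd = 2. */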
2247
2248                 DB((dbg, LEVEL_4, "%d\n", a));
2249                 return a;
2250         }
2251
2252         DB((dbg, LEVEL_4, "preferred unroll factor %d\n", prefer));
2253
2254         /*
2255          * If our preference is greater than the allowed unroll factor
2256          * we either might reduce the preferred factor and prevent a duffs device block,
2257          * or create a duffs device block, from which in this case (constants only)
2258          * we know the startloop at compiletime.
2259          * The latter yields the following graphs.
2260          * but for code generation we would want to use graph A.
2261          * The graphs are equivalent. So, we can only reduce the preferred factor.
2262          * A)                   B)
2263          *     PreHead             PreHead
2264          *        |      ,--.         |   ,--.
2265          *         \ Loop1   \        Loop2   \
2266          *          \  |     |       /  |     |
2267          *           Loop2   /      / Loop1   /
2268          *           |   `--'      |      `--'
2269          */
2270
2271         if (prefer <= loop_info.max_unroll)
2272                 return prefer;
2273         else {
2274                 switch(prefer) {
2275                         case 6:
2276                                 if (loop_info.max_unroll >= 3)
2277                                         return 3;
2278                                 else if (loop_info.max_unroll >= 2)
2279                                         return 2;
2280                                 else
2281                                         return 0;
2282
2283                         case 4:
2284                                 if (loop_info.max_unroll >= 2)
2285                                         return 2;
2286                                 else
2287                                         return 0;
2288
2289                         default:
2290                                 return 0;
2291                 }
2292         }
2293 }
2294
2295 /* Check if cur_loop is a simple counting loop.
2296  * Start, step and end are constants.
2297  * TODO The whole constant case should use procedures similar to
2298  * the invariant case, as they are more versatile. */
2299 /* TODO split. */
2300 static unsigned get_unroll_decision_constant(void)
2301 {
2302         ir_node     *cmp, *iteration_path;
2303         unsigned     success, is_latest_val;
2304         ir_tarval   *start_tar, *end_tar, *step_tar, *diff_tar, *count_tar;
2305         ir_tarval   *stepped;
2306         ir_relation  proj_proj, norm_proj;
2307         ir_mode     *mode;
2308
2309         /* RETURN if loop is not 'simple' */
2310         cmp = is_simple_loop();
2311         if (cmp == NULL)
2312                 return 0;
2313
2314         /* One in of the loop condition needs to be loop invariant. => end_val
2315          * The other in is assigned by an add. => add
2316          * The add uses a loop invariant value => step
2317          * and a phi with a loop invariant start_val and the add node as ins.
2318
2319            ^   ^
2320            |   | .-,
2321            |   Phi |
2322                 \  |   |
2323           ^  Add   |
2324            \  | \__|
2325             cond
2326              /\
2327         */
2328
2329         success = get_const_pred(cmp, &loop_info.end_val, &iteration_path);
2330         if (! success)
2331                 return 0;
2332
2333         DB((dbg, LEVEL_4, "End_val %N, other %N\n", loop_info.end_val, iteration_path));
2334
2335         /* We may find the add or the phi first.
2336          * Until now we only have end_val. */
2337         if (is_Add(iteration_path) || is_Sub(iteration_path)) {
2338
2339                 /* We test against the latest value of the iv. */
2340                 is_latest_val = 1;
2341
2342                 loop_info.add = iteration_path;
2343                 DB((dbg, LEVEL_4, "Case 2: Got add %N (maybe not sane)\n", loop_info.add));
2344
2345                 /* Preds of the add should be step and the iteration_phi */
2346                 success = get_const_pred(loop_info.add, &loop_info.step, &loop_info.iteration_phi);
2347                 if (! success)
2348                         return 0;
2349
2350                 DB((dbg, LEVEL_4, "Got step %N\n", loop_info.step));
2351
2352                 if (! is_Phi(loop_info.iteration_phi))
2353                         return 0;
2354
2355                 DB((dbg, LEVEL_4, "Got phi %N\n", loop_info.iteration_phi));
2356
2357                 /* Find start_val.
2358                  * Does necessary sanity check of add, if it is already set.  */
2359                 success = get_start_and_add(loop_info.iteration_phi, constant);
2360                 if (! success)
2361                         return 0;
2362
2363                 DB((dbg, LEVEL_4, "Got start %N\n", loop_info.start_val));
2364
2365         } else if (is_Phi(iteration_path)) {
2366                 ir_node *new_iteration_phi;
2367
2368                 /* We compare with the value the iv had entering this run. */
2369                 is_latest_val = 0;
2370
2371                 loop_info.iteration_phi = iteration_path;
2372                 DB((dbg, LEVEL_4, "Case 1: Got phi %N \n", loop_info.iteration_phi));
2373
2374                 /* Find start_val and add-node.
2375                  * Does necessary sanity check of add, if it is already set.  */
2376                 success = get_start_and_add(loop_info.iteration_phi, constant);
2377                 if (! success)
2378                         return 0;
2379
2380                 DB((dbg, LEVEL_4, "Got start %N\n", loop_info.start_val));
2381                 DB((dbg, LEVEL_4, "Got add or sub %N\n", loop_info.add));
2382
2383                 success = get_const_pred(loop_info.add, &loop_info.step, &new_iteration_phi);
2384                 if (! success)
2385                         return 0;
2386
2387                 DB((dbg, LEVEL_4, "Got step %N\n", loop_info.step));
2388
2389                 if (loop_info.iteration_phi != new_iteration_phi)
2390                         return 0;
2391
2392         } else {
2393                 /* RETURN */
2394                 return 0;
2395         }
2396
2397         mode = get_irn_mode(loop_info.end_val);
2398
2399         DB((dbg, LEVEL_4, "start %N, end %N, step %N\n",
2400                                 loop_info.start_val, loop_info.end_val, loop_info.step));
2401
2402         if (mode != mode_Is && mode != mode_Iu)
2403                 return 0;
2404
2405         /* TODO necessary? */
2406         if (!are_mode_I(loop_info.start_val, loop_info.step, loop_info.end_val))
2407                 return 0;
2408
2409         DB((dbg, LEVEL_4, "mode integer\n"));
2410
2411         end_tar = get_Const_tarval(loop_info.end_val);
2412         start_tar = get_Const_tarval(loop_info.start_val);
2413         step_tar = get_Const_tarval(loop_info.step);
2414
2415         if (tarval_is_null(step_tar))
2416                 /* TODO Might be worth a warning. */
2417                 return 0;
2418
2419         DB((dbg, LEVEL_4, "step is not 0\n"));
2420
2421         if ((!tarval_is_negative(step_tar)) ^ (!is_Sub(loop_info.add)))
2422                 loop_info.decreasing = 1;
2423
2424         diff_tar = tarval_sub(end_tar, start_tar, mode);
2425
	/* We need at least count_tar steps to get close to end_val, maybe more.
	 * There is no way we could already have taken too many steps.
	 * This represents the 'latest value'.
	 * (Whether the condition checks against the latest value is handled later.) */
2430         count_tar = tarval_div(diff_tar, step_tar);
2431
	/* The iv will not pass end_val (except on overflow).
	 * Nothing is done, as unrolling would yield no advantage. */
2434         if (tarval_is_negative(count_tar)) {
2435                 DB((dbg, LEVEL_4, "Loop is endless or never taken."));
2436                 /* TODO Might be worth a warning. */
2437                 return 0;
2438         }
2439
2440         ++stats.u_simple_counting_loop;
2441
2442         loop_info.latest_value = is_latest_val;
2443
2444         /* TODO split here
2445         if (! is_simple_counting_loop(&count_tar))
2446                 return 0;
2447         */
2448
2449         /* stepped can be negative, if step < 0 */
2450         stepped = tarval_mul(count_tar, step_tar);
2451
2452         /* step as close to end_val as possible, */
2453         /* |stepped| <= |end_tar|, and dist(stepped, end_tar) is smaller than a step. */
2454         if (is_Sub(loop_info.add))
		stepped = tarval_sub(start_tar, stepped, mode);
2456         else
2457                 stepped = tarval_add(start_tar, stepped);
2458
2459         DB((dbg, LEVEL_4, "stepped to %ld\n", get_tarval_long(stepped)));
2460
2461         proj_proj = get_Cmp_relation(cmp);
2462         /* Assure that norm_proj is the stay-in-loop case. */
2463         if (loop_info.exit_cond == 1)
2464                 norm_proj = get_negated_relation(proj_proj);
2465         else
2466                 norm_proj = proj_proj;
2467
2468         DB((dbg, LEVEL_4, "normalized projection %s\n", get_relation_string(norm_proj)));
2469         /* Executed at most once (stay in counting loop if a Eq b) */
2470         if (norm_proj == ir_relation_equal)
2471                 /* TODO Might be worth a warning. */
2472                 return 0;
2473
2474         /* calculates next values and increases count_tar according to it */
2475         success = simulate_next(&count_tar, stepped, step_tar, end_tar, norm_proj);
2476         if (! success)
2477                 return 0;
2478
	/* We run the loop once more if we compare against the
	 * not yet in-/decremented iv. */
2481         if (is_latest_val == 0) {
2482                 DB((dbg, LEVEL_4, "condition uses not latest iv value\n"));
2483                 count_tar = tarval_add(count_tar, get_tarval_one(mode));
2484         }
2485
2486         DB((dbg, LEVEL_4, "loop taken %ld times\n", get_tarval_long(count_tar)));
2487
2488         /* Assure the loop is taken at least 1 time. */
2489         if (tarval_is_null(count_tar)) {
2490                 /* TODO Might be worth a warning. */
2491                 return 0;
2492         }
2493
2494         loop_info.count_tar = count_tar;
2495         return get_preferred_factor_constant(count_tar);
2496 }
2497
2498 /**
2499  * Loop unrolling
2500  */
2501 static void unroll_loop(void)
2502 {
2503
2504         if (! (loop_info.nodes > 0))
2505                 return;
2506
2507         if (loop_info.nodes > opt_params.max_unrolled_loop_size) {
2508                 DB((dbg, LEVEL_2, "Nodes %d > allowed nodes %d\n",
2509                         loop_info.nodes, opt_params.max_unrolled_loop_size));
2510                 ++stats.too_large;
2511                 return;
2512         }
2513
2514         if (loop_info.calls > 0) {
2515                 DB((dbg, LEVEL_2, "Calls %d > allowed calls 0\n",
2516                         loop_info.calls));
2517                 ++stats.calls_limit;
2518                 return;
2519         }
2520
2521         unroll_nr = 0;
2522
2523         /* get_unroll_decision_constant and invariant are completely
2524          * independent for flexibility.
2525          * Some checks may be performed twice. */
2526
2527         /* constant case? */
2528         if (opt_params.allow_const_unrolling)
2529                 unroll_nr = get_unroll_decision_constant();
2530         if (unroll_nr > 1) {
2531                 loop_info.unroll_kind = constant;
2532
2533         } else {
2534                 /* invariant case? */
2535                 if (opt_params.allow_invar_unrolling)
2536                         unroll_nr = get_unroll_decision_invariant();
2537                 if (unroll_nr > 1)
2538                         loop_info.unroll_kind = invariant;
2539         }
2540
2541         DB((dbg, LEVEL_2, " *** Unrolling %d times ***\n", unroll_nr));
2542
2543         if (unroll_nr > 1) {
2544                 loop_entries = NEW_ARR_F(entry_edge, 0);
2545
2546                 /* Get loop outs */
2547                 irg_walk_graph(current_ir_graph, get_loop_entries, NULL, NULL);
2548
2549                 if (loop_info.unroll_kind == constant) {
2550                         if ((int)get_tarval_long(loop_info.count_tar) == unroll_nr)
2551                                 loop_info.needs_backedge = 0;
2552                         else
2553                                 loop_info.needs_backedge = 1;
2554                 } else {
2555                         loop_info.needs_backedge = 1;
2556                 }
2557
		/* Use a node map (plus obstack) to keep the copies of the loop nodes. */
2559                 ir_nodemap_init(&map, current_ir_graph);
2560                 obstack_init(&obst);
2561
2562                 /* Copies the loop */
2563                 copy_loop(loop_entries, unroll_nr - 1);
2564
2565                 /* Line up the floating copies. */
2566                 place_copies(unroll_nr - 1);
2567
2568                 /* Remove phis with 1 in
2569                  * If there were no nested phis, this would not be necessary.
2570                  * Avoiding the creation in the first place
2571                  * leads to complex special cases. */
2572                 irg_walk_graph(current_ir_graph, correct_phis, NULL, NULL);
2573
2574                 if (loop_info.unroll_kind == constant)
2575                         ++stats.constant_unroll;
2576                 else
2577                         ++stats.invariant_unroll;
2578
2579                 clear_irg_state(current_ir_graph, IR_GRAPH_STATE_CONSISTENT_DOMINANCE);
2580
2581                 DEL_ARR_F(loop_entries);
2582                 obstack_free(&obst, NULL);
2583                 ir_nodemap_destroy(&map);
2584         }
2585
2586 }
2587
2588 /* Analyzes the loop, and checks if size is within allowed range.
2589  * Decides if loop will be processed. */
2590 static void init_analyze(ir_graph *irg, ir_loop *loop)
2591 {
2592         cur_loop = loop;
2593
2594         loop_head       = NULL;
2595         loop_head_valid = true;
2596
2597         /* Reset loop info */
2598         memset(&loop_info, 0, sizeof(loop_info_t));
2599
2600         DB((dbg, LEVEL_1, "    >>>> current loop %ld <<<\n",
2601             get_loop_loop_nr(loop)));
2602
	/* Collect loop information: head, node counts. */
2604         irg_walk_graph(irg, get_loop_info, NULL, NULL);
2605
2606         /* RETURN if there is no valid head */
2607         if (!loop_head || !loop_head_valid) {
2608                 DB((dbg, LEVEL_1,   "No valid loop head. Nothing done.\n"));
2609                 return;
2610         } else {
2611                 DB((dbg, LEVEL_1,   "Loophead: %N\n", loop_head));
2612         }
2613
2614         if (loop_info.branches > opt_params.max_branches) {
2615                 DB((dbg, LEVEL_1, "Branches %d > allowed branches %d\n",
2616                         loop_info.branches, opt_params.max_branches));
2617                 ++stats.calls_limit;
2618                 return;
2619         }
2620
2621         switch (loop_op) {
2622                 case loop_op_inversion:
2623                         loop_inversion(irg);
2624                         break;
2625
2626                 case loop_op_unrolling:
2627                         unroll_loop();
2628                         break;
2629
2630                 default:
2631                         panic("Loop optimization not implemented.");
2632         }
2633         DB((dbg, LEVEL_1, "       <<<< end of loop with node %ld >>>>\n",
2634             get_loop_loop_nr(loop)));
2635 }
2636
2637 /* Find innermost loops and add them to loops. */
2638 static void find_innermost_loop(ir_loop *loop)
2639 {
2640         bool   had_sons   = false;
2641         size_t n_elements = get_loop_n_elements(loop);
2642         size_t e;
2643
2644         for (e = 0; e < n_elements; ++e) {
2645                 loop_element element = get_loop_element(loop, e);
2646                 if (*element.kind == k_ir_loop) {
2647                         find_innermost_loop(element.son);
2648                         had_sons = true;
2649                 }
2650         }
2651
2652         if (!had_sons) {
2653                 ARR_APP1(ir_loop*, loops, loop);
2654         }
2655 }
2656
2657 static void set_loop_params(void)
2658 {
	opt_params.max_loop_size = 100;
	opt_params.depth_adaption = -50;
	opt_params.count_phi = true;
	opt_params.count_proj = false;
	opt_params.allowed_calls = 0;

	opt_params.max_cc_size = 5;

	opt_params.allow_const_unrolling = true;
	opt_params.allow_invar_unrolling = false;

	opt_params.invar_unrolling_min_size = 20;
	opt_params.max_unrolled_loop_size = 400;
	opt_params.max_branches = 9999;
2674 }
2675
2676 /* Assure preconditions are met and go through all loops. */
2677 void loop_optimization(ir_graph *irg)
2678 {
2679         ir_loop *loop;
2680         size_t   i;
2681         size_t   n_elements;
2682
2683         set_loop_params();
2684
2685         /* Reset stats for this procedure */
2686         reset_stats();
2687
2688         /* Preconditions */
2689         set_current_ir_graph(irg);
2690
2691         ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
2692         collect_phiprojs(irg);
2693
2694         loop = get_irg_loop(irg);
2695
2696         loops = NEW_ARR_F(ir_loop *, 0);
2697         /* List all inner loops */
2698         n_elements = get_loop_n_elements(loop);
2699         for (i = 0; i < n_elements; ++i) {
2700                 loop_element element = get_loop_element(loop, i);
2701                 if (*element.kind != k_ir_loop)
2702                         continue;
2703                 find_innermost_loop(element.son);
2704         }
2705
2706         /* Set all links to NULL */
2707         irg_walk_graph(irg, reset_link, NULL, NULL);
2708
2709         for (i = 0; i < ARR_LEN(loops); ++i) {
2710                 ir_loop *loop = loops[i];
2711
2712                 ++stats.loops;
2713
2714                 /* Analyze and handle loop */
2715                 init_analyze(irg, loop);
2716
2717                 /* Copied blocks do not have their phi list yet */
2718                 collect_phiprojs(irg);
2719
2720                 /* Set links to NULL
2721                  * TODO Still necessary? */
2722                 irg_walk_graph(irg, reset_link, NULL, NULL);
2723         }
2724
2725         print_stats();
2726
2727         DEL_ARR_F(loops);
2728         ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
2729 }
2730
2731 static ir_graph_state_t perform_loop_unrolling(ir_graph *irg)
2732 {
2733         loop_op = loop_op_unrolling;
2734         loop_optimization(irg);
2735         return 0;
2736 }
2737
2738 static ir_graph_state_t perform_loop_inversion(ir_graph *irg)
2739 {
2740         loop_op = loop_op_inversion;
2741         loop_optimization(irg);
2742         return 0;
2743 }
2744
2745 static ir_graph_state_t perform_loop_peeling(ir_graph *irg)
2746 {
2747         loop_op = loop_op_peeling;
2748         loop_optimization(irg);
2749         return 0;
2750 }
2751
2752 static optdesc_t opt_unroll_loops = {
2753         "unroll-loops",
2754         IR_GRAPH_STATE_CONSISTENT_OUT_EDGES | IR_GRAPH_STATE_CONSISTENT_OUTS | IR_GRAPH_STATE_CONSISTENT_LOOPINFO,
2755         perform_loop_unrolling,
2756 };
2757
2758 static optdesc_t opt_invert_loops = {
2759         "invert-loops",
2760         IR_GRAPH_STATE_CONSISTENT_OUT_EDGES | IR_GRAPH_STATE_CONSISTENT_OUTS | IR_GRAPH_STATE_CONSISTENT_LOOPINFO,
2761         perform_loop_inversion,
2762 };
2763
2764 static optdesc_t opt_peel_loops = {
2765         "peel-loops",
2766         IR_GRAPH_STATE_CONSISTENT_OUT_EDGES | IR_GRAPH_STATE_CONSISTENT_OUTS | IR_GRAPH_STATE_CONSISTENT_LOOPINFO,
2767         perform_loop_peeling,
2768 };
2769
2770 void do_loop_unrolling(ir_graph *irg)
2771 { perform_irg_optimization(irg, &opt_unroll_loops); }
2772
2773 void do_loop_inversion(ir_graph *irg)
2774 { perform_irg_optimization(irg, &opt_invert_loops); }
2775
2776 void do_loop_peeling(ir_graph *irg)
2777 { perform_irg_optimization(irg, &opt_peel_loops); }
2778
2779 ir_graph_pass_t *loop_inversion_pass(const char *name)
2780 {
2781         return def_graph_pass(name ? name : "loop_inversion", do_loop_inversion);
2782 }
2783
2784 ir_graph_pass_t *loop_unroll_pass(const char *name)
2785 {
2786         return def_graph_pass(name ? name : "loop_unroll", do_loop_unrolling);
2787 }
2788
2789 ir_graph_pass_t *loop_peeling_pass(const char *name)
2790 {
2791         return def_graph_pass(name ? name : "loop_peeling", do_loop_peeling);
2792 }
2793
2794 void firm_init_loop_opt(void)
2795 {
2796         FIRM_DBG_REGISTER(dbg, "firm.opt.loop");
2797 }