1 /*
2  * This file is part of libFirm.
3  * Copyright (C) 2012 University of Karlsruhe.
4  */
5
6 /**
8  * @file     loop.c
8  * @author   Christian Helmer
9  * @brief    loop inversion and loop unrolling
10  *
11  */
12 #include "config.h"
13
14 #include <stdbool.h>
15
16 #include "iroptimize.h"
17 #include "opt_init.h"
18 #include "irnode.h"
19 #include "debug.h"
20 #include "error.h"
21
22 #include "ircons.h"
23 #include "irgopt.h"
24 #include "irgmod.h"
25 #include "irgwalk.h"
26 #include "irouts.h"
27 #include "iredges.h"
28 #include "irtools.h"
29 #include "array_t.h"
30 #include "beutil.h"
31 #include "irpass.h"
32 #include "irdom.h"
33
34 #include <math.h>
35 #include "irbackedge_t.h"
36 #include "irnodemap.h"
37 #include "irloop_t.h"
38
39 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
40
41 /**
42  * Convenience macro for iterating over every phi node of the given block.
43  * Requires phi list per block.
44  */
45 #define for_each_phi(block, phi) \
46         for ((phi) = get_Block_phis( (block) ); (phi) ; (phi) = get_Phi_next((phi)))
47
48 #define for_each_phi_safe(head, phi, next) \
49         for ((phi) = (head), (next) = (head) ? get_Phi_next((head)) : NULL; \
50                         (phi) ; (phi) = (next), (next) = (next) ? get_Phi_next((next)) : NULL)
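/* Usage sketch (illustrative, not part of the original file): iterate over all
 * phis of a block, assuming its phi list has been built (see add_Block_phi()):
 *
 *     ir_node *phi;
 *     for_each_phi(block, phi) {
 *             DB((dbg, LEVEL_5, "phi %N of block %N\n", phi, block));
 *     }
 *
 * for_each_phi_safe() additionally tolerates exchanging or removing the
 * current phi during iteration, as done in fix_copy_inversion() below. */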
51
52 /* Currently processed loop. */
53 static ir_loop *cur_loop;
54
55 /* Flag for kind of unrolling. */
56 typedef enum {
57         constant,
58         invariant
59 } unrolling_kind_flag;
60
61 /* Condition for visiting a node during copy_walk. */
62 typedef bool walker_condition(const ir_node *);
63
64 /* Node and position of a predecessor. */
65 typedef struct entry_edge {
66         ir_node *node;
67         int pos;
68         ir_node *pred;
69 } entry_edge;
70
71 /* Node info for unrolling. */
72 typedef struct unrolling_node_info {
73         ir_node **copies;
74 } unrolling_node_info;
75
76 /* Outs of the loop head. */
77 static entry_edge *cur_head_outs;
78
79 /* Information about the loop head */
80 static ir_node *loop_head       = NULL;
81 static bool     loop_head_valid = true;
82
83 /* List of all inner loops that are processed. */
84 static ir_loop **loops;
85
86 /* Stats */
87 typedef struct loop_stats_t {
88         unsigned loops;
89         unsigned inverted;
90         unsigned too_large;
91         unsigned too_large_adapted;
92         unsigned cc_limit_reached;
93         unsigned calls_limit;
94
95         unsigned u_simple_counting_loop;
96         unsigned constant_unroll;
97         unsigned invariant_unroll;
98
99         unsigned unhandled;
100 } loop_stats_t;
101
102 static loop_stats_t stats;
103
104 /* Set stats to zero */
105 static void reset_stats(void)
106 {
107         memset(&stats, 0, sizeof(loop_stats_t));
108 }
109
110 /* Print stats */
111 static void print_stats(void)
112 {
113         DB((dbg, LEVEL_2, "---------------------------------------\n"));
114         DB((dbg, LEVEL_2, "loops             :   %d\n",stats.loops));
115         DB((dbg, LEVEL_2, "inverted          :   %d\n",stats.inverted));
116         DB((dbg, LEVEL_2, "too_large         :   %d\n",stats.too_large));
117         DB((dbg, LEVEL_2, "too_large_adapted :   %d\n",stats.too_large_adapted));
118         DB((dbg, LEVEL_2, "cc_limit_reached  :   %d\n",stats.cc_limit_reached));
119         DB((dbg, LEVEL_2, "calls_limit       :   %d\n",stats.calls_limit));
120         DB((dbg, LEVEL_2, "u_simple_counting :   %d\n",stats.u_simple_counting_loop));
121         DB((dbg, LEVEL_2, "constant_unroll   :   %d\n",stats.constant_unroll));
122         DB((dbg, LEVEL_2, "invariant_unroll  :   %d\n",stats.invariant_unroll));
123         DB((dbg, LEVEL_2, "=======================================\n"));
124 }
125
126 /* Commandline parameters */
127 typedef struct loop_opt_params_t {
128         unsigned max_loop_size;            /* Maximum number of nodes [nodes] */
129         int      depth_adaption;           /* Loop nest depth adaption [percent] */
130         unsigned allowed_calls;            /* Number of calls allowed [number] */
131         bool     count_phi;                /* Count phi nodes */
132         bool     count_proj;               /* Count projections */
133
134         unsigned max_cc_size;              /* Maximum condition chain size [nodes] */
135         unsigned max_branches;
136
137         unsigned max_unrolled_loop_size;   /* [nodes] */
138         bool     allow_const_unrolling;
139         bool     allow_invar_unrolling;
140         unsigned invar_unrolling_min_size; /* [nodes] */
141
142 } loop_opt_params_t;
143
144 static loop_opt_params_t opt_params;
145
146 /* Loop analysis information */
147 typedef struct loop_info_t {
148         unsigned nodes;        /* node count */
149         unsigned ld_st;        /* load and store nodes */
150         unsigned branches;     /* number of conditions */
151         unsigned calls;        /* number of calls */
152         unsigned cf_outs;      /* number of cf edges which leave the loop */
153         entry_edge cf_out;     /* single loop leaving cf edge */
154         int be_src_pos;        /* position of the single own backedge in the head */
155
156         /* for inversion */
157         unsigned cc_size;      /* nodes in the condition chain */
158
159         /* for unrolling */
160         unsigned max_unroll;   /* Number of unrolls satisfying max_loop_size */
161         unsigned exit_cond;    /* 1 if condition==true exits the loop.  */
162         unsigned latest_value:1;    /* 1 if condition is checked against latest counter value */
163         unsigned needs_backedge:1;  /* 0 if loop is completely unrolled */
164         unsigned decreasing:1;      /* Step operation is_Sub, or step is<0 */
165
166         /* IV information of a simple loop */
167         ir_node *start_val;
168         ir_node *step;
169         ir_node *end_val;
170         ir_node *iteration_phi;
171         ir_node *add;
172
173         ir_tarval *count_tar;               /* Number of loop iterations */
174
175         ir_node *duff_cond;                 /* Duff mod */
176         unrolling_kind_flag unroll_kind;    /* constant or invariant unrolling */
177 } loop_info_t;
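/* Illustrative mapping of the IV fields (example values are assumptions for
 * documentation only, not taken from the original sources): for a simple
 * counting loop such as
 *
 *     for (i = 0; i < 10; i += 2) { ... }
 *
 * the analysis would roughly yield
 *     start_val     = Const 0
 *     step          = Const 2
 *     end_val       = Const 10
 *     iteration_phi = the Phi merging the start value and the stepped value
 *     add           = the Add (or Sub) performing i += 2
 *     count_tar     = tarval 5 (the number of iterations) */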
178
179 /* Information about the current loop */
180 static loop_info_t loop_info;
181
182 /* Outs of the condition chain (loop inversion). */
183 static ir_node **cc_blocks;
184 /* df/cf edges with def in the condition chain */
185 static entry_edge *cond_chain_entries;
186 /* Array of df loops found in the condition chain. */
187 static entry_edge *head_df_loop;
188 /* Number of blocks in cc */
189 static unsigned inversion_blocks_in_cc;
190
191
192 /* Cf/df edges leaving the loop.
193  * Called entries here, as they are used to enter the loop with walkers. */
194 static entry_edge *loop_entries;
195 /* Number of unrolls to perform */
196 static int unroll_nr;
197 /* Node map and obstack used to keep copies of nodes. */
198 static ir_nodemap     map;
199 static struct obstack obst;
200
201 /* Loop operations.  */
202 typedef enum loop_op_t {
203         loop_op_inversion,
204         loop_op_unrolling,
205         loop_op_peeling
206 } loop_op_t;
207
208 /* Saves which loop operation to do until after basic tests. */
209 static loop_op_t loop_op;
210
211 /* Returns the maximum number of nodes for the given nest depth */
212 static unsigned get_max_nodes_adapted(unsigned depth)
213 {
214         double perc = 100.0 + (double)opt_params.depth_adaption;
215         double factor = pow(perc / 100.0, depth);
216
217         return (int)((double)opt_params.max_loop_size * factor);
218 }
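/* Worked example (purely illustrative parameter values, not defaults of this
 * file): with max_loop_size = 100 and depth_adaption = -25, perc is 75, so
 *     depth 0: 100 * 0.75^0 = 100 nodes allowed
 *     depth 1: 100 * 0.75^1 =  75 nodes allowed
 *     depth 2: 100 * 0.75^2 ~  56 nodes allowed
 * i.e. deeper nests get a smaller budget (or a larger one for a positive
 * adaption). */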
219
220 /* Reset nodes link. For use with a walker. */
221 static void reset_link(ir_node *node, void *env)
222 {
223         (void)env;
224         set_irn_link(node, NULL);
225 }
226
227 /* Returns true if the node or its block is in cur_loop. */
228 static bool is_in_loop(const ir_node *node)
229 {
230         return get_irn_loop(get_block_const(node)) == cur_loop;
231 }
232
233 /* Returns true if the given edge is a backedge
234  * with its pred inside cur_loop. */
235 static bool is_own_backedge(const ir_node *n, int pos)
236 {
237         return is_backedge(n, pos) && is_in_loop(get_irn_n(n, pos));
238 }
239
240 /* Finds the loop head and collects loop_info such as node, call and branch counts. */
241 static void get_loop_info(ir_node *node, void *env)
242 {
243         bool node_in_loop = is_in_loop(node);
244         int i, arity;
245         (void)env;
246
247         /* collect some loop information */
248         if (node_in_loop) {
249                 if (is_Phi(node) && opt_params.count_phi)
250                         ++loop_info.nodes;
251                 else if (is_Proj(node) && opt_params.count_proj)
252                         ++loop_info.nodes;
253                 else if (!is_Confirm(node) && !is_Const(node) && !is_SymConst(node))
254                         ++loop_info.nodes;
255
256                 if (is_Load(node) || is_Store(node))
257                         ++loop_info.ld_st;
258
259                 if (is_Call(node))
260                         ++loop_info.calls;
261         }
262
263         arity = get_irn_arity(node);
264         for (i = 0; i < arity; i++) {
265                 ir_node *pred         = get_irn_n(node, i);
266                 bool     pred_in_loop = is_in_loop(pred);
267
268                 if (is_Block(node) && !node_in_loop && pred_in_loop) {
269                         entry_edge entry;
270                         entry.node = node;
271                         entry.pos = i;
272                         entry.pred = pred;
273                         /* Count cf outs */
274                         ++loop_info.cf_outs;
275                         loop_info.cf_out = entry;
276                 }
277
278                 /* Find the loop's head: the block with a cfgpred outside of the loop */
279                 if (is_Block(node)) {
280                         unsigned outs_n = 0;
281
282                         /* Count innerloop branches */
283                         foreach_out_edge_kind(node, edge, EDGE_KIND_BLOCK) {
284                                 ir_node *succ = get_edge_src_irn(edge);
285                                 if (is_Block(succ) && is_in_loop(succ))
286                                         ++outs_n;
287                         }
288                         if (outs_n > 1)
289                                 ++loop_info.branches;
290
291                         if (node_in_loop && !pred_in_loop && loop_head_valid) {
292                                 ir_node *cfgpred = get_Block_cfgpred(node, i);
293
294                                 if (!is_in_loop(cfgpred)) {
295                                         DB((dbg, LEVEL_5, "potential head %+F because inloop and pred %+F not inloop\n",
296                                                                 node, pred));
297                                         /* another head? We do not touch this. */
298                                         if (loop_head && loop_head != node) {
299                                                 loop_head_valid = false;
300                                         } else {
301                                                 loop_head = node;
302                                         }
303                                 }
304                         }
305                 }
306         }
307 }
308
309 /* Finds all edges with users outside of the loop
310  * and definition inside the loop. */
311 static void get_loop_entries(ir_node *node, void *env)
312 {
313         unsigned node_in_loop, pred_in_loop;
314         int i, arity;
315         (void) env;
316
317         arity = get_irn_arity(node);
318         for (i = 0; i < arity; ++i) {
319                 ir_node *pred = get_irn_n(node, i);
320
321                 pred_in_loop = is_in_loop(pred);
322                 node_in_loop = is_in_loop(node);
323
324                 if (pred_in_loop && !node_in_loop) {
325                         entry_edge entry;
326                         entry.node = node;
327                         entry.pos = i;
328                         entry.pred = pred;
329                         ARR_APP1(entry_edge, loop_entries, entry);
330                 }
331         }
332 }
333
334 /* SSA construction state */
335 static ir_node *ssa_second_def;
336 static ir_node *ssa_second_def_block;
337
338 /**
339  * Walks the graph bottom up, searching for definitions and creates phis.
340  */
341 static ir_node *search_def_and_create_phis(ir_node *block, ir_mode *mode, int first)
342 {
343         int i;
344         int n_cfgpreds;
345         ir_graph *irg = get_irn_irg(block);
346         ir_node *phi;
347         ir_node **in;
348
349         DB((dbg, LEVEL_5, "ssa search_def_and_create_phis: block %N\n", block));
350
351         /* Prevent the creation of phis that would be Bad anyway:
352          * dead and Bad blocks. */
353         if (get_irn_arity(block) < 1 || is_Bad(block)) {
354                 DB((dbg, LEVEL_5, "ssa bad %N\n", block));
355                 return new_r_Bad(irg, mode);
356         }
357
358         if (block == ssa_second_def_block && !first) {
359                 DB((dbg, LEVEL_5, "ssa found second definition: use second def %N\n", ssa_second_def));
360                 return ssa_second_def;
361         }
362
363         /* already processed this block? */
364         if (irn_visited(block)) {
365                 ir_node *value = (ir_node *) get_irn_link(block);
366                 DB((dbg, LEVEL_5, "ssa already visited: use linked %N\n", value));
367                 return value;
368         }
369
370         assert(block != get_irg_start_block(irg));
371
372         /* a Block with only 1 predecessor needs no Phi */
373         n_cfgpreds = get_Block_n_cfgpreds(block);
374         if (n_cfgpreds == 1) {
375                 ir_node *pred_block = get_Block_cfgpred_block(block, 0);
376                 ir_node *value;
377
378                 DB((dbg, LEVEL_5, "ssa 1 pred: walk pred %N\n", pred_block));
379
380                 value = search_def_and_create_phis(pred_block, mode, 0);
381                 set_irn_link(block, value);
382                 mark_irn_visited(block);
383
384                 return value;
385         }
386
387         /* create a new Phi */
388         NEW_ARR_A(ir_node*, in, n_cfgpreds);
389         for (i = 0; i < n_cfgpreds; ++i)
390                 in[i] = new_r_Dummy(irg, mode);
391
392         phi = new_r_Phi(block, n_cfgpreds, in, mode);
393         /* Important: always keep block phi list up to date. */
394         add_Block_phi(block, phi);
395         DB((dbg, LEVEL_5, "ssa phi creation: link new phi %N to block %N\n", phi, block));
396         set_irn_link(block, phi);
397         mark_irn_visited(block);
398
399         /* set Phi predecessors */
400         for (i = 0; i < n_cfgpreds; ++i) {
401                 ir_node *pred_val;
402                 ir_node *pred_block = get_Block_cfgpred_block(block, i);
403                 assert(pred_block != NULL);
404                 pred_val = search_def_and_create_phis(pred_block, mode, 0);
405
406                 assert(pred_val != NULL);
407
408                 DB((dbg, LEVEL_5, "ssa phi pred:phi %N, pred %N\n", phi, pred_val));
409                 set_irn_n(phi, i, pred_val);
410         }
411
412         return phi;
413 }
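/* Informal sketch of the search above: starting at a use's block the control
 * flow is walked backwards.  A block with a single predecessor simply forwards
 * its predecessor's value, a merge block gets a fresh Phi whose operands are
 * filled by recursing into every predecessor.  Visited blocks cache their
 * value in the node link, so each block is processed at most once per
 * construct_ssa() run. */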
414
415
416 /**
417  * Given a set of values this function constructs SSA-form for the users of the
418  * first value (the users are determined through the out-edges of the value).
419  * Works without using the dominance tree.
420  */
421 static void construct_ssa(ir_node *orig_block, ir_node *orig_val,
422                 ir_node *second_block, ir_node *second_val)
423 {
424         ir_graph *irg;
425         ir_mode *mode;
426
427         assert(orig_block && orig_val && second_block && second_val &&
428                         "no parameter of construct_ssa may be NULL");
429
430         if (orig_val == second_val)
431                 return;
432
433         irg = get_irn_irg(orig_val);
434
435         ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
436         inc_irg_visited(irg);
437
438         mode = get_irn_mode(orig_val);
439         set_irn_link(orig_block, orig_val);
440         mark_irn_visited(orig_block);
441
442         ssa_second_def_block = second_block;
443         ssa_second_def       = second_val;
444
445         /* Only fix the users of the first, i.e. the original node */
446         foreach_out_edge_safe(orig_val, edge) {
447                 ir_node *user = get_edge_src_irn(edge);
448                 int j = get_edge_src_pos(edge);
449                 ir_node *user_block = get_nodes_block(user);
450                 ir_node *newval;
451
452                 /* ignore keeps */
453                 if (is_End(user))
454                         continue;
455
456                 DB((dbg, LEVEL_5, "original user %N\n", user));
457
458                 if (is_Phi(user)) {
459                         ir_node *pred_block = get_Block_cfgpred_block(user_block, j);
460                         newval = search_def_and_create_phis(pred_block, mode, 1);
461                 } else {
462                         newval = search_def_and_create_phis(user_block, mode, 1);
463                 }
464                 if (newval != user && !is_Bad(newval))
465                         set_irn_n(user, j, newval);
466         }
467
468         ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
469 }
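/* Usage sketch (mirroring the calls in inversion_walk() below): after a
 * definition pred has been duplicated into cppred, all users of pred are
 * rewired to the dominating definition via
 *
 *     construct_ssa(get_nodes_block(pred),   pred,
 *                   get_nodes_block(cppred), cppred);
 *
 * which creates the necessary Phi nodes on the way. */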
470
471
472 /***** Unrolling Helper Functions *****/
473
474 /* Assign the copy with index nr to node n */
475 static void set_unroll_copy(ir_node *n, int nr, ir_node *cp)
476 {
477         unrolling_node_info *info;
478         assert(nr != 0 && "0 reserved");
479
480         info = ir_nodemap_get(unrolling_node_info, &map, n);
481         if (! info) {
482                 ir_node **const arr = NEW_ARR_DZ(ir_node*, &obst, unroll_nr);
483
484                 info = OALLOCZ(&obst, unrolling_node_info);
485                 info->copies = arr;
486                 ir_nodemap_insert(&map, n, info);
487         }
488         /* Original node */
489         info->copies[0] = n;
490
491         info->copies[nr] = cp;
492 }
493
494 /* Returns a node's copy if it exists, else NULL. */
495 static ir_node *get_unroll_copy(ir_node *n, int nr)
496 {
497         ir_node             *cp;
498         unrolling_node_info *info = ir_nodemap_get(unrolling_node_info, &map, n);
499         if (! info)
500                 return NULL;
501
502         cp = info->copies[nr];
503         return cp;
504 }
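/* Convention of the copies[] array (a hypothetical round trip, assuming the
 * map has been initialized and 1 <= nr < unroll_nr):
 *
 *     set_unroll_copy(n, 2, cp);
 *     assert(get_unroll_copy(n, 2) == cp);
 *     assert(get_unroll_copy(n, 0) == n);   slot 0 holds the original node
 */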
505
506
507 /***** Inversion Helper Functions *****/
508
509 /* Sets copy cp of node n. */
510 static void set_inversion_copy(ir_node *n, ir_node *cp)
511 {
512         ir_nodemap_insert(&map, n, cp);
513 }
514
515 /* Getter of copy of n for inversion */
516 static ir_node *get_inversion_copy(ir_node *n)
517 {
518         ir_node *cp = ir_nodemap_get(ir_node, &map, n);
519         return cp;
520 }
521
522 /* Resets block mark for given node. For use with walker */
523 static void reset_block_mark(ir_node *node, void * env)
524 {
525         (void) env;
526
527         if (is_Block(node))
528                 set_Block_mark(node, 0);
529 }
530
531 /* Returns the mark of the node's block (or of the node itself if it is a block).
532  * Used in this context to determine if node is in the condition chain. */
533 static bool is_nodes_block_marked(const ir_node* node)
534 {
535         return get_Block_mark(get_block_const(node));
536 }
537
538 /* Extends a node's ins by the node newnode.
539  * NOTE: This is slow if a node n needs to be extended more than once. */
540 static void extend_irn(ir_node *n, ir_node *newnode, bool new_is_backedge)
541 {
542         int i;
543         int arity = get_irn_arity(n);
544         int new_arity = arity + 1;
545         ir_node **ins = XMALLOCN(ir_node*, new_arity);
546         bool     *bes = XMALLOCN(bool, new_arity);
547
548         /* save bes */
549         /* Bes are important!
550          * Another way would be recreating the looptree,
551          * but after that we cannot distinguish already processed loops
552          * from not yet processed ones. */
553         if (is_Block(n)) {
554                 for(i = 0; i < arity; ++i) {
555                         bes[i] = is_backedge(n, i);
556                 }
557                 bes[i] = new_is_backedge;
558         }
559
560         for(i = 0; i < arity; ++i) {
561                 ins[i] = get_irn_n(n, i);
562         }
563         ins[i] = newnode;
564
565         set_irn_in(n, new_arity, ins);
566
567         /* restore bes  */
568         if (is_Block(n)) {
569                 for(i = 0; i < new_arity; ++i) {
570                         if (bes[i])
571                                 set_backedge(n, i);
572                 }
573         }
574         free(ins);
575         free(bes);
576 }
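/* Example (illustration only): for a block with the ins {A, B}, calling
 * extend_irn(block, C, false) results in the ins {A, B, C}; the backedge
 * flags of A and B are preserved and C is added as a regular in. */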
577
578 /* Extends a block by a copy of its pred at pos,
579  * also fixing the phis in the same way. */
580 static void extend_ins_by_copy(ir_node *block, int pos)
581 {
582         ir_node *new_in;
583         ir_node *phi;
584
585         /* Extend block by copy of definition at pos */
586         ir_node *const pred = get_Block_cfgpred(block, pos);
587         new_in = get_inversion_copy(pred);
588         DB((dbg, LEVEL_5, "Extend block %N by %N cp of %N\n", block, new_in, pred));
589         extend_irn(block, new_in, false);
590
591         /* Extend block phis by copy of definition at pos */
592         for_each_phi(block, phi) {
593                 ir_node *pred, *cp;
594
595                 pred = get_irn_n(phi, pos);
596                 cp = get_inversion_copy(pred);
597                 /* If the phi's in is not in the condition chain (e.g. a constant),
598                  * there is no copy. */
599                 if (cp == NULL)
600                         new_in = pred;
601                 else
602                         new_in = cp;
603
604                 DB((dbg, LEVEL_5, "Extend phi %N by %N cp of %N\n", phi, new_in, pred));
605                 extend_irn(phi, new_in, false);
606         }
607 }
608
609 /* Returns the number of the block's backedges, with or without alien backedges. */
610 static int get_backedge_n(ir_node *block, bool with_alien)
611 {
612         int       be_n  = 0;
613         int const arity = get_Block_n_cfgpreds(block);
614         for (int i = 0; i < arity; ++i) {
615                 ir_node *const pred = get_Block_cfgpred(block, i);
616                 if (is_backedge(block, i) && (with_alien || is_in_loop(pred)))
617                         ++be_n;
618         }
619         return be_n;
620 }
621
622 /* Returns a raw copy of the given node.
623  * Attributes are kept/set according to the needs of loop inversion. */
624 static ir_node *copy_node(ir_node *node)
625 {
626         int i, arity;
627         ir_node *cp;
628
629         cp = exact_copy(node);
630         arity = get_irn_arity(node);
631
632         /* Keep backedge info */
633         for (i = 0; i < arity; ++i) {
634                 if (is_backedge(node, i))
635                         set_backedge(cp, i);
636         }
637
638         if (is_Block(cp)) {
639                 set_Block_mark(cp, 0);
640         }
641
642         return cp;
643 }
644
645
646 /**
647  * This walker copies all walked nodes.
648  * If the walk_condition is true for a node, it is copied.
649  * All copies have to be unset (NULL) prior to every walk.
650  * Order of ins is important for later usage.
651  */
652 static void copy_walk(ir_node *node, walker_condition *walk_condition,
653                       ir_loop *set_loop)
654 {
655         int i;
656         int arity;
657         ir_node *cp;
658         ir_node **cpin;
659
660         /**
661          * break condition and cycle resolver, creating temporary node copies
662          */
663         if (irn_visited(node)) {
664                 /* Here we rely on nodestate's copy being initialized with NULL */
665                 DB((dbg, LEVEL_5, "copy_walk: We have already visited %N\n", node));
666                 if (get_inversion_copy(node) == NULL) {
667                         cp = copy_node(node);
668                         set_inversion_copy(node, cp);
669
670                         DB((dbg, LEVEL_5, "The TEMP copy of %N is created %N\n", node, cp));
671                 }
672                 return;
673         }
674
675         /* Walk */
676         mark_irn_visited(node);
677
678         if (!is_Block(node)) {
679                 ir_node *pred = get_nodes_block(node);
680                 if (walk_condition(pred))
681                         DB((dbg, LEVEL_5, "walk block %N\n", pred));
682                 copy_walk(pred, walk_condition, set_loop);
683         }
684
685         arity = get_irn_arity(node);
686
687         NEW_ARR_A(ir_node *, cpin, arity);
688
689         for (i = 0; i < arity; ++i) {
690                 ir_node *pred = get_irn_n(node, i);
691
692                 if (walk_condition(pred)) {
693                         DB((dbg, LEVEL_5, "walk node %N\n", pred));
694                         copy_walk(pred, walk_condition, set_loop);
695                         cpin[i] = get_inversion_copy(pred);
696                         DB((dbg, LEVEL_5, "copy of %N gets new in %N which is copy of %N\n",
697                                                 node, get_inversion_copy(pred), pred));
698                 } else {
699                         cpin[i] = pred;
700                 }
701         }
702
703         /* copy node / finalize temp node */
704         if (get_inversion_copy(node) == NULL) {
705                 /* No temporary copy existent */
706                 cp = copy_node(node);
707                 set_inversion_copy(node, cp);
708                 DB((dbg, LEVEL_5, "The FINAL copy of %N is CREATED %N\n", node, cp));
709         } else {
710                 /* temporary copy is existent but without correct ins */
711                 cp = get_inversion_copy(node);
712                 DB((dbg, LEVEL_5, "The FINAL copy of %N is EXISTENT %N\n", node, cp));
713         }
714
715         if (!is_Block(node)) {
716                 ir_node *cpblock = get_inversion_copy(get_nodes_block(node));
717
718                 set_nodes_block(cp, cpblock );
719                 if (is_Phi(cp))
720                         add_Block_phi(cpblock, cp);
721         }
722
723         /* Keeps phi list of temporary node. */
724         set_irn_in(cp, ARR_LEN(cpin), cpin);
725 }
726
727 /**
728  * This walker copies all walked nodes.
729  * If the walk_condition is true for a node, it is copied.
730  * All nodes node_info->copy have to be NULL prior to every walk.
731  * All copies have to be unset (NULL) prior to every walk.
732  * Takes copy_index to store the copy at a specific index.
733  */
734 static void copy_walk_n(ir_node *node, walker_condition *walk_condition,
735                         int copy_index)
736 {
737         int i;
738         int arity;
739         ir_node *cp;
740         ir_node **cpin;
741
742         /**
743          * break condition and cycle resolver, creating temporary node copies
744          */
745         if (irn_visited(node)) {
746                 /* Here we rely on nodestate's copy being initialized with NULL */
747                 DB((dbg, LEVEL_5, "copy_walk: We have already visited %N\n", node));
748                 if (get_unroll_copy(node, copy_index) == NULL) {
749                         ir_node *u;
750                         u = copy_node(node);
751                         set_unroll_copy(node, copy_index, u);
752                         DB((dbg, LEVEL_5, "The TEMP copy of %N is created %N\n", node, u));
753                 }
754                 return;
755         }
756
757         /* Walk */
758         mark_irn_visited(node);
759
760         if (!is_Block(node)) {
761                 ir_node *block = get_nodes_block(node);
762                 if (walk_condition(block))
763                         DB((dbg, LEVEL_5, "walk block %N\n", block));
764                 copy_walk_n(block, walk_condition, copy_index);
765         }
766
767         arity = get_irn_arity(node);
768         NEW_ARR_A(ir_node *, cpin, arity);
769
770         for (i = 0; i < arity; ++i) {
771                 ir_node *pred = get_irn_n(node, i);
772
773                 if (walk_condition(pred)) {
774                         DB((dbg, LEVEL_5, "walk node %N\n", pred));
775                         copy_walk_n(pred, walk_condition, copy_index);
776                         cpin[i] = get_unroll_copy(pred, copy_index);
777                 } else {
778                         cpin[i] = pred;
779                 }
780         }
781
782         /* copy node / finalize temp node */
783         cp = get_unroll_copy(node, copy_index);
784         if (cp == NULL || is_Unknown(cp)) {
785                 cp = copy_node(node);
786                 set_unroll_copy(node, copy_index, cp);
787                 DB((dbg, LEVEL_5, "The FINAL copy of %N is CREATED %N\n", node, cp));
788         } else {
789                 /* temporary copy is existent but without correct ins */
790                 cp = get_unroll_copy(node, copy_index);
791                 DB((dbg, LEVEL_5, "The FINAL copy of %N is EXISTENT %N\n", node, cp));
792         }
793
794         if (!is_Block(node)) {
795                 ir_node *cpblock = get_unroll_copy(get_nodes_block(node), copy_index);
796
797                 set_nodes_block(cp, cpblock );
798                 if (is_Phi(cp))
799                         add_Block_phi(cpblock, cp);
800         }
801
802         /* Keeps phi list of temporary node. */
803         set_irn_in(cp, ARR_LEN(cpin), cpin);
804 }
805
806 /* Removes all blocks with unmarked predecessors from the condition chain. */
807 static void unmark_not_allowed_cc_blocks(void)
808 {
809         size_t blocks = ARR_LEN(cc_blocks);
810         size_t i;
811
812         for(i = 0; i < blocks; ++i) {
813                 ir_node *block = cc_blocks[i];
814
815                 /* Head is an exception. */
816                 if (block == loop_head)
817                         continue;
818
819                 int const arity = get_Block_n_cfgpreds(block);
820                 for (int a = 0; a < arity; ++a) {
821                         if (!is_nodes_block_marked(get_Block_cfgpred(block, a))) {
822                                 set_Block_mark(block, 0);
823                                 --inversion_blocks_in_cc;
824                                 DB((dbg, LEVEL_5, "Removed %N from cc (blocks in cc %d)\n",
825                                                 block, inversion_blocks_in_cc));
826
827                                 break;
828                         }
829                 }
830         }
831 }
832
833 /* Unmarks all cc blocks in cc_blocks, currently including the head.
834  * TODO: keep the head inverted for unrolling? */
835 static void unmark_cc_blocks(void)
836 {
837         size_t blocks = ARR_LEN(cc_blocks);
838         size_t i;
839
840         for(i = 0; i < blocks; ++i) {
841                 ir_node *block = cc_blocks[i];
842
843                 /* TODO Head is an exception. */
844                 /*if (block != loop_head)*/
845                 set_Block_mark(block, 0);
846         }
847         /*inversion_blocks_in_cc = 1;*/
848         inversion_blocks_in_cc = 0;
849
850         /* invalidate */
851         loop_info.cc_size = 0;
852 }
853
854 /**
855  * Populates head_entries with (node, pred_pos) tuples
856  * where the node's pred at pred_pos is in the cc but the node itself is not.
857  * Also finds df loops inside the cc.
858  * Head and condition chain blocks have been marked previously.
859  */
860 static void get_head_outs(ir_node *node, void *env)
861 {
862         int i;
863         int arity = get_irn_arity(node);
864         (void) env;
865
866         for (i = 0; i < arity; ++i) {
867                 if (!is_nodes_block_marked(node) && is_nodes_block_marked(get_irn_n(node, i))) {
868                         entry_edge entry;
869                         entry.node = node;
870                         entry.pos = i;
871                         /* Saving also predecessor seems redundant, but becomes
872                          * necessary when changing position of it, before
873                          * dereferencing it.*/
874                         entry.pred = get_irn_n(node, i);
875                         ARR_APP1(entry_edge, cur_head_outs, entry);
876                 }
877         }
878
879         arity = get_irn_arity(loop_head);
880
881         /* Find df loops inside the cc */
882         if (is_Phi(node) && get_nodes_block(node) == loop_head) {
883                 for (i = 0; i < arity; ++i) {
884                         if (is_own_backedge(loop_head, i)) {
885                                 if (is_nodes_block_marked(get_irn_n(node, i))) {
886                                         entry_edge entry;
887                                         entry.node = node;
888                                         entry.pos = i;
889                                         entry.pred = get_irn_n(node, i);
890                                         ARR_APP1(entry_edge, head_df_loop, entry);
891                                         DB((dbg, LEVEL_5, "Found incc assignment node %N @%d is pred %N, graph %N %N\n",
892                                                         node, i, entry.pred, current_ir_graph, get_irg_start_block(current_ir_graph)));
893                                 }
894                         }
895                 }
896         }
897 }
898
899 /**
900  * Finds condition chains and adds them to be inverted.
901  * A block belongs to the chain if a condition branches out of the loop
902  * (some blocks need to be removed once again).
903  * Marks the given block if it belongs to the condition chain.
904  */
905 static void find_condition_chain(ir_node *block)
906 {
907         bool     mark     = false;
908         bool     has_be   = false;
909         bool     jmp_only = true;
910         unsigned nodes_n  = 0;
911
912         mark_irn_visited(block);
913
914         DB((dbg, LEVEL_5, "condition_chains for block %N\n", block));
915
916         /* Get node count */
917         foreach_out_edge_kind(block, edge, EDGE_KIND_NORMAL) {
918                 ++nodes_n;
919         }
920
921         /* Check if node count would exceed maximum cc size.
922          * TODO
923          * This is not optimal, as we search depth-first and break here,
924          * continuing with another subtree. */
925         if (loop_info.cc_size + nodes_n > opt_params.max_cc_size) {
926                 set_Block_mark(block, 0);
927                 return;
928         }
929
930         /* Check if block only has a jmp instruction. */
931         foreach_out_edge(block, edge) {
932                 ir_node *src = get_edge_src_irn(edge);
933
934                 if (!is_Block(src) && !is_Jmp(src)) {
935                         jmp_only = false;
936                 }
937         }
938
939         /* Check cf outs if one is leaving the loop,
940          * or if this node has a backedge. */
941         foreach_block_succ(block, edge) {
942                 ir_node *src = get_edge_src_irn(edge);
943                 int pos = get_edge_src_pos(edge);
944
945                 if (!is_in_loop(src))
946                         mark = true;
947
948                 /* Inverting blocks with backedge outs leads to a cf edge
949                  * from the inverted head, into the inverted head (skipping the body).
950                  * As the body becomes the new loop head,
951                  * this would introduce another loop in the existing loop.
952                  * This loop inversion cannot cope with this case. */
953                 if (is_backedge(src, pos)) {
954                         has_be = true;
955                         break;
956                 }
957         }
958
959         /* We need all predecessors to already belong to the condition chain.
960          * Example of wrong case:  * == in cc
961          *
962          *     Head*             ,--.
963          *    /|   \            B   |
964          *   / A*  B           /    |
965          *  / /\   /          ?     |
966          *   /   C*      =>      D  |
967          *      /  D           Head |
968          *     /               A  \_|
969          *                      C
970          */
971         /* Collect blocks containing only a Jmp.
972          * Do not collect blocks with backedge outs. */
973         if ((jmp_only || mark) && !has_be) {
974                 set_Block_mark(block, 1);
975                 ++inversion_blocks_in_cc;
976                 loop_info.cc_size += nodes_n;
977                 DB((dbg, LEVEL_5, "block %N is part of condition chain\n", block));
978                 ARR_APP1(ir_node *, cc_blocks, block);
979         } else {
980                 set_Block_mark(block, 0);
981         }
982
983         foreach_block_succ(block, edge) {
984                 ir_node *src = get_edge_src_irn( edge );
985
986                 if (is_in_loop(src) && ! irn_visited(src))
987                         find_condition_chain(src);
988         }
989 }
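/* Illustrative source-level example (an assumption for documentation, not
 * taken from the original sources): in a loop of the form
 *
 *     while (1) {
 *             if (a) break;    block branching out of the loop
 *             if (b) break;    also part of the condition chain
 *             body();          first real body block, not in the cc
 *     }
 *
 * the blocks testing a and b form the condition chain.  Loop inversion
 * duplicates this chain in front of the loop, so the body executes before the
 * (copied) exit tests are evaluated again. */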
990
991 /**
992  * Rewires the copied condition chain. Removes backedges
993  * as this condition chain is prior to the loop.
994  * Copy of loop_head must have phi list and old (unfixed) backedge info of the loop head.
995  * (loop_head is already fixed, we cannot rely on it.)
996  */
997 static void fix_copy_inversion(void)
998 {
999         ir_node *new_head;
1000         ir_node **ins;
1001         ir_node **phis;
1002         ir_node *phi, *next;
1003         ir_node *head_cp = get_inversion_copy(loop_head);
1004         ir_graph *irg    = get_irn_irg(head_cp);
1005         int arity        = get_irn_arity(head_cp);
1006         int backedges    = get_backedge_n(head_cp, false);
1007         int new_arity    = arity - backedges;
1008         int pos;
1009         int i;
1010
1011         NEW_ARR_A(ir_node *, ins, new_arity);
1012
1013         pos = 0;
1014         /* Remove block backedges */
1015         for(i = 0; i < arity; ++i) {
1016                 if (!is_backedge(head_cp, i))
1017                         ins[pos++] = get_irn_n(head_cp, i);
1018         }
1019
1020         new_head = new_r_Block(irg, new_arity, ins);
1021
1022         phis = NEW_ARR_F(ir_node *, 0);
1023
1024         for_each_phi_safe(get_Block_phis(head_cp), phi, next) {
1025                 ir_node *new_phi;
1026                 NEW_ARR_A(ir_node *, ins, new_arity);
1027                 pos = 0;
1028                 for(i = 0; i < arity; ++i) {
1029                         if (!is_backedge(head_cp, i))
1030                                 ins[pos++] = get_irn_n(phi, i);
1031                 }
1032                 new_phi = new_rd_Phi(get_irn_dbg_info(phi),
1033                                 new_head, new_arity, ins,
1034                                 get_irn_mode(phi));
1035                 ARR_APP1(ir_node *, phis, new_phi);
1036         }
1037
1038         pos = 0;
1039         for_each_phi_safe(get_Block_phis(head_cp), phi, next) {
1040                 exchange(phi, phis[pos++]);
1041         }
1042
1043         exchange(head_cp, new_head);
1044
1045         DEL_ARR_F(phis);
1046 }
1047
1048
1049 /* Puts the original condition chain at the end of the loop,
1050  * right after the body.
1051  * Relies on block phi list and correct backedges.
1052  */
1053 static void fix_head_inversion(void)
1054 {
1055         ir_node *new_head;
1056         ir_node **ins;
1057         ir_node *phi, *next;
1058         ir_node **phis;
1059         ir_graph *irg = get_irn_irg(loop_head);
1060         int arity     = get_irn_arity(loop_head);
1061         int backedges = get_backedge_n(loop_head, false);
1062         int new_arity = backedges;
1063         int pos;
1064         int i;
1065
1066         NEW_ARR_A(ir_node *, ins, new_arity);
1067
1068         pos = 0;
1069         /* Keep only backedges */
1070         for(i = 0; i < arity; ++i) {
1071                 if (is_own_backedge(loop_head, i))
1072                         ins[pos++] = get_irn_n(loop_head, i);
1073         }
1074
1075         new_head = new_r_Block(irg, new_arity, ins);
1076
1077         phis = NEW_ARR_F(ir_node *, 0);
1078
1079         for_each_phi(loop_head, phi) {
1080                 ir_node *new_phi;
1081                 DB((dbg, LEVEL_5, "Fixing phi %N of loop head\n", phi));
1082
1083                 NEW_ARR_A(ir_node *, ins, new_arity);
1084
1085                 pos = 0;
1086                 for (i = 0; i < arity; ++i) {
1087                         ir_node *pred = get_irn_n(phi, i);
1088
1089                         if (is_own_backedge(loop_head, i)) {
1090                                 /* If assignment is in the condition chain,
1091                                  * we need to create a phi in the new loop head.
1092                                  * This can only happen for df, not cf. See find_condition_chains. */
1093                                 /*if (is_nodes_block_marked(pred)) {
1094                                         ins[pos++] = pred;
1095                                 } else {*/
1096                                 ins[pos++] = pred;
1097
1098                         }
1099                 }
1100
1101                 new_phi = new_rd_Phi(get_irn_dbg_info(phi),
1102                         new_head, new_arity, ins,
1103                         get_irn_mode(phi));
1104
1105                 ARR_APP1(ir_node *, phis, new_phi);
1106
1107                 DB((dbg, LEVEL_5, "fix inverted head should exch %N by %N (pos %d)\n", phi, new_phi, pos ));
1108         }
1109
1110         pos = 0;
1111         for_each_phi_safe(get_Block_phis(loop_head), phi, next) {
1112                 DB((dbg, LEVEL_5, "fix inverted exch phi %N by %N\n", phi, phis[pos]));
1113                 if (phis[pos] != phi)
1114                         exchange(phi, phis[pos++]);
1115         }
1116
1117         DEL_ARR_F(phis);
1118
1119         DB((dbg, LEVEL_5, "fix inverted head exch head block %N by %N\n", loop_head, new_head));
1120         exchange(loop_head, new_head);
1121 }
1122
1123 /* Does the loop inversion.  */
1124 static void inversion_walk(ir_graph *irg, entry_edge *head_entries)
1125 {
1126         size_t i;
1127
1128         /*
1129          * The order of rewiring bottom-up is crucial.
1130          * Any change of the order leads to lost information that would be needed later.
1131          */
1132
1133         ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
1134
1135         /* 1. clone condition chain */
1136         inc_irg_visited(irg);
1137
1138         for (i = 0; i < ARR_LEN(head_entries); ++i) {
1139                 entry_edge entry = head_entries[i];
1140                 ir_node *pred = get_irn_n(entry.node, entry.pos);
1141
1142                 DB((dbg, LEVEL_5, "\nInit walk block %N\n", pred));
1143
1144                 copy_walk(pred, is_nodes_block_marked, cur_loop);
1145         }
1146
1147         ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
1148
1149         /* 2. Extends the head control flow successors ins
1150          *    with the definitions of the copied head node. */
1151         for (i = 0; i < ARR_LEN(head_entries); ++i) {
1152                 entry_edge head_out = head_entries[i];
1153
1154                 if (is_Block(head_out.node))
1155                         extend_ins_by_copy(head_out.node, head_out.pos);
1156         }
1157
1158         /* 3. construct_ssa for users of definitions in the condition chain,
1159          *    as there is now a second definition. */
1160         for (i = 0; i < ARR_LEN(head_entries); ++i) {
1161                 entry_edge head_out = head_entries[i];
1162
1163                 /* Ignore keepalives */
1164                 if (is_End(head_out.node))
1165                         continue;
1166
1167                 /* Construct ssa for assignments in the condition chain. */
1168                 if (!is_Block(head_out.node)) {
1169                         ir_node *pred, *cppred, *block, *cpblock;
1170
1171                         pred = head_out.pred;
1172                         cppred = get_inversion_copy(pred);
1173                         block = get_nodes_block(pred);
1174                         cpblock = get_nodes_block(cppred);
1175                         construct_ssa(block, pred, cpblock, cppred);
1176                 }
1177         }
1178
1179         /*
1180          * If there is an assignment in the condition chain
1181          * with a user also in the condition chain,
1182          * the dominance frontier is in the new loop head.
1183          * The dataflow loop is completely in the condition chain.
1184          * Goal:
1185          *  To be wired: >|
1186          *
1187          *  | ,--.   |
1188          * Phi_cp |  | copied condition chain
1189          * >| |   |  |
1190          * >| ?__/   |
1191          * >| ,-.
1192          *  Phi* |   | new loop head with newly created phi.
1193          *   |   |
1194          *  Phi  |   | original, inverted condition chain
1195          *   |   |   |
1196          *   ?__/    |
1197          *
1198          */
1199         for (i = 0; i < ARR_LEN(head_df_loop); ++i) {
1200                 entry_edge head_out = head_df_loop[i];
1201
1202                 /* Construct ssa for assignments in the condition chain. */
1203                 ir_node *pred, *cppred, *block, *cpblock;
1204
1205                 pred = head_out.pred;
1206                 cppred = get_inversion_copy(pred);
1207                 assert(cppred && pred);
1208                 block = get_nodes_block(pred);
1209                 cpblock = get_nodes_block(cppred);
1210                 construct_ssa(block, pred, cpblock, cppred);
1211         }
1212
1213         /* 4. Remove the ins which are not backedges from the original condition chain
1214          *    as the cc is now subsequent to the body. */
1215         fix_head_inversion();
1216
1217         /* 5. Remove the backedges of the copied condition chain,
1218          *    because it is going to be the new 'head' in advance to the loop. */
1219         fix_copy_inversion();
1220
1221 }
1222
1223 /* Performs loop inversion of cur_loop if possible and reasonable. */
1224 static void loop_inversion(ir_graph *irg)
1225 {
1226         int      loop_depth;
1227         unsigned max_loop_nodes = opt_params.max_loop_size;
1228         unsigned max_loop_nodes_adapted;
1229         int      depth_adaption = opt_params.depth_adaption;
1230
1231         bool do_inversion = true;
1232
1233         /* Depth of 0 is the procedure and 1 a topmost loop. */
1234         loop_depth = get_loop_depth(cur_loop) - 1;
1235
1236         /* The adaption is calculated in percent (see get_max_nodes_adapted). */
1237         max_loop_nodes_adapted = get_max_nodes_adapted(loop_depth);
1238
1239         DB((dbg, LEVEL_1, "max_nodes: %d\nmax_nodes_adapted %d at depth of %d (adaption %d)\n",
1240                         max_loop_nodes, max_loop_nodes_adapted, loop_depth, depth_adaption));
1241
1242         if (loop_info.nodes == 0)
1243                 return;
1244
1245         if (loop_info.nodes > max_loop_nodes) {
1246                 /* Only for stats */
1247                 DB((dbg, LEVEL_1, "Nodes %d > allowed nodes %d\n",
1248                         loop_info.nodes, max_loop_nodes));
1249                 ++stats.too_large;
1250                 /* no RETURN */
1251                 /* Adaption might change it */
1252         }
1253
1254         /* Limit processing to loops smaller than given parameter. */
1255         if (loop_info.nodes > max_loop_nodes_adapted) {
1256                 DB((dbg, LEVEL_1, "Nodes %d > allowed nodes (depth %d adapted) %d\n",
1257                         loop_info.nodes, loop_depth, max_loop_nodes_adapted));
1258                 ++stats.too_large_adapted;
1259                 return;
1260         }
1261
1262         if (loop_info.calls > opt_params.allowed_calls) {
1263                 DB((dbg, LEVEL_1, "Calls %d > allowed calls %d\n",
1264                         loop_info.calls, opt_params.allowed_calls));
1265                 ++stats.calls_limit;
1266                 return;
1267         }
1268
1269         /*inversion_head_node_limit = INT_MAX;*/
1270         ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK);
1271
1272         /* Reset block marks.
1273          * We use block marks to flag blocks of the original condition chain. */
1274         irg_walk_graph(irg, reset_block_mark, NULL, NULL);
1275
1276         /*loop_info.blocks = get_loop_n_blocks(cur_loop);*/
1277         cond_chain_entries = NEW_ARR_F(entry_edge, 0);
1278         head_df_loop = NEW_ARR_F(entry_edge, 0);
1279
1280         /*head_inversion_node_count = 0;*/
1281         inversion_blocks_in_cc = 0;
1282
1283         /* Use a node map to keep copies of nodes from the condition chain. */
1284         ir_nodemap_init(&map, irg);
1285         obstack_init(&obst);
1286
1287         /* Search for condition chains and temporarily save the blocks in an array. */
1288         cc_blocks = NEW_ARR_F(ir_node *, 0);
1289         inc_irg_visited(irg);
1290         find_condition_chain(loop_head);
1291
1292         unmark_not_allowed_cc_blocks();
1293         DEL_ARR_F(cc_blocks);
1294
1295         /* Condition chain too large.
1296          * The loop should be small enough to fit into the cache. */
1297         /* TODO Of course, we should take a small enough cc in the first place,
1298          * which is not that simple. (bin packing)  */
1299         if (loop_info.cc_size > opt_params.max_cc_size) {
1300                 ++stats.cc_limit_reached;
1301
1302                 do_inversion = false;
1303
1304                 /* Unmark cc blocks except the head.
1305                  * Invert head only for possible unrolling. */
1306                 unmark_cc_blocks();
1307         }
1308
1309         /* We also catch endless loops here,
1310          * because they do not have a condition chain. */
1311         if (inversion_blocks_in_cc < 1) {
1312                 do_inversion = false;
1313                 DB((dbg, LEVEL_3,
1314                         "Loop contains %d (less than 1) invertible blocks => No Inversion done.\n",
1315                         inversion_blocks_in_cc));
1316         }
1317
1318         if (do_inversion) {
1319                 cur_head_outs = NEW_ARR_F(entry_edge, 0);
1320
1321                 /* Get all edges pointing into the condition chain. */
1322                 irg_walk_graph(irg, get_head_outs, NULL, NULL);
1323
1324                 /* Do the inversion */
1325                 inversion_walk(irg, cur_head_outs);
1326
1327                 DEL_ARR_F(cur_head_outs);
1328
1329                 /* Duplicated blocks changed doms */
1330                 clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
1331                                    | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
1332
1333                 ++stats.inverted;
1334         }
1335
1336         /* free */
1337         obstack_free(&obst, NULL);
1338         ir_nodemap_destroy(&map);
1339         DEL_ARR_F(cond_chain_entries);
1340         DEL_ARR_F(head_df_loop);
1341
1342         ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK);
1343 }
1344
1345 /* Fixes the original loop head's ins for the invariant unrolling case. */
1346 static void unrolling_fix_loop_head_inv(void)
1347 {
1348         ir_node *ins[2];
1349         ir_node *phi;
1350         ir_node *proj = new_Proj(loop_info.duff_cond, mode_X, 0);
1351         ir_node *head_pred = get_irn_n(loop_head, loop_info.be_src_pos);
1352         ir_node *loop_condition = get_unroll_copy(head_pred, unroll_nr - 1);
1353
1354         /* The original loop head's ins are:
1355          * the duff block and its own backedge */
1356
1357         ins[0] = loop_condition;
1358         ins[1] = proj;
1359         set_irn_in(loop_head, 2, ins);
1360         DB((dbg, LEVEL_4, "Rewire ins of block loophead %N to pred %N and duffs entry %N \n" , loop_head, ins[0], ins[1]));
1361
1362         for_each_phi(loop_head, phi) {
1363                 ir_node *pred = get_irn_n(phi, loop_info.be_src_pos);
1364                 /* TODO: we expect a phi here, but for mergesort this is not the case. */
1365
1366                 ir_node *last_pred = get_unroll_copy(pred, unroll_nr - 1);
1367
1368                 ins[0] = last_pred;
1369                 ins[1] = (ir_node*)get_irn_link(phi);
1370                 set_irn_in(phi, 2, ins);
1371                 DB((dbg, LEVEL_4, "Rewire ins of loophead phi %N to pred %N and duffs entry %N \n" , phi, ins[0], ins[1]));
1372         }
1373 }
1374
1375 /* Removes previously created phis with only 1 in. */
1376 static void correct_phis(ir_node *node, void *env)
1377 {
1378         (void)env;
1379
1380         if (is_Phi(node) && get_irn_arity(node) == 1) {
1381                 ir_node *exch;
1382                 ir_node *in[1];
1383
1384                 in[0] = get_irn_n(node, 0);
1385
1386                 exch = new_rd_Phi(get_irn_dbg_info(node),
1387                     get_nodes_block(node), 1, in,
1388                         get_irn_mode(node));
1389
1390                 exchange(node, exch);
1391         }
1392 }
1393
1394 /* Unrolling: Rewire floating copies. */
1395 static void place_copies(int copies)
1396 {
1397         ir_node *loophead = loop_head;
1398         size_t i;
1399         int c;
1400         int be_src_pos = loop_info.be_src_pos;
1401
1402         /* Serialize the loop copies by fixing their head ins.
1403          * The copies are processed first;
1404          * the original loop is handled afterwards to keep the backedge info. */
1405         for (c = 0; c < copies; ++c) {
1406                 ir_node *upper = get_unroll_copy(loophead, c);
1407                 ir_node *lower = get_unroll_copy(loophead, c + 1);
1408                 ir_node *phi;
1409                 ir_node *topmost_be_block = get_nodes_block(get_irn_n(loophead, be_src_pos));
1410
1411                 /* Important: get the preds first and then their copy. */
1412                 ir_node *upper_be_block = get_unroll_copy(topmost_be_block, c);
1413                 ir_node *new_jmp = new_r_Jmp(upper_be_block);
1414                 DB((dbg, LEVEL_5, " place_copies upper %N lower %N\n", upper, lower));
1415
1416                 DB((dbg, LEVEL_5, "topmost be block %N \n", topmost_be_block));
1417
1418                 if (loop_info.unroll_kind == constant) {
1419                         ir_node *ins[1];
1420                         ins[0] = new_jmp;
1421                         set_irn_in(lower, 1, ins);
1422
1423                         for_each_phi(loophead, phi) {
1424                                 ir_node *topmost_def = get_irn_n(phi, be_src_pos);
1425                                 ir_node *upper_def = get_unroll_copy(topmost_def, c);
1426                                 ir_node *lower_phi = get_unroll_copy(phi, c + 1);
1427
1428                                 /* It is possible that the value used
1429                                  * in the OWN backedge path is NOT defined in this loop. */
1430                                 if (is_in_loop(topmost_def))
1431                                         ins[0] = upper_def;
1432                                 else
1433                                         ins[0] = topmost_def;
1434
1435                                 set_irn_in(lower_phi, 1, ins);
1436                                 /* Need to replace phis with 1 in later. */
1437                         }
1438                 } else {
1439                         /* Invariant case */
1440                         /* Every node has 2 ins. One from the duff blocks
1441                          * and one from the previously unrolled loop. */
1442                         ir_node *ins[2];
1443                         /* Calculate corresponding projection of mod result for this copy c */
1444                         ir_node *proj = new_Proj(loop_info.duff_cond, mode_X, unroll_nr - c - 1);
1445                         DB((dbg, LEVEL_4, "New duff proj %N\n" , proj));
1446
1447                         ins[0] = new_jmp;
1448                         ins[1] = proj;
1449                         set_irn_in(lower, 2, ins);
1450                         DB((dbg, LEVEL_4, "Rewire ins of Block %N to pred %N and duffs entry %N \n" , lower, ins[0], ins[1]));
1451
1452                         for_each_phi(loophead, phi) {
1453                                 ir_node *topmost_phi_pred = get_irn_n(phi, be_src_pos);
1454                                 ir_node *upper_phi_pred;
1455                                 ir_node *lower_phi;
1456                                 ir_node *duff_phi;
1457
1458                                 lower_phi = get_unroll_copy(phi, c + 1);
1459                                 duff_phi = (ir_node*)get_irn_link(phi);
1460                                 DB((dbg, LEVEL_4, "DD Link of %N is %N\n" , phi, duff_phi));
1461
1462                                 /* Use the copy of the pred if it is defined inside the loop. */
1463                                 if (is_in_loop(topmost_phi_pred)) {
1464                                         upper_phi_pred = get_unroll_copy(topmost_phi_pred, c);
1465                                 } else {
1466                                         upper_phi_pred = topmost_phi_pred;
1467                                 }
1468
1469                                 ins[0] = upper_phi_pred;
1470                                 ins[1] = duff_phi;
1471                                 set_irn_in(lower_phi, 2, ins);
1472                                 DB((dbg, LEVEL_4, "Rewire ins of %N to pred %N and duffs entry %N \n" , lower_phi, ins[0], ins[1]));
1473                         }
1474                 }
1475         }
1476
1477         /* Reconnect last copy. */
1478         for (i = 0; i < ARR_LEN(loop_entries); ++i) {
1479                 entry_edge edge = loop_entries[i];
1480                 /* Last copy is at the bottom */
1481                 ir_node *new_pred = get_unroll_copy(edge.pred, copies);
1482                 set_irn_n(edge.node, edge.pos, new_pred);
1483         }
1484
1485         /* Fix original loops head.
1486          * Done in the end, as ins and be info were needed before. */
1487         if (loop_info.unroll_kind == constant) {
1488                 ir_node *phi;
1489                 ir_node *head_pred = get_irn_n(loop_head, be_src_pos);
1490                 ir_node *loop_condition = get_unroll_copy(head_pred, unroll_nr - 1);
1491
1492                 set_irn_n(loop_head, loop_info.be_src_pos, loop_condition);
1493
1494                 for_each_phi(loop_head, phi) {
1495                         ir_node *pred = get_irn_n(phi, be_src_pos);
1496                         ir_node *last_pred;
1497
1498                         /* It is possible that the value used
1499                          * on the OWN backedge path is NOT assigned in this loop. */
1500                         if (is_in_loop(pred))
1501                                 last_pred = get_unroll_copy(pred, copies);
1502                         else
1503                                 last_pred = pred;
1504                         set_irn_n(phi, be_src_pos, last_pred);
1505                 }
1506
1507         } else {
1508                 unrolling_fix_loop_head_inv();
1509         }
1510 }
1511
1512 /* Copies the cur_loop several times. */
1513 static void copy_loop(entry_edge *cur_loop_outs, int copies)
1514 {
1515         int c;
1516
1517         ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1518
1519         for (c = 0; c < copies; ++c) {
1520                 size_t i;
1521
1522                 inc_irg_visited(current_ir_graph);
1523
1524                 DB((dbg, LEVEL_5, "         ### Copy_loop  copy nr: %d ###\n", c));
1525                 for (i = 0; i < ARR_LEN(cur_loop_outs); ++i) {
1526                         entry_edge entry = cur_loop_outs[i];
1527                         ir_node *pred = get_irn_n(entry.node, entry.pos);
1528
1529                         copy_walk_n(pred, is_in_loop, c + 1);
1530                 }
1531         }
1532
1533         ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1534 }
1535
1536
1537 /* Creates a new phi from the given phi node omitting own bes,
1538  * using be_block as supplier of backedge information. */
1539 static ir_node *clone_phis_sans_bes(ir_node *phi, ir_node *be_block, ir_node *dest_block)
1540 {
1541         ir_node **ins;
1542         int i, c = 0;
1543         ir_node *newphi;
1544
1545         int const arity = get_Phi_n_preds(phi);
1546         assert(arity == get_Block_n_cfgpreds(be_block));
1547
1548         ins = NEW_ARR_F(ir_node *, arity);
1549         for (i = 0; i < arity; ++i) {
1550                 if (! is_own_backedge(be_block, i)) {
1551                         ins[c] = get_irn_n(phi, i);
1552                         ++c;
1553                 }
1554         }
1555
1556         newphi = new_r_Phi(dest_block, c, ins, get_irn_mode(phi));
1557
1558         set_irn_link(phi, newphi);
1559         DB((dbg, LEVEL_4, "Linking for duffs device %N to %N\n", phi, newphi));
1560
1561         return newphi;
1562 }
1563
1564 /* Creates a new block from the given block node omitting own bes,
1565  * using be_block as supplier of backedge information. */
1566 static ir_node *clone_block_sans_bes(ir_node *node, ir_node *be_block)
1567 {
1568         int i, c = 0;
1569         ir_node **ins;
1570
1571         int const arity = get_Block_n_cfgpreds(node);
1572         assert(arity == get_irn_arity(be_block));
1573
1574         NEW_ARR_A(ir_node *, ins, arity);
1575         for (i = 0; i < arity; ++i) {
1576                 if (! is_own_backedge(be_block, i)) {
1577                         ins[c] = get_irn_n(node, i);
1578                         ++c;
1579                 }
1580         }
1581
1582         return new_Block(c, ins);
1583 }
1584
1585 /* Creates a structure to calculate absolute value of node op.
1586  * Returns mux node with absolute value. */
1587 static ir_node *new_Abs(ir_node *op, ir_mode *mode)
1588 {
1589         ir_graph *irg      = get_irn_irg(op);
1590         ir_node  *block    = get_nodes_block(op);
1591         ir_node  *zero     = new_r_Const(irg, get_mode_null(mode));
1592         ir_node  *cmp      = new_r_Cmp(block, op, zero, ir_relation_less);
1593         ir_node  *minus_op = new_r_Minus(block, op, mode);
1594         ir_node  *mux      = new_r_Mux(block, cmp, op, minus_op, mode);
1595
1596         return mux;
1597 }
1598
1599
1600 /* Creates blocks for duffs device, using previously obtained
1601  * information about the iv.
1602  * TODO split */
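/* Roughly: block1 computes count = (end - start) / step plus a correction
 * derived from (end - start) % step, count_block checks that count has a
 * usable sign, and duff_block computes count % unroll_nr to pick the entry copy. */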
1603 static void create_duffs_block(void)
1604 {
1605         ir_mode *mode;
1606
1607         ir_node *block1, *count_block, *duff_block;
1608         ir_node *ems, *ems_mod, *ems_div, *ems_mod_proj, *cmp_null,
1609                 *ems_mode_cond, *x_true, *x_false, *const_null;
1610         ir_node *true_val, *false_val;
1611         ir_node *ins[2];
1612
1613         ir_node *duff_mod, *proj, *cond;
1614
1615         ir_node *count, *correction, *unroll_c;
1616         ir_node *cmp_bad_count, *good_count, *bad_count, *count_phi, *bad_count_neg;
1617         ir_node *phi;
1618
1619         mode = get_irn_mode(loop_info.end_val);
1620         const_null = new_Const(get_mode_null(mode));
1621
1622         /* TODO naming
1623          * 1. Calculate first approach to count.
1624          *    Condition: (end - start) % step == 0 */
1625         block1 = clone_block_sans_bes(loop_head, loop_head);
1626         DB((dbg, LEVEL_4, "Duff block 1 %N\n", block1));
1627
1628         /* Create loop entry phis in first duff block
1629          * as it becomes the loop's preheader. */
1630         for_each_phi(loop_head, phi) {
1631                 /* Returns the phi's pred if the phi would have arity 1. */
1632                 ir_node *new_phi = clone_phis_sans_bes(phi, loop_head, block1);
1633
1634                 DB((dbg, LEVEL_4, "HEAD %N phi %N\n", loop_head, phi));
1635                 DB((dbg, LEVEL_4, "BLOCK1 %N phi %N\n", block1, new_phi));
1636         }
1637
1638         ems = new_r_Sub(block1, loop_info.end_val, loop_info.start_val,
1639                 get_irn_mode(loop_info.end_val));
1640         DB((dbg, LEVEL_4, "BLOCK1 sub %N\n", ems));
1641
1642
1643         ems = new_Sub(loop_info.end_val, loop_info.start_val,
1644                 get_irn_mode(loop_info.end_val));
1645
1646         DB((dbg, LEVEL_4, "mod ins %N %N\n", ems, loop_info.step));
1647         ems_mod = new_r_Mod(block1,
1648                 new_NoMem(),
1649                 ems,
1650                 loop_info.step,
1651                 mode,
1652                 op_pin_state_pinned);
1653         ems_div = new_r_Div(block1,
1654                 new_NoMem(),
1655                 ems,
1656                 loop_info.step,
1657                 mode,
1658                 op_pin_state_pinned);
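        /* The Mod result decides whether a correction is needed,
         * the Div result is the first approximation of the trip count. */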
1659
1660         DB((dbg, LEVEL_4, "New Mod node %N\n", ems_mod));
1661
1662         ems_mod_proj = new_r_Proj(ems_mod, mode_Iu, pn_Mod_res);
1663         cmp_null = new_r_Cmp(block1, ems_mod_proj, const_null, ir_relation_less);
1664         ems_mode_cond = new_r_Cond(block1, cmp_null);
1665
1666         /* ems % step == 0 */
1667         x_true = new_r_Proj(ems_mode_cond, mode_X, pn_Cond_true);
1668         /* ems % step != 0 */
1669         x_false = new_r_Proj(ems_mode_cond, mode_X, pn_Cond_false);
1670
1671         /* 2. Second block.
1672          * Assures duffs device receives a valid count.
1673          * Condition:
1674          *     decreasing: count < 0
1675          *     increasing: count > 0
1676          */
1677         ins[0] = x_true;
1678         ins[1] = x_false;
1679
1680         count_block = new_Block(2, ins);
1681         DB((dbg, LEVEL_4, "Duff block 2 %N\n", count_block));
1682
1683
1684         /* Increase loop-taken-count depending on the loop condition
1685          * uses the latest iv to compare to. */
1686         if (loop_info.latest_value == 1) {
1687                 /* ems % step == 0 :  +0 */
1688                 true_val = new_Const(get_mode_null(mode));
1689                 /* ems % step != 0 :  +1 */
1690                 false_val = new_Const(get_mode_one(mode));
1691         } else {
1692                 ir_tarval *tv_two = new_tarval_from_long(2, mode);
1693                 /* ems % step == 0 :  +1 */
1694                 true_val = new_Const(get_mode_one(mode));
1695                 /* ems % step != 0 :  +2 */
1696                 false_val = new_Const(tv_two);
1697         }
1698
1699         ins[0] = true_val;
1700         ins[1] = false_val;
1701
1702         correction = new_r_Phi(count_block, 2, ins, mode);
1703
1704         count = new_r_Proj(ems_div, mode, pn_Div_res);
1705
1706         /* (end - start) / step  +  correction */
1707         count = new_Add(count, correction, mode);
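        /* E.g. start 0, end 10, step 3: (10 - 0) / 3 = 3; with latest_value == 1
         * the correction Phi adds +1 for the 10 % 3 != 0 case, giving count = 4. */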
1708
1709         /* We preconditioned the loop to be tail-controlled.
1710          * So, if count is something 'wrong' like 0
1711          * or negative/positive (depending on the step direction),
1712          * we may take the loop once (tail-controlled) and leave it
1713          * to the existing condition to break out of the loop. */
1714
1715         /* Depending on step direction, we have to check for > or < 0 */
1716         if (loop_info.decreasing == 1) {
1717                 cmp_bad_count = new_r_Cmp(count_block, count, const_null,
1718                                           ir_relation_less);
1719         } else {
1720                 cmp_bad_count = new_r_Cmp(count_block, count, const_null,
1721                                           ir_relation_greater);
1722         }
1723
1724         bad_count_neg = new_r_Cond(count_block, cmp_bad_count);
1725         good_count = new_Proj(bad_count_neg, mode_X, pn_Cond_true);
1726         bad_count = new_Proj(ems_mode_cond, mode_X, pn_Cond_false);
1727
1728         /* 3. Duff Block
1729          *    Contains the Mod to decide which copy to start from. */
1730
1731         ins[0] = good_count;
1732         ins[1] = bad_count;
1733         duff_block = new_Block(2, ins);
1734         DB((dbg, LEVEL_4, "Duff block 3 %N\n", duff_block));
1735
1736         /* Get absolute value */
1737         ins[0] = new_Abs(count, mode);
1738         /* Manually feed the aforementioned count = 1 (bad case)*/
1739         ins[1] = new_Const(get_mode_one(mode));
1740         count_phi = new_r_Phi(duff_block, 2, ins, mode);
1741
1742         unroll_c = new_Const(new_tarval_from_long((long)unroll_nr, mode));
1743
1744         /* count % unroll_nr */
1745         duff_mod = new_r_Mod(duff_block,
1746                 new_NoMem(),
1747                 count_phi,
1748                 unroll_c,
1749                 mode,
1750                 op_pin_state_pinned);
1751
1752
1753         proj = new_Proj(duff_mod, mode, pn_Mod_res);
1754         /* The Cond is NOT automatically created in the block of the proj! */
1755         cond = new_r_Cond(duff_block, proj);
1756
1757         loop_info.duff_cond = cond;
1758 }
1759
1760 /* Returns 1 if given node is not in loop,
1761  * or if it is a phi of the loop head with only loop invariant defs.
1762  */
1763 static unsigned is_loop_invariant_def(ir_node *node)
1764 {
1765         int i;
1766
1767         if (! is_in_loop(node)) {
1768                 DB((dbg, LEVEL_4, "Not in loop %N\n", node));
1769                 /* || is_Const(node) || is_SymConst(node)) {*/
1770                 return 1;
1771         }
1772
1773         /* If this is a phi of the loophead shared by more than 1 loop,
1774          * we need to check if all defs are not in the loop.  */
1775         if (is_Phi(node)) {
1776                 ir_node *block;
1777                 block = get_nodes_block(node);
1778
1779                 /* Only phis of the loop head are considered here. */
1780                 if (block != loop_head) {
1781                         return 0;
1782                 }
1783
1784                 for (i = 0; i < get_irn_arity(node); ++i) {
1785                         /* Check if all bes are just loopbacks. */
1786                         if (is_own_backedge(block, i) && get_irn_n(node, i) != node)
1787                                 return 0;
1788                 }
1789                 DB((dbg, LEVEL_4, "invar %N\n", node));
1790                 return 1;
1791         }
1792         DB((dbg, LEVEL_4, "Not invar %N\n", node));
1793
1794         return 0;
1795 }
1796
1797 /* Returns 1 if one pred of node is invariant and the other is not.
1798  * invar_pred and other are set analogously. */
1799 static unsigned get_invariant_pred(ir_node *node, ir_node **invar_pred, ir_node **other)
1800 {
1801         ir_node *pred0 = get_irn_n(node, 0);
1802         ir_node *pred1 = get_irn_n(node, 1);
1803
1804         *invar_pred = NULL;
1805         *other = NULL;
1806
1807         if (is_loop_invariant_def(pred0)) {
1808                 DB((dbg, LEVEL_4, "pred0 invar %N\n", pred0));
1809                 *invar_pred = pred0;
1810                 *other = pred1;
1811         }
1812
1813         if (is_loop_invariant_def(pred1)) {
1814                 DB((dbg, LEVEL_4, "pred1 invar %N\n", pred1));
1815
1816                 if (*invar_pred != NULL) {
1817                         /* RETURN. We do not want both preds to be invariant. */
1818                         return 0;
1819                 }
1820
1821                 *other = pred0;
1822                 *invar_pred = pred1;
1823                 return 1;
1824         } else {
1825                 DB((dbg, LEVEL_4, "pred1 not invar %N\n", pred1));
1826
1827                 if (*invar_pred != NULL)
1828                         return 1;
1829                 else
1830                         return 0;
1831         }
1832 }
1833
1834 /* Starts from a phi that may belong to an iv.
1835  * If an add forms a loop with iteration_phi,
1836  * and add uses a constant, 1 is returned
1837  * and 'start' as well as 'add' are sane. */
1838 static unsigned get_start_and_add(ir_node *iteration_phi, unrolling_kind_flag role)
1839 {
1840         int i;
1841         ir_node *found_add = loop_info.add;
1842         int arity = get_irn_arity(iteration_phi);
1843
1844         DB((dbg, LEVEL_4, "Find start and add from %N\n", iteration_phi));
1845
1846         for (i = 0; i < arity; ++i) {
1847
1848                 /* Find start_val which needs to be pred of the iteration_phi.
1849                  * If start_val already known, sanity check. */
1850                 if (!is_backedge(get_nodes_block(loop_info.iteration_phi), i)) {
1851                         ir_node *found_start_val = get_irn_n(loop_info.iteration_phi, i);
1852
1853                         DB((dbg, LEVEL_4, "found_start_val %N\n", found_start_val));
1854
1855                         /* We already found a start_val it has to be always the same. */
1856                         if (loop_info.start_val && found_start_val != loop_info.start_val)
1857                                 return 0;
1858
1859                         if ((role == constant) && !(is_SymConst(found_start_val) || is_Const(found_start_val)))
1860                                 return 0;
1861                         else if ((role == invariant) && !is_loop_invariant_def(found_start_val))
1862                                 return 0;
1863
1864                         loop_info.start_val = found_start_val;
1865                 }
1866
1867                 /* The phi has to be in the loop head.
1868                  * Follow all own backedges. Every value supplied from these preds of the phi
1869                  * needs to origin from the same add. */
1870                 if (is_own_backedge(get_nodes_block(loop_info.iteration_phi), i)) {
1871                         ir_node *new_found = get_irn_n(loop_info.iteration_phi,i);
1872
1873                         DB((dbg, LEVEL_4, "is add? %N\n", new_found));
1874
1875                         if (! (is_Add(new_found) || is_Sub(new_found)) || (found_add && found_add != new_found))
1876                                 return 0;
1877                         else
1878                                 found_add = new_found;
1879                 }
1880         }
1881
1882         loop_info.add = found_add;
1883
1884         return 1;
1885 }
1886
1887
1888 /* Returns 1 if one pred of node is a const value and the other is not.
1889  * const_pred and other are set analogously. */
1890 static unsigned get_const_pred(ir_node *node, ir_node **const_pred, ir_node **other)
1891 {
1892         ir_node *pred0 = get_irn_n(node, 0);
1893         ir_node *pred1 = get_irn_n(node, 1);
1894
1895         DB((dbg, LEVEL_4, "Checking for constant pred of %N\n", node));
1896
1897         *const_pred = NULL;
1898         *other = NULL;
1899
1900         /*DB((dbg, LEVEL_4, "is %N const\n", pred0));*/
1901         if (is_Const(pred0) || is_SymConst(pred0)) {
1902                 *const_pred = pred0;
1903                 *other = pred1;
1904         }
1905
1906         /*DB((dbg, LEVEL_4, "is %N const\n", pred1));*/
1907         if (is_Const(pred1) || is_SymConst(pred1)) {
1908                 if (*const_pred != NULL) {
1909                         /* RETURN. We do not want both preds to be constant. */
1910                         return 0;
1911                 }
1912
1913                 *other = pred0;
1914                 *const_pred = pred1;
1915         }
1916
1917         if (*const_pred == NULL)
1918                 return 0;
1919         else
1920                 return 1;
1921 }
1922
1923 /* Returns 1 if the loop exits within 2 further steps of the iv.
1924  * norm_proj is the relation under which we stay in the loop. */
1925 static unsigned simulate_next(ir_tarval **count_tar,
1926                 ir_tarval *stepped, ir_tarval *step_tar, ir_tarval *end_tar,
1927                 ir_relation norm_proj)
1928 {
1929         ir_tarval *next;
1930
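        /* E.g. stepped = 9, end = 10, step = 3, norm_proj = less:
         * 9 < 10 still stays in the loop, but next = 12 fails 12 < 10,
         * so count_tar is increased once more and 1 is returned. */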
1931         DB((dbg, LEVEL_4, "Loop taken if (stepped)%ld %s (end)%ld ",
1932                                 get_tarval_long(stepped),
1933                                 get_relation_string((norm_proj)),
1934                                 get_tarval_long(end_tar)));
1935         DB((dbg, LEVEL_4, "comparing latest value %d\n", loop_info.latest_value));
1936
1937         /* If current iv does not stay in the loop,
1938          * this run satisfied the exit condition. */
1939         if (! (tarval_cmp(stepped, end_tar) & norm_proj))
1940                 return 1;
1941
1942         DB((dbg, LEVEL_4, "Result: (stepped)%ld IS %s (end)%ld\n",
1943                                 get_tarval_long(stepped),
1944                                 get_relation_string(tarval_cmp(stepped, end_tar)),
1945                                 get_tarval_long(end_tar)));
1946
1947         /* next step */
1948         if (is_Add(loop_info.add))
1949                 next = tarval_add(stepped, step_tar);
1950         else
1951                 /* sub */
1952                 next = tarval_sub(stepped, step_tar, get_irn_mode(loop_info.end_val));
1953
1954         DB((dbg, LEVEL_4, "Loop taken if %ld %s %ld ",
1955                                 get_tarval_long(next),
1956                                 get_relation_string(norm_proj),
1957                                 get_tarval_long(end_tar)));
1958         DB((dbg, LEVEL_4, "comparing latest value %d\n", loop_info.latest_value));
1959
1960         /* Increase steps. */
1961         *count_tar = tarval_add(*count_tar, get_tarval_one(get_tarval_mode(*count_tar)));
1962
1963         /* Next has to fail the loop condition, or we will never exit. */
1964         if (! (tarval_cmp(next, end_tar) & norm_proj))
1965                 return 1;
1966         else
1967                 return 0;
1968 }
1969
1970 /* Check if loop meets requirements for a 'simple loop':
1971  * - Exactly one cf out
1972  * - Allowed calls
1973  * - Max nodes after unrolling
1974  * - tail-controlled
1975  * - exactly one be
1976  * - cmp
1977  * Returns Projection of cmp node or NULL; */
1978 static ir_node *is_simple_loop(void)
1979 {
1980         int arity, i;
1981         ir_node *loop_block, *exit_block, *projx, *cond, *cmp;
1982
1983         /* Maximum of one condition, and no endless loops. */
1984         if (loop_info.cf_outs != 1)
1985                 return NULL;
1986
1987         DB((dbg, LEVEL_4, "1 loop exit\n"));
1988
1989         /* Calculate maximum unroll_nr keeping node count below limit. */
1990         loop_info.max_unroll = (int)((double)opt_params.max_unrolled_loop_size / (double)loop_info.nodes);
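        /* E.g. a limit of 400 nodes and a loop of 35 nodes give max_unroll = 11. */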
1991         if (loop_info.max_unroll < 2) {
1992                 ++stats.too_large;
1993                 return NULL;
1994         }
1995
1996         DB((dbg, LEVEL_4, "maximum unroll factor %u, to not exceed node limit\n",
1997                 loop_info.max_unroll));
1998
1999         arity = get_irn_arity(loop_head);
2000         /* RETURN if we have more than 1 be. */
2001         /* Get my backedges without alien bes. */
2002         loop_block = NULL;
2003         for (i = 0; i < arity; ++i) {
2004                 ir_node *pred = get_irn_n(loop_head, i);
2005                 if (is_own_backedge(loop_head, i)) {
2006                         if (loop_block)
2007                                 /* Our simple loops may have only one backedge. */
2008                                 return NULL;
2009                         else {
2010                                 loop_block = get_nodes_block(pred);
2011                                 loop_info.be_src_pos = i;
2012                         }
2013                 }
2014         }
2015
2016         DB((dbg, LEVEL_4, "loop has 1 own backedge.\n"));
2017
2018         exit_block = get_nodes_block(loop_info.cf_out.pred);
2019         /* The loop has to be tail-controlled.
2020          * This can be changed/improved,
2021          * but we would need a duff iv. */
2022         if (exit_block != loop_block)
2023                 return NULL;
2024
2025         DB((dbg, LEVEL_4, "tail-controlled loop.\n"));
2026
2027         /* find value on which loop exit depends */
2028         projx = loop_info.cf_out.pred;
2029         cond = get_irn_n(projx, 0);
2030         cmp = get_irn_n(cond, 0);
2031
2032         if (!is_Cmp(cmp))
2033                 return NULL;
2034
2035         DB((dbg, LEVEL_5, "projection is %s\n", get_relation_string(get_Cmp_relation(cmp))));
2036
2037         switch(get_Proj_proj(projx)) {
2038                 case pn_Cond_false:
2039                         loop_info.exit_cond = 0;
2040                         break;
2041                 case pn_Cond_true:
2042                         loop_info.exit_cond = 1;
2043                         break;
2044                 default:
2045                         panic("Cond Proj_proj other than true/false");
2046         }
2047
2048         DB((dbg, LEVEL_4, "Valid Cmp.\n"));
2049         return cmp;
2050 }
2051
2052 /* Returns 1 if all nodes are mode_Iu or mode_Is. */
2053 static unsigned are_mode_I(ir_node *n1, ir_node* n2, ir_node *n3)
2054 {
2055         ir_mode *m1 = get_irn_mode(n1);
2056         ir_mode *m2 = get_irn_mode(n2);
2057         ir_mode *m3 = get_irn_mode(n3);
2058
2059         if ((m1 == mode_Iu && m2 == mode_Iu && m3 == mode_Iu) ||
2060             (m1 == mode_Is && m2 == mode_Is && m3 == mode_Is))
2061                 return 1;
2062         else
2063                 return 0;
2064 }
2065
2066 /* Checks if cur_loop is a simple tail-controlled counting loop
2067  * with start and end value loop invariant, step constant. */
2068 static unsigned get_unroll_decision_invariant(void)
2069 {
2070
2071         ir_node   *projres, *loop_condition, *iteration_path;
2072         unsigned   success;
2073         ir_tarval *step_tar;
2074         ir_mode   *mode;
2075
2076
2077         /* RETURN if loop is not 'simple' */
2078         projres = is_simple_loop();
2079         if (projres == NULL)
2080                 return 0;
2081
2082         /* Use a minimal size for the invariant unrolled loop,
2083          * as duffs device produces overhead. */
2084         if (loop_info.nodes < opt_params.invar_unrolling_min_size)
2085                 return 0;
2086
2087         loop_condition = get_irn_n(projres, 0);
2088
2089         success = get_invariant_pred(loop_condition, &loop_info.end_val, &iteration_path);
2090         DB((dbg, LEVEL_4, "pred invar %d\n", success));
2091
2092         if (! success)
2093                 return 0;
2094
2095         DB((dbg, LEVEL_4, "Invariant End_val %N, other %N\n", loop_info.end_val, iteration_path));
2096
2097         /* We may find the add or the phi first.
2098          * Until now we only have end_val. */
2099         if (is_Add(iteration_path) || is_Sub(iteration_path)) {
2100
2101                 loop_info.add = iteration_path;
2102                 DB((dbg, LEVEL_4, "Case 1: Got add %N (maybe not sane)\n", loop_info.add));
2103
2104                 /* Preds of the add should be step and the iteration_phi */
2105                 success = get_const_pred(loop_info.add, &loop_info.step, &loop_info.iteration_phi);
2106                 if (! success)
2107                         return 0;
2108
2109                 DB((dbg, LEVEL_4, "Got step %N\n", loop_info.step));
2110
2111                 if (! is_Phi(loop_info.iteration_phi))
2112                         return 0;
2113
2114                 DB((dbg, LEVEL_4, "Got phi %N\n", loop_info.iteration_phi));
2115
2116                 /* Find start_val.
2117                  * Does necessary sanity check of add, if it is already set.  */
2118                 success = get_start_and_add(loop_info.iteration_phi, invariant);
2119                 if (! success)
2120                         return 0;
2121
2122                 DB((dbg, LEVEL_4, "Got start A  %N\n", loop_info.start_val));
2123
2124         } else if (is_Phi(iteration_path)) {
2125                 ir_node *new_iteration_phi;
2126
2127                 loop_info.iteration_phi = iteration_path;
2128                 DB((dbg, LEVEL_4, "Case 2: Got phi %N\n", loop_info.iteration_phi));
2129
2130                 /* Find start_val and add-node.
2131                  * Does necessary sanity check of add, if it is already set.  */
2132                 success = get_start_and_add(loop_info.iteration_phi, invariant);
2133                 if (! success)
2134                         return 0;
2135
2136                 DB((dbg, LEVEL_4, "Got start B %N\n", loop_info.start_val));
2137                 DB((dbg, LEVEL_4, "Got add or sub %N\n", loop_info.add));
2138
2139                 success = get_const_pred(loop_info.add, &loop_info.step, &new_iteration_phi);
2140                 if (! success)
2141                         return 0;
2142
2143                 DB((dbg, LEVEL_4, "Got step (B) %N\n", loop_info.step));
2144
2145                 if (loop_info.iteration_phi != new_iteration_phi)
2146                         return 0;
2147
2148         } else {
2149                 return 0;
2150         }
2151
2152         mode = get_irn_mode(loop_info.end_val);
2153
2154         DB((dbg, LEVEL_4, "start %N, end %N, step %N\n",
2155                                 loop_info.start_val, loop_info.end_val, loop_info.step));
2156
2157         if (mode != mode_Is && mode != mode_Iu)
2158                 return 0;
2159
2160         /* TODO necessary? */
2161         if (!are_mode_I(loop_info.start_val, loop_info.step, loop_info.end_val))
2162                 return 0;
2163
2164         DB((dbg, LEVEL_4, "mode integer\n"));
2165
2166         step_tar = get_Const_tarval(loop_info.step);
2167
2168         if (tarval_is_null(step_tar)) {
2169                 /* TODO Might be worth a warning. */
2170                 return 0;
2171         }
2172
2173         DB((dbg, LEVEL_4, "step is not 0\n"));
2174
2175         create_duffs_block();
2176
2177         return loop_info.max_unroll;
2178 }
2179
2180 /* Returns unroll factor,
2181  * given maximum unroll factor and number of loop passes. */
2182 static unsigned get_preferred_factor_constant(ir_tarval *count_tar)
2183 {
2184         ir_tarval *tar_6, *tar_5, *tar_4, *tar_3, *tar_2;
2185         unsigned prefer;
2186         ir_mode *mode = get_irn_mode(loop_info.end_val);
2187
2188         tar_6 = new_tarval_from_long(6, mode);
2189         tar_5 = new_tarval_from_long(5, mode);
2190         tar_4 = new_tarval_from_long(4, mode);
2191         tar_3 = new_tarval_from_long(3, mode);
2192         tar_2 = new_tarval_from_long(2, mode);
2193
2194         /* loop passes % {6, 5, 4, 3, 2} == 0  */
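        /* E.g. a loop taken 12 times prefers 6, 10 times prefers 5, 9 times prefers 3. */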
2195         if (tarval_is_null(tarval_mod(count_tar, tar_6)))
2196                 prefer = 6;
2197         else if (tarval_is_null(tarval_mod(count_tar, tar_5)))
2198                 prefer = 5;
2199         else if (tarval_is_null(tarval_mod(count_tar, tar_4)))
2200                 prefer = 4;
2201         else if (tarval_is_null(tarval_mod(count_tar, tar_3)))
2202                 prefer = 3;
2203         else if (tarval_is_null(tarval_mod(count_tar, tar_2)))
2204                 prefer = 2;
2205         else {
2206                 /* gcd(max_unroll, count_tar) */
2207                 int a = loop_info.max_unroll;
2208                 int b = (int)get_tarval_long(count_tar);
2209                 int c;
2210
2211                 DB((dbg, LEVEL_4, "gcd of max_unroll %d and count_tar %d: ", a, b));
2212
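                /* Euclid's algorithm; a factor that evenly divides the trip
                 * count leaves no remaining iterations. */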
2213                 do {
2214                         c = a % b;
2215                         a = b; b = c;
2216                 } while (c != 0);
2217
2218                 DB((dbg, LEVEL_4, "%d\n", a));
2219                 return a;
2220         }
2221
2222         DB((dbg, LEVEL_4, "preferred unroll factor %d\n", prefer));
2223
2224         /*
2225          * If our preference is greater than the allowed unroll factor,
2226          * we can either reduce the preferred factor (avoiding a duffs device block),
2227          * or create a duffs device block, for which in this case (constants only)
2228          * we even know the start copy at compile time.
2229          * The latter yields graph B below,
2230          * but for code generation we would want to use graph A.
2231          * The graphs are equivalent, so we can only reduce the preferred factor.
2232          * A)                   B)
2233          *     PreHead             PreHead
2234          *        |      ,--.         |   ,--.
2235          *         \ Loop1   \        Loop2   \
2236          *          \  |     |       /  |     |
2237          *           Loop2   /      / Loop1   /
2238          *           |   `--'      |      `--'
2239          */
2240
2241         if (prefer <= loop_info.max_unroll)
2242                 return prefer;
2243         else {
2244                 switch(prefer) {
2245                         case 6:
2246                                 if (loop_info.max_unroll >= 3)
2247                                         return 3;
2248                                 else if (loop_info.max_unroll >= 2)
2249                                         return 2;
2250                                 else
2251                                         return 0;
2252
2253                         case 4:
2254                                 if (loop_info.max_unroll >= 2)
2255                                         return 2;
2256                                 else
2257                                         return 0;
2258
2259                         default:
2260                                 return 0;
2261                 }
2262         }
2263 }
2264
2265 /* Check if cur_loop is a simple counting loop.
2266  * Start, step and end are constants.
2267  * TODO The whole constant case should use procedures similar to
2268  * the invariant case, as they are more versatile. */
2269 /* TODO split. */
2270 static unsigned get_unroll_decision_constant(void)
2271 {
2272         ir_node     *cmp, *iteration_path;
2273         unsigned     success, is_latest_val;
2274         ir_tarval   *start_tar, *end_tar, *step_tar, *diff_tar, *count_tar;
2275         ir_tarval   *stepped;
2276         ir_relation  proj_proj, norm_proj;
2277         ir_mode     *mode;
2278
2279         /* RETURN if loop is not 'simple' */
2280         cmp = is_simple_loop();
2281         if (cmp == NULL)
2282                 return 0;
2283
2284         /* One in of the loop condition needs to be loop invariant. => end_val
2285          * The other in is assigned by an add. => add
2286          * The add uses a loop invariant value => step
2287          * and a phi with a loop invariant start_val and the add node as ins.
2288
2289            ^   ^
2290            |   | .-,
2291            |   Phi |
2292             \  |   |
2293           ^  Add   |
2294            \  | \__|
2295             cond
2296              /\
2297         */
2298
2299         success = get_const_pred(cmp, &loop_info.end_val, &iteration_path);
2300         if (! success)
2301                 return 0;
2302
2303         DB((dbg, LEVEL_4, "End_val %N, other %N\n", loop_info.end_val, iteration_path));
2304
2305         /* We may find the add or the phi first.
2306          * Until now we only have end_val. */
2307         if (is_Add(iteration_path) || is_Sub(iteration_path)) {
2308
2309                 /* We test against the latest value of the iv. */
2310                 is_latest_val = 1;
2311
2312                 loop_info.add = iteration_path;
2313                 DB((dbg, LEVEL_4, "Case 2: Got add %N (maybe not sane)\n", loop_info.add));
2314
2315                 /* Preds of the add should be step and the iteration_phi */
2316                 success = get_const_pred(loop_info.add, &loop_info.step, &loop_info.iteration_phi);
2317                 if (! success)
2318                         return 0;
2319
2320                 DB((dbg, LEVEL_4, "Got step %N\n", loop_info.step));
2321
2322                 if (! is_Phi(loop_info.iteration_phi))
2323                         return 0;
2324
2325                 DB((dbg, LEVEL_4, "Got phi %N\n", loop_info.iteration_phi));
2326
2327                 /* Find start_val.
2328                  * Does necessary sanity check of add, if it is already set.  */
2329                 success = get_start_and_add(loop_info.iteration_phi, constant);
2330                 if (! success)
2331                         return 0;
2332
2333                 DB((dbg, LEVEL_4, "Got start %N\n", loop_info.start_val));
2334
2335         } else if (is_Phi(iteration_path)) {
2336                 ir_node *new_iteration_phi;
2337
2338                 /* We compare with the value the iv had entering this run. */
2339                 is_latest_val = 0;
2340
2341                 loop_info.iteration_phi = iteration_path;
2342                 DB((dbg, LEVEL_4, "Case 1: Got phi %N \n", loop_info.iteration_phi));
2343
2344                 /* Find start_val and add-node.
2345                  * Does necessary sanity check of add, if it is already set.  */
2346                 success = get_start_and_add(loop_info.iteration_phi, constant);
2347                 if (! success)
2348                         return 0;
2349
2350                 DB((dbg, LEVEL_4, "Got start %N\n", loop_info.start_val));
2351                 DB((dbg, LEVEL_4, "Got add or sub %N\n", loop_info.add));
2352
2353                 success = get_const_pred(loop_info.add, &loop_info.step, &new_iteration_phi);
2354                 if (! success)
2355                         return 0;
2356
2357                 DB((dbg, LEVEL_4, "Got step %N\n", loop_info.step));
2358
2359                 if (loop_info.iteration_phi != new_iteration_phi)
2360                         return 0;
2361
2362         } else {
2363                 /* RETURN */
2364                 return 0;
2365         }
2366
2367         mode = get_irn_mode(loop_info.end_val);
2368
2369         DB((dbg, LEVEL_4, "start %N, end %N, step %N\n",
2370                                 loop_info.start_val, loop_info.end_val, loop_info.step));
2371
2372         if (mode != mode_Is && mode != mode_Iu)
2373                 return 0;
2374
2375         /* TODO necessary? */
2376         if (!are_mode_I(loop_info.start_val, loop_info.step, loop_info.end_val))
2377                 return 0;
2378
2379         DB((dbg, LEVEL_4, "mode integer\n"));
2380
2381         end_tar = get_Const_tarval(loop_info.end_val);
2382         start_tar = get_Const_tarval(loop_info.start_val);
2383         step_tar = get_Const_tarval(loop_info.step);
2384
2385         if (tarval_is_null(step_tar))
2386                 /* TODO Might be worth a warning. */
2387                 return 0;
2388
2389         DB((dbg, LEVEL_4, "step is not 0\n"));
2390
2391         if ((!tarval_is_negative(step_tar)) ^ (!is_Sub(loop_info.add)))
2392                 loop_info.decreasing = 1;
2393
2394         diff_tar = tarval_sub(end_tar, start_tar, mode);
2395
2396         /* We need at least count_tar steps to be close to end_val, maybe more.
2397          * There is no way that we have gone too many steps.
2398          * This represents the 'latest value'.
2399          * (Whether the condition checks against the latest value is checked later.) */
2400         count_tar = tarval_div(diff_tar, step_tar);
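        /* E.g. start 0, end 12, step 3: diff_tar = 12, count_tar = 4. */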
2401
2402         /* Iv will not pass end_val (except overflows).
2403          * Nothing is done, as it would yield no advantage. */
2404         if (tarval_is_negative(count_tar)) {
2405                 DB((dbg, LEVEL_4, "Loop is endless or never taken."));
2406                 /* TODO Might be worth a warning. */
2407                 return 0;
2408         }
2409
2410         ++stats.u_simple_counting_loop;
2411
2412         loop_info.latest_value = is_latest_val;
2413
2414         /* TODO split here
2415         if (! is_simple_counting_loop(&count_tar))
2416                 return 0;
2417         */
2418
2419         /* stepped can be negative, if step < 0 */
2420         stepped = tarval_mul(count_tar, step_tar);
2421
2422         /* step as close to end_val as possible, */
2423         /* |stepped| <= |end_tar|, and dist(stepped, end_tar) is smaller than a step. */
2424         if (is_Sub(loop_info.add))
2425                 stepped = tarval_sub(start_tar, stepped, mode_Is);
2426         else
2427                 stepped = tarval_add(start_tar, stepped);
2428
2429         DB((dbg, LEVEL_4, "stepped to %ld\n", get_tarval_long(stepped)));
2430
2431         proj_proj = get_Cmp_relation(cmp);
2432         /* Assure that norm_proj is the stay-in-loop case. */
2433         if (loop_info.exit_cond == 1)
2434                 norm_proj = get_negated_relation(proj_proj);
2435         else
2436                 norm_proj = proj_proj;
2437
2438         DB((dbg, LEVEL_4, "normalized projection %s\n", get_relation_string(norm_proj)));
2439         /* Executed at most once (stay in counting loop if a Eq b) */
2440         if (norm_proj == ir_relation_equal)
2441                 /* TODO Might be worth a warning. */
2442                 return 0;
2443
2444         /* calculates next values and increases count_tar according to it */
2445         success = simulate_next(&count_tar, stepped, step_tar, end_tar, norm_proj);
2446         if (! success)
2447                 return 0;
2448
2449         /* We run the loop once more, if we compare against
2450          * the not yet in-/decreased iv. */
2451         if (is_latest_val == 0) {
2452                 DB((dbg, LEVEL_4, "condition uses not latest iv value\n"));
2453                 count_tar = tarval_add(count_tar, get_tarval_one(mode));
2454         }
2455
2456         DB((dbg, LEVEL_4, "loop taken %ld times\n", get_tarval_long(count_tar)));
2457
2458         /* Assure the loop is taken at least 1 time. */
2459         if (tarval_is_null(count_tar)) {
2460                 /* TODO Might be worth a warning. */
2461                 return 0;
2462         }
2463
2464         loop_info.count_tar = count_tar;
2465         return get_preferred_factor_constant(count_tar);
2466 }
2467
2468 /**
2469  * Loop unrolling
2470  */
2471 static void unroll_loop(void)
2472 {
2473
2474         if (! (loop_info.nodes > 0))
2475                 return;
2476
2477         if (loop_info.nodes > opt_params.max_unrolled_loop_size) {
2478                 DB((dbg, LEVEL_2, "Nodes %d > allowed nodes %d\n",
2479                         loop_info.nodes, opt_params.max_unrolled_loop_size));
2480                 ++stats.too_large;
2481                 return;
2482         }
2483
2484         if (loop_info.calls > 0) {
2485                 DB((dbg, LEVEL_2, "Calls %d > allowed calls 0\n",
2486                         loop_info.calls));
2487                 ++stats.calls_limit;
2488                 return;
2489         }
2490
2491         unroll_nr = 0;
2492
2493         /* get_unroll_decision_constant and invariant are completely
2494          * independent for flexibility.
2495          * Some checks may be performed twice. */
2496
2497         /* constant case? */
2498         if (opt_params.allow_const_unrolling)
2499                 unroll_nr = get_unroll_decision_constant();
2500         if (unroll_nr > 1) {
2501                 loop_info.unroll_kind = constant;
2502
2503         } else {
2504                 /* invariant case? */
2505                 if (opt_params.allow_invar_unrolling)
2506                         unroll_nr = get_unroll_decision_invariant();
2507                 if (unroll_nr > 1)
2508                         loop_info.unroll_kind = invariant;
2509         }
2510
2511         DB((dbg, LEVEL_2, " *** Unrolling %d times ***\n", unroll_nr));
2512
2513         if (unroll_nr > 1) {
2514                 loop_entries = NEW_ARR_F(entry_edge, 0);
2515
2516                 /* Get loop outs */
2517                 irg_walk_graph(current_ir_graph, get_loop_entries, NULL, NULL);
2518
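                /* If the loop is taken exactly unroll_nr times, the copies
                 * cover all iterations and no backedge is needed. */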
2519                 if (loop_info.unroll_kind == constant) {
2520                         if ((int)get_tarval_long(loop_info.count_tar) == unroll_nr)
2521                                 loop_info.needs_backedge = 0;
2522                         else
2523                                 loop_info.needs_backedge = 1;
2524                 } else {
2525                         loop_info.needs_backedge = 1;
2526                 }
2527
2528                 /* Use a node map and obstack to keep track of the loop copies. */
2529                 ir_nodemap_init(&map, current_ir_graph);
2530                 obstack_init(&obst);
2531
2532                 /* Copies the loop */
2533                 copy_loop(loop_entries, unroll_nr - 1);
2534
2535                 /* Line up the floating copies. */
2536                 place_copies(unroll_nr - 1);
2537
2538                 /* Remove phis with 1 in
2539                  * If there were no nested phis, this would not be necessary.
2540                  * Avoiding the creation in the first place
2541                  * leads to complex special cases. */
2542                 irg_walk_graph(current_ir_graph, correct_phis, NULL, NULL);
2543
2544                 if (loop_info.unroll_kind == constant)
2545                         ++stats.constant_unroll;
2546                 else
2547                         ++stats.invariant_unroll;
2548
2549                 clear_irg_properties(current_ir_graph, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE);
2550
2551                 DEL_ARR_F(loop_entries);
2552                 obstack_free(&obst, NULL);
2553                 ir_nodemap_destroy(&map);
2554         }
2555
2556 }
2557
2558 /* Analyzes the loop, and checks if size is within allowed range.
2559  * Decides if loop will be processed. */
2560 static void init_analyze(ir_graph *irg, ir_loop *loop)
2561 {
2562         cur_loop = loop;
2563
2564         loop_head       = NULL;
2565         loop_head_valid = true;
2566
2567         /* Reset loop info */
2568         memset(&loop_info, 0, sizeof(loop_info_t));
2569
2570         DB((dbg, LEVEL_1, "    >>>> current loop %ld <<<<\n",
2571             get_loop_loop_nr(loop)));
2572
2573         /* Collect loop information: head, node counts. */
2574         irg_walk_graph(irg, get_loop_info, NULL, NULL);
2575
2576         /* RETURN if there is no valid head */
2577         if (!loop_head || !loop_head_valid) {
2578                 DB((dbg, LEVEL_1,   "No valid loop head. Nothing done.\n"));
2579                 return;
2580         } else {
2581                 DB((dbg, LEVEL_1,   "Loophead: %N\n", loop_head));
2582         }
2583
2584         if (loop_info.branches > opt_params.max_branches) {
2585                 DB((dbg, LEVEL_1, "Branches %d > allowed branches %d\n",
2586                         loop_info.branches, opt_params.max_branches));
2587                 ++stats.calls_limit;
2588                 return;
2589         }
2590
2591         switch (loop_op) {
2592                 case loop_op_inversion:
2593                         loop_inversion(irg);
2594                         break;
2595
2596                 case loop_op_unrolling:
2597                         unroll_loop();
2598                         break;
2599
2600                 default:
2601                         panic("Loop optimization not implemented.");
2602         }
2603         DB((dbg, LEVEL_1, "       <<<< end of loop %ld >>>>\n",
2604             get_loop_loop_nr(loop)));
2605 }
2606
2607 /* Find innermost loops and add them to loops. */
2608 static void find_innermost_loop(ir_loop *loop)
2609 {
2610         bool   had_sons   = false;
2611         size_t n_elements = get_loop_n_elements(loop);
2612         size_t e;
2613
2614         for (e = 0; e < n_elements; ++e) {
2615                 loop_element element = get_loop_element(loop, e);
2616                 if (*element.kind == k_ir_loop) {
2617                         find_innermost_loop(element.son);
2618                         had_sons = true;
2619                 }
2620         }
2621
2622         if (!had_sons) {
2623                 ARR_APP1(ir_loop*, loops, loop);
2624         }
2625 }
2626
2627 static void set_loop_params(void)
2628 {
2629         opt_params.max_loop_size = 100;
2630         opt_params.depth_adaption = -50;
2631         opt_params.count_phi = true;
2632         opt_params.count_proj = false;
2633         opt_params.allowed_calls = 0;
2634
2635         opt_params.max_cc_size = 5;
2636
2637
2638         opt_params.allow_const_unrolling = true;
2639         opt_params.allow_invar_unrolling = false;
2640
2641         opt_params.invar_unrolling_min_size = 20;
2642         opt_params.max_unrolled_loop_size = 400;
2643         opt_params.max_branches = 9999;
2644 }
2645
2646 /* Assure preconditions are met and go through all loops. */
2647 void loop_optimization(ir_graph *irg)
2648 {
2649         ir_loop *loop;
2650         size_t   i;
2651         size_t   n_elements;
2652
2653         assure_irg_properties(irg,
2654                 IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES
2655                 | IR_GRAPH_PROPERTY_CONSISTENT_OUTS
2656                 | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
2657
2658         set_loop_params();
2659
2660         /* Reset stats for this procedure */
2661         reset_stats();
2662
2663         /* Preconditions */
2664         set_current_ir_graph(irg);
2665
2666         ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
2667         collect_phiprojs(irg);
2668
2669         loop = get_irg_loop(irg);
2670
2671         loops = NEW_ARR_F(ir_loop *, 0);
2672         /* List all inner loops */
2673         n_elements = get_loop_n_elements(loop);
2674         for (i = 0; i < n_elements; ++i) {
2675                 loop_element element = get_loop_element(loop, i);
2676                 if (*element.kind != k_ir_loop)
2677                         continue;
2678                 find_innermost_loop(element.son);
2679         }
2680
2681         /* Set all links to NULL */
2682         irg_walk_graph(irg, reset_link, NULL, NULL);
2683
2684         for (i = 0; i < ARR_LEN(loops); ++i) {
2685                 ir_loop *loop = loops[i];
2686
2687                 ++stats.loops;
2688
2689                 /* Analyze and handle loop */
2690                 init_analyze(irg, loop);
2691
2692                 /* Copied blocks do not have their phi list yet */
2693                 collect_phiprojs(irg);
2694
2695                 /* Set links to NULL
2696                  * TODO Still necessary? */
2697                 irg_walk_graph(irg, reset_link, NULL, NULL);
2698         }
2699
2700         print_stats();
2701
2702         DEL_ARR_F(loops);
2703         ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
2704
2705         confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_NONE);
2706 }
2707
2708 void do_loop_unrolling(ir_graph *irg)
2709 {
2710         loop_op = loop_op_unrolling;
2711         loop_optimization(irg);
2712 }
2713
2714 void do_loop_inversion(ir_graph *irg)
2715 {
2716         loop_op = loop_op_inversion;
2717         loop_optimization(irg);
2718 }
2719
2720 void do_loop_peeling(ir_graph *irg)
2721 {
2722         loop_op = loop_op_peeling;
2723         loop_optimization(irg);
2724 }
2725
2726 ir_graph_pass_t *loop_inversion_pass(const char *name)
2727 {
2728         return def_graph_pass(name ? name : "loop_inversion", do_loop_inversion);
2729 }
2730
2731 ir_graph_pass_t *loop_unroll_pass(const char *name)
2732 {
2733         return def_graph_pass(name ? name : "loop_unroll", do_loop_unrolling);
2734 }
2735
2736 ir_graph_pass_t *loop_peeling_pass(const char *name)
2737 {
2738         return def_graph_pass(name ? name : "loop_peeling", do_loop_peeling);
2739 }
2740
2741 void firm_init_loop_opt(void)
2742 {
2743         FIRM_DBG_REGISTER(dbg, "firm.opt.loop");
2744 }