/*
- * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
*
* This file is part of libFirm.
*
/**
* @file
* @author Christian Helmer
- * @brief loop inversion and loop unrolling, loop peeling
+ * @brief loop inversion and loop unrolling
*
- * @version $Id$
*/
#include "config.h"
+#include <stdbool.h>
+
+#include "iroptimize.h"
+#include "opt_init.h"
#include "irnode.h"
#include "debug.h"
+#include "error.h"
#include "ircons.h"
#include "irgopt.h"
#include "irouts.h"
#include "iredges.h"
#include "irtools.h"
-#include "array_t.h" /* automatic array */
-#include "beutil.h" /* get_block */
-#include "irloop_t.h" /* set_irn_loop*/
+#include "array_t.h"
+#include "beutil.h"
#include "irpass.h"
+#include "irdom.h"
-#if 0
- #include "irdump_t.h"
-#endif
-
+#include <math.h>
+#include "irbackedge_t.h"
+#include "irnodemap.h"
+#include "irloop_t.h"
-DEBUG_ONLY(static firm_dbg_module_t *dbg);
+DEBUG_ONLY(static firm_dbg_module_t *dbg;)
/**
* Convenience macro for iterating over every phi node of the given block.
* Requires phi list per block.
*/
#define for_each_phi(block, phi) \
- for ( (phi) = get_Block_phis( (block) ); (phi) ; (phi) = get_Phi_next( (phi) ) )
+ for ((phi) = get_Block_phis( (block) ); (phi) ; (phi) = get_Phi_next((phi)))
-/* current loop */
+#define for_each_phi_safe(head, phi, next) \
+ for ((phi) = (head), (next) = (head) ? get_Phi_next((head)) : NULL; \
+ (phi) ; (phi) = (next), (next) = (next) ? get_Phi_next((next)) : NULL)
+
+/* Currently processed loop. */
static ir_loop *cur_loop;
-/* abortable walker function */
-typedef unsigned irg_walk_func_abortable(ir_node *, void *);
+/* Flag for kind of unrolling. */
+typedef enum {
+ constant,
+ invariant
+} unrolling_kind_flag;
-/* condition for walking a node during a copy_walk */
-typedef unsigned walker_condition(ir_node *);
+/* Condition for visiting a node during copy_walk. */
+typedef bool walker_condition(const ir_node *);
-/* node and position of a predecessor */
-typedef struct out_edges {
+/* Node and position of a predecessor. */
+typedef struct entry_edge {
ir_node *node;
- int pred_irn_n;
-} out_edge;
-
-/* access complex values through the nodes links */
-typedef struct node_info {
- unsigned invariant:1;
- ir_node *copy;
- ir_node *link; /* temporary links for ssa creation */
- ir_node **ins; /* ins for phi nodes, during rewiring of blocks */
- unsigned done;
- struct node_info *freelistnext; /* linked list to free all node_infos */
-} node_info;
-
-static node_info *link_node_state_list; /* head of the linked list to free all node_infos */
-
-static out_edge *cur_loop_outs; /* A walker may start visiting the current loop with these nodes. */
-static out_edge *cur_head_outs; /* A walker may start visiting the cur head with these nodes. */
-
-static ir_node *loop_cf_head = NULL; /* Loop head node */
-static unsigned loop_cf_head_valid = 1; /* A loop may have one head, otherwise we do not touch it. */
-
-ir_node **loops;
-
-/* Inverted head */
-static ir_node *loop_inv_head = NULL;
-/* Peeled head */
-static ir_node *loop_peeled_head = NULL;
-
-/* Loop analysis informations */
-typedef struct loop_info_t {
- unsigned blocks; /* number of blocks in the loop */
- unsigned calls; /* number of calls */
- unsigned loads; /* number of load nodes */
- unsigned outs; /* outs without keepalives */
-#if 0
- unsigned invariant_loads;
- unsigned stores; /* number of store nodes */
- unsigned opnodes_n; /* nodes that probably result in an instruction */
- unsigned do_invariant_opt;
-#endif
-} loop_info_t;
-
-/* Information about the current loop */
-static loop_info_t loop_info;
+ int pos;
+ ir_node *pred;
+} entry_edge;
-/* A walker may start visiting a condition chain with these nodes. */
-static out_edge *cond_chain_entries;
+/* Node info for unrolling. */
+typedef struct unrolling_node_info {
+ ir_node **copies;
+} unrolling_node_info;
-/* Number of unrolling */
-int unroll_times;
+/* Outs of the nodes head. */
+static entry_edge *cur_head_outs;
-static unsigned head_inversion_node_count;
-static unsigned inversion_head_node_limit;
-static unsigned head_inversion_block_count;
+/* Information about the loop head */
+static ir_node *loop_head = NULL;
+static bool loop_head_valid = true;
-static unsigned enable_peeling;
-static unsigned enable_inversion;
-static unsigned enable_unrolling;
+/* List of all inner loops, that are processed. */
+static ir_loop **loops;
-/**
- *
- * ============= AUXILIARY FUNCTIONS =====================================
- */
-
-
-/**
- * Creates object on the heap, and adds it to a linked list to free it later.
- */
-static node_info *new_node_info(void)
-{
- node_info *l = XMALLOCZ(node_info);
- l->freelistnext = link_node_state_list;
- link_node_state_list = l;
- l->copy = NULL;
- l->invariant = 0;
- return l;
-}
+/* Stats */
+typedef struct loop_stats_t {
+ unsigned loops;
+ unsigned inverted;
+ unsigned too_large;
+ unsigned too_large_adapted;
+ unsigned cc_limit_reached;
+ unsigned calls_limit;
-static node_info *get_node_info(ir_node *n)
-{
- return ((node_info *)get_irn_link(n));
-}
+ unsigned u_simple_counting_loop;
+ unsigned constant_unroll;
+ unsigned invariant_unroll;
-/* Allocates a node_info struct for the given node. For use with a walker. */
-static void alloc_node_info(ir_node *node, void *env)
-{
- node_info *state;
- (void) env;
- state = new_node_info();
- set_irn_link(node, (void *)state);
-}
+ unsigned unhandled;
+} loop_stats_t;
-static void free_node_info(void)
-{
- int a = 0;
- node_info *n;
- n = link_node_state_list;
- while(n) {
- node_info *next = n->freelistnext;
- ++a;
- xfree(n);
- n = next;
- }
- link_node_state_list = NULL;
-}
+static loop_stats_t stats;
-/**
- * Use the linked list to reset the reused values of all node_infos
- * Reset in particular the copy attribute as copy_walk uses it to determine a present copy
- */
-static void reset_node_infos(void)
+/* Set stats to zero */
+static void reset_stats(void)
{
- node_info *next;
- next = link_node_state_list;
- while(next->freelistnext) {
- node_info *cur = next;
- next = cur->freelistnext;
- cur->copy = NULL;
- cur->ins = NULL;
- cur->link = NULL;
- }
+ memset(&stats, 0, sizeof(loop_stats_t));
}
-/* Returns the nodes node_info link. */
-static ir_node *get_link(ir_node *n)
+/* Print stats */
+static void print_stats(void)
{
- return ((node_info *)get_irn_link(n))->link;
+ DB((dbg, LEVEL_2, "---------------------------------------\n"));
+ DB((dbg, LEVEL_2, "loops : %d\n",stats.loops));
+ DB((dbg, LEVEL_2, "inverted : %d\n",stats.inverted));
+ DB((dbg, LEVEL_2, "too_large : %d\n",stats.too_large));
+ DB((dbg, LEVEL_2, "too_large_adapted : %d\n",stats.too_large_adapted));
+ DB((dbg, LEVEL_2, "cc_limit_reached : %d\n",stats.cc_limit_reached));
+ DB((dbg, LEVEL_2, "calls_limit : %d\n",stats.calls_limit));
+ DB((dbg, LEVEL_2, "u_simple_counting : %d\n",stats.u_simple_counting_loop));
+ DB((dbg, LEVEL_2, "constant_unroll : %d\n",stats.constant_unroll));
+ DB((dbg, LEVEL_2, "invariant_unroll : %d\n",stats.invariant_unroll));
+ DB((dbg, LEVEL_2, "=======================================\n"));
}
-/* Sets the nodes node_info link. */
-static void set_link(ir_node *n, ir_node *link)
-{
- ((node_info *)get_irn_link(n))->link = link;
-}
+/* Commandline parameters */
+typedef struct loop_opt_params_t {
+unsigned max_loop_size; /* Maximum number of nodes [nodes]*/
+int depth_adaption; /* Loop nest depth adaption [percent] */
+unsigned allowed_calls; /* Number of calls allowed [number] */
+bool count_phi; /* Count phi nodes */
+bool count_proj; /* Count projections */
-/* Returns a nodes copy. */
-static ir_node *get_copy(ir_node *n)
-{
- return ((node_info *)get_irn_link(n))->copy;
-}
+unsigned max_cc_size; /* Maximum condition chain size [nodes] */
+unsigned max_branches;
-/* Sets a nodes copy. */
-static void set_copy(ir_node *n, ir_node *copy)
-{
- ((node_info *)get_irn_link(n) )->copy = copy;
-}
+unsigned max_unrolled_loop_size; /* [nodes] */
+bool allow_const_unrolling;
+bool allow_invar_unrolling;
+unsigned invar_unrolling_min_size; /* [nodes] */
-/**
- * Convenience macro for iterating over every copy in a linked list
- * of copies.
- */
-#define for_each_copy(node) \
- for ( ; (node) ; (node) = get_copy(node))
+} loop_opt_params_t;
-/**
- * Convenience macro for iterating over every copy in 2 linked lists
- * of copies in parallel.
- */
-#define for_each_copy2(high1, low1, high2, low2) \
- for ( ; (low1) && (low2); (high1) = (low1), (low1) = get_copy(low1), \
- (high2) = (low2), (low2) = get_copy(low2))
+static loop_opt_params_t opt_params;
-/*
- * Returns 0 if the node or block is not in cur_loop.
- */
-static unsigned is_in_loop(ir_node *node)
-{
- return (get_irn_loop(get_block(node)) == cur_loop);
-}
+/* Loop analysis information */
+typedef struct loop_info_t {
+ unsigned nodes; /* node count */
+ unsigned ld_st; /* load and store nodes */
+ unsigned branches; /* number of conditions */
+ unsigned calls; /* number of calls */
+ unsigned cf_outs; /* number of cf edges which leave the loop */
+ entry_edge cf_out; /* single loop leaving cf edge */
+ int be_src_pos; /* position of the single own backedge in the head */
+
+ /* for inversion */
+ unsigned cc_size; /* nodes in the condition chain */
+
+ /* for unrolling */
+ unsigned max_unroll; /* Number of unrolls satisfying max_loop_size */
+ unsigned exit_cond; /* 1 if condition==true exits the loop. */
+ unsigned latest_value:1; /* 1 if condition is checked against latest counter value */
+ unsigned needs_backedge:1; /* 0 if loop is completely unrolled */
+ unsigned decreasing:1; /* Step operation is_Sub, or step is<0 */
+
+ /* IV information of a simple loop */
+ ir_node *start_val;
+ ir_node *step;
+ ir_node *end_val;
+ ir_node *iteration_phi;
+ ir_node *add;
+
+ ir_tarval *count_tar; /* Number of loop iterations */
+
+ ir_node *duff_cond; /* Duff mod */
+ unrolling_kind_flag unroll_kind; /* constant or invariant unrolling */
+} loop_info_t;
-/* Returns if the given be is an alien edge. This is the case when the pred is not in the loop. */
-static unsigned is_alien_edge(ir_node *n, int i)
-{
- return(!is_in_loop(get_irn_n(n, i)));
-}
+/* Information about the current loop */
+static loop_info_t loop_info;
-/* used for block walker */
-static void reset_block_mark(ir_node *node, void * env)
+/* Outs of the condition chain (loop inversion). */
+static ir_node **cc_blocks;
+/* df/cf edges with def in the condition chain */
+static entry_edge *cond_chain_entries;
+/* Array of df loops found in the condition chain. */
+static entry_edge *head_df_loop;
+/* Number of blocks in cc */
+static unsigned inversion_blocks_in_cc;
+
+
+/* Cf/df edges leaving the loop.
+ * Called entries here, as they are used to enter the loop with walkers. */
+static entry_edge *loop_entries;
+/* Number of unrolls to perform */
+static int unroll_nr;
+/* Phase is used to keep copies of nodes. */
+static ir_nodemap map;
+static struct obstack obst;
+
+/* Loop operations. */
+typedef enum loop_op_t {
+ loop_op_inversion,
+ loop_op_unrolling,
+ loop_op_peeling
+} loop_op_t;
+
+/* Saves which loop operation to do until after basic tests. */
+static loop_op_t loop_op;
+
+/* Returns the maximum nodes for the given nest depth */
+static unsigned get_max_nodes_adapted(unsigned depth)
{
- (void) env;
+ double perc = 100.0 + (double)opt_params.depth_adaption;
+ double factor = pow(perc / 100.0, depth);
- if (is_Block(node))
- set_Block_mark(node, 0);
+ return (int)((double)opt_params.max_loop_size * factor);
}
-static unsigned is_nodesblock_marked(ir_node* node)
+/* Reset nodes link. For use with a walker. */
+static void reset_link(ir_node *node, void *env)
{
- return (get_Block_mark(get_block(node)));
+ (void)env;
+ set_irn_link(node, NULL);
}
-/* Returns the number of blocks in a loop. */
-int get_loop_n_blocks(ir_loop *loop)
+/* Returns true if the node or block is in cur_loop. */
+static bool is_in_loop(const ir_node *node)
{
- int elements, e;
- int blocks = 0;
- elements = get_loop_n_elements(loop);
-
- for(e=0; e<elements; e++) {
- loop_element elem = get_loop_element(loop, e);
- if (is_ir_node(elem.kind) && is_Block(elem.node))
- ++blocks;
- }
- return blocks;
+ return get_irn_loop(get_block_const(node)) == cur_loop;
}
-/**
- * Add newpred at position pos to node and also add the corresponding value to the phis.
- * Requires block phi list.
- */
-static int duplicate_preds(ir_node* node, unsigned pos, ir_node* newpred)
+/* Returns true if the given edge is a backedge
+ * with its pred in the cur_loop. */
+static bool is_own_backedge(const ir_node *n, int pos)
{
- ir_node **ins;
- /*int *is_be;*/
- ir_node *phi;
- int block_arity;
- int i;
-
- assert(is_Block(node) && "duplicate_preds is only allowed for blocks");
-
- DB((dbg, LEVEL_5, "duplicate_preds(node %ld, pos %d, newpred %ld)\n",
- get_irn_node_nr(node), pos, get_irn_node_nr(newpred)));
-
- block_arity = get_irn_arity(node);
-
- NEW_ARR_A(ir_node*, ins, block_arity + 1);
-
- for (i = 0; i < block_arity; ++i) {
- ins[i] = get_irn_n(node, i);
- }
- ins[block_arity] = newpred;
-
- set_irn_in(node, block_arity + 1, ins);
-
- for_each_phi(node, phi) {
- int phi_arity = get_irn_arity(phi);
- DB((dbg, LEVEL_5, "duplicate_preds: fixing phi %ld\n", get_irn_node_nr(phi)));
-
- NEW_ARR_A(ir_node *, ins, block_arity + 1);
- for (i=0; i < phi_arity; ++i) {
- DB((dbg, LEVEL_5, "in %ld\n", get_irn_node_nr(get_irn_n(phi, i))));
- ins[i] = get_irn_n(phi, i);
- }
- ins[block_arity] = get_irn_n(phi, pos);
- set_irn_in(phi, block_arity + 1, ins);
- }
- return block_arity;
+ return is_backedge(n, pos) && is_in_loop(get_irn_n(n, pos));
}
-/**
- * Finds loop head and loop_info.
- */
+/* Finds the loop head and collects some loop_info, such as calls, if necessary. */
static void get_loop_info(ir_node *node, void *env)
{
- unsigned node_in_loop, pred_in_loop;
+ bool node_in_loop = is_in_loop(node);
int i, arity;
- (void) env;
+ (void)env;
+
+ /* collect some loop information */
+ if (node_in_loop) {
+ if (is_Phi(node) && opt_params.count_phi)
+ ++loop_info.nodes;
+ else if (is_Proj(node) && opt_params.count_proj)
+ ++loop_info.nodes;
+ else if (!is_Confirm(node) && !is_Const(node) && !is_SymConst(node))
+ ++loop_info.nodes;
+
+ if (is_Load(node) || is_Store(node))
+ ++loop_info.ld_st;
+
+ if (is_Call(node))
+ ++loop_info.calls;
+ }
arity = get_irn_arity(node);
for (i = 0; i < arity; i++) {
- ir_node *pred = get_irn_n(node, i);
+ ir_node *pred = get_irn_n(node, i);
+ bool pred_in_loop = is_in_loop(pred);
- pred_in_loop = is_in_loop(pred);
- node_in_loop = is_in_loop(node);
-
- /* collect some loop information */
- if (node_in_loop) {
- if (is_Call(node))
- ++loop_info.calls;
+ if (is_Block(node) && !node_in_loop && pred_in_loop) {
+ entry_edge entry;
+ entry.node = node;
+ entry.pos = i;
+ entry.pred = pred;
+ /* Count cf outs */
+ ++loop_info.cf_outs;
+ loop_info.cf_out = entry;
}
/* Find the loops head/the blocks with cfpred outside of the loop */
- if (is_Block(node) && node_in_loop && !pred_in_loop && loop_cf_head_valid) {
- ir_node *cfgpred = get_Block_cfgpred(node, i);
-
- if (!is_in_loop(cfgpred)) {
- DB((dbg, LEVEL_5, "potential head %+F because inloop and pred %+F not inloop\n", node, pred));
- /* another head? We do not touch this. */
- if (loop_cf_head && loop_cf_head != node) {
- loop_cf_head_valid = 0;
- } else {
- loop_cf_head = node;
+ if (is_Block(node)) {
+ unsigned outs_n = 0;
+
+ /* Count innerloop branches */
+ foreach_out_edge_kind(node, edge, EDGE_KIND_BLOCK) {
+ ir_node *succ = get_edge_src_irn(edge);
+ if (is_Block(succ) && is_in_loop(succ))
+ ++outs_n;
+ }
+ if (outs_n > 1)
+ ++loop_info.branches;
+
+ if (node_in_loop && !pred_in_loop && loop_head_valid) {
+ ir_node *cfgpred = get_Block_cfgpred(node, i);
+
+ if (!is_in_loop(cfgpred)) {
+ DB((dbg, LEVEL_5, "potential head %+F because inloop and pred %+F not inloop\n",
+ node, pred));
+ /* another head? We do not touch this. */
+ if (loop_head && loop_head != node) {
+ loop_head_valid = false;
+ } else {
+ loop_head = node;
+ }
}
}
}
}
}
-/* Adds all nodes pointing into the loop to loop_entries and also finds the loops head */
-static void get_loop_outs(ir_node *node, void *env)
+/* Finds all edges with users outside of the loop
+ * and definition inside the loop. */
+static void get_loop_entries(ir_node *node, void *env)
{
unsigned node_in_loop, pred_in_loop;
int i, arity;
(void) env;
arity = get_irn_arity(node);
- for (i = 0; i < arity; i++) {
+ for (i = 0; i < arity; ++i) {
ir_node *pred = get_irn_n(node, i);
pred_in_loop = is_in_loop(pred);
node_in_loop = is_in_loop(node);
if (pred_in_loop && !node_in_loop) {
- out_edge entry;
+ entry_edge entry;
entry.node = node;
- entry.pred_irn_n = i;
- ARR_APP1(out_edge, cur_loop_outs, entry);
+ entry.pos = i;
+ entry.pred = pred;
+ ARR_APP1(entry_edge, loop_entries, entry);
}
}
}
+/* ssa */
static ir_node *ssa_second_def;
static ir_node *ssa_second_def_block;
/**
* Walks the graph bottom up, searching for definitions and creates phis.
*/
-static ir_node *search_def_and_create_phis(ir_node *block, ir_mode *mode)
+static ir_node *search_def_and_create_phis(ir_node *block, ir_mode *mode, int first)
{
int i;
int n_cfgpreds;
- ir_graph *irg;
+ ir_graph *irg = get_irn_irg(block);
ir_node *phi;
ir_node **in;
- DB((dbg, LEVEL_5, "ssa search_def_and_create_phis: block %ld\n", get_irn_node_nr(block)));
+ DB((dbg, LEVEL_5, "ssa search_def_and_create_phis: block %N\n", block));
/* Prevents creation of phi that would be bad anyway.
* Dead and bad blocks. */
- if (get_irn_arity(block) < 1 || is_Bad(block))
- return new_Bad();
+ if (get_irn_arity(block) < 1 || is_Bad(block)) {
+ DB((dbg, LEVEL_5, "ssa bad %N\n", block));
+ return new_r_Bad(irg, mode);
+ }
- if (block == ssa_second_def_block) {
- DB((dbg, LEVEL_5, "ssa found second definition: use second def %ld\n", get_irn_node_nr(ssa_second_def)));
+ if (block == ssa_second_def_block && !first) {
+ DB((dbg, LEVEL_5, "ssa found second definition: use second def %N\n", ssa_second_def));
return ssa_second_def;
}
/* already processed this block? */
if (irn_visited(block)) {
- ir_node *value = get_link(block);
- DB((dbg, LEVEL_5, "ssa already visited: use linked %ld\n", get_irn_node_nr(value)));
+ ir_node *value = (ir_node *) get_irn_link(block);
+ DB((dbg, LEVEL_5, "ssa already visited: use linked %N\n", value));
return value;
}
- irg = get_irn_irg(block);
assert(block != get_irg_start_block(irg));
/* a Block with only 1 predecessor needs no Phi */
ir_node *pred_block = get_Block_cfgpred_block(block, 0);
ir_node *value;
- DB((dbg, LEVEL_5, "ssa 1 pred: walk pred %ld\n", get_irn_node_nr(pred_block)));
+ DB((dbg, LEVEL_5, "ssa 1 pred: walk pred %N\n", pred_block));
- value = search_def_and_create_phis(pred_block, mode);
- set_link(block, value);
+ value = search_def_and_create_phis(pred_block, mode, 0);
+ set_irn_link(block, value);
mark_irn_visited(block);
return value;
/* create a new Phi */
NEW_ARR_A(ir_node*, in, n_cfgpreds);
- for(i = 0; i < n_cfgpreds; ++i)
- in[i] = new_Unknown(mode);
+ for (i = 0; i < n_cfgpreds; ++i)
+ in[i] = new_r_Dummy(irg, mode);
phi = new_r_Phi(block, n_cfgpreds, in, mode);
-
/* Important: always keep block phi list up to date. */
add_Block_phi(block, phi);
- /* EVERY node is assumed to have a node_info linked. */
- alloc_node_info(phi, NULL);
-
- DB((dbg, LEVEL_5, "ssa phi creation: link new phi %ld to block %ld\n", get_irn_node_nr(phi), get_irn_node_nr(block)));
-
- set_link(block, phi);
+ DB((dbg, LEVEL_5, "ssa phi creation: link new phi %N to block %N\n", phi, block));
+ set_irn_link(block, phi);
mark_irn_visited(block);
/* set Phi predecessors */
- for(i = 0; i < n_cfgpreds; ++i) {
+ for (i = 0; i < n_cfgpreds; ++i) {
ir_node *pred_val;
ir_node *pred_block = get_Block_cfgpred_block(block, i);
assert(pred_block != NULL);
- pred_val = search_def_and_create_phis(pred_block, mode);
+ pred_val = search_def_and_create_phis(pred_block, mode, 0);
+
assert(pred_val != NULL);
- DB((dbg, LEVEL_5, "ssa phi pred:phi %ld, pred %ld\n", get_irn_node_nr(phi), get_irn_node_nr(pred_val)));
+ DB((dbg, LEVEL_5, "ssa phi pred:phi %N, pred %N\n", phi, pred_val));
set_irn_n(phi, i, pred_val);
}
+
return phi;
}
+
/**
* Given a set of values this function constructs SSA-form for the users of the
* first value (the users are determined through the out-edges of the value).
* Works without using the dominance tree.
*/
static void construct_ssa(ir_node *orig_block, ir_node *orig_val,
- ir_node *second_block, ir_node *second_val)
+ ir_node *second_block, ir_node *second_val)
{
ir_graph *irg;
ir_mode *mode;
- const ir_edge_t *edge;
- const ir_edge_t *next;
assert(orig_block && orig_val && second_block && second_val &&
"no parameter of construct_ssa may be NULL");
- /* no need to do anything */
if (orig_val == second_val)
return;
inc_irg_visited(irg);
mode = get_irn_mode(orig_val);
- set_link(orig_block, orig_val);
+ set_irn_link(orig_block, orig_val);
mark_irn_visited(orig_block);
ssa_second_def_block = second_block;
ssa_second_def = second_val;
/* Only fix the users of the first, i.e. the original node */
- foreach_out_edge_safe(orig_val, edge, next) {
+ foreach_out_edge_safe(orig_val, edge) {
ir_node *user = get_edge_src_irn(edge);
int j = get_edge_src_pos(edge);
ir_node *user_block = get_nodes_block(user);
if (is_End(user))
continue;
- DB((dbg, LEVEL_5, "original user %ld\n", get_irn_node_nr(user)));
+ DB((dbg, LEVEL_5, "original user %N\n", user));
if (is_Phi(user)) {
ir_node *pred_block = get_Block_cfgpred_block(user_block, j);
- newval = search_def_and_create_phis(pred_block, mode);
+ newval = search_def_and_create_phis(pred_block, mode, 1);
} else {
- newval = search_def_and_create_phis(user_block, mode);
+ newval = search_def_and_create_phis(user_block, mode, 1);
}
-
- /* If we get a bad node the user keeps the original in. No second definition needed. */
if (newval != user && !is_Bad(newval))
set_irn_n(user, j, newval);
}
ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
}
-/*
- * Construct SSA for def and all of its copies.
- */
-static void construct_ssa_n(ir_node *def, ir_node *user)
-{
- ir_graph *irg;
- ir_mode *mode;
- ir_node *iter = def;
- const ir_edge_t *edge;
- const ir_edge_t *next;
- irg = get_irn_irg(def);
- ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
- inc_irg_visited(irg);
+/***** Unrolling Helper Functions *****/
- mode = get_irn_mode(def);
+/* Assign the copy with index nr to node n */
+static void set_unroll_copy(ir_node *n, int nr, ir_node *cp)
+{
+ unrolling_node_info *info;
+ assert(nr != 0 && "0 reserved");
- for_each_copy(iter) {
- set_link(get_nodes_block(iter), iter);
- mark_irn_visited(get_nodes_block(iter));
+ info = ir_nodemap_get(unrolling_node_info, &map, n);
+ if (! info) {
+ ir_node **const arr = NEW_ARR_DZ(ir_node*, &obst, unroll_nr);
- DB((dbg, LEVEL_5, "ssa_n: Link def %ld to block %ld\n",
- get_irn_node_nr(iter), get_irn_node_nr(get_nodes_block(iter))));
+ info = OALLOCZ(&obst, unrolling_node_info);
+ info->copies = arr;
+ ir_nodemap_insert(&map, n, info);
}
+ /* Original node */
+ info->copies[0] = n;
- /* Need to search the outs, because we need the in-pos on the user node. */
- foreach_out_edge_safe(def, edge, next) {
- ir_node *edge_user = get_edge_src_irn(edge);
- int edge_src = get_edge_src_pos(edge);
- ir_node *user_block = get_nodes_block(user);
- ir_node *newval;
+ info->copies[nr] = cp;
+}
- if (edge_user != user)
- continue;
+/* Returns a nodes copy if it exists, else NULL. */
+static ir_node *get_unroll_copy(ir_node *n, int nr)
+{
+ ir_node *cp;
+ unrolling_node_info *info = ir_nodemap_get(unrolling_node_info, &map, n);
+ if (! info)
+ return NULL;
- if (is_Phi(user)) {
- ir_node *pred_block = get_Block_cfgpred_block(user_block, edge_src);
- newval = search_def_and_create_phis(pred_block, mode);
- } else {
- newval = search_def_and_create_phis(user_block, mode);
- }
+ cp = info->copies[nr];
+ return cp;
+}
- if (newval != user && !is_Bad(newval))
- set_irn_n(user, edge_src, newval);
- }
- ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
-}
+/***** Inversion Helper Functions *****/
-/**
- * Construct SSA for all definitions in arr.
- */
-void construct_ssa_foreach(ir_node **arr, int arr_n)
+/* Sets copy cp of node n. */
+static void set_inversion_copy(ir_node *n, ir_node *cp)
{
- int i;
- for(i = 0; i < arr_n; ++i) {
- ir_node *cppred, *block, *cpblock, *pred;
-
- pred = arr[i];
- cppred = get_copy(pred);
- block = get_nodes_block(pred);
- cpblock = get_nodes_block(cppred);
- construct_ssa(block, pred, cpblock, cppred);
- }
+ ir_nodemap_insert(&map, n, cp);
}
-/* get the number of backedges without alien bes */
-static int get_backedge_n(ir_node *loophead, unsigned with_alien)
+/* Getter of copy of n for inversion */
+static ir_node *get_inversion_copy(ir_node *n)
{
- int i;
- int be_n = 0;
- int arity = get_irn_arity(loophead);
- for (i = 0; i < arity; ++i) {
- ir_node *pred = get_irn_n(loophead, i);
- if (is_backedge(loophead, i) && (with_alien || is_in_loop(pred)))
- ++be_n;
- }
- return be_n;
+ ir_node *cp = ir_nodemap_get(ir_node, &map, n);
+ return cp;
}
-/**
- * Rewires the heads after peeling.
- */
-static void peel_fix_heads(void)
+/* Resets block mark for given node. For use with walker */
+static void reset_block_mark(ir_node *node, void * env)
{
- ir_node **loopheadnins, **peelheadnins;
- ir_node *loophead = loop_cf_head;
- ir_node *peelhead = get_copy(loophead);
-
- int headarity = get_irn_arity(loophead);
- ir_node *phi;
- int i;
-
- int lheadin_c = 0;
- int pheadin_c = 0;
-
- int backedges_n = get_backedge_n(loophead, 0);
+ (void) env;
- int lhead_arity = 2 * backedges_n;
- int phead_arity = headarity - backedges_n;
+ if (is_Block(node))
+ set_Block_mark(node, 0);
+}
- /* new in arrays */
- NEW_ARR_A(ir_node *, loopheadnins, lhead_arity );
- NEW_ARR_A(ir_node *, peelheadnins, phead_arity );
+/* Returns mark of node, or its block if node is not a block.
+ * Used in this context to determine if node is in the condition chain. */
+static bool is_nodes_block_marked(const ir_node* node)
+{
+ return get_Block_mark(get_block_const(node));
+}
- for_each_phi(loophead, phi) {
- NEW_ARR_A(ir_node *, get_node_info(phi)->ins, lhead_arity);
- }
- for_each_phi(peelhead, phi) {
- NEW_ARR_A(ir_node *, get_node_info(phi)->ins, phead_arity);
+/* Extends a nodes ins by node new.
+ * NOTE: This is slow if a node n needs to be extended more than once. */
+static void extend_irn(ir_node *n, ir_node *newnode, bool new_is_backedge)
+{
+ int i;
+ int arity = get_irn_arity(n);
+ int new_arity = arity + 1;
+ ir_node **ins = XMALLOCN(ir_node*, new_arity);
+ bool *bes = XMALLOCN(bool, new_arity);
+
+ /* save bes */
+ /* Bes are important!
+ * Another way would be recreating the looptree,
+ * but after that we cannot distinguish already processed loops
+ * from not yet processed ones. */
+ if (is_Block(n)) {
+ for(i = 0; i < arity; ++i) {
+ bes[i] = is_backedge(n, i);
+ }
+ bes[i] = new_is_backedge;
}
- for (i = 0; i < headarity; i++)
- {
- ir_node *orgjmp = get_irn_n(loophead, i);
- ir_node *copyjmp = get_copy(orgjmp);
-
- /**
- * Rewire the head blocks ins and their phi ins.
- * Requires phi list per block.
- */
- if (is_backedge(loophead, i) && !is_alien_edge(loophead, i)) {
- loopheadnins[lheadin_c] = orgjmp;
- for_each_phi(loophead, phi) {
- get_node_info( phi )->ins[lheadin_c] = get_irn_n( phi, i) ;
- }
- ++lheadin_c;
+ for(i = 0; i < arity; ++i) {
+ ins[i] = get_irn_n(n, i);
+ }
+ ins[i] = newnode;
- /* former bes of the peeled code origin now from the loophead */
- loopheadnins[lheadin_c] = copyjmp;
+ set_irn_in(n, new_arity, ins);
- /* get_irn_n( get_copy_of(phi), i ) <!=> get_copy_of( get_irn_n( phi, i) )
- * Order is crucial! Predecessors outside of the loop are non existent.
- * The copy (cloned with its ins!) has pred i,
- * but phis pred i might not have a copy of itself.
- */
- for_each_phi(loophead, phi) {
- get_node_info( phi )->ins[lheadin_c] = get_irn_n( get_copy(phi), i) ;
- }
- ++lheadin_c;
- } else {
- peelheadnins[pheadin_c] = orgjmp;
- for_each_phi(peelhead, phi) {
- get_node_info( phi )->ins[pheadin_c] = get_irn_n(phi, i);
- }
- ++pheadin_c;
+ /* restore bes */
+ if (is_Block(n)) {
+ for(i = 0; i < new_arity; ++i) {
+ if (bes[i])
+ set_backedge(n, i);
}
- }/* for */
-
- assert(pheadin_c == ARR_LEN(peelheadnins) &&
- lheadin_c == ARR_LEN(loopheadnins) &&
- "the constructed head arities do not match the predefined arities");
-
- /* assign the ins to the nodes */
- set_irn_in(loophead, ARR_LEN(loopheadnins), loopheadnins);
- set_irn_in(peelhead, ARR_LEN(peelheadnins), peelheadnins);
+ }
+ free(ins);
+ free(bes);
+}
- for_each_phi(loophead, phi) {
- ir_node **ins = get_node_info( phi )->ins;
- set_irn_in(phi, lhead_arity, ins);
+/* Extends a block by a copy of its pred at pos,
+ * fixing also the phis in the same way. */
+static void extend_ins_by_copy(ir_node *block, int pos)
+{
+ ir_node *new_in;
+ ir_node *phi;
+ assert(is_Block(block));
+
+ /* Extend block by copy of definition at pos */
+ ir_node *const pred = get_Block_cfgpred(block, pos);
+ new_in = get_inversion_copy(pred);
+ DB((dbg, LEVEL_5, "Extend block %N by %N cp of %N\n", block, new_in, pred));
+ extend_irn(block, new_in, false);
+
+ /* Extend block phis by copy of definition at pos */
+ for_each_phi(block, phi) {
+ ir_node *pred, *cp;
+
+ pred = get_irn_n(phi, pos);
+ cp = get_inversion_copy(pred);
+ /* If the phis in is not in the condition chain (eg. a constant),
+ * there is no copy. */
+ if (cp == NULL)
+ new_in = pred;
+ else
+ new_in = cp;
+
+ DB((dbg, LEVEL_5, "Extend phi %N by %N cp of %N\n", phi, new_in, pred));
+ extend_irn(phi, new_in, false);
}
+}
- for_each_phi(peelhead, phi) {
- ir_node **ins = get_node_info( phi )->ins;
- set_irn_in(phi, phead_arity, ins);
+/* Returns the number of the block's backedges, with or without alien backedges. */
+static int get_backedge_n(ir_node *block, bool with_alien)
+{
+ int be_n = 0;
+ int const arity = get_Block_n_cfgpreds(block);
+ for (int i = 0; i < arity; ++i) {
+ ir_node *const pred = get_Block_cfgpred(block, i);
+ if (is_backedge(block, i) && (with_alien || is_in_loop(pred)))
+ ++be_n;
}
+ return be_n;
}
-/**
- * Create a raw copy (ins are still the old ones) of the given node.
- * We rely on copies to be NOT visited.
- */
-static ir_node *rawcopy_node(ir_node *node)
+/* Returns a raw copy of the given node.
+ * Attributes are kept/set according to the needs of loop inversion. */
+static ir_node *copy_node(ir_node *node)
{
int i, arity;
ir_node *cp;
- node_info *cpstate;
cp = exact_copy(node);
-
arity = get_irn_arity(node);
+ /* Keep backedge info */
for (i = 0; i < arity; ++i) {
if (is_backedge(node, i))
set_backedge(cp, i);
}
- set_copy(node, cp);
- cpstate = new_node_info();
- set_irn_link(cp, cpstate);
-
if (is_Block(cp)) {
- /* TODO
- * exact_copy already sets Macroblock.
- * Why do we need to do this anyway? */
- set_Block_MacroBlock(cp, cp);
+ set_Block_mark(cp, 0);
}
+
return cp;
}
+
/**
* This walker copies all walked nodes.
- * If the walk_condition is true for a node, it is walked.
- * All nodes node_info->copy attributes has to be NULL prior to every to every walk.
+ * If the walk_condition is true for a node, it is copied.
+ * All nodes node_info->copy have to be NULL prior to every walk.
+ * Order of ins is important for later usage.
*/
-static void copy_walk(ir_node *node, walker_condition *walk_condition, ir_loop *set_loop)
+static void copy_walk(ir_node *node, walker_condition *walk_condition,
+ ir_loop *set_loop)
{
int i;
int arity;
ir_node *cp;
ir_node **cpin;
- ir_graph *irg = current_ir_graph;
- node_info *node_info = get_node_info(node);
/**
* break condition and cycle resolver, creating temporary node copies
*/
- if (get_irn_visited(node) >= get_irg_visited(irg)) {
+ if (irn_visited(node)) {
/* Here we rely on nodestate's copy being initialized with NULL */
- DB((dbg, LEVEL_5, "copy_walk: We have already visited %ld\n", get_irn_node_nr(node)));
- if (node_info->copy == NULL) {
- cp = rawcopy_node(node);
- DB((dbg, LEVEL_5, "The TEMP copy of %ld is created %ld\n", get_irn_node_nr(node), get_irn_node_nr(cp)));
+ DB((dbg, LEVEL_5, "copy_walk: We have already visited %N\n", node));
+ if (get_inversion_copy(node) == NULL) {
+ cp = copy_node(node);
+ set_inversion_copy(node, cp);
+
+ DB((dbg, LEVEL_5, "The TEMP copy of %N is created %N\n", node, cp));
}
return;
}
if (!is_Block(node)) {
ir_node *pred = get_nodes_block(node);
if (walk_condition(pred))
- DB((dbg, LEVEL_5, "walk block %ld\n", get_irn_node_nr(pred)));
- copy_walk(pred, walk_condition, set_loop);
+ DB((dbg, LEVEL_5, "walk block %N\n", pred));
+ copy_walk(pred, walk_condition, set_loop);
}
arity = get_irn_arity(node);
NEW_ARR_A(ir_node *, cpin, arity);
- for (i = get_irn_arity(node) - 1; i >= 0; --i) {
+ for (i = 0; i < arity; ++i) {
ir_node *pred = get_irn_n(node, i);
if (walk_condition(pred)) {
- DB((dbg, LEVEL_5, "walk node %ld\n", get_irn_node_nr(pred)));
+ DB((dbg, LEVEL_5, "walk node %N\n", pred));
copy_walk(pred, walk_condition, set_loop);
- cpin[i] = get_copy(pred);
- DB((dbg, LEVEL_5, "copy of %ld gets new in %ld which is copy of %ld\n",
- get_irn_node_nr(node), get_irn_node_nr(get_copy(pred)), get_irn_node_nr(pred)));
+ cpin[i] = get_inversion_copy(pred);
+ DB((dbg, LEVEL_5, "copy of %N gets new in %N which is copy of %N\n",
+ node, get_inversion_copy(pred), pred));
} else {
cpin[i] = pred;
}
}
/* copy node / finalize temp node */
- if (node_info->copy == NULL) {
+ if (get_inversion_copy(node) == NULL) {
/* No temporary copy existent */
- cp = rawcopy_node(node);
- DB((dbg, LEVEL_5, "The FINAL copy of %ld is CREATED %ld\n", get_irn_node_nr(node), get_irn_node_nr(cp)));
+ cp = copy_node(node);
+ set_inversion_copy(node, cp);
+ DB((dbg, LEVEL_5, "The FINAL copy of %N is CREATED %N\n", node, cp));
} else {
/* temporary copy is existent but without correct ins */
- cp = get_copy(node);
- DB((dbg, LEVEL_5, "The FINAL copy of %ld is EXISTENT %ld\n", get_irn_node_nr(node), get_irn_node_nr(cp)));
+ cp = get_inversion_copy(node);
+ DB((dbg, LEVEL_5, "The FINAL copy of %N is EXISTENT %N\n", node, cp));
}
if (!is_Block(node)) {
- ir_node *cpblock = get_copy(get_nodes_block(node));
+ ir_node *cpblock = get_inversion_copy(get_nodes_block(node));
set_nodes_block(cp, cpblock );
- /* fix the phi information in attr.phis */
- if( is_Phi(cp) )
+ if (is_Phi(cp))
add_Block_phi(cpblock, cp);
}
- set_irn_loop(cp, set_loop);
+ /* Keeps phi list of temporary node. */
set_irn_in(cp, ARR_LEN(cpin), cpin);
}
/**
- * Loop peeling, and fix the cf for the loop entry nodes, which have now more preds
+ * This walker copies all walked nodes.
+ * If the walk_condition is true for a node, it is copied.
+ * All nodes' node_info->copy fields have to be NULL prior to every walk.
+ * Order of ins is important for later usage.
+ * Takes copy_index to phase-link the copy at a specific index.
*/
-static void peel(out_edge *loop_outs)
+static void copy_walk_n(ir_node *node, walker_condition *walk_condition,
+ int copy_index)
{
int i;
- ir_node **entry_buffer;
- int entry_c = 0;
+ int arity;
+ ir_node *cp;
+ ir_node **cpin;
- ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
+ /**
+ * break condition and cycle resolver, creating temporary node copies
+ */
+ if (irn_visited(node)) {
+ /* Here we rely on nodestate's copy being initialized with NULL */
+ DB((dbg, LEVEL_5, "copy_walk: We have already visited %N\n", node));
+ if (get_unroll_copy(node, copy_index) == NULL) {
+ ir_node *u;
+ u = copy_node(node);
+ set_unroll_copy(node, copy_index, u);
+ DB((dbg, LEVEL_5, "The TEMP unknown of %N is created %N\n", node, u));
+ }
+ return;
+ }
+
+ /* Walk */
+ mark_irn_visited(node);
- NEW_ARR_A(ir_node *, entry_buffer, ARR_LEN(loop_outs));
+ if (!is_Block(node)) {
+ ir_node *block = get_nodes_block(node);
+ if (walk_condition(block))
+ DB((dbg, LEVEL_5, "walk block %N\n", block));
+ copy_walk_n(block, walk_condition, copy_index);
+ }
- /* duplicate loop walk */
- inc_irg_visited(current_ir_graph);
+ arity = get_irn_arity(node);
+ NEW_ARR_A(ir_node *, cpin, arity);
- for(i = 0; i < ARR_LEN(loop_outs); i++) {
- out_edge entry = loop_outs[i];
- ir_node *node = entry.node;
- ir_node *pred = get_irn_n(entry.node, entry.pred_irn_n);
+ for (i = 0; i < arity; ++i) {
+ ir_node *pred = get_irn_n(node, i);
- if (is_Block(node)) {
- copy_walk(pred, is_in_loop, NULL);
- duplicate_preds(node, entry.pred_irn_n, get_copy(pred) );
+ if (walk_condition(pred)) {
+ DB((dbg, LEVEL_5, "walk node %N\n", pred));
+ copy_walk_n(pred, walk_condition, copy_index);
+ cpin[i] = get_unroll_copy(pred, copy_index);
} else {
- copy_walk(pred, is_in_loop, NULL);
- /* leave out keepalives */
- if (!is_End(node))
- /* Node is user of a value defined inside the loop.
- * We'll need a phi since we duplicated the loop. */
- /* Cannot construct_ssa here, because it needs another walker. */
- entry_buffer[entry_c++] = pred;
+ cpin[i] = pred;
}
}
- ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
+ /* copy node / finalize temp node */
+ cp = get_unroll_copy(node, copy_index);
+ if (cp == NULL || is_Unknown(cp)) {
+ cp = copy_node(node);
+ set_unroll_copy(node, copy_index, cp);
+ DB((dbg, LEVEL_5, "The FINAL copy of %N is CREATED %N\n", node, cp));
+ } else {
+ /* temporary copy is existent but without correct ins */
+ cp = get_unroll_copy(node, copy_index);
+ DB((dbg, LEVEL_5, "The FINAL copy of %N is EXISTENT %N\n", node, cp));
+ }
+
+ if (!is_Block(node)) {
+ ir_node *cpblock = get_unroll_copy(get_nodes_block(node), copy_index);
- /* Rewires the 2 heads */
- peel_fix_heads();
+ set_nodes_block(cp, cpblock );
+ if (is_Phi(cp))
+ add_Block_phi(cpblock, cp);
+ }
- /* Generate phis for values from peeled code and original loop */
- construct_ssa_foreach(entry_buffer, entry_c);
- /*for(i = 0; i < entry_c; i++)
- {
- ir_node *cppred, *block, *cpblock, *pred;
+ /* Keeps phi list of temporary node. */
+ set_irn_in(cp, ARR_LEN(cpin), cpin);
+}
- pred = entry_buffer[i];
- cppred = get_copy(pred);
- block = get_nodes_block(pred);
- cpblock = get_nodes_block(cppred);
- construct_ssa(block, pred, cpblock, cppred);
- }*/
+/* Removes all blocks with non-marked predecessors from the condition chain. */
+static void unmark_not_allowed_cc_blocks(void)
+{
+ size_t blocks = ARR_LEN(cc_blocks);
+ size_t i;
+
+ for(i = 0; i < blocks; ++i) {
+ ir_node *block = cc_blocks[i];
+
+ /* Head is an exception. */
+ if (block == loop_head)
+ continue;
+
+ int const arity = get_Block_n_cfgpreds(block);
+ for (int a = 0; a < arity; ++a) {
+ if (!is_nodes_block_marked(get_Block_cfgpred(block, a))) {
+ set_Block_mark(block, 0);
+ --inversion_blocks_in_cc;
+ DB((dbg, LEVEL_5, "Removed %N from cc (blocks in cc %d)\n",
+ block, inversion_blocks_in_cc));
+
+ break;
+ }
+ }
+ }
+}
+
+/* Unmarks all cc blocks using cc_blocks except head.
+ * TODO: invert head for unrolling? */
+static void unmark_cc_blocks(void)
+{
+ size_t blocks = ARR_LEN(cc_blocks);
+ size_t i;
+
+ for(i = 0; i < blocks; ++i) {
+ ir_node *block = cc_blocks[i];
+
+ /* TODO Head is an exception. */
+ /*if (block != loop_head)*/
+ set_Block_mark(block, 0);
+ }
+ /*inversion_blocks_in_cc = 1;*/
+ inversion_blocks_in_cc = 0;
+
+ /* invalidate */
+ loop_info.cc_size = 0;
}
/**
* Populates head_entries with (node, pred_pos) tuple
- * whereas the node's pred at pred_pos is in the head but not the node itself.
- * Head and condition chain blocks must be marked.
+ * whereas the node's pred at pred_pos is in the cc but not the node itself.
+ * Also finds df loops inside the cc.
+ * Head and condition chain blocks have been marked previously.
*/
static void get_head_outs(ir_node *node, void *env)
{
int arity = get_irn_arity(node);
(void) env;
- DB((dbg, LEVEL_5, "get head entries %ld \n", get_irn_node_nr(node)));
-
- for(i = 0; i < arity; ++i) {
- /* node is not in the head, but the predecessor is.
- * (head or loop chain nodes are marked) */
-
- DB((dbg, LEVEL_5, "... "));
- DB((dbg, LEVEL_5, "node %ld marked %d (0) pred %d marked %d (1) \n",
- node->node_nr, is_nodesblock_marked(node),i, is_nodesblock_marked(get_irn_n(node, i))));
-
- if (!is_nodesblock_marked(node) && is_nodesblock_marked(get_irn_n(node, i))) {
- out_edge entry;
+ for (i = 0; i < arity; ++i) {
+ if (!is_nodes_block_marked(node) && is_nodes_block_marked(get_irn_n(node, i))) {
+ entry_edge entry;
entry.node = node;
- entry.pred_irn_n = i;
- DB((dbg, LEVEL_5,
- "Found head chain entry %ld @%d because !inloop %ld and inloop %ld\n",
- get_irn_node_nr(node), i, get_irn_node_nr(node), get_irn_node_nr(get_irn_n(node, i))));
- ARR_APP1(out_edge, cur_head_outs, entry);
+ entry.pos = i;
+			/* Saving the predecessor as well seems redundant, but becomes
+			 * necessary when its position is changed before it is
+			 * dereferenced. */
+ entry.pred = get_irn_n(node, i);
+ ARR_APP1(entry_edge, cur_head_outs, entry);
+ }
+ }
+
+ arity = get_irn_arity(loop_head);
+
+ /* Find df loops inside the cc */
+ if (is_Phi(node) && get_nodes_block(node) == loop_head) {
+ for (i = 0; i < arity; ++i) {
+ if (is_own_backedge(loop_head, i)) {
+ if (is_nodes_block_marked(get_irn_n(node, i))) {
+ entry_edge entry;
+ entry.node = node;
+ entry.pos = i;
+ entry.pred = get_irn_n(node, i);
+ ARR_APP1(entry_edge, head_df_loop, entry);
+ DB((dbg, LEVEL_5, "Found incc assignment node %N @%d is pred %N, graph %N %N\n",
+ node, i, entry.pred, current_ir_graph, get_irg_start_block(current_ir_graph)));
+ }
+ }
}
}
}
/**
- * Find condition chains, and add them to be inverted, until the node count exceeds the limit.
+ * Find condition chains, and add them to be inverted.
* A block belongs to the chain if a condition branches out of the loop.
+ * (Some blocks need to be removed once again.)
* Returns 1 if the given block belongs to the condition chain.
*/
-static unsigned find_condition_chains(ir_node *block)
+static void find_condition_chain(ir_node *block)
{
- const ir_edge_t *edge;
- unsigned mark = 0;
- int nodes_n = 0;
+ bool mark = false;
+ bool has_be = false;
+ bool jmp_only = true;
+ unsigned nodes_n = 0;
- DB((dbg, LEVEL_5, "condition_chains for block %ld\n", get_irn_node_nr(block)));
+ mark_irn_visited(block);
+
+ DB((dbg, LEVEL_5, "condition_chains for block %N\n", block));
- /* Collect all outs, including keeps.
- * (TODO firm function for number of out edges?) */
+ /* Get node count */
foreach_out_edge_kind(block, edge, EDGE_KIND_NORMAL) {
++nodes_n;
}
- /* We do not want to collect more nodes from condition chains, than the limit allows us to.
- * Also, leave at least one block as body. */
- if (head_inversion_node_count + nodes_n > inversion_head_node_limit
- || head_inversion_block_count + 1 == loop_info.blocks) {
+ /* Check if node count would exceed maximum cc size.
+ * TODO
+ * This is not optimal, as we search depth-first and break here,
+ * continuing with another subtree. */
+ if (loop_info.cc_size + nodes_n > opt_params.max_cc_size) {
set_Block_mark(block, 0);
+ return;
+ }
- return 0;
+ /* Check if block only has a jmp instruction. */
+ foreach_out_edge(block, edge) {
+ ir_node *src = get_edge_src_irn(edge);
+
+ if (!is_Block(src) && !is_Jmp(src)) {
+ jmp_only = false;
+ }
}
- /* First: check our successors, and add all succs that are outside of the loop to the list */
+ /* Check cf outs if one is leaving the loop,
+ * or if this node has a backedge. */
foreach_block_succ(block, edge) {
- ir_node *src = get_edge_src_irn( edge );
- int pos = get_edge_src_pos( edge );
-
- if (!is_in_loop(src)) {
- out_edge entry;
-
- mark = 1;
- entry.node = src;
- entry.pred_irn_n = pos;
- ARR_APP1(out_edge, cond_chain_entries, entry);
- mark_irn_visited(src);
+ ir_node *src = get_edge_src_irn(edge);
+ int pos = get_edge_src_pos(edge);
+
+ if (!is_in_loop(src))
+ mark = true;
+
+ /* Inverting blocks with backedge outs leads to a cf edge
+ * from the inverted head, into the inverted head (skipping the body).
+ * As the body becomes the new loop head,
+ * this would introduce another loop in the existing loop.
+ * This loop inversion cannot cope with this case. */
+ if (is_backedge(src, pos)) {
+ has_be = true;
+ break;
}
}
- if (mark == 0) {
- /* this block is not part of the chain,
- * because the chain would become too long or we have no successor outside of the loop */
-
- set_Block_mark(block, 0);
- return 0;
- } else {
+ /* We need all predecessors to already belong to the condition chain.
+ * Example of wrong case: * == in cc
+ *
+ * Head* ,--.
+ * /| \ B |
+ * / A* B / |
+ * / /\ / ? |
+ * / C* => D |
+ * / D Head |
+ * / A \_|
+ * C
+ */
+ /* Collect blocks containing only a Jmp.
+ * Do not collect blocks with backedge outs. */
+ if ((jmp_only || mark) && !has_be) {
set_Block_mark(block, 1);
- ++head_inversion_block_count;
- DB((dbg, LEVEL_5, "block %ld is part of condition chain\n", get_irn_node_nr(block)));
- head_inversion_node_count += nodes_n;
+ ++inversion_blocks_in_cc;
+ loop_info.cc_size += nodes_n;
+ DB((dbg, LEVEL_5, "block %N is part of condition chain\n", block));
+ ARR_APP1(ir_node *, cc_blocks, block);
+ } else {
+ set_Block_mark(block, 0);
}
- /* Second: walk all successors, and add them to the list if they are not part of the chain */
foreach_block_succ(block, edge) {
- unsigned inchain;
ir_node *src = get_edge_src_irn( edge );
- int pos = get_edge_src_pos( edge );
- /* already done cases */
- if (!is_in_loop( src ) || (get_irn_visited(src) >= get_irg_visited(current_ir_graph))) {
- continue;
- }
-
- mark_irn_visited(src);
- DB((dbg, LEVEL_5, "condition chain walk %ld\n", get_irn_node_nr(src)));
- inchain = find_condition_chains(src);
-
- /* if successor is not part of chain we need to collect its outs */
- if (!inchain) {
- out_edge entry;
- entry.node = src;
- entry.pred_irn_n = pos;
- ARR_APP1(out_edge, cond_chain_entries, entry);
- }
+ if (is_in_loop(src) && ! irn_visited(src))
+ find_condition_chain(src);
}
- return mark;
}
/**
- * Rewire the loop head and inverted head for loop inversion.
+ * Rewires the copied condition chain. Removes backedges
+ * as this condition chain is prior to the loop.
+ * Copy of loop_head must have phi list and old (unfixed) backedge info of the loop head.
+ * (loop_head is already fixed, we cannot rely on it.)
*/
-static void inversion_fix_heads(void)
+static void fix_copy_inversion(void)
{
- ir_node **loopheadnins, **invheadnins;
- ir_node *loophead = loop_cf_head;
- ir_node *invhead = get_copy(loophead);
-
- int headarity = get_irn_arity(loophead);
- ir_node *phi;
+ ir_node *new_head;
+ ir_node **ins;
+ ir_node **phis;
+ ir_node *phi, *next;
+ ir_node *head_cp = get_inversion_copy(loop_head);
+ ir_graph *irg = get_irn_irg(head_cp);
+ int arity = get_irn_arity(head_cp);
+ int backedges = get_backedge_n(head_cp, false);
+ int new_arity = arity - backedges;
+ int pos;
int i;
- int lheadin_c = 0;
- int iheadin_c = 0;
+ NEW_ARR_A(ir_node *, ins, new_arity);
- int backedges_n = get_backedge_n(loophead, 0);
- int lhead_arity = backedges_n;
- int ihead_arity = headarity - backedges_n;
+ pos = 0;
+ /* Remove block backedges */
+ for(i = 0; i < arity; ++i) {
+ if (!is_backedge(head_cp, i))
+ ins[pos++] = get_irn_n(head_cp, i);
+ }
- assert(lhead_arity != 0 && "Loophead has arity 0. Probably wrong backedge informations.");
- assert(ihead_arity != 0 && "Inversionhead has arity 0. Probably wrong backedge informations.");
+ new_head = new_r_Block(irg, new_arity, ins);
- /* new in arrays for all phis in the head blocks */
- NEW_ARR_A(ir_node *, loopheadnins, lhead_arity);
- NEW_ARR_A(ir_node *, invheadnins, ihead_arity);
+ phis = NEW_ARR_F(ir_node *, 0);
- for_each_phi(loophead, phi) {
- NEW_ARR_A(ir_node *, get_node_info(phi)->ins, lhead_arity);
+ for_each_phi_safe(get_Block_phis(head_cp), phi, next) {
+ ir_node *new_phi;
+ NEW_ARR_A(ir_node *, ins, new_arity);
+ pos = 0;
+ for(i = 0; i < arity; ++i) {
+ if (!is_backedge(head_cp, i))
+ ins[pos++] = get_irn_n(phi, i);
+ }
+ new_phi = new_rd_Phi(get_irn_dbg_info(phi),
+ new_head, new_arity, ins,
+ get_irn_mode(phi));
+ ARR_APP1(ir_node *, phis, new_phi);
}
- for_each_phi(invhead, phi) {
- NEW_ARR_A(ir_node *, get_node_info(phi)->ins, ihead_arity);
+
+ pos = 0;
+ for_each_phi_safe(get_Block_phis(head_cp), phi, next) {
+ exchange(phi, phis[pos++]);
}
- for (i = 0; i < headarity; i++) {
- ir_node *pred = get_irn_n(loophead, i);
+ exchange(head_cp, new_head);
+
+ DEL_ARR_F(phis);
+}
+
+
+/* Puts the original condition chain at the end of the loop,
+ * subsequently to the body.
+ * Relies on block phi list and correct backedges.
+ */
+static void fix_head_inversion(void)
+{
+ ir_node *new_head;
+ ir_node **ins;
+ ir_node *phi, *next;
+ ir_node **phis;
+ ir_graph *irg = get_irn_irg(loop_head);
+ int arity = get_irn_arity(loop_head);
+ int backedges = get_backedge_n(loop_head, false);
+ int new_arity = backedges;
+ int pos;
+ int i;
+
+ NEW_ARR_A(ir_node *, ins, new_arity);
+
+ pos = 0;
+ /* Keep only backedges */
+ for(i = 0; i < arity; ++i) {
+ if (is_own_backedge(loop_head, i))
+ ins[pos++] = get_irn_n(loop_head, i);
+ }
+
+ new_head = new_r_Block(irg, new_arity, ins);
+
+ phis = NEW_ARR_F(ir_node *, 0);
+
+ for_each_phi(loop_head, phi) {
+ ir_node *new_phi;
+ DB((dbg, LEVEL_5, "Fixing phi %N of loop head\n", phi));
+
+ NEW_ARR_A(ir_node *, ins, new_arity);
+
+ pos = 0;
+ for (i = 0; i < arity; ++i) {
+ ir_node *pred = get_irn_n(phi, i);
+
+ if (is_own_backedge(loop_head, i)) {
+ /* If assignment is in the condition chain,
+ * we need to create a phi in the new loop head.
+ * This can only happen for df, not cf. See find_condition_chains. */
+ /*if (is_nodes_block_marked(pred)) {
+ ins[pos++] = pred;
+ } else {*/
+ ins[pos++] = pred;
- /**
- * Rewire the head blocks ins and their phi ins.
- * Requires phi list per block.
- */
- if (is_backedge(loophead, i) && !is_alien_edge(loophead, i)) {
- /* just copy these edges */
- loopheadnins[lheadin_c] = pred;
- for_each_phi(loophead, phi) {
- get_node_info(phi)->ins[lheadin_c] = get_irn_n(phi, i);
- }
- ++lheadin_c;
- } else {
- invheadnins[iheadin_c] = pred;
- for_each_phi(invhead, phi) {
- get_node_info(phi)->ins[iheadin_c] = get_irn_n(phi, i) ;
}
- ++iheadin_c;
}
- }
- /* assign the ins to the head blocks */
- set_irn_in(loophead, ARR_LEN(loopheadnins), loopheadnins);
- set_irn_in(invhead, ARR_LEN(invheadnins), invheadnins);
+ new_phi = new_rd_Phi(get_irn_dbg_info(phi),
+ new_head, new_arity, ins,
+ get_irn_mode(phi));
+
+ ARR_APP1(ir_node *, phis, new_phi);
- /* assign the ins for the phis */
- for_each_phi(loophead, phi) {
- ir_node **ins = get_node_info(phi)->ins;
- set_irn_in(phi, lhead_arity, ins);
+ DB((dbg, LEVEL_5, "fix inverted head should exch %N by %N (pos %d)\n", phi, new_phi, pos ));
}
- for_each_phi(invhead, phi) {
- ir_node **ins = get_node_info(phi)->ins;
- set_irn_in(phi, ihead_arity, ins);
+ pos = 0;
+ for_each_phi_safe(get_Block_phis(loop_head), phi, next) {
+ DB((dbg, LEVEL_5, "fix inverted exch phi %N by %N\n", phi, phis[pos]));
+ if (phis[pos] != phi)
+ exchange(phi, phis[pos++]);
}
+
+ DEL_ARR_F(phis);
+
+ DB((dbg, LEVEL_5, "fix inverted head exch head block %N by %N\n", loop_head, new_head));
+ exchange(loop_head, new_head);
}
-static void inversion_walk(out_edge *head_entries)
+/* Does the loop inversion. */
+static void inversion_walk(ir_graph *irg, entry_edge *head_entries)
{
- int i;
- ir_node *phi;
- int entry_c = 0;
- ir_node **entry_buffer;
- ir_node **head_phi_assign;
+ size_t i;
- NEW_ARR_A(ir_node *, entry_buffer, ARR_LEN(head_entries));
+ /*
+ * The order of rewiring bottom-up is crucial.
+ * Any change of the order leads to lost information that would be needed later.
+ */
- head_phi_assign = NEW_ARR_F(ir_node *, 0);
+ ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
- /* Find assignments in the condition chain, to construct_ssa for them after the loop inversion. */
- for_each_phi(loop_cf_head , phi) {
- for(i=0; i<get_irn_arity(phi); ++i) {
- ir_node *def = get_irn_n(phi, i);
- if (is_nodesblock_marked(def)) {
- ARR_APP1(ir_node *, head_phi_assign, def);
- }
- }
+ /* 1. clone condition chain */
+ inc_irg_visited(irg);
+
+ for (i = 0; i < ARR_LEN(head_entries); ++i) {
+ entry_edge entry = head_entries[i];
+ ir_node *pred = get_irn_n(entry.node, entry.pos);
+
+ DB((dbg, LEVEL_5, "\nInit walk block %N\n", pred));
+
+ copy_walk(pred, is_nodes_block_marked, cur_loop);
}
- ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
+ ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
- /**
- * duplicate condition chain
- **/
- inc_irg_visited(current_ir_graph);
+ /* 2. Extends the head control flow successors ins
+ * with the definitions of the copied head node. */
+ for (i = 0; i < ARR_LEN(head_entries); ++i) {
+ entry_edge head_out = head_entries[i];
- for(i = 0; i < ARR_LEN(head_entries); ++i) {
- out_edge entry = head_entries[i];
- ir_node *node = entry.node;
- ir_node *pred = get_irn_n(entry.node, entry.pred_irn_n);
+ if (is_Block(head_out.node))
+ extend_ins_by_copy(head_out.node, head_out.pos);
+ }
- if (is_Block(node)) {
- DB((dbg, LEVEL_5, "\nInit walk block %ld\n", get_irn_node_nr(pred)));
- copy_walk(pred, is_nodesblock_marked, cur_loop);
- duplicate_preds(node, entry.pred_irn_n, get_copy(pred) );
- } else {
- DB((dbg, LEVEL_5, "\nInit walk node %ld\n", get_irn_node_nr(pred)));
- copy_walk(pred, is_nodesblock_marked, cur_loop);
-
- /* ignore keepalives */
- if (!is_End(node))
- /* Node is user of a value assigned inside the loop.
- * We will need a phi since we duplicated the head. */
- entry_buffer[entry_c++] = pred;
+ /* 3. construct_ssa for users of definitions in the condition chain,
+ * as there is now a second definition. */
+ for (i = 0; i < ARR_LEN(head_entries); ++i) {
+ entry_edge head_out = head_entries[i];
+
+ /* Ignore keepalives */
+ if (is_End(head_out.node))
+ continue;
+
+ /* Construct ssa for assignments in the condition chain. */
+ if (!is_Block(head_out.node)) {
+ ir_node *pred, *cppred, *block, *cpblock;
+
+ pred = head_out.pred;
+ cppred = get_inversion_copy(pred);
+ block = get_nodes_block(pred);
+ cpblock = get_nodes_block(cppred);
+ construct_ssa(block, pred, cpblock, cppred);
}
}
- ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
+ /*
+ * If there is an assignment in the condition chain
+ * with a user also in the condition chain,
+ * the dominance frontier is in the new loop head.
+ * The dataflow loop is completely in the condition chain.
+ * Goal:
+ * To be wired: >|
+ *
+ * | ,--. |
+ * Phi_cp | | copied condition chain
+ * >| | | |
+ * >| ?__/ |
+ * >| ,-.
+ * Phi* | | new loop head with newly created phi.
+ * | |
+ * Phi | | original, inverted condition chain
+ * | | |
+ * ?__/ |
+ *
+ */
+ for (i = 0; i < ARR_LEN(head_df_loop); ++i) {
+ entry_edge head_out = head_df_loop[i];
- inversion_fix_heads();
+ /* Construct ssa for assignments in the condition chain. */
+ ir_node *pred, *cppred, *block, *cpblock;
- /* Generate phis for users of values assigned in the condition chain
- * and read in the loops body */
- construct_ssa_foreach(entry_buffer, entry_c);
+ pred = head_out.pred;
+ cppred = get_inversion_copy(pred);
+ assert(cppred && pred);
+ block = get_nodes_block(pred);
+ cpblock = get_nodes_block(cppred);
+ construct_ssa(block, pred, cpblock, cppred);
+ }
+
+ /* 4. Remove the ins which are no backedges from the original condition chain
+ * as the cc is now subsequent to the body. */
+ fix_head_inversion();
- /* Generate phis for values that are assigned in the condition chain
- * but not read in the loops body. */
- construct_ssa_foreach(head_phi_assign, ARR_LEN(head_phi_assign));
+ /* 5. Remove the backedges of the copied condition chain,
+ * because it is going to be the new 'head' in advance to the loop. */
+ fix_copy_inversion();
- loop_cf_head = get_copy(loop_cf_head);
}
-/* Loop peeling */
-void loop_peeling(void)
+/* Performs loop inversion of cur_loop if possible and reasonable. */
+static void loop_inversion(ir_graph *irg)
{
- cur_loop_outs = NEW_ARR_F(out_edge, 0);
- irg_walk_graph( current_ir_graph, get_loop_outs, NULL, NULL );
+ int loop_depth;
+ unsigned max_loop_nodes = opt_params.max_loop_size;
+ unsigned max_loop_nodes_adapted;
+ int depth_adaption = opt_params.depth_adaption;
- peel(cur_loop_outs);
+ bool do_inversion = true;
- /* clean up */
- reset_node_infos();
+ /* Depth of 0 is the procedure and 1 a topmost loop. */
+ loop_depth = get_loop_depth(cur_loop) - 1;
- set_irg_doms_inconsistent(current_ir_graph);
- set_irg_loopinfo_inconsistent(current_ir_graph);
- set_irg_outs_inconsistent(current_ir_graph);
+ /* Calculating in per mil. */
+ max_loop_nodes_adapted = get_max_nodes_adapted(loop_depth);
- DEL_ARR_F(cur_loop_outs);
-}
+ DB((dbg, LEVEL_1, "max_nodes: %d\nmax_nodes_adapted %d at depth of %d (adaption %d)\n",
+ max_loop_nodes, max_loop_nodes_adapted, loop_depth, depth_adaption));
-/* Loop inversion */
-void loop_inversion(void)
-{
- unsigned do_inversion = 1;
+ if (loop_info.nodes == 0)
+ return;
- inversion_head_node_limit = INT_MAX;
+ if (loop_info.nodes > max_loop_nodes) {
+ /* Only for stats */
+		DB((dbg, LEVEL_1, "Nodes %d > allowed nodes %d\n",
+			loop_info.nodes, max_loop_nodes));
+ ++stats.too_large;
+ /* no RETURN */
+ /* Adaption might change it */
+ }
- /* Search for condition chains. */
- ir_reserve_resources(current_ir_graph, IR_RESOURCE_BLOCK_MARK);
+ /* Limit processing to loops smaller than given parameter. */
+ if (loop_info.nodes > max_loop_nodes_adapted) {
+ DB((dbg, LEVEL_1, "Nodes %d > allowed nodes (depth %d adapted) %d\n",
+ loop_info.nodes, loop_depth, max_loop_nodes_adapted));
+ ++stats.too_large_adapted;
+ return;
+ }
- irg_walk_graph(current_ir_graph, reset_block_mark, NULL, NULL);
+ if (loop_info.calls > opt_params.allowed_calls) {
+ DB((dbg, LEVEL_1, "Calls %d > allowed calls %d\n",
+ loop_info.calls, opt_params.allowed_calls));
+ ++stats.calls_limit;
+ return;
+ }
+
+ /*inversion_head_node_limit = INT_MAX;*/
+ ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK);
- loop_info.blocks = get_loop_n_blocks(cur_loop);
- cond_chain_entries = NEW_ARR_F(out_edge, 0);
+ /* Reset block marks.
+ * We use block marks to flag blocks of the original condition chain. */
+ irg_walk_graph(irg, reset_block_mark, NULL, NULL);
- head_inversion_node_count = 0;
- head_inversion_block_count = 0;
+ /*loop_info.blocks = get_loop_n_blocks(cur_loop);*/
+ cond_chain_entries = NEW_ARR_F(entry_edge, 0);
+ head_df_loop = NEW_ARR_F(entry_edge, 0);
- set_Block_mark(loop_cf_head, 1);
- mark_irn_visited(loop_cf_head);
- inc_irg_visited(current_ir_graph);
+ /*head_inversion_node_count = 0;*/
+ inversion_blocks_in_cc = 0;
- find_condition_chains(loop_cf_head);
+ /* Use phase to keep copy of nodes from the condition chain. */
+ ir_nodemap_init(&map, irg);
+ obstack_init(&obst);
- DB((dbg, LEVEL_3, "Loop contains %d blocks.\n", loop_info.blocks));
- if (loop_info.blocks < 2) {
- do_inversion = 0;
- DB((dbg, LEVEL_3, "Loop contains %d (less than 2) blocks => No Inversion done.\n", loop_info.blocks));
+ /* Search for condition chains and temporarily save the blocks in an array. */
+ cc_blocks = NEW_ARR_F(ir_node *, 0);
+ inc_irg_visited(irg);
+ find_condition_chain(loop_head);
+
+ unmark_not_allowed_cc_blocks();
+ DEL_ARR_F(cc_blocks);
+
+ /* Condition chain too large.
+ * Loop should better be small enough to fit into the cache. */
+ /* TODO Of course, we should take a small enough cc in the first place,
+ * which is not that simple. (bin packing) */
+ if (loop_info.cc_size > opt_params.max_cc_size) {
+ ++stats.cc_limit_reached;
+
+ do_inversion = false;
+
+ /* Unmark cc blocks except the head.
+ * Invert head only for possible unrolling. */
+ unmark_cc_blocks();
}
/* We also catch endless loops here,
* because they do not have a condition chain. */
- if (head_inversion_block_count < 1) {
- do_inversion = 0;
- DB((dbg, LEVEL_3, "Loop contains %d (less than 1) invertible blocks => No Inversion done.\n", head_inversion_block_count));
+ if (inversion_blocks_in_cc < 1) {
+ do_inversion = false;
+ DB((dbg, LEVEL_3,
+ "Loop contains %d (less than 1) invertible blocks => No Inversion done.\n",
+ inversion_blocks_in_cc));
}
if (do_inversion) {
- cur_head_outs = NEW_ARR_F(out_edge, 0);
+ cur_head_outs = NEW_ARR_F(entry_edge, 0);
- /* Get all edges pointing into the head or condition chain (outs). */
- irg_walk_graph(current_ir_graph, get_head_outs, NULL, NULL);
- inversion_walk(cur_head_outs);
+ /* Get all edges pointing into the condition chain. */
+ irg_walk_graph(irg, get_head_outs, NULL, NULL);
+
+ /* Do the inversion */
+ inversion_walk(irg, cur_head_outs);
DEL_ARR_F(cur_head_outs);
- set_irg_doms_inconsistent(current_ir_graph);
- set_irg_loopinfo_inconsistent(current_ir_graph);
- set_irg_outs_inconsistent(current_ir_graph);
+ /* Duplicated blocks changed doms */
+ clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
+ | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
+
+ ++stats.inverted;
}
/* free */
+ obstack_free(&obst, NULL);
+ ir_nodemap_destroy(&map);
DEL_ARR_F(cond_chain_entries);
- ir_free_resources(current_ir_graph, IR_RESOURCE_BLOCK_MARK);
+ DEL_ARR_F(head_df_loop);
+
+ ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK);
}
-/**
- * Returns last element of linked list of copies by
- * walking the linked list.
- */
-ir_node *get_last_copy(ir_node *node)
+/* Fix the original loop_heads ins for invariant unrolling case. */
+static void unrolling_fix_loop_head_inv(void)
{
- ir_node *copy, *cur;
- cur = node;
- while ((copy = get_copy(cur))) {
- cur = copy;
+ ir_node *ins[2];
+ ir_node *phi;
+ ir_node *proj = new_Proj(loop_info.duff_cond, mode_X, 0);
+ ir_node *head_pred = get_irn_n(loop_head, loop_info.be_src_pos);
+ ir_node *loop_condition = get_unroll_copy(head_pred, unroll_nr - 1);
+
+ /* Original loop_heads ins are:
+ * duff block and the own backedge */
+
+ ins[0] = loop_condition;
+ ins[1] = proj;
+ set_irn_in(loop_head, 2, ins);
+ DB((dbg, LEVEL_4, "Rewire ins of block loophead %N to pred %N and duffs entry %N \n" , loop_head, ins[0], ins[1]));
+
+ for_each_phi(loop_head, phi) {
+ ir_node *pred = get_irn_n(phi, loop_info.be_src_pos);
+ /* TODO we think it is a phi, but for Mergesort it is not the case.*/
+
+ ir_node *last_pred = get_unroll_copy(pred, unroll_nr - 1);
+
+ ins[0] = last_pred;
+ ins[1] = (ir_node*)get_irn_link(phi);
+ set_irn_in(phi, 2, ins);
+ DB((dbg, LEVEL_4, "Rewire ins of loophead phi %N to pred %N and duffs entry %N \n" , phi, ins[0], ins[1]));
}
- return cur;
}
-/**
- * Rewire floating copies of the current loop.
- */
-void unrolling_fix_cf(void)
+/* Removes previously created phis with only 1 in. */
+static void correct_phis(ir_node *node, void *env)
{
- ir_node *loophead = loop_cf_head;
- int headarity = get_irn_arity(loophead);
- ir_node *phi, *headnode;
- /*ir_node *high, *low;*/
- int i;
+ (void)env;
- int uhead_in_n = 0;
- int backedges_n = get_backedge_n(loophead, 0);
- int unroll_arity = backedges_n;
+ if (is_Phi(node) && get_irn_arity(node) == 1) {
+ ir_node *exch;
+ ir_node *in[1];
- /* Create ins for all heads and their phis */
- headnode = get_copy(loophead);
- for_each_copy(headnode) {
- NEW_ARR_A(ir_node *, get_node_info(headnode)->ins, unroll_arity);
- for_each_phi(headnode, phi) {
- NEW_ARR_A(ir_node *, get_node_info(phi)->ins, unroll_arity);
- }
- }
+ in[0] = get_irn_n(node, 0);
- /* Append the copies to the existing loop. */
- for (i = 0; i < headarity; i++) {
- ir_node *upper_head = loophead;
- ir_node *lower_head = get_copy(loophead);
+ exch = new_rd_Phi(get_irn_dbg_info(node),
+ get_nodes_block(node), 1, in,
+ get_irn_mode(node));
- ir_node *upper_pred = get_irn_n(loophead, i);
- ir_node *lower_pred = get_copy(get_irn_n(loophead, i));
+ exchange(node, exch);
+ }
+}
- ir_node *last_pred;
+/* Unrolling: Rewire floating copies. */
+static void place_copies(int copies)
+{
+ ir_node *loophead = loop_head;
+ size_t i;
+ int c;
+ int be_src_pos = loop_info.be_src_pos;
+
+ /* Serialize loops by fixing their head ins.
+ * Processed are the copies.
+ * The original loop is done after that, to keep backedge infos. */
+ for (c = 0; c < copies; ++c) {
+ ir_node *upper = get_unroll_copy(loophead, c);
+ ir_node *lower = get_unroll_copy(loophead, c + 1);
+ ir_node *phi;
+ ir_node *topmost_be_block = get_nodes_block(get_irn_n(loophead, be_src_pos));
+
+ /* Important: get the preds first and then their copy. */
+ ir_node *upper_be_block = get_unroll_copy(topmost_be_block, c);
+ ir_node *new_jmp = new_r_Jmp(upper_be_block);
+ DB((dbg, LEVEL_5, " place_copies upper %N lower %N\n", upper, lower));
+
+ DB((dbg, LEVEL_5, "topmost be block %N \n", topmost_be_block));
+
+ if (loop_info.unroll_kind == constant) {
+ ir_node *ins[1];
+ ins[0] = new_jmp;
+ set_irn_in(lower, 1, ins);
- /**
- * Build unrolled loop top down
- */
- if (is_backedge(loophead, i) && !is_alien_edge(loophead, i)) {
- for_each_copy2(upper_head, lower_head, upper_pred, lower_pred) {
- get_node_info(lower_head)->ins[uhead_in_n] = upper_pred;
+ for_each_phi(loophead, phi) {
+ ir_node *topmost_def = get_irn_n(phi, be_src_pos);
+ ir_node *upper_def = get_unroll_copy(topmost_def, c);
+ ir_node *lower_phi = get_unroll_copy(phi, c + 1);
+
+ /* It is possible, that the value used
+ * in the OWN backedge path is NOT defined in this loop. */
+ if (is_in_loop(topmost_def))
+ ins[0] = upper_def;
+ else
+ ins[0] = topmost_def;
+
+ set_irn_in(lower_phi, 1, ins);
+ /* Need to replace phis with 1 in later. */
+ }
+ } else {
+ /* Invariant case */
+ /* Every node has 2 ins. One from the duff blocks
+ * and one from the previously unrolled loop. */
+ ir_node *ins[2];
+ /* Calculate corresponding projection of mod result for this copy c */
+ ir_node *proj = new_Proj(loop_info.duff_cond, mode_X, unroll_nr - c - 1);
+ DB((dbg, LEVEL_4, "New duff proj %N\n" , proj));
+
+ ins[0] = new_jmp;
+ ins[1] = proj;
+ set_irn_in(lower, 2, ins);
+ DB((dbg, LEVEL_4, "Rewire ins of Block %N to pred %N and duffs entry %N \n" , lower, ins[0], ins[1]));
- for_each_phi(upper_head, phi) {
- ir_node *phi_copy = get_copy(phi);
- get_node_info(phi_copy)->ins[uhead_in_n] = get_irn_n(phi, i);
+ for_each_phi(loophead, phi) {
+ ir_node *topmost_phi_pred = get_irn_n(phi, be_src_pos);
+ ir_node *upper_phi_pred;
+ ir_node *lower_phi;
+ ir_node *duff_phi;
+
+ lower_phi = get_unroll_copy(phi, c + 1);
+ duff_phi = (ir_node*)get_irn_link(phi);
+ DB((dbg, LEVEL_4, "DD Link of %N is %N\n" , phi, duff_phi));
+
+ /* */
+ if (is_in_loop(topmost_phi_pred)) {
+ upper_phi_pred = get_unroll_copy(topmost_phi_pred, c);
+ } else {
+ upper_phi_pred = topmost_phi_pred;
}
+
+ ins[0] = upper_phi_pred;
+ ins[1] = duff_phi;
+ set_irn_in(lower_phi, 2, ins);
+ DB((dbg, LEVEL_4, "Rewire ins of %N to pred %N and duffs entry %N \n" , lower_phi, ins[0], ins[1]));
}
+ }
+ }
+
+ /* Reconnect last copy. */
+ for (i = 0; i < ARR_LEN(loop_entries); ++i) {
+ entry_edge edge = loop_entries[i];
+ /* Last copy is at the bottom */
+ ir_node *new_pred = get_unroll_copy(edge.pred, copies);
+ set_irn_n(edge.node, edge.pos, new_pred);
+ }
- last_pred = upper_pred;
- ++uhead_in_n;
+ /* Fix original loops head.
+ * Done in the end, as ins and be info were needed before. */
+ if (loop_info.unroll_kind == constant) {
+ ir_node *phi;
+ ir_node *head_pred = get_irn_n(loop_head, be_src_pos);
+ ir_node *loop_condition = get_unroll_copy(head_pred, unroll_nr - 1);
+
+ set_irn_n(loop_head, loop_info.be_src_pos, loop_condition);
+
+ for_each_phi(loop_head, phi) {
+ ir_node *pred = get_irn_n(phi, be_src_pos);
+ ir_node *last_pred;
+
+ /* It is possible, that the value used
+ * in the OWN backedge path is NOT assigned in this loop. */
+ if (is_in_loop(pred))
+ last_pred = get_unroll_copy(pred, copies);
+ else
+ last_pred = pred;
+ set_irn_n(phi, be_src_pos, last_pred);
+ }
- /* Fix the topmost loop heads backedges. */
- set_irn_n(loophead, i, last_pred);
- for_each_phi(loophead, phi) {
- ir_node *last_phi = get_last_copy(phi);
- ir_node *pred = get_irn_n(last_phi, i);
- set_irn_n(phi, i, pred);
- }
+ } else {
+ unrolling_fix_loop_head_inv();
+ }
+}
+
+/* Copies the cur_loop several times. */
+static void copy_loop(entry_edge *cur_loop_outs, int copies)
+{
+ int c;
+
+ ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
+
+ for (c = 0; c < copies; ++c) {
+ size_t i;
+
+ inc_irg_visited(current_ir_graph);
+
+ DB((dbg, LEVEL_5, " ### Copy_loop copy nr: %d ###\n", c));
+ for (i = 0; i < ARR_LEN(cur_loop_outs); ++i) {
+ entry_edge entry = cur_loop_outs[i];
+ ir_node *pred = get_irn_n(entry.node, entry.pos);
+
+ copy_walk_n(pred, is_in_loop, c + 1);
+ }
+ }
+
+ ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
+}
+
+
+/* Creates a new phi from the given phi node omitting own bes,
+ * using be_block as supplier of backedge information. */
+static ir_node *clone_phis_sans_bes(ir_node *phi, ir_node *be_block, ir_node *dest_block)
+{
+ ir_node **ins;
+ int arity = get_irn_arity(phi);
+ int i, c = 0;
+ ir_node *newphi;
+
+ assert(get_irn_arity(phi) == get_irn_arity(be_block));
+ assert(is_Phi(phi));
+
+ ins = NEW_ARR_F(ir_node *, arity);
+ for (i = 0; i < arity; ++i) {
+ if (! is_own_backedge(be_block, i)) {
+ ins[c] = get_irn_n(phi, i);
+ ++c;
}
}
- headnode = get_copy(loophead);
- for_each_copy(headnode) {
- set_irn_in(headnode, unroll_arity, get_node_info(headnode)->ins);
- for_each_phi(headnode, phi) {
- set_irn_in(phi, unroll_arity, get_node_info(phi)->ins);
+ newphi = new_r_Phi(dest_block, c, ins, get_irn_mode(phi));
+
+ set_irn_link(phi, newphi);
+ DB((dbg, LEVEL_4, "Linking for duffs device %N to %N\n", phi, newphi));
+
+ return newphi;
+}
+
+/* Creates a new block from the given block node omitting own bes,
+ * using be_block as supplier of backedge information. */
+static ir_node *clone_block_sans_bes(ir_node *node, ir_node *be_block)
+{
+ int arity = get_irn_arity(node);
+ int i, c = 0;
+ ir_node **ins;
+
+ assert(get_irn_arity(node) == get_irn_arity(be_block));
+ assert(is_Block(node));
+
+ NEW_ARR_A(ir_node *, ins, arity);
+ for (i = 0; i < arity; ++i) {
+ if (! is_own_backedge(be_block, i)) {
+ ins[c] = get_irn_n(node, i);
+ ++c;
}
}
+
+ return new_Block(c, ins);
}
-#if 0
-static ir_node *add_phi(ir_node *node, int phi_pos)
+/* Creates a structure to calculate absolute value of node op.
+ * Returns mux node with absolute value. */
+static ir_node *new_Abs(ir_node *op, ir_mode *mode)
+{
+ ir_graph *irg = get_irn_irg(op);
+ ir_node *block = get_nodes_block(op);
+ ir_node *zero = new_r_Const(irg, get_mode_null(mode));
+ ir_node *cmp = new_r_Cmp(block, op, zero, ir_relation_less);
+ ir_node *minus_op = new_r_Minus(block, op, mode);
+ ir_node *mux = new_r_Mux(block, cmp, op, minus_op, mode);
+
+ return mux;
+}
+
+
+/* Creates blocks for Duff's device, using previously obtained
+ * information about the iv.
+ * TODO split */
+static void create_duffs_block(void)
{
ir_mode *mode;
+
+ ir_node *block1, *count_block, *duff_block;
+ ir_node *ems, *ems_mod, *ems_div, *ems_mod_proj, *cmp_null,
+ *ems_mode_cond, *x_true, *x_false, *const_null;
+ ir_node *true_val, *false_val;
+ ir_node *ins[2];
+
+ ir_node *duff_mod, *proj, *cond;
+
+ ir_node *count, *correction, *unroll_c;
+ ir_node *cmp_bad_count, *good_count, *bad_count, *count_phi, *bad_count_neg;
ir_node *phi;
- ir_node **in;
- mode = get_irn_mode(get_irn_n(node, phi_pos));
- ir_node *block = get_nodes_block(node);
- int n_cfgpreds = get_irn_arity(block);
- ir_node *pred = get_irn_n(node, phi_pos);
- int i;
- /* create a new Phi */
- NEW_ARR_A(ir_node*, in, n_cfgpreds);
- for(i = 0; i < n_cfgpreds; ++i)
- in[i] = new_Unknown(mode); /*pred;*/
+ mode = get_irn_mode(loop_info.end_val);
+ const_null = new_Const(get_mode_null(mode));
- phi = new_r_Phi(block, n_cfgpreds, in, mode);
+ /* TODO naming
+ * 1. Calculate first approach to count.
+ * Condition: (end - start) % step == 0 */
+ block1 = clone_block_sans_bes(loop_head, loop_head);
+ DB((dbg, LEVEL_4, "Duff block 1 %N\n", block1));
- assert(phi && "phi null");
- assert(is_Bad(phi) && "phi bad");
+ /* Create loop entry phis in first duff block
+ * as it becomes the loops preheader */
+ for_each_phi(loop_head, phi) {
+ /* Returns phis pred if phi would have arity 1*/
+ ir_node *new_phi = clone_phis_sans_bes(phi, loop_head, block1);
- /* Important: always keep block phi list up to date. */
- add_Block_phi(block, phi);
- /* EVERY node is assumed to have a node_info linked. */
- alloc_node_info(phi, NULL);
+ DB((dbg, LEVEL_4, "HEAD %N phi %N\n", loop_head, phi));
+ DB((dbg, LEVEL_4, "BLOCK1 %N phi %N\n", block1, new_phi));
+ }
- set_irn_n(node, phi_pos, phi);
- return phi;
-}
-#endif
+ ems = new_r_Sub(block1, loop_info.end_val, loop_info.start_val,
+ get_irn_mode(loop_info.end_val));
+ DB((dbg, LEVEL_4, "BLOCK1 sub %N\n", ems));
+
+
+ ems = new_Sub(loop_info.end_val, loop_info.start_val,
+ get_irn_mode(loop_info.end_val));
+
+ DB((dbg, LEVEL_4, "mod ins %N %N\n", ems, loop_info.step));
+ ems_mod = new_r_Mod(block1,
+ new_NoMem(),
+ ems,
+ loop_info.step,
+ mode,
+ op_pin_state_pinned);
+ ems_div = new_r_Div(block1,
+ new_NoMem(),
+ ems,
+ loop_info.step,
+ mode,
+ op_pin_state_pinned);
+
+ DB((dbg, LEVEL_4, "New module node %N\n", ems_mod));
+
+ ems_mod_proj = new_r_Proj(ems_mod, mode_Iu, pn_Mod_res);
+ cmp_null = new_r_Cmp(block1, ems_mod_proj, const_null, ir_relation_less);
+ ems_mode_cond = new_r_Cond(block1, cmp_null);
+
+ /* ems % step == 0 */
+ x_true = new_r_Proj(ems_mode_cond, mode_X, pn_Cond_true);
+ /* ems % step != 0 */
+ x_false = new_r_Proj(ems_mode_cond, mode_X, pn_Cond_false);
+
+ /* 2. Second block.
+ * Assures, duffs device receives a valid count.
+ * Condition:
+ * decreasing: count < 0
+ * increasing: count > 0
+ */
+ ins[0] = x_true;
+ ins[1] = x_false;
+ count_block = new_Block(2, ins);
+ DB((dbg, LEVEL_4, "Duff block 2 %N\n", count_block));
-/**
- * Loop unrolling
- * Could be improved with variable range informations.
+
+ /* Increase loop-taken-count depending on the loop condition
+ * uses the latest iv to compare to. */
+ if (loop_info.latest_value == 1) {
+ /* ems % step == 0 : +0 */
+ true_val = new_Const(get_mode_null(mode));
+ /* ems % step != 0 : +1 */
+ false_val = new_Const(get_mode_one(mode));
+ } else {
+ ir_tarval *tv_two = new_tarval_from_long(2, mode);
+ /* ems % step == 0 : +1 */
+ true_val = new_Const(get_mode_one(mode));
+ /* ems % step != 0 : +2 */
+ false_val = new_Const(tv_two);
+ }
+
+ ins[0] = true_val;
+ ins[1] = false_val;
+
+ correction = new_r_Phi(count_block, 2, ins, mode);
+
+ count = new_r_Proj(ems_div, mode, pn_Div_res);
+
+ /* (end - start) / step + correction */
+ count = new_Add(count, correction, mode);
+
+ /* We preconditioned the loop to be tail-controlled.
+ * So, if count is something 'wrong' like 0,
+ * negative/positive (depending on step direction),
+ * we may take the loop once (tail-contr.) and leave it
+ * to the existing condition, to break; */
+
+ /* Depending on step direction, we have to check for > or < 0 */
+ if (loop_info.decreasing == 1) {
+ cmp_bad_count = new_r_Cmp(count_block, count, const_null,
+ ir_relation_less);
+ } else {
+ cmp_bad_count = new_r_Cmp(count_block, count, const_null,
+ ir_relation_greater);
+ }
+
+ bad_count_neg = new_r_Cond(count_block, cmp_bad_count);
+ good_count = new_Proj(bad_count_neg, mode_X, pn_Cond_true);
+ bad_count = new_Proj(ems_mode_cond, mode_X, pn_Cond_false);
+
+ /* 3. Duff Block
+ * Contains the Mod node used to decide which copy of the loop to start from. */
+
+ ins[0] = good_count;
+ ins[1] = bad_count;
+ duff_block = new_Block(2, ins);
+ DB((dbg, LEVEL_4, "Duff block 3 %N\n", duff_block));
+
+ /* Get absolute value */
+ ins[0] = new_Abs(count, mode);
+ /* Manually feed the aforementioned count = 1 (bad case)*/
+ ins[1] = new_Const(get_mode_one(mode));
+ count_phi = new_r_Phi(duff_block, 2, ins, mode);
+
+ unroll_c = new_Const(new_tarval_from_long((long)unroll_nr, mode));
+
+ /* count % unroll_nr */
+ duff_mod = new_r_Mod(duff_block,
+ new_NoMem(),
+ count_phi,
+ unroll_c,
+ mode,
+ op_pin_state_pinned);
+
+
+ proj = new_Proj(duff_mod, mode, pn_Mod_res);
+ /* condition does NOT create itself in the block of the proj! */
+ cond = new_r_Cond(duff_block, proj);
+
+ loop_info.duff_cond = cond;
+}
+
+/* Returns 1 if given node is not in loop,
+ * or if it is a phi of the loop head with only loop invariant defs.
*/
-void loop_unrolling(void)
+static unsigned is_loop_invariant_def(ir_node *node)
{
- int i, j;
+ int i;
- unroll_times = 8;
+ if (! is_in_loop(node)) {
+ DB((dbg, LEVEL_4, "Not in loop %N\n", node));
+ /* || is_Const(node) || is_SymConst(node)) {*/
+ return 1;
+ }
- cur_loop_outs = NEW_ARR_F(out_edge, 0);
- irg_walk_graph( current_ir_graph, get_loop_outs, NULL, NULL );
+ /* If this is a phi of the loophead shared by more than 1 loop,
+ * we need to check if all defs are not in the loop. */
+ if (is_Phi(node)) {
+ ir_node *block;
+ block = get_nodes_block(node);
- ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
+ /* To prevent unexpected situations. */
+ if (block != loop_head) {
+ return 0;
+ }
- /* duplicate whole loop content */
- inc_irg_visited(current_ir_graph);
+ for (i = 0; i < get_irn_arity(node); ++i) {
+ /* Check if all bes are just loopbacks. */
+ if (is_own_backedge(block, i) && get_irn_n(node, i) != node)
+ return 0;
+ }
+ DB((dbg, LEVEL_4, "invar %N\n", node));
+ return 1;
+ }
+ DB((dbg, LEVEL_4, "Not invar %N\n", node));
- for(i = 0; i < ARR_LEN(cur_loop_outs); i++) {
- out_edge entry = cur_loop_outs[i];
- ir_node *node = entry.node;
- ir_node *pred = get_irn_n(entry.node, entry.pred_irn_n);
+ return 0;
+}
- if (!is_Block(node)) {
+/* Returns 1 if one pred of node is invariant and the other is not.
+ * invar_pred and other are set analogously. */
+static unsigned get_invariant_pred(ir_node *node, ir_node **invar_pred, ir_node **other)
+{
+ ir_node *pred0 = get_irn_n(node, 0);
+ ir_node *pred1 = get_irn_n(node, 1);
- for (j = 0; j < unroll_times-1; ++j) {
- copy_walk(pred, is_in_loop, cur_loop);
+ *invar_pred = NULL;
+ *other = NULL;
- pred = get_copy(pred);
- }
+ if (is_loop_invariant_def(pred0)) {
+ DB((dbg, LEVEL_4, "pred0 invar %N\n", pred0));
+ *invar_pred = pred0;
+ *other = pred1;
+ }
+
+ if (is_loop_invariant_def(pred1)) {
+ DB((dbg, LEVEL_4, "pred1 invar %N\n", pred1));
+
+ if (*invar_pred != NULL) {
+ /* RETURN. We do not want both preds to be invariant. */
+ return 0;
}
+
+ *other = pred0;
+ *invar_pred = pred1;
+ return 1;
+ } else {
+ DB((dbg, LEVEL_4, "pred1 not invar %N\n", pred1));
+
+ if (*invar_pred != NULL)
+ return 1;
+ else
+ return 0;
}
+}
+
+/* Starts from a phi that may belong to an iv.
+ * If an add forms a loop with iteration_phi,
+ * and add uses a constant, 1 is returned
+ * and 'start' as well as 'add' are sane. */
+static unsigned get_start_and_add(ir_node *iteration_phi, unrolling_kind_flag role)
+{
+ int i;
+ ir_node *found_add = loop_info.add;
+ int arity = get_irn_arity(iteration_phi);
- for(i = 0; i < ARR_LEN(cur_loop_outs); i++) {
- out_edge entry = cur_loop_outs[i];
- ir_node *node = entry.node;
- ir_node *pred = get_irn_n(entry.node, entry.pred_irn_n);
+ DB((dbg, LEVEL_4, "Find start and add from %N\n", iteration_phi));
- /* build linked list of copied nodes/blocks */
- if (is_Block(node)) {
- for (j = 0; j < unroll_times-1; ++j) {
- copy_walk(pred, is_in_loop, cur_loop);
- duplicate_preds(node, entry.pred_irn_n, get_copy(pred));
+ for (i = 0; i < arity; ++i) {
- pred = get_copy(pred);
- }
+ /* Find start_val which needs to be pred of the iteration_phi.
+ * If start_val already known, sanity check. */
+ if (!is_backedge(get_nodes_block(loop_info.iteration_phi), i)) {
+ ir_node *found_start_val = get_irn_n(loop_info.iteration_phi, i);
+
+ DB((dbg, LEVEL_4, "found_start_val %N\n", found_start_val));
+
+ /* We already found a start_val it has to be always the same. */
+ if (loop_info.start_val && found_start_val != loop_info.start_val)
+ return 0;
+
+ if ((role == constant) && !(is_SymConst(found_start_val) || is_Const(found_start_val)))
+ return 0;
+ else if((role == constant) && !(is_loop_invariant_def(found_start_val)))
+ return 0;
+
+ loop_info.start_val = found_start_val;
+ }
+
+ /* The phi has to be in the loop head.
+ * Follow all own backedges. Every value supplied from these preds of the phi
+ * needs to origin from the same add. */
+ if (is_own_backedge(get_nodes_block(loop_info.iteration_phi), i)) {
+ ir_node *new_found = get_irn_n(loop_info.iteration_phi,i);
+
+ DB((dbg, LEVEL_4, "is add? %N\n", new_found));
+
+ if (! (is_Add(new_found) || is_Sub(new_found)) || (found_add && found_add != new_found))
+ return 0;
+ else
+ found_add = new_found;
}
}
- ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
+ loop_info.add = found_add;
- /* Line up the floating copies. */
- unrolling_fix_cf();
+ return 1;
+}
+
+
+/* Returns 1 if one pred of node is a const value and the other is not.
+ * const_pred and other are set analogously. */
+static unsigned get_const_pred(ir_node *node, ir_node **const_pred, ir_node **other)
+{
+ ir_node *pred0 = get_irn_n(node, 0);
+ ir_node *pred1 = get_irn_n(node, 1);
- /* Generate phis for all loop outs */
- for(i = 0; i < ARR_LEN(cur_loop_outs); i++) {
- out_edge entry = cur_loop_outs[i];
- ir_node *node = entry.node;
- ir_node *pred = get_irn_n(entry.node, entry.pred_irn_n);
+ DB((dbg, LEVEL_4, "Checking for constant pred of %N\n", node));
- if (!is_Block(node) && !is_End(node)) {
- DB((dbg, LEVEL_1, " construct_ssa_n def %ld node %ld pos %d\n",
- get_irn_node_nr(pred), get_irn_node_nr(node), entry.pred_irn_n));
- construct_ssa_n(pred, node);
+ *const_pred = NULL;
+ *other = NULL;
+
+ /*DB((dbg, LEVEL_4, "is %N const\n", pred0));*/
+ if (is_Const(pred0) || is_SymConst(pred0)) {
+ *const_pred = pred0;
+ *other = pred1;
+ }
+
+ /*DB((dbg, LEVEL_4, "is %N const\n", pred1));*/
+ if (is_Const(pred1) || is_SymConst(pred1)) {
+ if (*const_pred != NULL) {
+ /* RETURN. We do not want both preds to be constant. */
+ return 0;
}
+
+ *other = pred0;
+ *const_pred = pred1;
}
- DEL_ARR_F(cur_loop_outs);
+ if (*const_pred == NULL)
+ return 0;
+ else
+ return 1;
+}
- set_irg_doms_inconsistent(current_ir_graph);
- set_irg_loopinfo_inconsistent(current_ir_graph);
- set_irg_outs_inconsistent(current_ir_graph);
+/* Returns 1 if the loop exits within 2 steps of the iv.
+ * norm_proj means we do not exit the loop. */
+static unsigned simulate_next(ir_tarval **count_tar,
+ ir_tarval *stepped, ir_tarval *step_tar, ir_tarval *end_tar,
+ ir_relation norm_proj)
+{
+ ir_tarval *next;
+
+ DB((dbg, LEVEL_4, "Loop taken if (stepped)%ld %s (end)%ld ",
+ get_tarval_long(stepped),
+ get_relation_string((norm_proj)),
+ get_tarval_long(end_tar)));
+ DB((dbg, LEVEL_4, "comparing latest value %d\n", loop_info.latest_value));
+
+ /* If current iv does not stay in the loop,
+ * this run satisfied the exit condition. */
+ if (! (tarval_cmp(stepped, end_tar) & norm_proj))
+ return 1;
+
+ DB((dbg, LEVEL_4, "Result: (stepped)%ld IS %s (end)%ld\n",
+ get_tarval_long(stepped),
+ get_relation_string(tarval_cmp(stepped, end_tar)),
+ get_tarval_long(end_tar)));
+
+ /* next step */
+ if (is_Add(loop_info.add))
+ next = tarval_add(stepped, step_tar);
+ else
+ /* sub */
+ next = tarval_sub(stepped, step_tar, get_irn_mode(loop_info.end_val));
+
+ DB((dbg, LEVEL_4, "Loop taken if %ld %s %ld ",
+ get_tarval_long(next),
+ get_relation_string(norm_proj),
+ get_tarval_long(end_tar)));
+ DB((dbg, LEVEL_4, "comparing latest value %d\n", loop_info.latest_value));
+
+ /* Increase steps. */
+ *count_tar = tarval_add(*count_tar, get_tarval_one(get_tarval_mode(*count_tar)));
+
+ /* Next has to fail the loop condition, or we will never exit. */
+ if (! (tarval_cmp(next, end_tar) & norm_proj))
+ return 1;
+ else
+ return 0;
}
-/* Initialization and */
-static void init_analyze(ir_loop *loop)
+/* Check if loop meets requirements for a 'simple loop':
+ * - Exactly one cf out
+ * - Allowed calls
+ * - Max nodes after unrolling
+ * - tail-controlled
+ * - exactly one be
+ * - cmp
+ * Returns Projection of cmp node or NULL; */
+static ir_node *is_simple_loop(void)
{
- /* Init new for every loop */
- cur_loop = loop;
+ int arity, i;
+ ir_node *loop_block, *exit_block, *projx, *cond, *cmp;
- loop_cf_head = NULL;
- loop_cf_head_valid = 1;
- loop_inv_head = NULL;
- loop_peeled_head = NULL;
+ /* Maximum of one condition, and no endless loops. */
+ if (loop_info.cf_outs != 1)
+ return NULL;
- loop_info.outs = 0;
- loop_info.calls = 0;
- loop_info.loads = 0;
- loop_info.blocks = 0;
+ DB((dbg, LEVEL_4, "1 loop exit\n"));
- DB((dbg, LEVEL_2, " >>>> current loop includes node %ld <<<\n", get_irn_node_nr(get_loop_node(loop, 0))));
+ /* Calculate maximum unroll_nr keeping node count below limit. */
+ loop_info.max_unroll = (int)((double)opt_params.max_unrolled_loop_size / (double)loop_info.nodes);
+ if (loop_info.max_unroll < 2) {
+ ++stats.too_large;
+ return NULL;
+ }
- irg_walk_graph(current_ir_graph, get_loop_info, NULL, NULL);
+ DB((dbg, LEVEL_4, "maximum unroll factor %u, to not exceed node limit \n",
+ opt_params.max_unrolled_loop_size));
- /* RETURN if there is no valid head */
- if (!loop_cf_head || !loop_cf_head_valid) {
- DB((dbg, LEVEL_2, "No valid loop head. Nothing done.\n"));
- return;
+ arity = get_irn_arity(loop_head);
+ /* RETURN if we have more than 1 be. */
+ /* Get my backedges without alien bes. */
+ loop_block = NULL;
+ for (i = 0; i < arity; ++i) {
+ ir_node *pred = get_irn_n(loop_head, i);
+ if (is_own_backedge(loop_head, i)) {
+ if (loop_block)
+ /* Our simple loops may have only one backedge. */
+ return NULL;
+ else {
+ loop_block = get_nodes_block(pred);
+ loop_info.be_src_pos = i;
+ }
+ }
}
- if (enable_peeling)
- loop_peeling();
+ DB((dbg, LEVEL_4, "loop has 1 own backedge.\n"));
- if (enable_inversion)
- loop_inversion();
- if (enable_unrolling)
- loop_unrolling();
+ exit_block = get_nodes_block(loop_info.cf_out.pred);
+ /* The loop has to be tail-controlled.
+ * This can be changed/improved,
+ * but we would need a duff iv. */
+ if (exit_block != loop_block)
+ return NULL;
-#if 0
- /* RETURN if there is a call in the loop */
- if (loop_info.calls)
- return;
-#endif
+ DB((dbg, LEVEL_4, "tail-controlled loop.\n"));
+
+ /* find value on which loop exit depends */
+ projx = loop_info.cf_out.pred;
+ cond = get_irn_n(projx, 0);
+ cmp = get_irn_n(cond, 0);
+
+ if (!is_Cmp(cmp))
+ return NULL;
- DB((dbg, LEVEL_2, " <<<< end of loop with node %ld >>>>\n", get_irn_node_nr(get_loop_node(loop, 0))));
+ DB((dbg, LEVEL_5, "projection is %s\n", get_relation_string(get_Cmp_relation(cmp))));
+
+ switch(get_Proj_proj(projx)) {
+ case pn_Cond_false:
+ loop_info.exit_cond = 0;
+ break;
+ case pn_Cond_true:
+ loop_info.exit_cond = 1;
+ break;
+ default:
+ panic("Cond Proj_proj other than true/false");
+ }
+
+ DB((dbg, LEVEL_4, "Valid Cmp.\n"));
+ return cmp;
}
-/* Find most inner loops and send them to analyze_loop */
-static void find_most_inner_loop(ir_loop *loop)
+/* Returns 1 if all nodes are mode_Iu or mode_Is. */
+static unsigned are_mode_I(ir_node *n1, ir_node* n2, ir_node *n3)
{
- /* descend into sons */
- int sons = get_loop_n_sons(loop);
-
- if (sons == 0) {
- loop_element elem;
- int el_n, i;
-
- el_n = get_loop_n_elements(loop);
-
- for (i=0; i < el_n; ++i) {
- elem = get_loop_element(loop, i);
- /* We can only rely on the blocks,
- * as the loop attribute of the nodes seems not to be set. */
- if (is_ir_node(elem.kind) && is_Block(elem.node)) {
- ARR_APP1(ir_node *, loops, elem.node);
- DB((dbg, LEVEL_5, "Found most inner loop (contains block %+F)\n", elem.node));
- break;
- }
- }
+ ir_mode *m1 = get_irn_mode(n1);
+ ir_mode *m2 = get_irn_mode(n2);
+ ir_mode *m3 = get_irn_mode(n3);
+
+ if ((m1 == mode_Iu && m2 == mode_Iu && m3 == mode_Iu) ||
+ (m1 == mode_Is && m2 == mode_Is && m3 == mode_Is))
+ return 1;
+ else
+ return 0;
+}
+
+/* Checks if cur_loop is a simple tail-controlled counting loop
+ * with start and end value loop invariant, step constant. */
+static unsigned get_unroll_decision_invariant(void)
+{
+
+ ir_node *projres, *loop_condition, *iteration_path;
+ unsigned success;
+ ir_tarval *step_tar;
+ ir_mode *mode;
+
+
+ /* RETURN if loop is not 'simple' */
+ projres = is_simple_loop();
+ if (projres == NULL)
+ return 0;
+
+ /* Use a minimal size for the invariant unrolled loop,
+ * as duffs device produces overhead */
+ if (loop_info.nodes < opt_params.invar_unrolling_min_size)
+ return 0;
+
+ loop_condition = get_irn_n(projres, 0);
+
+ success = get_invariant_pred(loop_condition, &loop_info.end_val, &iteration_path);
+ DB((dbg, LEVEL_4, "pred invar %d\n", success));
+
+ if (! success)
+ return 0;
+
+ DB((dbg, LEVEL_4, "Invariant End_val %N, other %N\n", loop_info.end_val, iteration_path));
+
+ /* We may find the add or the phi first.
+ * Until now we only have end_val. */
+ if (is_Add(iteration_path) || is_Sub(iteration_path)) {
+
+ loop_info.add = iteration_path;
+ DB((dbg, LEVEL_4, "Case 1: Got add %N (maybe not sane)\n", loop_info.add));
+
+ /* Preds of the add should be step and the iteration_phi */
+ success = get_const_pred(loop_info.add, &loop_info.step, &loop_info.iteration_phi);
+ if (! success)
+ return 0;
+
+ DB((dbg, LEVEL_4, "Got step %N\n", loop_info.step));
+
+ if (! is_Phi(loop_info.iteration_phi))
+ return 0;
+
+ DB((dbg, LEVEL_4, "Got phi %N\n", loop_info.iteration_phi));
+
+ /* Find start_val.
+ * Does necessary sanity check of add, if it is already set. */
+ success = get_start_and_add(loop_info.iteration_phi, invariant);
+ if (! success)
+ return 0;
+
+ DB((dbg, LEVEL_4, "Got start A %N\n", loop_info.start_val));
+
+ } else if (is_Phi(iteration_path)) {
+ ir_node *new_iteration_phi;
+
+ loop_info.iteration_phi = iteration_path;
+ DB((dbg, LEVEL_4, "Case 2: Got phi %N\n", loop_info.iteration_phi));
+
+ /* Find start_val and add-node.
+ * Does necessary sanity check of add, if it is already set. */
+ success = get_start_and_add(loop_info.iteration_phi, invariant);
+ if (! success)
+ return 0;
+
+ DB((dbg, LEVEL_4, "Got start B %N\n", loop_info.start_val));
+ DB((dbg, LEVEL_4, "Got add or sub %N\n", loop_info.add));
+
+ success = get_const_pred(loop_info.add, &loop_info.step, &new_iteration_phi);
+ if (! success)
+ return 0;
+
+ DB((dbg, LEVEL_4, "Got step (B) %N\n", loop_info.step));
+
+ if (loop_info.iteration_phi != new_iteration_phi)
+ return 0;
+
} else {
- int s;
- for(s=0; s<sons; s++) {
- find_most_inner_loop(get_loop_son(loop, s));
+ return 0;
+ }
+
+ mode = get_irn_mode(loop_info.end_val);
+
+ DB((dbg, LEVEL_4, "start %N, end %N, step %N\n",
+ loop_info.start_val, loop_info.end_val, loop_info.step));
+
+ if (mode != mode_Is && mode != mode_Iu)
+ return 0;
+
+ /* TODO necessary? */
+ if (!are_mode_I(loop_info.start_val, loop_info.step, loop_info.end_val))
+ return 0;
+
+ DB((dbg, LEVEL_4, "mode integer\n"));
+
+ step_tar = get_Const_tarval(loop_info.step);
+
+ if (tarval_is_null(step_tar)) {
+ /* TODO Might be worth a warning. */
+ return 0;
+ }
+
+ DB((dbg, LEVEL_4, "step is not 0\n"));
+
+ create_duffs_block();
+
+ return loop_info.max_unroll;
+}
+
+/* Returns unroll factor,
+ * given maximum unroll factor and number of loop passes. */
+static unsigned get_preferred_factor_constant(ir_tarval *count_tar)
+{
+ ir_tarval *tar_6, *tar_5, *tar_4, *tar_3, *tar_2;
+ unsigned prefer;
+ ir_mode *mode = get_irn_mode(loop_info.end_val);
+
+ tar_6 = new_tarval_from_long(6, mode);
+ tar_5 = new_tarval_from_long(5, mode);
+ tar_4 = new_tarval_from_long(4, mode);
+ tar_3 = new_tarval_from_long(3, mode);
+ tar_2 = new_tarval_from_long(2, mode);
+
+ /* loop passes % {6, 5, 4, 3, 2} == 0 */
+ if (tarval_is_null(tarval_mod(count_tar, tar_6)))
+ prefer = 6;
+ else if (tarval_is_null(tarval_mod(count_tar, tar_5)))
+ prefer = 5;
+ else if (tarval_is_null(tarval_mod(count_tar, tar_4)))
+ prefer = 4;
+ else if (tarval_is_null(tarval_mod(count_tar, tar_3)))
+ prefer = 3;
+ else if (tarval_is_null(tarval_mod(count_tar, tar_2)))
+ prefer = 2;
+ else {
+ /* gcd(max_unroll, count_tar) */
+ int a = loop_info.max_unroll;
+ int b = (int)get_tarval_long(count_tar);
+ int c;
+
+ DB((dbg, LEVEL_4, "gcd of max_unroll %d and count_tar %d: ", a, b));
+
+ do {
+ c = a % b;
+ a = b; b = c;
+ } while( c != 0);
+
+ DB((dbg, LEVEL_4, "%d\n", a));
+ return a;
+ }
+
+ DB((dbg, LEVEL_4, "preferred unroll factor %d\n", prefer));
+
+ /*
+ * If our preference is greater than the allowed unroll factor
+ * we either might reduce the preferred factor and prevent a duffs device block,
+ * or create a duffs device block, from which in this case (constants only)
+ * we know the startloop at compiletime.
+ * The latter yields the following graphs.
+ * but for code generation we would want to use graph A.
+ * The graphs are equivalent. So, we can only reduce the preferred factor.
+ * A) B)
+ * PreHead PreHead
+ * | ,--. | ,--.
+ * \ Loop1 \ Loop2 \
+ * \ | | / | |
+ * Loop2 / / Loop1 /
+ * | `--' | `--'
+ */
+
+ if (prefer <= loop_info.max_unroll)
+ return prefer;
+ else {
+ switch(prefer) {
+ case 6:
+ if (loop_info.max_unroll >= 3)
+ return 3;
+ else if (loop_info.max_unroll >= 2)
+ return 2;
+ else
+ return 0;
+
+ case 4:
+ if (loop_info.max_unroll >= 2)
+ return 2;
+ else
+ return 0;
+
+ default:
+ return 0;
}
}
}
-/**
- * Assure preconditions are met and go through all loops.
- */
-void loop_optimization(ir_graph *irg)
+/* Check if cur_loop is a simple counting loop.
+ * Start, step and end are constants.
+ * TODO The whole constant case should use procedures similar to
+ * the invariant case, as they are more versatile. */
+/* TODO split. */
+static unsigned get_unroll_decision_constant(void)
{
- ir_loop *loop;
- int i, sons, nr;
+ ir_node *cmp, *iteration_path;
+ unsigned success, is_latest_val;
+ ir_tarval *start_tar, *end_tar, *step_tar, *diff_tar, *count_tar;
+ ir_tarval *stepped;
+ ir_relation proj_proj, norm_proj;
+ ir_mode *mode;
+
+ /* RETURN if loop is not 'simple' */
+ cmp = is_simple_loop();
+ if (cmp == NULL)
+ return 0;
- /* Init */
- link_node_state_list = NULL;
- set_current_ir_graph(irg);
+ /* One in of the loop condition needs to be loop invariant. => end_val
+ * The other in is assigned by an add. => add
+ * The add uses a loop invariant value => step
+ * and a phi with a loop invariant start_val and the add node as ins.
+
+ ^ ^
+ | | .-,
+ | Phi |
+ \ | |
+ ^ Add |
+ \ | \__|
+ cond
+ /\
+ */
+
+ success = get_const_pred(cmp, &loop_info.end_val, &iteration_path);
+ if (! success)
+ return 0;
- /* preconditions */
- edges_assure(irg);
- assure_irg_outs(irg);
+ DB((dbg, LEVEL_4, "End_val %N, other %N\n", loop_info.end_val, iteration_path));
- /* NOTE: sets only the loop attribute of blocks, not nodes */
- /* NOTE: Kills links */
- assure_cf_loop(irg);
+ /* We may find the add or the phi first.
+ * Until now we only have end_val. */
+ if (is_Add(iteration_path) || is_Sub(iteration_path)) {
- ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
- collect_phiprojs(irg);
- ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
+ /* We test against the latest value of the iv. */
+ is_latest_val = 1;
- /* allocate node_info for additional information on nodes */
- ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
- irg_walk_graph(current_ir_graph, alloc_node_info, NULL, NULL);
+ loop_info.add = iteration_path;
+ DB((dbg, LEVEL_4, "Case 2: Got add %N (maybe not sane)\n", loop_info.add));
- loop = get_irg_loop(irg);
- sons = get_loop_n_sons(loop);
+ /* Preds of the add should be step and the iteration_phi */
+ success = get_const_pred(loop_info.add, &loop_info.step, &loop_info.iteration_phi);
+ if (! success)
+ return 0;
+
+ DB((dbg, LEVEL_4, "Got step %N\n", loop_info.step));
+
+ if (! is_Phi(loop_info.iteration_phi))
+ return 0;
+
+ DB((dbg, LEVEL_4, "Got phi %N\n", loop_info.iteration_phi));
+
+ /* Find start_val.
+ * Does necessary sanity check of add, if it is already set. */
+ success = get_start_and_add(loop_info.iteration_phi, constant);
+ if (! success)
+ return 0;
+
+ DB((dbg, LEVEL_4, "Got start %N\n", loop_info.start_val));
+
+ } else if (is_Phi(iteration_path)) {
+ ir_node *new_iteration_phi;
+
+ /* We compare with the value the iv had entering this run. */
+ is_latest_val = 0;
+
+ loop_info.iteration_phi = iteration_path;
+ DB((dbg, LEVEL_4, "Case 1: Got phi %N \n", loop_info.iteration_phi));
+
+ /* Find start_val and add-node.
+ * Does necessary sanity check of add, if it is already set. */
+ success = get_start_and_add(loop_info.iteration_phi, constant);
+ if (! success)
+ return 0;
+
+ DB((dbg, LEVEL_4, "Got start %N\n", loop_info.start_val));
+ DB((dbg, LEVEL_4, "Got add or sub %N\n", loop_info.add));
- loops = NEW_ARR_F(ir_node *, 0);
+ success = get_const_pred(loop_info.add, &loop_info.step, &new_iteration_phi);
+ if (! success)
+ return 0;
- for (nr = 0; nr < sons; ++nr) {
- find_most_inner_loop(get_loop_son(loop, nr));
+ DB((dbg, LEVEL_4, "Got step %N\n", loop_info.step));
+
+ if (loop_info.iteration_phi != new_iteration_phi)
+ return 0;
+
+ } else {
+ /* RETURN */
+ return 0;
}
-/* TODO Keep backedges during optimization to avoid
- * this ugly allocation and deallocation.
- * (set_irn_in seems to destroy them)
- */
-#if 0
- for (i = 0; i < ARR_LEN(loops); ++i) {
- ir_loop *loop;
+ mode = get_irn_mode(loop_info.end_val);
+
+ DB((dbg, LEVEL_4, "start %N, end %N, step %N\n",
+ loop_info.start_val, loop_info.end_val, loop_info.step));
- loop = get_irn_loop(loops[i]);
- init_analyze(loop);
+ if (mode != mode_Is && mode != mode_Iu)
+ return 0;
+
+ /* TODO necessary? */
+ if (!are_mode_I(loop_info.start_val, loop_info.step, loop_info.end_val))
+ return 0;
+
+ DB((dbg, LEVEL_4, "mode integer\n"));
+
+ end_tar = get_Const_tarval(loop_info.end_val);
+ start_tar = get_Const_tarval(loop_info.start_val);
+ step_tar = get_Const_tarval(loop_info.step);
+
+ if (tarval_is_null(step_tar))
+ /* TODO Might be worth a warning. */
+ return 0;
+
+ DB((dbg, LEVEL_4, "step is not 0\n"));
+
+ if ((!tarval_is_negative(step_tar)) ^ (!is_Sub(loop_info.add)))
+ loop_info.decreasing = 1;
+
+ diff_tar = tarval_sub(end_tar, start_tar, mode);
+
+	/* We need at least count_tar steps to get close to end_val, maybe more.
+	 * There is no way we have already gone too many steps.
+	 * This represents the 'latest value'.
+	 * (Whether the condition checks against the latest value is checked later.) */
+ count_tar = tarval_div(diff_tar, step_tar);
+
+	/* The iv will not pass end_val (except on overflow).
+	 * Nothing is done, as unrolling would yield no advantage. */
+ if (tarval_is_negative(count_tar)) {
+ DB((dbg, LEVEL_4, "Loop is endless or never taken."));
+ /* TODO Might be worth a warning. */
+ return 0;
}
-#else
- /* This part is useful for testing
- * or has to be used if the backedge information is destroyed.
- * Which is the case at the moment, because the backedge information gets lost
- * before inversion_fix_heads/unrolling_fix_cf, which results in bads.
- * NOTE!: Testsuite runs successfully nevertheless...
- */
- /**
- * assure_cf_loop() creates a completely new loop tree.
- * Thus we cannot optimize a loop, assure_cf_loop() and continue with the next loop,
- * as the next loop must be searched because it is not distinguishable from the
- * already done loops.
- * The links of the loops are also not available anymore (to store a "loop done" flag).
- * Therefore we save a block per loop.
- * NOTE: We rely on the loop optimizations not to remove any block from the loop.
- * Later, we fetch the blocks loop attribute, as it is updated by assure_cf_loop.
- */
- for (i = 0; i < ARR_LEN(loops); ++i) {
- ir_loop *loop;
+ ++stats.u_simple_counting_loop;
- free_node_info();
- ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
+ loop_info.latest_value = is_latest_val;
- edges_assure(current_ir_graph);
- assure_irg_outs(current_ir_graph);
+ /* TODO split here
+ if (! is_simple_counting_loop(&count_tar))
+ return 0;
+ */
+
+ /* stepped can be negative, if step < 0 */
+ stepped = tarval_mul(count_tar, step_tar);
+
+ /* step as close to end_val as possible, */
+ /* |stepped| <= |end_tar|, and dist(stepped, end_tar) is smaller than a step. */
+ if (is_Sub(loop_info.add))
+ stepped = tarval_sub(start_tar, stepped, mode_Is);
+ else
+ stepped = tarval_add(start_tar, stepped);
+
+ DB((dbg, LEVEL_4, "stepped to %ld\n", get_tarval_long(stepped)));
+
+ proj_proj = get_Cmp_relation(cmp);
+ /* Assure that norm_proj is the stay-in-loop case. */
+ if (loop_info.exit_cond == 1)
+ norm_proj = get_negated_relation(proj_proj);
+ else
+ norm_proj = proj_proj;
+
+ DB((dbg, LEVEL_4, "normalized projection %s\n", get_relation_string(norm_proj)));
+ /* Executed at most once (stay in counting loop if a Eq b) */
+ if (norm_proj == ir_relation_equal)
+ /* TODO Might be worth a warning. */
+ return 0;
- /* NOTE: sets only the loop attribute of blocks */
- /* NOTE: Kills links */
- assure_cf_loop(current_ir_graph);
+ /* calculates next values and increases count_tar according to it */
+ success = simulate_next(&count_tar, stepped, step_tar, end_tar, norm_proj);
+ if (! success)
+ return 0;
+
+ /* We run loop once more, if we compare to the
+ * not yet in-/decreased iv. */
+ if (is_latest_val == 0) {
+ DB((dbg, LEVEL_4, "condition uses not latest iv value\n"));
+ count_tar = tarval_add(count_tar, get_tarval_one(mode));
+ }
- irg_walk_graph(current_ir_graph, alloc_node_info, NULL, NULL);
- ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
+ DB((dbg, LEVEL_4, "loop taken %ld times\n", get_tarval_long(count_tar)));
- /* Get loop from block */
- loop = get_irn_loop(loops[i]);
- init_analyze(loop);
+ /* Assure the loop is taken at least 1 time. */
+ if (tarval_is_null(count_tar)) {
+ /* TODO Might be worth a warning. */
+ return 0;
}
-#endif
- /* Free */
- DEL_ARR_F(loops);
+ loop_info.count_tar = count_tar;
+ return get_preferred_factor_constant(count_tar);
+}
+
+/**
+ * Loop unrolling
+ */
+static void unroll_loop(void)
+{
+
+ if (! (loop_info.nodes > 0))
+ return;
+
+ if (loop_info.nodes > opt_params.max_unrolled_loop_size) {
+ DB((dbg, LEVEL_2, "Nodes %d > allowed nodes %d\n",
+ loop_info.nodes, opt_params.max_unrolled_loop_size));
+ ++stats.too_large;
+ return;
+ }
+
+ if (loop_info.calls > 0) {
+ DB((dbg, LEVEL_2, "Calls %d > allowed calls 0\n",
+ loop_info.calls));
+ ++stats.calls_limit;
+ return;
+ }
+
+ unroll_nr = 0;
+
+ /* get_unroll_decision_constant and invariant are completely
+ * independent for flexibility.
+ * Some checks may be performed twice. */
+
+ /* constant case? */
+ if (opt_params.allow_const_unrolling)
+ unroll_nr = get_unroll_decision_constant();
+ if (unroll_nr > 1) {
+ loop_info.unroll_kind = constant;
+
+ } else {
+ /* invariant case? */
+ if (opt_params.allow_invar_unrolling)
+ unroll_nr = get_unroll_decision_invariant();
+ if (unroll_nr > 1)
+ loop_info.unroll_kind = invariant;
+ }
+
+ DB((dbg, LEVEL_2, " *** Unrolling %d times ***\n", unroll_nr));
+
+ if (unroll_nr > 1) {
+ loop_entries = NEW_ARR_F(entry_edge, 0);
+
+ /* Get loop outs */
+ irg_walk_graph(current_ir_graph, get_loop_entries, NULL, NULL);
+
+ if (loop_info.unroll_kind == constant) {
+ if ((int)get_tarval_long(loop_info.count_tar) == unroll_nr)
+ loop_info.needs_backedge = 0;
+ else
+ loop_info.needs_backedge = 1;
+ } else {
+ loop_info.needs_backedge = 1;
+ }
+
+		/* Use a nodemap to keep copies of nodes from the condition chain. */
+ ir_nodemap_init(&map, current_ir_graph);
+ obstack_init(&obst);
+
+ /* Copies the loop */
+ copy_loop(loop_entries, unroll_nr - 1);
+
+ /* Line up the floating copies. */
+ place_copies(unroll_nr - 1);
+
+ /* Remove phis with 1 in
+ * If there were no nested phis, this would not be necessary.
+ * Avoiding the creation in the first place
+ * leads to complex special cases. */
+ irg_walk_graph(current_ir_graph, correct_phis, NULL, NULL);
+
+ if (loop_info.unroll_kind == constant)
+ ++stats.constant_unroll;
+ else
+ ++stats.invariant_unroll;
+
+ clear_irg_properties(current_ir_graph, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE);
+
+ DEL_ARR_F(loop_entries);
+ obstack_free(&obst, NULL);
+ ir_nodemap_destroy(&map);
+ }
- free_node_info();
- ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
- ir_free_resources(irg, IR_RESOURCE_PHI_LIST);
}
-void do_loop_unrolling(ir_graph *irg)
+/* Analyzes the loop, and checks if size is within allowed range.
+ * Decides if loop will be processed. */
+static void init_analyze(ir_graph *irg, ir_loop *loop)
{
- enable_unrolling = 1;
- enable_peeling = 0;
- enable_inversion = 0;
+ cur_loop = loop;
- DB((dbg, LEVEL_2, " >>> unrolling (Startnode %ld) <<<\n",
- get_irn_node_nr(get_irg_start(irg))));
+ loop_head = NULL;
+ loop_head_valid = true;
- loop_optimization(irg);
+ /* Reset loop info */
+ memset(&loop_info, 0, sizeof(loop_info_t));
+
+ DB((dbg, LEVEL_1, " >>>> current loop %ld <<<\n",
+ get_loop_loop_nr(loop)));
+
+	/* Collect loop information: head, node counts. */
+ irg_walk_graph(irg, get_loop_info, NULL, NULL);
+
+ /* RETURN if there is no valid head */
+ if (!loop_head || !loop_head_valid) {
+ DB((dbg, LEVEL_1, "No valid loop head. Nothing done.\n"));
+ return;
+ } else {
+ DB((dbg, LEVEL_1, "Loophead: %N\n", loop_head));
+ }
+
+ if (loop_info.branches > opt_params.max_branches) {
+ DB((dbg, LEVEL_1, "Branches %d > allowed branches %d\n",
+ loop_info.branches, opt_params.max_branches));
+ ++stats.calls_limit;
+ return;
+ }
+
+ switch (loop_op) {
+ case loop_op_inversion:
+ loop_inversion(irg);
+ break;
- DB((dbg, LEVEL_2, " >>> unrolling done (Startnode %ld) <<<\n",
- get_irn_node_nr(get_irg_start(irg))));
+ case loop_op_unrolling:
+ unroll_loop();
+ break;
+
+ default:
+ panic("Loop optimization not implemented.");
+ }
+ DB((dbg, LEVEL_1, " <<<< end of loop with node %ld >>>>\n",
+ get_loop_loop_nr(loop)));
}
-void do_loop_inversion(ir_graph *irg)
+/* Find innermost loops and add them to loops. */
+static void find_innermost_loop(ir_loop *loop)
{
- enable_unrolling = 0;
- enable_peeling = 0;
- enable_inversion = 1;
+ bool had_sons = false;
+ size_t n_elements = get_loop_n_elements(loop);
+ size_t e;
+
+ for (e = 0; e < n_elements; ++e) {
+ loop_element element = get_loop_element(loop, e);
+ if (*element.kind == k_ir_loop) {
+ find_innermost_loop(element.son);
+ had_sons = true;
+ }
+ }
- DB((dbg, LEVEL_2, " >>> inversion (Startnode %ld) <<<\n",
- get_irn_node_nr(get_irg_start(irg))));
+ if (!had_sons) {
+ ARR_APP1(ir_loop*, loops, loop);
+ }
+}
+
+static void set_loop_params(void)
+{
+ opt_params.max_loop_size = 100;
+ opt_params.depth_adaption = -50;
+ opt_params.count_phi = true;
+ opt_params.count_proj = false;
+ opt_params.allowed_calls = 0;
+
+ opt_params.max_cc_size = 5;
- loop_optimization(irg);
- DB((dbg, LEVEL_2, " >>> inversion done (Startnode %ld) <<<\n",
- get_irn_node_nr(get_irg_start(irg))));
+ opt_params.allow_const_unrolling = true;
+ opt_params.allow_invar_unrolling = false;
+
+ opt_params.invar_unrolling_min_size = 20;
+ opt_params.max_unrolled_loop_size = 400;
+ opt_params.max_branches = 9999;
}
-void do_loop_peeling(ir_graph *irg)
+/* Assure preconditions are met and go through all loops. */
+void loop_optimization(ir_graph *irg)
{
- enable_unrolling = 0;
- enable_peeling = 1;
- enable_inversion = 0;
+ ir_loop *loop;
+ size_t i;
+ size_t n_elements;
+
+ assure_irg_properties(irg,
+ IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES
+ | IR_GRAPH_PROPERTY_CONSISTENT_OUTS
+ | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
+
+ set_loop_params();
+
+ /* Reset stats for this procedure */
+ reset_stats();
- DB((dbg, LEVEL_2, " >>> peeling (Startnode %ld) <<<\n",
- get_irn_node_nr(get_irg_start(irg))));
+ /* Preconditions */
+ set_current_ir_graph(irg);
+
+ ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
+ collect_phiprojs(irg);
+ loop = get_irg_loop(irg);
+
+ loops = NEW_ARR_F(ir_loop *, 0);
+ /* List all inner loops */
+ n_elements = get_loop_n_elements(loop);
+ for (i = 0; i < n_elements; ++i) {
+ loop_element element = get_loop_element(loop, i);
+ if (*element.kind != k_ir_loop)
+ continue;
+ find_innermost_loop(element.son);
+ }
+
+ /* Set all links to NULL */
+ irg_walk_graph(irg, reset_link, NULL, NULL);
+
+ for (i = 0; i < ARR_LEN(loops); ++i) {
+ ir_loop *loop = loops[i];
+
+ ++stats.loops;
+
+ /* Analyze and handle loop */
+ init_analyze(irg, loop);
+
+ /* Copied blocks do not have their phi list yet */
+ collect_phiprojs(irg);
+
+ /* Set links to NULL
+ * TODO Still necessary? */
+ irg_walk_graph(irg, reset_link, NULL, NULL);
+ }
+
+ print_stats();
+
+ DEL_ARR_F(loops);
+ ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
+
+ confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_NONE);
+}
+
+void do_loop_unrolling(ir_graph *irg)
+{
+ loop_op = loop_op_unrolling;
loop_optimization(irg);
+}
- DB((dbg, LEVEL_2, " >>> peeling done (Startnode %ld) <<<\n",
- get_irn_node_nr(get_irg_start(irg))));
+void do_loop_inversion(ir_graph *irg)
+{
+ loop_op = loop_op_inversion;
+ loop_optimization(irg);
+}
+void do_loop_peeling(ir_graph *irg)
+{
+ loop_op = loop_op_peeling;
+ loop_optimization(irg);
}
ir_graph_pass_t *loop_inversion_pass(const char *name)