+ const dfs_t *dfs = pi->bel->dfs;
+ int pp = dfs_get_post_num(dfs, pi->bl);
+ int pq = dfs_get_post_num(dfs, qi->bl);
+ return pq - pp;
+ }
+
+ diff = qi->exec_freq - pi->exec_freq;
+ return (diff > 0) - (diff < 0);
+}
+
+/*
+ ____ _ ___
+ | __ ) _ __(_)_ __ __ _ |_ _|_ __
+ | _ \| '__| | '_ \ / _` | | || '_ \
+ | |_) | | | | | | | (_| | | || | | |
+ |____/|_| |_|_| |_|\__, | |___|_| |_|
+ |___/
+
+ Data structures to represent bring-in variables.
+*/
+
+ /**
+  * A bring-in: a value that must be made live (reloaded or rematerialized)
+  * at the entry of a block, together with its first user there.
+  */
+ struct _bring_in_t {
+ 	ir_node *irn;               /**< The node to bring in. */
+ 	block_info_t *bi;           /**< The block to which bring in should happen. */
+ 	int pressure_so_far;        /**< The maximal pressure till the first use of irn in bl. */
+ 	ir_node *first_use;         /**< The first user of irn in bl. */
+ 	sched_timestep_t use_step;  /**< Schedule step of the first use. */
+ 
+ 	/* Must be an unsigned bit-field: the signedness of a plain `int`
+ 	 * bit-field is implementation-defined, and a signed 1-bit field reads
+ 	 * a stored 1 back as -1, which would invert the is_remat ordering
+ 	 * in bring_in_cmp(). */
+ 	unsigned is_remat : 1;      /**< Is rematerializable. */
+ 	int sect_pressure;          /**< Offset to maximum pressure in block. */
+ 	struct list_head list;      /**< Links this bring-in into its block's br_head list. */
+ 	struct list_head sect_list; /**< Section list link — presumably chains the bring-ins of one section; verify against users. */
+ 	bring_in_t *sect_head;      /**< Head bring-in of this section (initially the node itself). */
+ };
+
+ /**
+  * Allocate a new bring-in record for @p irn in block @p bi on the
+  * environment's obstack, fill it from the block's current state and the
+  * given next use, and append it to the block's bring-in list.
+  *
+  * @param bi   The block info of the block needing the bring in.
+  * @param irn  The value to bring in.
+  * @param use  The first next use of @p irn inside that block.
+  * @return     The freshly initialized bring-in record.
+  */
+ static inline bring_in_t *new_bring_in(block_info_t *bi, ir_node *irn, const next_use_t *use)
+ {
+ 	bring_in_t *res = obstack_alloc(&bi->bel->ob, sizeof(res[0]));
+ 
+ 	res->bi              = bi;
+ 	res->irn             = irn;
+ 	res->first_use       = use->irn;
+ 	res->use_step        = use->step;
+ 	res->pressure_so_far = bi->pressure;
+ 	res->sect_pressure   = bi->front_pressure;
+ 	res->is_remat        = be_is_rematerializable(bi->bel->senv, irn, use->irn);
+ 	/* Each bring-in starts out as the head of its own section. */
+ 	res->sect_head       = res;
+ 
+ 	INIT_LIST_HEAD(&res->list);
+ 	INIT_LIST_HEAD(&res->sect_list);
+ 	list_add_tail(&res->list, &bi->br_head);
+ 
+ 	return res;
+ }
+
+ /**
+  * qsort() comparator ordering bring-ins by processing priority:
+  * non-remat before remat, then (same block) by schedule step of the first
+  * use, then by descending block frequency, then by descending frequency of
+  * the defining block, finally by reverse DFS post order as a tie breaker.
+  *
+  * @param a  Pointer to a bring_in_t* (qsort convention).
+  * @param b  Pointer to a bring_in_t* (qsort convention).
+  * @return   <0, 0 or >0 per the ordering above.
+  */
+ static int bring_in_cmp(const void *a, const void *b)
+ {
+ 	const bring_in_t *p = *(const bring_in_t * const *) a;
+ 	const bring_in_t *q = *(const bring_in_t * const *) b;
+ 	double fp, fq;
+ 
+ 	/* if one of both is a remat node, it will be done after the other.
+ 	 * Normalize to 0/1 before subtracting: if is_remat is a signed 1-bit
+ 	 * bit-field it holds 0/-1, and raw subtraction would invert the order. */
+ 	if (p->is_remat != q->is_remat)
+ 		return (p->is_remat != 0) - (q->is_remat != 0);
+ 
+ 	/* in the same block, the one further in the front has to be processed first!
+ 	 * Otherwise the front_pressure 'trick' is not exact.
+ 	 * Use the sign idiom instead of plain subtraction: if sched_timestep_t
+ 	 * is unsigned, the difference converted to int could wrap and misorder. */
+ 	if (p->bi == q->bi)
+ 		return (p->use_step > q->use_step) - (p->use_step < q->use_step);
+ 
+ 	fp = p->bi->exec_freq;
+ 	fq = q->bi->exec_freq;
+ 
+ 	/* if both have the same frequency, inspect the frequency of the definition */
+ 	if (fp == fq) {
+ 		double fdp = get_block_info(get_nodes_block(p->irn))->exec_freq;
+ 		double fdq = get_block_info(get_nodes_block(q->irn))->exec_freq;
+ 
+ 		/* if the defs of both have the same freq, we go for reverse dfs post order. */
+ 		if (fdp == fdq) {
+ 			const dfs_t *dfs = p->bi->bel->dfs;
+ 			int pp = dfs_get_post_num(dfs, p->bi->bl);
+ 			int pq = dfs_get_post_num(dfs, q->bi->bl);
+ 			return pq - pp;
+ 		}
+ 
+ 		return (fdq > fdp) - (fdq < fdp);
+ 	}
+ 
+ 	return (fq > fp) - (fq < fp);
+ }
+
+static inline unsigned get_curr_distance(block_info_t *bi, const ir_node *irn, int is_usage)
+{
+ belady_env_t *env = bi->bel;
+ sched_timestep_t curr_step = sched_get_time_step(env->instr);
+ next_use_t *use = get_current_use(bi, irn);
+ int flags = arch_irn_get_flags(irn);
+
+ assert(!arch_irn_is_ignore(irn));
+
+ /* We have to keep non-spillable nodes in the working set */