keep_alive_barrier_operand used the wrong block; schedule Keeps behind Phi sequences
[libfirm] / ir / be / bepeephole.c
/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief       Peephole optimisation framework; keeps track of which
 *              registers contain which values
 * @author      Matthias Braun
 * @version     $Id$
 */
#include "config.h"

#include "array_t.h"
#include "bepeephole.h"

#include "iredges_t.h"
#include "irgwalk.h"
#include "irprintf.h"
#include "ircons.h"
#include "irgmod.h"
#include "error.h"

#include "beirg.h"
#include "belive_t.h"
#include "bearch.h"
#include "benode.h"
#include "besched.h"
#include "bemodule.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

static const arch_env_t *arch_env;
static be_lv_t          *lv;
static ir_node          *current_node;
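/** the node whose value currently occupies each register, indexed by
 * register class index and register index; maintained by process_block()
 * while walking a block's schedule */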
ir_node               ***register_values;

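/**
 * Forget the recorded content of the register assigned to the node.
 */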
static void clear_reg_value(ir_node *node)
{
	const arch_register_t       *reg;
	const arch_register_class_t *cls;
	unsigned                     reg_idx;
	unsigned                     cls_idx;

	if (!mode_is_data(get_irn_mode(node)))
		return;

	reg     = arch_get_irn_register(node);
	if (reg == NULL) {
		panic("No register assigned at %+F", node);
	}
	if (reg->type & arch_register_type_virtual)
		return;
	cls     = arch_register_get_class(reg);
	reg_idx = arch_register_get_index(reg);
	cls_idx = arch_register_class_index(cls);

	//assert(register_values[cls_idx][reg_idx] != NULL);
	DB((dbg, LEVEL_1, "Clear Register %s\n", reg->name));
	register_values[cls_idx][reg_idx] = NULL;
}

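/**
 * Record the node as the current content of its assigned register.
 */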
static void set_reg_value(ir_node *node)
{
	const arch_register_t       *reg;
	const arch_register_class_t *cls;
	unsigned                     reg_idx;
	unsigned                     cls_idx;

	if (!mode_is_data(get_irn_mode(node)))
		return;

	reg = arch_get_irn_register(node);
	if (reg == NULL) {
		panic("No register assigned at %+F", node);
	}
	if (reg->type & arch_register_type_virtual)
		return;
	cls     = arch_register_get_class(reg);
	reg_idx = arch_register_get_index(reg);
	cls_idx = arch_register_class_index(cls);

	DB((dbg, LEVEL_1, "Set Register %s: %+F\n", reg->name, node));
	register_values[cls_idx][reg_idx] = node;
}

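/**
 * Clear the recorded register contents for every value defined by the node.
 */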
static void clear_defs(ir_node *node)
{
	/* clear values defined */
	if (get_irn_mode(node) == mode_T) {
		const ir_edge_t *edge;
		foreach_out_edge(node, edge) {
			ir_node *proj = get_edge_src_irn(edge);
			clear_reg_value(proj);
		}
	} else {
		clear_reg_value(node);
	}
}

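/**
 * Record all operands of the node as the current contents of their registers.
 */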
static void set_uses(ir_node *node)
{
	int i, arity;

	/* set values used */
	arity = get_irn_arity(node);
	for (i = 0; i < arity; ++i) {
		ir_node *in = get_irn_n(node, i);
		set_reg_value(in);
	}
}

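/**
 * Must be called by peephole optimisations for every newly created node,
 * so liveness information is computed for it.
 */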
void be_peephole_new_node(ir_node *nw)
{
	be_liveness_introduce(lv, nw);
}

/**
 * Must be called from peephole optimisations before a node is killed
 * and its users are redirected to new_node, so bepeephole can update
 * its internal state.
 *
 * Note: killing a node and rewiring its users is only allowed if new_node
 * produces the same registers as old_node.
 */
static void be_peephole_before_exchange(const ir_node *old_node,
                                        ir_node *new_node)
{
	const arch_register_t       *reg;
	const arch_register_class_t *cls;
	unsigned                     reg_idx;
	unsigned                     cls_idx;

	DB((dbg, LEVEL_1, "About to exchange and kill %+F with %+F\n", old_node, new_node));

	if (current_node == old_node) {
		/* The node about to be processed will be killed. Move to its
		 * scheduling successor, so the backward walk continues with the
		 * killed node's predecessor. */
		current_node = sched_next(current_node);
		assert(!is_Bad(current_node));
	}

	if (!mode_is_data(get_irn_mode(old_node)))
		return;

	reg = arch_get_irn_register(old_node);
	if (reg == NULL) {
		panic("No register assigned at %+F", old_node);
	}
	assert(reg == arch_get_irn_register(new_node) &&
	      "KILLING a node and replacing by different register is not allowed");

	cls     = arch_register_get_class(reg);
	reg_idx = arch_register_get_index(reg);
	cls_idx = arch_register_class_index(cls);

	if (register_values[cls_idx][reg_idx] == old_node) {
		register_values[cls_idx][reg_idx] = new_node;
	}

	be_liveness_remove(lv, old_node);
}

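/**
 * Replace old by nw: update the internal state, remove old from the
 * schedule and redirect all its users to nw.
 */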
void be_peephole_exchange(ir_node *old, ir_node *nw)
{
	be_peephole_before_exchange(old, nw);
	sched_remove(old);
	exchange(old, nw);
	be_peephole_new_node(nw);
}

/**
 * block-walker: run peephole optimization on the given block.
 */
static void process_block(ir_node *block, void *data)
{
	unsigned n_classes;
	unsigned i;
	int l;
	(void) data;

	/* construct initial register assignment */
	n_classes = arch_env->n_register_classes;
	for (i = 0; i < n_classes; ++i) {
		const arch_register_class_t *cls    = &arch_env->register_classes[i];
		unsigned                     n_regs = arch_register_class_n_regs(cls);
		memset(register_values[i], 0, sizeof(ir_node*) * n_regs);
	}

	assert(lv->nodes && "live sets must be computed");
	DB((dbg, LEVEL_1, "\nProcessing block %+F (from end)\n", block));
	be_lv_foreach(lv, block, be_lv_state_end, l) {
		ir_node *node = be_lv_get_irn(lv, block, l);
		set_reg_value(node);
	}
	DB((dbg, LEVEL_1, "\nstart processing\n"));

	/* walk the block from last insn to the first */
	current_node = sched_last(block);
	for ( ; !sched_is_begin(current_node);
			current_node = sched_prev(current_node)) {
		ir_op             *op;
		peephole_opt_func  peephole_node;

		assert(!is_Bad(current_node));
		if (is_Phi(current_node))
			break;

		clear_defs(current_node);
		set_uses(current_node);

		op            = get_irn_op(current_node);
		peephole_node = (peephole_opt_func)op->ops.generic;
		if (peephole_node == NULL)
			continue;

		DB((dbg, LEVEL_2, "optimize %+F\n", current_node));
		peephole_node(current_node);
		assert(!is_Bad(current_node));
	}
}

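/**
 * Remove the node from schedule and graph, recursively killing operands
 * that are left without any other users.
 */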
static void kill_node_and_preds(ir_node *node)
{
	ir_graph *irg = get_irn_irg(node);
	int arity, i;

	arity = get_irn_arity(node);
	for (i = 0; i < arity; ++i) {
		ir_node *pred = get_irn_n(node, i);

		set_irn_n(node, i, new_r_Bad(irg));
		if (get_irn_n_edges(pred) != 0)
			continue;

		kill_node_and_preds(pred);
	}

	if (!is_Proj(node))
		sched_remove(node);
	kill_node(node);
}

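/**
 * Make sure a Barrier operand stays alive after the Barrier is removed:
 * attach it to a be_Keep scheduled right behind its definition (and behind
 * any Phi sequence).  If a be_Keep is already scheduled there, the operand
 * is added to it instead of creating a new one.
 */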
static void keep_alive_barrier_operand(ir_node *operand)
{
	ir_node *schedpoint = skip_Proj(operand);

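	/* Advance behind the Phi sequence: nothing may be scheduled between
	 * the Phis at the beginning of a block, so the Keep must be placed
	 * after them. */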
	do {
		schedpoint = sched_next(schedpoint);
	} while (is_Phi(schedpoint));

	/* There already is a keep in the schedule. */
	if (be_is_Keep(schedpoint)) {
		const arch_register_class_t *cls = arch_get_irn_reg_class_out(operand);
		be_Keep_add_node(schedpoint, cls, operand);
	} else {
		ir_node *block = get_nodes_block(operand);
		ir_node *in[1] = {operand};
		ir_node *keep  = be_new_Keep(block, 1, in);
		sched_add_before(schedpoint, keep);
	}
}

/**
 * Walk through the block schedule and skip the Barrier node, rerouting its
 * Projs to the Barrier's operands.  A block contains at most one Barrier,
 * so we can stop after the first one found.
 */
static void skip_barrier(ir_node *block, ir_graph *irg)
{
	ir_node *irn;

	sched_foreach_reverse(block, irn) {
		int       arity;
		unsigned *used;
		size_t    n_used;
		const ir_edge_t *edge, *next;

		if (!be_is_Barrier(irn))
			continue;

		/* track which outputs are actually used, as we have to create
		 * keep nodes for unused outputs */
		arity = get_irn_arity(irn);
		rbitset_alloca(used, arity);

		foreach_out_edge_safe(irn, edge, next) {
			ir_node *proj = get_edge_src_irn(edge);
			int      pn;
			ir_node *pred;

			if (is_Anchor(proj))
				continue;

			pn   = (int) get_Proj_proj(proj);
			pred = get_irn_n(irn, pn);

			rbitset_set(used, pn);

			/* We may need to reschedule be_Keeps to keep live-ranges short. */
			if (get_irn_n_edges(proj) == 1) {
				const ir_edge_t *proj_edge = get_irn_out_edge_first(proj);
				ir_node         *proj_succ = get_edge_src_irn(proj_edge);

				if (be_is_Keep(proj_succ)) {
					int      succ_arity = get_irn_arity(proj_succ);
					ir_node *operand    = get_irn_n(irn, pn);

					keep_alive_barrier_operand(operand);

					/* Disconnect old be_Keep. */
					if (succ_arity > 1) {
						int      edge_pos  = get_edge_src_pos(proj_edge);
						int      new_arity = succ_arity - 1;
						int      pos;
						int      new_pos   = 0;
						ir_node **ins;

						NEW_ARR_A(ir_node *, ins, succ_arity);
						for (pos = 0; pos < succ_arity; ++pos) {
							if (pos != edge_pos)
								ins[new_pos++] = get_irn_n(proj_succ, pos);
						}

						set_irn_in(proj_succ, new_arity, ins);
					} else {
						sched_remove(proj_succ);
						kill_node(proj_succ);
					}
				}
			}

			edges_reroute_kind(proj, pred, EDGE_KIND_NORMAL, irg);
			edges_reroute_kind(proj, pred, EDGE_KIND_DEP, irg);
		}

		/* the barrier also had the effect of a Keep for unused inputs.
		 * we now have to create an explicit Keep for them */
		n_used = rbitset_popcount(used, arity);
		if (n_used < (size_t) arity) {
			int i;

			for (i = 0; i < arity; ++i) {
				ir_node *operand;
				if (rbitset_is_set(used, i))
					continue;

				operand = get_irn_n(irn, i);
				keep_alive_barrier_operand(operand);
			}
		}

		kill_node_and_preds(irn);
		break;
	}
}

/**
 * Kill the Barrier nodes for better peephole optimization.
 */
static void kill_barriers(ir_graph *irg)
{
	ir_node *end_blk   = get_irg_end_block(irg);
	ir_node *start_blk = get_irg_start_block(irg);
	int i;

	/* skip the barrier on all return blocks */
	for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
		ir_node *be_ret  = get_Block_cfgpred(end_blk, i);
		ir_node *ret_blk = get_nodes_block(be_ret);

		if (ret_blk == start_blk)
			continue;

		skip_barrier(ret_blk, irg);
	}

	/* skip the barrier on the start block */
	skip_barrier(start_blk, irg);
}

/**
 * Check whether the node has only one user.  Explicitly ignore the anchor.
 */
static int has_only_one_user(ir_node *node)
{
	int              n = get_irn_n_edges(node);
	const ir_edge_t *edge;

	if (n <= 1)
		return 1;

	if (n > 2)
		return 0;

	foreach_out_edge(node, edge) {
		ir_node *src = get_edge_src_irn(edge);
		if (is_Anchor(src))
			return 1;
	}

	return 0;
}

/*
 * Tries to merge a be_IncSP node with its predecessor IncSP node, folding
 * the two offsets into one (e.g. offsets of +8 and +16 yield a single
 * IncSP of +24); matching EXPAND/SHRINK pairs cancel out to an offset of 0.
 * Must be run from a be_peephole_opt() context.
 */
ir_node *be_peephole_IncSP_IncSP(ir_node *node)
{
	int      pred_offs;
	int      curr_offs;
	int      offs;
	ir_node *pred = be_get_IncSP_pred(node);

	if (!be_is_IncSP(pred))
		return node;

	if (!has_only_one_user(pred))
		return node;

	pred_offs = be_get_IncSP_offset(pred);
	curr_offs = be_get_IncSP_offset(node);

	if (pred_offs == BE_STACK_FRAME_SIZE_EXPAND) {
		if (curr_offs != BE_STACK_FRAME_SIZE_SHRINK) {
			return node;
		}
		offs = 0;
	} else if (pred_offs == BE_STACK_FRAME_SIZE_SHRINK) {
		if (curr_offs != BE_STACK_FRAME_SIZE_EXPAND) {
			return node;
		}
		offs = 0;
	} else if (curr_offs == BE_STACK_FRAME_SIZE_EXPAND ||
	           curr_offs == BE_STACK_FRAME_SIZE_SHRINK) {
		return node;
	} else {
		offs = curr_offs + pred_offs;
	}

	/* add node offset to pred and remove our IncSP */
	be_set_IncSP_offset(pred, offs);

	be_peephole_exchange(node, pred);
	return pred;
}

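/*
 * process_block() above dispatches via op->ops.generic: a backend enables a
 * peephole handler for an opcode by storing a peephole_opt_func there before
 * be_peephole_opt() runs.  A minimal sketch of such a registration helper
 * (backends keep their own local variant; the names peephole_foo and op_foo
 * are hypothetical):
 *
 *     static void peephole_foo(ir_node *node)
 *     {
 *             // may inspect register_values[] and call
 *             // be_peephole_exchange() to replace the node
 *     }
 *
 *     static void register_peephole_optimisation(ir_op *op,
 *                                                peephole_opt_func func)
 *     {
 *             op->ops.generic = (op_func) func;
 *     }
 *
 *     // ... then, before calling be_peephole_opt(irg):
 *     register_peephole_optimisation(op_foo, peephole_foo);
 */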
void be_peephole_opt(ir_graph *irg)
{
	unsigned  n_classes;
	unsigned  i;

	/* barrier nodes are used for register allocation. They hinder
	 * peephole optimizations, so remove them here. */
	kill_barriers(irg);

	/* we sometimes find BadE nodes in float apps such as optest_float.c
	 * or kahansum.c... */
	be_liveness_invalidate(be_get_irg_liveness(irg));
	be_liveness_assure_sets(be_assure_liveness(irg));

	arch_env = be_get_irg_arch_env(irg);
	lv       = be_get_irg_liveness(irg);

	n_classes = arch_env->n_register_classes;
	register_values = XMALLOCN(ir_node**, n_classes);
	for (i = 0; i < n_classes; ++i) {
		const arch_register_class_t *cls    = &arch_env->register_classes[i];
		unsigned                     n_regs = arch_register_class_n_regs(cls);
		register_values[i] = XMALLOCN(ir_node*, n_regs);
	}

	irg_block_walk_graph(irg, process_block, NULL, NULL);

	for (i = 0; i < n_classes; ++i) {
		xfree(register_values[i]);
	}
	xfree(register_values);
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_peephole);
void be_init_peephole(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.be.peephole");
}