add missing keeps after rematerializing nodes
[libfirm] / ir / be / beflags.c
/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief       Modifies the schedule so that flag dependencies are respected.
 * @author      Matthias Braun, Christoph Mallon
 * @version     $Id: besched.h 14693 2007-06-21 15:35:49Z beck $
 *
 * Fixes up the schedule to respect flag constraints by moving and
 * rematerialising nodes.
 *
 * Flags are modeled as register classes with ignore registers. However, to
 * avoid bloating the graph, only flag-consumer -> flag-producer dependencies
 * are explicitly modeled in the graph. Nodes that just change the flags are
 * only marked with the arch_irn_flags_modify_flags flag.
 *
 * Flags are usually a limited resource that can't (or at least shouldn't) be
 * spilled. So in some situations (for example 2 adc-nodes that use the flags of
 * a single add node on x86) operations have to be repeated to work correctly.
 */
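/* Purely illustrative sketch of such a situation, using x86-style mnemonics
 * rather than actual backend nodes:
 *
 *     add  ...        ; produces the flags
 *     adc  ...        ; consumes the flags of the add
 *     sub  ...        ; destroys the flags
 *     adc  ...        ; also wants the flags of the add
 *
 * The second adc can no longer read the add's flags, and the flags cannot be
 * spilled. The walker below repairs this either by moving the add down past
 * the sub (only possible when all of its users come after the sub), or, as in
 * this example, by rematerialising it: an exact copy of the add is scheduled
 * after the sub and the second adc is rewired to use that copy.
 */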
#include "config.h"

#include <stdbool.h>

#include "irgwalk.h"
#include "irnode_t.h"
#include "irtools.h"
#include "ircons.h"
#include "iredges_t.h"
#include "irprintf.h"
#include "error.h"

#include "beflags.h"
#include "bearch.h"
#include "beirg.h"
#include "beirgmod.h"
#include "besched.h"
#include "benode.h"
#include "belive.h"
#include "beabihelper.h"

static const arch_register_class_t *flag_class;
static const arch_register_t       *flags_reg;
static func_rematerialize           remat;
static check_modifies_flags         check_modify;
static int                          changed;

/**
 * Default rematerialisation: schedule an exact copy of @p node after @p after.
 */
static ir_node *default_remat(ir_node *node, ir_node *after)
{
        ir_node *block, *copy;
        if (is_Block(after))
                block = after;
        else
                block = get_nodes_block(after);

        copy = exact_copy(node);
        set_nodes_block(copy, block);
        sched_add_after(after, copy);

        return copy;
}

/**
 * Default test for flag-destroying nodes: check the modify_flags arch flag.
 */
static bool default_check_modifies(const ir_node *node)
{
        return arch_irn_is(node, modify_flags);
}

/**
 * Tests whether we can legally move node @p node directly after node @p after
 * (only works for nodes in the same block).
 */
static bool can_move(ir_node *node, ir_node *after)
{
        const ir_edge_t *edge;
        ir_node *node_block = get_nodes_block(node);
        assert(node_block == get_nodes_block(after));

        /* TODO respect dep edges */
        assert(get_irn_n_edges_kind(node, EDGE_KIND_DEP) == 0);

        /* all users have to be scheduled after the 'after' node */
        foreach_out_edge(node, edge) {
                ir_node *out = get_edge_src_irn(edge);
                if (is_Proj(out)) {
                        const ir_edge_t *edge2;
                        assert(get_irn_n_edges_kind(out, EDGE_KIND_DEP) == 0);
                        foreach_out_edge(out, edge2) {
                                ir_node *out2 = get_edge_src_irn(edge2);
                                if (get_nodes_block(out2) != node_block)
                                        continue;
                                /* Phi or End represents a usage at block end. */
                                if (is_Phi(out2) || is_End(out2))
                                        continue;
                                if (is_Sync(out2)) {
                                        const ir_edge_t *edge3;
                                        foreach_out_edge(out2, edge3) {
                                                ir_node *out3 = get_edge_src_irn(edge3);
                                                /* Phi or End represents a usage at block end. */
                                                if (is_Phi(out3) || is_End(out3))
                                                        continue;
                                                assert(!is_Sync(out3));
                                                if (sched_get_time_step(out3) <= sched_get_time_step(after)) {
                                                        return false;
                                                }
                                        }
                                } else if (sched_get_time_step(out2) <= sched_get_time_step(after)) {
                                        return false;
                                }
                        }
                } else {
                        if (get_nodes_block(out) != node_block)
                                continue;
                        /* Phi represents a usage at block end. */
                        if (is_Phi(out))
                                continue;
                        if (sched_get_time_step(out) <= sched_get_time_step(after)) {
                                return false;
                        }
                }
        }

        return true;
}

/**
 * Makes the flag producer @p flags_needed available directly after @p node:
 * moves it there if that is legal, otherwise rematerialises it there and
 * rewires all @p flag_consumers to the new copy. @p pn is the Proj number of
 * the flag value if the producer is a mode_T node.
 */
static void rematerialize_or_move(ir_node *flags_needed, ir_node *node,
                                  ir_node *flag_consumers, int pn)
{
        ir_node *n;
        ir_node *copy;
        ir_node *value;

        if (!is_Block(node) &&
                        get_nodes_block(flags_needed) == get_nodes_block(node) &&
                        can_move(flags_needed, node)) {
                /* move it */
                sched_remove(flags_needed);
                sched_add_after(node, flags_needed);
                /* No need to update liveness, because the node stays in the same block */
                return;
        }

        changed = 1;
        copy    = remat(flags_needed, node);

        if (get_irn_mode(copy) == mode_T) {
                ir_mode *mode = flag_class->mode;
                value = new_rd_Proj(NULL, copy, mode, pn);
                be_add_missing_keeps_node(copy);
        } else {
                value = copy;
        }

        n = flag_consumers;
        do {
                int i;
                int arity = get_irn_arity(n);
                for (i = 0; i < arity; ++i) {
                        ir_node *in = get_irn_n(n, i);
                        in = skip_Proj(in);
                        if (in == flags_needed) {
                                set_irn_n(n, i, value);
                                break;
                        }
                }
                n = (ir_node*)get_irn_link(n);
        } while (n != NULL);

        /* No need to introduce the copy into the liveness information, because
         * it only lives in this block, but we have to update the liveness of
         * all its operands */
        if (is_Block(node) ||
                        get_nodes_block(node) != get_nodes_block(flags_needed)) {
                ir_graph *irg = get_irn_irg(node);
                be_lv_t  *lv  = be_get_irg_liveness(irg);
                int       i;

                if (lv != NULL) {
                        for (i = get_irn_arity(copy) - 1; i >= 0; --i) {
                                be_liveness_update(lv, get_irn_n(copy, i));
                        }
                }
        }
}

/**
 * Walks up the schedule and makes sure there are no flag-destroying nodes
 * between a flag producer and its flag consumers. Problematic situations are
 * fixed by moving and/or rematerialisation of the flag producers.
 * (This can be extended in the future to do some register allocation on
 *  targets like ppc32, where we conceptually have 8 flag registers.)
 */
static void fix_flags_walker(ir_node *block, void *env)
{
        ir_node *node;
        ir_node *flags_needed   = NULL;
        ir_node *flag_consumers = NULL;
        int      pn = -1;
        (void) env;

        sched_foreach_reverse(block, node) {
                int i, arity;
                ir_node *new_flags_needed = NULL;
                ir_node *test;

                if (is_Phi(node))
                        break;

                if (node == flags_needed) {
                        /* all ok */
                        flags_needed   = NULL;
                        flag_consumers = NULL;
                }

                /* test whether node destroys the flags */
                test = node;
                if (be_is_Keep(test))
                        test = sched_prev(test);

                if (flags_needed != NULL && check_modify(test)) {
                        /* rematerialize */
                        rematerialize_or_move(flags_needed, node, flag_consumers, pn);
                        flags_needed   = NULL;
                        flag_consumers = NULL;
                }

                /* test whether the current node needs flags */
                arity = get_irn_arity(node);
                for (i = 0; i < arity; ++i) {
                        const arch_register_req_t *req
                                = arch_get_irn_register_req_in(node, i);
                        if (req->cls == flag_class) {
                                assert(new_flags_needed == NULL);
                                new_flags_needed = get_irn_n(node, i);
                        }
                }

                if (new_flags_needed == NULL)
                        continue;

                /* spiller can't (correctly) remat flag consumers at the moment */
                assert(!arch_irn_is(node, rematerializable));

                if (skip_Proj(new_flags_needed) != flags_needed) {
                        if (flags_needed != NULL) {
                                /* rematerialize node */
                                rematerialize_or_move(flags_needed, node, flag_consumers, pn);
                                flags_needed   = NULL;
                                flag_consumers = NULL;
                        }

                        flags_needed = new_flags_needed;
                        arch_set_irn_register(flags_needed, flags_reg);
                        if (is_Proj(flags_needed)) {
                                pn           = get_Proj_proj(flags_needed);
                                flags_needed = get_Proj_pred(flags_needed);
                        }
                        flag_consumers = node;
                        set_irn_link(flag_consumers, NULL);
                        assert(arch_irn_is(flags_needed, rematerializable));
                } else {
                        /* link all consumers in a list */
                        set_irn_link(node, flag_consumers);
                        flag_consumers = node;
                }
        }

        if (flags_needed != NULL) {
                assert(get_nodes_block(flags_needed) != block);
                rematerialize_or_move(flags_needed, node, flag_consumers, pn);
                flags_needed   = NULL;
                flag_consumers = NULL;
        }

        assert(flags_needed   == NULL);
        assert(flag_consumers == NULL);
}

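/**
 * Fixes the schedule of @p irg so that flag dependencies are respected
 * (see the comment at the top of this file).
 *
 * @param irg                        the graph whose schedule is fixed
 * @param flag_cls                   the register class used to model the flags
 * @param remat_func                 callback used to rematerialise a flag
 *                                   producer, or NULL to simply schedule an
 *                                   exact copy (default_remat)
 * @param check_modifies_flags_func  callback that tests whether a node
 *                                   destroys the flags, or NULL to test the
 *                                   modify_flags arch flag
 *                                   (default_check_modifies)
 */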
void be_sched_fix_flags(ir_graph *irg, const arch_register_class_t *flag_cls,
                        func_rematerialize remat_func,
                        check_modifies_flags check_modifies_flags_func)
{
        flag_class   = flag_cls;
        flags_reg    = &flag_class->regs[0];
        remat        = remat_func;
        check_modify = check_modifies_flags_func;
        changed      = 0;
        if (remat == NULL)
                remat = &default_remat;
        if (check_modify == NULL)
                check_modify = &default_check_modifies;

        ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
        irg_block_walk_graph(irg, fix_flags_walker, NULL, NULL);
        ir_free_resources(irg, IR_RESOURCE_IRN_LINK);

        if (changed) {
                be_remove_dead_nodes_from_schedule(irg);
        }
}
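/* A typical invocation from a backend, sketched for illustration only (the
 * register class name below is made up; each backend passes its own flag
 * class):
 *
 *     be_sched_fix_flags(irg, &arch_reg_classes[CLASS_arch_flags], NULL, NULL);
 *
 * Passing NULL for both callbacks selects default_remat and
 * default_check_modifies above. */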