Make the SPARC and ARM backends completely independent from beabi
[libfirm] / ir / be / bespill.c
1 /*
2  * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License.
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief       Spill module selection; Preparation steps
23  * @author      Matthias Braun
24  * @date        29.09.2005
25  * @version     $Id$
26  */
27 #include "config.h"
28
29 #include "irtools.h"
30 #include "debug.h"
31 #include "iredges_t.h"
32 #include "raw_bitset.h"
33 #include "statev.h"
34 #include "irgwalk.h"
35
36 #include "bespill.h"
37 #include "bemodule.h"
38 #include "be.h"
39 #include "belive_t.h"
40 #include "beirg.h"
41 #include "bearch.h"
42 #include "benode.h"
43 #include "besched.h"
44 #include "bera.h"
45 #include "beintlive_t.h"
46
47 #include "lc_opts.h"
48 #include "lc_opts_enum.h"
49
/** Debug module handle; registered as "firm.be.spillprepare" in be_init_spilloptions(). */
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

/** Walker environment for the constraint-preparation phase. */
typedef struct be_pre_spill_env_t {
        ir_graph                    *irg; /**< the graph being processed */
        const arch_register_class_t *cls; /**< register class whose constraints are prepared */
} be_pre_spill_env_t;
56
/**
 * Insert Copy nodes in front of @p node so that its register constraints
 * (for the class env->cls) can always be fulfilled by the allocator.
 *
 * Three situations are handled:
 *  1. an input precolored with a non-allocatable ("ignore") register that
 *     violates the input's limited constraint,
 *  2. the same value feeding several inputs whose limited constraints
 *     differ,
 *  3. a constrained input that lives through @p node while its allowed
 *     registers overlap the node's output constraints.
 *
 * @param env   walker environment (graph and register class)
 * @param node  the scheduled instruction to examine
 */
static void prepare_constr_insn(be_pre_spill_env_t *env, ir_node *node)
{
        const arch_register_class_t *cls = env->cls;
        ir_node  *block      = get_nodes_block(node);
        const ir_graph *irg  = env->irg;
        be_irg_t       *birg = be_birg_from_irg(irg);
        be_lv_t *lv          = be_get_irg_liveness(irg);
        unsigned *tmp        = NULL;
        unsigned *def_constr = NULL;
        int       arity      = get_irn_arity(node);

        int i, i2;

        /* Insert a copy for constraint inputs attached to a value which can't
         * fullfil the constraint
         * (typical example: stack pointer as input to copyb)
         * TODO: This really just checks precolored registers at the moment and
         *       ignores the general case of not matching in/out constraints
         */
        for (i = 0; i < arity; ++i) {
                ir_node                   *op  = get_irn_n(node, i);
                const arch_register_req_t *req = arch_get_register_req(node, i);
                const arch_register_t     *reg;
                ir_node                   *copy;

                if (req->cls != cls)
                        continue;
                /* only inputs that are already pinned to a register matter here */
                reg = arch_get_irn_register(op);
                if (reg == NULL)
                        continue;

                /* precolored with an ignore register (which is not a joker like
                   unknown/noreg) */
                /* NOTE(review): registers that are generally allocatable are
                 * skipped as well — only non-allocatable precolored registers
                 * fall through to the copy below */
                if (arch_register_type_is(reg, joker)
                                || rbitset_is_set(birg->allocatable_regs, reg->global_index))
                        continue;

                if (! (req->type & arch_register_req_type_limited))
                        continue;
                /* the precolored register satisfies the constraint: nothing to do */
                if (rbitset_is_set(req->limited, reg->index))
                        continue;

                copy = be_new_Copy(cls, block, op);
                stat_ev_int("constr_copy", 1);
                sched_add_before(node, copy);
                set_irn_n(node, i, copy);
                DBG((dbg, LEVEL_3, "inserting ignore arg copy %+F for %+F pos %d\n",
                     copy, node, i));
        }

        /* insert copies for nodes that occur constrained more than once. */
        for (i = 0; i < arity; ++i) {
                ir_node                   *in;
                ir_node                   *copy;
                const arch_register_req_t *req;

                req = arch_get_register_req(node, i);
                if (req->cls != cls)
                        continue;

                if (! (req->type & arch_register_req_type_limited))
                        continue;

                in = get_irn_n(node, i);
                if (!arch_irn_consider_in_reg_alloc(cls, in))
                        continue;

                /* compare this constrained input against all later uses of the
                 * same value */
                for (i2 = i + 1; i2 < arity; ++i2) {
                        ir_node *in2;
                        const arch_register_req_t *req2;

                        req2 = arch_get_register_req(node, i2);
                        if (req2->cls != cls)
                                continue;
                        if (! (req2->type & arch_register_req_type_limited))
                                continue;

                        in2 = get_irn_n(node, i2);
                        if (in2 != in)
                                continue;

                        /* if the constraint is the same, no copy is necessary
                         * TODO generalise unequal but overlapping constraints */
                        if (rbitsets_equal(req->limited, req2->limited, cls->n_regs))
                                continue;

                        copy = be_new_Copy(cls, block, in);
                        stat_ev_int("constr_copy", 1);

                        sched_add_before(node, copy);
                        set_irn_n(node, i2, copy);
                        DBG((dbg, LEVEL_3,
                             "inserting multiple constr copy %+F for %+F pos %d\n",
                             copy, node, i2));
                }
        }

        /* collect all registers occurring in out constraints. */
        if (get_irn_mode(node) == mode_T) {
                /* a mode_T node produces its results via Proj users */
                const ir_edge_t *edge;

                foreach_out_edge(node, edge) {
                        ir_node                   *proj = get_edge_src_irn(edge);
                        const arch_register_req_t *req  = arch_get_register_req_out(proj);
                        if (! (req->type & arch_register_req_type_limited))
                                continue;

                        /* allocate the accumulator lazily, only when a limited
                         * out constraint actually exists */
                        if (def_constr == NULL) {
                                rbitset_alloca(def_constr, cls->n_regs);
                        }
                        rbitset_or(def_constr, req->limited, cls->n_regs);
                }
        } else {
                const arch_register_req_t *req = arch_get_register_req_out(node);
                if (req->type & arch_register_req_type_limited) {
                        /* NOTE(review): rbitset_alloca is presumably alloca-based,
                         * so the bitset stays valid until function return even
                         * though it is allocated inside this block — confirm */
                        rbitset_alloca(def_constr, cls->n_regs);
                        rbitset_or(def_constr, req->limited, cls->n_regs);
                }
        }

        /* no output constraints => we're good */
        if (def_constr == NULL) {
                return;
        }

        /*
         * insert copies for all constrained arguments living through the node
         * and being constrained to a register which also occurs in out constraints.
         */
        rbitset_alloca(tmp, cls->n_regs);
        for (i = 0; i < arity; ++i) {
                const arch_register_req_t *req;
                ir_node                   *in;
                ir_node                   *copy;

                /*
                 * Check, if
                 * 1) the operand is constrained.
                 * 2) lives through the node.
                 * 3) is constrained to a register occurring in out constraints.
                 */
                req = arch_get_register_req(node, i);
                if (req->cls != cls)
                        continue;
                if (!(req->type & arch_register_req_type_limited))
                        continue;

                in = get_irn_n(node, i);
                if (!arch_irn_consider_in_reg_alloc(cls, in))
                        continue;
                /* value does not live through the node: no conflict possible */
                if (!be_values_interfere(lv, node, in))
                        continue;

                /* tmp = allowed input registers that also appear in some out
                 * constraint */
                rbitset_copy(tmp, req->limited, cls->n_regs);
                rbitset_and(tmp, def_constr, cls->n_regs);

                if (rbitset_is_empty(tmp, cls->n_regs))
                        continue;

                /*
                 * only create the copy if the operand is no copy.
                 * this is necessary since the assure constraints phase inserts
                 * Copies and Keeps for operands which must be different from the
                 * results. Additional copies here would destroy this.
                 */
                if (be_is_Copy(in))
                        continue;

                copy = be_new_Copy(cls, block, in);
                sched_add_before(node, copy);
                set_irn_n(node, i, copy);
                DBG((dbg, LEVEL_3, "inserting constr copy %+F for %+F pos %d\n",
                     copy, node, i));
                /* the copy changes the live range of `in`; keep liveness valid */
                be_liveness_update(lv, in);
        }
}
233
234 static void pre_spill_prepare_constr_walker(ir_node *block, void *data)
235 {
236         be_pre_spill_env_t *env = data;
237         ir_node *node;
238         sched_foreach(block, node) {
239                 prepare_constr_insn(env, node);
240         }
241 }
242
243 void be_pre_spill_prepare_constr(ir_graph *irg,
244                                  const arch_register_class_t *cls)
245 {
246         be_pre_spill_env_t env;
247         memset(&env, 0, sizeof(env));
248         env.irg = irg;
249         env.cls = cls;
250
251         be_assure_liveness(irg);
252
253         irg_block_walk_graph(irg, pre_spill_prepare_constr_walker, NULL, &env);
254 }
255
256
257
/** if set, spill slots holding non-interfering values may share stack space */
int be_coalesce_spill_slots = 1;
/** if set, spillers try to rematerialize values instead of reloading them */
int be_do_remats = 1;

/* command line options exposed under the "be.spill" option group */
static const lc_opt_table_entry_t be_spill_options[] = {
        LC_OPT_ENT_BOOL ("coalesce_slots", "coalesce the spill slots", &be_coalesce_spill_slots),
        LC_OPT_ENT_BOOL ("remat", "try to rematerialize values instead of reloading", &be_do_remats),
        LC_OPT_LAST
};
266
/** list of all registered spill algorithms (filled by be_register_spiller) */
static be_module_list_entry_t *spillers = NULL;
/** the spill algorithm chosen via the "spiller" option; defaults to the first registered one */
static const be_spiller_t *selected_spiller = NULL;
269
270 void be_register_spiller(const char *name, be_spiller_t *spiller)
271 {
272         if (selected_spiller == NULL)
273                 selected_spiller = spiller;
274         be_add_module_to_list(&spillers, name, spiller);
275 }
276
277 void be_do_spill(ir_graph *irg, const arch_register_class_t *cls)
278 {
279         assert(selected_spiller != NULL);
280
281         selected_spiller->spill(irg, cls);
282 }
283
284 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spilloptions);
285 void be_init_spilloptions(void)
286 {
287         lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
288         lc_opt_entry_t *spill_grp = lc_opt_get_grp(be_grp, "spill");
289
290         lc_opt_add_table(spill_grp, be_spill_options);
291         be_add_module_list_opt(spill_grp, "spiller", "spill algorithm",
292                                &spillers, (void**) &selected_spiller);
293
294         FIRM_DBG_REGISTER(dbg, "firm.be.spillprepare");
295 }