/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */
/**
 * @file
 * @brief   Peephole optimisation framework that keeps track of which
 *          registers contain which values.
 * @author  Matthias Braun
 */
#include "bepeephole.h"

#include <string.h>

#include "iredges_t.h"
#include "irgwalk.h"
#include "irprintf.h"
#include "ircons.h"
#include "irgmod.h"
#include "error.h"
#include "raw_bitset.h"
#include "xmalloc.h"
#include "debug.h"

#include "beirg.h"
#include "belive_t.h"
#include "bearch.h"
#include "besched.h"
#include "benode.h"
#include "bemodule.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
static const arch_env_t *arch_env;
static be_lv_t          *lv;
static ir_node          *current_node;

/** maps (register class index, register index) to the node whose value the
 * register currently holds, or NULL if unknown. Only valid while a block is
 * being processed from its end towards its beginning. */
ir_node ***register_values;
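
/** Forget the recorded value of the register assigned to @p node. */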
static void clear_reg_value(ir_node *node)
{
	const arch_register_t       *reg;
	const arch_register_class_t *cls;
	unsigned                     reg_idx;
	unsigned                     cls_idx;

	if (!mode_is_data(get_irn_mode(node)))
		return;

	reg = arch_get_irn_register(node);
	if (reg == NULL) {
		panic("No register assigned at %+F", node);
	}
	if (arch_register_type_is(reg, virtual))
		return;

	cls     = arch_register_get_class(reg);
	reg_idx = arch_register_get_index(reg);
	cls_idx = arch_register_class_index(cls);

	//assert(register_values[cls_idx][reg_idx] != NULL);
	DB((dbg, LEVEL_1, "Clear Register %s\n", reg->name));
	register_values[cls_idx][reg_idx] = NULL;
}
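
/** Record that the register assigned to @p node now holds @p node's value. */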
static void set_reg_value(ir_node *node)
{
	const arch_register_t       *reg;
	const arch_register_class_t *cls;
	unsigned                     reg_idx;
	unsigned                     cls_idx;

	if (!mode_is_data(get_irn_mode(node)))
		return;

	reg = arch_get_irn_register(node);
	if (reg == NULL) {
		panic("No register assigned at %+F", node);
	}
	if (arch_register_type_is(reg, virtual))
		return;

	cls     = arch_register_get_class(reg);
	reg_idx = arch_register_get_index(reg);
	cls_idx = arch_register_class_index(cls);

	DB((dbg, LEVEL_1, "Set Register %s: %+F\n", reg->name, node));
	register_values[cls_idx][reg_idx] = node;
}
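
/** Invalidate the recorded values of all registers defined by @p node. */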
static void clear_defs(ir_node *node)
{
	/* clear values defined */
	if (get_irn_mode(node) == mode_T) {
		const ir_edge_t *edge;
		foreach_out_edge(node, edge) {
			ir_node *proj = get_edge_src_irn(edge);
			clear_reg_value(proj);
		}
	} else {
		clear_reg_value(node);
	}
}
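
/** Record the values of all registers used as operands of @p node. */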
static void set_uses(ir_node *node)
{
	int i, arity;

	/* set values used */
	arity = get_irn_arity(node);
	for (i = 0; i < arity; ++i) {
		ir_node *in = get_irn_n(node, i);
		set_reg_value(in);
	}
}
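
/** Must be called from a peephole optimisation for each newly created node,
 * so that the liveness information stays up to date. */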
void be_peephole_new_node(ir_node *nw)
{
	be_liveness_introduce(lv, nw);
}
/**
 * Must be called from peephole optimisations before a node is killed
 * and its users are redirected to new_node, so that bepeephole can update
 * its internal state.
 *
 * Note: killing a node and rewiring is only allowed if new_node produces
 * the same registers as old_node.
 */
static void be_peephole_before_exchange(const ir_node *old_node,
                                        ir_node *new_node)
{
	const arch_register_t       *reg;
	const arch_register_class_t *cls;
	unsigned                     reg_idx;
	unsigned                     cls_idx;

	DB((dbg, LEVEL_1, "About to exchange and kill %+F with %+F\n", old_node, new_node));

	if (current_node == old_node) {
		/* the node to be processed next is about to be killed: advance to its
		 * scheduling successor, so the reverse walk continues at the killed
		 * node's scheduling predecessor. */
		current_node = sched_next(current_node);
		assert(!is_Bad(current_node));
	}

	if (!mode_is_data(get_irn_mode(old_node)))
		return;

	reg = arch_get_irn_register(old_node);
	if (reg == NULL) {
		panic("No register assigned at %+F", old_node);
	}
	assert(reg == arch_get_irn_register(new_node) &&
	       "KILLING a node and replacing by different register is not allowed");

	cls     = arch_register_get_class(reg);
	reg_idx = arch_register_get_index(reg);
	cls_idx = arch_register_class_index(cls);

	if (register_values[cls_idx][reg_idx] == old_node) {
		register_values[cls_idx][reg_idx] = new_node;
	}

	be_liveness_remove(lv, old_node);
}
void be_peephole_exchange(ir_node *old, ir_node *nw)
{
	be_peephole_before_exchange(old, nw);
	sched_remove(old);
	exchange(old, nw);
	be_peephole_new_node(nw);
}
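
/*
 * Usage sketch (illustrative only: op_my_IncSP and peephole_my_IncSP are
 * made-up names). A backend stores one handler per opcode in
 * op->ops.generic; be_peephole_opt() then invokes it for every matching
 * node it encounters during the reverse schedule walk:
 *
 *     static void peephole_my_IncSP(ir_node *node)
 *     {
 *         // fold the node into a preceding IncSP if possible
 *         (void) be_peephole_IncSP_IncSP(node);
 *     }
 *
 *     op_my_IncSP->ops.generic = (op_func) peephole_my_IncSP;
 *     be_peephole_opt(irg);
 */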
/**
 * block-walker: run peephole optimization on the given block.
 */
static void process_block(ir_node *block, void *data)
{
	unsigned n_classes;
	unsigned i;
	int      l;
	(void) data;

	/* construct initial register assignment */
	n_classes = arch_env_get_n_reg_class(arch_env);
	for (i = 0; i < n_classes; ++i) {
		const arch_register_class_t *cls    = arch_env_get_reg_class(arch_env, i);
		unsigned                     n_regs = arch_register_class_n_regs(cls);
		memset(register_values[i], 0, sizeof(ir_node*) * n_regs);
	}

	assert(lv->nodes && "live sets must be computed");
	DB((dbg, LEVEL_1, "\nProcessing block %+F (from end)\n", block));
	/* everything live at the end of the block occupies its register */
	be_lv_foreach(lv, block, be_lv_state_end, l) {
		ir_node *node = be_lv_get_irn(lv, block, l);
		set_reg_value(node);
	}
	DB((dbg, LEVEL_1, "\nstart processing\n"));

	/* walk the block from last insn to the first */
	current_node = sched_last(block);
	for ( ; !sched_is_begin(current_node);
	        current_node = sched_prev(current_node)) {
		ir_op             *op;
		peephole_opt_func  peephole_node;

		assert(!is_Bad(current_node));
		if (is_Phi(current_node))
			break;

		clear_defs(current_node);
		set_uses(current_node);

		op            = get_irn_op(current_node);
		peephole_node = (peephole_opt_func) op->ops.generic;
		if (peephole_node == NULL)
			continue;

		DB((dbg, LEVEL_2, "optimize %+F\n", current_node));
		peephole_node(current_node);
		assert(!is_Bad(current_node));
	}
}
static void kill_node_and_preds(ir_node *node)
{
	int arity, i;

	arity = get_irn_arity(node);
	for (i = 0; i < arity; ++i) {
		ir_node *pred = get_irn_n(node, i);

		set_irn_n(node, i, new_Bad());
		if (get_irn_n_edges(pred) != 0)
			continue;

		kill_node_and_preds(pred);
	}

	kill_node(node);
}
/**
 * Walk through the block schedule and skip all barrier nodes.
 */
static void skip_barrier(ir_node *block, ir_graph *irg)
{
	ir_node *irn;

	sched_foreach_reverse(block, irn) {
		int              arity;
		unsigned        *used;
		unsigned         n_used;
		const ir_edge_t *edge, *next;

		if (!be_is_Barrier(irn))
			continue;

		/* track which outputs are actually used, as we have to create
		 * keep nodes for unused outputs */
		arity = get_irn_arity(irn);
		rbitset_alloca(used, arity);

		foreach_out_edge_safe(irn, edge, next) {
			ir_node *proj = get_edge_src_irn(edge);
			int      pn;
			ir_node *pred;

			if (!is_Proj(proj))
				continue;

			pn   = (int) get_Proj_proj(proj);
			pred = get_irn_n(irn, pn);

			rbitset_set(used, pn);

			edges_reroute_kind(proj, pred, EDGE_KIND_NORMAL, irg);
			edges_reroute_kind(proj, pred, EDGE_KIND_DEP, irg);
		}

		/* the barrier also had the effect of a Keep for unused inputs.
		 * we now have to create an explicit Keep for them */
		n_used = rbitset_popcount(used, arity);
		if (n_used < (unsigned) arity) {
			int       n_in = arity - (int) n_used;
			ir_node **in   = ALLOCAN(ir_node*, n_in);
			int       i;
			int       n    = 0;
			ir_node  *keep;

			for (i = 0; i < arity; ++i) {
				if (rbitset_is_set(used, i))
					continue;
				assert(n < n_in);
				in[n++] = get_irn_n(irn, i);
			}
			keep = be_new_Keep(get_nodes_block(irn), n_in, in);
			sched_add_before(irn, keep);
		}

		kill_node_and_preds(irn);
		break;
	}
}
/**
 * Kill the Barrier nodes for better peephole optimization.
 */
static void kill_barriers(ir_graph *irg)
{
	ir_node *end_blk   = get_irg_end_block(irg);
	ir_node *start_blk = get_irg_start_block(irg);
	int      i;

	/* skip the barrier on all return blocks */
	for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
		ir_node *be_ret  = get_Block_cfgpred(end_blk, i);
		ir_node *ret_blk = get_nodes_block(be_ret);

		/* the start block is handled separately below */
		if (ret_blk == start_blk)
			continue;

		skip_barrier(ret_blk, irg);
	}

	/* skip the barrier on the start block */
	skip_barrier(start_blk, irg);
}
/**
 * Check whether the node has only one user. Explicitly ignore the anchor.
 */
static int has_only_one_user(ir_node *node)
{
	int              n = get_irn_n_edges(node);
	const ir_edge_t *edge;
	int              n_users;

	if (n <= 1)
		return 1;

	n_users = 0;
	foreach_out_edge(node, edge) {
		ir_node *src = get_edge_src_irn(edge);
		/* ignore anchor edges */
		if (is_Anchor(src))
			continue;
		n_users++;
	}

	return n_users == 1;
}
/**
 * Tries to optimize a be_IncSP node with its previous IncSP node, e.g. two
 * consecutive IncSPs with offsets +8 and +16 become a single IncSP with
 * offset +24. Must be run from a be_peephole_opt() context.
 */
ir_node *be_peephole_IncSP_IncSP(ir_node *node)
{
	int      pred_offs;
	int      curr_offs;
	int      offs;
	ir_node *pred = be_get_IncSP_pred(node);

	if (!be_is_IncSP(pred))
		return node;

	if (!has_only_one_user(pred))
		return node;

	pred_offs = be_get_IncSP_offset(pred);
	curr_offs = be_get_IncSP_offset(node);

	if (pred_offs == BE_STACK_FRAME_SIZE_EXPAND) {
		if (curr_offs != BE_STACK_FRAME_SIZE_SHRINK) {
			return node;
		}
		offs = 0;
	} else if (pred_offs == BE_STACK_FRAME_SIZE_SHRINK) {
		if (curr_offs != BE_STACK_FRAME_SIZE_EXPAND) {
			return node;
		}
		offs = 0;
	} else if (curr_offs == BE_STACK_FRAME_SIZE_EXPAND ||
	           curr_offs == BE_STACK_FRAME_SIZE_SHRINK) {
		return node;
	} else {
		offs = curr_offs + pred_offs;
	}

	/* add node offset to pred and remove our IncSP */
	be_set_IncSP_offset(pred, offs);

	be_peephole_exchange(node, pred);
	return pred;
}
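
/**
 * Main driver: removes the Barrier nodes, (re)computes liveness, allocates
 * the register value table and runs the peephole handlers over every block.
 */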
void be_peephole_opt(ir_graph *irg)
{
	unsigned n_classes;
	unsigned i;

	/* barrier nodes are used for register allocation. They hinder
	 * peephole optimizations, so remove them here. */
	kill_barriers(irg);

	/* we sometimes find BadE nodes in float apps like optest_float.c or
	 * kahansum.c for example... */
	be_liveness_invalidate(be_get_irg_liveness(irg));
	be_liveness_assure_sets(be_assure_liveness(irg));

	arch_env = be_get_irg_arch_env(irg);
	lv       = be_get_irg_liveness(irg);

	n_classes = arch_env_get_n_reg_class(arch_env);
	register_values = XMALLOCN(ir_node**, n_classes);
	for (i = 0; i < n_classes; ++i) {
		const arch_register_class_t *cls    = arch_env_get_reg_class(arch_env, i);
		unsigned                     n_regs = arch_register_class_n_regs(cls);
		register_values[i] = XMALLOCN(ir_node*, n_regs);
	}

	irg_block_walk_graph(irg, process_block, NULL, NULL);

	for (i = 0; i < n_classes; ++i) {
		xfree(register_values[i]);
	}
	xfree(register_values);
}
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_peephole);

void be_init_peephole(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.be.peephole");
}