/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Peephole optimisation framework that keeps track of which registers contain which values.
 * @author  Matthias Braun
 */
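/*
 * Example (a sketch, not part of the framework itself): a backend registers
 * a peephole handler by storing it in the generic function pointer of the
 * node's opcode; process_block() below fetches it from op->ops.generic.
 * The opcode op_ia32_IncSP and the handler name are illustrative assumptions.
 *
 *     static void optimise_ia32_IncSP(ir_node *node)
 *     {
 *         be_peephole_IncSP_IncSP(node);
 *     }
 *
 *     op_ia32_IncSP->ops.generic = (op_func) optimise_ia32_IncSP;
 */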
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <assert.h>
#include <string.h>

#include "bepeephole.h"

#include "iredges_t.h"
#include "irgwalk.h"
#include "irgmod.h"
#include "irprintf.h"
#include "error.h"

#include "beirg_t.h"
#include "belive_t.h"
#include "bearch_t.h"
#include "besched_t.h"
#include "bemodule.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

static const arch_env_t *arch_env;
static be_lv_t          *lv;
static ir_node          *current_node;
ir_node                ***register_values;
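/*
 * Example (a sketch): while a peephole handler runs, register_values maps
 * (register class index, register index) to the node whose result currently
 * lives in that register, or NULL if unknown. A handler holding some
 * register reg (an assumed variable) could query it like this:
 *
 *     const arch_register_class_t *cls   = arch_register_get_class(reg);
 *     ir_node                     *value =
 *         register_values[arch_register_class_index(cls)]
 *                        [arch_register_get_index(reg)];
 */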
static void clear_reg_value(ir_node *node)
{
	const arch_register_t       *reg;
	const arch_register_class_t *cls;
	unsigned                     reg_idx;
	unsigned                     cls_idx;

	if(!mode_is_data(get_irn_mode(node)))
		return;

	reg = arch_get_irn_register(arch_env, node);
	if(reg == NULL) {
		panic("No register assigned at %+F\n", node);
	}
	if(arch_register_type_is(reg, virtual))
		return;

	cls     = arch_register_get_class(reg);
	reg_idx = arch_register_get_index(reg);
	cls_idx = arch_register_class_index(cls);

	//assert(register_values[cls_idx][reg_idx] != NULL);
	DBG((dbg, LEVEL_1, "Clear Register %s\n", reg->name));
	register_values[cls_idx][reg_idx] = NULL;
}
static void set_reg_value(ir_node *node)
{
	const arch_register_t       *reg;
	const arch_register_class_t *cls;
	unsigned                     reg_idx;
	unsigned                     cls_idx;

	if(!mode_is_data(get_irn_mode(node)))
		return;

	reg = arch_get_irn_register(arch_env, node);
	if(reg == NULL) {
		panic("No register assigned at %+F\n", node);
	}
	if(arch_register_type_is(reg, virtual))
		return;

	cls     = arch_register_get_class(reg);
	reg_idx = arch_register_get_index(reg);
	cls_idx = arch_register_class_index(cls);

	DBG((dbg, LEVEL_1, "Set Register %s: %+F\n", reg->name, node));
	register_values[cls_idx][reg_idx] = node;
}
static void clear_defs(ir_node *node)
{
	/* clear values defined */
	if(get_irn_mode(node) == mode_T) {
		const ir_edge_t *edge;
		foreach_out_edge(node, edge) {
			ir_node *proj = get_edge_src_irn(edge);
			clear_reg_value(proj);
		}
	} else {
		clear_reg_value(node);
	}
}
static void set_uses(ir_node *node)
{
	int i, arity;

	/* set values used */
	arity = get_irn_arity(node);
	for(i = 0; i < arity; ++i) {
		ir_node *in = get_irn_n(node, i);
		set_reg_value(in);
	}
}
void be_peephole_before_exchange(const ir_node *old_node, ir_node *new_node)
{
	const arch_register_t       *reg;
	const arch_register_class_t *cls;
	unsigned                     reg_idx;
	unsigned                     cls_idx;

	DBG((dbg, LEVEL_1, "About to exchange %+F with %+F\n", old_node, new_node));

	if(old_node == current_node) {
		if(is_Proj(new_node)) {
			current_node = get_Proj_pred(new_node);
		} else {
			current_node = new_node;
		}
	}

	if(!mode_is_data(get_irn_mode(old_node)))
		return;

	reg = arch_get_irn_register(arch_env, old_node);
	if(reg == NULL) {
		panic("No register assigned at %+F\n", old_node);
	}

	cls     = arch_register_get_class(reg);
	reg_idx = arch_register_get_index(reg);
	cls_idx = arch_register_class_index(cls);

	if(register_values[cls_idx][reg_idx] == old_node) {
		register_values[cls_idx][reg_idx] = new_node;
	}

	be_liveness_remove(lv, old_node);
}
void be_peephole_after_exchange(ir_node *new_node)
{
	be_liveness_introduce(lv, new_node);
}
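/*
 * Example (a sketch): a peephole handler replacing old_node by new_node is
 * expected to bracket the rewiring with the two callbacks above, so that
 * register_values, current_node and liveness stay consistent;
 * be_peephole_IncSP_IncSP() below follows this pattern. The rewiring step
 * shown here is hypothetical:
 *
 *     be_peephole_before_exchange(old_node, new_node);
 *     exchange(old_node, new_node);    // or any other rewiring
 *     be_peephole_after_exchange(new_node);
 */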
static void process_block(ir_node *block, void *data)
{
	unsigned n_classes;
	unsigned i;
	int      l;
	(void) data;

	/* construct initial register assignment */
	n_classes = arch_env_get_n_reg_class(arch_env);
	for(i = 0; i < n_classes; ++i) {
		const arch_register_class_t *cls    = arch_env_get_reg_class(arch_env, i);
		unsigned                     n_regs = arch_register_class_n_regs(cls);
		memset(register_values[i], 0, sizeof(ir_node*) * n_regs);
	}

	assert(lv->nodes && "live sets must be computed");
	DBG((dbg, LEVEL_1, "\nProcessing block %+F (from end)\n", block));
	be_lv_foreach(lv, block, be_lv_state_end, l) {
		ir_node *node = be_lv_get_irn(lv, block, l);
		set_reg_value(node);
	}
	DBG((dbg, LEVEL_1, "\nstart processing\n"));

	/* walk the block from last insn to the first */
	current_node = sched_last(block);
	for( ; !sched_is_begin(current_node);
			current_node = sched_prev(current_node)) {
		ir_op             *op;
		ir_node           *last;
		peephole_opt_func  func;

		/* Phis sit at the block start, nothing to optimise above them */
		if(is_Phi(current_node))
			break;

		clear_defs(current_node);
		set_uses(current_node);

		op   = get_irn_op(current_node);
		func = (peephole_opt_func) op->ops.generic;
		if(func == NULL)
			continue;

		last = current_node;
		func(current_node);
		/* was the current node replaced? */
		if(current_node != last) {
			set_uses(current_node);
		}
	}
}
/**
 * Walk through the block schedule and skip all barrier nodes.
 */
static void skip_barrier(ir_node *ret_blk, ir_graph *irg) {
	ir_node *irn;

	sched_foreach_reverse(ret_blk, irn) {
		if (be_is_Barrier(irn)) {
			const ir_edge_t *edge, *next;

			foreach_out_edge_safe(irn, edge, next) {
				ir_node *proj = get_edge_src_irn(edge);
				int      pn   = (int) get_Proj_proj(proj);
				ir_node *pred = get_irn_n(irn, pn);

				edges_reroute_kind(proj, pred, EDGE_KIND_NORMAL, irg);
				edges_reroute_kind(proj, pred, EDGE_KIND_DEP, irg);
			}
			sched_remove(irn);
			kill_node(irn);
			break;
		}
	}
}
/**
 * Kill the Barrier nodes for better peephole optimization.
 */
static void kill_barriers(ir_graph *irg) {
	ir_node *end_blk = get_irg_end_block(irg);
	ir_node *start_blk;
	int i;

	/* skip the barrier on all return blocks */
	for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
		ir_node *be_ret  = get_Block_cfgpred(end_blk, i);
		ir_node *ret_blk = get_nodes_block(be_ret);

		skip_barrier(ret_blk, irg);
	}

	/* skip the barrier on the start block */
	start_blk = get_irg_start_block(irg);
	skip_barrier(start_blk, irg);
}
/**
 * Tries to optimize a be_IncSP node with its previous IncSP node.
 * Must be run from a be_peephole_opt() context.
 */
void be_peephole_IncSP_IncSP(ir_node *node)
{
	int      pred_offs;
	int      curr_offs;
	int      offs;
	ir_node *pred = be_get_IncSP_pred(node);
	ir_node *predpred;

	if (!be_is_IncSP(pred))
		return;

	/* the pred IncSP must be used by this node only */
	if (get_irn_n_edges(pred) > 1)
		return;

	pred_offs = be_get_IncSP_offset(pred);
	curr_offs = be_get_IncSP_offset(node);

	if (pred_offs == BE_STACK_FRAME_SIZE_EXPAND) {
		if (curr_offs != BE_STACK_FRAME_SIZE_SHRINK) {
			return;
		}
		offs = 0;
	} else if (pred_offs == BE_STACK_FRAME_SIZE_SHRINK) {
		if (curr_offs != BE_STACK_FRAME_SIZE_EXPAND) {
			return;
		}
		offs = 0;
	} else if (curr_offs == BE_STACK_FRAME_SIZE_EXPAND ||
	           curr_offs == BE_STACK_FRAME_SIZE_SHRINK) {
		return;
	} else {
		offs = curr_offs + pred_offs;
	}

	/* add pred offset to ours and remove pred IncSP */
	be_set_IncSP_offset(node, offs);

	predpred = be_get_IncSP_pred(pred);
	be_peephole_before_exchange(pred, predpred);

	/* rewire dependency edges */
	edges_reroute_kind(pred, predpred, EDGE_KIND_DEP, current_ir_graph);
	be_set_IncSP_pred(node, predpred);
	sched_remove(pred);
	kill_node(pred);

	be_peephole_after_exchange(predpred);
}
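/*
 * Worked example (assumed offsets): an IncSP of +8 whose only user is an
 * IncSP of +16 folds into a single IncSP of +24; a matched
 * BE_STACK_FRAME_SIZE_EXPAND/BE_STACK_FRAME_SIZE_SHRINK pair cancels to an
 * offset of 0 instead.
 *
 *     y = be_IncSP(x, 8)
 *     z = be_IncSP(y, 16)      =>      z = be_IncSP(x, 24)
 */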
void be_peephole_opt(be_irg_t *birg)
{
	ir_graph *irg = be_get_birg_irg(birg);
	unsigned  n_classes;
	unsigned  i;

	/* barrier nodes are used for register allocation. They hinder
	 * peephole optimizations, so remove them here. */
	kill_barriers(irg);

	/* we sometimes find BadE nodes in float apps like optest_float.c or
	 * kahansum.c for example... */
	be_liveness_invalidate(birg->lv);
	be_liveness_assure_sets(be_assure_liveness(birg));

	arch_env = be_get_birg_arch_env(birg);
	lv       = be_get_birg_liveness(birg);

	n_classes = arch_env_get_n_reg_class(arch_env);
	register_values = alloca(sizeof(register_values[0]) * n_classes);
	for(i = 0; i < n_classes; ++i) {
		const arch_register_class_t *cls    = arch_env_get_reg_class(arch_env, i);
		unsigned                     n_regs = arch_register_class_n_regs(cls);
		register_values[i] = alloca(sizeof(ir_node*) * n_regs);
	}

	irg_block_walk_graph(irg, process_block, NULL, NULL);
}
void be_peephole_init(void)
{
	clear_irp_opcodes_generic_func();
}
void be_init_peephole(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.be.peephole");
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_peephole);