X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbepeephole.c;h=6ff06f5f6b90c9cb88ba89a21227a78148cfedc7;hb=12e124bcc9ed93a4037c5122d23e8a6c87f5ef46;hp=48e32bfd9af268a70a74d882f3c32b7e9ad23968;hpb=8539e8b01310e681fc80a7135eb30b09a76cbf47;p=libfirm

diff --git a/ir/be/bepeephole.c b/ir/be/bepeephole.c
index 48e32bfd9..6ff06f5f6 100644
--- a/ir/be/bepeephole.c
+++ b/ir/be/bepeephole.c
@@ -1,44 +1,32 @@
 /*
- * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
- *
  * This file is part of libFirm.
- *
- * This file may be distributed and/or modified under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation and appearing in the file LICENSE.GPL included in the
- * packaging of this file.
- *
- * Licensees holding valid libFirm Professional Edition licenses may use
- * this file in accordance with the libFirm Commercial License.
- * Agreement provided with the Software.
- *
- * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
- * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE.
+ * Copyright (C) 2012 University of Karlsruhe.
  */
 
 /**
  * @file
  * @brief       Peephole optimisation framework keeps track of which registers contain which values
  * @author      Matthias Braun
- * @version     $Id$
  */
-#ifdef HAVE_CONFIG_H
 #include "config.h"
-#endif
 
+#include "array_t.h"
 #include "bepeephole.h"
 
 #include "iredges_t.h"
 #include "irgwalk.h"
 #include "irprintf.h"
+#include "ircons.h"
+#include "irgmod.h"
+#include "heights.h"
 #include "error.h"
 
-#include "beirg_t.h"
+#include "beirg.h"
 #include "belive_t.h"
-#include "bearch_t.h"
-#include "benode_t.h"
-#include "besched_t.h"
+#include "bearch.h"
+#include "beintlive_t.h"
+#include "benode.h"
+#include "besched.h"
 #include "bemodule.h"
 
 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
@@ -46,69 +34,54 @@ DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
 static const arch_env_t *arch_env;
 static be_lv_t          *lv;
 static ir_node          *current_node;
-ir_node                ***register_values;
+ir_node                **register_values;
 
 static void clear_reg_value(ir_node *node)
 {
-	const arch_register_t       *reg;
-	const arch_register_class_t *cls;
-	unsigned                     reg_idx;
-	unsigned                     cls_idx;
+	const arch_register_t *reg;
+	unsigned               reg_idx;
 
-	if(!mode_is_data(get_irn_mode(node)))
+	if (!mode_is_data(get_irn_mode(node)))
 		return;
 
-	reg = arch_get_irn_register(arch_env, node);
-	if(reg == NULL) {
-		panic("No register assigned at %+F\n", node);
+	reg = arch_get_irn_register(node);
+	if (reg == NULL) {
+		panic("No register assigned at %+F", node);
 	}
-	if(arch_register_type_is(reg, virtual))
+	if (reg->type & arch_register_type_virtual)
 		return;
-	cls     = arch_register_get_class(reg);
-	reg_idx = arch_register_get_index(reg);
-	cls_idx = arch_register_class_index(cls);
+	reg_idx = reg->global_index;
 
-	//assert(register_values[cls_idx][reg_idx] != NULL);
-	DBG((dbg, LEVEL_1, "Clear Register %s\n", reg->name));
-	register_values[cls_idx][reg_idx] = NULL;
+	DB((dbg, LEVEL_1, "Clear Register %s\n", reg->name));
+	register_values[reg_idx] = NULL;
 }
 
 static void set_reg_value(ir_node *node)
 {
-	const arch_register_t       *reg;
-	const arch_register_class_t *cls;
-	unsigned                     reg_idx;
-	unsigned                     cls_idx;
+	const arch_register_t *reg;
+	unsigned               reg_idx;
 
-	if(!mode_is_data(get_irn_mode(node)))
+	if (!mode_is_data(get_irn_mode(node)))
 		return;
 
-	reg = arch_get_irn_register(arch_env, node);
-	if(reg == NULL) {
-		panic("No register assigned at %+F\n", node);
+	reg = arch_get_irn_register(node);
+	if (reg == NULL) {
+		panic("No register assigned at %+F", node);
 	}
-	if(arch_register_type_is(reg, virtual))
+	if (reg->type & arch_register_type_virtual)
 		return;
-	cls     = arch_register_get_class(reg);
-	reg_idx = arch_register_get_index(reg);
-	cls_idx = arch_register_class_index(cls);
+	reg_idx = reg->global_index;
 
-	DBG((dbg, LEVEL_1, "Set Register %s: %+F\n", reg->name, node));
-	register_values[cls_idx][reg_idx] = node;
+	DB((dbg, LEVEL_1, "Set Register %s: %+F\n", reg->name, node));
+	register_values[reg_idx] = node;
 }
 
 static void clear_defs(ir_node *node)
 {
 	/* clear values defined */
-	if(get_irn_mode(node) == mode_T) {
-		const ir_edge_t *edge;
-		foreach_out_edge(node, edge) {
-			ir_node *proj = get_edge_src_irn(edge);
-			clear_reg_value(proj);
-		}
-	} else {
-		clear_reg_value(node);
-	}
+	be_foreach_value(node, value,
+		clear_reg_value(value);
+	);
 }
 
 static void set_uses(ir_node *node)
@@ -117,206 +90,323 @@ static void set_uses(ir_node *node)
 
 	/* set values used */
 	arity = get_irn_arity(node);
-	for(i = 0; i < arity; ++i) {
+	for (i = 0; i < arity; ++i) {
 		ir_node *in = get_irn_n(node, i);
 		set_reg_value(in);
 	}
 }
 
-void be_peephole_before_exchange(const ir_node *old_node, ir_node *new_node)
+/**
+ * must be called from peephole optimisations before a node will be killed
+ * and its users will be redirected to new_node.
+ * so bepeephole can update its internal state.
+ *
+ * Note: killing a node and rewiring is only allowed if new_node produces
+ * the same registers as old_node.
+ */
+static void be_peephole_before_exchange(const ir_node *old_node,
+                                        ir_node *new_node)
 {
-	const arch_register_t       *reg;
-	const arch_register_class_t *cls;
-	unsigned                     reg_idx;
-	unsigned                     cls_idx;
-
-	DBG((dbg, LEVEL_1, "About to exchange %+F with %+F\n", old_node, new_node));
-
-	if(old_node == current_node) {
-		if(is_Proj(new_node)) {
-			current_node = get_Proj_pred(new_node);
-		} else {
-			current_node = new_node;
-		}
+	const arch_register_t *reg;
+	unsigned               reg_idx;
+	bool                   old_is_current = false;
+
+	DB((dbg, LEVEL_1, "About to exchange and kill %+F with %+F\n", old_node, new_node));
+
+	assert(sched_is_scheduled(skip_Proj_const(old_node)));
+	assert(sched_is_scheduled(skip_Proj(new_node)));
+
+	if (current_node == old_node) {
+		old_is_current = true;
+
+		/* next node to be processed will be killed. Its scheduling predecessor
+		 * must be processed next. */
+		current_node = sched_next(current_node);
+		assert (!is_Bad(current_node));
+
+		/* we can't handle liveness updates correctly when exchange current node
+		 * with something behind it */
+		assert(value_dominates(skip_Proj(new_node), skip_Proj_const(old_node)));
 	}
 
-	if(!mode_is_data(get_irn_mode(old_node)))
+	if (!mode_is_data(get_irn_mode(old_node)))
 		return;
 
-	reg = arch_get_irn_register(arch_env, old_node);
-	if(reg == NULL) {
-		panic("No register assigned at %+F\n", old_node);
+	reg = arch_get_irn_register(old_node);
+	if (reg == NULL) {
+		panic("No register assigned at %+F", old_node);
 	}
-	cls     = arch_register_get_class(reg);
-	reg_idx = arch_register_get_index(reg);
-	cls_idx = arch_register_class_index(cls);
+	assert(reg == arch_get_irn_register(new_node) &&
+	       "KILLING a node and replacing by different register is not allowed");
 
-	if(register_values[cls_idx][reg_idx] == old_node) {
-		register_values[cls_idx][reg_idx] = new_node;
+	reg_idx = reg->global_index;
+	if (register_values[reg_idx] == old_node || old_is_current) {
+		register_values[reg_idx] = new_node;
 	}
 
 	be_liveness_remove(lv, old_node);
 }
 
-void be_peephole_after_exchange(ir_node *new_node)
+void be_peephole_exchange(ir_node *old, ir_node *nw)
 {
-	be_liveness_introduce(lv, new_node);
+	be_peephole_before_exchange(old, nw);
+	sched_remove(old);
+	exchange(old, nw);
+	be_liveness_introduce(lv, nw);
 }
 
+/**
+ * block-walker: run peephole optimization on the given block.
+ */
 static void process_block(ir_node *block, void *data)
 {
-	unsigned n_classes;
-	unsigned i;
-	int l;
 	(void) data;
 
 	/* construct initial register assignment */
-	n_classes = arch_env_get_n_reg_class(arch_env);
-	for(i = 0; i < n_classes; ++i) {
-		const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
-		unsigned n_regs = arch_register_class_n_regs(cls);
-		memset(register_values[i], 0, sizeof(ir_node*) * n_regs);
-	}
+	memset(register_values, 0, sizeof(ir_node*) * arch_env->n_registers);
 
-	assert(lv->nodes && "live sets must be computed");
-	DBG((dbg, LEVEL_1, "\nProcessing block %+F (from end)\n", block));
-	be_lv_foreach(lv, block, be_lv_state_end, l) {
-		ir_node *node = be_lv_get_irn(lv, block, l);
+	assert(lv->sets_valid && "live sets must be computed");
+	DB((dbg, LEVEL_1, "\nProcessing block %+F (from end)\n", block));
+	be_lv_foreach(lv, block, be_lv_state_end, node) {
 		set_reg_value(node);
 	}
-	DBG((dbg, LEVEL_1, "\nstart processing\n"));
+	DB((dbg, LEVEL_1, "\nstart processing\n"));
 
 	/* walk the block from last insn to the first */
 	current_node = sched_last(block);
-	for( ; !sched_is_begin(current_node);
+	for ( ; !sched_is_begin(current_node);
 			current_node = sched_prev(current_node)) {
 		ir_op             *op;
-		ir_node           *last;
-		peephole_opt_func  func;
+		peephole_opt_func  peephole_node;
 
-		if(is_Phi(current_node))
+		assert(!is_Bad(current_node));
+		if (is_Phi(current_node))
 			break;
 
 		clear_defs(current_node);
 		set_uses(current_node);
 
-		op   = get_irn_op(current_node);
-		func = (peephole_opt_func) op->ops.generic;
-		if(func == NULL)
+		op            = get_irn_op(current_node);
+		peephole_node = (peephole_opt_func)op->ops.generic;
+		if (peephole_node == NULL)
 			continue;
 
-		last = current_node;
-		func(current_node);
-		/* was the current node replaced? */
-		if(current_node != last) {
-			set_uses(current_node);
-		}
+		DB((dbg, LEVEL_2, "optimize %+F\n", current_node));
+		peephole_node(current_node);
+		assert(!is_Bad(current_node));
 	}
 }
 
 /**
- * Walk through the block schedule and skip all barrier nodes.
+ * Check whether the node has only one user. Explicitly ignore the anchor.
  */
-static void skip_barrier(ir_node *ret_blk) {
-	ir_node *irn;
+bool be_has_only_one_user(ir_node *node)
+{
+	int n = get_irn_n_edges(node);
+	int n_users;
 
-	sched_foreach_reverse(ret_blk, irn) {
-		int i;
+	if (n <= 1)
+		return 1;
 
-		for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
-			ir_node *proj = get_irn_n(irn, i);
+	n_users = 0;
+	foreach_out_edge(node, edge) {
+		ir_node *src = get_edge_src_irn(edge);
+		/* ignore anchor and keep-alive edges */
+		if (is_Anchor(src) || is_End(src))
+			continue;
+		n_users++;
+	}
 
-			if (is_Proj(proj)) {
-				ir_node *barrier = get_Proj_pred(proj);
+	return n_users == 1;
+}
 
-				if (be_is_Barrier(barrier)) {
-					int      pn   = (int)get_Proj_proj(proj);
-					ir_node *pred = get_irn_n(barrier, pn);
+static inline bool overlapping_regs(const arch_register_t *reg0,
+	const arch_register_req_t *req0, const arch_register_t *reg1,
+	const arch_register_req_t *req1)
+{
+	if (reg0 == NULL || reg1 == NULL)
+		return false;
+	return reg0->global_index < (unsigned)reg1->global_index + req1->width
+		&& reg1->global_index < (unsigned)reg0->global_index + req0->width;
+}
 
-					set_irn_n(irn, i, pred);
-					if (sched_is_scheduled(barrier))
-						sched_remove(barrier);
-				}
+bool be_can_move_down(ir_heights_t *heights, const ir_node *node,
+                      const ir_node *before)
+{
+	assert(get_nodes_block(node) == get_nodes_block(before));
+	assert(sched_get_time_step(node) < sched_get_time_step(before));
+
+	int      node_arity = get_irn_arity(node);
+	ir_node *schedpoint = sched_next(node);
+
+	while (schedpoint != before) {
+		/* schedpoint must not use our computed values */
+		if (heights_reachable_in_block(heights, schedpoint, node))
+			return false;
+
+		/* schedpoint must not overwrite registers of our inputs */
+		for (int i = 0; i < node_arity; ++i) {
+			ir_node               *in  = get_irn_n(node, i);
+			const arch_register_t *reg = arch_get_irn_register(in);
+			if (reg == NULL)
+				continue;
+			const arch_register_req_t *in_req
+				= arch_get_irn_register_req_in(node, i);
+			be_foreach_out(schedpoint, o) {
+				const arch_register_t *outreg
+					= arch_get_irn_register_out(schedpoint, o);
+				const arch_register_req_t *outreq
+					= arch_get_irn_register_req_out(schedpoint, o);
+				if (overlapping_regs(reg, in_req, outreg, outreq))
+					return false;
 			}
 		}
 
-		for (i = get_irn_deps(irn) - 1; i >= 0; --i) {
-			ir_node *proj = get_irn_dep(irn, i);
-			if (is_Proj(proj)) {
-				ir_node *barrier = get_Proj_pred(proj);
+		schedpoint = sched_next(schedpoint);
+	}
+	return true;
+}
 
-				if (be_is_Barrier(barrier)) {
-					int      pn   = (int)get_Proj_proj(proj);
-					ir_node *pred = get_irn_n(barrier, pn);
+bool be_can_move_up(ir_heights_t *heights, const ir_node *node,
+                    const ir_node *after)
+{
+	const ir_node *node_block  = get_nodes_block(node);
+	const ir_node *after_block = get_block_const(after);
+	const ir_node *schedpoint;
+	if (node_block != after_block) {
+		/* currently we can move up exactly 1 block */
+		assert(get_Block_cfgpred_block(node_block, 0) == after_block);
+		ir_node *first = sched_first(node_block);
+
+		/* do not move nodes changing memory */
+		if (is_memop(node)) {
+			ir_node *meminput = get_memop_mem(node);
+			if (!is_NoMem(meminput))
+				return false;
+		}
 
-					set_irn_dep(irn, i, pred);
-					if (sched_is_scheduled(barrier))
-						sched_remove(barrier);
+		/* make sure we can move to the beginning of the succ block */
+		if (node != first && !be_can_move_up(heights, node, sched_prev(first)))
+			return false;
+
+		/* check if node overrides any of live-in values of other successors */
+		ir_graph *irg = get_irn_irg(node);
+		be_lv_t  *lv  = be_get_irg_liveness(irg);
+		foreach_block_succ(after_block, edge) {
+			ir_node *succ = get_edge_src_irn(edge);
+			if (succ == node_block)
+				continue;
+
+			be_lv_foreach(lv, succ, be_lv_state_in, live_node) {
+				const arch_register_t     *reg = arch_get_irn_register(live_node);
+				const arch_register_req_t *req = arch_get_irn_register_req(live_node);
+				be_foreach_out(node, o) {
+					const arch_register_t *outreg
+						= arch_get_irn_register_out(node, o);
+					const arch_register_req_t *outreq
						= arch_get_irn_register_req_out(node, o);
+					if (overlapping_regs(outreg, outreq, reg, req))
+						return false;
+				}
+			}
+			sched_foreach(succ, phi) {
+				if (!is_Phi(phi))
+					break;
+				const arch_register_t     *reg = arch_get_irn_register(phi);
+				const arch_register_req_t *req = arch_get_irn_register_req(phi);
+				be_foreach_out(node, o) {
+					const arch_register_t *outreg
+						= arch_get_irn_register_out(node, o);
+					const arch_register_req_t *outreq
+						= arch_get_irn_register_req_out(node, o);
+					if (overlapping_regs(outreg, outreq, reg, req))
+						return false;
 				}
 			}
 		}
+		schedpoint = sched_last(after_block);
+	} else {
+		schedpoint = sched_prev(node);
+	}
+
+	/* move schedule upwards until we hit the "after" node */
+	while (schedpoint != after) {
+		/* TODO: the following heights query only works for nodes in the same
+		 * block, otherwise we have to be conservative here */
+		if (get_nodes_block(node) != get_nodes_block(schedpoint))
+			return false;
+		/* node must not depend on schedpoint */
+		if (heights_reachable_in_block(heights, node, schedpoint))
+			return false;
+
+		/* node must not overwrite registers used by schedpoint */
+		int arity = get_irn_arity(schedpoint);
+		for (int i = 0; i < arity; ++i) {
+			const arch_register_t *reg
+				= arch_get_irn_register_in(schedpoint, i);
+			if (reg == NULL)
+				continue;
+			const arch_register_req_t *in_req
+				= arch_get_irn_register_req_in(schedpoint, i);
+			be_foreach_out(node, o) {
+				const arch_register_t *outreg
+					= arch_get_irn_register_out(node, o);
+				const arch_register_req_t *outreq
+					= arch_get_irn_register_req_out(node, o);
+				if (overlapping_regs(outreg, outreq, reg, in_req))
+					return false;
+			}
+		}
+
+		schedpoint = sched_prev(schedpoint);
 	}
+	return true;
 }
 
-/**
- * Kill the Barrier nodes for better peephole optimization.
+/*
+ * Tries to optimize a beIncSP node with its previous IncSP node.
+ * Must be run from a be_peephole_opt() context.
  */
-static void kill_barriers(ir_graph *irg) {
-	ir_node *end_blk = get_irg_end_block(irg);
-	ir_node *start_blk;
-	int i;
+ir_node *be_peephole_IncSP_IncSP(ir_node *node)
+{
+	int      pred_offs;
+	int      curr_offs;
+	int      offs;
+	ir_node *pred = be_get_IncSP_pred(node);
 
-	/* skip the barrier on all return blocks */
-	for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
-		ir_node *be_ret  = get_Block_cfgpred(end_blk, i);
-		ir_node *ret_blk = get_nodes_block(be_ret);
+	if (!be_is_IncSP(pred))
		return node;
 
-		skip_barrier(ret_blk);
-	}
+	if (!be_has_only_one_user(pred))
+		return node;
 
-	/* skip the barrier on the start block */
-	start_blk = get_irg_start_block(irg);
-	skip_barrier(start_blk);
-}
+	pred_offs = be_get_IncSP_offset(pred);
+	curr_offs = be_get_IncSP_offset(node);
+	offs = curr_offs + pred_offs;
+
+	/* add node offset to pred and remove our IncSP */
+	be_set_IncSP_offset(pred, offs);
+	be_peephole_exchange(node, pred);
+	return pred;
+}
 
-void be_peephole_opt(be_irg_t *birg)
+void be_peephole_opt(ir_graph *irg)
 {
-	ir_graph *irg = be_get_birg_irg(birg);
-	unsigned n_classes;
-	unsigned i;
-
-	/* barrier nodes are used for register allocations. They hinders
-	 * peephole optimizations, so remove them here. */
-	kill_barriers(irg);
-
-	/* we sometimes find BadE nodes in float apps like optest_float.c or
-	 * kahansum.c for example... */
-	be_liveness_invalidate(birg->lv);
-	be_liveness_assure_sets(be_assure_liveness(birg));
-
-	arch_env = be_get_birg_arch_env(birg);
-	lv       = be_get_birg_liveness(birg);
-
-	n_classes = arch_env_get_n_reg_class(arch_env);
-	register_values = alloca(sizeof(register_values[0]) * n_classes);
-	for(i = 0; i < n_classes; ++i) {
-		const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
-		unsigned n_regs = arch_register_class_n_regs(cls);
-		register_values[i] = alloca(sizeof(ir_node*) * n_regs);
-	}
+	be_assure_live_sets(irg);
+
+	arch_env = be_get_irg_arch_env(irg);
+	lv       = be_get_irg_liveness(irg);
+
+	register_values = XMALLOCN(ir_node*, arch_env->n_registers);
 
 	irg_block_walk_graph(irg, process_block, NULL, NULL);
-}
 
-void be_peephole_init(void)
-{
-	clear_irp_opcodes_generic_func();
+	xfree(register_values);
 }
 
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_peephole)
 void be_init_peephole(void)
 {
 	FIRM_DBG_REGISTER(dbg, "firm.be.peephole");
 }
-
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spillbelady);
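
For context (not part of the patch above): the framework looks up a handler for each scheduled node through op->ops.generic, cast to peephole_opt_func, and a rewrite inside such a handler goes through be_peephole_exchange() so that liveness, the schedule and register_values[] stay consistent. Below is a minimal sketch of how a backend could hook into the pass, using only functions visible in this patch (be_peephole_IncSP_IncSP, be_peephole_opt) plus the op_be_IncSP opcode and op_func type from libFirm; the wrapper names peephole_be_IncSP and my_backend_peephole are illustrative, not existing API.

#include "bepeephole.h"
#include "benode.h"

/* illustrative handler: fold an IncSP into a preceding IncSP using the
 * helper added in this patch; the framework calls it for every be_IncSP
 * node while walking the schedule backwards */
static void peephole_be_IncSP(ir_node *node)
{
	be_peephole_IncSP_IncSP(node);
}

/* illustrative driver: install the handler in the opcode's generic
 * function pointer (this is what process_block() reads) and run the
 * peephole pass over the graph */
static void my_backend_peephole(ir_graph *irg)
{
	op_be_IncSP->ops.generic = (op_func) peephole_be_IncSP;
	be_peephole_opt(irg);
}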