X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbepeephole.c;h=c2d4d1615e8242c47976e8bfe1462b8d6b73786a;hb=51d5090095b2f4f2285ff1b7ccd13c72a7b5d9d3;hp=8ca19a039ec456f5dbe0756ef11ffa9dff77c3d9;hpb=fc7a995f8446cfcfbb6f41cb53ed5657947084ff;p=libfirm
diff --git a/ir/be/bepeephole.c b/ir/be/bepeephole.c
index 8ca19a039..c2d4d1615 100644
--- a/ir/be/bepeephole.c
+++ b/ir/be/bepeephole.c
@@ -23,23 +23,22 @@
  * @author  Matthias Braun
  * @version $Id$
  */
-#ifdef HAVE_CONFIG_H
 #include "config.h"
-#endif
 
 #include "bepeephole.h"
 
 #include "iredges_t.h"
 #include "irgwalk.h"
 #include "irprintf.h"
+#include "ircons.h"
 #include "irgmod.h"
 #include "error.h"
 
-#include "beirg_t.h"
+#include "beirg.h"
 #include "belive_t.h"
-#include "bearch_t.h"
-#include "benode_t.h"
-#include "besched_t.h"
+#include "bearch.h"
+#include "benode.h"
+#include "besched.h"
 #include "bemodule.h"
 
 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
@@ -47,7 +46,6 @@ DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
 static const arch_env_t *arch_env;
 static be_lv_t *lv;
 static ir_node *current_node;
-static ir_node *prev_node;
 ir_node ***register_values;
 
 static void clear_reg_value(ir_node *node)
@@ -60,9 +58,9 @@ static void clear_reg_value(ir_node *node)
 	if(!mode_is_data(get_irn_mode(node)))
 		return;
 
-	reg = arch_get_irn_register(arch_env, node);
+	reg = arch_get_irn_register(node);
 	if(reg == NULL) {
-		panic("No register assigned at %+F\n", node);
+		panic("No register assigned at %+F", node);
 	}
 	if(arch_register_type_is(reg, virtual))
 		return;
@@ -85,9 +83,9 @@ static void set_reg_value(ir_node *node)
 	if(!mode_is_data(get_irn_mode(node)))
 		return;
 
-	reg = arch_get_irn_register(arch_env, node);
+	reg = arch_get_irn_register(node);
 	if(reg == NULL) {
-		panic("No register assigned at %+F\n", node);
+		panic("No register assigned at %+F", node);
 	}
 	if(arch_register_type_is(reg, virtual))
 		return;
@@ -125,47 +123,20 @@ static void set_uses(ir_node *node)
 	}
 }
 
-void be_peephole_before_exchange(const ir_node *old_node, ir_node *new_node)
+void be_peephole_new_node(ir_node * nw)
 {
-	const arch_register_t *reg;
-	const arch_register_class_t *cls;
-	unsigned reg_idx;
-	unsigned cls_idx;
-
-	DBG((dbg, LEVEL_1, "About to exchange %+F with %+F\n", old_node, new_node));
-
-	if (old_node == current_node) {
-		if (is_Proj(new_node)) {
-			current_node = get_Proj_pred(new_node);
-		} else {
-			current_node = new_node;
-		}
-	}
-
-	if (!mode_is_data(get_irn_mode(old_node)))
-		return;
-
-	reg = arch_get_irn_register(arch_env, old_node);
-	if (reg == NULL) {
-		panic("No register assigned at %+F\n", old_node);
-	}
-	cls = arch_register_get_class(reg);
-	reg_idx = arch_register_get_index(reg);
-	cls_idx = arch_register_class_index(cls);
-
-	if (register_values[cls_idx][reg_idx] == old_node) {
-		register_values[cls_idx][reg_idx] = new_node;
-	}
-
-	be_liveness_remove(lv, old_node);
-}
-
-void be_peephole_after_exchange(ir_node *new_node)
-{
-	be_liveness_introduce(lv, new_node);
+	be_liveness_introduce(lv, nw);
 }
 
-void be_peephole_before_exchange_and_kill(const ir_node *old_node, ir_node *new_node)
+/**
+ * Must be called from peephole optimisations before a node will be killed
+ * and its users will be redirected to new_node, so that bepeephole can
+ * update its internal state.
+ *
+ * Note: killing a node and rewiring is only allowed if new_node produces
+ * the same registers as old_node.
+ */ +void be_peephole_before_exchange(const ir_node *old_node, ir_node *new_node) { const arch_register_t *reg; const arch_register_class_t *cls; @@ -174,20 +145,21 @@ void be_peephole_before_exchange_and_kill(const ir_node *old_node, ir_node *new_ DBG((dbg, LEVEL_1, "About to exchange and kill %+F with %+F\n", old_node, new_node)); - if (old_node == current_node) { - /* current_node will be killed. Its scheduling predecessor - must be processed next. */ - prev_node = sched_prev(current_node); + if (current_node == old_node) { + /* next node to be processed will be killed. Its scheduling predecessor + * must be processed next. */ + current_node = sched_next(current_node); + assert (!is_Bad(current_node)); } if (!mode_is_data(get_irn_mode(old_node))) return; - reg = arch_get_irn_register(arch_env, old_node); + reg = arch_get_irn_register(old_node); if (reg == NULL) { - panic("No register assigned at %+F\n", old_node); + panic("No register assigned at %+F", old_node); } - assert(reg == arch_get_irn_register(arch_env, new_node) && + assert(reg == arch_get_irn_register(new_node) && "KILLING a node and replacing by different register is not allowed"); cls = arch_register_get_class(reg); @@ -201,6 +173,14 @@ void be_peephole_before_exchange_and_kill(const ir_node *old_node, ir_node *new_ be_liveness_remove(lv, old_node); } +void be_peephole_exchange(ir_node *old, ir_node *nw) +{ + be_peephole_before_exchange(old, nw); + sched_remove(old); + exchange(old, nw); + be_peephole_new_node(nw); +} + /** * block-walker: run peephole optimization on the given block. */ @@ -230,29 +210,45 @@ static void process_block(ir_node *block, void *data) /* walk the block from last insn to the first */ current_node = sched_last(block); for( ; !sched_is_begin(current_node); - current_node = prev_node != NULL ? prev_node : sched_prev(current_node)) { + current_node = sched_prev(current_node)) { ir_op *op; - ir_node *last; peephole_opt_func peephole_node; - prev_node = NULL; + assert(!is_Bad(current_node)); if (is_Phi(current_node)) break; clear_defs(current_node); set_uses(current_node); - op = get_irn_op(current_node); + op = get_irn_op(current_node); peephole_node = (peephole_opt_func)op->ops.generic; if (peephole_node == NULL) continue; - last = current_node; peephole_node(current_node); - /* was the current node replaced? 
*/ - if (current_node != last) - set_uses(current_node); + assert(!is_Bad(current_node)); + } +} + +static void kill_node_and_preds(ir_node *node) +{ + int arity, i; + + arity = get_irn_arity(node); + for (i = 0; i < arity; ++i) { + ir_node *pred = get_irn_n(node, i); + + set_irn_n(node, i, new_Bad()); + if (get_irn_n_edges(pred) != 0) + continue; + + kill_node_and_preds(pred); } + + if (!is_Proj(node)) + sched_remove(node); + kill_node(node); } /** @@ -262,21 +258,28 @@ static void skip_barrier(ir_node *ret_blk, ir_graph *irg) { ir_node *irn; sched_foreach_reverse(ret_blk, irn) { - if (be_is_Barrier(irn)) { - const ir_edge_t *edge, *next; - - foreach_out_edge_safe(irn, edge, next) { - ir_node *proj = get_edge_src_irn(edge); - int pn = (int)get_Proj_proj(proj); - ir_node *pred = get_irn_n(irn, pn); - - edges_reroute_kind(proj, pred, EDGE_KIND_NORMAL, irg); - edges_reroute_kind(proj, pred, EDGE_KIND_DEP, irg); - } - sched_remove(irn); - kill_node(irn); - break; + const ir_edge_t *edge, *next; + + if (!be_is_Barrier(irn)) + continue; + + foreach_out_edge_safe(irn, edge, next) { + ir_node *proj = get_edge_src_irn(edge); + int pn; + ir_node *pred; + + if (is_Anchor(proj)) + continue; + + pn = (int) get_Proj_proj(proj); + pred = get_irn_n(irn, pn); + + edges_reroute_kind(proj, pred, EDGE_KIND_NORMAL, irg); + edges_reroute_kind(proj, pred, EDGE_KIND_DEP, irg); } + + kill_node_and_preds(irn); + break; } } @@ -301,8 +304,31 @@ static void kill_barriers(ir_graph *irg) { skip_barrier(start_blk, irg); } +/** + * Check whether the node has only one user. Explicitly ignore the anchor. + */ +static int has_only_one_user(ir_node *node) +{ + int n = get_irn_n_edges(node); + const ir_edge_t *edge; + + if (n <= 1) + return 1; + + if (n > 2) + return 0; + + foreach_out_edge(node, edge) { + ir_node *src = get_edge_src_irn(edge); + if (is_Anchor(src)) + return 1; + } + + return 0; +} + /* - * Tries to optimize a beIncSp node with it's previous IncSP node. + * Tries to optimize a beIncSP node with its previous IncSP node. * Must be run from a be_peephole_opt() context. 
*/ ir_node *be_peephole_IncSP_IncSP(ir_node *node) @@ -315,7 +341,7 @@ ir_node *be_peephole_IncSP_IncSP(ir_node *node) if (!be_is_IncSP(pred)) return node; - if (get_irn_n_edges(pred) > 1) + if (!has_only_one_user(pred)) return node; pred_offs = be_get_IncSP_offset(pred); @@ -341,14 +367,7 @@ ir_node *be_peephole_IncSP_IncSP(ir_node *node) /* add node offset to pred and remove our IncSP */ be_set_IncSP_offset(pred, offs); - be_peephole_before_exchange_and_kill(node, pred); - - /* rewire dependency/data edges */ - edges_reroute_kind(node, pred, EDGE_KIND_DEP, current_ir_graph); - edges_reroute(node, pred, current_ir_graph); - sched_remove(node); - be_kill_node(node); - + be_peephole_exchange(node, pred); return pred; } @@ -371,11 +390,11 @@ void be_peephole_opt(be_irg_t *birg) lv = be_get_birg_liveness(birg); n_classes = arch_env_get_n_reg_class(arch_env); - register_values = alloca(sizeof(register_values[0]) * n_classes); + register_values = ALLOCAN(ir_node**, n_classes); for(i = 0; i < n_classes; ++i) { const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i); unsigned n_regs = arch_register_class_n_regs(cls); - register_values[i] = alloca(sizeof(ir_node*) * n_regs); + register_values[i] = ALLOCAN(ir_node*, n_regs); } irg_block_walk_graph(irg, process_block, NULL, NULL); @@ -391,4 +410,4 @@ void be_init_peephole(void) FIRM_DBG_REGISTER(dbg, "firm.be.peephole"); } -BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spillbelady); +BE_REGISTER_MODULE_CONSTRUCTOR(be_init_peephole);
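
A minimal usage sketch (not part of the patch, written only against the interfaces visible above): it shows how a backend-side peephole handler is expected to use be_peephole_IncSP_IncSP() and the new be_peephole_exchange() helper. The handler name example_peephole_IncSP and the op_be_IncSP registration line are illustrative assumptions; only the be_* helpers, the one-argument handler convention and the op->ops.generic lookup are taken from the code above.

#include "bepeephole.h"

/* The signature follows the peephole_opt_func convention used by
 * process_block(): one ir_node argument, return value unused. */
static void example_peephole_IncSP(ir_node *node)
{
	/* Fold this IncSP into a preceding IncSP where possible.  The helper
	 * returns the surviving node; internally it goes through
	 * be_peephole_exchange(), which removes the dead node from the
	 * schedule, rewires its users and keeps the liveness information and
	 * the register_values bookkeeping of this file consistent. */
	node = be_peephole_IncSP_IncSP(node);
	(void)node;

	/* Any other rewrite that produces a replacement writing the same
	 * register would retire the old node the same way:
	 *
	 *     be_peephole_exchange(old_node, new_node);
	 */
}

/* Registration stores the handler in the opcode's generic function
 * pointer, i.e. the field process_block() reads, e.g. (hypothetical):
 *
 *     op_be_IncSP->ops.generic = (op_func) example_peephole_IncSP;
 */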