X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbepeephole.c;h=cc26a63ea6f7a9ea017b3a414751440ff953b419;hb=3dff5ea08f916551668dc18b449327a8a593bc9f;hp=03e47fc7517183f9d7f835ad4bfc2e3ce1adf1d2;hpb=133cd801b822b85c204ec60a2f07b8f285cfcc93;p=libfirm diff --git a/ir/be/bepeephole.c b/ir/be/bepeephole.c index 03e47fc75..cc26a63ea 100644 --- a/ir/be/bepeephole.c +++ b/ir/be/bepeephole.c @@ -21,7 +21,6 @@ * @file * @brief Peephole optimisation framework keeps track of which registers contain which values * @author Matthias Braun - * @version $Id$ */ #include "config.h" @@ -95,7 +94,6 @@ static void clear_defs(ir_node *node) { /* clear values defined */ if (get_irn_mode(node) == mode_T) { - const ir_edge_t *edge; foreach_out_edge(node, edge) { ir_node *proj = get_edge_src_irn(edge); clear_reg_value(proj); @@ -186,16 +184,14 @@ void be_peephole_exchange(ir_node *old, ir_node *nw) */ static void process_block(ir_node *block, void *data) { - int l; (void) data; /* construct initial register assignment */ memset(register_values, 0, sizeof(ir_node*) * arch_env->n_registers); - assert(lv->nodes && "live sets must be computed"); + assert(lv->sets_valid && "live sets must be computed"); DB((dbg, LEVEL_1, "\nProcessing block %+F (from end)\n", block)); - be_lv_foreach(lv, block, be_lv_state_end, l) { - ir_node *node = be_lv_get_irn(lv, block, l); + be_lv_foreach(lv, block, be_lv_state_end, node) { set_reg_value(node); } DB((dbg, LEVEL_1, "\nstart processing\n")); @@ -230,9 +226,8 @@ static void process_block(ir_node *block, void *data) */ bool be_has_only_one_user(ir_node *node) { - int n = get_irn_n_edges(node); - int n_users; - const ir_edge_t *edge; + int n = get_irn_n_edges(node); + int n_users; if (n <= 1) return 1; @@ -249,43 +244,45 @@ bool be_has_only_one_user(ir_node *node) return n_users == 1; } -bool be_can_move_before(ir_heights_t *heights, const ir_node *node, - const ir_node *before) +static inline bool overlapping_regs(const arch_register_t *reg0, + const arch_register_req_t *req0, const arch_register_t *reg1, + const arch_register_req_t *req1) { + if (reg0 == NULL || reg1 == NULL) + return false; + return reg0->global_index < (unsigned)reg1->global_index + req1->width + && reg1->global_index < (unsigned)reg0->global_index + req0->width; +} + +bool be_can_move_down(ir_heights_t *heights, const ir_node *node, + const ir_node *before) +{ + assert(get_nodes_block(node) == get_nodes_block(before)); + assert(sched_get_time_step(node) < sched_get_time_step(before)); + int node_arity = get_irn_arity(node); ir_node *schedpoint = sched_next(node); while (schedpoint != before) { - int i; - unsigned n_outs = arch_get_irn_n_outs(schedpoint); - - /* the node must not use our computed values */ + /* schedpoint must not use our computed values */ if (heights_reachable_in_block(heights, schedpoint, node)) return false; - /* the node must not overwrite registers of our inputs */ - for (i = 0; i < node_arity; ++i) { + /* schedpoint must not overwrite registers of our inputs */ + unsigned n_outs = arch_get_irn_n_outs(schedpoint); + for (int i = 0; i < node_arity; ++i) { ir_node *in = get_irn_n(node, i); const arch_register_t *reg = arch_get_irn_register(in); - const arch_register_req_t *in_req - = arch_get_irn_register_req_in(node, i); - unsigned o; if (reg == NULL) continue; - for (o = 0; o < n_outs; ++o) { + const arch_register_req_t *in_req + = arch_get_irn_register_req_in(node, i); + for (unsigned o = 0; o < n_outs; ++o) { const arch_register_t *outreg = 
arch_get_irn_register_out(schedpoint, o); const arch_register_req_t *outreq = arch_get_irn_register_req_out(schedpoint, o); - if (outreg == NULL) - continue; - if (outreg->global_index >= reg->global_index - && outreg->global_index - < (unsigned)reg->global_index + in_req->width) - return false; - if (reg->global_index >= outreg->global_index - && reg->global_index - < (unsigned)outreg->global_index + outreq->width) + if (overlapping_regs(reg, in_req, outreg, outreq)) return false; } } @@ -295,6 +292,103 @@ bool be_can_move_before(ir_heights_t *heights, const ir_node *node, return true; } +bool be_can_move_up(ir_heights_t *heights, const ir_node *node, + const ir_node *after) +{ + unsigned n_outs = arch_get_irn_n_outs(node); + const ir_node *node_block = get_nodes_block(node); + const ir_node *after_block = get_block_const(after); + const ir_node *schedpoint; + if (node_block != after_block) { + /* currently we can move up exactly 1 block */ + assert(get_Block_cfgpred_block(node_block, 0) == after_block); + ir_node *first = sched_first(node_block); + + /* do not move nodes changing memory */ + if (is_memop(node)) { + ir_node *meminput = get_memop_mem(node); + if (!is_NoMem(meminput)) + return false; + } + + /* make sure we can move to the beginning of the succ block */ + if (node != first && !be_can_move_up(heights, node, sched_prev(first))) + return false; + + /* check if node overrides any of live-in values of other successors */ + ir_graph *irg = get_irn_irg(node); + be_lv_t *lv = be_get_irg_liveness(irg); + foreach_block_succ(after_block, edge) { + ir_node *succ = get_edge_src_irn(edge); + if (succ == node_block) + continue; + + be_lv_foreach(lv, succ, be_lv_state_in, live_node) { + const arch_register_t *reg = arch_get_irn_register(live_node); + const arch_register_req_t *req = arch_get_irn_register_req(live_node); + for (unsigned o = 0; o < n_outs; ++o) { + const arch_register_t *outreg + = arch_get_irn_register_out(node, o); + const arch_register_req_t *outreq + = arch_get_irn_register_req_out(node, o); + if (overlapping_regs(outreg, outreq, reg, req)) + return false; + } + } + sched_foreach(succ, phi) { + if (!is_Phi(phi)) + break; + const arch_register_t *reg = arch_get_irn_register(phi); + const arch_register_req_t *req = arch_get_irn_register_req(phi); + for (unsigned o = 0; o < n_outs; ++o) { + const arch_register_t *outreg + = arch_get_irn_register_out(node, o); + const arch_register_req_t *outreq + = arch_get_irn_register_req_out(node, o); + if (overlapping_regs(outreg, outreq, reg, req)) + return false; + } + } + } + schedpoint = sched_last(after_block); + } else { + schedpoint = sched_prev(node); + } + + /* move schedule upwards until we hit the "after" node */ + while (schedpoint != after) { + /* TODO: the following heights query only works for nodes in the same + * block, otherwise we have to be conservative here */ + if (get_nodes_block(node) != get_nodes_block(schedpoint)) + return false; + /* node must not depend on schedpoint */ + if (heights_reachable_in_block(heights, node, schedpoint)) + return false; + + /* node must not overwrite registers used by schedpoint */ + int arity = get_irn_arity(schedpoint); + for (int i = 0; i < arity; ++i) { + const arch_register_t *reg + = arch_get_irn_register_in(schedpoint, i); + if (reg == NULL) + continue; + const arch_register_req_t *in_req + = arch_get_irn_register_req_in(schedpoint, i); + for (unsigned o = 0; o < n_outs; ++o) { + const arch_register_t *outreg + = arch_get_irn_register_out(node, o); + const arch_register_req_t 
*outreq + = arch_get_irn_register_req_out(node, o); + if (overlapping_regs(outreg, outreq, reg, in_req)) + return false; + } + } + + schedpoint = sched_prev(schedpoint); + } + return true; +} + /* * Tries to optimize a beIncSP node with its previous IncSP node. * Must be run from a be_peephole_opt() context. @@ -325,10 +419,12 @@ ir_node *be_peephole_IncSP_IncSP(ir_node *node) void be_peephole_opt(ir_graph *irg) { +#if 0 /* we sometimes find BadE nodes in float apps like optest_float.c or * kahansum.c for example... */ - be_liveness_invalidate(be_get_irg_liveness(irg)); - be_liveness_assure_sets(be_assure_liveness(irg)); + be_invalidate_live_sets(irg); +#endif + be_assure_live_sets(irg); arch_env = be_get_irg_arch_env(irg); lv = be_get_irg_liveness(irg);
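
The new overlapping_regs() helper introduced in this patch collapses the two asymmetric index comparisons of the old be_can_move_before() into a single symmetric interval test: two register assignments clash exactly when the ranges [global_index, global_index + width) intersect. Below is a minimal standalone sketch of that check, assuming simplified stand-in structs that model only the fields the test reads (not libfirm's real arch_register_t / arch_register_req_t definitions) and made-up register indices for the demo:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-ins: only global_index and width are modelled. */
	typedef struct { unsigned global_index; } arch_register_t;
	typedef struct { unsigned width;        } arch_register_req_t;

	/* Two register assignments overlap iff their index ranges
	 * [global_index, global_index + width) intersect. */
	static bool overlapping_regs(const arch_register_t *reg0,
	                             const arch_register_req_t *req0,
	                             const arch_register_t *reg1,
	                             const arch_register_req_t *req1)
	{
		if (reg0 == NULL || reg1 == NULL)
			return false;
		return reg0->global_index < reg1->global_index + req1->width
		    && reg1->global_index < reg0->global_index + req0->width;
	}

	int main(void)
	{
		/* A double-width value occupying registers 4..5 clashes with a
		 * single register 5, but not with register 6. */
		arch_register_t     r4 = { 4 }, r5 = { 5 }, r6 = { 6 };
		arch_register_req_t wide = { 2 }, narrow = { 1 };

		printf("%d\n", overlapping_regs(&r4, &wide, &r5, &narrow)); /* prints 1 */
		printf("%d\n", overlapping_regs(&r4, &wide, &r6, &narrow)); /* prints 0 */
		return 0;
	}

Because the half-open interval form is symmetric in its two arguments, the same helper serves both directions in the patch: inputs of the moved node against outputs of intervening nodes in be_can_move_down(), and outputs of the moved node against live-in values and Phi results of other successor blocks in be_can_move_up().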