X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbelower.c;h=ac713af02f30556ec7107bab9e3eb50a8b2c91b5;hb=4d7a9507baf1737297cd4f7fc91eab209fd5d398;hp=535d8b2d4d87f79be5c6510a876817591cc95d54;hpb=9980b12037694161f3377605ba4a376810ffe2fe;p=libfirm

diff --git a/ir/be/belower.c b/ir/be/belower.c
index 535d8b2d4..ac713af02 100644
--- a/ir/be/belower.c
+++ b/ir/be/belower.c
@@ -26,6 +26,12 @@
 #include "iredges_t.h"
 #include "irgwalk.h"
 
+#ifdef _WIN32
+#include <malloc.h>
+#else
+#include <alloca.h>
+#endif
+
 #undef is_Perm
 #define is_Perm(arch_env, irn) (arch_irn_classify(arch_env, irn) == arch_irn_class_perm)
 
@@ -164,17 +170,17 @@ static perm_cycle_t *get_perm_cycle(perm_cycle_t *cycle, reg_pair_t *pairs, int
 	int cur_idx = pairs[start].out_reg->index;
 	int cur_pair_idx = start;
 	int n_pairs_done = get_n_checked_pairs(pairs, n);
-	int idx, done = 0;
+	int idx;
 	perm_type_t cycle_tp = PERM_CYCLE;
 
 	/* We could be right in the middle of a chain, so we need to find the start */
-	while (head != cur_idx && !done) {
+	while (head != cur_idx) {
 		/* goto previous register in cycle or chain */
 		cur_pair_idx = get_pairidx_for_regidx(pairs, n, head, 1);
 
 		if (cur_pair_idx < 0) {
 			cycle_tp = PERM_CHAIN;
-			done = 1;
+			break;
 		}
 		else {
 			head = pairs[cur_pair_idx].in_reg->index;
@@ -183,16 +189,16 @@ static perm_cycle_t *get_perm_cycle(perm_cycle_t *cycle, reg_pair_t *pairs, int
 	}
 
 	/* assume worst case: all remaining pairs build a cycle or chain */
-	cycle->elems = calloc((n - n_pairs_done) * 2, sizeof(cycle->elems[0]));
+	cycle->elems = xcalloc((n - n_pairs_done) * 2, sizeof(cycle->elems[0]));
 	cycle->n_elems = 2;  /* initial number of elements is 2 */
 	cycle->elems[0] = pairs[start].in_reg;
 	cycle->elems[1] = pairs[start].out_reg;
 	cycle->type = cycle_tp;
-	n_pairs_done++;
+	cur_idx = pairs[start].out_reg->index;
 	idx = 2;
 
 	/* check for cycle or end of a chain */
-	while (cur_idx != head && n_pairs_done < n) {
+	while (cur_idx != head) {
 		/* goto next register in cycle or chain */
 		cur_pair_idx = get_pairidx_for_regidx(pairs, n, cur_idx, 0);
 
@@ -205,8 +211,6 @@ static perm_cycle_t *get_perm_cycle(perm_cycle_t *cycle, reg_pair_t *pairs, int
 		if (cur_idx != head) {
 			cycle->elems[idx++] = pairs[cur_pair_idx].out_reg;
 			cycle->n_elems++;
-
-			n_pairs_done++;
 		}
 		else {
 			/* we are there where we started -> CYCLE */
@@ -253,7 +257,7 @@ static void lower_perm_node(ir_node *irn, void *walk_env) {
 	ir_node *arg1, *arg2, *res1, *res2;
 	ir_node *cpyxchg = NULL;
 
-	arch_env = env->chord_env->main_env->arch_env;
+	arch_env = env->chord_env->birg->main_env->arch_env;
 	do_copy = env->do_copy;
 	mod = env->dbg_module;
 	block = get_nodes_block(irn);
@@ -272,7 +276,7 @@ static void lower_perm_node(ir_node *irn, void *walk_env) {
 	assert(n == get_irn_n_edges(irn) && "perm's in and out numbers different");
 
 	reg_class = arch_get_irn_register(arch_env, get_irn_n(irn, 0))->reg_class;
-	pairs = calloc(n, sizeof(pairs[0]));
+	pairs = alloca(n * sizeof(pairs[0]));
 
 	/* build the list of register pairs (in, out) */
 	i = 0;
@@ -326,7 +330,7 @@ static void lower_perm_node(ir_node *irn, void *walk_env) {
 		/* go to the first not-checked pair */
 		while (pairs[i].checked) i++;
 
-		cycle = calloc(1, sizeof(*cycle));
+		cycle = xcalloc(1, sizeof(*cycle));
 		cycle = get_perm_cycle(cycle, pairs, n, i);
 		DB((mod, LEVEL_1, "%+F: following %s created:\n ", irn, cycle->type == PERM_CHAIN ?
"chain" : "cycle")); @@ -346,8 +350,8 @@ static void lower_perm_node(ir_node *irn, void *walk_env) { //TODO: - iff PERM_CYCLE && do_copy -> determine free temp reg and insert copy to/from it before/after // the copy cascade (this reduces the cycle into a chain) - /* build copy/swap nodes */ - for (i = 0; i < cycle->n_elems - 1; i++) { + /* build copy/swap nodes from back to front */ + for (i = cycle->n_elems - 2; i >= 0; i--) { arg1 = get_node_for_register(pairs, n, cycle->elems[i], 0); arg2 = get_node_for_register(pairs, n, cycle->elems[i + 1], 0); @@ -366,6 +370,33 @@ static void lower_perm_node(ir_node *irn, void *walk_env) { in[0] = arg1; in[1] = arg2; + /* At this point we have to handle the following problem: */ + /* */ + /* If we have a cycle with more than two elements, then */ + /* this could correspond to the following Perm node: */ + /* */ + /* +----+ +----+ +----+ */ + /* | r1 | | r2 | | r3 | */ + /* +-+--+ +-+--+ +--+-+ */ + /* | | | */ + /* | | | */ + /* +-+--------+---------+-+ */ + /* | Perm | */ + /* +-+--------+---------+-+ */ + /* | | | */ + /* | | | */ + /* +-+--+ +-+--+ +--+-+ */ + /* |Proj| |Proj| |Proj| */ + /* | r2 | | r3 | | r1 | */ + /* +----+ +----+ +----+ */ + /* */ + /* This node is about to be split up into two 2x Perm's */ + /* for which we need 4 Proj's and the one additional Proj */ + /* of the first Perm has to be one IN of the second. So in */ + /* general we need to create one additional Proj for each */ + /* "middle" Perm and set this to one in node of the successor */ + /* Perm. */ + DBG((mod, LEVEL_1, "%+F creating exchange node (%+F, %s) and (%+F, %s) with\n", irn, arg1, cycle->elems[i]->name, arg2, cycle->elems[i + 1]->name)); DBG((mod, LEVEL_1, "%+F (%+F, %s) and (%+F, %s)\n", @@ -373,8 +404,21 @@ static void lower_perm_node(ir_node *irn, void *walk_env) { cpyxchg = be_new_Perm(reg_class, env->chord_env->irg, block, 2, in); + if (i > 0) { + /* cycle is not done yet */ + int pidx = get_pairidx_for_regidx(pairs, n, cycle->elems[i]->index, 0); + + /* create intermediate proj */ + res2 = new_r_Proj(get_irn_irg(irn), block, cpyxchg, get_irn_mode(res1), 0); + + /* set as in for next Perm */ + pairs[pidx].in_node = res2; + } + else { + sched_remove(res2); + } + sched_remove(res1); - sched_remove(res2); set_Proj_pred(res2, cpyxchg); set_Proj_proj(res2, 0); @@ -403,11 +447,13 @@ static void lower_perm_node(ir_node *irn, void *walk_env) { /* insert the copy/exchange node in schedule after the magic schedule node (see above) */ sched_add_after(sched_point, cpyxchg); + /* set the new scheduling point */ + sched_point = cpyxchg; DBG((mod, LEVEL_1, "replacing %+F with %+F, placed new node after %+F\n", irn, cpyxchg, sched_point)); } - free(cycle->elems); + free((void *) cycle->elems); free(cycle); } @@ -418,150 +464,61 @@ static void lower_perm_node(ir_node *irn, void *walk_env) { /** - * Adds Projs to keep nodes for each register class, which eats the - * caller saved registers. - * Note: The caller has to make sure, that call is a Call - * - * @param call The Call node - * @param walk_env The walker environment + * Checks if node has a should_be_different constraint in output + * and adds a Keep then to assure the constraint. 
  */
-static void lower_call_node(ir_node *call, const void *walk_env) {
-	const arch_env_t *arch_env = walk_env;
-	const arch_register_class_t *reg_class;
-	int i, j, set_size = 0, pn, keep_arity;
-	arch_isa_t *isa = arch_env_get_isa(arch_env);
-	const ir_node *proj_T = NULL;
-	ir_node **in_keep, *block = get_nodes_block(call);
-	bitset_t *proj_set;
-	const ir_edge_t *edge;
-	const arch_register_t *reg;
-
-	/* Prepare the bitset where we store the projnums which are already in use*/
-	for (i = 0; i < arch_isa_get_n_reg_class(isa); i++) {
-		reg_class = arch_isa_get_reg_class(isa, i);
-		set_size += arch_register_class_n_regs(reg_class);
-	}
+static void assure_different_constraint(ir_node *irn, be_irg_t *birg) {
+	const arch_env_t *arch_env = birg->main_env->arch_env;
+	const arch_register_req_t *req;
+	arch_register_req_t req_temp;
+	ir_node *in[2], *keep;
+	firm_dbg_module_t *mod = firm_dbg_register("firm.be.lower");
 
-	in_keep = malloc(set_size * sizeof(ir_node *));
+	req = arch_get_register_req(arch_env, &req_temp, irn, -1);
 
-	proj_set = bitset_malloc(set_size);
-	bitset_clear_all(proj_set);
+	if (req && arch_register_req_is(req, should_be_different)) {
+		/* We found a should_be_different constraint. */
+		assert(req->other_different && "missing irn for constraint");
 
-	/* check if there is a ProjT node and which arguments are used */
-	foreach_out_edge(call, edge) {
-		if (get_irn_mode(get_edge_src_irn(edge)) == mode_T)
-			proj_T = get_edge_src_irn(edge);
-	}
+		in[0] = irn;
+		in[1] = req->other_different;
 
-	/* set all used arguments */
-	if (proj_T) {
-		foreach_out_edge(proj_T, edge) {
-			ir_node *proj = get_edge_src_irn(edge);
-
-			assert(is_Proj(proj));
-			bitset_set(proj_set, get_Proj_proj(proj));
-		}
-	}
-	else {
-		proj_T = new_r_Proj(current_ir_graph, block, call, mode_T, pn_Call_T_result);
-	}
-
-	/* Create for each caller save register a proj (keep node argument) */
-	/* if this proj is not already present */
-	for (i = 0; i < arch_isa_get_n_reg_class(isa); i++) {
-
-		/* reset the keep input, as we need one keep for each register class */
-		memset(in_keep, 0, set_size * sizeof(ir_node *));
-		keep_arity = 0;
-		reg_class = arch_isa_get_reg_class(isa, i);
-
-		for (j = 0; j < arch_register_class_n_regs(reg_class); j++) {
-			reg = arch_register_for_index(reg_class, j);
-
-			/* only check caller save registers */
-			if (arch_register_type_is(reg, caller_saved)) {
-				pn = isa->impl->get_projnum_for_register(isa, reg);
-				if (!bitset_is_set(proj_set, pn)) {
-					ir_node *proj = new_r_Proj(current_ir_graph, block, (ir_node *)proj_T, mode_Is, pn);
-
-					in_keep[keep_arity++] = proj;
-				}
-			}
-		}
-
-		/* ok, we found some caller save register which are not in use but must be saved */
-		if (keep_arity) {
-			be_new_Keep(reg_class, current_ir_graph, block, keep_arity, in_keep);
-		}
+		keep = be_new_Keep(req->cls, birg->irg, get_nodes_block(irn), 2, in);
+		DBG((mod, LEVEL_1, "created %+F for %+F to assure should_be_different\n", keep, irn));
 	}
-
-	bitset_free(proj_set);
-	return;
 }
 
 /**
- * Calls the backend code generator functions to lower Spill and
- * Reload nodes into Store and Load. The backend is fully responsible
- * for creating the new nodes and setting their input correct.
- * Note: The caller of this has to make sure that irn is a Spill
- *       or Reload!
+ * Calls the functions to assure register constraints.
  *
- * @param irn      The Spill/Reload node
+ * @param irn      The node to be checked for lowering
  * @param walk_env The walker environment
  */
-static void lower_spill_reload(ir_node *irn, void *walk_env) {
-	lower_env_t *env = walk_env;
-	arch_code_generator_t *cg = env->chord_env->main_env->cg;
-	const arch_env_t *aenv = env->chord_env->main_env->arch_env;
-	ir_node *res = NULL;
-	ir_node *sched_point;
-
-	if (be_is_Spill(irn) && cg->impl->lower_spill) {
-		res = cg->impl->lower_spill(cg, irn);
-	}
-	else if (be_is_Reload(irn) && cg->impl->lower_reload) {
-		res = cg->impl->lower_reload(cg, irn);
-		if (res && res != irn) {
-			/* copy the result register from the reload to the load */
-			arch_set_irn_register(aenv, res, arch_get_irn_register(aenv, irn));
-		}
-	}
+static void assure_constraints_walker(ir_node *irn, void *walk_env) {
+	if (is_Block(irn))
+		return;
 
-	if (res && res != irn) {
-		sched_point = sched_prev(irn);
-		sched_remove(irn);
-		exchange(irn, res);
-		sched_add_after(sched_point, res);
-	}
-	else {
-		DBG((env->dbg_module, LEVEL_1, "node %+F not lowered\n", irn));
-	}
+	if (mode_is_datab(get_irn_mode(irn)))
+		assure_different_constraint(irn, walk_env);
 
 	return;
 }
 
+
 /**
- * Calls the corresponding lowering function for the node.
+ * Walks over all nodes to assure register constraints.
  *
- * @param irn      The node to be checked for lowering
- * @param walk_env The walker environment
+ * @param birg The birg structure containing the irg
  */
-static void lower_nodes_before_sched_walker(ir_node *irn, const void *walk_env) {
-	const arch_env_t *arch_env = walk_env;
-
-	if (!is_Block(irn) && !is_Proj(irn)) {
-		if (is_Call(arch_env, irn)) {
-			lower_call_node(irn, walk_env);
-		}
-	}
-
-	return;
+void assure_constraints(be_irg_t *birg) {
+	irg_walk_blkwise_graph(birg->irg, NULL, assure_constraints_walker, birg);
 }
 
+
 /**
  * Calls the corresponding lowering function for the node.
  *
@@ -570,15 +527,12 @@ static void lower_nodes_before_sched_walker(ir_node *irn, const void *walk_env)
  */
 static void lower_nodes_after_ra_walker(ir_node *irn, void *walk_env) {
 	lower_env_t *env = walk_env;
-	const arch_env_t *arch_env = env->chord_env->main_env->arch_env;
+	const arch_env_t *arch_env = env->chord_env->birg->main_env->arch_env;
 
 	if (!is_Block(irn) && !is_Proj(irn)) {
 		if (is_Perm(arch_env, irn)) {
 			lower_perm_node(irn, walk_env);
 		}
-		else if (be_is_Spill(irn) || be_is_Reload(irn)) {
-			lower_spill_reload(irn, walk_env);
-		}
 	}
 	return;
 }
@@ -586,22 +540,9 @@
 
 
-/**
- * Walks over all blocks in an irg and performs lowering need
- * to be done before scheduling (e.g. call lowering).
- *
- * @param chord_env The chordal environment containing the irg
- * @param do_copy   1 == resolve cycles with a free reg if available
- */
-void lower_nodes_before_sched(ir_graph *irg, const void *env) {
-	irg_walk_blkwise_graph(irg, NULL, lower_nodes_before_sched_walker, env);
-}
-
-
-
 /**
  * Walks over all blocks in an irg and performs lowering need to be
- * done after register allocation (e.g. perm and spill/reload lowering).
+ * done after register allocation (e.g. perm lowering).
  *
  * @param chord_env The chordal environment containing the irg
  * @param do_copy   1 == resolve cycles with a free reg if available
  */
@@ -611,7 +552,7 @@ void lower_nodes_after_ra(be_chordal_env_t *chord_env, int do_copy) {
 
 	env.chord_env = chord_env;
 	env.do_copy = do_copy;
-	env.dbg_module = firm_dbg_register("ir.be.lower");
+	env.dbg_module = firm_dbg_register("firm.be.lower");
 
 	irg_walk_blkwise_graph(chord_env->irg, NULL, lower_nodes_after_ra_walker, &env);
 }