X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firarch.c;h=1a5e1def594ca41765e31cdcc9abc5676d8da847;hb=e44426021b5f23c05bcae04ee99d1e7afdd71b82;hp=387310f1936a8d37867529754a8083f3f307720d;hpb=e84184056559e90f2dffc7f7648f40705864e088;p=libfirm diff --git a/ir/ir/irarch.c b/ir/ir/irarch.c index 387310f19..1a5e1def5 100644 --- a/ir/ir/irarch.c +++ b/ir/ir/irarch.c @@ -24,7 +24,8 @@ * @author Sebastian Hack, Michael Beck * @version $Id$ * - * Implements "Strenght Reduction of Multiplications by Integer Constants" by Youfeng Wu. + * Implements "Strength Reduction of Multiplications by Integer Constants" + * by Youfeng Wu. * Implements Division and Modulo by Consts from "Hackers Delight", */ #include "config.h" @@ -38,7 +39,7 @@ #include "iropt_t.h" #include "ircons_t.h" #include "irgmod.h" -#include "irvrfy.h" +#include "irverify.h" #include "tv_t.h" #include "dbginfo_t.h" #include "iropt_dbg.h" @@ -47,38 +48,20 @@ #include "ircons.h" #include "irarch.h" #include "irflag.h" +#include "be.h" #include "error.h" -#undef DEB - -#define MAX_BITSTR 64 - -/* when we need verifying */ -#ifdef NDEBUG -# define IRN_VRFY_IRG(res, irg) -#else -# define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg) -#endif - -/** The params got from the factory in arch_dep_init(...). */ -static const ir_settings_arch_dep_t *params = NULL; - /** The bit mask, which optimizations to apply. */ static arch_dep_opts_t opts; -void arch_dep_init(arch_dep_params_factory_t factory) { - opts = arch_dep_none; - - if (factory != NULL) - params = factory(); -} - -void arch_dep_set_opts(arch_dep_opts_t the_opts) { +void arch_dep_set_opts(arch_dep_opts_t the_opts) +{ opts = the_opts; } /** check, whether a mode allows a Mulh instruction. */ -static int allow_Mulh(ir_mode *mode) { +static int allow_Mulh(const ir_settings_arch_dep_t *params, ir_mode *mode) +{ if (get_mode_size_bits(mode) > params->max_bits_for_mulh) return 0; return (mode_is_signed(mode) && params->allow_mulhs) || (!mode_is_signed(mode) && params->allow_mulhu); @@ -99,14 +82,16 @@ struct instruction { /** * The environment for the strength reduction of multiplications. */ -typedef struct _mul_env { +typedef struct mul_env { struct obstack obst; /**< an obstack for local space. */ + const ir_settings_arch_dep_t *params; ir_mode *mode; /**< the mode of the multiplication constant */ unsigned bits; /**< number of bits in the mode */ unsigned max_S; /**< the maximum LEA shift value. */ instruction *root; /**< the root of the instruction tree */ ir_node *op; /**< the operand that is multiplied */ ir_node *blk; /**< the block where the new graph is built */ + ir_graph *irg; dbg_info *dbg; /**< the debug info for the new graph. */ ir_mode *shf_mode; /**< the (unsigned) mode for the shift constants */ int fail; /**< set to 1 if the instruction sequence fails the constraints */ @@ -119,7 +104,9 @@ typedef struct _mul_env { * Some kind of default evaluator. Return the cost of * instructions. 
*/ -static int default_evaluate(insn_kind kind, tarval *tv) { +static int default_evaluate(insn_kind kind, const ir_mode *mode, ir_tarval *tv) +{ + (void) mode; (void) tv; if (kind == MUL) @@ -130,8 +117,9 @@ static int default_evaluate(insn_kind kind, tarval *tv) { /** * emit a LEA (or an Add) instruction */ -static instruction *emit_LEA(mul_env *env, instruction *a, instruction *b, unsigned shift) { - instruction *res = obstack_alloc(&env->obst, sizeof(*res)); +static instruction *emit_LEA(mul_env *env, instruction *a, instruction *b, unsigned shift) +{ + instruction *res = OALLOC(&env->obst, instruction); res->kind = shift > 0 ? LEA : ADD; res->in[0] = a; res->in[1] = b; @@ -144,8 +132,9 @@ static instruction *emit_LEA(mul_env *env, instruction *a, instruction *b, unsig /** * emit a SHIFT (or an Add or a Zero) instruction */ -static instruction *emit_SHIFT(mul_env *env, instruction *a, unsigned shift) { - instruction *res = obstack_alloc(&env->obst, sizeof(*res)); +static instruction *emit_SHIFT(mul_env *env, instruction *a, unsigned shift) +{ + instruction *res = OALLOC(&env->obst, instruction); if (shift == env->bits) { /* a 2^bits with bits resolution is a zero */ res->kind = ZERO; @@ -171,8 +160,9 @@ static instruction *emit_SHIFT(mul_env *env, instruction *a, unsigned shift) { /** * emit a SUB instruction */ -static instruction *emit_SUB(mul_env *env, instruction *a, instruction *b) { - instruction *res = obstack_alloc(&env->obst, sizeof(*res)); +static instruction *emit_SUB(mul_env *env, instruction *a, instruction *b) +{ + instruction *res = OALLOC(&env->obst, instruction); res->kind = SUB; res->in[0] = a; res->in[1] = b; @@ -185,8 +175,9 @@ static instruction *emit_SUB(mul_env *env, instruction *a, instruction *b) { /** * emit the ROOT instruction */ -static instruction *emit_ROOT(mul_env *env, ir_node *root_op) { - instruction *res = obstack_alloc(&env->obst, sizeof(*res)); +static instruction *emit_ROOT(mul_env *env, ir_node *root_op) +{ + instruction *res = OALLOC(&env->obst, instruction); res->kind = ROOT; res->in[0] = NULL; res->in[1] = NULL; @@ -200,12 +191,13 @@ static instruction *emit_ROOT(mul_env *env, ir_node *root_op) { /** * Returns the condensed representation of the tarval tv */ -static unsigned char *value_to_condensed(mul_env *env, tarval *tv, int *pr) { +static unsigned char *value_to_condensed(mul_env *env, ir_tarval *tv, int *pr) +{ ir_mode *mode = get_tarval_mode(tv); int bits = get_mode_size_bits(mode); char *bitstr = get_tarval_bitpattern(tv); int i, l, r; - unsigned char *R = obstack_alloc(&env->obst, bits); + unsigned char *R = (unsigned char*)obstack_alloc(&env->obst, bits); l = r = 0; for (i = 0; bitstr[i] != '\0'; ++i) { @@ -224,7 +216,8 @@ static unsigned char *value_to_condensed(mul_env *env, tarval *tv, int *pr) { /** * Calculate the gain when using the generalized complementary technique */ -static int calculate_gain(unsigned char *R, int r) { +static int calculate_gain(unsigned char *R, int r) +{ int max_gain = 0; int idx = -1, i; int gain; @@ -246,8 +239,9 @@ static int calculate_gain(unsigned char *R, int r) { /** * Calculates the condensed complement of a given (R,r) tuple */ -static unsigned char *complement_condensed(mul_env *env, unsigned char *R, int r, int gain, int *prs) { - unsigned char *value = obstack_alloc(&env->obst, env->bits); +static unsigned char *complement_condensed(mul_env *env, unsigned char *R, int r, int gain, int *prs) +{ + unsigned char *value = (unsigned char*)obstack_alloc(&env->obst, env->bits); int i, l, j; unsigned 
char c; @@ -286,8 +280,9 @@ static unsigned char *complement_condensed(mul_env *env, unsigned char *R, int r /** * creates a tarval from a condensed representation. */ -static tarval *condensed_to_value(mul_env *env, unsigned char *R, int r) { - tarval *res, *tv; +static ir_tarval *condensed_to_value(mul_env *env, unsigned char *R, int r) +{ + ir_tarval *res, *tv; int i, j; j = 0; @@ -296,7 +291,7 @@ static tarval *condensed_to_value(mul_env *env, unsigned char *R, int r) { for (i = 0; i < r; ++i) { j = R[i]; if (j) { - tarval *t = new_tarval_from_long(j, mode_Iu); + ir_tarval *t = new_tarval_from_long(j, mode_Iu); tv = tarval_shl(tv, t); } res = res ? tarval_add(res, tv) : tv; @@ -305,12 +300,13 @@ static tarval *condensed_to_value(mul_env *env, unsigned char *R, int r) { } /* forward */ -static instruction *basic_decompose_mul(mul_env *env, unsigned char *R, int r, tarval *N); +static instruction *basic_decompose_mul(mul_env *env, unsigned char *R, int r, ir_tarval *N); /* * handle simple cases with up-to 2 bits set */ -static instruction *decompose_simple_cases(mul_env *env, unsigned char *R, int r, tarval *N) { +static instruction *decompose_simple_cases(mul_env *env, unsigned char *R, int r, ir_tarval *N) +{ instruction *ins, *ins2; (void) N; @@ -339,14 +335,15 @@ static instruction *decompose_simple_cases(mul_env *env, unsigned char *R, int r /** * Main decompose driver. */ -static instruction *decompose_mul(mul_env *env, unsigned char *R, int r, tarval *N) { +static instruction *decompose_mul(mul_env *env, unsigned char *R, int r, ir_tarval *N) +{ unsigned i; int gain; if (r <= 2) return decompose_simple_cases(env, R, r, N); - if (params->also_use_subs) { + if (env->params->also_use_subs) { gain = calculate_gain(R, r); if (gain > 0) { instruction *instr1, *instr2; @@ -355,7 +352,7 @@ static instruction *decompose_mul(mul_env *env, unsigned char *R, int r, tarval R1 = complement_condensed(env, R, r, gain, &r1); r2 = r - gain + 1; - R2 = obstack_alloc(&env->obst, r2); + R2 = (unsigned char*)obstack_alloc(&env->obst, r2); k = 1; for (i = 0; i < gain; ++i) { @@ -384,8 +381,8 @@ static instruction *decompose_mul(mul_env *env, unsigned char *R, int r, tarval N = condensed_to_value(env, R, r); for (i = env->max_S; i > 0; --i) { - tarval *div_res, *mod_res; - tarval *tv = new_tarval_from_long((1 << i) + 1, env->mode); + ir_tarval *div_res, *mod_res; + ir_tarval *tv = new_tarval_from_long((1 << i) + 1, env->mode); div_res = tarval_divmod(N, tv, &mod_res); if (mod_res == get_mode_null(env->mode)) { @@ -407,16 +404,17 @@ static instruction *decompose_mul(mul_env *env, unsigned char *R, int r, tarval /** * basic decomposition routine */ -static instruction *basic_decompose_mul(mul_env *env, unsigned char *R, int r, tarval *N) { +static instruction *basic_decompose_mul(mul_env *env, unsigned char *R, int r, ir_tarval *N) +{ instruction *Ns; unsigned t; - if (R[0] == 0) { /* Case 1 */ + if (R[0] == 0) { /* Case 1 */ t = R[1] > IMAX(env->max_S, R[1]); R[1] -= t; Ns = decompose_mul(env, &R[1], r - 1, N); return emit_LEA(env, env->root, Ns, t); - } else if (R[0] <= env->max_S) { /* Case 2 */ + } else if (R[0] <= env->max_S) { /* Case 2 */ t = R[0]; R[1] += t; Ns = decompose_mul(env, &R[1], r - 1, N); @@ -435,8 +433,10 @@ static instruction *basic_decompose_mul(mul_env *env, unsigned char *R, int r, t * @param env the environment * @param inst the instruction */ -static ir_node *build_graph(mul_env *env, instruction *inst) { +static ir_node *build_graph(mul_env *env, instruction *inst) +{ ir_node *l, 
*r, *c; + ir_graph *irg = env->irg; if (inst->irn) return inst->irn; @@ -445,26 +445,25 @@ static ir_node *build_graph(mul_env *env, instruction *inst) { case LEA: l = build_graph(env, inst->in[0]); r = build_graph(env, inst->in[1]); - c = new_Const_long(env->shf_mode, inst->shift_count); - r = new_rd_Shl(env->dbg, current_ir_graph, env->blk, r, c, env->mode); - return inst->irn = new_rd_Add(env->dbg, current_ir_graph, env->blk, l, r, env->mode); + c = new_r_Const_long(irg, env->shf_mode, inst->shift_count); + r = new_rd_Shl(env->dbg, env->blk, r, c, env->mode); + return inst->irn = new_rd_Add(env->dbg, env->blk, l, r, env->mode); case SHIFT: l = build_graph(env, inst->in[0]); - c = new_Const_long(env->shf_mode, inst->shift_count); - return inst->irn = new_rd_Shl(env->dbg, current_ir_graph, env->blk, l, c, env->mode); + c = new_r_Const_long(irg, env->shf_mode, inst->shift_count); + return inst->irn = new_rd_Shl(env->dbg, env->blk, l, c, env->mode); case SUB: l = build_graph(env, inst->in[0]); r = build_graph(env, inst->in[1]); - return inst->irn = new_rd_Sub(env->dbg, current_ir_graph, env->blk, l, r, env->mode); + return inst->irn = new_rd_Sub(env->dbg, env->blk, l, r, env->mode); case ADD: l = build_graph(env, inst->in[0]); r = build_graph(env, inst->in[1]); - return inst->irn = new_rd_Add(env->dbg, current_ir_graph, env->blk, l, r, env->mode); + return inst->irn = new_rd_Add(env->dbg, env->blk, l, r, env->mode); case ZERO: - return inst->irn = new_Const(get_mode_null(env->mode)); + return inst->irn = new_r_Const(irg, get_mode_null(env->mode)); default: panic("Unsupported instruction kind"); - return NULL; } } @@ -472,7 +471,8 @@ static ir_node *build_graph(mul_env *env, instruction *inst) { * Calculate the costs for the given instruction sequence. 
* Note that additional costs due to higher register pressure are NOT evaluated yet */ -static int evaluate_insn(mul_env *env, instruction *inst) { +static int evaluate_insn(mul_env *env, instruction *inst) +{ int costs; if (inst->costs >= 0) { @@ -486,22 +486,22 @@ static int evaluate_insn(mul_env *env, instruction *inst) { case ADD: costs = evaluate_insn(env, inst->in[0]); costs += evaluate_insn(env, inst->in[1]); - costs += env->evaluate(inst->kind, NULL); + costs += env->evaluate(inst->kind, env->mode, NULL); inst->costs = costs; return costs; case SHIFT: - if (inst->shift_count > params->highest_shift_amount) + if (inst->shift_count > env->params->highest_shift_amount) env->fail = 1; if (env->n_shift <= 0) env->fail = 1; else --env->n_shift; costs = evaluate_insn(env, inst->in[0]); - costs += env->evaluate(inst->kind, NULL); + costs += env->evaluate(inst->kind, env->mode, NULL); inst->costs = costs; return costs; case ZERO: - inst->costs = costs = env->evaluate(inst->kind, NULL); + inst->costs = costs = env->evaluate(inst->kind, env->mode, NULL); return costs; case MUL: case ROOT: @@ -521,7 +521,8 @@ static int evaluate_insn(mul_env *env, instruction *inst) { * * @return the new graph */ -static ir_node *do_decomposition(ir_node *irn, ir_node *operand, tarval *tv) { +static ir_node *do_decomposition(ir_node *irn, ir_node *operand, ir_tarval *tv) +{ mul_env env; instruction *inst; unsigned char *R; @@ -530,19 +531,21 @@ static ir_node *do_decomposition(ir_node *irn, ir_node *operand, tarval *tv) { int mul_costs; obstack_init(&env.obst); + env.params = be_get_backend_param()->dep_param; env.mode = get_tarval_mode(tv); env.bits = (unsigned)get_mode_size_bits(env.mode); env.max_S = 3; env.root = emit_ROOT(&env, operand); env.fail = 0; - env.n_shift = params->maximum_shifts; - env.evaluate = params->evaluate != NULL ? params->evaluate : default_evaluate; + env.n_shift = env.params->maximum_shifts; + env.evaluate = env.params->evaluate != NULL ? env.params->evaluate : default_evaluate; + env.irg = get_irn_irg(irn); R = value_to_condensed(&env, tv, &r); inst = decompose_mul(&env, R, r, tv); /* the paper suggests 70% here */ - mul_costs = (env.evaluate(MUL, tv) * 7 + 5) / 10; + mul_costs = (env.evaluate(MUL, env.mode, tv) * 7 + 5) / 10; if (evaluate_insn(&env, inst) <= mul_costs && !env.fail) { env.op = operand; env.blk = get_nodes_block(irn); @@ -558,43 +561,53 @@ static ir_node *do_decomposition(ir_node *irn, ir_node *operand, tarval *tv) { } /* Replace Muls with Shifts and Add/Subs. */ -ir_node *arch_dep_replace_mul_with_shifts(ir_node *irn) { - ir_node *res = irn; - ir_mode *mode = get_irn_mode(irn); +ir_node *arch_dep_replace_mul_with_shifts(ir_node *irn) +{ + ir_node *res = irn; + ir_mode *mode = get_irn_mode(irn); + ir_graph *irg; + ir_node *left; + ir_node *right; + ir_node *operand; + ir_tarval *tv; + const ir_settings_arch_dep_t *params = be_get_backend_param()->dep_param; + /* If the architecture dependent optimizations were not initialized or this optimization was not enabled. */ if (params == NULL || (opts & arch_dep_mul_to_shift) == 0) return irn; - set_arch_dep_running(1); - { - if (is_Mul(irn) && mode_is_int(mode)) { - ir_node *left = get_binop_left(irn); - ir_node *right = get_binop_right(irn); - tarval *tv = NULL; - ir_node *operand = NULL; - - /* Look, if one operand is a constant. 
*/ - if (is_Const(left)) { - tv = get_Const_tarval(left); - operand = right; - } else if (is_Const(right)) { - tv = get_Const_tarval(right); - operand = left; - } + if (!is_Mul(irn) || !mode_is_int(mode)) + return res; + + /* we should never do the reverse transformations again + (like x+x -> 2*x) */ + irg = get_irn_irg(irn); + set_irg_state(irg, IR_GRAPH_STATE_ARCH_DEP); + + left = get_binop_left(irn); + right = get_binop_right(irn); + tv = NULL; + operand = NULL; + + /* Look, if one operand is a constant. */ + if (is_Const(left)) { + tv = get_Const_tarval(left); + operand = right; + } else if (is_Const(right)) { + tv = get_Const_tarval(right); + operand = left; + } - if (tv != NULL) { - res = do_decomposition(irn, operand, tv); + if (tv != NULL) { + res = do_decomposition(irn, operand, tv); - if (res != irn) { - hook_arch_dep_replace_mul_with_shifts(irn); - exchange(irn, res); - } - } + if (res != irn) { + hook_arch_dep_replace_mul_with_shifts(irn); + exchange(irn, res); } } - //set_arch_dep_running(0); return res; } @@ -602,7 +615,8 @@ ir_node *arch_dep_replace_mul_with_shifts(ir_node *irn) { /** * calculated the ld2 of a tarval if tarval is 2^n, else returns -1. */ -static int tv_ld2(tarval *tv, int bits) { +static int tv_ld2(ir_tarval *tv, int bits) +{ int i, k = 0, num; for (num = i = 0; i < bits; ++i) { @@ -642,7 +656,7 @@ static int tv_ld2(tarval *tv, int bits) { /** The result of a the magic() function. */ struct ms { - tarval *M; /**< magic number */ + ir_tarval *M; /**< magic number */ int s; /**< shift amount */ int need_add; /**< an additional add is needed */ int need_sub; /**< an additional sub is needed */ @@ -653,15 +667,16 @@ struct ms { * * see Hacker's Delight: 10-6 Integer Division by Constants: Incorporation into a Compiler */ -static struct ms magic(tarval *d) { +static struct ms magic(ir_tarval *d) +{ ir_mode *mode = get_tarval_mode(d); ir_mode *u_mode = find_unsigned_mode(mode); int bits = get_mode_size_bits(u_mode); int p; - tarval *ad, *anc, *delta, *q1, *r1, *q2, *r2, *t; /* unsigned */ + ir_tarval *ad, *anc, *delta, *q1, *r1, *q2, *r2, *t; /* unsigned */ pn_Cmp d_cmp, M_cmp; - tarval *bits_minus_1, *two_bits_1; + ir_tarval *bits_minus_1, *two_bits_1; struct ms mag; @@ -728,7 +743,7 @@ static struct ms magic(tarval *d) { /** The result of the magicu() function. 
*/ struct mu { - tarval *M; /**< magic add constant */ + ir_tarval *M; /**< magic add constant */ int s; /**< shift amount */ int need_add; /**< add indicator */ }; @@ -738,12 +753,13 @@ struct mu { * * see Hacker's Delight: 10-10 Integer Division by Constants: Incorporation into a Compiler (Unsigned) */ -static struct mu magicu(tarval *d) { +static struct mu magicu(ir_tarval *d) +{ ir_mode *mode = get_tarval_mode(d); int bits = get_mode_size_bits(mode); int p; - tarval *nc, *delta, *q1, *r1, *q2, *r2; - tarval *bits_minus_1, *two_bits_1, *seven_ff; + ir_tarval *nc, *delta, *q1, *r1, *q2, *r2; + ir_tarval *bits_minus_1, *two_bits_1, *seven_ff; struct mu magu; @@ -806,7 +822,8 @@ static struct mu magicu(tarval *d) { * * Note that 'div' might be a mod or DivMod operation as well */ -static ir_node *replace_div_by_mulh(ir_node *div, tarval *tv) { +static ir_node *replace_div_by_mulh(ir_node *div, ir_tarval *tv) +{ dbg_info *dbg = get_irn_dbg_info(div); ir_node *n = get_binop_left(div); ir_node *block = get_irn_n(div, -1); @@ -819,63 +836,67 @@ static ir_node *replace_div_by_mulh(ir_node *div, tarval *tv) { return div; if (mode_is_signed(mode)) { + ir_graph *irg = get_irn_irg(div); struct ms mag = magic(tv); /* generate the Mulh instruction */ - c = new_Const(mag.M); - q = new_rd_Mulh(dbg, current_ir_graph, block, n, c, mode); + c = new_r_Const(irg, mag.M); + q = new_rd_Mulh(dbg, block, n, c, mode); /* do we need an Add or Sub */ if (mag.need_add) - q = new_rd_Add(dbg, current_ir_graph, block, q, n, mode); + q = new_rd_Add(dbg, block, q, n, mode); else if (mag.need_sub) - q = new_rd_Sub(dbg, current_ir_graph, block, q, n, mode); + q = new_rd_Sub(dbg, block, q, n, mode); /* Do we need the shift */ if (mag.s > 0) { - c = new_Const_long(mode_Iu, mag.s); - q = new_rd_Shrs(dbg, current_ir_graph, block, q, c, mode); + c = new_r_Const_long(irg, mode_Iu, mag.s); + q = new_rd_Shrs(dbg, block, q, c, mode); } /* final */ - c = new_Const_long(mode_Iu, bits - 1); - t = new_rd_Shr(dbg, current_ir_graph, block, q, c, mode); + c = new_r_Const_long(irg, mode_Iu, bits - 1); + t = new_rd_Shr(dbg, block, q, c, mode); - q = new_rd_Add(dbg, current_ir_graph, block, q, t, mode); + q = new_rd_Add(dbg, block, q, t, mode); } else { struct mu mag = magicu(tv); ir_node *c; + ir_graph *irg = get_irn_irg(div); /* generate the Mulh instruction */ - c = new_Const(mag.M); - q = new_rd_Mulh(dbg, current_ir_graph, block, n, c, mode); + c = new_r_Const(irg, mag.M); + q = new_rd_Mulh(dbg, block, n, c, mode); if (mag.need_add) { if (mag.s > 0) { /* use the GM scheme */ - t = new_rd_Sub(dbg, current_ir_graph, block, n, q, mode); + t = new_rd_Sub(dbg, block, n, q, mode); - c = new_Const(get_mode_one(mode_Iu)); - t = new_rd_Shr(dbg, current_ir_graph, block, t, c, mode); + c = new_r_Const(irg, get_mode_one(mode_Iu)); + t = new_rd_Shr(dbg, block, t, c, mode); - t = new_rd_Add(dbg, current_ir_graph, block, t, q, mode); + t = new_rd_Add(dbg, block, t, q, mode); - c = new_Const_long(mode_Iu, mag.s - 1); - q = new_rd_Shr(dbg, current_ir_graph, block, t, c, mode); + c = new_r_Const_long(irg, mode_Iu, mag.s - 1); + q = new_rd_Shr(dbg, block, t, c, mode); } else { /* use the default scheme */ - q = new_rd_Add(dbg, current_ir_graph, block, q, n, mode); + q = new_rd_Add(dbg, block, q, n, mode); } } else if (mag.s > 0) { /* default scheme, shift needed */ - c = new_Const_long(mode_Iu, mag.s); - q = new_rd_Shr(dbg, current_ir_graph, block, q, c, mode); + c = new_r_Const_long(irg, mode_Iu, mag.s); + q = new_rd_Shr(dbg, block, q, c, mode); } } 
return q; } /* Replace Divs with Shifts and Add/Subs and Mulh. */ -ir_node *arch_dep_replace_div_by_const(ir_node *irn) { +ir_node *arch_dep_replace_div_by_const(ir_node *irn) +{ + const ir_settings_arch_dep_t *params = be_get_backend_param()->dep_param; ir_node *res = irn; /* If the architecture dependent optimizations were not initialized @@ -887,10 +908,11 @@ ir_node *arch_dep_replace_div_by_const(ir_node *irn) { ir_node *c = get_Div_right(irn); ir_node *block, *left; ir_mode *mode; - tarval *tv, *ntv; + ir_tarval *tv, *ntv; dbg_info *dbg; int n, bits; - int k, n_flag; + int k; + int n_flag = 0; if (! is_Const(c)) return irn; @@ -923,43 +945,44 @@ ir_node *arch_dep_replace_div_by_const(ir_node *irn) { } if (k >= 0) { /* division by 2^k or -2^k */ + ir_graph *irg = get_irn_irg(irn); if (mode_is_signed(mode)) { ir_node *k_node; ir_node *curr = left; /* create the correction code for signed values only if there might be a remainder */ - if (! is_Div_remainderless(irn)) { + if (! get_Div_no_remainder(irn)) { if (k != 1) { - k_node = new_Const_long(mode_Iu, k - 1); - curr = new_rd_Shrs(dbg, current_ir_graph, block, left, k_node, mode); + k_node = new_r_Const_long(irg, mode_Iu, k - 1); + curr = new_rd_Shrs(dbg, block, left, k_node, mode); } - k_node = new_Const_long(mode_Iu, bits - k); - curr = new_rd_Shr(dbg, current_ir_graph, block, curr, k_node, mode); + k_node = new_r_Const_long(irg, mode_Iu, bits - k); + curr = new_rd_Shr(dbg, block, curr, k_node, mode); - curr = new_rd_Add(dbg, current_ir_graph, block, left, curr, mode); + curr = new_rd_Add(dbg, block, left, curr, mode); } else { k_node = left; } - k_node = new_Const_long(mode_Iu, k); - res = new_rd_Shrs(dbg, current_ir_graph, block, curr, k_node, mode); + k_node = new_r_Const_long(irg, mode_Iu, k); + res = new_rd_Shrs(dbg, block, curr, k_node, mode); if (n_flag) { /* negate the result */ ir_node *k_node; - k_node = new_Const(get_mode_null(mode)); - res = new_rd_Sub(dbg, current_ir_graph, block, k_node, res, mode); + k_node = new_r_Const(irg, get_mode_null(mode)); + res = new_rd_Sub(dbg, block, k_node, res, mode); } } else { /* unsigned case */ ir_node *k_node; - k_node = new_Const_long(mode_Iu, k); - res = new_rd_Shr(dbg, current_ir_graph, block, left, k_node, mode); + k_node = new_r_Const_long(irg, mode_Iu, k); + res = new_rd_Shr(dbg, block, left, k_node, mode); } } else { /* other constant */ - if (allow_Mulh(mode)) + if (allow_Mulh(params, mode)) res = replace_div_by_mulh(irn, tv); } } @@ -971,7 +994,9 @@ ir_node *arch_dep_replace_div_by_const(ir_node *irn) { } /* Replace Mods with Shifts and Add/Subs and Mulh. 
*/ -ir_node *arch_dep_replace_mod_by_const(ir_node *irn) { +ir_node *arch_dep_replace_mod_by_const(ir_node *irn) +{ + const ir_settings_arch_dep_t *params = be_get_backend_param()->dep_param; ir_node *res = irn; /* If the architecture dependent optimizations were not initialized @@ -983,7 +1008,7 @@ ir_node *arch_dep_replace_mod_by_const(ir_node *irn) { ir_node *c = get_Mod_right(irn); ir_node *block, *left; ir_mode *mode; - tarval *tv, *ntv; + ir_tarval *tv, *ntv; dbg_info *dbg; int n, bits; int k; @@ -1016,6 +1041,7 @@ ir_node *arch_dep_replace_mod_by_const(ir_node *irn) { } if (k >= 0) { + ir_graph *irg = get_irn_irg(irn); /* division by 2^k or -2^k: * we use "modulus" here, so x % y == x % -y that's why is no difference between the case 2^k and -2^k */ @@ -1024,35 +1050,35 @@ ir_node *arch_dep_replace_mod_by_const(ir_node *irn) { ir_node *curr = left; if (k != 1) { - k_node = new_Const_long(mode_Iu, k - 1); - curr = new_rd_Shrs(dbg, current_ir_graph, block, left, k_node, mode); + k_node = new_r_Const_long(irg, mode_Iu, k - 1); + curr = new_rd_Shrs(dbg, block, left, k_node, mode); } - k_node = new_Const_long(mode_Iu, bits - k); - curr = new_rd_Shr(dbg, current_ir_graph, block, curr, k_node, mode); + k_node = new_r_Const_long(irg, mode_Iu, bits - k); + curr = new_rd_Shr(dbg, block, curr, k_node, mode); - curr = new_rd_Add(dbg, current_ir_graph, block, left, curr, mode); + curr = new_rd_Add(dbg, block, left, curr, mode); - k_node = new_Const_long(mode, (-1) << k); - curr = new_rd_And(dbg, current_ir_graph, block, curr, k_node, mode); + k_node = new_r_Const_long(irg, mode, (-1) << k); + curr = new_rd_And(dbg, block, curr, k_node, mode); - res = new_rd_Sub(dbg, current_ir_graph, block, left, curr, mode); + res = new_rd_Sub(dbg, block, left, curr, mode); } else { /* unsigned case */ ir_node *k_node; - k_node = new_Const_long(mode, (1 << k) - 1); - res = new_rd_And(dbg, current_ir_graph, block, left, k_node, mode); + k_node = new_r_Const_long(irg, mode, (1 << k) - 1); + res = new_rd_And(dbg, block, left, k_node, mode); } } else { /* other constant */ - if (allow_Mulh(mode)) { + if (allow_Mulh(params, mode)) { res = replace_div_by_mulh(irn, tv); - res = new_rd_Mul(dbg, current_ir_graph, block, res, c, mode); + res = new_rd_Mul(dbg, block, res, c, mode); /* res = arch_dep_mul_to_shift(res); */ - res = new_rd_Sub(dbg, current_ir_graph, block, left, res, mode); + res = new_rd_Sub(dbg, block, left, res, mode); } } } @@ -1064,7 +1090,9 @@ ir_node *arch_dep_replace_mod_by_const(ir_node *irn) { } /* Replace DivMods with Shifts and Add/Subs and Mulh. */ -void arch_dep_replace_divmod_by_const(ir_node **div, ir_node **mod, ir_node *irn) { +void arch_dep_replace_divmod_by_const(ir_node **div, ir_node **mod, ir_node *irn) +{ + const ir_settings_arch_dep_t *params = be_get_backend_param()->dep_param; *div = *mod = NULL; /* If the architecture dependent optimizations were not initialized @@ -1077,10 +1105,11 @@ void arch_dep_replace_divmod_by_const(ir_node **div, ir_node **mod, ir_node *irn ir_node *c = get_DivMod_right(irn); ir_node *block, *left; ir_mode *mode; - tarval *tv, *ntv; + ir_tarval *tv, *ntv; dbg_info *dbg; int n, bits; - int k, n_flag; + int k; + int n_flag = 0; if (! 
is_Const(c)) return; @@ -1113,56 +1142,53 @@ void arch_dep_replace_divmod_by_const(ir_node **div, ir_node **mod, ir_node *irn } if (k >= 0) { /* division by 2^k or -2^k */ + ir_graph *irg = get_irn_irg(irn); if (mode_is_signed(mode)) { ir_node *k_node, *c_k; ir_node *curr = left; if (k != 1) { - k_node = new_Const_long(mode_Iu, k - 1); - curr = new_rd_Shrs(dbg, current_ir_graph, block, left, k_node, mode); + k_node = new_r_Const_long(irg, mode_Iu, k - 1); + curr = new_rd_Shrs(dbg, block, left, k_node, mode); } - k_node = new_Const_long(mode_Iu, bits - k); - curr = new_rd_Shr(dbg, current_ir_graph, block, curr, k_node, mode); + k_node = new_r_Const_long(irg, mode_Iu, bits - k); + curr = new_rd_Shr(dbg, block, curr, k_node, mode); - curr = new_rd_Add(dbg, current_ir_graph, block, left, curr, mode); + curr = new_rd_Add(dbg, block, left, curr, mode); - c_k = new_Const_long(mode_Iu, k); + c_k = new_r_Const_long(irg, mode_Iu, k); - *div = new_rd_Shrs(dbg, current_ir_graph, block, curr, c_k, mode); + *div = new_rd_Shrs(dbg, block, curr, c_k, mode); if (n_flag) { /* negate the div result */ - ir_node *k_node; - - k_node = new_Const(get_mode_null(mode)); - *div = new_rd_Sub(dbg, current_ir_graph, block, k_node, *div, mode); + ir_node *k_node = new_r_Const(irg, get_mode_null(mode)); + *div = new_rd_Sub(dbg, block, k_node, *div, mode); } - k_node = new_Const_long(mode, (-1) << k); - curr = new_rd_And(dbg, current_ir_graph, block, curr, k_node, mode); + k_node = new_r_Const_long(irg, mode, (-1) << k); + curr = new_rd_And(dbg, block, curr, k_node, mode); - *mod = new_rd_Sub(dbg, current_ir_graph, block, left, curr, mode); + *mod = new_rd_Sub(dbg, block, left, curr, mode); } else { /* unsigned case */ - ir_node *k_node; - - k_node = new_Const_long(mode_Iu, k); - *div = new_rd_Shr(dbg, current_ir_graph, block, left, k_node, mode); + ir_node *k_node = new_r_Const_long(irg, mode_Iu, k); + *div = new_rd_Shr(dbg, block, left, k_node, mode); - k_node = new_Const_long(mode, (1 << k) - 1); - *mod = new_rd_And(dbg, current_ir_graph, block, left, k_node, mode); + k_node = new_r_Const_long(irg, mode, (1 << k) - 1); + *mod = new_rd_And(dbg, block, left, k_node, mode); } } else { /* other constant */ - if (allow_Mulh(mode)) { + if (allow_Mulh(params, mode)) { ir_node *t; *div = replace_div_by_mulh(irn, tv); - t = new_rd_Mul(dbg, current_ir_graph, block, *div, c, mode); + t = new_rd_Mul(dbg, block, *div, c, mode); /* t = arch_dep_mul_to_shift(t); */ - *mod = new_rd_Sub(dbg, current_ir_graph, block, left, t, mode); + *mod = new_rd_Sub(dbg, block, left, t, mode); } } } @@ -1170,20 +1196,3 @@ void arch_dep_replace_divmod_by_const(ir_node **div, ir_node **mod, ir_node *irn if (*div) hook_arch_dep_replace_division_by_const(irn); } - - -static const ir_settings_arch_dep_t default_params = { - 1, /* also use subs */ - 4, /* maximum shifts */ - 31, /* maximum shift amount */ - default_evaluate, /* default evaluator */ - - 0, /* allow Mulhs */ - 0, /* allow Mulus */ - 32 /* Mulh allowed up to 32 bit */ -}; - -/* A default parameter factory for testing purposes. */ -const ir_settings_arch_dep_t *arch_dep_default_factory(void) { - return &default_params; -}
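
The multiplication strength reduction touched above works on a condensed form of the constant: value_to_condensed() records, for every set bit of the multiplier, its distance to the previous set bit, and condensed_to_value() folds that list back into a tarval; decompose_mul() then replaces the Mul by a short chain of shifts, adds and (when profitable) subs over those distances. A minimal plain-C sketch of that evaluation (not libfirm API; mul_by_condensed and the example constant are illustrative only):

#include <assert.h>

/* Illustrative only: evaluate x * N where N is given by its condensed
 * representation R[0..r-1] (R[i] = distance from the previous set bit,
 * R[0] = position of the lowest set bit), using shifts and adds much
 * like the LEA/SHIFT/ADD chain emitted by decompose_mul(). */
static unsigned mul_by_condensed(unsigned x, const unsigned char *R, int r)
{
    unsigned acc  = 0;
    unsigned term = x;
    for (int i = 0; i < r; ++i) {
        term <<= R[i];   /* advance to the next set bit of N */
        acc  += term;    /* add x shifted to that bit position */
    }
    return acc;
}

int main(void)
{
    /* N = 22 = 0b10110, set bits at positions 1, 2 and 4 -> condensed {1, 1, 2} */
    static const unsigned char R[] = { 1, 1, 2 };
    assert(mul_by_condensed(5, R, 3) == 5u * 22u);
    return 0;
}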
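
For a signed division by 2^k, arch_dep_replace_div_by_const() emits the usual bias-and-shift sequence (only when the Div may have a remainder): replicate the sign bit into the low k bits (Shrs by k-1, Shr by bits-k), add that bias to the dividend, and shift arithmetically by k, so the result rounds toward zero like Div does; for a divisor of -2^k the result is additionally negated. Evaluated in plain C (a sketch assuming 32-bit int and arithmetic right shift of negative values; sdiv_pow2 is an illustrative name, not libfirm API):

#include <assert.h>
#include <stdint.h>

/* Sketch of the Shrs/Shr/Add/Shrs sequence built for signed x / 2^k:
 * the bias is 2^k - 1 when x is negative and 0 otherwise, which turns
 * the flooring arithmetic shift into a truncating division. */
static int32_t sdiv_pow2(int32_t x, unsigned k)
{
    int32_t  curr = (k != 1) ? (x >> (k - 1)) : x;  /* Shrs(left, k-1), skipped for k == 1 */
    uint32_t bias = (uint32_t)curr >> (32 - k);     /* Shr(curr, bits-k): 2^k - 1 if x < 0 */
    return (int32_t)((uint32_t)x + bias) >> k;      /* Add, then Shrs(curr, k) */
}

int main(void)
{
    for (int32_t x = -1000; x <= 1000; ++x)
        for (unsigned k = 1; k <= 6; ++k)
            assert(sdiv_pow2(x, k) == x / (1 << k));
    return 0;
}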
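
When the divisor is not a power of two, replace_div_by_mulh() uses the Hacker's Delight magic-number scheme: multiply by a precomputed constant, keep the high word (Mulh), optionally add or subtract the dividend, shift arithmetically, and add the sign bit of the intermediate result. As a concrete sketch, 32-bit signed division by 7 uses M = 0x92492493 and s = 2 with the extra Add; these are the textbook Hacker's Delight constants that magic() should compute, and sdiv7 below is illustrative only (not libfirm API, and it assumes arithmetic right shift of negative values):

#include <assert.h>
#include <stdint.h>

/* Sketch of the signed Mulh sequence for n / 7 on 32 bits. */
static int32_t sdiv7(int32_t n)
{
    const int32_t M = (int32_t)0x92492493;          /* magic multiplier for d = 7 */
    int32_t q = (int32_t)(((int64_t)M * n) >> 32);  /* Mulh: high word of the product */
    q += n;                                         /* need_add, since M < 0 while d > 0 */
    q >>= 2;                                        /* Shrs by s = 2 */
    q += (int32_t)((uint32_t)q >> 31);              /* Shr(q, bits-1) + Add: fix up negative q */
    return q;
}

int main(void)
{
    for (int32_t n = -100000; n <= 100000; ++n)
        assert(sdiv7(n) == n / 7);
    return 0;
}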