X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firarch.c;h=b650a3e674fbf0548d8f2a688bab314b1650f8c6;hb=a6e674f74ea93c3661f100088db520acb701571f;hp=e6d3aba4c12793e17dcb58ad0d1eb5a0704a2827;hpb=5b19d55245a362750650a61a6ab8f44aad58e196;p=libfirm

diff --git a/ir/ir/irarch.c b/ir/ir/irarch.c
index e6d3aba4c..b650a3e67 100644
--- a/ir/ir/irarch.c
+++ b/ir/ir/irarch.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
  *
  * This file is part of libFirm.
  *
@@ -24,17 +24,12 @@
  * @author Sebastian Hack, Michael Beck
  * @version $Id$
  *
- * Implements "Strenght Reduction of Multiplications by Integer Constants" by Youfeng Wu.
+ * Implements "Strength Reduction of Multiplications by Integer Constants" by Youfeng Wu.
  * Implements Division and Modulo by Consts from "Hackers Delight",
  */
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
-
-#ifdef HAVE_STDLIB_H
-# include <stdlib.h>
-#endif
+#include "config.h"
+#include <stdlib.h>

 #include <assert.h>
 #include "irnode_t.h"
@@ -52,6 +47,7 @@
 #include "ircons.h"
 #include "irarch.h"
 #include "irflag.h"
+#include "error.h"

 #undef DEB

@@ -70,51 +66,22 @@ static const ir_settings_arch_dep_t *params = NULL;
 /** The bit mask, which optimizations to apply. */
 static arch_dep_opts_t opts;

-/* we need this new pseudo op */
-static ir_op *op_Mulh = NULL;
-
-/**
- * construct a Mulh: Mulh(a,b) = (a * b) >> w, w is the with in bits of a, b
- */
-static ir_node *
-new_rd_Mulh (dbg_info *db, ir_graph *irg, ir_node *block,
-       ir_node *op1, ir_node *op2, ir_mode *mode) {
-	ir_node *in[2];
-	ir_node *res;
-
-	in[0] = op1;
-	in[1] = op2;
-	res = new_ir_node(db, irg, block, op_Mulh, mode, 2, in);
-	res = optimize_node(res);
-	IRN_VRFY_IRG(res, irg);
-	return res;
-}
-
-ir_op *get_op_Mulh(void) { return op_Mulh; }
-
-void arch_dep_init(arch_dep_params_factory_t factory) {
+void arch_dep_init(arch_dep_params_factory_t factory)
+{
 	opts = arch_dep_none;

 	if (factory != NULL)
 		params = factory();
-
-	if (! op_Mulh) {
-		int mulh_opc = get_next_ir_opcode();
-
-		/* create the Mulh operation */
-		op_Mulh = new_ir_op(mulh_opc, "Mulh", op_pin_state_floats, irop_flag_commutative, oparity_binary, 0, 0, NULL);
-	}
 }

-void arch_dep_set_opts(arch_dep_opts_t the_opts) {
+void arch_dep_set_opts(arch_dep_opts_t the_opts)
+{
 	opts = the_opts;
-
-	if (opts & arch_dep_mul_to_shift)
-		set_opt_arch_dep_running(1);
 }

 /** check, whether a mode allows a Mulh instruction. */
-static int allow_Mulh(ir_mode *mode) {
+static int allow_Mulh(ir_mode *mode)
+{
 	if (get_mode_size_bits(mode) > params->max_bits_for_mulh)
 		return 0;
 	return (mode_is_signed(mode) && params->allow_mulhs) || (!mode_is_signed(mode) && params->allow_mulhu);
@@ -155,7 +122,8 @@ typedef struct _mul_env {
  * Some kind of default evaluator. Return the cost of
  * instructions.
  */
-static int default_evaluate(insn_kind kind, tarval *tv) {
+static int default_evaluate(insn_kind kind, tarval *tv)
+{
 	(void) tv;

 	if (kind == MUL)
@@ -166,8 +134,9 @@ static int default_evaluate(insn_kind kind, tarval *tv) {
 /**
  * emit a LEA (or an Add) instruction
  */
-static instruction *emit_LEA(mul_env *env, instruction *a, instruction *b, unsigned shift) {
-	instruction *res = obstack_alloc(&env->obst, sizeof(*res));
+static instruction *emit_LEA(mul_env *env, instruction *a, instruction *b, unsigned shift)
+{
+	instruction *res = OALLOC(&env->obst, instruction);
 	res->kind = shift > 0 ? LEA : ADD;
 	res->in[0] = a;
 	res->in[1] = b;
@@ -180,8 +149,9 @@ static instruction *emit_LEA(mul_env *env, instruction *a, instruction *b, unsig
 /**
  * emit a SHIFT (or an Add or a Zero) instruction
  */
-static instruction *emit_SHIFT(mul_env *env, instruction *a, unsigned shift) {
-	instruction *res = obstack_alloc(&env->obst, sizeof(*res));
+static instruction *emit_SHIFT(mul_env *env, instruction *a, unsigned shift)
+{
+	instruction *res = OALLOC(&env->obst, instruction);
 	if (shift == env->bits) {
 		/* a 2^bits with bits resolution is a zero */
 		res->kind = ZERO;
@@ -207,8 +177,9 @@ static instruction *emit_SHIFT(mul_env *env, instruction *a, unsigned shift) {
 /**
  * emit a SUB instruction
  */
-static instruction *emit_SUB(mul_env *env, instruction *a, instruction *b) {
-	instruction *res = obstack_alloc(&env->obst, sizeof(*res));
+static instruction *emit_SUB(mul_env *env, instruction *a, instruction *b)
+{
+	instruction *res = OALLOC(&env->obst, instruction);
 	res->kind = SUB;
 	res->in[0] = a;
 	res->in[1] = b;
@@ -221,8 +192,9 @@ static instruction *emit_SUB(mul_env *env, instruction *a, instruction *b) {
 /**
  * emit the ROOT instruction
  */
-static instruction *emit_ROOT(mul_env *env, ir_node *root_op) {
-	instruction *res = obstack_alloc(&env->obst, sizeof(*res));
+static instruction *emit_ROOT(mul_env *env, ir_node *root_op)
+{
+	instruction *res = OALLOC(&env->obst, instruction);
 	res->kind = ROOT;
 	res->in[0] = NULL;
 	res->in[1] = NULL;
@@ -236,7 +208,8 @@ static instruction *emit_ROOT(mul_env *env, ir_node *root_op) {
 /**
  * Returns the condensed representation of the tarval tv
  */
-static unsigned char *value_to_condensed(mul_env *env, tarval *tv, int *pr) {
+static unsigned char *value_to_condensed(mul_env *env, tarval *tv, int *pr)
+{
 	ir_mode *mode = get_tarval_mode(tv);
 	int bits = get_mode_size_bits(mode);
 	char *bitstr = get_tarval_bitpattern(tv);
@@ -260,9 +233,10 @@ static unsigned char *value_to_condensed(mul_env *env, tarval *tv, int *pr) {
 /**
  * Calculate the gain when using the generalized complementary technique
  */
-static int calculate_gain(unsigned char *R, int r) {
-	int max_gain = -1;
-	int idx, i;
+static int calculate_gain(unsigned char *R, int r)
+{
+	int max_gain = 0;
+	int idx = -1, i;
 	int gain;

 	/* the gain for r == 1 */
@@ -276,15 +250,14 @@ static int calculate_gain(unsigned char *R, int r) {
 			idx = i;
 		}
 	}
-	if (max_gain > 0)
-		return idx;
-	return -1;
+	return idx;
 }

 /**
  * Calculates the condensed complement of a given (R,r) tuple
  */
-static unsigned char *complement_condensed(mul_env *env, unsigned char *R, int r, int gain, int *prs) {
+static unsigned char *complement_condensed(mul_env *env, unsigned char *R, int r, int gain, int *prs)
+{
 	unsigned char *value = obstack_alloc(&env->obst, env->bits);
 	int i, l, j;
 	unsigned char c;
@@ -324,7 +297,8 @@ static unsigned char *complement_condensed(mul_env *env, unsigned char *R, int r
 /**
  * creates a tarval from a condensed representation.
  */
-static tarval *condensed_to_value(mul_env *env, unsigned char *R, int r) {
+static tarval *condensed_to_value(mul_env *env, unsigned char *R, int r)
+{
 	tarval *res, *tv;
 	int i, j;
@@ -348,7 +322,8 @@ static instruction *basic_decompose_mul(mul_env *env, unsigned char *R, int r, t
 /*
  * handle simple cases with up-to 2 bits set
  */
-static instruction *decompose_simple_cases(mul_env *env, unsigned char *R, int r, tarval *N) {
+static instruction *decompose_simple_cases(mul_env *env, unsigned char *R, int r, tarval *N)
+{
 	instruction *ins, *ins2;

 	(void) N;
@@ -358,11 +333,16 @@ static instruction *decompose_simple_cases(mul_env *env, unsigned char *R, int r
 	assert(r == 2);

 	ins = env->root;
+	if (R[1] <= env->max_S) {
+		ins = emit_LEA(env, ins, ins, R[1]);
+		if (R[0] != 0) {
+			ins = emit_SHIFT(env, ins, R[0]);
+		}
+		return ins;
+	}
 	if (R[0] != 0) {
 		ins = emit_SHIFT(env, ins, R[0]);
 	}
-	if (R[1] <= env->max_S)
-		return emit_LEA(env, ins, ins, R[1]);

 	ins2 = emit_SHIFT(env, env->root, R[0] + R[1]);
 	return emit_LEA(env, ins, ins2, 0);
@@ -372,7 +352,8 @@ static instruction *decompose_simple_cases(mul_env *env, unsigned char *R, int r
 /**
  * Main decompose driver.
  */
-static instruction *decompose_mul(mul_env *env, unsigned char *R, int r, tarval *N) {
+static instruction *decompose_mul(mul_env *env, unsigned char *R, int r, tarval *N)
+{
 	unsigned i;
 	int gain;
@@ -440,7 +421,8 @@ static instruction *decompose_mul(mul_env *env, unsigned char *R, int r, tarval
 /**
  * basic decomposition routine
  */
-static instruction *basic_decompose_mul(mul_env *env, unsigned char *R, int r, tarval *N) {
+static instruction *basic_decompose_mul(mul_env *env, unsigned char *R, int r, tarval *N)
+{
 	instruction *Ns;
 	unsigned t;
@@ -468,7 +450,8 @@ static instruction *basic_decompose_mul(mul_env *env, unsigned char *R, int r, t
  * @param env  the environment
  * @param inst the instruction
  */
-static ir_node *build_graph(mul_env *env, instruction *inst) {
+static ir_node *build_graph(mul_env *env, instruction *inst)
+{
 	ir_node *l, *r, *c;

 	if (inst->irn)
@@ -478,26 +461,25 @@ static ir_node *build_graph(mul_env *env, instruction *inst) {
 	case LEA:
 		l = build_graph(env, inst->in[0]);
 		r = build_graph(env, inst->in[1]);
-		c = new_r_Const(current_ir_graph, env->blk, env->shf_mode, new_tarval_from_long(inst->shift_count, env->shf_mode));
-		r = new_rd_Shl(env->dbg, current_ir_graph, env->blk, r, c, env->mode);
-		return inst->irn = new_rd_Add(env->dbg, current_ir_graph, env->blk, l, r, env->mode);
+		c = new_Const_long(env->shf_mode, inst->shift_count);
+		r = new_rd_Shl(env->dbg, env->blk, r, c, env->mode);
+		return inst->irn = new_rd_Add(env->dbg, env->blk, l, r, env->mode);
 	case SHIFT:
 		l = build_graph(env, inst->in[0]);
-		c = new_r_Const(current_ir_graph, env->blk, env->shf_mode, new_tarval_from_long(inst->shift_count, env->shf_mode));
-		return inst->irn = new_rd_Shl(env->dbg, current_ir_graph, env->blk, l, c, env->mode);
+		c = new_Const_long(env->shf_mode, inst->shift_count);
+		return inst->irn = new_rd_Shl(env->dbg, env->blk, l, c, env->mode);
 	case SUB:
 		l = build_graph(env, inst->in[0]);
 		r = build_graph(env, inst->in[1]);
-		return inst->irn = new_rd_Sub(env->dbg, current_ir_graph, env->blk, l, r, env->mode);
+		return inst->irn = new_rd_Sub(env->dbg, env->blk, l, r, env->mode);
 	case ADD:
 		l = build_graph(env, inst->in[0]);
 		r = build_graph(env, inst->in[1]);
-		return inst->irn = new_rd_Add(env->dbg, current_ir_graph, env->blk, l, r, env->mode);
+		return inst->irn = new_rd_Add(env->dbg, env->blk, l, r, env->mode);
 	case ZERO:
-		return inst->irn = new_r_Const(current_ir_graph, env->blk, env->mode, get_mode_null(env->mode));
+		return inst->irn = new_Const(get_mode_null(env->mode));
 	default:
-		assert(0);
-		return NULL;
+		panic("Unsupported instruction kind");
 	}
 }
@@ -505,7 +487,8 @@ static ir_node *build_graph(mul_env *env, instruction *inst) {
  * Calculate the costs for the given instruction sequence.
  * Note that additional costs due to higher register pressure are NOT evaluated yet
  */
-static int evaluate_insn(mul_env *env, instruction *inst) {
+static int evaluate_insn(mul_env *env, instruction *inst)
+{
 	int costs;

 	if (inst->costs >= 0) {
@@ -536,16 +519,17 @@ static int evaluate_insn(mul_env *env, instruction *inst) {
 	case ZERO:
 		inst->costs = costs = env->evaluate(inst->kind, NULL);
 		return costs;
-	default:
-		assert(0);
-		return 0;
+	case MUL:
+	case ROOT:
+		break;
 	}
+	panic("Unsupported instruction kind");
 }

 /**
  * Evaluate the replacement instructions and build a new graph
  * if faster than the Mul.
- * returns the root of the new graph then or irn otherwise.
+ * Returns the root of the new graph then or irn otherwise.
  *
  * @param irn     the Mul operation
 * @param operand the multiplication operand
@@ -553,7 +537,8 @@ static int evaluate_insn(mul_env *env, instruction *inst) {
  *
  * @return the new graph
  */
-static ir_node *do_decomposition(ir_node *irn, ir_node *operand, tarval *tv) {
+static ir_node *do_decomposition(ir_node *irn, ir_node *operand, tarval *tv)
+{
 	mul_env env;
 	instruction *inst;
 	unsigned char *R;
@@ -574,7 +559,7 @@ static ir_node *do_decomposition(ir_node *irn, ir_node *operand, tarval *tv) {
 	inst = decompose_mul(&env, R, r, tv);

 	/* the paper suggests 70% here */
-	mul_costs = (env.evaluate(MUL, tv) * 7) / 10;
+	mul_costs = (env.evaluate(MUL, tv) * 7 + 5) / 10;
 	if (evaluate_insn(&env, inst) <= mul_costs && !env.fail) {
 		env.op = operand;
 		env.blk = get_nodes_block(irn);
@@ -590,37 +575,50 @@ static ir_node *do_decomposition(ir_node *irn, ir_node *operand, tarval *tv) {
 }

 /* Replace Muls with Shifts and Add/Subs. */
-ir_node *arch_dep_replace_mul_with_shifts(ir_node *irn) {
-	ir_node *res = irn;
+ir_node *arch_dep_replace_mul_with_shifts(ir_node *irn)
+{
+	ir_graph *irg;
+	ir_node *res = irn;
 	ir_mode *mode = get_irn_mode(irn);
+	ir_node *left;
+	ir_node *right;
+	ir_node *operand;
+	tarval *tv;
+

 	/* If the architecture dependent optimizations were not initialized
 	   or this optimization was not enabled. */
 	if (params == NULL || (opts & arch_dep_mul_to_shift) == 0)
 		return irn;

-	if (is_Mul(irn) && mode_is_int(mode)) {
-		ir_node *left = get_binop_left(irn);
-		ir_node *right = get_binop_right(irn);
-		tarval *tv = NULL;
-		ir_node *operand = NULL;
-
-		/* Look, if one operand is a constant. */
-		if (is_Const(left)) {
-			tv = get_Const_tarval(left);
-			operand = right;
-		} else if (is_Const(right)) {
-			tv = get_Const_tarval(right);
-			operand = left;
-		}
+	if (!is_Mul(irn) || !mode_is_int(mode))
+		return res;
+
+	/* we should never do the reverse transformations again
+	   (like x+x -> 2*x) */
+	irg = get_irn_irg(irn);
+	set_irg_state(irg, IR_GRAPH_STATE_ARCH_DEP);
+
+	left = get_binop_left(irn);
+	right = get_binop_right(irn);
+	tv = NULL;
+	operand = NULL;
+
+	/* Look, if one operand is a constant. */
+	if (is_Const(left)) {
+		tv = get_Const_tarval(left);
+		operand = right;
+	} else if (is_Const(right)) {
+		tv = get_Const_tarval(right);
+		operand = left;
+	}

-		if (tv != NULL) {
-			res = do_decomposition(irn, operand, tv);
+	if (tv != NULL) {
+		res = do_decomposition(irn, operand, tv);

-			if (res != irn) {
-				hook_arch_dep_replace_mul_with_shifts(irn);
-				exchange(irn, res);
-			}
+		if (res != irn) {
+			hook_arch_dep_replace_mul_with_shifts(irn);
+			exchange(irn, res);
 		}
 	}
@@ -630,7 +628,8 @@ ir_node *arch_dep_replace_mul_with_shifts(ir_node *irn) {
 /**
  * calculated the ld2 of a tarval if tarval is 2^n, else returns -1.
  */
-static int tv_ld2(tarval *tv, int bits) {
+static int tv_ld2(tarval *tv, int bits)
+{
 	int i, k = 0, num;

 	for (num = i = 0; i < bits; ++i) {
@@ -659,7 +658,7 @@
 #define SHL(a, b) tarval_shl(a, b)
 #define SHR(a, b) tarval_shr(a, b)
 #define ADD(a, b) tarval_add(a, b)
-#define SUB(a, b) tarval_sub(a, b)
+#define SUB(a, b) tarval_sub(a, b, NULL)
 #define MUL(a, b) tarval_mul(a, b)
 #define DIV(a, b) tarval_div(a, b)
 #define MOD(a, b) tarval_mod(a, b)
@@ -681,7 +680,8 @@ struct ms {
  *
  * see Hacker's Delight: 10-6 Integer Division by Constants: Incorporation into a Compiler
  */
-static struct ms magic(tarval *d) {
+static struct ms magic(tarval *d)
+{
 	ir_mode *mode = get_tarval_mode(d);
 	ir_mode *u_mode = find_unsigned_mode(mode);
 	int bits = get_mode_size_bits(u_mode);
@@ -766,7 +766,8 @@ struct mu {
  *
  * see Hacker's Delight: 10-10 Integer Division by Constants: Incorporation into a Compiler (Unsigned)
  */
-static struct mu magicu(tarval *d) {
+static struct mu magicu(tarval *d)
+{
 	ir_mode *mode = get_tarval_mode(d);
 	int bits = get_mode_size_bits(mode);
 	int p;
@@ -834,7 +835,8 @@ static struct mu magicu(tarval *d) {
  *
  * Note that 'div' might be a mod or DivMod operation as well
  */
-static ir_node *replace_div_by_mulh(ir_node *div, tarval *tv) {
+static ir_node *replace_div_by_mulh(ir_node *div, tarval *tv)
+{
 	dbg_info *dbg = get_irn_dbg_info(div);
 	ir_node *n = get_binop_left(div);
 	ir_node *block = get_irn_n(div, -1);
@@ -850,60 +852,61 @@ static ir_node *replace_div_by_mulh(ir_node *div, tarval *tv) {
 		struct ms mag = magic(tv);

 		/* generate the Mulh instruction */
-		c = new_r_Const(current_ir_graph, block, mode, mag.M);
-		q = new_rd_Mulh(dbg, current_ir_graph, block, n, c, mode);
+		c = new_Const(mag.M);
+		q = new_rd_Mulh(dbg, block, n, c, mode);

 		/* do we need an Add or Sub */
 		if (mag.need_add)
-			q = new_rd_Add(dbg, current_ir_graph, block, q, n, mode);
+			q = new_rd_Add(dbg, block, q, n, mode);
 		else if (mag.need_sub)
-			q = new_rd_Sub(dbg, current_ir_graph, block, q, n, mode);
+			q = new_rd_Sub(dbg, block, q, n, mode);

 		/* Do we need the shift */
 		if (mag.s > 0) {
-			c = new_r_Const_long(current_ir_graph, block, mode_Iu, mag.s);
-			q = new_rd_Shrs(dbg, current_ir_graph, block, q, c, mode);
+			c = new_Const_long(mode_Iu, mag.s);
+			q = new_rd_Shrs(dbg, block, q, c, mode);
 		}

 		/* final */
-		c = new_r_Const_long(current_ir_graph, block, mode_Iu, bits-1);
-		t = new_rd_Shr(dbg, current_ir_graph, block, q, c, mode);
+		c = new_Const_long(mode_Iu, bits - 1);
+		t = new_rd_Shr(dbg, block, q, c, mode);

-		q = new_rd_Add(dbg, current_ir_graph, block, q, t, mode);
+		q = new_rd_Add(dbg, block, q, t, mode);
 	} else {
 		struct mu mag = magicu(tv);
 		ir_node *c;

 		/* generate the Mulh instruction */
-		c = new_r_Const(current_ir_graph, block, mode, mag.M);
-		q = new_rd_Mulh(dbg, current_ir_graph, block, n, c, mode);
+		c = new_Const(mag.M);
+		q = new_rd_Mulh(dbg, block, n, c, mode);

 		if (mag.need_add) {
 			if (mag.s > 0) {
 				/* use the GM scheme */
-				t = new_rd_Sub(dbg, current_ir_graph, block, n, q, mode);
+				t = new_rd_Sub(dbg, block, n, q, mode);

-				c = new_r_Const(current_ir_graph, block, mode_Iu, get_mode_one(mode_Iu));
-				t = new_rd_Shr(dbg, current_ir_graph, block, t, c, mode);
+				c = new_Const(get_mode_one(mode_Iu));
+				t = new_rd_Shr(dbg, block, t, c, mode);

-				t = new_rd_Add(dbg, current_ir_graph, block, t, q, mode);
+				t = new_rd_Add(dbg, block, t, q, mode);

-				c = new_r_Const_long(current_ir_graph, block, mode_Iu, mag.s-1);
-				q = new_rd_Shr(dbg, current_ir_graph, block, t, c, mode);
+				c = new_Const_long(mode_Iu, mag.s - 1);
+				q = new_rd_Shr(dbg, block, t, c, mode);
 			} else {
 				/* use the default scheme */
-				q = new_rd_Add(dbg, current_ir_graph, block, q, n, mode);
+				q = new_rd_Add(dbg, block, q, n, mode);
 			}
 		} else if (mag.s > 0) { /* default scheme, shift needed */
-			c = new_r_Const_long(current_ir_graph, block, mode_Iu, mag.s);
-			q = new_rd_Shr(dbg, current_ir_graph, block, q, c, mode);
+			c = new_Const_long(mode_Iu, mag.s);
+			q = new_rd_Shr(dbg, block, q, c, mode);
 		}
 	}
 	return q;
 }

 /* Replace Divs with Shifts and Add/Subs and Mulh. */
-ir_node *arch_dep_replace_div_by_const(ir_node *irn) {
+ir_node *arch_dep_replace_div_by_const(ir_node *irn)
+{
 	ir_node *res = irn;

 	/* If the architecture dependent optimizations were not initialized
@@ -911,22 +914,23 @@ ir_node *arch_dep_replace_div_by_const(ir_node *irn) {
 	if (params == NULL || (opts & arch_dep_div_by_const) == 0)
 		return irn;

-	if (get_irn_opcode(irn) == iro_Div) {
+	if (is_Div(irn)) {
 		ir_node *c = get_Div_right(irn);
 		ir_node *block, *left;
 		ir_mode *mode;
 		tarval *tv, *ntv;
 		dbg_info *dbg;
 		int n, bits;
-		int k, n_flag;
+		int k;
+		int n_flag = 0;

-		if (get_irn_op(c) != op_Const)
+		if (! is_Const(c))
 			return irn;

 		tv = get_Const_tarval(c);

 		/* check for division by zero */
-		if (classify_tarval(tv) == TV_CLASSIFY_NULL)
+		if (tarval_is_null(tv))
 			return irn;

 		left = get_Div_left(irn);
@@ -955,30 +959,35 @@ ir_node *arch_dep_replace_div_by_const(ir_node *irn) {
 			ir_node *k_node;
 			ir_node *curr = left;

-			if (k != 1) {
-				k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k - 1);
-				curr = new_rd_Shrs(dbg, current_ir_graph, block, left, k_node, mode);
-			}
+			/* create the correction code for signed values only if there might be a remainder */
+			if (! get_Div_no_remainder(irn)) {
+				if (k != 1) {
+					k_node = new_Const_long(mode_Iu, k - 1);
+					curr = new_rd_Shrs(dbg, block, left, k_node, mode);
+				}

-			k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, bits - k);
-			curr = new_rd_Shr(dbg, current_ir_graph, block, curr, k_node, mode);
+				k_node = new_Const_long(mode_Iu, bits - k);
+				curr = new_rd_Shr(dbg, block, curr, k_node, mode);

-			curr = new_rd_Add(dbg, current_ir_graph, block, left, curr, mode);
+				curr = new_rd_Add(dbg, block, left, curr, mode);
+			} else {
+				k_node = left;
+			}

-			k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k);
-			res = new_rd_Shrs(dbg, current_ir_graph, block, curr, k_node, mode);
+			k_node = new_Const_long(mode_Iu, k);
+			res = new_rd_Shrs(dbg, block, curr, k_node, mode);

 			if (n_flag) {
 				/* negate the result */
 				ir_node *k_node;

-				k_node = new_r_Const(current_ir_graph, block, mode, get_mode_null(mode));
-				res = new_rd_Sub(dbg, current_ir_graph, block, k_node, res, mode);
+				k_node = new_Const(get_mode_null(mode));
+				res = new_rd_Sub(dbg, block, k_node, res, mode);
 			}
 		} else { /* unsigned case */
 			ir_node *k_node;

-			k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k);
-			res = new_rd_Shr(dbg, current_ir_graph, block, left, k_node, mode);
+			k_node = new_Const_long(mode_Iu, k);
+			res = new_rd_Shr(dbg, block, left, k_node, mode);
 		}
 	} else { /* other constant */
@@ -994,7 +1003,8 @@ ir_node *arch_dep_replace_div_by_const(ir_node *irn) {
 }

 /* Replace Mods with Shifts and Add/Subs and Mulh. */
-ir_node *arch_dep_replace_mod_by_const(ir_node *irn) {
+ir_node *arch_dep_replace_mod_by_const(ir_node *irn)
+{
 	ir_node *res = irn;

 	/* If the architecture dependent optimizations were not initialized
@@ -1002,7 +1012,7 @@ ir_node *arch_dep_replace_mod_by_const(ir_node *irn) {
 	if (params == NULL || (opts & arch_dep_mod_by_const) == 0)
 		return irn;

-	if (get_irn_opcode(irn) == iro_Mod) {
+	if (is_Mod(irn)) {
 		ir_node *c = get_Mod_right(irn);
 		ir_node *block, *left;
 		ir_mode *mode;
@@ -1011,13 +1021,13 @@ ir_node *arch_dep_replace_mod_by_const(ir_node *irn) {
 		tarval *tv, *ntv;
 		dbg_info *dbg;
 		int n, bits;
 		int k;

-		if (get_irn_op(c) != op_Const)
+		if (! is_Const(c))
 			return irn;

 		tv = get_Const_tarval(c);

 		/* check for division by zero */
-		if (classify_tarval(tv) == TV_CLASSIFY_NULL)
+		if (tarval_is_null(tv))
 			return irn;

 		left = get_Mod_left(irn);
@@ -1047,35 +1057,35 @@ ir_node *arch_dep_replace_mod_by_const(ir_node *irn) {
 			ir_node *curr = left;

 			if (k != 1) {
-				k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k - 1);
-				curr = new_rd_Shrs(dbg, current_ir_graph, block, left, k_node, mode);
+				k_node = new_Const_long(mode_Iu, k - 1);
+				curr = new_rd_Shrs(dbg, block, left, k_node, mode);
 			}

-			k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, bits - k);
-			curr = new_rd_Shr(dbg, current_ir_graph, block, curr, k_node, mode);
+			k_node = new_Const_long(mode_Iu, bits - k);
+			curr = new_rd_Shr(dbg, block, curr, k_node, mode);

-			curr = new_rd_Add(dbg, current_ir_graph, block, left, curr, mode);
+			curr = new_rd_Add(dbg, block, left, curr, mode);

-			k_node = new_r_Const_long(current_ir_graph, block, mode, (-1) << k);
-			curr = new_rd_And(dbg, current_ir_graph, block, curr, k_node, mode);
+			k_node = new_Const_long(mode, (-1) << k);
+			curr = new_rd_And(dbg, block, curr, k_node, mode);

-			res = new_rd_Sub(dbg, current_ir_graph, block, left, curr, mode);
+			res = new_rd_Sub(dbg, block, left, curr, mode);
 		} else { /* unsigned case */
 			ir_node *k_node;

-			k_node = new_r_Const_long(current_ir_graph, block, mode, (1 << k) - 1);
-			res = new_rd_And(dbg, current_ir_graph, block, left, k_node, mode);
+			k_node = new_Const_long(mode, (1 << k) - 1);
+			res = new_rd_And(dbg, block, left, k_node, mode);
 		}
 	} else { /* other constant */
 		if (allow_Mulh(mode)) {
 			res = replace_div_by_mulh(irn, tv);

-			res = new_rd_Mul(dbg, current_ir_graph, block, res, c, mode);
+			res = new_rd_Mul(dbg, block, res, c, mode);

 			/* res = arch_dep_mul_to_shift(res); */
-			res = new_rd_Sub(dbg, current_ir_graph, block, left, res, mode);
+			res = new_rd_Sub(dbg, block, left, res, mode);
 		}
 	}
 }
@@ -1087,7 +1097,8 @@ ir_node *arch_dep_replace_mod_by_const(ir_node *irn) {
 }

 /* Replace DivMods with Shifts and Add/Subs and Mulh. */
-void arch_dep_replace_divmod_by_const(ir_node **div, ir_node **mod, ir_node *irn) {
+void arch_dep_replace_divmod_by_const(ir_node **div, ir_node **mod, ir_node *irn)
+{
 	*div = *mod = NULL;

 	/* If the architecture dependent optimizations were not initialized
@@ -1096,22 +1107,23 @@
 	    ((opts & (arch_dep_div_by_const|arch_dep_mod_by_const)) != (arch_dep_div_by_const|arch_dep_mod_by_const)))
 		return;

-	if (get_irn_opcode(irn) == iro_DivMod) {
+	if (is_DivMod(irn)) {
 		ir_node *c = get_DivMod_right(irn);
 		ir_node *block, *left;
 		ir_mode *mode;
 		tarval *tv, *ntv;
 		dbg_info *dbg;
 		int n, bits;
-		int k, n_flag;
+		int k;
+		int n_flag = 0;

-		if (get_irn_op(c) != op_Const)
+		if (! is_Const(c))
 			return;

 		tv = get_Const_tarval(c);

 		/* check for division by zero */
-		if (classify_tarval(tv) == TV_CLASSIFY_NULL)
+		if (tarval_is_null(tv))
 			return;

 		left = get_DivMod_left(irn);
@@ -1141,38 +1153,38 @@
 			ir_node *curr = left;

 			if (k != 1) {
-				k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k - 1);
-				curr = new_rd_Shrs(dbg, current_ir_graph, block, left, k_node, mode);
+				k_node = new_Const_long(mode_Iu, k - 1);
+				curr = new_rd_Shrs(dbg, block, left, k_node, mode);
 			}

-			k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, bits - k);
-			curr = new_rd_Shr(dbg, current_ir_graph, block, curr, k_node, mode);
+			k_node = new_Const_long(mode_Iu, bits - k);
+			curr = new_rd_Shr(dbg, block, curr, k_node, mode);

-			curr = new_rd_Add(dbg, current_ir_graph, block, left, curr, mode);
+			curr = new_rd_Add(dbg, block, left, curr, mode);

-			c_k = new_r_Const_long(current_ir_graph, block, mode_Iu, k);
+			c_k = new_Const_long(mode_Iu, k);

-			*div = new_rd_Shrs(dbg, current_ir_graph, block, curr, c_k, mode);
+			*div = new_rd_Shrs(dbg, block, curr, c_k, mode);

 			if (n_flag) {
 				/* negate the div result */
 				ir_node *k_node;

-				k_node = new_r_Const(current_ir_graph, block, mode, get_mode_null(mode));
-				*div = new_rd_Sub(dbg, current_ir_graph, block, k_node, *div, mode);
+				k_node = new_Const(get_mode_null(mode));
+				*div = new_rd_Sub(dbg, block, k_node, *div, mode);
 			}

-			k_node = new_r_Const_long(current_ir_graph, block, mode, (-1) << k);
-			curr = new_rd_And(dbg, current_ir_graph, block, curr, k_node, mode);
+			k_node = new_Const_long(mode, (-1) << k);
+			curr = new_rd_And(dbg, block, curr, k_node, mode);

-			*mod = new_rd_Sub(dbg, current_ir_graph, block, left, curr, mode);
+			*mod = new_rd_Sub(dbg, block, left, curr, mode);
 		} else { /* unsigned case */
 			ir_node *k_node;

-			k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k);
-			*div = new_rd_Shr(dbg, current_ir_graph, block, left, k_node, mode);
+			k_node = new_Const_long(mode_Iu, k);
+			*div = new_rd_Shr(dbg, block, left, k_node, mode);

-			k_node = new_r_Const_long(current_ir_graph, block, mode, (1 << k) - 1);
-			*mod = new_rd_And(dbg, current_ir_graph, block, left, k_node, mode);
+			k_node = new_Const_long(mode, (1 << k) - 1);
+			*mod = new_rd_And(dbg, block, left, k_node, mode);
 		}
 	} else { /* other constant */
@@ -1181,11 +1193,11 @@ void arch_dep_replace_divmod_by_const(ir_node **div, ir_node **mod, ir_node *irn
 			*div = replace_div_by_mulh(irn, tv);

-			t = new_rd_Mul(dbg, current_ir_graph, block, *div, c, mode);
+			t = new_rd_Mul(dbg, block, *div, c, mode);

 			/* t = arch_dep_mul_to_shift(t); */
-			*mod = new_rd_Sub(dbg, current_ir_graph, block, left, t, mode);
+			*mod = new_rd_Sub(dbg, block, left, t, mode);
 		}
 	}
 }
@@ -1207,6 +1219,7 @@ static const ir_settings_arch_dep_t default_params = {
 };

 /* A default parameter factory for testing purposes. */
-const ir_settings_arch_dep_t *arch_dep_default_factory(void) {
+const ir_settings_arch_dep_t *arch_dep_default_factory(void)
+{
 	return &default_params;
 }
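
For orientation when reading the hunks above: the decomposition pass rewrites a Mul by an
integer constant into the LEA/SHIFT/ADD/SUB sequence built by decompose_mul() and
build_graph(). A minimal standalone C sketch of one such rewrite (not libFirm code; the name
mul10 and the particular sequence are illustrative only, since the sequence actually chosen
depends on the cost evaluator):

/* x * 10, with 10 = 0b1010, as shifts and adds: ((x << 2) + x) << 1 */
static int mul10(int x)
{
	int t = (x << 2) + x;   /* LEA-style step: x * 5  */
	return t << 1;          /* final SHIFT:    x * 10 */
}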
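
Similarly, arch_dep_replace_div_by_const() handles a signed division by a power of two with a
Shrs/Shr/Add/Shrs sequence instead of a Div node. A standalone sketch of the generated pattern,
assuming 32-bit ints and divisor 8 (k = 3); div8 is an illustrative name only:

/* signed x / 8 rounded toward zero, without a division instruction */
static int div8(int x)
{
	int c = x >> (3 - 1);               /* Shrs: smear the sign bit      */
	c = (int)((unsigned)c >> (32 - 3)); /* Shr:  correction in 0..7      */
	return (x + c) >> 3;                /* Add + Shrs: the actual divide */
}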
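
For divisors that are not powers of two, magic() and replace_div_by_mulh() use the Hacker's
Delight scheme: multiply by a precomputed magic constant, keep only the high word (Mulh), then
apply the need_add/need_sub and shift corrections. A standalone sketch for a 32-bit signed
division by 7; the magic value 0x92492493 and shift 2 are the standard published figures for
this divisor, not values taken from this patch, and div7 is an illustrative name only:

/* signed x / 7 via a high multiply; M = 0x92492493 is negative here, so the
   need_add correction (q += x) applies, followed by the shift and the usual
   "add one for negative x" fix-up */
static int div7(int x)
{
	int q = (int)(((long long)(int)0x92492493 * x) >> 32); /* Mulh      */
	q += x;                                                /* need_add  */
	q >>= 2;                                               /* mag.s = 2 */
	q += (int)((unsigned)x >> 31);                         /* x < 0 fix */
	return q;
}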