X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firarch.c;h=7f15232ba9e6c3d0cf565b9cb97fc173739d50a5;hb=169fd803ea2ed08171113c1fd7ab4e528e1ebc26;hp=4d2a3ed44bf90a82bf4aa38baaa0fe39c627c47a;hpb=6c5692caa7b18f5a7554aecd9ad6f8d458e5fc27;p=libfirm diff --git a/ir/ir/irarch.c b/ir/ir/irarch.c index 4d2a3ed44..7f15232ba 100644 --- a/ir/ir/irarch.c +++ b/ir/ir/irarch.c @@ -16,14 +16,14 @@ #include "ircons_t.h" #include "irgmod.h" #include "irvrfy.h" -#include "tv.h" +#include "tv_t.h" #include "dbginfo_t.h" #include "iropt_dbg.h" #include "irflag_t.h" -#include "firmstat.h" +#include "irhooks.h" #include "ircons.h" #include "irarch.h" -#include "firmstat.h" +#include "irreflect.h" #undef DEB @@ -56,7 +56,19 @@ new_rd_Mulh (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *res; if (! op_Mulh) { - op_Mulh = new_ir_op(get_next_ir_opcode(), "Mulh", op_pin_state_floats, irop_flag_commutative, oparity_binary, 0, 0); + rflct_sig_t *sig; + int mulh_opc = get_next_ir_opcode(); + + op_Mulh = new_ir_op(mulh_opc, "Mulh", op_pin_state_floats, irop_flag_commutative, oparity_binary, 0, 0); + sig = rflct_signature_allocate(1, 3); + rflct_signature_set_arg(sig, 0, 0, "Res", RFLCT_MC(Int), 0, 0); + rflct_signature_set_arg(sig, 1, 0, "Block", RFLCT_MC(BB), 0, 0); + rflct_signature_set_arg(sig, 1, 1, "Op 0", RFLCT_MC(Int), 0, 0); + rflct_signature_set_arg(sig, 1, 2, "Op 1", RFLCT_MC(Int), 0, 0); + + rflct_new_opcode(mulh_opc, "Mulh", false); + rflct_opcode_add_signature(mulh_opc, sig); + } in[0] = op1; @@ -88,7 +100,7 @@ void arch_dep_set_opts(arch_dep_opts_t the_opts) { opts = the_opts; } -/* check, wheater a mode allows a Mulh instruction */ +/* check, whether a mode allows a Mulh instruction */ static int allow_Mulh(ir_mode *mode) { if (get_mode_size_bits(mode) > params->max_bits_for_mulh) @@ -103,18 +115,18 @@ ir_node *arch_dep_replace_mul_with_shifts(ir_node *irn) /* If the architecture dependent optimizations were not initialized or this optimization was not enabled. */ - if(params == NULL || (opts & arch_dep_mul_to_shift) == 0) + if (params == NULL || (opts & arch_dep_mul_to_shift) == 0) return irn; - if(get_irn_opcode(irn) == iro_Mul && mode_is_int(mode)) { - ir_node *block = get_nodes_block(irn); + if (get_irn_op(irn) == op_Mul && mode_is_int(mode)) { + ir_node *block = get_irn_n(irn, -1); ir_node *left = get_binop_left(irn); ir_node *right = get_binop_right(irn); tarval *tv = NULL; ir_node *operand = NULL; /* Look, if one operand is a constant. 
*/ - if(get_irn_opcode(left) == iro_Const) { + if (get_irn_opcode(left) == iro_Const) { tv = get_Const_tarval(left); operand = right; } else if(get_irn_opcode(right) == iro_Const) { @@ -122,7 +134,7 @@ ir_node *arch_dep_replace_mul_with_shifts(ir_node *irn) operand = left; } - if(tv != NULL) { + if (tv != NULL) { int maximum_shifts = params->maximum_shifts; int also_use_subs = params->also_use_subs; int highest_shift_amount = params->highest_shift_amount; @@ -295,17 +307,15 @@ ir_node *arch_dep_replace_mul_with_shifts(ir_node *irn) int amount = abs(curr_shift) - 1; ir_node *aux = operand; - assert(amount >= 0 && "What is a negative shift??"); - if(amount != 0) { - tarval *shift_amount = new_tarval_from_long(amount, mode_Iu); - ir_node *cnst = new_r_Const(current_ir_graph, block, mode_Iu, shift_amount); + if (amount != 0) { + ir_node *cnst = new_r_Const_long(current_ir_graph, block, mode_Iu, amount); aux = new_r_Shl(current_ir_graph, block, operand, cnst, mode); } - if(curr) { - if(sub) + if (curr) { + if (sub) curr = new_r_Sub(current_ir_graph, block, curr, aux, mode); else curr = new_r_Add(current_ir_graph, block, curr, aux, mode); @@ -321,7 +331,7 @@ ir_node *arch_dep_replace_mul_with_shifts(ir_node *irn) #ifdef DEB { const char *prefix = ""; - for(i = 0; i < n; i++) { + for (i = 0; i < n; ++i) { fprintf(stderr, "%s%d", prefix, shifts[i]); prefix = ", "; } @@ -338,7 +348,7 @@ ir_node *arch_dep_replace_mul_with_shifts(ir_node *irn) } if (res != irn) - stat_arch_dep_replace_mul_with_shifts(irn); + hook_arch_dep_replace_mul_with_shifts(irn); return res; } @@ -404,7 +414,7 @@ static struct ms magic(tarval *d) int bits = get_mode_size_bits(u_mode); int p; tarval *ad, *anc, *delta, *q1, *r1, *q2, *r2, *t; /* unsigned */ - pnc_number d_cmp, M_cmp; + pn_Cmp d_cmp, M_cmp; tarval *bits_minus_1, *two_bits_1; @@ -433,7 +443,7 @@ static struct ms magic(tarval *d) q1 = ADD(q1, q1); /* Update q1 = 2^p/|nc| */ r1 = ADD(r1, r1); /* Update r1 = rem(2^p, |nc|) */ - if (CMP(r1, anc) & Ge) { + if (CMP(r1, anc) & pn_Cmp_Ge) { q1 = ADD(q1, ONE(u_mode)); r1 = SUB(r1, anc); } @@ -441,17 +451,17 @@ static struct ms magic(tarval *d) q2 = ADD(q2, q2); /* Update q2 = 2^p/|d| */ r2 = ADD(r2, r2); /* Update r2 = rem(2^p, |d|) */ - if (CMP(r2, ad) & Ge) { + if (CMP(r2, ad) & pn_Cmp_Ge) { q2 = ADD(q2, ONE(u_mode)); r2 = SUB(r2, ad); } delta = SUB(ad, r2); - } while (CMP(q1, delta) & Lt || (CMP(q1, delta) & Eq && CMP(r1, ZERO(u_mode)) & Eq)); + } while (CMP(q1, delta) & pn_Cmp_Lt || (CMP(q1, delta) & pn_Cmp_Eq && CMP(r1, ZERO(u_mode)) & pn_Cmp_Eq)); d_cmp = CMP(d, ZERO(mode)); - if (d_cmp & Ge) + if (d_cmp & pn_Cmp_Ge) mag.M = ADD(CNV(q2, mode), ONE(mode)); else mag.M = SUB(ZERO(mode), ADD(CNV(q2, mode), ONE(mode))); @@ -461,10 +471,10 @@ static struct ms magic(tarval *d) mag.s = p - bits; /* need an add if d > 0 && M < 0 */ - mag.need_add = d_cmp & Gt && M_cmp & Lt; + mag.need_add = d_cmp & pn_Cmp_Gt && M_cmp & pn_Cmp_Lt; /* need a sub if d < 0 && M > 0 */ - mag.need_sub = d_cmp & Lt && M_cmp & Gt; + mag.need_sub = d_cmp & pn_Cmp_Lt && M_cmp & pn_Cmp_Gt; tarval_set_integer_overflow_mode(rem); @@ -511,7 +521,7 @@ static struct mu magicu(tarval *d) do { ++p; - if (CMP(r1, SUB(nc, r1)) & Ge) { + if (CMP(r1, SUB(nc, r1)) & pn_Cmp_Ge) { q1 = ADD(ADD(q1, q1), ONE(mode)); r1 = SUB(ADD(r1, r1), nc); } @@ -520,15 +530,15 @@ static struct mu magicu(tarval *d) r1 = ADD(r1, r1); } - if (CMP(ADD(r2, ONE(mode)), SUB(d, r2)) & Ge) { - if (CMP(q2, seven_ff) & Ge) + if (CMP(ADD(r2, ONE(mode)), SUB(d, r2)) & pn_Cmp_Ge) { + if (CMP(q2, 
seven_ff) & pn_Cmp_Ge) magu.need_add = 1; q2 = ADD(ADD(q2, q2), ONE(mode)); r2 = SUB(ADD(ADD(r2, r2), ONE(mode)), d); } else { - if (CMP(q2, two_bits_1) & Ge) + if (CMP(q2, two_bits_1) & pn_Cmp_Ge) magu.need_add = 1; q2 = ADD(q2, q2); @@ -536,7 +546,7 @@ static struct mu magicu(tarval *d) } delta = SUB(SUB(d, ONE(mode)), r2); } while (p < 2*bits && - (CMP(q1, delta) & Lt || (CMP(q1, delta) & Eq && CMP(r1, ZERO(mode)) & Eq))); + (CMP(q1, delta) & pn_Cmp_Lt || (CMP(q1, delta) & pn_Cmp_Eq && CMP(r1, ZERO(mode)) & pn_Cmp_Eq))); magu.M = ADD(q2, ONE(mode)); /* Magic number */ magu.s = p - bits; /* and shift amount */ @@ -549,13 +559,13 @@ static struct mu magicu(tarval *d) /** * build the Mulh replacement code for n / tv * - * Note thet 'div' might be a mod or DivMod operation as well + * Note that 'div' might be a mod or DivMod operation as well */ static ir_node *replace_div_by_mulh(ir_node *div, tarval *tv) { dbg_info *dbg = get_irn_dbg_info(div); ir_node *n = get_binop_left(div); - ir_node *block = get_nodes_block(div); + ir_node *block = get_irn_n(div, -1); ir_mode *mode = get_irn_mode(n); int bits = get_mode_size_bits(mode); ir_node *q, *t, *c; @@ -579,12 +589,12 @@ static ir_node *replace_div_by_mulh(ir_node *div, tarval *tv) /* Do we need the shift */ if (mag.s > 0) { - c = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(mag.s, mode_Iu)); + c = new_r_Const_long(current_ir_graph, block, mode_Iu, mag.s); q = new_rd_Shrs(dbg, current_ir_graph, block, q, c, mode); } /* final */ - c = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(bits-1, mode_Iu)); + c = new_r_Const_long(current_ir_graph, block, mode_Iu, bits-1); t = new_rd_Shr(dbg, current_ir_graph, block, q, c, mode); q = new_rd_Add(dbg, current_ir_graph, block, q, t, mode); @@ -607,7 +617,7 @@ static ir_node *replace_div_by_mulh(ir_node *div, tarval *tv) t = new_rd_Add(dbg, current_ir_graph, block, t, q, mode); - c = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(mag.s-1, mode_Iu)); + c = new_r_Const_long(current_ir_graph, block, mode_Iu, mag.s-1); q = new_rd_Shr(dbg, current_ir_graph, block, t, c, mode); } else { @@ -616,7 +626,7 @@ static ir_node *replace_div_by_mulh(ir_node *div, tarval *tv) } } else if (mag.s > 0) { /* default scheme, shift needed */ - c = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(mag.s, mode_Iu)); + c = new_r_Const_long(current_ir_graph, block, mode_Iu, mag.s); q = new_rd_Shr(dbg, current_ir_graph, block, q, c, mode); } } @@ -646,7 +656,7 @@ ir_node *arch_dep_replace_div_by_const(ir_node *irn) left = get_Div_left(irn); mode = get_irn_mode(left); - block = get_nodes_block(irn); + block = get_irn_n(irn, -1); dbg = get_irn_dbg_info(irn); tv = get_Const_tarval(c); @@ -672,16 +682,16 @@ ir_node *arch_dep_replace_div_by_const(ir_node *irn) ir_node *curr = left; if (k != 1) { - k_node = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(k - 1, mode_Iu)); + k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k - 1); curr = new_rd_Shrs(dbg, current_ir_graph, block, left, k_node, mode); } - k_node = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(bits - k, mode_Iu)); + k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, bits - k); curr = new_rd_Shr(dbg, current_ir_graph, block, curr, k_node, mode); curr = new_rd_Add(dbg, current_ir_graph, block, left, curr, mode); - k_node = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(k, mode_Iu)); + k_node = 
new_r_Const_long(current_ir_graph, block, mode_Iu, k); res = new_rd_Shrs(dbg, current_ir_graph, block, curr, k_node, mode); if (n_flag) { /* negate the result */ @@ -694,7 +704,7 @@ ir_node *arch_dep_replace_div_by_const(ir_node *irn) else { /* unsigned case */ ir_node *k_node; - k_node = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(k, mode_Iu)); + k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k); res = new_rd_Shr(dbg, current_ir_graph, block, left, k_node, mode); } } @@ -706,7 +716,7 @@ ir_node *arch_dep_replace_div_by_const(ir_node *irn) } if (res != irn) - stat_arch_dep_replace_div_by_const(irn); + hook_arch_dep_replace_div_by_const(irn); return res; } @@ -734,7 +744,7 @@ ir_node *arch_dep_replace_mod_by_const(ir_node *irn) left = get_Mod_left(irn); mode = get_irn_mode(left); - block = get_nodes_block(irn); + block = get_irn_n(irn, -1); dbg = get_irn_dbg_info(irn); tv = get_Const_tarval(c); @@ -761,16 +771,16 @@ ir_node *arch_dep_replace_mod_by_const(ir_node *irn) ir_node *curr = left; if (k != 1) { - k_node = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(k - 1, mode_Iu)); + k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k - 1); curr = new_rd_Shrs(dbg, current_ir_graph, block, left, k_node, mode); } - k_node = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(bits - k, mode_Iu)); + k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, bits - k); curr = new_rd_Shr(dbg, current_ir_graph, block, curr, k_node, mode); curr = new_rd_Add(dbg, current_ir_graph, block, left, curr, mode); - k_node = new_r_Const(current_ir_graph, block, mode, new_tarval_from_long((-1) << k, mode)); + k_node = new_r_Const_long(current_ir_graph, block, mode, (-1) << k); curr = new_rd_And(dbg, current_ir_graph, block, curr, k_node, mode); res = new_rd_Sub(dbg, current_ir_graph, block, left, curr, mode); @@ -778,7 +788,7 @@ ir_node *arch_dep_replace_mod_by_const(ir_node *irn) else { /* unsigned case */ ir_node *k_node; - k_node = new_r_Const(current_ir_graph, block, mode, new_tarval_from_long((1 << k) - 1, mode)); + k_node = new_r_Const_long(current_ir_graph, block, mode, (1 << k) - 1); res = new_rd_And(dbg, current_ir_graph, block, left, k_node, mode); } } @@ -797,7 +807,7 @@ ir_node *arch_dep_replace_mod_by_const(ir_node *irn) } if (res != irn) - stat_arch_dep_replace_mod_by_const(irn); + hook_arch_dep_replace_mod_by_const(irn); return res; } @@ -826,7 +836,7 @@ void arch_dep_replace_divmod_by_const(ir_node **div, ir_node **mod, ir_node *irn left = get_DivMod_left(irn); mode = get_irn_mode(left); - block = get_nodes_block(irn); + block = get_irn_n(irn, -1); dbg = get_irn_dbg_info(irn); tv = get_Const_tarval(c); @@ -852,16 +862,16 @@ void arch_dep_replace_divmod_by_const(ir_node **div, ir_node **mod, ir_node *irn ir_node *curr = left; if (k != 1) { - k_node = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(k - 1, mode_Iu)); + k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k - 1); curr = new_rd_Shrs(dbg, current_ir_graph, block, left, k_node, mode); } - k_node = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(bits - k, mode_Iu)); + k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, bits - k); curr = new_rd_Shr(dbg, current_ir_graph, block, curr, k_node, mode); curr = new_rd_Add(dbg, current_ir_graph, block, left, curr, mode); - c_k = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(k, mode_Iu)); + c_k = new_r_Const_long(current_ir_graph, block, 
mode_Iu, k); *div = new_rd_Shrs(dbg, current_ir_graph, block, curr, c_k, mode); @@ -872,7 +882,7 @@ void arch_dep_replace_divmod_by_const(ir_node **div, ir_node **mod, ir_node *irn *div = new_rd_Sub(dbg, current_ir_graph, block, k_node, *div, mode); } - k_node = new_r_Const(current_ir_graph, block, mode, new_tarval_from_long((-1) << k, mode)); + k_node = new_r_Const_long(current_ir_graph, block, mode, (-1) << k); curr = new_rd_And(dbg, current_ir_graph, block, curr, k_node, mode); *mod = new_rd_Sub(dbg, current_ir_graph, block, left, curr, mode); @@ -880,10 +890,10 @@ void arch_dep_replace_divmod_by_const(ir_node **div, ir_node **mod, ir_node *irn else { /* unsigned case */ ir_node *k_node; - k_node = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(k, mode_Iu)); + k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k); *div = new_rd_Shr(dbg, current_ir_graph, block, left, k_node, mode); - k_node = new_r_Const(current_ir_graph, block, mode, new_tarval_from_long((1 << k) - 1, mode)); + k_node = new_r_Const_long(current_ir_graph, block, mode, (1 << k) - 1); *mod = new_rd_And(dbg, current_ir_graph, block, left, k_node, mode); } } @@ -904,7 +914,7 @@ void arch_dep_replace_divmod_by_const(ir_node **div, ir_node **mod, ir_node *irn } if (*div) - stat_arch_dep_replace_DivMod_by_const(irn); + hook_arch_dep_replace_DivMod_by_const(irn); }
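
Note on the shift sequence built above (a minimal sketch, not part of the patch itself): for a signed division by 2^k, arch_dep_replace_div_by_const emits Shrs / Shr / Add / Shrs so that the quotient is rounded toward zero, as the Div node requires. Written out in plain C it looks like the following. This assumes 32-bit two's-complement operands and an arithmetic right shift on signed types (implementation-defined in C, but what common targets provide); the names sdiv_pow2, value, k and the small test driver are invented for this illustration only and do not appear in libfirm.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustration only: the Shrs/Shr/Add/Shrs sequence from
     * arch_dep_replace_div_by_const for a signed division by 2^k,
     * spelled out for 32-bit operands. */
    static int32_t sdiv_pow2(int32_t value, unsigned k)
    {
        const unsigned bits = 32;
        int32_t curr = value;

        if (k != 1)
            curr = value >> (k - 1);                    /* Shrs: arithmetic shift  */
        curr = (int32_t)((uint32_t)curr >> (bits - k)); /* Shr: rounding bias      */
        curr = value + curr;                            /* Add                     */
        return curr >> k;                               /* Shrs: final quotient    */
    }

    int main(void)
    {
        int32_t samples[] = { 17, -17, 1, -1, 0, 100000, -100001 };
        unsigned k;
        size_t i;

        /* compare against the compiler's own truncating division */
        for (k = 1; k <= 4; ++k)
            for (i = 0; i < sizeof(samples) / sizeof(samples[0]); ++i)
                assert(sdiv_pow2(samples[i], k) == samples[i] / (1 << (int)k));
        puts("power-of-two division matches");
        return 0;
    }

For a negative power-of-two divisor the patch uses the same sequence and only negates the result afterwards (the n_flag branch).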