X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firopt.c;h=f6b38c8722bd77d5e0101a7eba26848a7035f50b;hb=4f92e524762e0febc361676111b3b5b79addd03a;hp=28545b25176e743b117cf050163c326866c94d04;hpb=a88bcf9f8b48eefb74e3011f6ac6bfcd468183e5;p=libfirm diff --git a/ir/ir/iropt.c b/ir/ir/iropt.c index 28545b251..f6b38c872 100644 --- a/ir/ir/iropt.c +++ b/ir/ir/iropt.c @@ -3107,27 +3107,6 @@ static ir_node *transform_bitwise_distributive(ir_node *n, return n; } -int ir_is_equality_cmp_0(const ir_node *node) -{ - ir_relation relation = get_Cmp_relation(node); - ir_node *left = get_Cmp_left(node); - ir_node *right = get_Cmp_right(node); - ir_mode *mode = get_irn_mode(left); - - /* this probably makes no sense if unordered is involved */ - assert(!mode_is_float(mode)); - - if (!is_Const(right) || !is_Const_null(right)) - return false; - if (relation == ir_relation_equal) - return true; - if (mode_is_signed(mode)) { - return relation == ir_relation_less_greater; - } else { - return relation == ir_relation_greater; - } -} - /** * Create a 0 constant of given mode. */ @@ -3151,33 +3130,35 @@ static ir_node *transform_node_And(ir_node *n) vrp_attr *a_vrp, *b_vrp; if (is_Cmp(a) && is_Cmp(b)) { - ir_node *a_left = get_Cmp_left(a); - ir_node *a_right = get_Cmp_left(a); - ir_node *b_left = get_Cmp_left(b); - ir_node *b_right = get_Cmp_right(b); + ir_node *a_left = get_Cmp_left(a); + ir_node *a_right = get_Cmp_right(a); + ir_node *b_left = get_Cmp_left(b); + ir_node *b_right = get_Cmp_right(b); + ir_relation a_relation = get_Cmp_relation(a); + ir_relation b_relation = get_Cmp_relation(b); /* we can combine the relations of two compares with the same * operands */ if (a_left == b_left && b_left == b_right) { dbg_info *dbgi = get_irn_dbg_info(n); ir_node *block = get_nodes_block(n); - ir_relation a_relation = get_Cmp_relation(a); - ir_relation b_relation = get_Cmp_relation(b); ir_relation new_relation = a_relation & b_relation; return new_rd_Cmp(dbgi, block, a_left, a_right, new_relation); } - /* Cmp(a, 0) and Cmp(b,0) can be optimized to Cmp(a|b, 0) */ - if (ir_is_equality_cmp_0(a) && ir_is_equality_cmp_0(b) - && (get_Cmp_relation(a) & ir_relation_equal) == (get_Cmp_relation(b) & ir_relation_equal)) { - dbg_info *dbgi = get_irn_dbg_info(n); - ir_node *block = get_nodes_block(n); - ir_relation relation = get_Cmp_relation(a); - ir_mode *mode = get_irn_mode(a_left); - ir_node *n_b_left = get_irn_mode(b_left) != mode ? - new_rd_Conv(dbgi, block, b_left, mode) : b_left; - ir_node *or = new_rd_Or(dbgi, block, a_left, n_b_left, mode); - ir_graph *irg = get_irn_irg(n); - ir_node *zero = create_zero_const(irg, mode); - return new_rd_Cmp(dbgi, block, or, zero, relation); + /* Cmp(a==0) and Cmp(b==0) can be optimized to Cmp(a|b==0) */ + if (is_Const(a_right) && is_Const_null(a_right) + && is_Const(b_right) && is_Const_null(b_right) + && a_relation == b_relation && a_relation == ir_relation_equal + && !mode_is_float(get_irn_mode(a_left)) + && !mode_is_float(get_irn_mode(b_left))) { + dbg_info *dbgi = get_irn_dbg_info(n); + ir_node *block = get_nodes_block(n); + ir_mode *mode = get_irn_mode(a_left); + ir_node *n_b_left = get_irn_mode(b_left) != mode ? 
+ new_rd_Conv(dbgi, block, b_left, mode) : b_left; + ir_node *or = new_rd_Or(dbgi, block, a_left, n_b_left, mode); + ir_graph *irg = get_irn_irg(n); + ir_node *zero = create_zero_const(irg, mode); + return new_rd_Cmp(dbgi, block, or, zero, ir_relation_equal); } } @@ -4821,6 +4802,22 @@ static ir_node *transform_node_Or_Rotl(ir_node *irn_or) return n; } /* transform_node_Or_Rotl */ +static bool is_cmp_unequal_zero(const ir_node *node) +{ + ir_relation relation = get_Cmp_relation(node); + ir_node *left = get_Cmp_left(node); + ir_node *right = get_Cmp_right(node); + ir_mode *mode = get_irn_mode(left); + + if (!is_Const(right) || !is_Const_null(right)) + return false; + if (mode_is_signed(mode)) { + return relation == ir_relation_less_greater; + } else { + return relation == ir_relation_greater; + } +} + /** * Transform an Or. */ @@ -4858,6 +4855,20 @@ static ir_node *transform_node_Or(ir_node *n) ir_relation new_relation = a_relation | b_relation; return new_rd_Cmp(dbgi, block, a_left, a_right, new_relation); } + /* Cmp(a!=0) or Cmp(b!=0) => Cmp(a|b != 0) */ + if (is_cmp_unequal_zero(a) && is_cmp_unequal_zero(b) + && !mode_is_float(get_irn_mode(a_left)) + && !mode_is_float(get_irn_mode(b_left))) { + ir_graph *irg = get_irn_irg(n); + dbg_info *dbgi = get_irn_dbg_info(n); + ir_node *block = get_nodes_block(n); + ir_mode *mode = get_irn_mode(a_left); + ir_node *n_b_left = get_irn_mode(b_left) != mode ? + new_rd_Conv(dbgi, block, b_left, mode) : b_left; + ir_node *or = new_rd_Or(dbgi, block, a_left, n_b_left, mode); + ir_node *zero = create_zero_const(irg, mode); + return new_rd_Cmp(dbgi, block, or, zero, ir_relation_less_greater); + } } mode = get_irn_mode(n); @@ -5675,6 +5686,66 @@ static ir_node *transform_node_Sync(ir_node *n) return n; } /* transform_node_Sync */ +static ir_node *transform_node_Load(ir_node *n) +{ + /* if our memory predecessor is a load from the same address, then reuse the + * previous result */ + ir_node *mem = get_Load_mem(n); + ir_node *mem_pred; + + if (!is_Proj(mem)) + return n; + /* don't touch volatile loads */ + if (get_Load_volatility(n) == volatility_is_volatile) + return n; + mem_pred = get_Proj_pred(mem); + if (is_Load(mem_pred)) { + ir_node *pred_load = mem_pred; + + /* conservatively compare the 2 loads. 
TODO: This could be less strict + * with fixup code in some situations (like smaller/bigger modes) */ + if (get_Load_ptr(pred_load) != get_Load_ptr(n)) + return n; + if (get_Load_mode(pred_load) != get_Load_mode(n)) + return n; + /* all combinations of aligned/unaligned pred/n should be fine so we do + * not compare the unaligned attribute */ + { + ir_node *block = get_nodes_block(n); + ir_node *jmp = new_r_Jmp(block); + ir_graph *irg = get_irn_irg(n); + ir_node *bad = new_r_Bad(irg); + ir_mode *mode = get_Load_mode(n); + ir_node *res = new_r_Proj(pred_load, mode, pn_Load_res); + ir_node *in[pn_Load_max] = { mem, jmp, bad, res }; + ir_node *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in); + return tuple; + } + } else if (is_Store(mem_pred)) { + ir_node *pred_store = mem_pred; + ir_node *value = get_Store_value(pred_store); + + if (get_Store_ptr(pred_store) != get_Load_ptr(n)) + return n; + if (get_irn_mode(value) != get_Load_mode(n)) + return n; + /* all combinations of aligned/unaligned pred/n should be fine so we do + * not compare the unaligned attribute */ + { + ir_node *block = get_nodes_block(n); + ir_node *jmp = new_r_Jmp(block); + ir_graph *irg = get_irn_irg(n); + ir_node *bad = new_r_Bad(irg); + ir_node *res = value; + ir_node *in[pn_Load_max] = { mem, jmp, bad, res }; + ir_node *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in); + return tuple; + } + } + + return n; +} + /** * optimize a trampoline Call into a direct Call */ @@ -5818,10 +5889,10 @@ static ir_op_ops *firm_set_default_transform_node(ir_opcode code, ir_op_ops *ops CASE(Sync); CASE_PROJ(Bound); CASE_PROJ(CopyB); - CASE_PROJ(Load); CASE_PROJ(Store); CASE_PROJ_EX(Cond); CASE_PROJ_EX(Div); + CASE_PROJ_EX(Load); CASE_PROJ_EX(Mod); default: /* leave NULL */; @@ -6010,29 +6081,31 @@ static int node_cmp_attr_ASM(const ir_node *a, const ir_node *b) /* Should we really check the constraints here? Should be better, but is strange. */ n = get_ASM_n_input_constraints(a); if (n != get_ASM_n_input_constraints(b)) - return 0; + return 1; ca = get_ASM_input_constraints(a); cb = get_ASM_input_constraints(b); for (i = 0; i < n; ++i) { - if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint) + if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint + || ca[i].mode != cb[i].mode) return 1; } n = get_ASM_n_output_constraints(a); if (n != get_ASM_n_output_constraints(b)) - return 0; + return 1; ca = get_ASM_output_constraints(a); cb = get_ASM_output_constraints(b); for (i = 0; i < n; ++i) { - if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint) + if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint + || ca[i].mode != cb[i].mode) return 1; } n = get_ASM_n_clobbers(a); if (n != get_ASM_n_clobbers(b)) - return 0; + return 1; cla = get_ASM_clobbers(a); clb = get_ASM_clobbers(b); @@ -6222,7 +6295,7 @@ ir_node *identify_remember(ir_node *n) if (nn != n) { /* n is reachable again */ - edges_node_revival(nn, get_irn_irg(nn)); + edges_node_revival(nn); } return nn; @@ -6386,7 +6459,7 @@ ir_node *optimize_node(ir_node *n) Run always for transformation induced Bads. */ n = gigo(n); if (n != oldn) { - edges_node_deleted(oldn, irg); + edges_node_deleted(oldn); /* We found an existing, better node, so we can deallocate the old node. 
*/ irg_kill_node(irg, oldn); @@ -6417,7 +6490,7 @@ ir_node *optimize_node(ir_node *n) memcpy(oldn->in, n->in, ARR_LEN(n->in) * sizeof(n->in[0])); /* note the inplace edges module */ - edges_node_deleted(n, irg); + edges_node_deleted(n); /* evaluation was successful -- replace the node. */ irg_kill_node(irg, n); @@ -6448,7 +6521,7 @@ ir_node *optimize_node(ir_node *n) n = identify_cons(n); if (n != oldn) { - edges_node_deleted(oldn, irg); + edges_node_deleted(oldn); /* We found an existing, better node, so we can deallocate the old node. */ irg_kill_node(irg, oldn); @@ -6613,7 +6686,7 @@ static unsigned hash_SymConst(const ir_node *node) * @return * The operations. */ -static ir_op_ops *firm_set_default_hash(ir_opcode code, ir_op_ops *ops) +static ir_op_ops *firm_set_default_hash(unsigned code, ir_op_ops *ops) { #define CASE(a) \ case iro_##a: \ @@ -6639,7 +6712,7 @@ static ir_op_ops *firm_set_default_hash(ir_opcode code, ir_op_ops *ops) /* * Sets the default operation for an ir_ops. */ -ir_op_ops *firm_set_default_operations(ir_opcode code, ir_op_ops *ops) +ir_op_ops *firm_set_default_operations(unsigned code, ir_op_ops *ops) { ops = firm_set_default_hash(code, ops); ops = firm_set_default_computed_value(code, ops);
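
The And and Or hunks above rely on the integer identities (a == 0 && b == 0) <=> ((a | b) == 0) and (a != 0 || b != 0) <=> ((a | b) != 0), which is why both transformations refuse float modes, where unordered comparisons would break the equivalence. The new is_cmp_unequal_zero() helper accepts ir_relation_less_greater for signed modes but ir_relation_greater for unsigned ones, presumably because an unsigned "x != 0" is canonicalized to "x > 0". A minimal stand-alone sketch of the underlying identity, in plain C and independent of the libfirm API (the helper names below are invented for illustration):

	#include <assert.h>
	#include <stdint.h>

	/* One Or plus one compare replaces the two compares of (a == 0 && b == 0). */
	static int both_zero(uint32_t a, uint32_t b)
	{
		return (a | b) == 0;
	}

	/* Likewise, one Or plus one compare replaces (a != 0 || b != 0). */
	static int either_nonzero(uint32_t a, uint32_t b)
	{
		return (a | b) != 0;
	}

	int main(void)
	{
		for (uint32_t a = 0; a < 8; ++a) {
			for (uint32_t b = 0; b < 8; ++b) {
				assert(both_zero(a, b)      == (a == 0 && b == 0));
				assert(either_nonzero(a, b) == (a != 0 || b != 0));
			}
		}
		return 0;
	}

The same reasoning carries over to signed operands, since the identity only depends on whether all bits are zero, not on the sign interpretation of the value.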