X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firopt.c;h=f6b38c8722bd77d5e0101a7eba26848a7035f50b;hb=4f92e524762e0febc361676111b3b5b79addd03a;hp=7a7d41e45069314ff65599dbb929345202d8a53d;hpb=15ad7ccd8dff64e1808e1d093d4a8d7cda5af33e;p=libfirm

diff --git a/ir/ir/iropt.c b/ir/ir/iropt.c
index 7a7d41e45..f6b38c872 100644
--- a/ir/ir/iropt.c
+++ b/ir/ir/iropt.c
@@ -3107,6 +3107,17 @@ static ir_node *transform_bitwise_distributive(ir_node *n,
 	return n;
 }
 
+/**
+ * Create a 0 constant of given mode.
+ */
+static ir_node *create_zero_const(ir_graph *irg, ir_mode *mode)
+{
+	ir_tarval *tv   = get_mode_null(mode);
+	ir_node   *cnst = new_r_Const(irg, tv);
+
+	return cnst;
+}
+
 /**
  * Transform an And.
  */
@@ -3118,20 +3129,37 @@ static ir_node *transform_node_And(ir_node *n)
 	ir_mode *mode;
 	vrp_attr *a_vrp, *b_vrp;
 
-	/* we can combine the relations of two compares with the same operands */
 	if (is_Cmp(a) && is_Cmp(b)) {
-		ir_node *a_left  = get_Cmp_left(a);
-		ir_node *a_right = get_Cmp_left(a);
-		ir_node *b_left  = get_Cmp_left(b);
-		ir_node *b_right = get_Cmp_right(b);
+		ir_node    *a_left     = get_Cmp_left(a);
+		ir_node    *a_right    = get_Cmp_right(a);
+		ir_node    *b_left     = get_Cmp_left(b);
+		ir_node    *b_right    = get_Cmp_right(b);
+		ir_relation a_relation = get_Cmp_relation(a);
+		ir_relation b_relation = get_Cmp_relation(b);
+		/* we can combine the relations of two compares with the same
+		 * operands */
 		if (a_left == b_left && b_left == b_right) {
 			dbg_info    *dbgi         = get_irn_dbg_info(n);
 			ir_node     *block        = get_nodes_block(n);
-			ir_relation  a_relation   = get_Cmp_relation(a);
-			ir_relation  b_relation   = get_Cmp_relation(b);
 			ir_relation  new_relation = a_relation & b_relation;
 			return new_rd_Cmp(dbgi, block, a_left, a_right, new_relation);
 		}
+		/* Cmp(a==0) and Cmp(b==0) can be optimized to Cmp(a|b==0) */
+		if (is_Const(a_right) && is_Const_null(a_right)
+		    && is_Const(b_right) && is_Const_null(b_right)
+		    && a_relation == b_relation && a_relation == ir_relation_equal
+		    && !mode_is_float(get_irn_mode(a_left))
+		    && !mode_is_float(get_irn_mode(b_left))) {
+			dbg_info *dbgi     = get_irn_dbg_info(n);
+			ir_node  *block    = get_nodes_block(n);
+			ir_mode  *mode     = get_irn_mode(a_left);
+			ir_node  *n_b_left = get_irn_mode(b_left) != mode ?
+				new_rd_Conv(dbgi, block, b_left, mode) : b_left;
+			ir_node  *or       = new_rd_Or(dbgi, block, a_left, n_b_left, mode);
+			ir_graph *irg      = get_irn_irg(n);
+			ir_node  *zero     = create_zero_const(irg, mode);
+			return new_rd_Cmp(dbgi, block, or, zero, ir_relation_equal);
+		}
 	}
 
 	mode = get_irn_mode(n);
@@ -3777,17 +3805,6 @@ static bool is_single_bit(const ir_node *node)
 	return false;
 }
 
-/**
- * Create a 0 constant of given mode.
- */
-static ir_node *create_zero_const(ir_graph *irg, ir_mode *mode)
-{
-	ir_tarval *tv   = get_mode_null(mode);
-	ir_node   *cnst = new_r_Const(irg, tv);
-
-	return cnst;
-}
-
 /**
  * Normalizes and optimizes Cmp nodes.
  */
@@ -4785,6 +4802,22 @@ static ir_node *transform_node_Or_Rotl(ir_node *irn_or)
 	return n;
 }  /* transform_node_Or_Rotl */
 
+static bool is_cmp_unequal_zero(const ir_node *node)
+{
+	ir_relation relation = get_Cmp_relation(node);
+	ir_node    *left     = get_Cmp_left(node);
+	ir_node    *right    = get_Cmp_right(node);
+	ir_mode    *mode     = get_irn_mode(left);
+
+	if (!is_Const(right) || !is_Const_null(right))
+		return false;
+	if (mode_is_signed(mode)) {
+		return relation == ir_relation_less_greater;
+	} else {
+		return relation == ir_relation_greater;
+	}
+}
+
 /**
  * Transform an Or.
  */
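The new rule in transform_node_And merges two integer equality tests against zero into a single test on the bitwise Or of the operands. A minimal C sketch of the source-level effect (function and names are hypothetical, for illustration only):

	/* Sketch only: what the new And rule achieves, assuming both Cmp
	 * operands are integer (non-float) comparisons against 0. */
	int both_zero(unsigned a, unsigned b)
	{
		/* before the transformation: And of two Cmp(==0) results,
		 * i.e. (a == 0) & (b == 0) */
		/* after: a single Or feeding one Cmp against zero */
		return (a | b) == 0;
	}

The Or hunk further below applies the dual rule through is_cmp_unequal_zero, merging (a != 0) | (b != 0) into (a | b) != 0; for unsigned operands x > 0 is the same test as x != 0, which is why is_cmp_unequal_zero also accepts ir_relation_greater there.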
@@ -4822,6 +4855,20 @@ static ir_node *transform_node_Or(ir_node *n)
 			ir_relation new_relation = a_relation | b_relation;
 			return new_rd_Cmp(dbgi, block, a_left, a_right, new_relation);
 		}
+		/* Cmp(a!=0) or Cmp(b!=0) => Cmp(a|b != 0) */
+		if (is_cmp_unequal_zero(a) && is_cmp_unequal_zero(b)
+		    && !mode_is_float(get_irn_mode(a_left))
+		    && !mode_is_float(get_irn_mode(b_left))) {
+			ir_graph *irg      = get_irn_irg(n);
+			dbg_info *dbgi     = get_irn_dbg_info(n);
+			ir_node  *block    = get_nodes_block(n);
+			ir_mode  *mode     = get_irn_mode(a_left);
+			ir_node  *n_b_left = get_irn_mode(b_left) != mode ?
+				new_rd_Conv(dbgi, block, b_left, mode) : b_left;
+			ir_node  *or       = new_rd_Or(dbgi, block, a_left, n_b_left, mode);
+			ir_node  *zero     = create_zero_const(irg, mode);
+			return new_rd_Cmp(dbgi, block, or, zero, ir_relation_less_greater);
+		}
 	}
 
 	mode = get_irn_mode(n);
@@ -5639,6 +5686,66 @@ static ir_node *transform_node_Sync(ir_node *n)
 	return n;
 }  /* transform_node_Sync */
 
+static ir_node *transform_node_Load(ir_node *n)
+{
+	/* if our memory predecessor is a load from the same address, then reuse the
+	 * previous result */
+	ir_node *mem = get_Load_mem(n);
+	ir_node *mem_pred;
+
+	if (!is_Proj(mem))
+		return n;
+	/* don't touch volatile loads */
+	if (get_Load_volatility(n) == volatility_is_volatile)
+		return n;
+	mem_pred = get_Proj_pred(mem);
+	if (is_Load(mem_pred)) {
+		ir_node *pred_load = mem_pred;
+
+		/* conservatively compare the 2 loads. TODO: This could be less strict
+		 * with fixup code in some situations (like smaller/bigger modes) */
+		if (get_Load_ptr(pred_load) != get_Load_ptr(n))
+			return n;
+		if (get_Load_mode(pred_load) != get_Load_mode(n))
+			return n;
+		/* all combinations of aligned/unaligned pred/n should be fine so we do
+		 * not compare the unaligned attribute */
+		{
+			ir_node  *block = get_nodes_block(n);
+			ir_node  *jmp   = new_r_Jmp(block);
+			ir_graph *irg   = get_irn_irg(n);
+			ir_node  *bad   = new_r_Bad(irg);
+			ir_mode  *mode  = get_Load_mode(n);
+			ir_node  *res   = new_r_Proj(pred_load, mode, pn_Load_res);
+			ir_node  *in[pn_Load_max] = { mem, jmp, bad, res };
+			ir_node  *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in);
+			return tuple;
+		}
+	} else if (is_Store(mem_pred)) {
+		ir_node *pred_store = mem_pred;
+		ir_node *value      = get_Store_value(pred_store);
+
+		if (get_Store_ptr(pred_store) != get_Load_ptr(n))
+			return n;
+		if (get_irn_mode(value) != get_Load_mode(n))
+			return n;
+		/* all combinations of aligned/unaligned pred/n should be fine so we do
+		 * not compare the unaligned attribute */
+		{
+			ir_node  *block = get_nodes_block(n);
+			ir_node  *jmp   = new_r_Jmp(block);
+			ir_graph *irg   = get_irn_irg(n);
+			ir_node  *bad   = new_r_Bad(irg);
+			ir_node  *res   = value;
+			ir_node  *in[pn_Load_max] = { mem, jmp, bad, res };
+			ir_node  *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in);
+			return tuple;
+		}
+	}
+
+	return n;
+}
+
 /**
  * optimize a trampoline Call into a direct Call
  */
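transform_node_Load above is a simple load-forwarding pass: a non-volatile Load whose memory predecessor is a Load or Store of the same pointer and mode is replaced by a Tuple that reuses the earlier value, making the second memory access dead. A C sketch of the redundancy it removes (hypothetical function, illustration only):

	/* Sketch only: the pattern transform_node_Load folds away. */
	int load_after_store(int *p)
	{
		*p = 42;    /* Store to *p ...                           */
		return *p;  /* ... makes this Load redundant; the value
		             * 42 is forwarded and the Load disappears. */
	}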
@@ -5694,9 +5801,6 @@ static ir_node *transform_node_Call(ir_node *call)
 	}
 	var = get_method_variadicity(mtp);
 	set_method_variadicity(ctp, var);
-	if (var == variadicity_variadic) {
-		set_method_first_variadic_param_index(ctp, get_method_first_variadic_param_index(mtp) + 1);
-	}
 	/* When we resolve a trampoline, the function must be called by a this-call */
 	set_method_calling_convention(ctp, get_method_calling_convention(mtp) | cc_this_call);
 	set_method_additional_properties(ctp, get_method_additional_properties(mtp));
@@ -5785,10 +5889,10 @@ static ir_op_ops *firm_set_default_transform_node(ir_opcode code, ir_op_ops *ops
 	CASE(Sync);
 	CASE_PROJ(Bound);
 	CASE_PROJ(CopyB);
-	CASE_PROJ(Load);
 	CASE_PROJ(Store);
 	CASE_PROJ_EX(Cond);
 	CASE_PROJ_EX(Div);
+	CASE_PROJ_EX(Load);
 	CASE_PROJ_EX(Mod);
 	default:
 		/* leave NULL */;
@@ -5893,7 +5997,7 @@ static int node_cmp_attr_Load(const ir_node *a, const ir_node *b)
 		/* NEVER do CSE on volatile Loads */
 		return 1;
 	/* do not CSE Loads with different alignment. Be conservative. */
-	if (get_Load_align(a) != get_Load_align(b))
+	if (get_Load_unaligned(a) != get_Load_unaligned(b))
 		return 1;
 
 	return get_Load_mode(a) != get_Load_mode(b);
@@ -5903,7 +6007,7 @@ static int node_cmp_attr_Store(const ir_node *a, const ir_node *b)
 {
 	/* do not CSE Stores with different alignment. Be conservative. */
-	if (get_Store_align(a) != get_Store_align(b))
+	if (get_Store_unaligned(a) != get_Store_unaligned(b))
 		return 1;
 
 	/* NEVER do CSE on volatile Stores */
@@ -5977,29 +6081,31 @@ static int node_cmp_attr_ASM(const ir_node *a, const ir_node *b)
 	/* Should we really check the constraints here? Should be better, but is strange. */
 	n = get_ASM_n_input_constraints(a);
 	if (n != get_ASM_n_input_constraints(b))
-		return 0;
+		return 1;
 
 	ca = get_ASM_input_constraints(a);
 	cb = get_ASM_input_constraints(b);
 	for (i = 0; i < n; ++i) {
-		if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint)
+		if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint
+		    || ca[i].mode != cb[i].mode)
 			return 1;
 	}
 
 	n = get_ASM_n_output_constraints(a);
 	if (n != get_ASM_n_output_constraints(b))
-		return 0;
+		return 1;
 
 	ca = get_ASM_output_constraints(a);
 	cb = get_ASM_output_constraints(b);
 	for (i = 0; i < n; ++i) {
-		if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint)
+		if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint
+		    || ca[i].mode != cb[i].mode)
 			return 1;
 	}
 
 	n = get_ASM_n_clobbers(a);
 	if (n != get_ASM_n_clobbers(b))
-		return 0;
+		return 1;
 
 	cla = get_ASM_clobbers(a);
 	clb = get_ASM_clobbers(b);
@@ -6189,7 +6295,7 @@ ir_node *identify_remember(ir_node *n)
 
 	if (nn != n) {
 		/* n is reachable again */
-		edges_node_revival(nn, get_irn_irg(nn));
+		edges_node_revival(nn);
 	}
 
 	return nn;
@@ -6353,7 +6459,7 @@ ir_node *optimize_node(ir_node *n)
 	   Run always for transformation induced Bads.  */
 	n = gigo(n);
 	if (n != oldn) {
-		edges_node_deleted(oldn, irg);
+		edges_node_deleted(oldn);
 
 		/* We found an existing, better node, so we can deallocate the old node. */
 		irg_kill_node(irg, oldn);
@@ -6384,7 +6490,7 @@ ir_node *optimize_node(ir_node *n)
 			memcpy(oldn->in, n->in, ARR_LEN(n->in) * sizeof(n->in[0]));
 
 			/* note the inplace edges module */
-			edges_node_deleted(n, irg);
+			edges_node_deleted(n);
 
 			/* evaluation was successful -- replace the node. */
 			irg_kill_node(irg, n);
@@ -6415,7 +6521,7 @@ ir_node *optimize_node(ir_node *n)
 		n = identify_cons(n);
 
 		if (n != oldn) {
-			edges_node_deleted(oldn, irg);
+			edges_node_deleted(oldn);
 
 			/* We found an existing, better node, so we can deallocate the old node. */
 			irg_kill_node(irg, oldn);
@@ -6580,7 +6686,7 @@ static unsigned hash_SymConst(const ir_node *node)
  * @return
  *    The operations.
  */
-static ir_op_ops *firm_set_default_hash(ir_opcode code, ir_op_ops *ops)
+static ir_op_ops *firm_set_default_hash(unsigned code, ir_op_ops *ops)
 {
 #define CASE(a)                                    \
 	case iro_##a:                                  \
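The node_cmp_attr_* fixes above follow the CSE contract that these callbacks return 0 only when the attributes are equal (the nodes may be merged) and nonzero when they differ; the old ASM code returned 0 on a constraint-count mismatch, wrongly declaring differing ASM nodes equal. A minimal sketch of that contract (hypothetical helper, not part of the patch):

	/* Hypothetical comparison following the same contract as
	 * node_cmp_attr_Load/Store/ASM: 0 means equal (CSE may merge),
	 * nonzero means different (keep both nodes). */
	static int cmp_attr_example(unsigned n_a, unsigned n_b)
	{
		if (n_a != n_b)
			return 1;  /* differ: returning 0 here would enable bad CSE */
		return 0;      /* equal */
	}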
@@ -6606,7 +6712,7 @@ static ir_op_ops *firm_set_default_hash(ir_opcode code, ir_op_ops *ops)
 /*
  * Sets the default operation for an ir_ops.
  */
-ir_op_ops *firm_set_default_operations(ir_opcode code, ir_op_ops *ops)
+ir_op_ops *firm_set_default_operations(unsigned code, ir_op_ops *ops)
 {
 	ops = firm_set_default_hash(code, ops);
 	ops = firm_set_default_computed_value(code, ops);